Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig | 136
-rw-r--r--  arch/x86/Kconfig.debug | 1
-rw-r--r--  arch/x86/Makefile | 30
-rw-r--r--  arch/x86/boot/Makefile | 17
-rw-r--r--  arch/x86/boot/bioscall.S | 6
-rw-r--r--  arch/x86/boot/boot.h | 10
-rw-r--r--  arch/x86/boot/compressed/Makefile | 2
-rw-r--r--  arch/x86/boot/compressed/aslr.c | 316
-rw-r--r--  arch/x86/boot/compressed/cmdline.c | 2
-rw-r--r--  arch/x86/boot/compressed/cpuflags.c | 12
-rw-r--r--  arch/x86/boot/compressed/head_32.S | 10
-rw-r--r--  arch/x86/boot/compressed/head_64.S | 16
-rw-r--r--  arch/x86/boot/compressed/misc.c | 18
-rw-r--r--  arch/x86/boot/compressed/misc.h | 37
-rw-r--r--  arch/x86/boot/copy.S | 22
-rw-r--r--  arch/x86/boot/cpucheck.c | 100
-rw-r--r--  arch/x86/boot/cpuflags.c | 119
-rw-r--r--  arch/x86/boot/cpuflags.h | 19
-rw-r--r--  arch/x86/boot/header.S | 9
-rw-r--r--  arch/x86/boot/video.h | 2
-rw-r--r--  arch/x86/crypto/Makefile | 1
-rw-r--r--  arch/x86/crypto/aesni-intel_avx-x86_64.S | 2811
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c | 147
-rw-r--r--  arch/x86/include/asm/amd_nb.h | 2
-rw-r--r--  arch/x86/include/asm/archrandom.h | 21
-rw-r--r--  arch/x86/include/asm/barrier.h | 43
-rw-r--r--  arch/x86/include/asm/cpufeature.h | 1
-rw-r--r--  arch/x86/include/asm/dmi.h | 6
-rw-r--r--  arch/x86/include/asm/efi.h | 80
-rw-r--r--  arch/x86/include/asm/fixmap.h | 59
-rw-r--r--  arch/x86/include/asm/fpu-internal.h | 13
-rw-r--r--  arch/x86/include/asm/futex.h | 21
-rw-r--r--  arch/x86/include/asm/hash.h | 7
-rw-r--r--  arch/x86/include/asm/hw_irq.h | 3
-rw-r--r--  arch/x86/include/asm/intel-mid.h | 48
-rw-r--r--  arch/x86/include/asm/iosf_mbi.h | 90
-rw-r--r--  arch/x86/include/asm/irq.h | 1
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 3
-rw-r--r--  arch/x86/include/asm/kvm_para.h | 33
-rw-r--r--  arch/x86/include/asm/mce.h | 1
-rw-r--r--  arch/x86/include/asm/microcode.h | 15
-rw-r--r--  arch/x86/include/asm/microcode_amd.h | 7
-rw-r--r--  arch/x86/include/asm/mpspec.h | 1
-rw-r--r--  arch/x86/include/asm/mwait.h | 43
-rw-r--r--  arch/x86/include/asm/page.h | 1
-rw-r--r--  arch/x86/include/asm/page_32.h | 4
-rw-r--r--  arch/x86/include/asm/page_64_types.h | 15
-rw-r--r--  arch/x86/include/asm/paravirt.h | 2
-rw-r--r--  arch/x86/include/asm/paravirt_types.h | 9
-rw-r--r--  arch/x86/include/asm/pci.h | 3
-rw-r--r--  arch/x86/include/asm/pgtable-2level.h | 100
-rw-r--r--  arch/x86/include/asm/pgtable.h | 25
-rw-r--r--  arch/x86/include/asm/pgtable_64_types.h | 2
-rw-r--r--  arch/x86/include/asm/pgtable_types.h | 6
-rw-r--r--  arch/x86/include/asm/preempt.h | 11
-rw-r--r--  arch/x86/include/asm/processor.h | 42
-rw-r--r--  arch/x86/include/asm/ptrace.h | 1
-rw-r--r--  arch/x86/include/asm/setup.h | 3
-rw-r--r--  arch/x86/include/asm/smp.h | 1
-rw-r--r--  arch/x86/include/asm/thread_info.h | 8
-rw-r--r--  arch/x86/include/asm/timer.h | 78
-rw-r--r--  arch/x86/include/asm/tlbflush.h | 6
-rw-r--r--  arch/x86/include/asm/tsc.h | 3
-rw-r--r--  arch/x86/include/asm/uaccess.h | 124
-rw-r--r--  arch/x86/include/asm/uaccess_64.h | 4
-rw-r--r--  arch/x86/include/asm/uv/uv.h | 2
-rw-r--r--  arch/x86/include/asm/vmx.h | 1
-rw-r--r--  arch/x86/include/asm/x86_init.h | 2
-rw-r--r--  arch/x86/include/asm/xen/page.h | 8
-rw-r--r--  arch/x86/include/asm/xsave.h | 14
-rw-r--r--  arch/x86/include/uapi/asm/bootparam.h | 2
-rw-r--r--  arch/x86/include/uapi/asm/hyperv.h | 13
-rw-r--r--  arch/x86/include/uapi/asm/msr-index.h | 2
-rw-r--r--  arch/x86/include/uapi/asm/sembuf.h | 10
-rw-r--r--  arch/x86/include/uapi/asm/stat.h | 42
-rw-r--r--  arch/x86/kernel/Makefile | 13
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 7
-rw-r--r--  arch/x86/kernel/acpi/cstate.c | 23
-rw-r--r--  arch/x86/kernel/amd_nb.c | 2
-rw-r--r--  arch/x86/kernel/apic/apic.c | 66
-rw-r--r--  arch/x86/kernel/apic/apic_flat_64.c | 5
-rw-r--r--  arch/x86/kernel/apic/apic_noop.c | 1
-rw-r--r--  arch/x86/kernel/apic/io_apic.c | 23
-rw-r--r--  arch/x86/kernel/apic/ipi.c | 1
-rw-r--r--  arch/x86/kernel/apic/summit_32.c | 1
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c | 1
-rw-r--r--  arch/x86/kernel/apic/x2apic_phys.c | 1
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 1
-rw-r--r--  arch/x86/kernel/check.c | 2
-rw-r--r--  arch/x86/kernel/cpu/Makefile | 3
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 28
-rw-r--r--  arch/x86/kernel/cpu/centaur.c | 1
-rw-r--r--  arch/x86/kernel/cpu/common.c | 14
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c | 1
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 42
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-apei.c | 14
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 12
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel.c | 1
-rw-r--r--  arch/x86/kernel/cpu/mcheck/p5.c | 1
-rw-r--r--  arch/x86/kernel/cpu/mcheck/winchip.c | 1
-rw-r--r--  arch/x86/kernel/cpu/microcode/Makefile | 7
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd.c (renamed from arch/x86/kernel/microcode_amd.c) | 15
-rw-r--r--  arch/x86/kernel/cpu/microcode/amd_early.c (renamed from arch/x86/kernel/microcode_amd_early.c) | 266
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c (renamed from arch/x86/kernel/microcode_core.c) | 0
-rw-r--r--  arch/x86/kernel/cpu/microcode/core_early.c (renamed from arch/x86/kernel/microcode_core_early.c) | 0
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c (renamed from arch/x86/kernel/microcode_intel.c) | 2
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel_early.c (renamed from arch/x86/kernel/microcode_intel_early.c) | 10
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel_lib.c (renamed from arch/x86/kernel/microcode_intel_lib.c) | 0
-rw-r--r--  arch/x86/kernel/cpu/mtrr/generic.c | 4
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 16
-rw-r--r--  arch/x86/kernel/cpu/perf_event.h | 15
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_ibs.c | 53
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_rapl.c | 679
-rw-r--r--  arch/x86/kernel/cpu/rdrand.c | 14
-rw-r--r--  arch/x86/kernel/cpu/transmeta.c | 1
-rw-r--r--  arch/x86/kernel/cpu/umc.c | 1
-rw-r--r--  arch/x86/kernel/crash.c | 1
-rw-r--r--  arch/x86/kernel/doublefault.c | 1
-rw-r--r--  arch/x86/kernel/e820.c | 2
-rw-r--r--  arch/x86/kernel/entry_32.S | 4
-rw-r--r--  arch/x86/kernel/entry_64.S | 2
-rw-r--r--  arch/x86/kernel/ftrace.c | 83
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c | 1
-rw-r--r--  arch/x86/kernel/iosf_mbi.c | 226
-rw-r--r--  arch/x86/kernel/irq.c | 96
-rw-r--r--  arch/x86/kernel/irqinit.c | 4
-rw-r--r--  arch/x86/kernel/kgdb.c | 1
-rw-r--r--  arch/x86/kernel/ksysfs.c | 340
-rw-r--r--  arch/x86/kernel/kvm.c | 34
-rw-r--r--  arch/x86/kernel/machine_kexec_32.c | 1
-rw-r--r--  arch/x86/kernel/pci-nommu.c | 1
-rw-r--r--  arch/x86/kernel/process_32.c | 1
-rw-r--r--  arch/x86/kernel/quirks.c | 37
-rw-r--r--  arch/x86/kernel/setup.c | 52
-rw-r--r--  arch/x86/kernel/smpboot.c | 8
-rw-r--r--  arch/x86/kernel/traps.c | 22
-rw-r--r--  arch/x86/kernel/tsc.c | 328
-rw-r--r--  arch/x86/kernel/tsc_msr.c | 127
-rw-r--r--  arch/x86/kernel/tsc_sync.c | 1
-rw-r--r--  arch/x86/kernel/vsmp_64.c | 8
-rw-r--r--  arch/x86/kernel/x86_init.c | 4
-rw-r--r--  arch/x86/kernel/xsave.c | 10
-rw-r--r--  arch/x86/kvm/Kconfig | 2
-rw-r--r--  arch/x86/kvm/cpuid.h | 8
-rw-r--r--  arch/x86/kvm/i8254.c | 18
-rw-r--r--  arch/x86/kvm/lapic.c | 19
-rw-r--r--  arch/x86/kvm/lapic.h | 2
-rw-r--r--  arch/x86/kvm/mmu.c | 12
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 8
-rw-r--r--  arch/x86/kvm/svm.c | 15
-rw-r--r--  arch/x86/kvm/vmx.c | 335
-rw-r--r--  arch/x86/kvm/x86.c | 140
-rw-r--r--  arch/x86/kvm/x86.h | 2
-rw-r--r--  arch/x86/lguest/boot.c | 12
-rw-r--r--  arch/x86/lib/Makefile | 2
-rw-r--r--  arch/x86/lib/copy_user_64.S | 12
-rw-r--r--  arch/x86/lib/delay.c | 1
-rw-r--r--  arch/x86/lib/hash.c | 88
-rw-r--r--  arch/x86/lib/x86-opcode-map.txt | 4
-rw-r--r--  arch/x86/math-emu/errors.c | 5
-rw-r--r--  arch/x86/mm/fault.c | 32
-rw-r--r--  arch/x86/mm/gup.c | 21
-rw-r--r--  arch/x86/mm/hugetlbpage.c | 9
-rw-r--r--  arch/x86/mm/init_32.c | 5
-rw-r--r--  arch/x86/mm/init_64.c | 2
-rw-r--r--  arch/x86/mm/kmmio.c | 1
-rw-r--r--  arch/x86/mm/memtest.c | 2
-rw-r--r--  arch/x86/mm/numa.c | 65
-rw-r--r--  arch/x86/mm/numa_32.c | 2
-rw-r--r--  arch/x86/mm/pageattr-test.c | 1
-rw-r--r--  arch/x86/mm/pageattr.c | 461
-rw-r--r--  arch/x86/mm/srat.c | 21
-rw-r--r--  arch/x86/mm/tlb.c | 52
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 14
-rw-r--r--  arch/x86/pci/fixup.c | 1
-rw-r--r--  arch/x86/pci/intel_mid_pci.c | 6
-rw-r--r--  arch/x86/pci/mmconfig-shared.c | 1
-rw-r--r--  arch/x86/pci/mmconfig_32.c | 1
-rw-r--r--  arch/x86/pci/xen.c | 2
-rw-r--r--  arch/x86/platform/efi/efi-bgrt.c | 12
-rw-r--r--  arch/x86/platform/efi/efi.c | 358
-rw-r--r--  arch/x86/platform/efi/efi_32.c | 18
-rw-r--r--  arch/x86/platform/efi/efi_64.c | 129
-rw-r--r--  arch/x86/platform/efi/efi_stub_64.S | 54
-rw-r--r--  arch/x86/platform/intel-mid/Makefile | 4
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_emc1403.c | 4
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c | 2
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_ipc.h | 5
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_lis331.c | 4
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_max7315.c | 2
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c | 2
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_msic.h | 4
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c | 2
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_tca6416.c | 4
-rw-r--r--  arch/x86/platform/intel-mid/early_printk_intel_mid.c | 1
-rw-r--r--  arch/x86/platform/intel-mid/intel-mid.c | 64
-rw-r--r--  arch/x86/platform/intel-mid/intel_mid_weak_decls.h | 19
-rw-r--r--  arch/x86/platform/intel-mid/mfld.c | 75
-rw-r--r--  arch/x86/platform/intel-mid/mrfl.c | 103
-rw-r--r--  arch/x86/platform/intel-mid/sfi.c | 46
-rw-r--r--  arch/x86/platform/iris/iris.c | 1
-rw-r--r--  arch/x86/platform/olpc/olpc-xo15-sci.c | 3
-rw-r--r--  arch/x86/platform/uv/tlb_uv.c | 66
-rw-r--r--  arch/x86/platform/uv/uv_nmi.c | 65
-rw-r--r--  arch/x86/realmode/init.c | 26
-rw-r--r--  arch/x86/realmode/rm/Makefile | 17
-rw-r--r--  arch/x86/realmode/rm/reboot.S | 1
-rw-r--r--  arch/x86/realmode/rm/trampoline_32.S | 1
-rw-r--r--  arch/x86/realmode/rm/trampoline_64.S | 1
-rw-r--r--  arch/x86/syscalls/syscall_32.tbl | 2
-rw-r--r--  arch/x86/syscalls/syscall_64.tbl | 2
-rw-r--r--  arch/x86/tools/relocs.c | 50
-rw-r--r--  arch/x86/tools/relocs.h | 7
-rw-r--r--  arch/x86/tools/relocs_common.c | 16
-rw-r--r--  arch/x86/vdso/vclock_gettime.c | 8
-rw-r--r--  arch/x86/vdso/vdso.S | 1
-rw-r--r--  arch/x86/vdso/vdsox32.S | 1
-rw-r--r--  arch/x86/xen/Kconfig | 4
-rw-r--r--  arch/x86/xen/enlighten.c | 138
-rw-r--r--  arch/x86/xen/grant-table.c | 64
-rw-r--r--  arch/x86/xen/irq.c | 13
-rw-r--r--  arch/x86/xen/mmu.c | 186
-rw-r--r--  arch/x86/xen/p2m.c | 15
-rw-r--r--  arch/x86/xen/platform-pci-unplug.c | 79
-rw-r--r--  arch/x86/xen/setup.c | 44
-rw-r--r--  arch/x86/xen/smp.c | 49
-rw-r--r--  arch/x86/xen/spinlock.c | 2
-rw-r--r--  arch/x86/xen/time.c | 1
-rw-r--r--  arch/x86/xen/xen-head.S | 25
-rw-r--r--  arch/x86/xen/xen-ops.h | 1
230 files changed, 9170 insertions, 1643 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f85fe3459cb6..0af5250d914f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -23,9 +23,11 @@ config X86
def_bool y
select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
select ARCH_MIGHT_HAVE_PC_PARPORT
+ select ARCH_MIGHT_HAVE_PC_SERIO
select HAVE_AOUT if X86_32
select HAVE_UNSTABLE_SCHED_CLOCK
select ARCH_SUPPORTS_NUMA_BALANCING
+ select ARCH_SUPPORTS_INT128 if X86_64
select ARCH_WANTS_PROT_NUMA_PROT_NONE
select HAVE_IDE
select HAVE_OPROFILE
@@ -124,6 +126,7 @@ config X86
select RTC_LIB
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
+ select HAVE_CC_STACKPROTECTOR
config INSTRUCTION_DECODER
def_bool y
@@ -277,13 +280,13 @@ config SMP
bool "Symmetric multi-processing support"
---help---
This enables support for systems with more than one CPU. If you have
- a system with only one CPU, like most personal computers, say N. If
- you have a system with more than one CPU, say Y.
+ a system with only one CPU, say N. If you have a system with more
+ than one CPU, say Y.
- If you say N here, the kernel will run on single and multiprocessor
+ If you say N here, the kernel will run on uni- and multiprocessor
machines, but will use only one CPU of a multiprocessor machine. If
you say Y here, the kernel will run on many, but not all,
- singleprocessor machines. On a singleprocessor machine, the kernel
+ uniprocessor machines. On a uniprocessor machine, the kernel
will run faster if you say N here.
Note that if you say Y here and choose architecture "586" or
@@ -437,42 +440,27 @@ config X86_INTEL_CE
This option compiles in support for the CE4100 SOC for settop
boxes and media devices.
-config X86_WANT_INTEL_MID
+config X86_INTEL_MID
bool "Intel MID platform support"
depends on X86_32
depends on X86_EXTENDED_PLATFORM
- ---help---
- Select to build a kernel capable of supporting Intel MID platform
- systems which do not have the PCI legacy interfaces (Moorestown,
- Medfield). If you are building for a PC class system say N here.
-
-if X86_WANT_INTEL_MID
-
-config X86_INTEL_MID
- bool
-
-config X86_MDFLD
- bool "Medfield MID platform"
+ depends on X86_PLATFORM_DEVICES
depends on PCI
depends on PCI_GOANY
depends on X86_IO_APIC
- select X86_INTEL_MID
select SFI
+ select I2C
select DW_APB_TIMER
select APB_TIMER
- select I2C
- select SPI
select INTEL_SCU_IPC
- select X86_PLATFORM_DEVICES
select MFD_INTEL_MSIC
---help---
- Medfield is Intel's Low Power Intel Architecture (LPIA) based Moblin
- Internet Device(MID) platform.
- Unlike standard x86 PCs, Medfield does not have many legacy devices
- nor standard legacy replacement devices/features. e.g. Medfield does
- not contain i8259, i8254, HPET, legacy BIOS, most of the io ports.
+ Select to build a kernel capable of supporting Intel MID (Mobile
+ Internet Device) platform systems which do not have the PCI legacy
+ interfaces. If you are building for a PC class system say N here.
-endif
+ Intel MID platforms are based on an Intel processor and chipset which
+ consume less power than most of the x86 derivatives.
config X86_INTEL_LPSS
bool "Intel Low Power Subsystem Support"
@@ -745,6 +733,7 @@ config APB_TIMER
# The code disables itself when not needed.
config DMI
default y
+ select DMI_SCAN_MACHINE_NON_EFI_FALLBACK
bool "Enable DMI scanning" if EXPERT
---help---
Enabled scanning of DMI to identify machine quirks. Say Y
@@ -1063,9 +1052,9 @@ config MICROCODE_INTEL
This options enables microcode patch loading support for Intel
processors.
- For latest news and information on obtaining all the required
- Intel ingredients for this driver, check:
- <http://www.urbanmyth.org/microcode/>.
+ For the current Intel microcode data package go to
+ <https://downloadcenter.intel.com> and search for
+ 'Linux Processor Microcode Data File'.
config MICROCODE_AMD
bool "AMD microcode loading support"
@@ -1079,10 +1068,6 @@ config MICROCODE_OLD_INTERFACE
def_bool y
depends on MICROCODE
-config MICROCODE_INTEL_LIB
- def_bool y
- depends on MICROCODE_INTEL
-
config MICROCODE_INTEL_EARLY
def_bool n
@@ -1616,22 +1601,6 @@ config SECCOMP
If unsure, say Y. Only embedded should say N here.
-config CC_STACKPROTECTOR
- bool "Enable -fstack-protector buffer overflow detection"
- ---help---
- This option turns on the -fstack-protector GCC feature. This
- feature puts, at the beginning of functions, a canary value on
- the stack just before the return address, and validates
- the value just before actually returning. Stack based buffer
- overflows (that need to overwrite this return address) now also
- overwrite the canary, which gets detected and the attack is then
- neutralized via a kernel panic.
-
- This feature requires gcc version 4.2 or above, or a distribution
- gcc with the feature backported. Older versions are automatically
- detected and for those versions, this configuration option is
- ignored. (and a warning is printed during bootup)
-
source kernel/Kconfig.hz
config KEXEC
@@ -1727,16 +1696,67 @@ config RELOCATABLE
Note: If CONFIG_RELOCATABLE=y, then the kernel runs from the address
it has been loaded at and the compile time physical address
- (CONFIG_PHYSICAL_START) is ignored.
+ (CONFIG_PHYSICAL_START) is used as the minimum location.
-# Relocation on x86-32 needs some additional build support
+config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image"
+ depends on RELOCATABLE
+ depends on !HIBERNATION
+ default n
+ ---help---
+ Randomizes the physical and virtual address at which the
+ kernel image is decompressed, as a security feature that
+ deters exploit attempts relying on knowledge of the location
+ of kernel internals.
+
+ Entropy is generated using the RDRAND instruction if it is
+ supported. If RDTSC is supported, it is used as well. If
+ neither RDRAND nor RDTSC are supported, then randomness is
+ read from the i8254 timer.
+
+ The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET,
+ and aligned according to PHYSICAL_ALIGN. Since the kernel is
+ built using 2GiB addressing, and PHYSICAL_ALIGN must be at a
+ minimum of 2MiB, only 10 bits of entropy are theoretically
+ possible. At best, due to page table layouts, 64-bit can use
+ 9 bits of entropy and 32-bit uses 8 bits.
+
+ If unsure, say N.
+
+config RANDOMIZE_BASE_MAX_OFFSET
+ hex "Maximum kASLR offset allowed" if EXPERT
+ depends on RANDOMIZE_BASE
+ range 0x0 0x20000000 if X86_32
+ default "0x20000000" if X86_32
+ range 0x0 0x40000000 if X86_64
+ default "0x40000000" if X86_64
+ ---help---
+ The lesser of RANDOMIZE_BASE_MAX_OFFSET and available physical
+ memory is used to determine the maximal offset in bytes that will
+ be applied to the kernel when kernel Address Space Layout
+ Randomization (kASLR) is active. This must be a multiple of
+ PHYSICAL_ALIGN.
+
+ On 32-bit this is limited to 512MiB by page table layouts. The
+ default is 512MiB.
+
+ On 64-bit this is limited by how the kernel fixmap page table is
+ positioned, so this cannot be larger than 1GiB currently. Without
+ RANDOMIZE_BASE, there is a 512MiB to 1.5GiB split between kernel
+ and modules. When RANDOMIZE_BASE_MAX_OFFSET is above 512MiB, the
+ modules area will shrink to compensate, up to the current maximum
+ 1GiB to 1GiB split. The default is 1GiB.
+
+ If unsure, leave at the default value.
+
+# Relocation on x86 needs some additional build support
config X86_NEED_RELOCS
def_bool y
- depends on X86_32 && RELOCATABLE
+ depends on RANDOMIZE_BASE || (X86_32 && RELOCATABLE)
config PHYSICAL_ALIGN
hex "Alignment value to which kernel should be aligned"
- default "0x1000000"
+ default "0x200000"
range 0x2000 0x1000000 if X86_32
range 0x200000 0x1000000 if X86_64
---help---
@@ -2392,6 +2412,14 @@ config X86_DMA_REMAP
bool
depends on STA2X11
+config IOSF_MBI
+ bool
+ depends on PCI
+ ---help---
+ To be selected by modules requiring access to the Intel OnChip System
+ Fabric (IOSF) Sideband MailBox Interface (MBI). For MBI platforms
+ enumerable by PCI.
+
source "net/Kconfig"
source "drivers/Kconfig"
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 0f3621ed1db6..321a52ccf63a 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -184,6 +184,7 @@ config HAVE_MMIOTRACE_SUPPORT
config X86_DECODER_SELFTEST
bool "x86 instruction decoder selftest"
depends on DEBUG_KERNEL && KPROBES
+ depends on !COMPILE_TEST
---help---
Perform x86 instruction decoder selftests at build time.
This option is useful for checking the sanity of x86 instruction
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 57d021507120..eeda43abed6e 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -11,6 +11,28 @@ else
KBUILD_DEFCONFIG := $(ARCH)_defconfig
endif
+# How to compile the 16-bit code. Note we always compile for -march=i386;
+# that way we can complain to the user if the CPU is insufficient.
+#
+# The -m16 option is supported by GCC >= 4.9 and clang >= 3.5. For
+# older versions of GCC, we need to play evil and unreliable tricks to
+# attempt to ensure that our asm(".code16gcc") is first in the asm
+# output.
+CODE16GCC_CFLAGS := -m32 -include $(srctree)/arch/x86/boot/code16gcc.h \
+ $(call cc-option, -fno-toplevel-reorder,\
+ $(call cc-option, -fno-unit-at-a-time))
+M16_CFLAGS := $(call cc-option, -m16, $(CODE16GCC_CFLAGS))
+
+REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -D__KERNEL__ \
+ -DDISABLE_BRANCH_PROFILING \
+ -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
+ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
+ -mno-mmx -mno-sse \
+ $(call cc-option, -ffreestanding) \
+ $(call cc-option, -fno-stack-protector) \
+ $(call cc-option, -mpreferred-stack-boundary=2)
+export REALMODE_CFLAGS
+
# BITS is used as extension for files which are available in a 32 bit
# and a 64 bit version to simplify shared Makefiles.
# e.g.: obj-y += foo_$(BITS).o
@@ -89,13 +111,11 @@ else
KBUILD_CFLAGS += -maccumulate-outgoing-args
endif
+# Make sure compiler does not have buggy stack-protector support.
ifdef CONFIG_CC_STACKPROTECTOR
cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
- ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
- stackp-y := -fstack-protector
- KBUILD_CFLAGS += $(stackp-y)
- else
- $(warning stack protector enabled but no compiler support)
+ ifneq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
+ $(warning stack-protector enabled but compiler support broken)
endif
endif
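
The CODE16GCC_CFLAGS fallback above only works if an asm(".code16gcc")
directive reaches the assembler before any generated code, which is what the
forced include of code16gcc.h plus -fno-toplevel-reorder is for. A hedged
sketch of the idea (the exact contents of code16gcc.h are assumed here, not
quoted from the tree):

    /* Stand-in for code16gcc.h: switch the assembler to 16-bit mode
     * before the compiler emits anything else. */
    asm(".code16gcc");

    /* Functions that follow are assembled as 16-bit code callable from
     * real mode, even though they were compiled with -m32. */
    int add16(int a, int b)
    {
            return a + b;
    }

With GCC >= 4.9 or clang >= 3.5 the -m16 option replaces this trick, which
is why M16_CFLAGS prefers it via cc-option and only falls back to
CODE16GCC_CFLAGS on older compilers.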
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index d9c11956fce0..878df7e88cd4 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -20,7 +20,7 @@ targets := vmlinux.bin setup.bin setup.elf bzImage
targets += fdimage fdimage144 fdimage288 image.iso mtools.conf
subdir- := compressed
-setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpucheck.o
+setup-y += a20.o bioscall.o cmdline.o copy.o cpu.o cpuflags.o cpucheck.o
setup-y += early_serial_console.o edd.o header.o main.o mca.o memory.o
setup-y += pm.o pmjump.o printf.o regs.o string.o tty.o video.o
setup-y += video-mode.o version.o
@@ -51,20 +51,7 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
# ---------------------------------------------------------------------------
-# How to compile the 16-bit code. Note we always compile for -march=i386,
-# that way we can complain to the user if the CPU is insufficient.
-KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
- -DDISABLE_BRANCH_PROFILING \
- -Wall -Wstrict-prototypes \
- -march=i386 -mregparm=3 \
- -include $(srctree)/$(src)/code16gcc.h \
- -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
- -mno-mmx -mno-sse \
- $(call cc-option, -ffreestanding) \
- $(call cc-option, -fno-toplevel-reorder,\
- $(call cc-option, -fno-unit-at-a-time)) \
- $(call cc-option, -fno-stack-protector) \
- $(call cc-option, -mpreferred-stack-boundary=2)
+KBUILD_CFLAGS := $(USERINCLUDE) $(REALMODE_CFLAGS) -D_SETUP
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
diff --git a/arch/x86/boot/bioscall.S b/arch/x86/boot/bioscall.S
index 1dfbf64e52a2..d401b4a262b0 100644
--- a/arch/x86/boot/bioscall.S
+++ b/arch/x86/boot/bioscall.S
@@ -1,6 +1,6 @@
/* -----------------------------------------------------------------------
*
- * Copyright 2009 Intel Corporation; author H. Peter Anvin
+ * Copyright 2009-2014 Intel Corporation; author H. Peter Anvin
*
* This file is part of the Linux kernel, and is made available under
* the terms of the GNU General Public License version 2 or (at your
@@ -13,8 +13,8 @@
* touching registers they shouldn't be.
*/
- .code16gcc
- .text
+ .code16
+ .section ".inittext","ax"
.globl intcall
.type intcall, @function
intcall:
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index ef72baeff484..50f8c5e0f37e 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -26,9 +26,8 @@
#include <asm/boot.h>
#include <asm/setup.h>
#include "bitops.h"
-#include <asm/cpufeature.h>
-#include <asm/processor-flags.h>
#include "ctype.h"
+#include "cpuflags.h"
/* Useful macros */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
@@ -307,14 +306,7 @@ static inline int cmdline_find_option_bool(const char *option)
return __cmdline_find_option_bool(cmd_line_ptr, option);
}
-
/* cpu.c, cpucheck.c */
-struct cpu_features {
- int level; /* Family, or 64 for x86-64 */
- int model;
- u32 flags[NCAPINTS];
-};
-extern struct cpu_features cpu;
int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr);
int validate_cpu(void);
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index c8a6792e7842..0fcd9133790c 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -28,7 +28,7 @@ HOST_EXTRACFLAGS += -I$(srctree)/tools/include
VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
$(obj)/string.o $(obj)/cmdline.o $(obj)/early_serial_console.o \
- $(obj)/piggy.o
+ $(obj)/piggy.o $(obj)/cpuflags.o $(obj)/aslr.o
$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
new file mode 100644
index 000000000000..90a21f430117
--- /dev/null
+++ b/arch/x86/boot/compressed/aslr.c
@@ -0,0 +1,316 @@
+#include "misc.h"
+
+#ifdef CONFIG_RANDOMIZE_BASE
+#include <asm/msr.h>
+#include <asm/archrandom.h>
+#include <asm/e820.h>
+
+#include <generated/compile.h>
+#include <linux/module.h>
+#include <linux/uts.h>
+#include <linux/utsname.h>
+#include <generated/utsrelease.h>
+
+/* Simplified build-specific string for starting entropy. */
+static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
+ LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
+
+#define I8254_PORT_CONTROL 0x43
+#define I8254_PORT_COUNTER0 0x40
+#define I8254_CMD_READBACK 0xC0
+#define I8254_SELECT_COUNTER0 0x02
+#define I8254_STATUS_NOTREADY 0x40
+static inline u16 i8254(void)
+{
+ u16 status, timer;
+
+ do {
+ outb(I8254_PORT_CONTROL,
+ I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
+ status = inb(I8254_PORT_COUNTER0);
+ timer = inb(I8254_PORT_COUNTER0);
+ timer |= inb(I8254_PORT_COUNTER0) << 8;
+ } while (status & I8254_STATUS_NOTREADY);
+
+ return timer;
+}
+
+static unsigned long rotate_xor(unsigned long hash, const void *area,
+ size_t size)
+{
+ size_t i;
+ unsigned long *ptr = (unsigned long *)area;
+
+ for (i = 0; i < size / sizeof(hash); i++) {
+ /* Rotate by odd number of bits and XOR. */
+ hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
+ hash ^= ptr[i];
+ }
+
+ return hash;
+}
+
+/* Attempt to create a simple but unpredictable starting entropy. */
+static unsigned long get_random_boot(void)
+{
+ unsigned long hash = 0;
+
+ hash = rotate_xor(hash, build_str, sizeof(build_str));
+ hash = rotate_xor(hash, real_mode, sizeof(*real_mode));
+
+ return hash;
+}
+
+static unsigned long get_random_long(void)
+{
+#ifdef CONFIG_X86_64
+ const unsigned long mix_const = 0x5d6008cbf3848dd3UL;
+#else
+ const unsigned long mix_const = 0x3f39e593UL;
+#endif
+ unsigned long raw, random = get_random_boot();
+ bool use_i8254 = true;
+
+ debug_putstr("KASLR using");
+
+ if (has_cpuflag(X86_FEATURE_RDRAND)) {
+ debug_putstr(" RDRAND");
+ if (rdrand_long(&raw)) {
+ random ^= raw;
+ use_i8254 = false;
+ }
+ }
+
+ if (has_cpuflag(X86_FEATURE_TSC)) {
+ debug_putstr(" RDTSC");
+ rdtscll(raw);
+
+ random ^= raw;
+ use_i8254 = false;
+ }
+
+ if (use_i8254) {
+ debug_putstr(" i8254");
+ random ^= i8254();
+ }
+
+ /* Circular multiply for better bit diffusion */
+ asm("mul %3"
+ : "=a" (random), "=d" (raw)
+ : "a" (random), "rm" (mix_const));
+ random += raw;
+
+ debug_putstr("...\n");
+
+ return random;
+}
+
+struct mem_vector {
+ unsigned long start;
+ unsigned long size;
+};
+
+#define MEM_AVOID_MAX 5
+struct mem_vector mem_avoid[MEM_AVOID_MAX];
+
+static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
+{
+ /* Item at least partially before region. */
+ if (item->start < region->start)
+ return false;
+ /* Item at least partially after region. */
+ if (item->start + item->size > region->start + region->size)
+ return false;
+ return true;
+}
+
+static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
+{
+ /* Item one is entirely before item two. */
+ if (one->start + one->size <= two->start)
+ return false;
+ /* Item one is entirely after item two. */
+ if (one->start >= two->start + two->size)
+ return false;
+ return true;
+}
+
+static void mem_avoid_init(unsigned long input, unsigned long input_size,
+ unsigned long output, unsigned long output_size)
+{
+ u64 initrd_start, initrd_size;
+ u64 cmd_line, cmd_line_size;
+ unsigned long unsafe, unsafe_len;
+ char *ptr;
+
+ /*
+ * Avoid the region that is unsafe to overlap during
+ * decompression (see calculations at top of misc.c).
+ */
+ unsafe_len = (output_size >> 12) + 32768 + 18;
+ unsafe = (unsigned long)input + input_size - unsafe_len;
+ mem_avoid[0].start = unsafe;
+ mem_avoid[0].size = unsafe_len;
+
+ /* Avoid initrd. */
+ initrd_start = (u64)real_mode->ext_ramdisk_image << 32;
+ initrd_start |= real_mode->hdr.ramdisk_image;
+ initrd_size = (u64)real_mode->ext_ramdisk_size << 32;
+ initrd_size |= real_mode->hdr.ramdisk_size;
+ mem_avoid[1].start = initrd_start;
+ mem_avoid[1].size = initrd_size;
+
+ /* Avoid kernel command line. */
+ cmd_line = (u64)real_mode->ext_cmd_line_ptr << 32;
+ cmd_line |= real_mode->hdr.cmd_line_ptr;
+ /* Calculate size of cmd_line. */
+ ptr = (char *)(unsigned long)cmd_line;
+ for (cmd_line_size = 0; ptr[cmd_line_size++]; )
+ ;
+ mem_avoid[2].start = cmd_line;
+ mem_avoid[2].size = cmd_line_size;
+
+ /* Avoid heap memory. */
+ mem_avoid[3].start = (unsigned long)free_mem_ptr;
+ mem_avoid[3].size = BOOT_HEAP_SIZE;
+
+ /* Avoid stack memory. */
+ mem_avoid[4].start = (unsigned long)free_mem_end_ptr;
+ mem_avoid[4].size = BOOT_STACK_SIZE;
+}
+
+/* Does this memory vector overlap a known avoided area? */
+bool mem_avoid_overlap(struct mem_vector *img)
+{
+ int i;
+
+ for (i = 0; i < MEM_AVOID_MAX; i++) {
+ if (mem_overlaps(img, &mem_avoid[i]))
+ return true;
+ }
+
+ return false;
+}
+
+unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / CONFIG_PHYSICAL_ALIGN];
+unsigned long slot_max = 0;
+
+static void slots_append(unsigned long addr)
+{
+ /* Overflowing the slots list should be impossible. */
+ if (slot_max >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET /
+ CONFIG_PHYSICAL_ALIGN)
+ return;
+
+ slots[slot_max++] = addr;
+}
+
+static unsigned long slots_fetch_random(void)
+{
+ /* Handle case of no slots stored. */
+ if (slot_max == 0)
+ return 0;
+
+ return slots[get_random_long() % slot_max];
+}
+
+static void process_e820_entry(struct e820entry *entry,
+ unsigned long minimum,
+ unsigned long image_size)
+{
+ struct mem_vector region, img;
+
+ /* Skip non-RAM entries. */
+ if (entry->type != E820_RAM)
+ return;
+
+ /* Ignore entries entirely above our maximum. */
+ if (entry->addr >= CONFIG_RANDOMIZE_BASE_MAX_OFFSET)
+ return;
+
+ /* Ignore entries entirely below our minimum. */
+ if (entry->addr + entry->size < minimum)
+ return;
+
+ region.start = entry->addr;
+ region.size = entry->size;
+
+ /* Potentially raise address to minimum location. */
+ if (region.start < minimum)
+ region.start = minimum;
+
+ /* Potentially raise address to meet alignment requirements. */
+ region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);
+
+ /* Did we raise the address above the bounds of this e820 region? */
+ if (region.start > entry->addr + entry->size)
+ return;
+
+ /* Reduce size by any delta from the original address. */
+ region.size -= region.start - entry->addr;
+
+ /* Reduce maximum size to fit end of image within maximum limit. */
+ if (region.start + region.size > CONFIG_RANDOMIZE_BASE_MAX_OFFSET)
+ region.size = CONFIG_RANDOMIZE_BASE_MAX_OFFSET - region.start;
+
+ /* Walk each aligned slot and check for avoided areas. */
+ for (img.start = region.start, img.size = image_size ;
+ mem_contains(&region, &img) ;
+ img.start += CONFIG_PHYSICAL_ALIGN) {
+ if (mem_avoid_overlap(&img))
+ continue;
+ slots_append(img.start);
+ }
+}
+
+static unsigned long find_random_addr(unsigned long minimum,
+ unsigned long size)
+{
+ int i;
+ unsigned long addr;
+
+ /* Make sure minimum is aligned. */
+ minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
+
+ /* Verify potential e820 positions, appending to slots list. */
+ for (i = 0; i < real_mode->e820_entries; i++) {
+ process_e820_entry(&real_mode->e820_map[i], minimum, size);
+ }
+
+ return slots_fetch_random();
+}
+
+unsigned char *choose_kernel_location(unsigned char *input,
+ unsigned long input_size,
+ unsigned char *output,
+ unsigned long output_size)
+{
+ unsigned long choice = (unsigned long)output;
+ unsigned long random;
+
+ if (cmdline_find_option_bool("nokaslr")) {
+ debug_putstr("KASLR disabled...\n");
+ goto out;
+ }
+
+ /* Record the various known unsafe memory ranges. */
+ mem_avoid_init((unsigned long)input, input_size,
+ (unsigned long)output, output_size);
+
+ /* Walk e820 and find a random address. */
+ random = find_random_addr(choice, output_size);
+ if (!random) {
+ debug_putstr("KASLR could not find suitable E820 region...\n");
+ goto out;
+ }
+
+ /* Always enforce the minimum. */
+ if (random < choice)
+ goto out;
+
+ choice = random;
+out:
+ return (unsigned char *)choice;
+}
+
+#endif /* CONFIG_RANDOMIZE_BASE */
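
The mixing steps in get_random_long() above (rotate-and-XOR over a buffer,
then a widening multiply whose high half is folded back in) can be exercised
in isolation. A standalone sketch, assuming a 64-bit GCC/clang userspace
build; the input string is made up, and only the 64-bit mix constant is taken
from the code above:

    #include <stddef.h>
    #include <stdio.h>

    /* Same rotate/XOR accumulation as aslr.c: rotate left by 57 bits
     * (equivalently right by 7) and XOR in each unsigned long. */
    static unsigned long rotate_xor(unsigned long hash, const void *area,
                                    size_t size)
    {
            const unsigned long *ptr = area;
            size_t i;

            for (i = 0; i < size / sizeof(hash); i++) {
                    hash = (hash << (sizeof(hash) * 8 - 7)) | (hash >> 7);
                    hash ^= ptr[i];
            }
            return hash;
    }

    int main(void)
    {
            static const char build_str[] = "3.14.0-sketch (user@host) (gcc)";
            unsigned long random = rotate_xor(0, build_str, sizeof(build_str));
            /* The "mul" step: 64x64->128 multiply, then add the high half. */
            unsigned __int128 prod =
                    (unsigned __int128)random * 0x5d6008cbf3848dd3UL;

            random = (unsigned long)prod + (unsigned long)(prod >> 64);
            printf("mixed value: %#lx\n", random);
            return 0;
    }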
diff --git a/arch/x86/boot/compressed/cmdline.c b/arch/x86/boot/compressed/cmdline.c
index bffd73b45b1f..b68e3033e6b9 100644
--- a/arch/x86/boot/compressed/cmdline.c
+++ b/arch/x86/boot/compressed/cmdline.c
@@ -1,6 +1,6 @@
#include "misc.h"
-#ifdef CONFIG_EARLY_PRINTK
+#if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE
static unsigned long fs;
static inline void set_fs(unsigned long seg)
diff --git a/arch/x86/boot/compressed/cpuflags.c b/arch/x86/boot/compressed/cpuflags.c
new file mode 100644
index 000000000000..aa313466118b
--- /dev/null
+++ b/arch/x86/boot/compressed/cpuflags.c
@@ -0,0 +1,12 @@
+#ifdef CONFIG_RANDOMIZE_BASE
+
+#include "../cpuflags.c"
+
+bool has_cpuflag(int flag)
+{
+ get_cpuflags();
+
+ return test_bit(flag, cpu.flags);
+}
+
+#endif
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 5d6f6891b188..9116aac232c7 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -117,9 +117,11 @@ preferred_addr:
addl %eax, %ebx
notl %eax
andl %eax, %ebx
-#else
- movl $LOAD_PHYSICAL_ADDR, %ebx
+ cmpl $LOAD_PHYSICAL_ADDR, %ebx
+ jge 1f
#endif
+ movl $LOAD_PHYSICAL_ADDR, %ebx
+1:
/* Target address to relocate to for decompression */
addl $z_extract_offset, %ebx
@@ -191,14 +193,14 @@ relocated:
leal boot_heap(%ebx), %eax
pushl %eax /* heap area */
pushl %esi /* real mode pointer */
- call decompress_kernel
+ call decompress_kernel /* returns kernel location in %eax */
addl $24, %esp
/*
* Jump to the decompressed kernel.
*/
xorl %ebx, %ebx
- jmp *%ebp
+ jmp *%eax
/*
* Stack and heap for uncompression
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index c337422b575d..c5c1ae0997e7 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -94,9 +94,11 @@ ENTRY(startup_32)
addl %eax, %ebx
notl %eax
andl %eax, %ebx
-#else
- movl $LOAD_PHYSICAL_ADDR, %ebx
+ cmpl $LOAD_PHYSICAL_ADDR, %ebx
+ jge 1f
#endif
+ movl $LOAD_PHYSICAL_ADDR, %ebx
+1:
/* Target address to relocate to for decompression */
addl $z_extract_offset, %ebx
@@ -269,9 +271,11 @@ preferred_addr:
addq %rax, %rbp
notq %rax
andq %rax, %rbp
-#else
- movq $LOAD_PHYSICAL_ADDR, %rbp
+ cmpq $LOAD_PHYSICAL_ADDR, %rbp
+ jge 1f
#endif
+ movq $LOAD_PHYSICAL_ADDR, %rbp
+1:
/* Target address to relocate to for decompression */
leaq z_extract_offset(%rbp), %rbx
@@ -339,13 +343,13 @@ relocated:
movl $z_input_len, %ecx /* input_len */
movq %rbp, %r8 /* output target address */
movq $z_output_len, %r9 /* decompressed length */
- call decompress_kernel
+ call decompress_kernel /* returns kernel location in %rax */
popq %rsi
/*
* Jump to the decompressed kernel.
*/
- jmp *%rbp
+ jmp *%rax
.code32
no_longmode:
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 434f077d2c4d..196eaf373a06 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -112,14 +112,8 @@ struct boot_params *real_mode; /* Pointer to real-mode data */
void *memset(void *s, int c, size_t n);
void *memcpy(void *dest, const void *src, size_t n);
-#ifdef CONFIG_X86_64
-#define memptr long
-#else
-#define memptr unsigned
-#endif
-
-static memptr free_mem_ptr;
-static memptr free_mem_end_ptr;
+memptr free_mem_ptr;
+memptr free_mem_end_ptr;
static char *vidmem;
static int vidport;
@@ -395,7 +389,7 @@ static void parse_elf(void *output)
free(phdrs);
}
-asmlinkage void decompress_kernel(void *rmode, memptr heap,
+asmlinkage void *decompress_kernel(void *rmode, memptr heap,
unsigned char *input_data,
unsigned long input_len,
unsigned char *output,
@@ -422,6 +416,10 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
free_mem_ptr = heap; /* Heap */
free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
+ output = choose_kernel_location(input_data, input_len,
+ output, output_len);
+
+ /* Validate memory location choices. */
if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1))
error("Destination address inappropriately aligned");
#ifdef CONFIG_X86_64
@@ -441,5 +439,5 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
parse_elf(output);
handle_relocations(output, output_len);
debug_putstr("done.\nBooting the kernel.\n");
- return;
+ return output;
}
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index 674019d8e235..24e3e569a13c 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -23,7 +23,15 @@
#define BOOT_BOOT_H
#include "../ctype.h"
+#ifdef CONFIG_X86_64
+#define memptr long
+#else
+#define memptr unsigned
+#endif
+
/* misc.c */
+extern memptr free_mem_ptr;
+extern memptr free_mem_end_ptr;
extern struct boot_params *real_mode; /* Pointer to real-mode data */
void __putstr(const char *s);
#define error_putstr(__x) __putstr(__x)
@@ -39,23 +47,40 @@ static inline void debug_putstr(const char *s)
#endif
-#ifdef CONFIG_EARLY_PRINTK
-
+#if CONFIG_EARLY_PRINTK || CONFIG_RANDOMIZE_BASE
/* cmdline.c */
int cmdline_find_option(const char *option, char *buffer, int bufsize);
int cmdline_find_option_bool(const char *option);
+#endif
-/* early_serial_console.c */
-extern int early_serial_base;
-void console_init(void);
+#if CONFIG_RANDOMIZE_BASE
+/* aslr.c */
+unsigned char *choose_kernel_location(unsigned char *input,
+ unsigned long input_size,
+ unsigned char *output,
+ unsigned long output_size);
+/* cpuflags.c */
+bool has_cpuflag(int flag);
#else
+static inline
+unsigned char *choose_kernel_location(unsigned char *input,
+ unsigned long input_size,
+ unsigned char *output,
+ unsigned long output_size)
+{
+ return output;
+}
+#endif
+#ifdef CONFIG_EARLY_PRINTK
/* early_serial_console.c */
+extern int early_serial_base;
+void console_init(void);
+#else
static const int early_serial_base;
static inline void console_init(void)
{ }
-
#endif
#endif
diff --git a/arch/x86/boot/copy.S b/arch/x86/boot/copy.S
index 11f272c6f5e9..1eb7d298b47d 100644
--- a/arch/x86/boot/copy.S
+++ b/arch/x86/boot/copy.S
@@ -14,7 +14,7 @@
* Memory copy routines
*/
- .code16gcc
+ .code16
.text
GLOBAL(memcpy)
@@ -30,7 +30,7 @@ GLOBAL(memcpy)
rep; movsb
popw %di
popw %si
- ret
+ retl
ENDPROC(memcpy)
GLOBAL(memset)
@@ -45,25 +45,25 @@ GLOBAL(memset)
andw $3, %cx
rep; stosb
popw %di
- ret
+ retl
ENDPROC(memset)
GLOBAL(copy_from_fs)
pushw %ds
pushw %fs
popw %ds
- call memcpy
+ calll memcpy
popw %ds
- ret
+ retl
ENDPROC(copy_from_fs)
GLOBAL(copy_to_fs)
pushw %es
pushw %fs
popw %es
- call memcpy
+ calll memcpy
popw %es
- ret
+ retl
ENDPROC(copy_to_fs)
#if 0 /* Not currently used, but can be enabled as needed */
@@ -71,17 +71,17 @@ GLOBAL(copy_from_gs)
pushw %ds
pushw %gs
popw %ds
- call memcpy
+ calll memcpy
popw %ds
- ret
+ retl
ENDPROC(copy_from_gs)
GLOBAL(copy_to_gs)
pushw %es
pushw %gs
popw %es
- call memcpy
+ calll memcpy
popw %es
- ret
+ retl
ENDPROC(copy_to_gs)
#endif
diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
index 4d3ff037201f..100a9a10076a 100644
--- a/arch/x86/boot/cpucheck.c
+++ b/arch/x86/boot/cpucheck.c
@@ -28,8 +28,6 @@
#include <asm/required-features.h>
#include <asm/msr-index.h>
-struct cpu_features cpu;
-static u32 cpu_vendor[3];
static u32 err_flags[NCAPINTS];
static const int req_level = CONFIG_X86_MINIMUM_CPU_FAMILY;
@@ -69,92 +67,8 @@ static int is_transmeta(void)
cpu_vendor[2] == A32('M', 'x', '8', '6');
}
-static int has_fpu(void)
-{
- u16 fcw = -1, fsw = -1;
- u32 cr0;
-
- asm("movl %%cr0,%0" : "=r" (cr0));
- if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
- cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
- asm volatile("movl %0,%%cr0" : : "r" (cr0));
- }
-
- asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
- : "+m" (fsw), "+m" (fcw));
-
- return fsw == 0 && (fcw & 0x103f) == 0x003f;
-}
-
-static int has_eflag(u32 mask)
-{
- u32 f0, f1;
-
- asm("pushfl ; "
- "pushfl ; "
- "popl %0 ; "
- "movl %0,%1 ; "
- "xorl %2,%1 ; "
- "pushl %1 ; "
- "popfl ; "
- "pushfl ; "
- "popl %1 ; "
- "popfl"
- : "=&r" (f0), "=&r" (f1)
- : "ri" (mask));
-
- return !!((f0^f1) & mask);
-}
-
-static void get_flags(void)
-{
- u32 max_intel_level, max_amd_level;
- u32 tfms;
-
- if (has_fpu())
- set_bit(X86_FEATURE_FPU, cpu.flags);
-
- if (has_eflag(X86_EFLAGS_ID)) {
- asm("cpuid"
- : "=a" (max_intel_level),
- "=b" (cpu_vendor[0]),
- "=d" (cpu_vendor[1]),
- "=c" (cpu_vendor[2])
- : "a" (0));
-
- if (max_intel_level >= 0x00000001 &&
- max_intel_level <= 0x0000ffff) {
- asm("cpuid"
- : "=a" (tfms),
- "=c" (cpu.flags[4]),
- "=d" (cpu.flags[0])
- : "a" (0x00000001)
- : "ebx");
- cpu.level = (tfms >> 8) & 15;
- cpu.model = (tfms >> 4) & 15;
- if (cpu.level >= 6)
- cpu.model += ((tfms >> 16) & 0xf) << 4;
- }
-
- asm("cpuid"
- : "=a" (max_amd_level)
- : "a" (0x80000000)
- : "ebx", "ecx", "edx");
-
- if (max_amd_level >= 0x80000001 &&
- max_amd_level <= 0x8000ffff) {
- u32 eax = 0x80000001;
- asm("cpuid"
- : "+a" (eax),
- "=c" (cpu.flags[6]),
- "=d" (cpu.flags[1])
- : : "ebx");
- }
- }
-}
-
/* Returns a bitmask of which words we have error bits in */
-static int check_flags(void)
+static int check_cpuflags(void)
{
u32 err;
int i;
@@ -187,8 +101,8 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
if (has_eflag(X86_EFLAGS_AC))
cpu.level = 4;
- get_flags();
- err = check_flags();
+ get_cpuflags();
+ err = check_cpuflags();
if (test_bit(X86_FEATURE_LM, cpu.flags))
cpu.level = 64;
@@ -207,8 +121,8 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
eax &= ~(1 << 15);
asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
- get_flags(); /* Make sure it really did something */
- err = check_flags();
+ get_cpuflags(); /* Make sure it really did something */
+ err = check_cpuflags();
} else if (err == 0x01 &&
!(err_flags[0] & ~(1 << X86_FEATURE_CX8)) &&
is_centaur() && cpu.model >= 6) {
@@ -223,7 +137,7 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
set_bit(X86_FEATURE_CX8, cpu.flags);
- err = check_flags();
+ err = check_cpuflags();
} else if (err == 0x01 && is_transmeta()) {
/* Transmeta might have masked feature bits in word 0 */
@@ -238,7 +152,7 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
: : "ecx", "ebx");
asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
- err = check_flags();
+ err = check_cpuflags();
}
if (err_flags_ptr)
diff --git a/arch/x86/boot/cpuflags.c b/arch/x86/boot/cpuflags.c
new file mode 100644
index 000000000000..431fa5f84537
--- /dev/null
+++ b/arch/x86/boot/cpuflags.c
@@ -0,0 +1,119 @@
+#include <linux/types.h>
+#include "bitops.h"
+
+#include <asm/processor-flags.h>
+#include <asm/required-features.h>
+#include <asm/msr-index.h>
+#include "cpuflags.h"
+
+struct cpu_features cpu;
+u32 cpu_vendor[3];
+
+static bool loaded_flags;
+
+static int has_fpu(void)
+{
+ u16 fcw = -1, fsw = -1;
+ unsigned long cr0;
+
+ asm volatile("mov %%cr0,%0" : "=r" (cr0));
+ if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
+ cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
+ asm volatile("mov %0,%%cr0" : : "r" (cr0));
+ }
+
+ asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
+ : "+m" (fsw), "+m" (fcw));
+
+ return fsw == 0 && (fcw & 0x103f) == 0x003f;
+}
+
+/*
+ * For building the 16-bit code we want to explicitly specify 32-bit
+ * push/pop operations, rather than just saying 'pushf' or 'popf' and
+ * letting the compiler choose. But this is also included from the
+ * compressed/ directory where it may be 64-bit code, and thus needs
+ * to be 'pushfq' or 'popfq' in that case.
+ */
+#ifdef __x86_64__
+#define PUSHF "pushfq"
+#define POPF "popfq"
+#else
+#define PUSHF "pushfl"
+#define POPF "popfl"
+#endif
+
+int has_eflag(unsigned long mask)
+{
+ unsigned long f0, f1;
+
+ asm volatile(PUSHF " \n\t"
+ PUSHF " \n\t"
+ "pop %0 \n\t"
+ "mov %0,%1 \n\t"
+ "xor %2,%1 \n\t"
+ "push %1 \n\t"
+ POPF " \n\t"
+ PUSHF " \n\t"
+ "pop %1 \n\t"
+ POPF
+ : "=&r" (f0), "=&r" (f1)
+ : "ri" (mask));
+
+ return !!((f0^f1) & mask);
+}
+
+/* Handle x86_32 PIC using ebx. */
+#if defined(__i386__) && defined(__PIC__)
+# define EBX_REG "=r"
+#else
+# define EBX_REG "=b"
+#endif
+
+static inline void cpuid(u32 id, u32 *a, u32 *b, u32 *c, u32 *d)
+{
+ asm volatile(".ifnc %%ebx,%3 ; movl %%ebx,%3 ; .endif \n\t"
+ "cpuid \n\t"
+ ".ifnc %%ebx,%3 ; xchgl %%ebx,%3 ; .endif \n\t"
+ : "=a" (*a), "=c" (*c), "=d" (*d), EBX_REG (*b)
+ : "a" (id)
+ );
+}
+
+void get_cpuflags(void)
+{
+ u32 max_intel_level, max_amd_level;
+ u32 tfms;
+ u32 ignored;
+
+ if (loaded_flags)
+ return;
+ loaded_flags = true;
+
+ if (has_fpu())
+ set_bit(X86_FEATURE_FPU, cpu.flags);
+
+ if (has_eflag(X86_EFLAGS_ID)) {
+ cpuid(0x0, &max_intel_level, &cpu_vendor[0], &cpu_vendor[2],
+ &cpu_vendor[1]);
+
+ if (max_intel_level >= 0x00000001 &&
+ max_intel_level <= 0x0000ffff) {
+ cpuid(0x1, &tfms, &ignored, &cpu.flags[4],
+ &cpu.flags[0]);
+ cpu.level = (tfms >> 8) & 15;
+ cpu.model = (tfms >> 4) & 15;
+ if (cpu.level >= 6)
+ cpu.model += ((tfms >> 16) & 0xf) << 4;
+ }
+
+ cpuid(0x80000000, &max_amd_level, &ignored, &ignored,
+ &ignored);
+
+ if (max_amd_level >= 0x80000001 &&
+ max_amd_level <= 0x8000ffff) {
+ cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
+ &cpu.flags[1]);
+ }
+ }
+}
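
has_eflag() above is the classic EFLAGS-toggle probe: flip the requested flag
bit, write EFLAGS back, and check whether the change stuck. A minimal 64-bit
userspace version of the same test (pushfq/popfq as in the __x86_64__ branch
above; bit 21 is EFLAGS.ID, whose togglability means CPUID is available):

    #include <stdio.h>

    static int has_eflag(unsigned long mask)
    {
            unsigned long f0, f1;

            asm volatile("pushfq   \n\t"   /* save original EFLAGS      */
                         "pushfq   \n\t"
                         "pop %0   \n\t"   /* f0 = current EFLAGS       */
                         "mov %0,%1\n\t"
                         "xor %2,%1\n\t"   /* toggle the requested bits */
                         "push %1  \n\t"
                         "popfq    \n\t"   /* write modified EFLAGS     */
                         "pushfq   \n\t"
                         "pop %1   \n\t"   /* f1 = EFLAGS read back     */
                         "popfq"           /* restore the original      */
                         : "=&r" (f0), "=&r" (f1)
                         : "ri" (mask));

            return !!((f0 ^ f1) & mask);
    }

    int main(void)
    {
            printf("CPUID available: %d\n", has_eflag(1UL << 21));
            return 0;
    }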
diff --git a/arch/x86/boot/cpuflags.h b/arch/x86/boot/cpuflags.h
new file mode 100644
index 000000000000..ea97697e51e4
--- /dev/null
+++ b/arch/x86/boot/cpuflags.h
@@ -0,0 +1,19 @@
+#ifndef BOOT_CPUFLAGS_H
+#define BOOT_CPUFLAGS_H
+
+#include <asm/cpufeature.h>
+#include <asm/processor-flags.h>
+
+struct cpu_features {
+ int level; /* Family, or 64 for x86-64 */
+ int model;
+ u32 flags[NCAPINTS];
+};
+
+extern struct cpu_features cpu;
+extern u32 cpu_vendor[3];
+
+int has_eflag(unsigned long mask);
+void get_cpuflags(void);
+
+#endif
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 9ec06a1f6d61..ec3b8ba68096 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -391,7 +391,14 @@ xloadflags:
#else
# define XLF23 0
#endif
- .word XLF0 | XLF1 | XLF23
+
+#if defined(CONFIG_X86_64) && defined(CONFIG_EFI) && defined(CONFIG_KEXEC)
+# define XLF4 XLF_EFI_KEXEC
+#else
+# define XLF4 0
+#endif
+
+ .word XLF0 | XLF1 | XLF23 | XLF4
cmdline_size: .long COMMAND_LINE_SIZE-1 #length of the command line,
#added with boot protocol
diff --git a/arch/x86/boot/video.h b/arch/x86/boot/video.h
index ff339c5db311..0bb25491262d 100644
--- a/arch/x86/boot/video.h
+++ b/arch/x86/boot/video.h
@@ -80,7 +80,7 @@ struct card_info {
u16 xmode_n; /* Size of unprobed mode range */
};
-#define __videocard struct card_info __attribute__((section(".videocards")))
+#define __videocard struct card_info __attribute__((used,section(".videocards")))
extern struct card_info video_cards[], video_cards_end[];
int mode_defined(u16 mode); /* video.c */
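
The "used" attribute added to __videocard above keeps card_info objects from
being discarded when nothing references them by name; they are only reached
by walking the .videocards section between video_cards[] and
video_cards_end[]. The same registration pattern can be shown in plain C with
the __start_/__stop_ bounds symbols the GNU linker emits for sections whose
names are valid C identifiers (the section and struct names below are
illustrative, not from the kernel):

    #include <stdio.h>

    struct driver_info {
            const char *name;
    };

    /* "used" stops the compiler from dropping the unreferenced statics;
     * the named section collects them into one contiguous array. */
    #define __driver __attribute__((used, section("drivers")))

    static struct driver_info vga  __driver = { .name = "vga"  };
    static struct driver_info vesa __driver = { .name = "vesa" };

    /* Bounds symbols provided automatically by GNU ld. */
    extern struct driver_info __start_drivers[], __stop_drivers[];

    int main(void)
    {
            struct driver_info *d;

            for (d = __start_drivers; d < __stop_drivers; d++)
                    printf("registered: %s\n", d->name);
            return 0;
    }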
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index e0fc24db234a..6ba54d640383 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -76,6 +76,7 @@ ifeq ($(avx2_supported),yes)
endif
aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
+aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o
ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
crc32c-intel-y := crc32c-intel_glue.o
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
new file mode 100644
index 000000000000..522ab68d1c88
--- /dev/null
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -0,0 +1,2811 @@
+########################################################################
+# Copyright (c) 2013, Intel Corporation
+#
+# This software is available to you under a choice of one of two
+# licenses. You may choose to be licensed under the terms of the GNU
+# General Public License (GPL) Version 2, available from the file
+# COPYING in the main directory of this source tree, or the
+# OpenIB.org BSD license below:
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the
+# distribution.
+#
+# * Neither the name of the Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+#
+# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY
+# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES# LOSS OF USE, DATA, OR
+# PROFITS# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+########################################################################
+##
+## Authors:
+## Erdinc Ozturk <erdinc.ozturk@intel.com>
+## Vinodh Gopal <vinodh.gopal@intel.com>
+## James Guilford <james.guilford@intel.com>
+## Tim Chen <tim.c.chen@linux.intel.com>
+##
+## References:
+## This code was derived and highly optimized from the code described in paper:
+## Vinodh Gopal et. al. Optimized Galois-Counter-Mode Implementation
+## on Intel Architecture Processors. August, 2010
+## The details of the implementation are explained in:
+## Erdinc Ozturk et. al. Enabling High-Performance Galois-Counter-Mode
+## on Intel Architecture Processors. October, 2012.
+##
+## Assumptions:
+##
+##
+##
+## iv:
+## 0 1 2 3
+## 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+## | Salt (From the SA) |
+## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+## | Initialization Vector |
+## | (This is the sequence number from IPSec header) |
+## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+## | 0x1 |
+## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+##
+##
+##
+## AAD:
+## AAD padded to 128 bits with 0
+## for example, assume AAD is a u32 vector
+##
+## if AAD is 8 bytes:
+## AAD[3] = {A0, A1}#
+## padded AAD in xmm register = {A1 A0 0 0}
+##
+## 0 1 2 3
+## 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+## | SPI (A1) |
+## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+## | 32-bit Sequence Number (A0) |
+## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+## | 0x0 |
+## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+##
+## AAD Format with 32-bit Sequence Number
+##
+## if AAD is 12 bytes:
+## AAD[3] = {A0, A1, A2}#
+## padded AAD in xmm register = {A2 A1 A0 0}
+##
+## 0 1 2 3
+## 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+## | SPI (A2) |
+## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+## | 64-bit Extended Sequence Number {A1,A0} |
+## | |
+## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+## | 0x0 |
+## +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+##
+## AAD Format with 64-bit Extended Sequence Number
+##
+##
+## aadLen:
+## from the definition of the spec, aadLen can only be 8 or 12 bytes.
+## The code additionally supports aadLen of length 16 bytes.
+##
+## TLen:
+## from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
+##
+## poly = x^128 + x^127 + x^126 + x^121 + 1
+## throughout the code, one tab and two tab indentations are used. one tab is
+## for GHASH part, two tabs is for AES part.
+##
+
+#include <linux/linkage.h>
+#include <asm/inst.h>
+
+.data
+.align 16
+
+POLY: .octa 0xC2000000000000000000000000000001
+POLY2: .octa 0xC20000000000000000000001C2000000
+TWOONE: .octa 0x00000001000000000000000000000001
+
+# order of these constants should not change.
+# more specifically, ALL_F should follow SHIFT_MASK, and ZERO should follow ALL_F
+
+SHUF_MASK: .octa 0x000102030405060708090A0B0C0D0E0F
+SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
+ALL_F: .octa 0xffffffffffffffffffffffffffffffff
+ZERO: .octa 0x00000000000000000000000000000000
+ONE: .octa 0x00000000000000000000000000000001
+ONEf: .octa 0x01000000000000000000000000000000
+
+.text
+
+
+##define the fields of the gcm aes context
+#{
+# u8 expanded_keys[16*11] store expanded keys
+# u8 shifted_hkey_1[16] store HashKey <<1 mod poly here
+# u8 shifted_hkey_2[16] store HashKey^2 <<1 mod poly here
+# u8 shifted_hkey_3[16] store HashKey^3 <<1 mod poly here
+# u8 shifted_hkey_4[16] store HashKey^4 <<1 mod poly here
+# u8 shifted_hkey_5[16] store HashKey^5 <<1 mod poly here
+# u8 shifted_hkey_6[16] store HashKey^6 <<1 mod poly here
+# u8 shifted_hkey_7[16] store HashKey^7 <<1 mod poly here
+# u8 shifted_hkey_8[16] store HashKey^8 <<1 mod poly here
+# u8 shifted_hkey_1_k[16] store XOR HashKey <<1 mod poly here (for Karatsuba purposes)
+# u8 shifted_hkey_2_k[16] store XOR HashKey^2 <<1 mod poly here (for Karatsuba purposes)
+# u8 shifted_hkey_3_k[16] store XOR HashKey^3 <<1 mod poly here (for Karatsuba purposes)
+# u8 shifted_hkey_4_k[16] store XOR HashKey^4 <<1 mod poly here (for Karatsuba purposes)
+# u8 shifted_hkey_5_k[16] store XOR HashKey^5 <<1 mod poly here (for Karatsuba purposes)
+# u8 shifted_hkey_6_k[16] store XOR HashKey^6 <<1 mod poly here (for Karatsuba purposes)
+# u8 shifted_hkey_7_k[16] store XOR HashKey^7 <<1 mod poly here (for Karatsuba purposes)
+# u8 shifted_hkey_8_k[16] store XOR HashKey^8 <<1 mod poly here (for Karatsuba purposes)
+#} gcm_ctx#
+
+HashKey = 16*11 # store HashKey <<1 mod poly here
+HashKey_2 = 16*12 # store HashKey^2 <<1 mod poly here
+HashKey_3 = 16*13 # store HashKey^3 <<1 mod poly here
+HashKey_4 = 16*14 # store HashKey^4 <<1 mod poly here
+HashKey_5 = 16*15 # store HashKey^5 <<1 mod poly here
+HashKey_6 = 16*16 # store HashKey^6 <<1 mod poly here
+HashKey_7 = 16*17 # store HashKey^7 <<1 mod poly here
+HashKey_8 = 16*18 # store HashKey^8 <<1 mod poly here
+HashKey_k = 16*19 # store XOR of HashKey <<1 mod poly here (for Karatsuba purposes)
+HashKey_2_k = 16*20 # store XOR of HashKey^2 <<1 mod poly here (for Karatsuba purposes)
+HashKey_3_k = 16*21 # store XOR of HashKey^3 <<1 mod poly here (for Karatsuba purposes)
+HashKey_4_k = 16*22 # store XOR of HashKey^4 <<1 mod poly here (for Karatsuba purposes)
+HashKey_5_k = 16*23 # store XOR of HashKey^5 <<1 mod poly here (for Karatsuba purposes)
+HashKey_6_k = 16*24 # store XOR of HashKey^6 <<1 mod poly here (for Karatsuba purposes)
+HashKey_7_k = 16*25 # store XOR of HashKey^7 <<1 mod poly here (for Karatsuba purposes)
+HashKey_8_k = 16*26 # store XOR of HashKey^8 <<1 mod poly here (for Karatsuba purposes)
+
+#define arg1 %rdi
+#define arg2 %rsi
+#define arg3 %rdx
+#define arg4 %rcx
+#define arg5 %r8
+#define arg6 %r9
+#define arg7 STACK_OFFSET+8*1(%r14)
+#define arg8 STACK_OFFSET+8*2(%r14)
+#define arg9 STACK_OFFSET+8*3(%r14)
+
+i = 0
+j = 0
+
+out_order = 0
+in_order = 1
+DEC = 0
+ENC = 1
+
+.macro define_reg r n
+reg_\r = %xmm\n
+.endm
+
+.macro setreg
+.altmacro
+define_reg i %i
+define_reg j %j
+.noaltmacro
+.endm
+
+# need to push 4 registers into stack to maintain
+STACK_OFFSET = 8*4
+
+TMP1 = 16*0 # Temporary storage for AAD
+TMP2 = 16*1 # Temporary storage for AES State 2 (State 1 is stored in an XMM register)
+TMP3 = 16*2 # Temporary storage for AES State 3
+TMP4 = 16*3 # Temporary storage for AES State 4
+TMP5 = 16*4 # Temporary storage for AES State 5
+TMP6 = 16*5 # Temporary storage for AES State 6
+TMP7 = 16*6 # Temporary storage for AES State 7
+TMP8 = 16*7 # Temporary storage for AES State 8
+
+VARIABLE_OFFSET = 16*8
+
+################################
+# Utility Macros
+################################
+
+# Encryption of a single block
+.macro ENCRYPT_SINGLE_BLOCK XMM0
+ vpxor (arg1), \XMM0, \XMM0
+ i = 1
+ setreg
+.rep 9
+ vaesenc 16*i(arg1), \XMM0, \XMM0
+ i = (i+1)
+ setreg
+.endr
+ vaesenclast 16*10(arg1), \XMM0, \XMM0
+.endm
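+
+# The macro above is a straight AES-128 encryption of one block: round key 0
+# is XORed in, rounds 1-9 use vaesenc and round 10 uses vaesenclast.  A rough
+# C-intrinsics equivalent (sketch only, using the standard <wmmintrin.h>
+# intrinsics; the helper name is illustrative):
+#
+#	__m128i aes128_encrypt_block(__m128i block, const __m128i *rk)
+#	{
+#		int r;
+#		block = _mm_xor_si128(block, rk[0]);	/* whitening with round key 0 */
+#		for (r = 1; r < 10; r++)
+#			block = _mm_aesenc_si128(block, rk[r]);
+#		return _mm_aesenclast_si128(block, rk[10]);
+#	}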
+
+#ifdef CONFIG_AS_AVX
+###############################################################################
+# GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
+# Input: A and B (128-bits each, bit-reflected)
+# Output: C = A*B*x mod poly, (i.e. >>1 )
+# To compute GH = GH*HashKey mod poly, give HK = HashKey<<1 mod poly as input
+# GH = GH * HK * x mod poly which is equivalent to GH*HashKey mod poly.
+###############################################################################
+.macro GHASH_MUL_AVX GH HK T1 T2 T3 T4 T5
+
+ vpshufd $0b01001110, \GH, \T2
+ vpshufd $0b01001110, \HK, \T3
+ vpxor \GH , \T2, \T2 # T2 = (a1+a0)
+ vpxor \HK , \T3, \T3 # T3 = (b1+b0)
+
+ vpclmulqdq $0x11, \HK, \GH, \T1 # T1 = a1*b1
+ vpclmulqdq $0x00, \HK, \GH, \GH # GH = a0*b0
+ vpclmulqdq $0x00, \T3, \T2, \T2 # T2 = (a1+a0)*(b1+b0)
+ vpxor \GH, \T2,\T2
+ vpxor \T1, \T2,\T2 # T2 = a0*b1+a1*b0
+
+ vpslldq $8, \T2,\T3 # shift-L T3 2 DWs
+ vpsrldq $8, \T2,\T2 # shift-R T2 2 DWs
+ vpxor \T3, \GH, \GH
+ vpxor \T2, \T1, \T1 # <T1:GH> = GH x HK
+
+ #first phase of the reduction
+        vpslld  $31, \GH, \T2                   # packed left shifting << 31
+        vpslld  $30, \GH, \T3                   # packed left shifting << 30
+        vpslld  $25, \GH, \T4                   # packed left shifting << 25
+
+ vpxor \T3, \T2, \T2 # xor the shifted versions
+ vpxor \T4, \T2, \T2
+
+ vpsrldq $4, \T2, \T5 # shift-R T5 1 DW
+
+ vpslldq $12, \T2, \T2 # shift-L T2 3 DWs
+ vpxor \T2, \GH, \GH # first phase of the reduction complete
+
+ #second phase of the reduction
+
+        vpsrld  $1,\GH, \T2                     # packed right shifting >> 1
+        vpsrld  $2,\GH, \T3                     # packed right shifting >> 2
+        vpsrld  $7,\GH, \T4                     # packed right shifting >> 7
+ vpxor \T3, \T2, \T2 # xor the shifted versions
+ vpxor \T4, \T2, \T2
+
+ vpxor \T5, \T2, \T2
+ vpxor \T2, \GH, \GH
+ vpxor \T1, \GH, \GH # the result is in GH
+
+
+.endm
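+
+# Summary of the Karatsuba multiply above: with GH = a1:a0 and HK = b1:b0
+# (64-bit halves), three carry-less products are formed,
+#	a1*b1, a0*b0 and (a1^a0)*(b1^b0),
+# and the middle term a1*b0 ^ a0*b1 is recovered by XORing all three.  The
+# resulting 256-bit product <T1:GH> is then reduced modulo the polynomial
+# (128,127,126,121,0) named in the header, using two phases built from 32-bit
+# shifts and XORs.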
+
+.macro PRECOMPUTE_AVX HK T1 T2 T3 T4 T5 T6
+
+        # HashKey_i_k holds the XOR of the low and high 64-bit halves of HashKey_i
+ vmovdqa \HK, \T5
+
+ vpshufd $0b01001110, \T5, \T1
+ vpxor \T5, \T1, \T1
+ vmovdqa \T1, HashKey_k(arg1)
+
+ GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly
+ vmovdqa \T5, HashKey_2(arg1) # [HashKey_2] = HashKey^2<<1 mod poly
+ vpshufd $0b01001110, \T5, \T1
+ vpxor \T5, \T1, \T1
+ vmovdqa \T1, HashKey_2_k(arg1)
+
+ GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^3<<1 mod poly
+ vmovdqa \T5, HashKey_3(arg1)
+ vpshufd $0b01001110, \T5, \T1
+ vpxor \T5, \T1, \T1
+ vmovdqa \T1, HashKey_3_k(arg1)
+
+ GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^4<<1 mod poly
+ vmovdqa \T5, HashKey_4(arg1)
+ vpshufd $0b01001110, \T5, \T1
+ vpxor \T5, \T1, \T1
+ vmovdqa \T1, HashKey_4_k(arg1)
+
+ GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^5<<1 mod poly
+ vmovdqa \T5, HashKey_5(arg1)
+ vpshufd $0b01001110, \T5, \T1
+ vpxor \T5, \T1, \T1
+ vmovdqa \T1, HashKey_5_k(arg1)
+
+ GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^6<<1 mod poly
+ vmovdqa \T5, HashKey_6(arg1)
+ vpshufd $0b01001110, \T5, \T1
+ vpxor \T5, \T1, \T1
+ vmovdqa \T1, HashKey_6_k(arg1)
+
+ GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^7<<1 mod poly
+ vmovdqa \T5, HashKey_7(arg1)
+ vpshufd $0b01001110, \T5, \T1
+ vpxor \T5, \T1, \T1
+ vmovdqa \T1, HashKey_7_k(arg1)
+
+ GHASH_MUL_AVX \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^8<<1 mod poly
+ vmovdqa \T5, HashKey_8(arg1)
+ vpshufd $0b01001110, \T5, \T1
+ vpxor \T5, \T1, \T1
+ vmovdqa \T1, HashKey_8_k(arg1)
+
+.endm
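+
+# PRECOMPUTE_AVX fills the remaining context slots: HashKey^i<<1 mod poly for
+# i = 2..8 (HashKey itself is stored by the caller) plus the XORed 64-bit
+# halves of every power (the HashKey_*_k slots), so the 8-block GHASH code can
+# use the Karatsuba form without recomputing these values per block.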
+
+## if a = number of total plaintext bytes
+## b = floor(a/16)
+## num_initial_blocks = b mod 8
+## encrypt the initial num_initial_blocks blocks and apply ghash on the ciphertext
+## r10, r11, r12, rax are clobbered
+## arg1, arg2, arg3, r14 are used as pointers only, not modified
+
+.macro INITIAL_BLOCKS_AVX num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC
+ i = (8-\num_initial_blocks)
+ setreg
+
+ mov arg6, %r10 # r10 = AAD
+ mov arg7, %r12 # r12 = aadLen
+
+
+ mov %r12, %r11
+
+ vpxor reg_i, reg_i, reg_i
+_get_AAD_loop\@:
+ vmovd (%r10), \T1
+ vpslldq $12, \T1, \T1
+ vpsrldq $4, reg_i, reg_i
+ vpxor \T1, reg_i, reg_i
+
+ add $4, %r10
+ sub $4, %r12
+ jg _get_AAD_loop\@
+
+
+ cmp $16, %r11
+ je _get_AAD_loop2_done\@
+ mov $16, %r12
+
+_get_AAD_loop2\@:
+ vpsrldq $4, reg_i, reg_i
+ sub $4, %r12
+ cmp %r11, %r12
+ jg _get_AAD_loop2\@
+
+_get_AAD_loop2_done\@:
+
+ #byte-reflect the AAD data
+ vpshufb SHUF_MASK(%rip), reg_i, reg_i
+
+ # initialize the data pointer offset as zero
+ xor %r11, %r11
+
+ # start AES for num_initial_blocks blocks
+ mov arg5, %rax # rax = *Y0
+ vmovdqu (%rax), \CTR # CTR = Y0
+ vpshufb SHUF_MASK(%rip), \CTR, \CTR
+
+
+ i = (9-\num_initial_blocks)
+ setreg
+.rep \num_initial_blocks
+ vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
+ vmovdqa \CTR, reg_i
+ vpshufb SHUF_MASK(%rip), reg_i, reg_i # perform a 16Byte swap
+ i = (i+1)
+ setreg
+.endr
+
+ vmovdqa (arg1), \T_key
+ i = (9-\num_initial_blocks)
+ setreg
+.rep \num_initial_blocks
+ vpxor \T_key, reg_i, reg_i
+ i = (i+1)
+ setreg
+.endr
+
+ j = 1
+ setreg
+.rep 9
+ vmovdqa 16*j(arg1), \T_key
+ i = (9-\num_initial_blocks)
+ setreg
+.rep \num_initial_blocks
+ vaesenc \T_key, reg_i, reg_i
+ i = (i+1)
+ setreg
+.endr
+
+ j = (j+1)
+ setreg
+.endr
+
+
+ vmovdqa 16*10(arg1), \T_key
+ i = (9-\num_initial_blocks)
+ setreg
+.rep \num_initial_blocks
+ vaesenclast \T_key, reg_i, reg_i
+ i = (i+1)
+ setreg
+.endr
+
+ i = (9-\num_initial_blocks)
+ setreg
+.rep \num_initial_blocks
+ vmovdqu (arg3, %r11), \T1
+ vpxor \T1, reg_i, reg_i
+ vmovdqu reg_i, (arg2 , %r11) # write back ciphertext for num_initial_blocks blocks
+ add $16, %r11
+.if \ENC_DEC == DEC
+ vmovdqa \T1, reg_i
+.endif
+ vpshufb SHUF_MASK(%rip), reg_i, reg_i # prepare ciphertext for GHASH computations
+ i = (i+1)
+ setreg
+.endr
+
+
+ i = (8-\num_initial_blocks)
+ j = (9-\num_initial_blocks)
+ setreg
+ GHASH_MUL_AVX reg_i, \T2, \T1, \T3, \T4, \T5, \T6
+
+.rep \num_initial_blocks
+ vpxor reg_i, reg_j, reg_j
+ GHASH_MUL_AVX reg_j, \T2, \T1, \T3, \T4, \T5, \T6 # apply GHASH on num_initial_blocks blocks
+ i = (i+1)
+ j = (j+1)
+ setreg
+.endr
+ # XMM8 has the combined result here
+
+ vmovdqa \XMM8, TMP1(%rsp)
+ vmovdqa \XMM8, \T3
+
+ cmp $128, %r13
+        jl      _initial_blocks_done\@                  # fewer than 128 bytes left: skip pre-encrypting 8 counter blocks
+
+###############################################################################
+# at least 128 bytes of data remain: pre-encrypt the next 8 counter blocks for the main loop
+ vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
+ vmovdqa \CTR, \XMM1
+ vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap
+
+ vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
+ vmovdqa \CTR, \XMM2
+ vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap
+
+ vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
+ vmovdqa \CTR, \XMM3
+ vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap
+
+ vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
+ vmovdqa \CTR, \XMM4
+ vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap
+
+ vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
+ vmovdqa \CTR, \XMM5
+ vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap
+
+ vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
+ vmovdqa \CTR, \XMM6
+ vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap
+
+ vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
+ vmovdqa \CTR, \XMM7
+ vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap
+
+ vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
+ vmovdqa \CTR, \XMM8
+ vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
+
+ vmovdqa (arg1), \T_key
+ vpxor \T_key, \XMM1, \XMM1
+ vpxor \T_key, \XMM2, \XMM2
+ vpxor \T_key, \XMM3, \XMM3
+ vpxor \T_key, \XMM4, \XMM4
+ vpxor \T_key, \XMM5, \XMM5
+ vpxor \T_key, \XMM6, \XMM6
+ vpxor \T_key, \XMM7, \XMM7
+ vpxor \T_key, \XMM8, \XMM8
+
+ i = 1
+ setreg
+.rep 9 # do 9 rounds
+ vmovdqa 16*i(arg1), \T_key
+ vaesenc \T_key, \XMM1, \XMM1
+ vaesenc \T_key, \XMM2, \XMM2
+ vaesenc \T_key, \XMM3, \XMM3
+ vaesenc \T_key, \XMM4, \XMM4
+ vaesenc \T_key, \XMM5, \XMM5
+ vaesenc \T_key, \XMM6, \XMM6
+ vaesenc \T_key, \XMM7, \XMM7
+ vaesenc \T_key, \XMM8, \XMM8
+ i = (i+1)
+ setreg
+.endr
+
+
+ vmovdqa 16*i(arg1), \T_key
+ vaesenclast \T_key, \XMM1, \XMM1
+ vaesenclast \T_key, \XMM2, \XMM2
+ vaesenclast \T_key, \XMM3, \XMM3
+ vaesenclast \T_key, \XMM4, \XMM4
+ vaesenclast \T_key, \XMM5, \XMM5
+ vaesenclast \T_key, \XMM6, \XMM6
+ vaesenclast \T_key, \XMM7, \XMM7
+ vaesenclast \T_key, \XMM8, \XMM8
+
+ vmovdqu (arg3, %r11), \T1
+ vpxor \T1, \XMM1, \XMM1
+ vmovdqu \XMM1, (arg2 , %r11)
+ .if \ENC_DEC == DEC
+ vmovdqa \T1, \XMM1
+ .endif
+
+ vmovdqu 16*1(arg3, %r11), \T1
+ vpxor \T1, \XMM2, \XMM2
+ vmovdqu \XMM2, 16*1(arg2 , %r11)
+ .if \ENC_DEC == DEC
+ vmovdqa \T1, \XMM2
+ .endif
+
+ vmovdqu 16*2(arg3, %r11), \T1
+ vpxor \T1, \XMM3, \XMM3
+ vmovdqu \XMM3, 16*2(arg2 , %r11)
+ .if \ENC_DEC == DEC
+ vmovdqa \T1, \XMM3
+ .endif
+
+ vmovdqu 16*3(arg3, %r11), \T1
+ vpxor \T1, \XMM4, \XMM4
+ vmovdqu \XMM4, 16*3(arg2 , %r11)
+ .if \ENC_DEC == DEC
+ vmovdqa \T1, \XMM4
+ .endif
+
+ vmovdqu 16*4(arg3, %r11), \T1
+ vpxor \T1, \XMM5, \XMM5
+ vmovdqu \XMM5, 16*4(arg2 , %r11)
+ .if \ENC_DEC == DEC
+ vmovdqa \T1, \XMM5
+ .endif
+
+ vmovdqu 16*5(arg3, %r11), \T1
+ vpxor \T1, \XMM6, \XMM6
+ vmovdqu \XMM6, 16*5(arg2 , %r11)
+ .if \ENC_DEC == DEC
+ vmovdqa \T1, \XMM6
+ .endif
+
+ vmovdqu 16*6(arg3, %r11), \T1
+ vpxor \T1, \XMM7, \XMM7
+ vmovdqu \XMM7, 16*6(arg2 , %r11)
+ .if \ENC_DEC == DEC
+ vmovdqa \T1, \XMM7
+ .endif
+
+ vmovdqu 16*7(arg3, %r11), \T1
+ vpxor \T1, \XMM8, \XMM8
+ vmovdqu \XMM8, 16*7(arg2 , %r11)
+ .if \ENC_DEC == DEC
+ vmovdqa \T1, \XMM8
+ .endif
+
+ add $128, %r11
+
+ vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap
+ vpxor TMP1(%rsp), \XMM1, \XMM1 # combine GHASHed value with the corresponding ciphertext
+ vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
+
+###############################################################################
+
+_initial_blocks_done\@:
+
+.endm
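+
+# Summary of INITIAL_BLOCKS_AVX: the AAD is gathered into one xmm register and
+# byte-reflected, the first num_initial_blocks data blocks are CTR-encrypted
+# (or decrypted) and GHASHed together with the AAD, and, when at least 128
+# bytes of data remain, the next 8 counter blocks are already AES-encrypted
+# into XMM1..XMM8 so the 8-block main loop starts with its AES work done.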
+
+# encrypt 8 blocks at a time
+# ghash the 8 previously encrypted ciphertext blocks
+# arg1, arg2, arg3 are used as pointers only, not modified
+# r11 is the data offset value
+.macro GHASH_8_ENCRYPT_8_PARALLEL_AVX T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC
+
+ vmovdqa \XMM1, \T2
+ vmovdqa \XMM2, TMP2(%rsp)
+ vmovdqa \XMM3, TMP3(%rsp)
+ vmovdqa \XMM4, TMP4(%rsp)
+ vmovdqa \XMM5, TMP5(%rsp)
+ vmovdqa \XMM6, TMP6(%rsp)
+ vmovdqa \XMM7, TMP7(%rsp)
+ vmovdqa \XMM8, TMP8(%rsp)
+
+.if \loop_idx == in_order
+ vpaddd ONE(%rip), \CTR, \XMM1 # INCR CNT
+ vpaddd ONE(%rip), \XMM1, \XMM2
+ vpaddd ONE(%rip), \XMM2, \XMM3
+ vpaddd ONE(%rip), \XMM3, \XMM4
+ vpaddd ONE(%rip), \XMM4, \XMM5
+ vpaddd ONE(%rip), \XMM5, \XMM6
+ vpaddd ONE(%rip), \XMM6, \XMM7
+ vpaddd ONE(%rip), \XMM7, \XMM8
+ vmovdqa \XMM8, \CTR
+
+ vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
+.else
+ vpaddd ONEf(%rip), \CTR, \XMM1 # INCR CNT
+ vpaddd ONEf(%rip), \XMM1, \XMM2
+ vpaddd ONEf(%rip), \XMM2, \XMM3
+ vpaddd ONEf(%rip), \XMM3, \XMM4
+ vpaddd ONEf(%rip), \XMM4, \XMM5
+ vpaddd ONEf(%rip), \XMM5, \XMM6
+ vpaddd ONEf(%rip), \XMM6, \XMM7
+ vpaddd ONEf(%rip), \XMM7, \XMM8
+ vmovdqa \XMM8, \CTR
+.endif
+
+
+ #######################################################################
+
+ vmovdqu (arg1), \T1
+ vpxor \T1, \XMM1, \XMM1
+ vpxor \T1, \XMM2, \XMM2
+ vpxor \T1, \XMM3, \XMM3
+ vpxor \T1, \XMM4, \XMM4
+ vpxor \T1, \XMM5, \XMM5
+ vpxor \T1, \XMM6, \XMM6
+ vpxor \T1, \XMM7, \XMM7
+ vpxor \T1, \XMM8, \XMM8
+
+ #######################################################################
+
+
+
+
+
+ vmovdqu 16*1(arg1), \T1
+ vaesenc \T1, \XMM1, \XMM1
+ vaesenc \T1, \XMM2, \XMM2
+ vaesenc \T1, \XMM3, \XMM3
+ vaesenc \T1, \XMM4, \XMM4
+ vaesenc \T1, \XMM5, \XMM5
+ vaesenc \T1, \XMM6, \XMM6
+ vaesenc \T1, \XMM7, \XMM7
+ vaesenc \T1, \XMM8, \XMM8
+
+ vmovdqu 16*2(arg1), \T1
+ vaesenc \T1, \XMM1, \XMM1
+ vaesenc \T1, \XMM2, \XMM2
+ vaesenc \T1, \XMM3, \XMM3
+ vaesenc \T1, \XMM4, \XMM4
+ vaesenc \T1, \XMM5, \XMM5
+ vaesenc \T1, \XMM6, \XMM6
+ vaesenc \T1, \XMM7, \XMM7
+ vaesenc \T1, \XMM8, \XMM8
+
+
+ #######################################################################
+
+ vmovdqa HashKey_8(arg1), \T5
+ vpclmulqdq $0x11, \T5, \T2, \T4 # T4 = a1*b1
+ vpclmulqdq $0x00, \T5, \T2, \T7 # T7 = a0*b0
+
+ vpshufd $0b01001110, \T2, \T6
+ vpxor \T2, \T6, \T6
+
+ vmovdqa HashKey_8_k(arg1), \T5
+ vpclmulqdq $0x00, \T5, \T6, \T6
+
+ vmovdqu 16*3(arg1), \T1
+ vaesenc \T1, \XMM1, \XMM1
+ vaesenc \T1, \XMM2, \XMM2
+ vaesenc \T1, \XMM3, \XMM3
+ vaesenc \T1, \XMM4, \XMM4
+ vaesenc \T1, \XMM5, \XMM5
+ vaesenc \T1, \XMM6, \XMM6
+ vaesenc \T1, \XMM7, \XMM7
+ vaesenc \T1, \XMM8, \XMM8
+
+ vmovdqa TMP2(%rsp), \T1
+ vmovdqa HashKey_7(arg1), \T5
+ vpclmulqdq $0x11, \T5, \T1, \T3
+ vpxor \T3, \T4, \T4
+ vpclmulqdq $0x00, \T5, \T1, \T3
+ vpxor \T3, \T7, \T7
+
+ vpshufd $0b01001110, \T1, \T3
+ vpxor \T1, \T3, \T3
+ vmovdqa HashKey_7_k(arg1), \T5
+ vpclmulqdq $0x10, \T5, \T3, \T3
+ vpxor \T3, \T6, \T6
+
+ vmovdqu 16*4(arg1), \T1
+ vaesenc \T1, \XMM1, \XMM1
+ vaesenc \T1, \XMM2, \XMM2
+ vaesenc \T1, \XMM3, \XMM3
+ vaesenc \T1, \XMM4, \XMM4
+ vaesenc \T1, \XMM5, \XMM5
+ vaesenc \T1, \XMM6, \XMM6
+ vaesenc \T1, \XMM7, \XMM7
+ vaesenc \T1, \XMM8, \XMM8
+
+ #######################################################################
+
+ vmovdqa TMP3(%rsp), \T1
+ vmovdqa HashKey_6(arg1), \T5
+ vpclmulqdq $0x11, \T5, \T1, \T3
+ vpxor \T3, \T4, \T4
+ vpclmulqdq $0x00, \T5, \T1, \T3
+ vpxor \T3, \T7, \T7
+
+ vpshufd $0b01001110, \T1, \T3
+ vpxor \T1, \T3, \T3
+ vmovdqa HashKey_6_k(arg1), \T5
+ vpclmulqdq $0x10, \T5, \T3, \T3
+ vpxor \T3, \T6, \T6
+
+ vmovdqu 16*5(arg1), \T1
+ vaesenc \T1, \XMM1, \XMM1
+ vaesenc \T1, \XMM2, \XMM2
+ vaesenc \T1, \XMM3, \XMM3
+ vaesenc \T1, \XMM4, \XMM4
+ vaesenc \T1, \XMM5, \XMM5
+ vaesenc \T1, \XMM6, \XMM6
+ vaesenc \T1, \XMM7, \XMM7
+ vaesenc \T1, \XMM8, \XMM8
+
+ vmovdqa TMP4(%rsp), \T1
+ vmovdqa HashKey_5(arg1), \T5
+ vpclmulqdq $0x11, \T5, \T1, \T3
+ vpxor \T3, \T4, \T4
+ vpclmulqdq $0x00, \T5, \T1, \T3
+ vpxor \T3, \T7, \T7
+
+ vpshufd $0b01001110, \T1, \T3
+ vpxor \T1, \T3, \T3
+ vmovdqa HashKey_5_k(arg1), \T5
+ vpclmulqdq $0x10, \T5, \T3, \T3
+ vpxor \T3, \T6, \T6
+
+ vmovdqu 16*6(arg1), \T1
+ vaesenc \T1, \XMM1, \XMM1
+ vaesenc \T1, \XMM2, \XMM2
+ vaesenc \T1, \XMM3, \XMM3
+ vaesenc \T1, \XMM4, \XMM4
+ vaesenc \T1, \XMM5, \XMM5
+ vaesenc \T1, \XMM6, \XMM6
+ vaesenc \T1, \XMM7, \XMM7
+ vaesenc \T1, \XMM8, \XMM8
+
+
+ vmovdqa TMP5(%rsp), \T1
+ vmovdqa HashKey_4(arg1), \T5
+ vpclmulqdq $0x11, \T5, \T1, \T3
+ vpxor \T3, \T4, \T4
+ vpclmulqdq $0x00, \T5, \T1, \T3
+ vpxor \T3, \T7, \T7
+
+ vpshufd $0b01001110, \T1, \T3
+ vpxor \T1, \T3, \T3
+ vmovdqa HashKey_4_k(arg1), \T5
+ vpclmulqdq $0x10, \T5, \T3, \T3
+ vpxor \T3, \T6, \T6
+
+ vmovdqu 16*7(arg1), \T1
+ vaesenc \T1, \XMM1, \XMM1
+ vaesenc \T1, \XMM2, \XMM2
+ vaesenc \T1, \XMM3, \XMM3
+ vaesenc \T1, \XMM4, \XMM4
+ vaesenc \T1, \XMM5, \XMM5
+ vaesenc \T1, \XMM6, \XMM6
+ vaesenc \T1, \XMM7, \XMM7
+ vaesenc \T1, \XMM8, \XMM8
+
+ vmovdqa TMP6(%rsp), \T1
+ vmovdqa HashKey_3(arg1), \T5
+ vpclmulqdq $0x11, \T5, \T1, \T3
+ vpxor \T3, \T4, \T4
+ vpclmulqdq $0x00, \T5, \T1, \T3
+ vpxor \T3, \T7, \T7
+
+ vpshufd $0b01001110, \T1, \T3
+ vpxor \T1, \T3, \T3
+ vmovdqa HashKey_3_k(arg1), \T5
+ vpclmulqdq $0x10, \T5, \T3, \T3
+ vpxor \T3, \T6, \T6
+
+
+ vmovdqu 16*8(arg1), \T1
+ vaesenc \T1, \XMM1, \XMM1
+ vaesenc \T1, \XMM2, \XMM2
+ vaesenc \T1, \XMM3, \XMM3
+ vaesenc \T1, \XMM4, \XMM4
+ vaesenc \T1, \XMM5, \XMM5
+ vaesenc \T1, \XMM6, \XMM6
+ vaesenc \T1, \XMM7, \XMM7
+ vaesenc \T1, \XMM8, \XMM8
+
+ vmovdqa TMP7(%rsp), \T1
+ vmovdqa HashKey_2(arg1), \T5
+ vpclmulqdq $0x11, \T5, \T1, \T3
+ vpxor \T3, \T4, \T4
+ vpclmulqdq $0x00, \T5, \T1, \T3
+ vpxor \T3, \T7, \T7
+
+ vpshufd $0b01001110, \T1, \T3
+ vpxor \T1, \T3, \T3
+ vmovdqa HashKey_2_k(arg1), \T5
+ vpclmulqdq $0x10, \T5, \T3, \T3
+ vpxor \T3, \T6, \T6
+
+ #######################################################################
+
+ vmovdqu 16*9(arg1), \T5
+ vaesenc \T5, \XMM1, \XMM1
+ vaesenc \T5, \XMM2, \XMM2
+ vaesenc \T5, \XMM3, \XMM3
+ vaesenc \T5, \XMM4, \XMM4
+ vaesenc \T5, \XMM5, \XMM5
+ vaesenc \T5, \XMM6, \XMM6
+ vaesenc \T5, \XMM7, \XMM7
+ vaesenc \T5, \XMM8, \XMM8
+
+ vmovdqa TMP8(%rsp), \T1
+ vmovdqa HashKey(arg1), \T5
+ vpclmulqdq $0x11, \T5, \T1, \T3
+ vpxor \T3, \T4, \T4
+ vpclmulqdq $0x00, \T5, \T1, \T3
+ vpxor \T3, \T7, \T7
+
+ vpshufd $0b01001110, \T1, \T3
+ vpxor \T1, \T3, \T3
+ vmovdqa HashKey_k(arg1), \T5
+ vpclmulqdq $0x10, \T5, \T3, \T3
+ vpxor \T3, \T6, \T6
+
+ vpxor \T4, \T6, \T6
+ vpxor \T7, \T6, \T6
+
+ vmovdqu 16*10(arg1), \T5
+
+ i = 0
+ j = 1
+ setreg
+.rep 8
+ vpxor 16*i(arg3, %r11), \T5, \T2
+ .if \ENC_DEC == ENC
+ vaesenclast \T2, reg_j, reg_j
+ .else
+ vaesenclast \T2, reg_j, \T3
+ vmovdqu 16*i(arg3, %r11), reg_j
+ vmovdqu \T3, 16*i(arg2, %r11)
+ .endif
+ i = (i+1)
+ j = (j+1)
+ setreg
+.endr
+ #######################################################################
+
+
+ vpslldq $8, \T6, \T3 # shift-L T3 2 DWs
+	vpsrldq	$8, \T6, \T6				# shift-R T6 2 DWs
+ vpxor \T3, \T7, \T7
+ vpxor \T4, \T6, \T6 # accumulate the results in T6:T7
+
+
+
+ #######################################################################
+ #first phase of the reduction
+ #######################################################################
+	vpslld	$31, \T7, \T2				# packed left shifting << 31
+	vpslld	$30, \T7, \T3				# packed left shifting << 30
+	vpslld	$25, \T7, \T4				# packed left shifting << 25
+
+ vpxor \T3, \T2, \T2 # xor the shifted versions
+ vpxor \T4, \T2, \T2
+
+ vpsrldq $4, \T2, \T1 # shift-R T1 1 DW
+
+ vpslldq $12, \T2, \T2 # shift-L T2 3 DWs
+ vpxor \T2, \T7, \T7 # first phase of the reduction complete
+ #######################################################################
+ .if \ENC_DEC == ENC
+ vmovdqu \XMM1, 16*0(arg2,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM2, 16*1(arg2,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM3, 16*2(arg2,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM4, 16*3(arg2,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM5, 16*4(arg2,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM6, 16*5(arg2,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM7, 16*6(arg2,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM8, 16*7(arg2,%r11) # Write to the Ciphertext buffer
+ .endif
+
+ #######################################################################
+ #second phase of the reduction
+	vpsrld	$1, \T7, \T2				# packed right shifting >> 1
+	vpsrld	$2, \T7, \T3				# packed right shifting >> 2
+	vpsrld	$7, \T7, \T4				# packed right shifting >> 7
+ vpxor \T3, \T2, \T2 # xor the shifted versions
+ vpxor \T4, \T2, \T2
+
+ vpxor \T1, \T2, \T2
+ vpxor \T2, \T7, \T7
+ vpxor \T7, \T6, \T6 # the result is in T6
+ #######################################################################
+
+ vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
+
+
+ vpxor \T6, \XMM1, \XMM1
+
+
+
+.endm
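+
+# GHASH_8_ENCRYPT_8_PARALLEL_AVX is the steady-state loop body: while the next
+# 8 counter blocks walk through the AES rounds, the 8 previous ciphertext
+# blocks (saved in \T2 and TMP2..TMP8) are multiplied by HashKey^8..HashKey^1
+# and accumulated, interleaving PCLMULQDQ work with AESENC rounds to hide
+# instruction latencies.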
+
+
+# GHASH the last 8 ciphertext blocks.
+.macro GHASH_LAST_8_AVX T1 T2 T3 T4 T5 T6 T7 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8
+
+ ## Karatsuba Method
+
+
+ vpshufd $0b01001110, \XMM1, \T2
+ vpxor \XMM1, \T2, \T2
+ vmovdqa HashKey_8(arg1), \T5
+ vpclmulqdq $0x11, \T5, \XMM1, \T6
+ vpclmulqdq $0x00, \T5, \XMM1, \T7
+
+ vmovdqa HashKey_8_k(arg1), \T3
+ vpclmulqdq $0x00, \T3, \T2, \XMM1
+
+ ######################
+
+ vpshufd $0b01001110, \XMM2, \T2
+ vpxor \XMM2, \T2, \T2
+ vmovdqa HashKey_7(arg1), \T5
+ vpclmulqdq $0x11, \T5, \XMM2, \T4
+ vpxor \T4, \T6, \T6
+
+ vpclmulqdq $0x00, \T5, \XMM2, \T4
+ vpxor \T4, \T7, \T7
+
+ vmovdqa HashKey_7_k(arg1), \T3
+ vpclmulqdq $0x00, \T3, \T2, \T2
+ vpxor \T2, \XMM1, \XMM1
+
+ ######################
+
+ vpshufd $0b01001110, \XMM3, \T2
+ vpxor \XMM3, \T2, \T2
+ vmovdqa HashKey_6(arg1), \T5
+ vpclmulqdq $0x11, \T5, \XMM3, \T4
+ vpxor \T4, \T6, \T6
+
+ vpclmulqdq $0x00, \T5, \XMM3, \T4
+ vpxor \T4, \T7, \T7
+
+ vmovdqa HashKey_6_k(arg1), \T3
+ vpclmulqdq $0x00, \T3, \T2, \T2
+ vpxor \T2, \XMM1, \XMM1
+
+ ######################
+
+ vpshufd $0b01001110, \XMM4, \T2
+ vpxor \XMM4, \T2, \T2
+ vmovdqa HashKey_5(arg1), \T5
+ vpclmulqdq $0x11, \T5, \XMM4, \T4
+ vpxor \T4, \T6, \T6
+
+ vpclmulqdq $0x00, \T5, \XMM4, \T4
+ vpxor \T4, \T7, \T7
+
+ vmovdqa HashKey_5_k(arg1), \T3
+ vpclmulqdq $0x00, \T3, \T2, \T2
+ vpxor \T2, \XMM1, \XMM1
+
+ ######################
+
+ vpshufd $0b01001110, \XMM5, \T2
+ vpxor \XMM5, \T2, \T2
+ vmovdqa HashKey_4(arg1), \T5
+ vpclmulqdq $0x11, \T5, \XMM5, \T4
+ vpxor \T4, \T6, \T6
+
+ vpclmulqdq $0x00, \T5, \XMM5, \T4
+ vpxor \T4, \T7, \T7
+
+ vmovdqa HashKey_4_k(arg1), \T3
+ vpclmulqdq $0x00, \T3, \T2, \T2
+ vpxor \T2, \XMM1, \XMM1
+
+ ######################
+
+ vpshufd $0b01001110, \XMM6, \T2
+ vpxor \XMM6, \T2, \T2
+ vmovdqa HashKey_3(arg1), \T5
+ vpclmulqdq $0x11, \T5, \XMM6, \T4
+ vpxor \T4, \T6, \T6
+
+ vpclmulqdq $0x00, \T5, \XMM6, \T4
+ vpxor \T4, \T7, \T7
+
+ vmovdqa HashKey_3_k(arg1), \T3
+ vpclmulqdq $0x00, \T3, \T2, \T2
+ vpxor \T2, \XMM1, \XMM1
+
+ ######################
+
+ vpshufd $0b01001110, \XMM7, \T2
+ vpxor \XMM7, \T2, \T2
+ vmovdqa HashKey_2(arg1), \T5
+ vpclmulqdq $0x11, \T5, \XMM7, \T4
+ vpxor \T4, \T6, \T6
+
+ vpclmulqdq $0x00, \T5, \XMM7, \T4
+ vpxor \T4, \T7, \T7
+
+ vmovdqa HashKey_2_k(arg1), \T3
+ vpclmulqdq $0x00, \T3, \T2, \T2
+ vpxor \T2, \XMM1, \XMM1
+
+ ######################
+
+ vpshufd $0b01001110, \XMM8, \T2
+ vpxor \XMM8, \T2, \T2
+ vmovdqa HashKey(arg1), \T5
+ vpclmulqdq $0x11, \T5, \XMM8, \T4
+ vpxor \T4, \T6, \T6
+
+ vpclmulqdq $0x00, \T5, \XMM8, \T4
+ vpxor \T4, \T7, \T7
+
+ vmovdqa HashKey_k(arg1), \T3
+ vpclmulqdq $0x00, \T3, \T2, \T2
+
+ vpxor \T2, \XMM1, \XMM1
+ vpxor \T6, \XMM1, \XMM1
+ vpxor \T7, \XMM1, \T2
+
+
+
+
+ vpslldq $8, \T2, \T4
+ vpsrldq $8, \T2, \T2
+
+ vpxor \T4, \T7, \T7
+ vpxor \T2, \T6, \T6 # <T6:T7> holds the result of
+ # the accumulated carry-less multiplications
+
+ #######################################################################
+ #first phase of the reduction
+	vpslld	$31, \T7, \T2				# packed left shifting << 31
+	vpslld	$30, \T7, \T3				# packed left shifting << 30
+	vpslld	$25, \T7, \T4				# packed left shifting << 25
+
+ vpxor \T3, \T2, \T2 # xor the shifted versions
+ vpxor \T4, \T2, \T2
+
+ vpsrldq $4, \T2, \T1 # shift-R T1 1 DW
+
+ vpslldq $12, \T2, \T2 # shift-L T2 3 DWs
+ vpxor \T2, \T7, \T7 # first phase of the reduction complete
+ #######################################################################
+
+
+ #second phase of the reduction
+	vpsrld	$1, \T7, \T2				# packed right shifting >> 1
+	vpsrld	$2, \T7, \T3				# packed right shifting >> 2
+	vpsrld	$7, \T7, \T4				# packed right shifting >> 7
+ vpxor \T3, \T2, \T2 # xor the shifted versions
+ vpxor \T4, \T2, \T2
+
+ vpxor \T1, \T2, \T2
+ vpxor \T2, \T7, \T7
+ vpxor \T7, \T6, \T6 # the result is in T6
+
+.endm
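+
+# GHASH_LAST_8_AVX folds the final 8 ciphertext blocks into the hash: block k
+# is multiplied by HashKey^(9-k), the partial products are accumulated using
+# Karatsuba, and the 256-bit result is reduced to the 128-bit digest in \T6.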
+
+
+# combined for GCM encrypt and decrypt functions
+# clobbering all xmm registers
+# clobbering r10, r11, r12, r13, r14, r15
+.macro GCM_ENC_DEC_AVX ENC_DEC
+
+ #the number of pushes must equal STACK_OFFSET
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ mov %rsp, %r14
+
+
+
+
+ sub $VARIABLE_OFFSET, %rsp
+ and $~63, %rsp # align rsp to 64 bytes
+
+
+ vmovdqu HashKey(arg1), %xmm13 # xmm13 = HashKey
+
+ mov arg4, %r13 # save the number of bytes of plaintext/ciphertext
+ and $-16, %r13 # r13 = r13 - (r13 mod 16)
+
+ mov %r13, %r12
+ shr $4, %r12
+ and $7, %r12
+ jz _initial_num_blocks_is_0\@
+
+ cmp $7, %r12
+ je _initial_num_blocks_is_7\@
+ cmp $6, %r12
+ je _initial_num_blocks_is_6\@
+ cmp $5, %r12
+ je _initial_num_blocks_is_5\@
+ cmp $4, %r12
+ je _initial_num_blocks_is_4\@
+ cmp $3, %r12
+ je _initial_num_blocks_is_3\@
+ cmp $2, %r12
+ je _initial_num_blocks_is_2\@
+
+ jmp _initial_num_blocks_is_1\@
+
+_initial_num_blocks_is_7\@:
+ INITIAL_BLOCKS_AVX 7, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*7, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_6\@:
+ INITIAL_BLOCKS_AVX 6, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*6, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_5\@:
+ INITIAL_BLOCKS_AVX 5, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*5, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_4\@:
+ INITIAL_BLOCKS_AVX 4, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*4, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_3\@:
+ INITIAL_BLOCKS_AVX 3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*3, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_2\@:
+ INITIAL_BLOCKS_AVX 2, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*2, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_1\@:
+ INITIAL_BLOCKS_AVX 1, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*1, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_0\@:
+ INITIAL_BLOCKS_AVX 0, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+
+
+_initial_blocks_encrypted\@:
+ cmp $0, %r13
+ je _zero_cipher_left\@
+
+ sub $128, %r13
+ je _eight_cipher_left\@
+
+
+
+
+ vmovd %xmm9, %r15d
+ and $255, %r15d
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+
+
+_encrypt_by_8_new\@:
+ cmp $(255-8), %r15d
+ jg _encrypt_by_8\@
+
+
+
+ add $8, %r15b
+ GHASH_8_ENCRYPT_8_PARALLEL_AVX %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, out_order, \ENC_DEC
+ add $128, %r11
+ sub $128, %r13
+ jne _encrypt_by_8_new\@
+
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+ jmp _eight_cipher_left\@
+
+_encrypt_by_8\@:
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+ add $8, %r15b
+ GHASH_8_ENCRYPT_8_PARALLEL_AVX %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, in_order, \ENC_DEC
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+ add $128, %r11
+ sub $128, %r13
+ jne _encrypt_by_8_new\@
+
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+
+
+
+
+_eight_cipher_left\@:
+ GHASH_LAST_8_AVX %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8
+
+
+_zero_cipher_left\@:
+ cmp $16, arg4
+ jl _only_less_than_16\@
+
+ mov arg4, %r13
+ and $15, %r13 # r13 = (arg4 mod 16)
+
+ je _multiple_of_16_bytes\@
+
+	# handle the last <16 Byte block separately
+
+
+ vpaddd ONE(%rip), %xmm9, %xmm9 # INCR CNT to get Yn
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+ ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Yn)
+
+ sub $16, %r11
+ add %r13, %r11
+ vmovdqu (arg3, %r11), %xmm1 # receive the last <16 Byte block
+
+ lea SHIFT_MASK+16(%rip), %r12
+ sub %r13, %r12 # adjust the shuffle mask pointer to be
+ # able to shift 16-r13 bytes (r13 is the
+ # number of bytes in plaintext mod 16)
+ vmovdqu (%r12), %xmm2 # get the appropriate shuffle mask
+ vpshufb %xmm2, %xmm1, %xmm1 # shift right 16-r13 bytes
+ jmp _final_ghash_mul\@
+
+_only_less_than_16\@:
+ # check for 0 length
+ mov arg4, %r13
+ and $15, %r13 # r13 = (arg4 mod 16)
+
+ je _multiple_of_16_bytes\@
+
+	# handle the last <16 Byte block separately
+
+
+ vpaddd ONE(%rip), %xmm9, %xmm9 # INCR CNT to get Yn
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+ ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Yn)
+
+
+ lea SHIFT_MASK+16(%rip), %r12
+ sub %r13, %r12 # adjust the shuffle mask pointer to be
+ # able to shift 16-r13 bytes (r13 is the
+ # number of bytes in plaintext mod 16)
+
+_get_last_16_byte_loop\@:
+ movb (arg3, %r11), %al
+ movb %al, TMP1 (%rsp , %r11)
+ add $1, %r11
+ cmp %r13, %r11
+ jne _get_last_16_byte_loop\@
+
+ vmovdqu TMP1(%rsp), %xmm1
+
+ sub $16, %r11
+
+_final_ghash_mul\@:
+ .if \ENC_DEC == DEC
+ vmovdqa %xmm1, %xmm2
+	vpxor	%xmm1, %xmm9, %xmm9		# Ciphertext XOR E(K, Yn) (recovers the last plaintext bytes)
+ vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to
+ # mask out top 16-r13 bytes of xmm9
+ vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9
+ vpand %xmm1, %xmm2, %xmm2
+ vpshufb SHUF_MASK(%rip), %xmm2, %xmm2
+ vpxor %xmm2, %xmm14, %xmm14
+ #GHASH computation for the last <16 Byte block
+ GHASH_MUL_AVX %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
+ sub %r13, %r11
+ add $16, %r11
+ .else
+ vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn)
+ vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to
+ # mask out top 16-r13 bytes of xmm9
+ vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+ vpxor %xmm9, %xmm14, %xmm14
+ #GHASH computation for the last <16 Byte block
+ GHASH_MUL_AVX %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
+ sub %r13, %r11
+ add $16, %r11
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 # shuffle xmm9 back to output as ciphertext
+ .endif
+
+
+ #############################
+ # output r13 Bytes
+ vmovq %xmm9, %rax
+ cmp $8, %r13
+ jle _less_than_8_bytes_left\@
+
+ mov %rax, (arg2 , %r11)
+ add $8, %r11
+ vpsrldq $8, %xmm9, %xmm9
+ vmovq %xmm9, %rax
+ sub $8, %r13
+
+_less_than_8_bytes_left\@:
+ movb %al, (arg2 , %r11)
+ add $1, %r11
+ shr $8, %rax
+ sub $1, %r13
+ jne _less_than_8_bytes_left\@
+ #############################
+
+_multiple_of_16_bytes\@:
+ mov arg7, %r12 # r12 = aadLen (number of bytes)
+ shl $3, %r12 # convert into number of bits
+ vmovd %r12d, %xmm15 # len(A) in xmm15
+
+	shl	$3, arg4			# len(C) in bits (*8)
+ vmovq arg4, %xmm1
+ vpslldq $8, %xmm15, %xmm15 # xmm15 = len(A)|| 0x0000000000000000
+ vpxor %xmm1, %xmm15, %xmm15 # xmm15 = len(A)||len(C)
+
+ vpxor %xmm15, %xmm14, %xmm14
+ GHASH_MUL_AVX %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 # final GHASH computation
+ vpshufb SHUF_MASK(%rip), %xmm14, %xmm14 # perform a 16Byte swap
+
+ mov arg5, %rax # rax = *Y0
+ vmovdqu (%rax), %xmm9 # xmm9 = Y0
+
+ ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Y0)
+
+ vpxor %xmm14, %xmm9, %xmm9
+
+
+
+_return_T\@:
+ mov arg8, %r10 # r10 = authTag
+ mov arg9, %r11 # r11 = auth_tag_len
+
+ cmp $16, %r11
+ je _T_16\@
+
+ cmp $12, %r11
+ je _T_12\@
+
+_T_8\@:
+ vmovq %xmm9, %rax
+ mov %rax, (%r10)
+ jmp _return_T_done\@
+_T_12\@:
+ vmovq %xmm9, %rax
+ mov %rax, (%r10)
+ vpsrldq $8, %xmm9, %xmm9
+ vmovd %xmm9, %eax
+ mov %eax, 8(%r10)
+ jmp _return_T_done\@
+
+_T_16\@:
+ vmovdqu %xmm9, (%r10)
+
+_return_T_done\@:
+ mov %r14, %rsp
+
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+.endm
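+
+# GCM_ENC_DEC_AVX ties the pieces together: up to 7 leading blocks are handled
+# by INITIAL_BLOCKS_AVX, every remaining full group of 8 blocks goes through
+# GHASH_8_ENCRYPT_8_PARALLEL_AVX, the hash is finished by GHASH_LAST_8_AVX, a
+# trailing partial block is CTR-encrypted and masked, len(A)||len(C) is folded
+# into the hash, and the tag is computed as GHASH XOR E(K, Y0), of which 8, 12
+# or 16 bytes are written to auth_tag.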
+
+
+#############################################################
+#void aesni_gcm_precomp_avx_gen2
+# (gcm_data *my_ctx_data,
+# u8 *hash_subkey)# /* H, the Hash sub key input. Data starts on a 16-byte boundary. */
+#############################################################
+ENTRY(aesni_gcm_precomp_avx_gen2)
+ #the number of pushes must equal STACK_OFFSET
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ mov %rsp, %r14
+
+
+
+ sub $VARIABLE_OFFSET, %rsp
+ and $~63, %rsp # align rsp to 64 bytes
+
+ vmovdqu (arg2), %xmm6 # xmm6 = HashKey
+
+ vpshufb SHUF_MASK(%rip), %xmm6, %xmm6
+ ############### PRECOMPUTATION of HashKey<<1 mod poly from the HashKey
+ vmovdqa %xmm6, %xmm2
+ vpsllq $1, %xmm6, %xmm6
+ vpsrlq $63, %xmm2, %xmm2
+ vmovdqa %xmm2, %xmm1
+ vpslldq $8, %xmm2, %xmm2
+ vpsrldq $8, %xmm1, %xmm1
+ vpor %xmm2, %xmm6, %xmm6
+ #reduction
+ vpshufd $0b00100100, %xmm1, %xmm2
+ vpcmpeqd TWOONE(%rip), %xmm2, %xmm2
+ vpand POLY(%rip), %xmm2, %xmm2
+ vpxor %xmm2, %xmm6, %xmm6 # xmm6 holds the HashKey<<1 mod poly
+ #######################################################################
+ vmovdqa %xmm6, HashKey(arg1) # store HashKey<<1 mod poly
+
+
+ PRECOMPUTE_AVX %xmm6, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5
+
+ mov %r14, %rsp
+
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ ret
+ENDPROC(aesni_gcm_precomp_avx_gen2)
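+
+# The precompute entry point above is meant to run once per key, after the AES
+# round keys have been placed at the start of the context, and before the
+# encrypt/decrypt entry points below.  Hedged C-level sketch of that call
+# order (illustration only; the real callers are the C glue wrappers and the
+# variable names here are made up):
+#
+#	aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
+#	aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv,
+#			       aad, aad_len, auth_tag, auth_tag_len);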
+
+###############################################################################
+#void aesni_gcm_enc_avx_gen2(
+# gcm_data *my_ctx_data, /* aligned to 16 Bytes */
+# u8 *out, /* Ciphertext output. Encrypt in-place is allowed. */
+# const u8 *in, /* Plaintext input */
+# u64 plaintext_len, /* Length of data in Bytes for encryption. */
+# u8 *iv, /* Pre-counter block j0: 4 byte salt
+# (from Security Association) concatenated with 8 byte
+# Initialisation Vector (from IPSec ESP Payload)
+# concatenated with 0x00000001. 16-byte aligned pointer. */
+# const u8 *aad, /* Additional Authentication Data (AAD)*/
+# u64 aad_len, /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */
+# u8 *auth_tag, /* Authenticated Tag output. */
+# u64 auth_tag_len)# /* Authenticated Tag Length in bytes.
+# Valid values are 16 (most likely), 12 or 8. */
+###############################################################################
+ENTRY(aesni_gcm_enc_avx_gen2)
+ GCM_ENC_DEC_AVX ENC
+ ret
+ENDPROC(aesni_gcm_enc_avx_gen2)
+
+###############################################################################
+#void aesni_gcm_dec_avx_gen2(
+# gcm_data *my_ctx_data, /* aligned to 16 Bytes */
+# u8 *out, /* Plaintext output. Decrypt in-place is allowed. */
+# const u8 *in, /* Ciphertext input */
+#        u64     plaintext_len, /* Length of data in Bytes for decryption. */
+# u8 *iv, /* Pre-counter block j0: 4 byte salt
+# (from Security Association) concatenated with 8 byte
+# Initialisation Vector (from IPSec ESP Payload)
+# concatenated with 0x00000001. 16-byte aligned pointer. */
+# const u8 *aad, /* Additional Authentication Data (AAD)*/
+# u64 aad_len, /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */
+# u8 *auth_tag, /* Authenticated Tag output. */
+# u64 auth_tag_len)# /* Authenticated Tag Length in bytes.
+# Valid values are 16 (most likely), 12 or 8. */
+###############################################################################
+ENTRY(aesni_gcm_dec_avx_gen2)
+ GCM_ENC_DEC_AVX DEC
+ ret
+ENDPROC(aesni_gcm_dec_avx_gen2)
+#endif /* CONFIG_AS_AVX */
+
+#ifdef CONFIG_AS_AVX2
+###############################################################################
+# GHASH_MUL MACRO to implement: Data*HashKey mod (128,127,126,121,0)
+# Input: A and B (128-bits each, bit-reflected)
+# Output: C = A*B*x mod poly, (i.e. >>1 )
+# To compute GH = GH*HashKey mod poly, give HK = HashKey<<1 mod poly as input
+# GH = GH * HK * x mod poly which is equivalent to GH*HashKey mod poly.
+###############################################################################
+.macro GHASH_MUL_AVX2 GH HK T1 T2 T3 T4 T5
+
+ vpclmulqdq $0x11,\HK,\GH,\T1 # T1 = a1*b1
+ vpclmulqdq $0x00,\HK,\GH,\T2 # T2 = a0*b0
+ vpclmulqdq $0x01,\HK,\GH,\T3 # T3 = a1*b0
+ vpclmulqdq $0x10,\HK,\GH,\GH # GH = a0*b1
+ vpxor \T3, \GH, \GH
+
+
+ vpsrldq $8 , \GH, \T3 # shift-R GH 2 DWs
+ vpslldq $8 , \GH, \GH # shift-L GH 2 DWs
+
+ vpxor \T3, \T1, \T1
+ vpxor \T2, \GH, \GH
+
+ #######################################################################
+ #first phase of the reduction
+ vmovdqa POLY2(%rip), \T3
+
+ vpclmulqdq $0x01, \GH, \T3, \T2
+ vpslldq $8, \T2, \T2 # shift-L T2 2 DWs
+
+ vpxor \T2, \GH, \GH # first phase of the reduction complete
+ #######################################################################
+ #second phase of the reduction
+ vpclmulqdq $0x00, \GH, \T3, \T2
+ vpsrldq $4, \T2, \T2 # shift-R T2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ vpclmulqdq $0x10, \GH, \T3, \GH
+ vpslldq $4, \GH, \GH # shift-L GH 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ vpxor \T2, \GH, \GH # second phase of the reduction complete
+ #######################################################################
+ vpxor \T1, \GH, \GH # the result is in GH
+
+
+.endm
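+
+# Unlike GHASH_MUL_AVX earlier in this file, this variant performs the full
+# 4-term schoolbook carry-less multiply (a1*b1, a0*b0, a1*b0, a0*b1) and
+# replaces the shift-based reduction with two PCLMULQDQ multiplications by the
+# POLY2 constant, trading extra multiplies for fewer shift/XOR operations.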
+
+.macro PRECOMPUTE_AVX2 HK T1 T2 T3 T4 T5 T6
+
+        # precompute HashKey^i<<1 mod poly for i = 2..8 (no Karatsuba split keys on this path)
+ vmovdqa \HK, \T5
+ GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^2<<1 mod poly
+ vmovdqa \T5, HashKey_2(arg1) # [HashKey_2] = HashKey^2<<1 mod poly
+
+ GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^3<<1 mod poly
+ vmovdqa \T5, HashKey_3(arg1)
+
+ GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^4<<1 mod poly
+ vmovdqa \T5, HashKey_4(arg1)
+
+ GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^5<<1 mod poly
+ vmovdqa \T5, HashKey_5(arg1)
+
+ GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^6<<1 mod poly
+ vmovdqa \T5, HashKey_6(arg1)
+
+ GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^7<<1 mod poly
+ vmovdqa \T5, HashKey_7(arg1)
+
+ GHASH_MUL_AVX2 \T5, \HK, \T1, \T3, \T4, \T6, \T2 # T5 = HashKey^8<<1 mod poly
+ vmovdqa \T5, HashKey_8(arg1)
+
+.endm
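+
+# Only HashKey^2..HashKey^8 are stored here: GHASH_MUL_AVX2 multiplies all four
+# 64x64 partial products directly, and GHASH_LAST_8_AVX2 recomputes the
+# Karatsuba split on the fly, so the HashKey_*_k context slots are unused on
+# this code path.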
+
+
+## if a = number of total plaintext bytes
+## b = floor(a/16)
+## num_initial_blocks = b mod 8
+## encrypt the initial num_initial_blocks blocks and apply ghash on the ciphertext
+## r10, r11, r12, rax are clobbered
+## arg1, arg2, arg3, r14 are used as pointers only, not modified
+
+.macro INITIAL_BLOCKS_AVX2 num_initial_blocks T1 T2 T3 T4 T5 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T6 T_key ENC_DEC VER
+ i = (8-\num_initial_blocks)
+ setreg
+
+ mov arg6, %r10 # r10 = AAD
+ mov arg7, %r12 # r12 = aadLen
+
+
+ mov %r12, %r11
+
+ vpxor reg_i, reg_i, reg_i
+_get_AAD_loop\@:
+ vmovd (%r10), \T1
+ vpslldq $12, \T1, \T1
+ vpsrldq $4, reg_i, reg_i
+ vpxor \T1, reg_i, reg_i
+
+ add $4, %r10
+ sub $4, %r12
+ jg _get_AAD_loop\@
+
+
+ cmp $16, %r11
+ je _get_AAD_loop2_done\@
+ mov $16, %r12
+
+_get_AAD_loop2\@:
+ vpsrldq $4, reg_i, reg_i
+ sub $4, %r12
+ cmp %r11, %r12
+ jg _get_AAD_loop2\@
+
+_get_AAD_loop2_done\@:
+
+ #byte-reflect the AAD data
+ vpshufb SHUF_MASK(%rip), reg_i, reg_i
+
+ # initialize the data pointer offset as zero
+ xor %r11, %r11
+
+ # start AES for num_initial_blocks blocks
+ mov arg5, %rax # rax = *Y0
+ vmovdqu (%rax), \CTR # CTR = Y0
+ vpshufb SHUF_MASK(%rip), \CTR, \CTR
+
+
+ i = (9-\num_initial_blocks)
+ setreg
+.rep \num_initial_blocks
+ vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
+ vmovdqa \CTR, reg_i
+ vpshufb SHUF_MASK(%rip), reg_i, reg_i # perform a 16Byte swap
+ i = (i+1)
+ setreg
+.endr
+
+ vmovdqa (arg1), \T_key
+ i = (9-\num_initial_blocks)
+ setreg
+.rep \num_initial_blocks
+ vpxor \T_key, reg_i, reg_i
+ i = (i+1)
+ setreg
+.endr
+
+ j = 1
+ setreg
+.rep 9
+ vmovdqa 16*j(arg1), \T_key
+ i = (9-\num_initial_blocks)
+ setreg
+.rep \num_initial_blocks
+ vaesenc \T_key, reg_i, reg_i
+ i = (i+1)
+ setreg
+.endr
+
+ j = (j+1)
+ setreg
+.endr
+
+
+ vmovdqa 16*10(arg1), \T_key
+ i = (9-\num_initial_blocks)
+ setreg
+.rep \num_initial_blocks
+ vaesenclast \T_key, reg_i, reg_i
+ i = (i+1)
+ setreg
+.endr
+
+ i = (9-\num_initial_blocks)
+ setreg
+.rep \num_initial_blocks
+ vmovdqu (arg3, %r11), \T1
+ vpxor \T1, reg_i, reg_i
+ vmovdqu reg_i, (arg2 , %r11) # write back ciphertext for
+ # num_initial_blocks blocks
+ add $16, %r11
+.if \ENC_DEC == DEC
+ vmovdqa \T1, reg_i
+.endif
+ vpshufb SHUF_MASK(%rip), reg_i, reg_i # prepare ciphertext for GHASH computations
+ i = (i+1)
+ setreg
+.endr
+
+
+ i = (8-\num_initial_blocks)
+ j = (9-\num_initial_blocks)
+ setreg
+ GHASH_MUL_AVX2 reg_i, \T2, \T1, \T3, \T4, \T5, \T6
+
+.rep \num_initial_blocks
+ vpxor reg_i, reg_j, reg_j
+ GHASH_MUL_AVX2 reg_j, \T2, \T1, \T3, \T4, \T5, \T6 # apply GHASH on num_initial_blocks blocks
+ i = (i+1)
+ j = (j+1)
+ setreg
+.endr
+ # XMM8 has the combined result here
+
+ vmovdqa \XMM8, TMP1(%rsp)
+ vmovdqa \XMM8, \T3
+
+ cmp $128, %r13
+	jl      _initial_blocks_done\@                  # fewer than 128 bytes left: skip pre-encrypting 8 counter blocks
+
+###############################################################################
+# at least 128 bytes of data remain: pre-encrypt the next 8 counter blocks for the main loop
+ vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
+ vmovdqa \CTR, \XMM1
+ vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap
+
+ vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
+ vmovdqa \CTR, \XMM2
+ vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap
+
+ vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
+ vmovdqa \CTR, \XMM3
+ vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap
+
+ vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
+ vmovdqa \CTR, \XMM4
+ vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap
+
+ vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
+ vmovdqa \CTR, \XMM5
+ vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap
+
+ vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
+ vmovdqa \CTR, \XMM6
+ vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap
+
+ vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
+ vmovdqa \CTR, \XMM7
+ vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap
+
+ vpaddd ONE(%rip), \CTR, \CTR # INCR Y0
+ vmovdqa \CTR, \XMM8
+ vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
+
+ vmovdqa (arg1), \T_key
+ vpxor \T_key, \XMM1, \XMM1
+ vpxor \T_key, \XMM2, \XMM2
+ vpxor \T_key, \XMM3, \XMM3
+ vpxor \T_key, \XMM4, \XMM4
+ vpxor \T_key, \XMM5, \XMM5
+ vpxor \T_key, \XMM6, \XMM6
+ vpxor \T_key, \XMM7, \XMM7
+ vpxor \T_key, \XMM8, \XMM8
+
+ i = 1
+ setreg
+.rep 9 # do 9 rounds
+ vmovdqa 16*i(arg1), \T_key
+ vaesenc \T_key, \XMM1, \XMM1
+ vaesenc \T_key, \XMM2, \XMM2
+ vaesenc \T_key, \XMM3, \XMM3
+ vaesenc \T_key, \XMM4, \XMM4
+ vaesenc \T_key, \XMM5, \XMM5
+ vaesenc \T_key, \XMM6, \XMM6
+ vaesenc \T_key, \XMM7, \XMM7
+ vaesenc \T_key, \XMM8, \XMM8
+ i = (i+1)
+ setreg
+.endr
+
+
+ vmovdqa 16*i(arg1), \T_key
+ vaesenclast \T_key, \XMM1, \XMM1
+ vaesenclast \T_key, \XMM2, \XMM2
+ vaesenclast \T_key, \XMM3, \XMM3
+ vaesenclast \T_key, \XMM4, \XMM4
+ vaesenclast \T_key, \XMM5, \XMM5
+ vaesenclast \T_key, \XMM6, \XMM6
+ vaesenclast \T_key, \XMM7, \XMM7
+ vaesenclast \T_key, \XMM8, \XMM8
+
+ vmovdqu (arg3, %r11), \T1
+ vpxor \T1, \XMM1, \XMM1
+ vmovdqu \XMM1, (arg2 , %r11)
+ .if \ENC_DEC == DEC
+ vmovdqa \T1, \XMM1
+ .endif
+
+ vmovdqu 16*1(arg3, %r11), \T1
+ vpxor \T1, \XMM2, \XMM2
+ vmovdqu \XMM2, 16*1(arg2 , %r11)
+ .if \ENC_DEC == DEC
+ vmovdqa \T1, \XMM2
+ .endif
+
+ vmovdqu 16*2(arg3, %r11), \T1
+ vpxor \T1, \XMM3, \XMM3
+ vmovdqu \XMM3, 16*2(arg2 , %r11)
+ .if \ENC_DEC == DEC
+ vmovdqa \T1, \XMM3
+ .endif
+
+ vmovdqu 16*3(arg3, %r11), \T1
+ vpxor \T1, \XMM4, \XMM4
+ vmovdqu \XMM4, 16*3(arg2 , %r11)
+ .if \ENC_DEC == DEC
+ vmovdqa \T1, \XMM4
+ .endif
+
+ vmovdqu 16*4(arg3, %r11), \T1
+ vpxor \T1, \XMM5, \XMM5
+ vmovdqu \XMM5, 16*4(arg2 , %r11)
+ .if \ENC_DEC == DEC
+ vmovdqa \T1, \XMM5
+ .endif
+
+ vmovdqu 16*5(arg3, %r11), \T1
+ vpxor \T1, \XMM6, \XMM6
+ vmovdqu \XMM6, 16*5(arg2 , %r11)
+ .if \ENC_DEC == DEC
+ vmovdqa \T1, \XMM6
+ .endif
+
+ vmovdqu 16*6(arg3, %r11), \T1
+ vpxor \T1, \XMM7, \XMM7
+ vmovdqu \XMM7, 16*6(arg2 , %r11)
+ .if \ENC_DEC == DEC
+ vmovdqa \T1, \XMM7
+ .endif
+
+ vmovdqu 16*7(arg3, %r11), \T1
+ vpxor \T1, \XMM8, \XMM8
+ vmovdqu \XMM8, 16*7(arg2 , %r11)
+ .if \ENC_DEC == DEC
+ vmovdqa \T1, \XMM8
+ .endif
+
+ add $128, %r11
+
+ vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap
+ vpxor TMP1(%rsp), \XMM1, \XMM1 # combine GHASHed value with
+ # the corresponding ciphertext
+ vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
+
+###############################################################################
+
+_initial_blocks_done\@:
+
+
+.endm
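+
+# INITIAL_BLOCKS_AVX2 mirrors INITIAL_BLOCKS_AVX above; the functional
+# difference is that the per-block hashing is done with GHASH_MUL_AVX2.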
+
+
+
+# encrypt 8 blocks at a time
+# ghash the 8 previously encrypted ciphertext blocks
+# arg1, arg2, arg3 are used as pointers only, not modified
+# r11 is the data offset value
+.macro GHASH_8_ENCRYPT_8_PARALLEL_AVX2 T1 T2 T3 T4 T5 T6 CTR XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8 T7 loop_idx ENC_DEC
+
+ vmovdqa \XMM1, \T2
+ vmovdqa \XMM2, TMP2(%rsp)
+ vmovdqa \XMM3, TMP3(%rsp)
+ vmovdqa \XMM4, TMP4(%rsp)
+ vmovdqa \XMM5, TMP5(%rsp)
+ vmovdqa \XMM6, TMP6(%rsp)
+ vmovdqa \XMM7, TMP7(%rsp)
+ vmovdqa \XMM8, TMP8(%rsp)
+
+.if \loop_idx == in_order
+ vpaddd ONE(%rip), \CTR, \XMM1 # INCR CNT
+ vpaddd ONE(%rip), \XMM1, \XMM2
+ vpaddd ONE(%rip), \XMM2, \XMM3
+ vpaddd ONE(%rip), \XMM3, \XMM4
+ vpaddd ONE(%rip), \XMM4, \XMM5
+ vpaddd ONE(%rip), \XMM5, \XMM6
+ vpaddd ONE(%rip), \XMM6, \XMM7
+ vpaddd ONE(%rip), \XMM7, \XMM8
+ vmovdqa \XMM8, \CTR
+
+ vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
+.else
+ vpaddd ONEf(%rip), \CTR, \XMM1 # INCR CNT
+ vpaddd ONEf(%rip), \XMM1, \XMM2
+ vpaddd ONEf(%rip), \XMM2, \XMM3
+ vpaddd ONEf(%rip), \XMM3, \XMM4
+ vpaddd ONEf(%rip), \XMM4, \XMM5
+ vpaddd ONEf(%rip), \XMM5, \XMM6
+ vpaddd ONEf(%rip), \XMM6, \XMM7
+ vpaddd ONEf(%rip), \XMM7, \XMM8
+ vmovdqa \XMM8, \CTR
+.endif
+
+
+ #######################################################################
+
+ vmovdqu (arg1), \T1
+ vpxor \T1, \XMM1, \XMM1
+ vpxor \T1, \XMM2, \XMM2
+ vpxor \T1, \XMM3, \XMM3
+ vpxor \T1, \XMM4, \XMM4
+ vpxor \T1, \XMM5, \XMM5
+ vpxor \T1, \XMM6, \XMM6
+ vpxor \T1, \XMM7, \XMM7
+ vpxor \T1, \XMM8, \XMM8
+
+ #######################################################################
+
+
+
+
+
+ vmovdqu 16*1(arg1), \T1
+ vaesenc \T1, \XMM1, \XMM1
+ vaesenc \T1, \XMM2, \XMM2
+ vaesenc \T1, \XMM3, \XMM3
+ vaesenc \T1, \XMM4, \XMM4
+ vaesenc \T1, \XMM5, \XMM5
+ vaesenc \T1, \XMM6, \XMM6
+ vaesenc \T1, \XMM7, \XMM7
+ vaesenc \T1, \XMM8, \XMM8
+
+ vmovdqu 16*2(arg1), \T1
+ vaesenc \T1, \XMM1, \XMM1
+ vaesenc \T1, \XMM2, \XMM2
+ vaesenc \T1, \XMM3, \XMM3
+ vaesenc \T1, \XMM4, \XMM4
+ vaesenc \T1, \XMM5, \XMM5
+ vaesenc \T1, \XMM6, \XMM6
+ vaesenc \T1, \XMM7, \XMM7
+ vaesenc \T1, \XMM8, \XMM8
+
+
+ #######################################################################
+
+ vmovdqa HashKey_8(arg1), \T5
+ vpclmulqdq $0x11, \T5, \T2, \T4 # T4 = a1*b1
+ vpclmulqdq $0x00, \T5, \T2, \T7 # T7 = a0*b0
+ vpclmulqdq $0x01, \T5, \T2, \T6 # T6 = a1*b0
+ vpclmulqdq $0x10, \T5, \T2, \T5 # T5 = a0*b1
+ vpxor \T5, \T6, \T6
+
+ vmovdqu 16*3(arg1), \T1
+ vaesenc \T1, \XMM1, \XMM1
+ vaesenc \T1, \XMM2, \XMM2
+ vaesenc \T1, \XMM3, \XMM3
+ vaesenc \T1, \XMM4, \XMM4
+ vaesenc \T1, \XMM5, \XMM5
+ vaesenc \T1, \XMM6, \XMM6
+ vaesenc \T1, \XMM7, \XMM7
+ vaesenc \T1, \XMM8, \XMM8
+
+ vmovdqa TMP2(%rsp), \T1
+ vmovdqa HashKey_7(arg1), \T5
+ vpclmulqdq $0x11, \T5, \T1, \T3
+ vpxor \T3, \T4, \T4
+
+ vpclmulqdq $0x00, \T5, \T1, \T3
+ vpxor \T3, \T7, \T7
+
+ vpclmulqdq $0x01, \T5, \T1, \T3
+ vpxor \T3, \T6, \T6
+
+ vpclmulqdq $0x10, \T5, \T1, \T3
+ vpxor \T3, \T6, \T6
+
+ vmovdqu 16*4(arg1), \T1
+ vaesenc \T1, \XMM1, \XMM1
+ vaesenc \T1, \XMM2, \XMM2
+ vaesenc \T1, \XMM3, \XMM3
+ vaesenc \T1, \XMM4, \XMM4
+ vaesenc \T1, \XMM5, \XMM5
+ vaesenc \T1, \XMM6, \XMM6
+ vaesenc \T1, \XMM7, \XMM7
+ vaesenc \T1, \XMM8, \XMM8
+
+ #######################################################################
+
+ vmovdqa TMP3(%rsp), \T1
+ vmovdqa HashKey_6(arg1), \T5
+ vpclmulqdq $0x11, \T5, \T1, \T3
+ vpxor \T3, \T4, \T4
+
+ vpclmulqdq $0x00, \T5, \T1, \T3
+ vpxor \T3, \T7, \T7
+
+ vpclmulqdq $0x01, \T5, \T1, \T3
+ vpxor \T3, \T6, \T6
+
+ vpclmulqdq $0x10, \T5, \T1, \T3
+ vpxor \T3, \T6, \T6
+
+ vmovdqu 16*5(arg1), \T1
+ vaesenc \T1, \XMM1, \XMM1
+ vaesenc \T1, \XMM2, \XMM2
+ vaesenc \T1, \XMM3, \XMM3
+ vaesenc \T1, \XMM4, \XMM4
+ vaesenc \T1, \XMM5, \XMM5
+ vaesenc \T1, \XMM6, \XMM6
+ vaesenc \T1, \XMM7, \XMM7
+ vaesenc \T1, \XMM8, \XMM8
+
+ vmovdqa TMP4(%rsp), \T1
+ vmovdqa HashKey_5(arg1), \T5
+ vpclmulqdq $0x11, \T5, \T1, \T3
+ vpxor \T3, \T4, \T4
+
+ vpclmulqdq $0x00, \T5, \T1, \T3
+ vpxor \T3, \T7, \T7
+
+ vpclmulqdq $0x01, \T5, \T1, \T3
+ vpxor \T3, \T6, \T6
+
+ vpclmulqdq $0x10, \T5, \T1, \T3
+ vpxor \T3, \T6, \T6
+
+ vmovdqu 16*6(arg1), \T1
+ vaesenc \T1, \XMM1, \XMM1
+ vaesenc \T1, \XMM2, \XMM2
+ vaesenc \T1, \XMM3, \XMM3
+ vaesenc \T1, \XMM4, \XMM4
+ vaesenc \T1, \XMM5, \XMM5
+ vaesenc \T1, \XMM6, \XMM6
+ vaesenc \T1, \XMM7, \XMM7
+ vaesenc \T1, \XMM8, \XMM8
+
+
+ vmovdqa TMP5(%rsp), \T1
+ vmovdqa HashKey_4(arg1), \T5
+ vpclmulqdq $0x11, \T5, \T1, \T3
+ vpxor \T3, \T4, \T4
+
+ vpclmulqdq $0x00, \T5, \T1, \T3
+ vpxor \T3, \T7, \T7
+
+ vpclmulqdq $0x01, \T5, \T1, \T3
+ vpxor \T3, \T6, \T6
+
+ vpclmulqdq $0x10, \T5, \T1, \T3
+ vpxor \T3, \T6, \T6
+
+ vmovdqu 16*7(arg1), \T1
+ vaesenc \T1, \XMM1, \XMM1
+ vaesenc \T1, \XMM2, \XMM2
+ vaesenc \T1, \XMM3, \XMM3
+ vaesenc \T1, \XMM4, \XMM4
+ vaesenc \T1, \XMM5, \XMM5
+ vaesenc \T1, \XMM6, \XMM6
+ vaesenc \T1, \XMM7, \XMM7
+ vaesenc \T1, \XMM8, \XMM8
+
+ vmovdqa TMP6(%rsp), \T1
+ vmovdqa HashKey_3(arg1), \T5
+ vpclmulqdq $0x11, \T5, \T1, \T3
+ vpxor \T3, \T4, \T4
+
+ vpclmulqdq $0x00, \T5, \T1, \T3
+ vpxor \T3, \T7, \T7
+
+ vpclmulqdq $0x01, \T5, \T1, \T3
+ vpxor \T3, \T6, \T6
+
+ vpclmulqdq $0x10, \T5, \T1, \T3
+ vpxor \T3, \T6, \T6
+
+ vmovdqu 16*8(arg1), \T1
+ vaesenc \T1, \XMM1, \XMM1
+ vaesenc \T1, \XMM2, \XMM2
+ vaesenc \T1, \XMM3, \XMM3
+ vaesenc \T1, \XMM4, \XMM4
+ vaesenc \T1, \XMM5, \XMM5
+ vaesenc \T1, \XMM6, \XMM6
+ vaesenc \T1, \XMM7, \XMM7
+ vaesenc \T1, \XMM8, \XMM8
+
+ vmovdqa TMP7(%rsp), \T1
+ vmovdqa HashKey_2(arg1), \T5
+ vpclmulqdq $0x11, \T5, \T1, \T3
+ vpxor \T3, \T4, \T4
+
+ vpclmulqdq $0x00, \T5, \T1, \T3
+ vpxor \T3, \T7, \T7
+
+ vpclmulqdq $0x01, \T5, \T1, \T3
+ vpxor \T3, \T6, \T6
+
+ vpclmulqdq $0x10, \T5, \T1, \T3
+ vpxor \T3, \T6, \T6
+
+
+ #######################################################################
+
+ vmovdqu 16*9(arg1), \T5
+ vaesenc \T5, \XMM1, \XMM1
+ vaesenc \T5, \XMM2, \XMM2
+ vaesenc \T5, \XMM3, \XMM3
+ vaesenc \T5, \XMM4, \XMM4
+ vaesenc \T5, \XMM5, \XMM5
+ vaesenc \T5, \XMM6, \XMM6
+ vaesenc \T5, \XMM7, \XMM7
+ vaesenc \T5, \XMM8, \XMM8
+
+ vmovdqa TMP8(%rsp), \T1
+ vmovdqa HashKey(arg1), \T5
+
+ vpclmulqdq $0x00, \T5, \T1, \T3
+ vpxor \T3, \T7, \T7
+
+ vpclmulqdq $0x01, \T5, \T1, \T3
+ vpxor \T3, \T6, \T6
+
+ vpclmulqdq $0x10, \T5, \T1, \T3
+ vpxor \T3, \T6, \T6
+
+ vpclmulqdq $0x11, \T5, \T1, \T3
+ vpxor \T3, \T4, \T1
+
+
+ vmovdqu 16*10(arg1), \T5
+
+ i = 0
+ j = 1
+ setreg
+.rep 8
+ vpxor 16*i(arg3, %r11), \T5, \T2
+ .if \ENC_DEC == ENC
+ vaesenclast \T2, reg_j, reg_j
+ .else
+ vaesenclast \T2, reg_j, \T3
+ vmovdqu 16*i(arg3, %r11), reg_j
+ vmovdqu \T3, 16*i(arg2, %r11)
+ .endif
+ i = (i+1)
+ j = (j+1)
+ setreg
+.endr
+ #######################################################################
+
+
+ vpslldq $8, \T6, \T3 # shift-L T3 2 DWs
+	vpsrldq	$8, \T6, \T6				# shift-R T6 2 DWs
+ vpxor \T3, \T7, \T7
+ vpxor \T6, \T1, \T1 # accumulate the results in T1:T7
+
+
+
+ #######################################################################
+ #first phase of the reduction
+ vmovdqa POLY2(%rip), \T3
+
+ vpclmulqdq $0x01, \T7, \T3, \T2
+ vpslldq $8, \T2, \T2 # shift-L xmm2 2 DWs
+
+ vpxor \T2, \T7, \T7 # first phase of the reduction complete
+ #######################################################################
+ .if \ENC_DEC == ENC
+ vmovdqu \XMM1, 16*0(arg2,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM2, 16*1(arg2,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM3, 16*2(arg2,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM4, 16*3(arg2,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM5, 16*4(arg2,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM6, 16*5(arg2,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM7, 16*6(arg2,%r11) # Write to the Ciphertext buffer
+ vmovdqu \XMM8, 16*7(arg2,%r11) # Write to the Ciphertext buffer
+ .endif
+
+ #######################################################################
+ #second phase of the reduction
+ vpclmulqdq $0x00, \T7, \T3, \T2
+ vpsrldq $4, \T2, \T2 # shift-R xmm2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ vpclmulqdq $0x10, \T7, \T3, \T4
+ vpslldq $4, \T4, \T4 # shift-L xmm0 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ vpxor \T2, \T4, \T4 # second phase of the reduction complete
+ #######################################################################
+ vpxor \T4, \T1, \T1 # the result is in T1
+
+ vpshufb SHUF_MASK(%rip), \XMM1, \XMM1 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM2, \XMM2 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM3, \XMM3 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM4, \XMM4 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM5, \XMM5 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM6, \XMM6 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM7, \XMM7 # perform a 16Byte swap
+ vpshufb SHUF_MASK(%rip), \XMM8, \XMM8 # perform a 16Byte swap
+
+
+ vpxor \T1, \XMM1, \XMM1
+
+
+
+.endm
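+
+# As in the AVX version, the AES rounds of the next 8 counter blocks are
+# interleaved with the GHASH of the previous 8 ciphertext blocks; here every
+# block contributes four PCLMULQDQ partial products and the POLY2-based
+# reduction is used at the end, with the result accumulated into \XMM1.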
+
+
+# GHASH the last 8 ciphertext blocks.
+.macro GHASH_LAST_8_AVX2 T1 T2 T3 T4 T5 T6 T7 XMM1 XMM2 XMM3 XMM4 XMM5 XMM6 XMM7 XMM8
+
+ ## Karatsuba Method
+
+ vmovdqa HashKey_8(arg1), \T5
+
+ vpshufd $0b01001110, \XMM1, \T2
+ vpshufd $0b01001110, \T5, \T3
+ vpxor \XMM1, \T2, \T2
+ vpxor \T5, \T3, \T3
+
+ vpclmulqdq $0x11, \T5, \XMM1, \T6
+ vpclmulqdq $0x00, \T5, \XMM1, \T7
+
+ vpclmulqdq $0x00, \T3, \T2, \XMM1
+
+ ######################
+
+ vmovdqa HashKey_7(arg1), \T5
+ vpshufd $0b01001110, \XMM2, \T2
+ vpshufd $0b01001110, \T5, \T3
+ vpxor \XMM2, \T2, \T2
+ vpxor \T5, \T3, \T3
+
+ vpclmulqdq $0x11, \T5, \XMM2, \T4
+ vpxor \T4, \T6, \T6
+
+ vpclmulqdq $0x00, \T5, \XMM2, \T4
+ vpxor \T4, \T7, \T7
+
+ vpclmulqdq $0x00, \T3, \T2, \T2
+
+ vpxor \T2, \XMM1, \XMM1
+
+ ######################
+
+ vmovdqa HashKey_6(arg1), \T5
+ vpshufd $0b01001110, \XMM3, \T2
+ vpshufd $0b01001110, \T5, \T3
+ vpxor \XMM3, \T2, \T2
+ vpxor \T5, \T3, \T3
+
+ vpclmulqdq $0x11, \T5, \XMM3, \T4
+ vpxor \T4, \T6, \T6
+
+ vpclmulqdq $0x00, \T5, \XMM3, \T4
+ vpxor \T4, \T7, \T7
+
+ vpclmulqdq $0x00, \T3, \T2, \T2
+
+ vpxor \T2, \XMM1, \XMM1
+
+ ######################
+
+ vmovdqa HashKey_5(arg1), \T5
+ vpshufd $0b01001110, \XMM4, \T2
+ vpshufd $0b01001110, \T5, \T3
+ vpxor \XMM4, \T2, \T2
+ vpxor \T5, \T3, \T3
+
+ vpclmulqdq $0x11, \T5, \XMM4, \T4
+ vpxor \T4, \T6, \T6
+
+ vpclmulqdq $0x00, \T5, \XMM4, \T4
+ vpxor \T4, \T7, \T7
+
+ vpclmulqdq $0x00, \T3, \T2, \T2
+
+ vpxor \T2, \XMM1, \XMM1
+
+ ######################
+
+ vmovdqa HashKey_4(arg1), \T5
+ vpshufd $0b01001110, \XMM5, \T2
+ vpshufd $0b01001110, \T5, \T3
+ vpxor \XMM5, \T2, \T2
+ vpxor \T5, \T3, \T3
+
+ vpclmulqdq $0x11, \T5, \XMM5, \T4
+ vpxor \T4, \T6, \T6
+
+ vpclmulqdq $0x00, \T5, \XMM5, \T4
+ vpxor \T4, \T7, \T7
+
+ vpclmulqdq $0x00, \T3, \T2, \T2
+
+ vpxor \T2, \XMM1, \XMM1
+
+ ######################
+
+ vmovdqa HashKey_3(arg1), \T5
+ vpshufd $0b01001110, \XMM6, \T2
+ vpshufd $0b01001110, \T5, \T3
+ vpxor \XMM6, \T2, \T2
+ vpxor \T5, \T3, \T3
+
+ vpclmulqdq $0x11, \T5, \XMM6, \T4
+ vpxor \T4, \T6, \T6
+
+ vpclmulqdq $0x00, \T5, \XMM6, \T4
+ vpxor \T4, \T7, \T7
+
+ vpclmulqdq $0x00, \T3, \T2, \T2
+
+ vpxor \T2, \XMM1, \XMM1
+
+ ######################
+
+ vmovdqa HashKey_2(arg1), \T5
+ vpshufd $0b01001110, \XMM7, \T2
+ vpshufd $0b01001110, \T5, \T3
+ vpxor \XMM7, \T2, \T2
+ vpxor \T5, \T3, \T3
+
+ vpclmulqdq $0x11, \T5, \XMM7, \T4
+ vpxor \T4, \T6, \T6
+
+ vpclmulqdq $0x00, \T5, \XMM7, \T4
+ vpxor \T4, \T7, \T7
+
+ vpclmulqdq $0x00, \T3, \T2, \T2
+
+ vpxor \T2, \XMM1, \XMM1
+
+ ######################
+
+ vmovdqa HashKey(arg1), \T5
+ vpshufd $0b01001110, \XMM8, \T2
+ vpshufd $0b01001110, \T5, \T3
+ vpxor \XMM8, \T2, \T2
+ vpxor \T5, \T3, \T3
+
+ vpclmulqdq $0x11, \T5, \XMM8, \T4
+ vpxor \T4, \T6, \T6
+
+ vpclmulqdq $0x00, \T5, \XMM8, \T4
+ vpxor \T4, \T7, \T7
+
+ vpclmulqdq $0x00, \T3, \T2, \T2
+
+ vpxor \T2, \XMM1, \XMM1
+ vpxor \T6, \XMM1, \XMM1
+ vpxor \T7, \XMM1, \T2
+
+
+
+
+ vpslldq $8, \T2, \T4
+ vpsrldq $8, \T2, \T2
+
+ vpxor \T4, \T7, \T7
+ vpxor \T2, \T6, \T6 # <T6:T7> holds the result of the
+ # accumulated carry-less multiplications
+
+ #######################################################################
+ #first phase of the reduction
+ vmovdqa POLY2(%rip), \T3
+
+ vpclmulqdq $0x01, \T7, \T3, \T2
+ vpslldq $8, \T2, \T2 # shift-L xmm2 2 DWs
+
+ vpxor \T2, \T7, \T7 # first phase of the reduction complete
+ #######################################################################
+
+
+ #second phase of the reduction
+ vpclmulqdq $0x00, \T7, \T3, \T2
+ vpsrldq $4, \T2, \T2 # shift-R T2 1 DW (Shift-R only 1-DW to obtain 2-DWs shift-R)
+
+ vpclmulqdq $0x10, \T7, \T3, \T4
+ vpslldq $4, \T4, \T4 # shift-L T4 1 DW (Shift-L 1-DW to obtain result with no shifts)
+
+ vpxor \T2, \T4, \T4 # second phase of the reduction complete
+ #######################################################################
+ vpxor \T4, \T6, \T6 # the result is in T6
+.endm
+
+
+
+# combined for GCM encrypt and decrypt functions
+# clobbering all xmm registers
+# clobbering r10, r11, r12, r13, r14, r15
+.macro GCM_ENC_DEC_AVX2 ENC_DEC
+
+ #the number of pushes must equal STACK_OFFSET
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ mov %rsp, %r14
+
+
+
+
+ sub $VARIABLE_OFFSET, %rsp
+ and $~63, %rsp # align rsp to 64 bytes
+
+
+ vmovdqu HashKey(arg1), %xmm13 # xmm13 = HashKey
+
+ mov arg4, %r13 # save the number of bytes of plaintext/ciphertext
+ and $-16, %r13 # r13 = r13 - (r13 mod 16)
+
+ mov %r13, %r12
+ shr $4, %r12
+ and $7, %r12
+ jz _initial_num_blocks_is_0\@
+
+ cmp $7, %r12
+ je _initial_num_blocks_is_7\@
+ cmp $6, %r12
+ je _initial_num_blocks_is_6\@
+ cmp $5, %r12
+ je _initial_num_blocks_is_5\@
+ cmp $4, %r12
+ je _initial_num_blocks_is_4\@
+ cmp $3, %r12
+ je _initial_num_blocks_is_3\@
+ cmp $2, %r12
+ je _initial_num_blocks_is_2\@
+
+ jmp _initial_num_blocks_is_1\@
+
+_initial_num_blocks_is_7\@:
+ INITIAL_BLOCKS_AVX2 7, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*7, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_6\@:
+ INITIAL_BLOCKS_AVX2 6, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*6, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_5\@:
+ INITIAL_BLOCKS_AVX2 5, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*5, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_4\@:
+ INITIAL_BLOCKS_AVX2 4, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*4, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_3\@:
+ INITIAL_BLOCKS_AVX2 3, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*3, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_2\@:
+ INITIAL_BLOCKS_AVX2 2, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*2, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_1\@:
+ INITIAL_BLOCKS_AVX2 1, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+ sub $16*1, %r13
+ jmp _initial_blocks_encrypted\@
+
+_initial_num_blocks_is_0\@:
+ INITIAL_BLOCKS_AVX2 0, %xmm12, %xmm13, %xmm14, %xmm15, %xmm11, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm10, %xmm0, \ENC_DEC
+
+
+_initial_blocks_encrypted\@:
+ cmp $0, %r13
+ je _zero_cipher_left\@
+
+ sub $128, %r13
+ je _eight_cipher_left\@
+
+
+
+
+ vmovd %xmm9, %r15d
+ and $255, %r15d
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+
+
+_encrypt_by_8_new\@:
+ cmp $(255-8), %r15d
+ jg _encrypt_by_8\@
+
+
+
+ add $8, %r15b
+ GHASH_8_ENCRYPT_8_PARALLEL_AVX2 %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, out_order, \ENC_DEC
+ add $128, %r11
+ sub $128, %r13
+ jne _encrypt_by_8_new\@
+
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+ jmp _eight_cipher_left\@
+
+_encrypt_by_8\@:
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+ add $8, %r15b
+ GHASH_8_ENCRYPT_8_PARALLEL_AVX2 %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm9, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8, %xmm15, in_order, \ENC_DEC
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+ add $128, %r11
+ sub $128, %r13
+ jne _encrypt_by_8_new\@
+
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+
+
+
+
+_eight_cipher_left\@:
+ GHASH_LAST_8_AVX2 %xmm0, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7, %xmm8
+
+
+_zero_cipher_left\@:
+ cmp $16, arg4
+ jl _only_less_than_16\@
+
+ mov arg4, %r13
+ and $15, %r13 # r13 = (arg4 mod 16)
+
+ je _multiple_of_16_bytes\@
+
+ # handle the last <16 Byte block separately
+
+
+ vpaddd ONE(%rip), %xmm9, %xmm9 # INCR CNT to get Yn
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+ ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Yn)
+
+ sub $16, %r11
+ add %r13, %r11
+ vmovdqu (arg3, %r11), %xmm1 # receive the last <16 Byte block
+
+ lea SHIFT_MASK+16(%rip), %r12
+ sub %r13, %r12 # adjust the shuffle mask pointer
+ # to be able to shift 16-r13 bytes
+ # (r13 is the number of bytes in plaintext mod 16)
+ vmovdqu (%r12), %xmm2 # get the appropriate shuffle mask
+ vpshufb %xmm2, %xmm1, %xmm1 # shift right 16-r13 bytes
+ jmp _final_ghash_mul\@
+
+_only_less_than_16\@:
+ # check for 0 length
+ mov arg4, %r13
+ and $15, %r13 # r13 = (arg4 mod 16)
+
+ je _multiple_of_16_bytes\@
+
+ # handle the last <16 Byte block separately
+
+
+ vpaddd ONE(%rip), %xmm9, %xmm9 # INCR CNT to get Yn
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+ ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Yn)
+
+
+ lea SHIFT_MASK+16(%rip), %r12
+ sub %r13, %r12 # adjust the shuffle mask pointer to be
+ # able to shift 16-r13 bytes (r13 is the
+ # number of bytes in plaintext mod 16)
+
+_get_last_16_byte_loop\@:
+ movb (arg3, %r11), %al
+ movb %al, TMP1 (%rsp , %r11)
+ add $1, %r11
+ cmp %r13, %r11
+ jne _get_last_16_byte_loop\@
+
+ vmovdqu TMP1(%rsp), %xmm1
+
+ sub $16, %r11
+
+_final_ghash_mul\@:
+ .if \ENC_DEC == DEC
+ vmovdqa %xmm1, %xmm2
+ vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn)
+ vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to mask out top 16-r13 bytes of xmm9
+ vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9
+ vpand %xmm1, %xmm2, %xmm2
+ vpshufb SHUF_MASK(%rip), %xmm2, %xmm2
+ vpxor %xmm2, %xmm14, %xmm14
+ #GHASH computation for the last <16 Byte block
+ GHASH_MUL_AVX2 %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
+ sub %r13, %r11
+ add $16, %r11
+ .else
+ vpxor %xmm1, %xmm9, %xmm9 # Plaintext XOR E(K, Yn)
+ vmovdqu ALL_F-SHIFT_MASK(%r12), %xmm1 # get the appropriate mask to mask out top 16-r13 bytes of xmm9
+ vpand %xmm1, %xmm9, %xmm9 # mask out top 16-r13 bytes of xmm9
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9
+ vpxor %xmm9, %xmm14, %xmm14
+ #GHASH computation for the last <16 Byte block
+ GHASH_MUL_AVX2 %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6
+ sub %r13, %r11
+ add $16, %r11
+ vpshufb SHUF_MASK(%rip), %xmm9, %xmm9 # shuffle xmm9 back to output as ciphertext
+ .endif
+
+
+ #############################
+ # output r13 Bytes
+ vmovq %xmm9, %rax
+ cmp $8, %r13
+ jle _less_than_8_bytes_left\@
+
+ mov %rax, (arg2 , %r11)
+ add $8, %r11
+ vpsrldq $8, %xmm9, %xmm9
+ vmovq %xmm9, %rax
+ sub $8, %r13
+
+_less_than_8_bytes_left\@:
+ movb %al, (arg2 , %r11)
+ add $1, %r11
+ shr $8, %rax
+ sub $1, %r13
+ jne _less_than_8_bytes_left\@
+ #############################
+
+_multiple_of_16_bytes\@:
+ mov arg7, %r12 # r12 = aadLen (number of bytes)
+ shl $3, %r12 # convert into number of bits
+ vmovd %r12d, %xmm15 # len(A) in xmm15
+
+ shl $3, arg4 # len(C) in bits (*8)
+ vmovq arg4, %xmm1
+ vpslldq $8, %xmm15, %xmm15 # xmm15 = len(A)|| 0x0000000000000000
+ vpxor %xmm1, %xmm15, %xmm15 # xmm15 = len(A)||len(C)
+
+ vpxor %xmm15, %xmm14, %xmm14
+ GHASH_MUL_AVX2 %xmm14, %xmm13, %xmm0, %xmm10, %xmm11, %xmm5, %xmm6 # final GHASH computation
+ vpshufb SHUF_MASK(%rip), %xmm14, %xmm14 # perform a 16Byte swap
+
+ mov arg5, %rax # rax = *Y0
+ vmovdqu (%rax), %xmm9 # xmm9 = Y0
+
+ ENCRYPT_SINGLE_BLOCK %xmm9 # E(K, Y0)
+
+ vpxor %xmm14, %xmm9, %xmm9
+
+
+
+_return_T\@:
+ mov arg8, %r10 # r10 = authTag
+ mov arg9, %r11 # r11 = auth_tag_len
+
+ cmp $16, %r11
+ je _T_16\@
+
+ cmp $12, %r11
+ je _T_12\@
+
+_T_8\@:
+ vmovq %xmm9, %rax
+ mov %rax, (%r10)
+ jmp _return_T_done\@
+_T_12\@:
+ vmovq %xmm9, %rax
+ mov %rax, (%r10)
+ vpsrldq $8, %xmm9, %xmm9
+ vmovd %xmm9, %eax
+ mov %eax, 8(%r10)
+ jmp _return_T_done\@
+
+_T_16\@:
+ vmovdqu %xmm9, (%r10)
+
+_return_T_done\@:
+ mov %r14, %rsp
+
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+.endm
+
+
+#############################################################
+#void aesni_gcm_precomp_avx_gen4
+# (gcm_data *my_ctx_data,
+# u8 *hash_subkey)# /* H, the Hash sub key input.
+# Data starts on a 16-byte boundary. */
+#############################################################
+ENTRY(aesni_gcm_precomp_avx_gen4)
+ #the number of pushes must equal STACK_OFFSET
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+
+ mov %rsp, %r14
+
+
+
+ sub $VARIABLE_OFFSET, %rsp
+ and $~63, %rsp # align rsp to 64 bytes
+
+ vmovdqu (arg2), %xmm6 # xmm6 = HashKey
+
+ vpshufb SHUF_MASK(%rip), %xmm6, %xmm6
+ ############### PRECOMPUTATION of HashKey<<1 mod poly from the HashKey
+ vmovdqa %xmm6, %xmm2
+ vpsllq $1, %xmm6, %xmm6
+ vpsrlq $63, %xmm2, %xmm2
+ vmovdqa %xmm2, %xmm1
+ vpslldq $8, %xmm2, %xmm2
+ vpsrldq $8, %xmm1, %xmm1
+ vpor %xmm2, %xmm6, %xmm6
+ #reduction
+ vpshufd $0b00100100, %xmm1, %xmm2
+ vpcmpeqd TWOONE(%rip), %xmm2, %xmm2
+ vpand POLY(%rip), %xmm2, %xmm2
+ vpxor %xmm2, %xmm6, %xmm6 # xmm6 holds the HashKey<<1 mod poly
+ #######################################################################
+ vmovdqa %xmm6, HashKey(arg1) # store HashKey<<1 mod poly
+
+
+ PRECOMPUTE_AVX2 %xmm6, %xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5
+
+ mov %r14, %rsp
+
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ ret
+ENDPROC(aesni_gcm_precomp_avx_gen4)
+
+
+###############################################################################
+#void aesni_gcm_enc_avx_gen4(
+# gcm_data *my_ctx_data, /* aligned to 16 Bytes */
+# u8 *out, /* Ciphertext output. Encrypt in-place is allowed. */
+# const u8 *in, /* Plaintext input */
+# u64 plaintext_len, /* Length of data in Bytes for encryption. */
+# u8 *iv, /* Pre-counter block j0: 4 byte salt
+# (from Security Association) concatenated with 8 byte
+# Initialisation Vector (from IPSec ESP Payload)
+# concatenated with 0x00000001. 16-byte aligned pointer. */
+# const u8 *aad, /* Additional Authentication Data (AAD)*/
+# u64 aad_len, /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */
+# u8 *auth_tag, /* Authenticated Tag output. */
+# u64 auth_tag_len)# /* Authenticated Tag Length in bytes.
+# Valid values are 16 (most likely), 12 or 8. */
+###############################################################################
+ENTRY(aesni_gcm_enc_avx_gen4)
+ GCM_ENC_DEC_AVX2 ENC
+ ret
+ENDPROC(aesni_gcm_enc_avx_gen4)
+
+###############################################################################
+#void aesni_gcm_dec_avx_gen4(
+# gcm_data *my_ctx_data, /* aligned to 16 Bytes */
+# u8 *out, /* Plaintext output. Decrypt in-place is allowed. */
+# const u8 *in, /* Ciphertext input */
+# u64 plaintext_len, /* Length of data in Bytes for decryption. */
+# u8 *iv, /* Pre-counter block j0: 4 byte salt
+# (from Security Association) concatenated with 8 byte
+# Initialisation Vector (from IPSec ESP Payload)
+# concatenated with 0x00000001. 16-byte aligned pointer. */
+# const u8 *aad, /* Additional Authentication Data (AAD)*/
+# u64 aad_len, /* Length of AAD in bytes. With RFC4106 this is going to be 8 or 12 Bytes */
+# u8 *auth_tag, /* Authenticated Tag output. */
+# u64 auth_tag_len)# /* Authenticated Tag Length in bytes.
+# Valid values are 16 (most likely), 12 or 8. */
+###############################################################################
+ENTRY(aesni_gcm_dec_avx_gen4)
+ GCM_ENC_DEC_AVX2 DEC
+ ret
+ENDPROC(aesni_gcm_dec_avx_gen4)
+
+#endif /* CONFIG_AS_AVX2 */
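
The GHASH_LAST_8_AVX2 macro above uses the Karatsuba method: each 128-bit carry-less
multiply against a hash key power is built from three 64x64 vpclmulqdq products plus a
few XORs instead of four. Below is a minimal, standalone C sketch of that split; the
clmul64() helper is a hypothetical stand-in for one vpclmulqdq lane, and none of these
names exist in the kernel.

#include <stdint.h>

struct u128 { uint64_t lo, hi; };

/* bitwise 64x64 -> 128 carry-less multiply; models one vpclmulqdq lane */
static struct u128 clmul64(uint64_t a, uint64_t b)
{
	struct u128 r = { 0, 0 };
	int i;

	for (i = 0; i < 64; i++) {
		if ((b >> i) & 1) {
			r.lo ^= a << i;
			if (i)
				r.hi ^= a >> (64 - i);
		}
	}
	return r;
}

/*
 * Karatsuba split of A*B over GF(2)[x], with A = a1*x^64 + a0 and
 * B = b1*x^64 + b0: three products (a1*b1, a0*b0, (a1^a0)*(b1^b0)) and
 * some XORs recover the 256-bit result, mirroring the vpshufd/vpxor setup
 * before each vpclmulqdq pair in the macro.
 */
static void clmul128_karatsuba(struct u128 a, struct u128 b,
			       struct u128 *hi, struct u128 *lo)
{
	struct u128 h = clmul64(a.hi, b.hi);		/* vpclmulqdq $0x11 */
	struct u128 l = clmul64(a.lo, b.lo);		/* vpclmulqdq $0x00 */
	struct u128 m = clmul64(a.hi ^ a.lo, b.hi ^ b.lo);

	m.lo ^= h.lo ^ l.lo;				/* middle term      */
	m.hi ^= h.hi ^ l.hi;

	l.hi ^= m.lo;					/* fold the middle  */
	h.lo ^= m.hi;					/* term across halves */
	*hi = h;
	*lo = l;
}

The <hi:lo> pair plays the role of the <T6:T7> accumulator in the macro; the two-phase
POLY2 reduction then folds those 256 bits back into a 128-bit GHASH state.
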
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 835488b745ee..948ad0e77741 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -101,6 +101,9 @@ asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
int crypto_fpu_init(void);
void crypto_fpu_exit(void);
+#define AVX_GEN2_OPTSIZE 640
+#define AVX_GEN4_OPTSIZE 4096
+
#ifdef CONFIG_X86_64
asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
const u8 *in, unsigned int len, u8 *iv);
@@ -150,6 +153,123 @@ asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
u8 *auth_tag, unsigned long auth_tag_len);
+
+#ifdef CONFIG_AS_AVX
+/*
+ * asmlinkage void aesni_gcm_precomp_avx_gen2()
+ * gcm_data *my_ctx_data, context data
+ * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
+ */
+asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
+
+asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
+ const u8 *in, unsigned long plaintext_len, u8 *iv,
+ const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len);
+
+asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
+ const u8 *in, unsigned long ciphertext_len, u8 *iv,
+ const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len);
+
+static void aesni_gcm_enc_avx(void *ctx, u8 *out,
+ const u8 *in, unsigned long plaintext_len, u8 *iv,
+ u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len)
+{
+ if (plaintext_len < AVX_GEN2_OPTSIZE) {
+ aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
+ aad_len, auth_tag, auth_tag_len);
+ } else {
+ aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
+ aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
+ aad_len, auth_tag, auth_tag_len);
+ }
+}
+
+static void aesni_gcm_dec_avx(void *ctx, u8 *out,
+ const u8 *in, unsigned long ciphertext_len, u8 *iv,
+ u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len)
+{
+ if (ciphertext_len < AVX_GEN2_OPTSIZE) {
+ aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad,
+ aad_len, auth_tag, auth_tag_len);
+ } else {
+ aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
+ aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
+ aad_len, auth_tag, auth_tag_len);
+ }
+}
+#endif
+
+#ifdef CONFIG_AS_AVX2
+/*
+ * asmlinkage void aesni_gcm_precomp_avx_gen4()
+ * gcm_data *my_ctx_data, context data
+ * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
+ */
+asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
+
+asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
+ const u8 *in, unsigned long plaintext_len, u8 *iv,
+ const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len);
+
+asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
+ const u8 *in, unsigned long ciphertext_len, u8 *iv,
+ const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len);
+
+static void aesni_gcm_enc_avx2(void *ctx, u8 *out,
+ const u8 *in, unsigned long plaintext_len, u8 *iv,
+ u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len)
+{
+ if (plaintext_len < AVX_GEN2_OPTSIZE) {
+ aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad,
+ aad_len, auth_tag, auth_tag_len);
+ } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
+ aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
+ aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
+ aad_len, auth_tag, auth_tag_len);
+ } else {
+ aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
+ aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
+ aad_len, auth_tag, auth_tag_len);
+ }
+}
+
+static void aesni_gcm_dec_avx2(void *ctx, u8 *out,
+ const u8 *in, unsigned long ciphertext_len, u8 *iv,
+ u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len)
+{
+ if (ciphertext_len < AVX_GEN2_OPTSIZE) {
+ aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey,
+ aad, aad_len, auth_tag, auth_tag_len);
+ } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
+ aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
+ aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
+ aad_len, auth_tag, auth_tag_len);
+ } else {
+ aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
+ aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
+ aad_len, auth_tag, auth_tag_len);
+ }
+}
+#endif
+
+static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
+ const u8 *in, unsigned long plaintext_len, u8 *iv,
+ u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len);
+
+static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
+ const u8 *in, unsigned long ciphertext_len, u8 *iv,
+ u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
+ u8 *auth_tag, unsigned long auth_tag_len);
+
static inline struct
aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
{
@@ -915,7 +1035,7 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
dst = src;
}
- aesni_gcm_enc(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
+ aesni_gcm_enc_tfm(aes_ctx, dst, src, (unsigned long)req->cryptlen, iv,
ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst
+ ((unsigned long)req->cryptlen), auth_tag_len);
@@ -996,12 +1116,12 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
dst = src;
}
- aesni_gcm_dec(aes_ctx, dst, src, tempCipherLen, iv,
+ aesni_gcm_dec_tfm(aes_ctx, dst, src, tempCipherLen, iv,
ctx->hash_subkey, assoc, (unsigned long)req->assoclen,
authTag, auth_tag_len);
/* Compare generated tag with passed in tag. */
- retval = memcmp(src + tempCipherLen, authTag, auth_tag_len) ?
+ retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
-EBADMSG : 0;
if (one_entry_in_sg) {
@@ -1353,6 +1473,27 @@ static int __init aesni_init(void)
if (!x86_match_cpu(aesni_cpu_id))
return -ENODEV;
+#ifdef CONFIG_X86_64
+#ifdef CONFIG_AS_AVX2
+ if (boot_cpu_has(X86_FEATURE_AVX2)) {
+ pr_info("AVX2 version of gcm_enc/dec engaged.\n");
+ aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
+ aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
+ } else
+#endif
+#ifdef CONFIG_AS_AVX
+ if (boot_cpu_has(X86_FEATURE_AVX)) {
+ pr_info("AVX version of gcm_enc/dec engaged.\n");
+ aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
+ aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
+ } else
+#endif
+ {
+ pr_info("SSE version of gcm_enc/dec engaged.\n");
+ aesni_gcm_enc_tfm = aesni_gcm_enc;
+ aesni_gcm_dec_tfm = aesni_gcm_dec;
+ }
+#endif
err = crypto_fpu_init();
if (err)
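
The glue changes above pick a GCM implementation twice: once per boot through the
aesni_gcm_enc_tfm/aesni_gcm_dec_tfm pointers, and once per request, where short buffers
skip the AVX paths because the gen2/gen4 key precomputation only pays off on larger
inputs. A hypothetical helper, not part of the patch, that mirrors the per-request
decision:

enum gcm_path { GCM_PATH_SSE, GCM_PATH_AVX_GEN2, GCM_PATH_AVX2_GEN4 };

/* thresholds are the AVX_GEN2_OPTSIZE/AVX_GEN4_OPTSIZE values defined above */
static enum gcm_path gcm_path_for(unsigned long len, bool has_avx, bool has_avx2)
{
	if (len < AVX_GEN2_OPTSIZE || !(has_avx || has_avx2))
		return GCM_PATH_SSE;		/* small buffers: SSE wins    */
	if (len < AVX_GEN4_OPTSIZE || !has_avx2)
		return GCM_PATH_AVX_GEN2;	/* medium buffers: gen2 (AVX) */
	return GCM_PATH_AVX2_GEN4;		/* >= 4 KiB: gen4 (AVX2)      */
}

So a 256-byte RFC4106 request stays on the SSE code even on an AVX2 machine, while a
16 KiB request on the same machine runs through aesni_gcm_enc_avx_gen4().
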
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index a54ee1d054d9..aaac3b2fb746 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -19,7 +19,7 @@ extern int amd_cache_northbridges(void);
extern void amd_flush_garts(void);
extern int amd_numa_init(void);
extern int amd_get_subcaches(int);
-extern int amd_set_subcaches(int, int);
+extern int amd_set_subcaches(int, unsigned long);
struct amd_l3_cache {
unsigned indices;
diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h
index 0d9ec770f2f8..e6a92455740e 100644
--- a/arch/x86/include/asm/archrandom.h
+++ b/arch/x86/include/asm/archrandom.h
@@ -39,6 +39,20 @@
#ifdef CONFIG_ARCH_RANDOM
+/* Instead of arch_get_random_long() when alternatives haven't run. */
+static inline int rdrand_long(unsigned long *v)
+{
+ int ok;
+ asm volatile("1: " RDRAND_LONG "\n\t"
+ "jc 2f\n\t"
+ "decl %0\n\t"
+ "jnz 1b\n\t"
+ "2:"
+ : "=r" (ok), "=a" (*v)
+ : "0" (RDRAND_RETRY_LOOPS));
+ return ok;
+}
+
#define GET_RANDOM(name, type, rdrand, nop) \
static inline int name(type *v) \
{ \
@@ -68,6 +82,13 @@ GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3);
#endif /* CONFIG_X86_64 */
+#else
+
+static inline int rdrand_long(unsigned long *v)
+{
+ return 0;
+}
+
#endif /* CONFIG_ARCH_RANDOM */
extern void x86_init_rdrand(struct cpuinfo_x86 *c);
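
rdrand_long() retries RDRAND up to RDRAND_RETRY_LOOPS times because the instruction can
transiently report "no data" via a cleared carry flag. A C-level sketch of the same
loop, with a hypothetical rdrand_once() standing in for a single inline-asm attempt:

/* returns 1 and fills *v on success, 0 if the DRNG kept asking for a retry */
static int rdrand_long_sketch(unsigned long *v)
{
	int retries = RDRAND_RETRY_LOOPS;

	while (retries--) {
		if (rdrand_once(v))	/* hypothetical: one RDRAND, CF as result */
			return 1;
	}
	return 0;
}
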
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index c6cd358a1eec..04a48903b2eb 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -92,12 +92,53 @@
#endif
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else
+#else /* !SMP */
#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#define smp_read_barrier_depends() do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#endif /* SMP */
+
+#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+
+/*
+ * For either of these options x86 doesn't have a strong TSO memory
+ * model and we should fall back to full barriers.
+ */
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ smp_mb(); \
+ ___p1; \
+})
+
+#else /* regular x86 TSO memory ordering */
+
+#define smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ACCESS_ONCE(*p) = (v); \
+} while (0)
+
+#define smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = ACCESS_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ barrier(); \
+ ___p1; \
+})
+
#endif
/*
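
The new smp_store_release()/smp_load_acquire() pair costs only a compiler barrier on
ordinary TSO x86 but falls back to a full smp_mb() when OOSTORE/PPRO_FENCE is
configured. A minimal usage sketch of the flag-protected-payload pattern they are meant
for (struct msg and both functions are illustrative, not from the patch):

struct msg {
	int payload;
	int ready;
};

static void producer(struct msg *m, int val)
{
	m->payload = val;			/* plain store                 */
	smp_store_release(&m->ready, 1);	/* payload ordered before flag */
}

static int consumer(struct msg *m, int *out)
{
	if (!smp_load_acquire(&m->ready))	/* flag ordered before payload */
		return 0;			/* nothing published yet       */
	*out = m->payload;			/* guaranteed to see 'val'     */
	return 1;
}
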
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 89270b4318db..e099f9502ace 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -216,6 +216,7 @@
#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_MPX (9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */
diff --git a/arch/x86/include/asm/dmi.h b/arch/x86/include/asm/dmi.h
index fd8f9e2ca35f..535192f6bfad 100644
--- a/arch/x86/include/asm/dmi.h
+++ b/arch/x86/include/asm/dmi.h
@@ -13,7 +13,9 @@ static __always_inline __init void *dmi_alloc(unsigned len)
}
/* Use early IO mappings for DMI because it's initialized early */
-#define dmi_ioremap early_ioremap
-#define dmi_iounmap early_iounmap
+#define dmi_early_remap early_ioremap
+#define dmi_early_unmap early_iounmap
+#define dmi_remap ioremap
+#define dmi_unmap iounmap
#endif /* _ASM_X86_DMI_H */
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 65c6e6e3a552..3d6b9f81cc68 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -1,6 +1,24 @@
#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H
+/*
+ * We map the EFI regions needed for runtime services non-contiguously,
+ * with preserved alignment on virtual addresses starting from -4G down
+ * for a total max space of 64G. This way, we provide for stable runtime
+ * services addresses across kernels so that a kexec'd kernel can still
+ * use them.
+ *
+ * This is the main reason why we're doing stable VA mappings for RT
+ * services.
+ *
+ * This flag is used in conjunction with a chicken bit called
+ * "efi=old_map" which can be used as a fallback to the old runtime
+ * services mapping method in case there's some b0rkage with a
+ * particular EFI implementation (haha, it is hard to hold up the
+ * sarcasm here...).
+ */
+#define EFI_OLD_MEMMAP EFI_ARCH_1
+
#ifdef CONFIG_X86_32
#define EFI_LOADER_SIGNATURE "EL32"
@@ -69,24 +87,31 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
efi_call6((f), (u64)(a1), (u64)(a2), (u64)(a3), \
(u64)(a4), (u64)(a5), (u64)(a6))
+#define _efi_call_virtX(x, f, ...) \
+({ \
+ efi_status_t __s; \
+ \
+ efi_sync_low_kernel_mappings(); \
+ preempt_disable(); \
+ __s = efi_call##x((void *)efi.systab->runtime->f, __VA_ARGS__); \
+ preempt_enable(); \
+ __s; \
+})
+
#define efi_call_virt0(f) \
- efi_call0((efi.systab->runtime->f))
-#define efi_call_virt1(f, a1) \
- efi_call1((efi.systab->runtime->f), (u64)(a1))
-#define efi_call_virt2(f, a1, a2) \
- efi_call2((efi.systab->runtime->f), (u64)(a1), (u64)(a2))
-#define efi_call_virt3(f, a1, a2, a3) \
- efi_call3((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
- (u64)(a3))
-#define efi_call_virt4(f, a1, a2, a3, a4) \
- efi_call4((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
- (u64)(a3), (u64)(a4))
-#define efi_call_virt5(f, a1, a2, a3, a4, a5) \
- efi_call5((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
- (u64)(a3), (u64)(a4), (u64)(a5))
-#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
- efi_call6((efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
- (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
+ _efi_call_virtX(0, f)
+#define efi_call_virt1(f, a1) \
+ _efi_call_virtX(1, f, (u64)(a1))
+#define efi_call_virt2(f, a1, a2) \
+ _efi_call_virtX(2, f, (u64)(a1), (u64)(a2))
+#define efi_call_virt3(f, a1, a2, a3) \
+ _efi_call_virtX(3, f, (u64)(a1), (u64)(a2), (u64)(a3))
+#define efi_call_virt4(f, a1, a2, a3, a4) \
+ _efi_call_virtX(4, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4))
+#define efi_call_virt5(f, a1, a2, a3, a4, a5) \
+ _efi_call_virtX(5, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5))
+#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
+ _efi_call_virtX(6, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
u32 type, u64 attribute);
@@ -95,12 +120,30 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
extern int add_efi_memmap;
extern unsigned long x86_efi_facility;
+extern struct efi_scratch efi_scratch;
extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int efi_memblock_x86_reserve_range(void);
extern void efi_call_phys_prelog(void);
extern void efi_call_phys_epilog(void);
extern void efi_unmap_memmap(void);
extern void efi_memory_uc(u64 addr, unsigned long size);
+extern void __init efi_map_region(efi_memory_desc_t *md);
+extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
+extern void efi_sync_low_kernel_mappings(void);
+extern void efi_setup_page_tables(void);
+extern void __init old_map_region(efi_memory_desc_t *md);
+extern void __init runtime_code_page_mkexec(void);
+extern void __init efi_runtime_mkexec(void);
+
+struct efi_setup_data {
+ u64 fw_vendor;
+ u64 runtime;
+ u64 tables;
+ u64 smbios;
+ u64 reserved[8];
+};
+
+extern u64 efi_setup;
#ifdef CONFIG_EFI
@@ -110,7 +153,7 @@ static inline bool efi_is_native(void)
}
extern struct console early_efi_console;
-
+extern void parse_efi_setup(u64 phys_addr, u32 data_len);
#else
/*
* IF EFI is not configured, have the EFI calls return -ENOSYS.
@@ -122,6 +165,7 @@ extern struct console early_efi_console;
#define efi_call4(_f, _a1, _a2, _a3, _a4) (-ENOSYS)
#define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS)
#define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS)
+static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
#endif /* CONFIG_EFI */
#endif /* _ASM_X86_EFI_H */
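
Every efi_call_virtN() now goes through _efi_call_virtX(), which syncs the EFI kernel
mappings and disables preemption around the firmware call. Roughly, a call like
efi_call_virt2(get_time, tm, tc) behaves like the sketch below; the expansion is only
an illustration, the macro itself is what the patch adds.

efi_status_t efi_get_time_expanded(efi_time_t *tm, efi_time_cap_t *tc)
{
	efi_status_t s;

	efi_sync_low_kernel_mappings();	/* keep the split EFI VA map coherent */
	preempt_disable();		/* runtime services are not reentrant */
	s = efi_call2((void *)efi.systab->runtime->get_time, (u64)tm, (u64)tc);
	preempt_enable();
	return s;
}
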
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index e846225265ed..7252cd339175 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -175,64 +175,7 @@ static inline void __set_fixmap(enum fixed_addresses idx,
}
#endif
-#define set_fixmap(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL)
-
-/*
- * Some hardware wants to get fixmapped without caching.
- */
-#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-
-#define clear_fixmap(idx) \
- __set_fixmap(idx, 0, __pgprot(0))
-
-#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-
-extern void __this_fixmap_does_not_exist(void);
-
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without translation, we catch the bug with a NULL-deference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static __always_inline unsigned long fix_to_virt(const unsigned int idx)
-{
- /*
- * this branch gets completely eliminated after inlining,
- * except when someone tries to use fixaddr indices in an
- * illegal way. (such as mixing up address types or using
- * out-of-range indices).
- *
- * If it doesn't get removed, the linker will complain
- * loudly with a reasonably clear error message..
- */
- if (idx >= __end_of_fixed_addresses)
- __this_fixmap_does_not_exist();
-
- return __fix_to_virt(idx);
-}
-
-static inline unsigned long virt_to_fix(const unsigned long vaddr)
-{
- BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
- return __virt_to_fix(vaddr);
-}
-
-/* Return an pointer with offset calculated */
-static __always_inline unsigned long
-__set_fixmap_offset(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
-{
- __set_fixmap(idx, phys, flags);
- return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1));
-}
-
-#define set_fixmap_offset(idx, phys) \
- __set_fixmap_offset(idx, phys, PAGE_KERNEL)
-
-#define set_fixmap_offset_nocache(idx, phys) \
- __set_fixmap_offset(idx, phys, PAGE_KERNEL_NOCACHE)
+#include <asm-generic/fixmap.h>
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_FIXMAP_H */
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index c49a613c6452..cea1c76d49bf 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -293,12 +293,13 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
is pending. Clear the x87 state here by setting it to fixed
values. "m" is a random variable that should be in L1 */
- alternative_input(
- ASM_NOP8 ASM_NOP2,
- "emms\n\t" /* clear stack tags */
- "fildl %P[addr]", /* set F?P to defined value */
- X86_FEATURE_FXSAVE_LEAK,
- [addr] "m" (tsk->thread.fpu.has_fpu));
+ if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) {
+ asm volatile(
+ "fnclex\n\t"
+ "emms\n\t"
+ "fildl %P[addr]" /* set F?P to defined value */
+ : : [addr] "m" (tsk->thread.fpu.has_fpu));
+ }
return fpu_restore_checking(&tsk->thread.fpu);
}
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index be27ba1e947a..b4c1f5453436 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -110,26 +110,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
{
- int ret = 0;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
- asm volatile("\t" ASM_STAC "\n"
- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
- "2:\t" ASM_CLAC "\n"
- "\t.section .fixup, \"ax\"\n"
- "3:\tmov %3, %0\n"
- "\tjmp 2b\n"
- "\t.previous\n"
- _ASM_EXTABLE(1b, 3b)
- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
- : "i" (-EFAULT), "r" (newval), "1" (oldval)
- : "memory"
- );
-
- *uval = oldval;
- return ret;
+ return user_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval);
}
#endif
diff --git a/arch/x86/include/asm/hash.h b/arch/x86/include/asm/hash.h
new file mode 100644
index 000000000000..e8c58f88b1d4
--- /dev/null
+++ b/arch/x86/include/asm/hash.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_X86_HASH_H
+#define _ASM_X86_HASH_H
+
+struct fast_hash_ops;
+extern void setup_arch_fast_hash(struct fast_hash_ops *ops);
+
+#endif /* _ASM_X86_HASH_H */
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index cba45d99ac1a..67d69b8e2d20 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -191,6 +191,9 @@ extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
#define trace_interrupt interrupt
#endif
+#define VECTOR_UNDEFINED -1
+#define VECTOR_RETRIGGERED -2
+
typedef int vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
extern void setup_vector_irq(int cpu);
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h
index 459769d39263..e34e097b6f9d 100644
--- a/arch/x86/include/asm/intel-mid.h
+++ b/arch/x86/include/asm/intel-mid.h
@@ -51,10 +51,41 @@ struct devs_id {
enum intel_mid_cpu_type {
/* 1 was Moorestown */
INTEL_MID_CPU_CHIP_PENWELL = 2,
+ INTEL_MID_CPU_CHIP_CLOVERVIEW,
+ INTEL_MID_CPU_CHIP_TANGIER,
};
extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
+/**
+ * struct intel_mid_ops - Interface between intel-mid & sub archs
+ * @arch_setup: arch_setup function to re-initialize platform
+ * structures (x86_init, x86_platform_init)
+ *
+ * This structure can be extended if any new interface is required
+ * between intel-mid & its sub arch files.
+ */
+struct intel_mid_ops {
+ void (*arch_setup)(void);
+};
+
+/* Helper API's for INTEL_MID_OPS_INIT */
+#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid) \
+ [cpuid] = get_##cpuname##_ops
+
+/* Maximum number of CPU ops */
+#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *))
+
+/*
+ * For every new cpu addition, a weak get_<cpuname>_ops() function needs to be
+ * declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h.
+ */
+#define INTEL_MID_OPS_INIT {\
+ DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \
+ DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \
+ DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \
+};
+
#ifdef CONFIG_X86_INTEL_MID
static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void)
@@ -86,8 +117,21 @@ extern enum intel_mid_timer_options intel_mid_timer_options;
* Penwell uses spread spectrum clock, so the freq number is not exactly
* the same as reported by MSR based on SDM.
*/
-#define PENWELL_FSB_FREQ_83SKU 83200
-#define PENWELL_FSB_FREQ_100SKU 99840
+#define FSB_FREQ_83SKU 83200
+#define FSB_FREQ_100SKU 99840
+#define FSB_FREQ_133SKU 133000
+
+#define FSB_FREQ_167SKU 167000
+#define FSB_FREQ_200SKU 200000
+#define FSB_FREQ_267SKU 267000
+#define FSB_FREQ_333SKU 333000
+#define FSB_FREQ_400SKU 400000
+
+/* Bus Select SoC Fuse value */
+#define BSEL_SOC_FUSE_MASK 0x7
+#define BSEL_SOC_FUSE_001 0x1 /* FSB 133MHz */
+#define BSEL_SOC_FUSE_101 0x5 /* FSB 100MHz */
+#define BSEL_SOC_FUSE_111 0x7 /* FSB 83MHz */
#define SFI_MTMR_MAX_NUM 8
#define SFI_MRTC_MAX 8
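
INTEL_MID_OPS_INIT builds a designated-initializer table of the weak per-chip hooks,
indexed by the enum above. A self-contained sketch of the table it is meant to produce
in a consumer such as intel-mid.c (the array name here is an assumption):

/* weak hooks, per the comment: declared in intel_mid_weak_decls.h */
void *get_penwell_ops(void);
void *get_cloverview_ops(void);
void *get_tangier_ops(void);

static void *(*mid_ops[])(void) = {
	[INTEL_MID_CPU_CHIP_PENWELL]    = get_penwell_ops,	/* index 2 */
	[INTEL_MID_CPU_CHIP_CLOVERVIEW] = get_cloverview_ops,	/* index 3 */
	[INTEL_MID_CPU_CHIP_TANGIER]    = get_tangier_ops,	/* index 4 */
};

MAX_CPU_OPS(mid_ops) then evaluates to 5 (highest chip id plus one), so unsupported ids
simply index a NULL slot.
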
diff --git a/arch/x86/include/asm/iosf_mbi.h b/arch/x86/include/asm/iosf_mbi.h
new file mode 100644
index 000000000000..8e71c7941767
--- /dev/null
+++ b/arch/x86/include/asm/iosf_mbi.h
@@ -0,0 +1,90 @@
+/*
+ * iosf_mbi.h: Intel OnChip System Fabric MailBox access support
+ */
+
+#ifndef IOSF_MBI_SYMS_H
+#define IOSF_MBI_SYMS_H
+
+#define MBI_MCR_OFFSET 0xD0
+#define MBI_MDR_OFFSET 0xD4
+#define MBI_MCRX_OFFSET 0xD8
+
+#define MBI_RD_MASK 0xFEFFFFFF
+#define MBI_WR_MASK 0X01000000
+
+#define MBI_MASK_HI 0xFFFFFF00
+#define MBI_MASK_LO 0x000000FF
+#define MBI_ENABLE 0xF0
+
+/* Baytrail available units */
+#define BT_MBI_UNIT_AUNIT 0x00
+#define BT_MBI_UNIT_SMC 0x01
+#define BT_MBI_UNIT_CPU 0x02
+#define BT_MBI_UNIT_BUNIT 0x03
+#define BT_MBI_UNIT_PMC 0x04
+#define BT_MBI_UNIT_GFX 0x06
+#define BT_MBI_UNIT_SMI 0x0C
+#define BT_MBI_UNIT_USB 0x43
+#define BT_MBI_UNIT_SATA 0xA3
+#define BT_MBI_UNIT_PCIE 0xA6
+
+/* Baytrail read/write opcodes */
+#define BT_MBI_AUNIT_READ 0x10
+#define BT_MBI_AUNIT_WRITE 0x11
+#define BT_MBI_SMC_READ 0x10
+#define BT_MBI_SMC_WRITE 0x11
+#define BT_MBI_CPU_READ 0x10
+#define BT_MBI_CPU_WRITE 0x11
+#define BT_MBI_BUNIT_READ 0x10
+#define BT_MBI_BUNIT_WRITE 0x11
+#define BT_MBI_PMC_READ 0x06
+#define BT_MBI_PMC_WRITE 0x07
+#define BT_MBI_GFX_READ 0x00
+#define BT_MBI_GFX_WRITE 0x01
+#define BT_MBI_SMIO_READ 0x06
+#define BT_MBI_SMIO_WRITE 0x07
+#define BT_MBI_USB_READ 0x06
+#define BT_MBI_USB_WRITE 0x07
+#define BT_MBI_SATA_READ 0x00
+#define BT_MBI_SATA_WRITE 0x01
+#define BT_MBI_PCIE_READ 0x00
+#define BT_MBI_PCIE_WRITE 0x01
+
+/**
+ * iosf_mbi_read() - MailBox Interface read command
+ * @port: port indicating subunit being accessed
+ * @opcode: port specific read or write opcode
+ * @offset: register address offset
+ * @mdr: register data to be read
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr);
+
+/**
+ * iosf_mbi_write() - MailBox unmasked write command
+ * @port: port indicating subunit being accessed
+ * @opcode: port specific read or write opcode
+ * @offset: register address offset
+ * @mdr: register data to be written
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr);
+
+/**
+ * iosf_mbi_modify() - MailBox masked write command
+ * @port: port indicating subunit being accessed
+ * @opcode: port specific read or write opcode
+ * @offset: register address offset
+ * @mdr: register data being modified
+ * @mask: mask indicating bits in mdr to be modified
+ *
+ * Locking is handled by spinlock - cannot sleep.
+ * Return: Nonzero on error
+ */
+int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);
+
+#endif /* IOSF_MBI_SYMS_H */
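
All three accessors take the unit "port", a port-specific opcode, the register offset
and the 32-bit data word; iosf_mbi_modify() performs the read-modify-write under the
same lock. A short usage sketch (the 0x28 offset and bit 0 are made-up example values):

static int example_bunit_poke(void)
{
	u32 val;
	int ret;

	/* read a B-unit register at offset 0x28 */
	ret = iosf_mbi_read(BT_MBI_UNIT_BUNIT, BT_MBI_BUNIT_READ, 0x28, &val);
	if (ret)
		return ret;

	/* set bit 0 of the same register without touching the other bits */
	return iosf_mbi_modify(BT_MBI_UNIT_BUNIT, BT_MBI_BUNIT_WRITE,
			       0x28, 0x1, 0x1);
}
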
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 0ea10f27d613..cb6cfcd034cf 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -25,6 +25,7 @@ extern void irq_ctx_init(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
#include <linux/cpumask.h>
+extern int check_irq_vectors_for_cpu_disable(void);
extern void fixup_irqs(void);
extern void irq_force_complete_move(int);
#endif
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ae5d7830855c..fdf83afbb7d9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -605,6 +605,7 @@ struct kvm_arch {
/* fields used by HYPER-V emulation */
u64 hv_guest_os_id;
u64 hv_hypercall;
+ u64 hv_tsc_page;
#ifdef CONFIG_KVM_MMU_AUDIT
int audit_point;
@@ -699,6 +700,8 @@ struct kvm_x86_ops {
void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
+ u64 (*get_dr6)(struct kvm_vcpu *vcpu);
+ void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 1df115909758..c7678e43465b 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -85,28 +85,9 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
return ret;
}
-static inline uint32_t kvm_cpuid_base(void)
-{
- if (boot_cpu_data.cpuid_level < 0)
- return 0; /* So we don't blow up on old processors */
-
- if (cpu_has_hypervisor)
- return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
-
- return 0;
-}
-
-static inline bool kvm_para_available(void)
-{
- return kvm_cpuid_base() != 0;
-}
-
-static inline unsigned int kvm_arch_para_features(void)
-{
- return cpuid_eax(KVM_CPUID_FEATURES);
-}
-
#ifdef CONFIG_KVM_GUEST
+bool kvm_para_available(void);
+unsigned int kvm_arch_para_features(void);
void __init kvm_guest_init(void);
void kvm_async_pf_task_wait(u32 token);
void kvm_async_pf_task_wake(u32 token);
@@ -126,6 +107,16 @@ static inline void kvm_spinlock_init(void)
#define kvm_async_pf_task_wait(T) do {} while(0)
#define kvm_async_pf_task_wake(T) do {} while(0)
+static inline bool kvm_para_available(void)
+{
+ return 0;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+ return 0;
+}
+
static inline u32 kvm_read_and_reset_pf_reason(void)
{
return 0;
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index c696a8687567..6e4ce2df87cf 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -118,7 +118,6 @@ extern void mce_register_decode_chain(struct notifier_block *nb);
extern void mce_unregister_decode_chain(struct notifier_block *nb);
#include <linux/percpu.h>
-#include <linux/init.h>
#include <linux/atomic.h>
extern int mce_p5_enabled;
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index f98bd6625318..b59827e76529 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -1,6 +1,21 @@
#ifndef _ASM_X86_MICROCODE_H
#define _ASM_X86_MICROCODE_H
+#define native_rdmsr(msr, val1, val2) \
+do { \
+ u64 __val = native_read_msr((msr)); \
+ (void)((val1) = (u32)__val); \
+ (void)((val2) = (u32)(__val >> 32)); \
+} while (0)
+
+#define native_wrmsr(msr, low, high) \
+ native_write_msr(msr, low, high)
+
+#define native_wrmsrl(msr, val) \
+ native_write_msr((msr), \
+ (u32)((u64)(val)), \
+ (u32)((u64)(val) >> 32))
+
struct cpu_signature {
unsigned int sig;
unsigned int pf;
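
The native_rdmsr()/native_wrmsr()/native_wrmsrl() helpers bypass the paravirt and
tracing MSR wrappers so very early microcode code can run before those exist. A usage
sketch, assuming the usual MSR_AMD64_PATCH_LEVEL constant from msr-index.h:

static u32 read_amd_patch_level(void)
{
	u32 level, dummy;

	/* patch level is returned in the low half (EAX) */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, level, dummy);
	return level;
}
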
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index 4c019179a57d..b7b10b82d3e5 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -61,11 +61,10 @@ extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
extern int apply_microcode_amd(int cpu);
extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
+#define PATCH_MAX_SIZE PAGE_SIZE
+extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
+
#ifdef CONFIG_MICROCODE_AMD_EARLY
-#ifdef CONFIG_X86_32
-#define MPB_MAX_SIZE PAGE_SIZE
-extern u8 amd_bsp_mpb[MPB_MAX_SIZE];
-#endif
extern void __init load_ucode_amd_bsp(void);
extern void load_ucode_amd_ap(void);
extern int __init save_microcode_in_initrd_amd(void);
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 3142a94c7b4b..3e6b4920ef5d 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -1,7 +1,6 @@
#ifndef _ASM_X86_MPSPEC_H
#define _ASM_X86_MPSPEC_H
-#include <linux/init.h>
#include <asm/mpspec_def.h>
#include <asm/x86_init.h>
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index 2f366d0ac6b4..1da25a5f96f9 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_MWAIT_H
#define _ASM_X86_MWAIT_H
+#include <linux/sched.h>
+
#define MWAIT_SUBSTATE_MASK 0xf
#define MWAIT_CSTATE_MASK 0xf
#define MWAIT_SUBSTATE_SIZE 4
@@ -13,4 +15,45 @@
#define MWAIT_ECX_INTERRUPT_BREAK 0x1
+static inline void __monitor(const void *eax, unsigned long ecx,
+ unsigned long edx)
+{
+ /* "monitor %eax, %ecx, %edx;" */
+ asm volatile(".byte 0x0f, 0x01, 0xc8;"
+ :: "a" (eax), "c" (ecx), "d"(edx));
+}
+
+static inline void __mwait(unsigned long eax, unsigned long ecx)
+{
+ /* "mwait %eax, %ecx;" */
+ asm volatile(".byte 0x0f, 0x01, 0xc9;"
+ :: "a" (eax), "c" (ecx));
+}
+
+/*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+ * We execute MONITOR against need_resched and enter optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we would be woken
+ * up from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
+ */
+static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
+{
+ if (!current_set_polling_and_test()) {
+ if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) {
+ mb();
+ clflush((void *)&current_thread_info()->flags);
+ mb();
+ }
+
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ if (!need_resched())
+ __mwait(eax, ecx);
+ }
+ current_clr_polling();
+}
+
#endif /* _ASM_X86_MWAIT_H */
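
mwait_idle_with_hints() arms MONITOR on the current thread's flags word, so a remote
wakeup that sets TIF_NEED_RESCHED brings the CPU out of MWAIT without an IPI. An
illustrative caller follows; the C1 hint encoding is an example value, not taken from
this diff.

static void example_idle_c1(void)
{
	unsigned long eax = 0x00;			/* C1, sub-state 0 (example)      */
	unsigned long ecx = MWAIT_ECX_INTERRUPT_BREAK;	/* break on interrupts, even masked */

	mwait_idle_with_hints(eax, ecx);
}
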
diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
index c87892442e53..775873d3be55 100644
--- a/arch/x86/include/asm/page.h
+++ b/arch/x86/include/asm/page.h
@@ -71,6 +71,7 @@ extern bool __virt_addr_valid(unsigned long kaddr);
#include <asm-generic/getorder.h>
#define __HAVE_ARCH_GATE_AREA 1
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif /* __KERNEL__ */
#endif /* _ASM_X86_PAGE_H */
diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h
index 4d550d04b609..904f528cc8e8 100644
--- a/arch/x86/include/asm/page_32.h
+++ b/arch/x86/include/asm/page_32.h
@@ -5,10 +5,6 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_HUGETLB_PAGE
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-#endif
-
#define __phys_addr_nodebug(x) ((x) - PAGE_OFFSET)
#ifdef CONFIG_DEBUG_VIRTUAL
extern unsigned long __phys_addr(unsigned long);
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 43dcd804ebd5..8de6d9cf3b95 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -39,9 +39,18 @@
#define __VIRTUAL_MASK_SHIFT 47
/*
- * Kernel image size is limited to 512 MB (see level2_kernel_pgt in
- * arch/x86/kernel/head_64.S), and it is mapped here:
+ * Kernel image size is limited to 1GiB due to the fixmap living in the
+ * next 1GiB (see level2_kernel_pgt in arch/x86/kernel/head_64.S). Use
+ * 512MiB by default, leaving 1.5GiB for modules once the page tables
+ * are fully set up. If kernel ASLR is configured, it can extend the
+ * kernel page table mapping, reducing the size of the modules area.
*/
-#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024)
+#define KERNEL_IMAGE_SIZE_DEFAULT (512 * 1024 * 1024)
+#if defined(CONFIG_RANDOMIZE_BASE) && \
+ CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE_DEFAULT
+#define KERNEL_IMAGE_SIZE CONFIG_RANDOMIZE_BASE_MAX_OFFSET
+#else
+#define KERNEL_IMAGE_SIZE KERNEL_IMAGE_SIZE_DEFAULT
+#endif
#endif /* _ASM_X86_PAGE_64_DEFS_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 401f350ef71b..cd6e1610e29e 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -781,9 +781,9 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
*/
#define PV_CALLEE_SAVE_REGS_THUNK(func) \
extern typeof(func) __raw_callee_save_##func; \
- static void *__##func##__ __used = func; \
\
asm(".pushsection .text;" \
+ ".globl __raw_callee_save_" #func " ; " \
"__raw_callee_save_" #func ": " \
PV_SAVE_ALL_CALLER_REGS \
"call " #func ";" \
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index aab8f671b523..7549b8b369e4 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -388,10 +388,11 @@ extern struct pv_lock_ops pv_lock_ops;
_paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
/* Simple instruction patching code. */
-#define DEF_NATIVE(ops, name, code) \
- extern const char start_##ops##_##name[] __visible, \
- end_##ops##_##name[] __visible; \
- asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
+#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
+
+#define DEF_NATIVE(ops, name, code) \
+ __visible extern const char start_##ops##_##name[], end_##ops##_##name[]; \
+ asm(NATIVE_LABEL("start_", ops, name) code NATIVE_LABEL("end_", ops, name))
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h
index 947b5c417e83..1ac6114c9ea5 100644
--- a/arch/x86/include/asm/pci.h
+++ b/arch/x86/include/asm/pci.h
@@ -104,7 +104,7 @@ extern void pci_iommu_alloc(void);
struct msi_desc;
int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void native_teardown_msi_irq(unsigned int irq);
-void native_restore_msi_irqs(struct pci_dev *dev, int irq);
+void native_restore_msi_irqs(struct pci_dev *dev);
int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
unsigned int irq_base, unsigned int irq_offset);
#else
@@ -125,7 +125,6 @@ int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
/* generic pci stuff */
#include <asm-generic/pci.h>
-#define PCIBIOS_MAX_MEM_32 0xffffffff
#ifdef CONFIG_NUMA
/* Returns the node based on pci bus */
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index 3bf2dd0cf61f..0d193e234647 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -55,6 +55,13 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif
+/* Bit manipulation helper on pte/pgoff entry */
+static inline unsigned long pte_bitop(unsigned long value, unsigned int rightshift,
+ unsigned long mask, unsigned int leftshift)
+{
+ return ((value >> rightshift) & mask) << leftshift;
+}
+
#ifdef CONFIG_MEM_SOFT_DIRTY
/*
@@ -71,31 +78,34 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
#define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1)
-#define pte_to_pgoff(pte) \
- ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \
- & ((1U << PTE_FILE_BITS1) - 1))) \
- + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \
- & ((1U << PTE_FILE_BITS2) - 1)) \
- << (PTE_FILE_BITS1)) \
- + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \
- & ((1U << PTE_FILE_BITS3) - 1)) \
- << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
- + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \
- << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))
-
-#define pgoff_to_pte(off) \
- ((pte_t) { .pte_low = \
- ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \
- + ((((off) >> PTE_FILE_BITS1) \
- & ((1U << PTE_FILE_BITS2) - 1)) \
- << PTE_FILE_SHIFT2) \
- + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
- & ((1U << PTE_FILE_BITS3) - 1)) \
- << PTE_FILE_SHIFT3) \
- + ((((off) >> \
- (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \
- << PTE_FILE_SHIFT4) \
- + _PAGE_FILE })
+#define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1)
+#define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1)
+#define PTE_FILE_MASK3 ((1U << PTE_FILE_BITS3) - 1)
+
+#define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1)
+#define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2)
+#define PTE_FILE_LSHIFT4 (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)
+
+static __always_inline pgoff_t pte_to_pgoff(pte_t pte)
+{
+ return (pgoff_t)
+ (pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) +
+ pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) +
+ pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, PTE_FILE_MASK3, PTE_FILE_LSHIFT3) +
+ pte_bitop(pte.pte_low, PTE_FILE_SHIFT4, -1UL, PTE_FILE_LSHIFT4));
+}
+
+static __always_inline pte_t pgoff_to_pte(pgoff_t off)
+{
+ return (pte_t){
+ .pte_low =
+ pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) +
+ pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) +
+ pte_bitop(off, PTE_FILE_LSHIFT3, PTE_FILE_MASK3, PTE_FILE_SHIFT3) +
+ pte_bitop(off, PTE_FILE_LSHIFT4, -1UL, PTE_FILE_SHIFT4) +
+ _PAGE_FILE,
+ };
+}
#else /* CONFIG_MEM_SOFT_DIRTY */
@@ -115,22 +125,30 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
-#define pte_to_pgoff(pte) \
- ((((pte).pte_low >> PTE_FILE_SHIFT1) \
- & ((1U << PTE_FILE_BITS1) - 1)) \
- + ((((pte).pte_low >> PTE_FILE_SHIFT2) \
- & ((1U << PTE_FILE_BITS2) - 1)) << PTE_FILE_BITS1) \
- + (((pte).pte_low >> PTE_FILE_SHIFT3) \
- << (PTE_FILE_BITS1 + PTE_FILE_BITS2)))
-
-#define pgoff_to_pte(off) \
- ((pte_t) { .pte_low = \
- (((off) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \
- + ((((off) >> PTE_FILE_BITS1) & ((1U << PTE_FILE_BITS2) - 1)) \
- << PTE_FILE_SHIFT2) \
- + (((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
- << PTE_FILE_SHIFT3) \
- + _PAGE_FILE })
+#define PTE_FILE_MASK1 ((1U << PTE_FILE_BITS1) - 1)
+#define PTE_FILE_MASK2 ((1U << PTE_FILE_BITS2) - 1)
+
+#define PTE_FILE_LSHIFT2 (PTE_FILE_BITS1)
+#define PTE_FILE_LSHIFT3 (PTE_FILE_BITS1 + PTE_FILE_BITS2)
+
+static __always_inline pgoff_t pte_to_pgoff(pte_t pte)
+{
+ return (pgoff_t)
+ (pte_bitop(pte.pte_low, PTE_FILE_SHIFT1, PTE_FILE_MASK1, 0) +
+ pte_bitop(pte.pte_low, PTE_FILE_SHIFT2, PTE_FILE_MASK2, PTE_FILE_LSHIFT2) +
+ pte_bitop(pte.pte_low, PTE_FILE_SHIFT3, -1UL, PTE_FILE_LSHIFT3));
+}
+
+static __always_inline pte_t pgoff_to_pte(pgoff_t off)
+{
+ return (pte_t){
+ .pte_low =
+ pte_bitop(off, 0, PTE_FILE_MASK1, PTE_FILE_SHIFT1) +
+ pte_bitop(off, PTE_FILE_LSHIFT2, PTE_FILE_MASK2, PTE_FILE_SHIFT2) +
+ pte_bitop(off, PTE_FILE_LSHIFT3, -1UL, PTE_FILE_SHIFT3) +
+ _PAGE_FILE,
+ };
+}
#endif /* CONFIG_MEM_SOFT_DIRTY */
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 3d1999458709..5ad38ad07890 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -445,16 +445,33 @@ static inline int pte_same(pte_t a, pte_t b)
return a.pte == b.pte;
}
+static inline int pteval_present(pteval_t pteval)
+{
+ /*
+ * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
+ * way clearly states that the intent is that protnone and numa
+ * hinting ptes are considered present for the purposes of
+ * pagetable operations like zapping, protection changes, gup etc.
+ */
+ return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
+}
+
static inline int pte_present(pte_t a)
{
- return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
- _PAGE_NUMA);
+ return pteval_present(pte_flags(a));
}
#define pte_accessible pte_accessible
-static inline int pte_accessible(pte_t a)
+static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
- return pte_flags(a) & _PAGE_PRESENT;
+ if (pte_flags(a) & _PAGE_PRESENT)
+ return true;
+
+ if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
+ mm_tlb_flush_pending(mm))
+ return true;
+
+ return false;
}
static inline int pte_hidden(pte_t pte)
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
index 2d883440cb9a..c883bf726398 100644
--- a/arch/x86/include/asm/pgtable_64_types.h
+++ b/arch/x86/include/asm/pgtable_64_types.h
@@ -58,7 +58,7 @@ typedef struct { pteval_t pte; } pte_t;
#define VMALLOC_START _AC(0xffffc90000000000, UL)
#define VMALLOC_END _AC(0xffffe8ffffffffff, UL)
#define VMEMMAP_START _AC(0xffffea0000000000, UL)
-#define MODULES_VADDR _AC(0xffffffffa0000000, UL)
+#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
#define MODULES_END _AC(0xffffffffff000000, UL)
#define MODULES_LEN (MODULES_END - MODULES_VADDR)
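
With the default (non-ASLR) KERNEL_IMAGE_SIZE of 512 MiB this change is a no-op, which
is easy to check by hand:

	MODULES_VADDR = __START_KERNEL_map + KERNEL_IMAGE_SIZE
	              = 0xffffffff80000000 + 0x20000000
	              = 0xffffffffa0000000           (the old hard-coded value)

	MODULES_LEN   = 0xffffffffff000000 - 0xffffffffa0000000
	              = 0x5f000000 = 1520 MiB        (~1.5 GiB left for modules)

Raising CONFIG_RANDOMIZE_BASE_MAX_OFFSET above 512 MiB grows KERNEL_IMAGE_SIZE and
shrinks MODULES_LEN by the same amount, which is the trade-off the page_64_types.h
comment describes.
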
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 0ecac257fb26..1aa9ccd43223 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -121,7 +121,8 @@
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
- _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
+ _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
+ _PAGE_SOFT_DIRTY)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
@@ -382,7 +383,8 @@ static inline void update_page_count(int level, unsigned long pages) { }
*/
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern phys_addr_t slow_virt_to_phys(void *__address);
-
+extern int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
+ unsigned numpages, unsigned long page_flags);
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_DEFS_H */
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 8729723636fd..c8b051933b1b 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -8,6 +8,12 @@
DECLARE_PER_CPU(int, __preempt_count);
/*
+ * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
+ * that a decrement hitting 0 means we can and should reschedule.
+ */
+#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
+
+/*
* We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
* that think a non-zero value indicates we cannot preempt.
*/
@@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val)
__this_cpu_add_4(__preempt_count, -val);
}
+/*
+ * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule,
+ * a decrement which hits zero means we have no preempt_count and should
+ * reschedule.
+ */
static __always_inline bool __preempt_count_dec_and_test(void)
{
GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
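To make the inverted-bit trick above concrete: PREEMPT_NEED_RESCHED stays set while no reschedule is wanted, so the only way the decrement can reach zero is when the nesting count is zero and the bit has been cleared (a reschedule is wanted). A user-space sketch of the arithmetic, with an assumed bit position, follows.

#include <stdbool.h>
#include <stdio.h>

/* Assumed bit position for illustration only. */
#define NEED_RESCHED_BIT (1u << 31)

/* Count starts "enabled": zero nesting, inverted bit set. */
static unsigned int preempt_count = NEED_RESCHED_BIT;

static void set_need_resched(void)   { preempt_count &= ~NEED_RESCHED_BIT; }
static void clear_need_resched(void) { preempt_count |=  NEED_RESCHED_BIT; }

static void preempt_disable(void)    { preempt_count++; }

/* Returns true when we should reschedule, mirroring dec_and_test. */
static bool preempt_enable_dec_and_test(void)
{
	return --preempt_count == 0;
}

int main(void)
{
	preempt_disable();
	set_need_resched();
	printf("reschedule on enable: %d\n", preempt_enable_dec_and_test()); /* 1 */

	clear_need_resched();
	preempt_disable();
	printf("reschedule on enable: %d\n", preempt_enable_dec_and_test()); /* 0 */
	return 0;
}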
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 7b034a4057f9..fdedd38fd0fc 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -27,7 +27,6 @@ struct mm_struct;
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/math64.h>
-#include <linux/init.h>
#include <linux/err.h>
#include <linux/irqflags.h>
@@ -72,6 +71,7 @@ extern u16 __read_mostly tlb_lli_4m[NR_INFO];
extern u16 __read_mostly tlb_lld_4k[NR_INFO];
extern u16 __read_mostly tlb_lld_2m[NR_INFO];
extern u16 __read_mostly tlb_lld_4m[NR_INFO];
+extern u16 __read_mostly tlb_lld_1g[NR_INFO];
extern s8 __read_mostly tlb_flushall_shift;
/*
@@ -370,6 +370,20 @@ struct ymmh_struct {
u32 ymmh_space[64];
};
+/* We don't support LWP yet: */
+struct lwp_struct {
+ u8 reserved[128];
+};
+
+struct bndregs_struct {
+ u64 bndregs[8];
+} __packed;
+
+struct bndcsr_struct {
+ u64 cfg_reg_u;
+ u64 status_reg;
+} __packed;
+
struct xsave_hdr_struct {
u64 xstate_bv;
u64 reserved1[2];
@@ -380,6 +394,9 @@ struct xsave_struct {
struct i387_fxsave_struct i387;
struct xsave_hdr_struct xsave_hdr;
struct ymmh_struct ymmh;
+ struct lwp_struct lwp;
+ struct bndregs_struct bndregs;
+ struct bndcsr_struct bndcsr;
/* new processor state extensions will go here */
} __attribute__ ((packed, aligned (64)));
@@ -700,29 +717,6 @@ static inline void sync_core(void)
#endif
}
-static inline void __monitor(const void *eax, unsigned long ecx,
- unsigned long edx)
-{
- /* "monitor %eax, %ecx, %edx;" */
- asm volatile(".byte 0x0f, 0x01, 0xc8;"
- :: "a" (eax), "c" (ecx), "d"(edx));
-}
-
-static inline void __mwait(unsigned long eax, unsigned long ecx)
-{
- /* "mwait %eax, %ecx;" */
- asm volatile(".byte 0x0f, 0x01, 0xc9;"
- :: "a" (eax), "c" (ecx));
-}
-
-static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
-{
- trace_hardirqs_on();
- /* "mwait %eax, %ecx;" */
- asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
- :: "a" (eax), "c" (ecx));
-}
-
extern void select_idle_routine(const struct cpuinfo_x86 *c);
extern void init_amd_e400_c1e_mask(void);
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 942a08623a1a..14fd6fd75a19 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -60,7 +60,6 @@ struct pt_regs {
#endif /* !__i386__ */
-#include <linux/init.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt_types.h>
#endif
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 59bcf4e22418..d62c9f809bc5 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -3,7 +3,6 @@
#include <uapi/asm/setup.h>
-
#define COMMAND_LINE_SIZE 2048
#include <linux/linkage.h>
@@ -29,6 +28,8 @@
#include <asm/bootparam.h>
#include <asm/x86_init.h>
+extern u64 relocated_ramdisk;
+
/* Interrupt control for vSMPowered x86_64 systems */
#ifdef CONFIG_X86_64
void vsmp_init(void);
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 4137890e88e3..8cd27e08e23c 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -2,7 +2,6 @@
#define _ASM_X86_SMP_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
-#include <linux/init.h>
#include <asm/percpu.h>
/*
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 3ba3de457d05..e1940c06ed02 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -163,9 +163,11 @@ struct thread_info {
*/
#ifndef __ASSEMBLY__
-
-/* how to get the current stack pointer from C */
-register unsigned long current_stack_pointer asm("esp") __used;
+#define current_stack_pointer ({ \
+ unsigned long sp; \
+ asm("mov %%esp,%0" : "=g" (sp)); \
+ sp; \
+})
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
index 34baa0eb5d0c..a04eabd43d06 100644
--- a/arch/x86/include/asm/timer.h
+++ b/arch/x86/include/asm/timer.h
@@ -1,9 +1,9 @@
#ifndef _ASM_X86_TIMER_H
#define _ASM_X86_TIMER_H
-#include <linux/init.h>
#include <linux/pm.h>
#include <linux/percpu.h>
#include <linux/interrupt.h>
+#include <linux/math64.h>
#define TICK_SIZE (tick_nsec / 1000)
@@ -12,68 +12,26 @@ extern int recalibrate_cpu_khz(void);
extern int no_timer_check;
-/* Accelerators for sched_clock()
- * convert from cycles(64bits) => nanoseconds (64bits)
- * basic equation:
- * ns = cycles / (freq / ns_per_sec)
- * ns = cycles * (ns_per_sec / freq)
- * ns = cycles * (10^9 / (cpu_khz * 10^3))
- * ns = cycles * (10^6 / cpu_khz)
+/*
+ * We use the full linear equation: f(x) = a + b*x, in order to allow
+ * a continuous function in the face of dynamic freq changes.
*
- * Then we use scaling math (suggested by george@mvista.com) to get:
- * ns = cycles * (10^6 * SC / cpu_khz) / SC
- * ns = cycles * cyc2ns_scale / SC
+ * Continuity means that when our frequency changes our slope (b), we want to
+ * ensure that: f(t) == f'(t), which gives: a + b*t == a' + b'*t.
*
- * And since SC is a constant power of two, we can convert the div
- * into a shift.
+ * Without an offset (a) the above would not be possible.
*
- * We can use khz divisor instead of mhz to keep a better precision, since
- * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
- * (mathieu.desnoyers@polymtl.ca)
- *
- * -johnstul@us.ibm.com "math is hard, lets go shopping!"
- *
- * In:
- *
- * ns = cycles * cyc2ns_scale / SC
- *
- * Although we may still have enough bits to store the value of ns,
- * in some cases, we may not have enough bits to store cycles * cyc2ns_scale,
- * leading to an incorrect result.
- *
- * To avoid this, we can decompose 'cycles' into quotient and remainder
- * of division by SC. Then,
- *
- * ns = (quot * SC + rem) * cyc2ns_scale / SC
- * = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
- *
- * - sqazi@google.com
+ * See the comment near cycles_2_ns() for details on how we compute (b).
*/
-
-DECLARE_PER_CPU(unsigned long, cyc2ns);
-DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
-
-#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
-
-static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
-{
- int cpu = smp_processor_id();
- unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
- ns += mult_frac(cyc, per_cpu(cyc2ns, cpu),
- (1UL << CYC2NS_SCALE_FACTOR));
- return ns;
-}
-
-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
-{
- unsigned long long ns;
- unsigned long flags;
-
- local_irq_save(flags);
- ns = __cycles_2_ns(cyc);
- local_irq_restore(flags);
-
- return ns;
-}
+struct cyc2ns_data {
+ u32 cyc2ns_mul;
+ u32 cyc2ns_shift;
+ u64 cyc2ns_offset;
+ u32 __count;
+ /* u32 hole */
+}; /* 24 bytes -- do not grow */
+
+extern struct cyc2ns_data *cyc2ns_read_begin(void);
+extern void cyc2ns_read_end(struct cyc2ns_data *);
#endif /* _ASM_X86_TIMER_H */
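For readers of the new cyc2ns_data layout: nanoseconds are computed as offset + (cycles * mul) >> shift, with mul/shift derived from the CPU frequency so that the continuity condition above (a + b*t == a' + b'*t) can be preserved across frequency changes. A hedged user-space sketch of picking mul for a given kHz value and converting cycles, assuming a fixed 22-bit shift budget (the kernel uses a dedicated 64x32 multiply-shift helper instead of the __int128 cast used here), follows.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* ns = offset + (cycles * mul) >> shift, with mul = 10^6 * 2^shift / khz. */
struct cyc2ns { uint32_t mul, shift; uint64_t offset; };

static struct cyc2ns cyc2ns_init(uint64_t cpu_khz)
{
	struct cyc2ns d = { .shift = 22, .offset = 0 };
	d.mul = (uint32_t)(((1000000ull << d.shift) + cpu_khz / 2) / cpu_khz);
	return d;
}

static uint64_t cycles_to_ns(const struct cyc2ns *d, uint64_t cyc)
{
	/* 128-bit product (GCC/Clang extension) avoids overflow for large cycle counts. */
	return d->offset + (uint64_t)(((unsigned __int128)cyc * d->mul) >> d->shift);
}

int main(void)
{
	struct cyc2ns d = cyc2ns_init(2400000);		/* 2.4 GHz */
	uint64_t cyc = 2400000000ull;			/* ~1 second worth of cycles */

	printf("mul=%u shift=%u -> %" PRIu64 " ns\n",
	       d.mul, d.shift, cycles_to_ns(&d, cyc));
	return 0;
}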
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index e6d90babc245..04905bfc508b 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -62,7 +62,7 @@ static inline void __flush_tlb_all(void)
static inline void __flush_tlb_one(unsigned long addr)
{
- count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
__flush_tlb_single(addr);
}
@@ -93,13 +93,13 @@ static inline void __flush_tlb_one(unsigned long addr)
*/
static inline void __flush_tlb_up(void)
{
- count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
__flush_tlb();
}
static inline void flush_tlb_all(void)
{
- count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
__flush_tlb_all();
}
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 235be70d5bb4..57ae63cd6ee2 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -65,4 +65,7 @@ extern int notsc_setup(char *);
extern void tsc_save_sched_clock_state(void);
extern void tsc_restore_sched_clock_state(void);
+/* MSR based TSC calibration for Intel Atom SoC platforms */
+int try_msr_calibrate_tsc(unsigned long *fast_calibrate);
+
#endif /* _ASM_X86_TSC_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 8ec57c07b125..0d592e0a5b84 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -40,22 +40,30 @@
/*
* Test whether a block of memory is a valid user space address.
* Returns 0 if the range is valid, nonzero otherwise.
- *
- * This is equivalent to the following test:
- * (u33)addr + (u33)size > (u33)current->addr_limit.seg (u65 for x86_64)
- *
- * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
*/
+static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
+{
+ /*
+ * If we have used "sizeof()" for the size,
+ * we know it won't overflow the limit (but
+ * it might overflow the 'addr', so it's
+ * important to subtract the size from the
+ * limit, not add it to the address).
+ */
+ if (__builtin_constant_p(size))
+ return addr > limit - size;
+
+ /* Arbitrary sizes? Be careful about overflow */
+ addr += size;
+ if (addr < size)
+ return true;
+ return addr > limit;
+}
#define __range_not_ok(addr, size, limit) \
({ \
- unsigned long flag, roksum; \
__chk_user_ptr(addr); \
- asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \
- : "=&r" (flag), "=r" (roksum) \
- : "1" (addr), "g" ((long)(size)), \
- "rm" (limit)); \
- flag; \
+ __chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
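A quick way to see why the C version above is safe: for constant sizes it compares against limit - size (which cannot underflow, since a sizeof() never exceeds the limit), and for arbitrary sizes it detects addr + size wrapping before comparing. A user-space sketch of the arbitrary-size path with a few spot checks, using an illustrative limit value:

#include <assert.h>
#include <limits.h>
#include <stdbool.h>

/* Same shape as the new __chk_range_not_ok(): true means "bad range". */
static bool range_not_ok(unsigned long addr, unsigned long size,
			 unsigned long limit)
{
	addr += size;
	if (addr < size)		/* addr + size wrapped around */
		return true;
	return addr > limit;
}

int main(void)
{
	unsigned long limit = 0xc0000000ul;	/* illustrative TASK_SIZE-style limit */

	assert(!range_not_ok(0x1000, 0x100, limit));	/* well inside the limit */
	assert(range_not_ok(limit, 1, limit));		/* extends past the limit */
	assert(range_not_ok(ULONG_MAX, 2, limit));	/* wraps around */
	return 0;
}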
/**
@@ -78,7 +86,7 @@
* this function, memory access functions may still return -EFAULT.
*/
#define access_ok(type, addr, size) \
- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
+ likely(!__range_not_ok(addr, size, user_addr_max()))
/*
* The exception table consists of pairs of addresses relative to the
@@ -525,6 +533,98 @@ extern __must_check long strnlen_user(const char __user *str, long n);
unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
+extern void __cmpxchg_wrong_size(void)
+ __compiletime_error("Bad argument size for cmpxchg");
+
+#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size) \
+({ \
+ int __ret = 0; \
+ __typeof__(ptr) __uval = (uval); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ switch (size) { \
+ case 1: \
+ { \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n" \
+ "2:\t" ASM_CLAC "\n" \
+ "\t.section .fixup, \"ax\"\n" \
+ "3:\tmov %3, %0\n" \
+ "\tjmp 2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+ : "i" (-EFAULT), "q" (__new), "1" (__old) \
+ : "memory" \
+ ); \
+ break; \
+ } \
+ case 2: \
+ { \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n" \
+ "2:\t" ASM_CLAC "\n" \
+ "\t.section .fixup, \"ax\"\n" \
+ "3:\tmov %3, %0\n" \
+ "\tjmp 2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+ : "i" (-EFAULT), "r" (__new), "1" (__old) \
+ : "memory" \
+ ); \
+ break; \
+ } \
+ case 4: \
+ { \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" \
+ "2:\t" ASM_CLAC "\n" \
+ "\t.section .fixup, \"ax\"\n" \
+ "3:\tmov %3, %0\n" \
+ "\tjmp 2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+ : "i" (-EFAULT), "r" (__new), "1" (__old) \
+ : "memory" \
+ ); \
+ break; \
+ } \
+ case 8: \
+ { \
+ if (!IS_ENABLED(CONFIG_X86_64)) \
+ __cmpxchg_wrong_size(); \
+ \
+ asm volatile("\t" ASM_STAC "\n" \
+ "1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n" \
+ "2:\t" ASM_CLAC "\n" \
+ "\t.section .fixup, \"ax\"\n" \
+ "3:\tmov %3, %0\n" \
+ "\tjmp 2b\n" \
+ "\t.previous\n" \
+ _ASM_EXTABLE(1b, 3b) \
+ : "+r" (__ret), "=a" (__old), "+m" (*(ptr)) \
+ : "i" (-EFAULT), "r" (__new), "1" (__old) \
+ : "memory" \
+ ); \
+ break; \
+ } \
+ default: \
+ __cmpxchg_wrong_size(); \
+ } \
+ *__uval = __old; \
+ __ret; \
+})
+
+#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new) \
+({ \
+ access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) ? \
+ __user_atomic_cmpxchg_inatomic((uval), (ptr), \
+ (old), (new), sizeof(*(ptr))) : \
+ -EFAULT; \
+})
+
/*
* movsl can be slow when source and dest are not both 8-byte aligned
*/
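The user_atomic_cmpxchg_inatomic() helper added above follows the usual compare-exchange contract: it writes new only if *ptr still equals old, and hands back the value it observed; its return value only signals a fault, not a failed compare. Outside the kernel the same semantics can be sketched with the GCC/Clang builtin; this is an analogue of the contract only, not of the STAC/CLAC and exception-table fault handling:

#include <stdint.h>
#include <stdio.h>

/*
 * Always returns 0 here; the kernel macro returns -EFAULT only on a
 * fault, not on a failed compare. The observed value lands in *uval.
 */
static int cmpxchg_user_like(uint32_t *uval, uint32_t *ptr,
			     uint32_t old, uint32_t new)
{
	uint32_t expected = old;

	__atomic_compare_exchange_n(ptr, &expected, new, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	*uval = expected;	/* value seen at *ptr; equals old on success */
	return 0;
}

int main(void)
{
	uint32_t word = 5, seen;

	cmpxchg_user_like(&seen, &word, 5, 9);	/* succeeds: 5 -> 9 */
	printf("word=%u seen=%u\n", word, seen);

	cmpxchg_user_like(&seen, &word, 5, 7);	/* fails: word is now 9 */
	printf("word=%u seen=%u\n", word, seen);
	return 0;
}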
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 190413d0de57..12a26b979bf1 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -204,13 +204,13 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
- return __copy_from_user_nocheck(dst, (__force const void *)src, size);
+ return __copy_from_user_nocheck(dst, src, size);
}
static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
- return __copy_to_user_nocheck((__force void *)dst, src, size);
+ return __copy_to_user_nocheck(dst, src, size);
}
extern long __copy_user_nocache(void *dst, const void __user *src,
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 6b964a0b86d1..062921ef34e9 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -12,7 +12,6 @@ extern enum uv_system_type get_uv_system_type(void);
extern int is_uv_system(void);
extern void uv_cpu_init(void);
extern void uv_nmi_init(void);
-extern void uv_register_nmi_notifier(void);
extern void uv_system_init(void);
extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
struct mm_struct *mm,
@@ -26,7 +25,6 @@ static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
static inline int is_uv_system(void) { return 0; }
static inline void uv_cpu_init(void) { }
static inline void uv_system_init(void) { }
-static inline void uv_register_nmi_notifier(void) { }
static inline const struct cpumask *
uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
unsigned long start, unsigned long end, unsigned int cpu)
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 966502d4682e..2067264fb7f5 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -100,6 +100,7 @@
#define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f
#define VMX_MISC_SAVE_EFER_LMA 0x00000020
+#define VMX_MISC_ACTIVITY_HLT 0x00000040
/* VMCS Encodings */
enum vmcs_field {
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 0f1be11e43d2..e45e4da96bf1 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -181,7 +181,7 @@ struct x86_msi_ops {
u8 hpet_id);
void (*teardown_msi_irq)(unsigned int irq);
void (*teardown_msi_irqs)(struct pci_dev *dev);
- void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
+ void (*restore_msi_irqs)(struct pci_dev *dev);
int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);
u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag);
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index b913915e8e63..3e276eb23d1b 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -167,7 +167,12 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine)
*/
static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
{
- unsigned long pfn = mfn_to_pfn(mfn);
+ unsigned long pfn;
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return mfn;
+
+ pfn = mfn_to_pfn(mfn);
if (get_phys_to_machine(pfn) != mfn)
return -1; /* force !pfn_valid() */
return pfn;
@@ -222,5 +227,6 @@ void make_lowmem_page_readonly(void *vaddr);
void make_lowmem_page_readwrite(void *vaddr);
#define xen_remap(cookie, size) ioremap((cookie), (size));
+#define xen_unmap(cookie) iounmap((cookie))
#endif /* _ASM_X86_XEN_PAGE_H */
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
index 0415cdabb5a6..554738963b28 100644
--- a/arch/x86/include/asm/xsave.h
+++ b/arch/x86/include/asm/xsave.h
@@ -9,6 +9,8 @@
#define XSTATE_FP 0x1
#define XSTATE_SSE 0x2
#define XSTATE_YMM 0x4
+#define XSTATE_BNDREGS 0x8
+#define XSTATE_BNDCSR 0x10
#define XSTATE_FPSSE (XSTATE_FP | XSTATE_SSE)
@@ -20,10 +22,14 @@
#define XSAVE_YMM_SIZE 256
#define XSAVE_YMM_OFFSET (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
-/*
- * These are the features that the OS can handle currently.
- */
-#define XCNTXT_MASK (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
+/* Supported features which support lazy state saving */
+#define XSTATE_LAZY (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
+
+/* Supported features which require eager state saving */
+#define XSTATE_EAGER (XSTATE_BNDREGS | XSTATE_BNDCSR)
+
+/* All currently supported features */
+#define XCNTXT_MASK (XSTATE_LAZY | XSTATE_EAGER)
#ifdef CONFIG_X86_64
#define REX_PREFIX "0x48, "
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index 9c3733c5f8f7..225b0988043a 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -6,6 +6,7 @@
#define SETUP_E820_EXT 1
#define SETUP_DTB 2
#define SETUP_PCI 3
+#define SETUP_EFI 4
/* ram_size flags */
#define RAMDISK_IMAGE_START_MASK 0x07FF
@@ -23,6 +24,7 @@
#define XLF_CAN_BE_LOADED_ABOVE_4G (1<<1)
#define XLF_EFI_HANDOVER_32 (1<<2)
#define XLF_EFI_HANDOVER_64 (1<<3)
+#define XLF_EFI_KEXEC (1<<4)
#ifndef __ASSEMBLY__
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index b8f1c0176cbc..462efe746d77 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -28,6 +28,9 @@
/* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/
#define HV_X64_MSR_TIME_REF_COUNT_AVAILABLE (1 << 1)
+/* A partition's reference time stamp counter (TSC) page */
+#define HV_X64_MSR_REFERENCE_TSC 0x40000021
+
/*
* There is a single feature flag that signifies the presence of the MSR
* that can be used to retrieve both the local APIC Timer frequency as
@@ -198,6 +201,9 @@
#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_MASK \
(~((1ull << HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT) - 1))
+#define HV_X64_MSR_TSC_REFERENCE_ENABLE 0x00000001
+#define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT 12
+
#define HV_PROCESSOR_POWER_STATE_C0 0
#define HV_PROCESSOR_POWER_STATE_C1 1
#define HV_PROCESSOR_POWER_STATE_C2 2
@@ -210,4 +216,11 @@
#define HV_STATUS_INVALID_ALIGNMENT 4
#define HV_STATUS_INSUFFICIENT_BUFFERS 19
+typedef struct _HV_REFERENCE_TSC_PAGE {
+ __u32 tsc_sequence;
+ __u32 res1;
+ __u64 tsc_scale;
+ __s64 tsc_offset;
+} HV_REFERENCE_TSC_PAGE, *PHV_REFERENCE_TSC_PAGE;
+
#endif
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
index 37813b5ddc37..c19fc60ff062 100644
--- a/arch/x86/include/uapi/asm/msr-index.h
+++ b/arch/x86/include/uapi/asm/msr-index.h
@@ -184,6 +184,7 @@
#define MSR_AMD64_PATCH_LOADER 0xc0010020
#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
#define MSR_AMD64_OSVW_STATUS 0xc0010141
+#define MSR_AMD64_LS_CFG 0xc0011020
#define MSR_AMD64_DC_CFG 0xc0011022
#define MSR_AMD64_BU_CFG2 0xc001102a
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
@@ -527,6 +528,7 @@
#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
#define MSR_IA32_VMX_TRUE_EXIT_CTLS 0x0000048f
#define MSR_IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
+#define MSR_IA32_VMX_VMFUNC 0x00000491
/* VMX_BASIC bits and bitmasks */
#define VMX_BASIC_VMCS_SIZE_SHIFT 32
diff --git a/arch/x86/include/uapi/asm/sembuf.h b/arch/x86/include/uapi/asm/sembuf.h
index ee50c801f7b7..cc2d6a3aeae7 100644
--- a/arch/x86/include/uapi/asm/sembuf.h
+++ b/arch/x86/include/uapi/asm/sembuf.h
@@ -13,12 +13,12 @@
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
__kernel_time_t sem_otime; /* last semop time */
- unsigned long __unused1;
+ __kernel_ulong_t __unused1;
__kernel_time_t sem_ctime; /* last change time */
- unsigned long __unused2;
- unsigned long sem_nsems; /* no. of semaphores in array */
- unsigned long __unused3;
- unsigned long __unused4;
+ __kernel_ulong_t __unused2;
+ __kernel_ulong_t sem_nsems; /* no. of semaphores in array */
+ __kernel_ulong_t __unused3;
+ __kernel_ulong_t __unused4;
};
#endif /* _ASM_X86_SEMBUF_H */
diff --git a/arch/x86/include/uapi/asm/stat.h b/arch/x86/include/uapi/asm/stat.h
index 7b3ddc348585..bc03eb5d6360 100644
--- a/arch/x86/include/uapi/asm/stat.h
+++ b/arch/x86/include/uapi/asm/stat.h
@@ -1,6 +1,8 @@
#ifndef _ASM_X86_STAT_H
#define _ASM_X86_STAT_H
+#include <asm/posix_types.h>
+
#define STAT_HAVE_NSEC 1
#ifdef __i386__
@@ -78,26 +80,26 @@ struct stat64 {
#else /* __i386__ */
struct stat {
- unsigned long st_dev;
- unsigned long st_ino;
- unsigned long st_nlink;
-
- unsigned int st_mode;
- unsigned int st_uid;
- unsigned int st_gid;
- unsigned int __pad0;
- unsigned long st_rdev;
- long st_size;
- long st_blksize;
- long st_blocks; /* Number 512-byte blocks allocated. */
-
- unsigned long st_atime;
- unsigned long st_atime_nsec;
- unsigned long st_mtime;
- unsigned long st_mtime_nsec;
- unsigned long st_ctime;
- unsigned long st_ctime_nsec;
- long __unused[3];
+ __kernel_ulong_t st_dev;
+ __kernel_ulong_t st_ino;
+ __kernel_ulong_t st_nlink;
+
+ unsigned int st_mode;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned int __pad0;
+ __kernel_ulong_t st_rdev;
+ __kernel_long_t st_size;
+ __kernel_long_t st_blksize;
+ __kernel_long_t st_blocks; /* Number 512-byte blocks allocated. */
+
+ __kernel_ulong_t st_atime;
+ __kernel_ulong_t st_atime_nsec;
+ __kernel_ulong_t st_mtime;
+ __kernel_ulong_t st_mtime_nsec;
+ __kernel_ulong_t st_ctime;
+ __kernel_ulong_t st_ctime_nsec;
+ __kernel_long_t __unused[3];
};
/* We don't need to memset the whole thing just to initialize the padding */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 9b0a34e2cd79..cb648c84b327 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -29,10 +29,11 @@ obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
obj-y += syscall_$(BITS).o
obj-$(CONFIG_X86_64) += vsyscall_64.o
obj-$(CONFIG_X86_64) += vsyscall_emu_64.o
+obj-$(CONFIG_SYSFS) += ksysfs.o
obj-y += bootflag.o e820.o
obj-y += pci-dma.o quirks.o topology.o kdebugfs.o
obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o
-obj-y += tsc.o io_delay.o rtc.o
+obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
obj-y += pci-iommu_table.o
obj-y += resource.o
@@ -91,15 +92,6 @@ obj-$(CONFIG_PARAVIRT_CLOCK) += pvclock.o
obj-$(CONFIG_PCSPKR_PLATFORM) += pcspeaker.o
-obj-$(CONFIG_MICROCODE_EARLY) += microcode_core_early.o
-obj-$(CONFIG_MICROCODE_INTEL_EARLY) += microcode_intel_early.o
-obj-$(CONFIG_MICROCODE_INTEL_LIB) += microcode_intel_lib.o
-microcode-y := microcode_core.o
-microcode-$(CONFIG_MICROCODE_INTEL) += microcode_intel.o
-microcode-$(CONFIG_MICROCODE_AMD) += microcode_amd.o
-obj-$(CONFIG_MICROCODE_AMD_EARLY) += microcode_amd_early.o
-obj-$(CONFIG_MICROCODE) += microcode.o
-
obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
@@ -111,6 +103,7 @@ obj-$(CONFIG_EFI) += sysfb_efi.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
obj-$(CONFIG_TRACING) += tracepoint.o
+obj-$(CONFIG_IOSF_MBI) += iosf_mbi.o
###
# 64 bit specific files
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 6c0b43bd024b..1dac94265b59 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -46,7 +46,6 @@
#include "sleep.h" /* To include x86_acpi_suspend_lowlevel */
static int __initdata acpi_force = 0;
-u32 acpi_rsdt_forced;
int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
@@ -1034,9 +1033,7 @@ static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger,
if (!acpi_ioapic)
return 0;
- if (!dev)
- return 0;
- if (dev->bus != &pci_bus_type)
+ if (!dev || !dev_is_pci(dev))
return 0;
pdev = to_pci_dev(dev);
@@ -1564,7 +1561,7 @@ static int __init parse_acpi(char *arg)
}
/* acpi=rsdt use RSDT instead of XSDT */
else if (strcmp(arg, "rsdt") == 0) {
- acpi_rsdt_forced = 1;
+ acpi_gbl_do_not_use_xsdt = TRUE;
}
/* "acpi=noirq" disables ACPI interrupt routing */
else if (strcmp(arg, "noirq") == 0) {
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index d2b7f27781bc..e69182fd01cf 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -150,29 +150,6 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
- if (!need_resched()) {
- if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
- clflush((void *)&current_thread_info()->flags);
-
- __monitor((void *)&current_thread_info()->flags, 0, 0);
- smp_mb();
- if (!need_resched())
- __mwait(ax, cx);
- }
-}
-
void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
unsigned int cpu = smp_processor_id();
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 59554dca96ec..dec8de4e1663 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -179,7 +179,7 @@ int amd_get_subcaches(int cpu)
return (mask >> (4 * cuid)) & 0xf;
}
-int amd_set_subcaches(int cpu, int mask)
+int amd_set_subcaches(int cpu, unsigned long mask)
{
static unsigned int reset, ban;
struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index d278736bf774..7f26c9a70a9e 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -75,6 +75,13 @@ unsigned int max_physical_apicid;
physid_mask_t phys_cpu_present_map;
/*
+ * Processor to be disabled specified by kernel parameter
+ * disable_cpu_apicid=<int>, mostly used for the kdump 2nd kernel to
+ * avoid undefined behaviour caused by sending INIT from AP to BSP.
+ */
+static unsigned int disabled_cpu_apicid __read_mostly = BAD_APICID;
+
+/*
* Map cpu index to physical APIC ID
*/
DEFINE_EARLY_PER_CPU_READ_MOSTLY(u16, x86_cpu_to_apicid, BAD_APICID);
@@ -1968,7 +1975,7 @@ __visible void smp_trace_spurious_interrupt(struct pt_regs *regs)
*/
static inline void __smp_error_interrupt(struct pt_regs *regs)
{
- u32 v0, v1;
+ u32 v;
u32 i = 0;
static const char * const error_interrupt_reason[] = {
"Send CS error", /* APIC Error Bit 0 */
@@ -1982,21 +1989,20 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
};
/* First tickle the hardware, only then report what went on. -- REW */
- v0 = apic_read(APIC_ESR);
apic_write(APIC_ESR, 0);
- v1 = apic_read(APIC_ESR);
+ v = apic_read(APIC_ESR);
ack_APIC_irq();
atomic_inc(&irq_err_count);
- apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
- smp_processor_id(), v0 , v1);
+ apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x",
+ smp_processor_id(), v);
- v1 = v1 & 0xff;
- while (v1) {
- if (v1 & 0x1)
+ v &= 0xff;
+ while (v) {
+ if (v & 0x1)
apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]);
i++;
- v1 >>= 1;
+ v >>= 1;
}
apic_printk(APIC_DEBUG, KERN_CONT "\n");
@@ -2115,6 +2121,39 @@ int generic_processor_info(int apicid, int version)
phys_cpu_present_map);
/*
+ * boot_cpu_physical_apicid is designed to have the apicid
+ * returned by read_apic_id(), i.e, the apicid of the
+ * currently booting-up processor. However, on some platforms,
+ * it is temporarily modified by the apicid reported as BSP
+ * through MP table. Concretely:
+ *
+ * - arch/x86/kernel/mpparse.c: MP_processor_info()
+ * - arch/x86/mm/amdtopology.c: amd_numa_init()
+ * - arch/x86/platform/visws/visws_quirks.c: MP_processor_info()
+ *
+ * This function is executed with the modified
+ * boot_cpu_physical_apicid. So, disabled_cpu_apicid kernel
+ * parameter doesn't work to disable APs on kdump 2nd kernel.
+ *
+ * Since fixing handling of boot_cpu_physical_apicid requires
+ * another discussion and tests on each platform, we leave it
+ * for now and here we use read_apic_id() directly in this
+ * function, generic_processor_info().
+ */
+ if (disabled_cpu_apicid != BAD_APICID &&
+ disabled_cpu_apicid != read_apic_id() &&
+ disabled_cpu_apicid == apicid) {
+ int thiscpu = num_processors + disabled_cpus;
+
+ pr_warning("APIC: Disabling requested cpu."
+ " Processor %d/0x%x ignored.\n",
+ thiscpu, apicid);
+
+ disabled_cpus++;
+ return -ENODEV;
+ }
+
+ /*
* If boot cpu has not been detected yet, then only allow upto
* nr_cpu_ids - 1 processors and keep one slot free for boot cpu
*/
@@ -2592,3 +2631,12 @@ static int __init lapic_insert_resource(void)
* that is using request_resource
*/
late_initcall(lapic_insert_resource);
+
+static int __init apic_set_disabled_cpu_apicid(char *arg)
+{
+ if (!arg || !get_option(&arg, &disabled_cpu_apicid))
+ return -EINVAL;
+
+ return 0;
+}
+early_param("disable_cpu_apicid", apic_set_disabled_cpu_apicid);
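The new disable_cpu_apicid= handling boils down to: parse one integer at early boot, then ignore the processor whose APIC ID matches it, unless that ID belongs to the CPU doing the boot. A user-space sketch of the parsing and the skip decision, with get_option() replaced by strtol() purely for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define BAD_APICID 0xFFFFu

static unsigned int disabled_cpu_apicid = BAD_APICID;

/* Stand-in for the early_param() callback, using strtol() instead of get_option(). */
static int parse_disable_cpu_apicid(const char *arg)
{
	char *end;
	long v;

	if (!arg)
		return -1;
	v = strtol(arg, &end, 0);
	if (end == arg || v < 0)
		return -1;
	disabled_cpu_apicid = (unsigned int)v;
	return 0;
}

/* Mirrors the skip test in generic_processor_info(). */
static bool should_ignore(unsigned int apicid, unsigned int booting_apicid)
{
	return disabled_cpu_apicid != BAD_APICID &&
	       disabled_cpu_apicid != booting_apicid &&
	       disabled_cpu_apicid == apicid;
}

int main(void)
{
	parse_disable_cpu_apicid("2");

	printf("%d %d %d\n",
	       should_ignore(2, 0),	/* 1: matches, not the booting CPU */
	       should_ignore(3, 0),	/* 0: different APIC ID */
	       should_ignore(2, 2));	/* 0: never disable the booting CPU */
	return 0;
}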
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 00c77cf78e9e..2c621a6b901a 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -14,16 +14,13 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
-#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/module.h>
#include <asm/smp.h>
#include <asm/apic.h>
#include <asm/ipi.h>
-#ifdef CONFIG_ACPI
-#include <acpi/acpi_bus.h>
-#endif
+#include <linux/acpi.h>
static struct apic apic_physflat;
static struct apic apic_flat;
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index e145f28b4099..191ce75c0e54 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -15,7 +15,6 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index e63a5bd2a78f..6ad4658de705 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -37,9 +37,6 @@
#include <linux/kthread.h>
#include <linux/jiffies.h> /* time_after() */
#include <linux/slab.h>
-#ifdef CONFIG_ACPI
-#include <acpi/acpi_bus.h>
-#endif
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>
@@ -1142,9 +1139,10 @@ next:
if (test_bit(vector, used_vectors))
goto next;
- for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
- if (per_cpu(vector_irq, new_cpu)[vector] != -1)
+ for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
+ if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNDEFINED)
goto next;
+ }
/* Found one! */
current_vector = vector;
current_offset = offset;
@@ -1183,7 +1181,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
vector = cfg->vector;
for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
- per_cpu(vector_irq, cpu)[vector] = -1;
+ per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
cfg->vector = 0;
cpumask_clear(cfg->domain);
@@ -1191,11 +1189,10 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
if (likely(!cfg->move_in_progress))
return;
for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
- for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
- vector++) {
+ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
if (per_cpu(vector_irq, cpu)[vector] != irq)
continue;
- per_cpu(vector_irq, cpu)[vector] = -1;
+ per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
break;
}
}
@@ -1228,12 +1225,12 @@ void __setup_vector_irq(int cpu)
/* Mark the free vectors */
for (vector = 0; vector < NR_VECTORS; ++vector) {
irq = per_cpu(vector_irq, cpu)[vector];
- if (irq < 0)
+ if (irq <= VECTOR_UNDEFINED)
continue;
cfg = irq_cfg(irq);
if (!cpumask_test_cpu(cpu, cfg->domain))
- per_cpu(vector_irq, cpu)[vector] = -1;
+ per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
}
raw_spin_unlock(&vector_lock);
}
@@ -2202,13 +2199,13 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
me = smp_processor_id();
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
- unsigned int irq;
+ int irq;
unsigned int irr;
struct irq_desc *desc;
struct irq_cfg *cfg;
irq = __this_cpu_read(vector_irq[vector]);
- if (irq == -1)
+ if (irq <= VECTOR_UNDEFINED)
continue;
desc = irq_to_desc(irq);
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index 7434d8556d09..62071569bd50 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -1,6 +1,5 @@
#include <linux/cpumask.h>
#include <linux/interrupt.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <linux/delay.h>
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index 77c95c0e1bf7..00146f9b0254 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -29,7 +29,6 @@
#define pr_fmt(fmt) "summit: %s: " fmt, __func__
#include <linux/mm.h>
-#include <linux/init.h>
#include <asm/io.h>
#include <asm/bios_ebda.h>
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 140e29db478d..cac85ee6913f 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -3,7 +3,6 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
-#include <linux/init.h>
#include <linux/dmar.h>
#include <linux/cpu.h>
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 562a76d433c8..de231e328cae 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -3,7 +3,6 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
-#include <linux/init.h>
#include <linux/dmar.h>
#include <asm/smp.h>
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index ad0dc0428baf..d263b1307de1 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -980,7 +980,6 @@ void __init uv_system_init(void)
uv_nmi_setup();
uv_cpu_init();
uv_scir_register_cpu_notifier();
- uv_register_nmi_notifier();
proc_mkdir("sgi_uv", NULL);
/* register Legacy VGA I/O redirection handler */
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index e2dbcb7dabdd..83a7995625a6 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -91,7 +91,7 @@ void __init setup_bios_corruption_check(void)
corruption_check_size = round_up(corruption_check_size, PAGE_SIZE);
- for_each_free_mem_range(i, MAX_NUMNODES, &start, &end, NULL) {
+ for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL) {
start = clamp_t(phys_addr_t, round_up(start, PAGE_SIZE),
PAGE_SIZE, corruption_check_size);
end = clamp_t(phys_addr_t, round_down(end, PAGE_SIZE),
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 47b56a7e99cb..7fd54f09b011 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -36,12 +36,13 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o
endif
obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
-obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o
+obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_rapl.o
endif
obj-$(CONFIG_X86_MCE) += mcheck/
obj-$(CONFIG_MTRR) += mtrr/
+obj-$(CONFIG_MICROCODE) += microcode/
obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o perf_event_amd_ibs.o
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index bca023bdd6b2..c67ffa686064 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -1,5 +1,4 @@
#include <linux/export.h>
-#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/mm.h>
@@ -487,7 +486,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
if (!check_tsc_unstable())
- sched_clock_stable = 1;
+ set_sched_clock_stable();
}
#ifdef CONFIG_X86_64
@@ -508,6 +507,16 @@ static void early_init_amd(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
}
#endif
+
+ /* F16h erratum 793, CVE-2013-6885 */
+ if (c->x86 == 0x16 && c->x86_model <= 0xf) {
+ u64 val;
+
+ rdmsrl(MSR_AMD64_LS_CFG, val);
+ if (!(val & BIT(15)))
+ wrmsrl(MSR_AMD64_LS_CFG, val | BIT(15));
+ }
+
}
static const int amd_erratum_383[];
@@ -758,10 +767,7 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
{
- tlb_flushall_shift = 5;
-
- if (c->x86 <= 0x11)
- tlb_flushall_shift = 4;
+ tlb_flushall_shift = 6;
}
static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
@@ -790,14 +796,10 @@ static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
}
/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
- if (!((eax >> 16) & mask)) {
- u32 a, b, c, d;
-
- cpuid(0x80000005, &a, &b, &c, &d);
- tlb_lld_2m[ENTRIES] = (a >> 16) & 0xff;
- } else {
+ if (!((eax >> 16) & mask))
+ tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
+ else
tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
- }
/* a 4M entry uses two 2M entries */
tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 8d5652dc99dd..8779edab684e 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -1,6 +1,5 @@
#include <linux/bitops.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <asm/processor.h>
#include <asm/e820.h>
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 6abc172b8258..8e28bf2fc3ef 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -284,8 +284,13 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
raw_local_save_flags(eflags);
BUG_ON(eflags & X86_EFLAGS_AC);
- if (cpu_has(c, X86_FEATURE_SMAP))
+ if (cpu_has(c, X86_FEATURE_SMAP)) {
+#ifdef CONFIG_X86_SMAP
set_in_cr4(X86_CR4_SMAP);
+#else
+ clear_in_cr4(X86_CR4_SMAP);
+#endif
+ }
}
/*
@@ -472,6 +477,7 @@ u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
+u16 __read_mostly tlb_lld_1g[NR_INFO];
/*
* tlb_flushall_shift shows the balance point in replacing cr3 write
@@ -486,13 +492,13 @@ void cpu_detect_tlb(struct cpuinfo_x86 *c)
if (this_cpu->c_detect_tlb)
this_cpu->c_detect_tlb(c);
- printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n" \
- "Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d\n" \
+ printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n"
+ "Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n"
"tlb_flushall_shift: %d\n",
tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES],
tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES],
- tlb_flushall_shift);
+ tlb_lld_1g[ENTRIES], tlb_flushall_shift);
}
void detect_ht(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index d0969c75ab54..aaf152e79637 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -1,4 +1,3 @@
-#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index dc1ec0dff939..5cd9bfabd645 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -1,4 +1,3 @@
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -93,7 +92,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
if (!check_tsc_unstable())
- sched_clock_stable = 1;
+ set_sched_clock_stable();
}
/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
@@ -387,7 +386,8 @@ static void init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_PEBS);
}
- if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
+ if (c->x86 == 6 && cpu_has_clflush &&
+ (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
#ifdef CONFIG_X86_64
@@ -505,6 +505,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
#define TLB_DATA0_2M_4M 0x23
#define STLB_4K 0x41
+#define STLB_4K_2M 0x42
static const struct _tlb_table intel_tlb_table[] = {
{ 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
@@ -525,13 +526,20 @@ static const struct _tlb_table intel_tlb_table[] = {
{ 0x5b, TLB_DATA_4K_4M, 64, " TLB_DATA 4 KByte and 4 MByte pages" },
{ 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" },
{ 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
+ { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, fully associative" },
+ { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
+ { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
{ 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" },
{ 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
{ 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" },
{ 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" },
{ 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" },
+ { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set associative" },
+ { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set associative" },
{ 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" },
{ 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
+ { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" },
+ { 0xc2, TLB_DATA_2M_4M, 16, " DTLB 2 MByte/4MByte pages, 4-way associative" },
{ 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" },
{ 0x00, 0, 0 }
};
@@ -557,6 +565,20 @@ static void intel_tlb_lookup(const unsigned char desc)
if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
break;
+ case STLB_4K_2M:
+ if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
+ if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
+ if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
+ if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
+ if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
+ if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
+ break;
case TLB_INST_ALL:
if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
@@ -602,6 +624,10 @@ static void intel_tlb_lookup(const unsigned char desc)
if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
break;
+ case TLB_DATA_1G:
+ if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
+ tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
+ break;
}
}
@@ -614,21 +640,17 @@ static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
case 0x61d: /* six-core 45 nm xeon "Dunnington" */
tlb_flushall_shift = -1;
break;
+ case 0x63a: /* Ivybridge */
+ tlb_flushall_shift = 2;
+ break;
case 0x61a: /* 45 nm nehalem, "Bloomfield" */
case 0x61e: /* 45 nm nehalem, "Lynnfield" */
case 0x625: /* 32 nm nehalem, "Clarkdale" */
case 0x62c: /* 32 nm nehalem, "Gulftown" */
case 0x62e: /* 45 nm nehalem-ex, "Beckton" */
case 0x62f: /* 32 nm Xeon E7 */
- tlb_flushall_shift = 6;
- break;
case 0x62a: /* SandyBridge */
case 0x62d: /* SandyBridge, "Romely-EP" */
- tlb_flushall_shift = 5;
- break;
- case 0x63a: /* Ivybridge */
- tlb_flushall_shift = 1;
- break;
default:
tlb_flushall_shift = 6;
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c
index de8b60a53f69..a1aef9533154 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-apei.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c
@@ -33,22 +33,28 @@
#include <linux/acpi.h>
#include <linux/cper.h>
#include <acpi/apei.h>
+#include <acpi/ghes.h>
#include <asm/mce.h>
#include "mce-internal.h"
-void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err)
+void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err)
{
struct mce m;
- /* Only corrected MC is reported */
- if (!corrected || !(mem_err->validation_bits & CPER_MEM_VALID_PA))
+ if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
return;
mce_setup(&m);
m.bank = 1;
- /* Fake a memory read corrected error with unknown channel */
+ /* Fake a memory read error with unknown channel */
m.status = MCI_STATUS_VAL | MCI_STATUS_EN | MCI_STATUS_ADDRV | 0x9f;
+
+ if (severity >= GHES_SEV_RECOVERABLE)
+ m.status |= MCI_STATUS_UC;
+ if (severity >= GHES_SEV_PANIC)
+ m.status |= MCI_STATUS_PCC;
+
m.addr = mem_err->physical_addr;
mce_log(&m);
mce_notify_irq();
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index b3218cdee95f..4d5419b249da 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1638,15 +1638,15 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
static void mce_start_timer(unsigned int cpu, struct timer_list *t)
{
- unsigned long iv = mce_adjust_timer(check_interval * HZ);
-
- __this_cpu_write(mce_next_interval, iv);
+ unsigned long iv = check_interval * HZ;
if (mca_cfg.ignore_ce || !iv)
return;
+ per_cpu(mce_next_interval, cpu) = iv;
+
t->expires = round_jiffies(jiffies + iv);
- add_timer_on(t, smp_processor_id());
+ add_timer_on(t, cpu);
}
static void __mcheck_cpu_init_timer(void)
@@ -2272,8 +2272,10 @@ static int mce_device_create(unsigned int cpu)
dev->release = &mce_device_release;
err = device_register(dev);
- if (err)
+ if (err) {
+ put_device(dev);
return err;
+ }
for (i = 0; mce_device_attrs[i]; i++) {
err = device_create_file(dev, mce_device_attrs[i]);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 4cfe0458ca66..fb6156fee6f7 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -6,7 +6,6 @@
*/
#include <linux/gfp.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/sched.h>
diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
index 1c044b1ccc59..a3042989398c 100644
--- a/arch/x86/kernel/cpu/mcheck/p5.c
+++ b/arch/x86/kernel/cpu/mcheck/p5.c
@@ -5,7 +5,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <asm/processor.h>
diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
index e9a701aecaa1..7dc5564d0cdf 100644
--- a/arch/x86/kernel/cpu/mcheck/winchip.c
+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
@@ -5,7 +5,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/init.h>
#include <asm/processor.h>
#include <asm/mce.h>
diff --git a/arch/x86/kernel/cpu/microcode/Makefile b/arch/x86/kernel/cpu/microcode/Makefile
new file mode 100644
index 000000000000..285c85427c32
--- /dev/null
+++ b/arch/x86/kernel/cpu/microcode/Makefile
@@ -0,0 +1,7 @@
+microcode-y := core.o
+obj-$(CONFIG_MICROCODE) += microcode.o
+microcode-$(CONFIG_MICROCODE_INTEL) += intel.o intel_lib.o
+microcode-$(CONFIG_MICROCODE_AMD) += amd.o
+obj-$(CONFIG_MICROCODE_EARLY) += core_early.o
+obj-$(CONFIG_MICROCODE_INTEL_EARLY) += intel_early.o
+obj-$(CONFIG_MICROCODE_AMD_EARLY) += amd_early.o
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index c3d4cc972eca..8fffd845e22b 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -182,10 +182,10 @@ int __apply_microcode_amd(struct microcode_amd *mc_amd)
{
u32 rev, dummy;
- wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);
+ native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);
/* verify patch application was successful */
- rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
+ native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
if (rev != mc_amd->hdr.patch_id)
return -1;
@@ -332,6 +332,9 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
patch->patch_id = mc_hdr->patch_id;
patch->equiv_cpu = proc_id;
+ pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
+ __func__, patch->patch_id, proc_id);
+
/* ... and add to cache. */
update_cache(patch);
@@ -390,9 +393,9 @@ enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) {
struct ucode_patch *p = find_patch(smp_processor_id());
if (p) {
- memset(amd_bsp_mpb, 0, MPB_MAX_SIZE);
- memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data),
- MPB_MAX_SIZE));
+ memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
+ memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
+ PATCH_MAX_SIZE));
}
}
#endif
@@ -430,7 +433,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
if (c->x86 >= 0x15)
snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
- if (request_firmware(&fw, (const char *)fw_name, device)) {
+ if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
pr_debug("failed to load file %s\n", fw_name);
goto out;
}
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c
index 6073104ccaa3..617a9e284245 100644
--- a/arch/x86/kernel/microcode_amd_early.c
+++ b/arch/x86/kernel/cpu/microcode/amd_early.c
@@ -2,6 +2,7 @@
* Copyright (C) 2013 Advanced Micro Devices, Inc.
*
* Author: Jacob Shin <jacob.shin@amd.com>
+ * Fixes: Borislav Petkov <bp@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -15,10 +16,18 @@
#include <asm/setup.h>
#include <asm/microcode_amd.h>
-static bool ucode_loaded;
+/*
+ * This points to the current valid container of microcode patches which we will
+ * save from the initrd before jettisoning its contents.
+ */
+static u8 *container;
+static size_t container_size;
+
static u32 ucode_new_rev;
-static unsigned long ucode_offset;
-static size_t ucode_size;
+u8 amd_ucode_patch[PATCH_MAX_SIZE];
+static u16 this_equiv_id;
+
+struct cpio_data ucode_cpio;
/*
* Microcode patch container file is prepended to the initrd in cpio format.
@@ -32,9 +41,6 @@ static struct cpio_data __init find_ucode_in_initrd(void)
char *path;
void *start;
size_t size;
- unsigned long *uoffset;
- size_t *usize;
- struct cpio_data cd;
#ifdef CONFIG_X86_32
struct boot_params *p;
@@ -47,30 +53,50 @@ static struct cpio_data __init find_ucode_in_initrd(void)
path = (char *)__pa_nodebug(ucode_path);
start = (void *)p->hdr.ramdisk_image;
size = p->hdr.ramdisk_size;
- uoffset = (unsigned long *)__pa_nodebug(&ucode_offset);
- usize = (size_t *)__pa_nodebug(&ucode_size);
#else
path = ucode_path;
start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
size = boot_params.hdr.ramdisk_size;
- uoffset = &ucode_offset;
- usize = &ucode_size;
#endif
- cd = find_cpio_data(path, start, size, &offset);
- if (!cd.data)
- return cd;
+ return find_cpio_data(path, start, size, &offset);
+}
- if (*(u32 *)cd.data != UCODE_MAGIC) {
- cd.data = NULL;
- cd.size = 0;
- return cd;
- }
+static size_t compute_container_size(u8 *data, u32 total_size)
+{
+ size_t size = 0;
+ u32 *header = (u32 *)data;
+
+ if (header[0] != UCODE_MAGIC ||
+ header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
+ header[2] == 0) /* size */
+ return size;
+
+ size = header[2] + CONTAINER_HDR_SZ;
+ total_size -= size;
+ data += size;
- *uoffset = (u8 *)cd.data - (u8 *)start;
- *usize = cd.size;
+ while (total_size) {
+ u16 patch_size;
+
+ header = (u32 *)data;
+
+ if (header[0] != UCODE_UCODE_TYPE)
+ break;
+
+ /*
+ * Sanity-check patch size.
+ */
+ patch_size = header[1];
+ if (patch_size > PATCH_MAX_SIZE)
+ break;
+
+ size += patch_size + SECTION_HDR_SIZE;
+ data += patch_size + SECTION_HDR_SIZE;
+ total_size -= patch_size + SECTION_HDR_SIZE;
+ }
- return cd;
+ return size;
}
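compute_container_size() above simply walks the blob: it accounts for the equivalence-table header, then keeps adding each patch section's size plus its section header until the data runs out or stops looking like a patch. A hedged user-space sketch of the same walk over a synthetic buffer, treating the magic numbers and header sizes as stand-ins for the kernel's container layout, is:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for the kernel's container constants. */
#define MAGIC		0x00414d44u	/* UCODE_MAGIC-style marker */
#define EQ_TABLE_TYPE	0x00000000u
#define PATCH_TYPE	0x00000001u
#define HDR_SZ		12u		/* 3 x u32 container header */
#define SEC_HDR_SZ	8u		/* 2 x u32 per-patch section header */

static uint32_t read_u32(const uint8_t *p)
{
	uint32_t v;
	memcpy(&v, p, sizeof(v));	/* alignment-safe load */
	return v;
}

static size_t container_size(const uint8_t *data, uint32_t total)
{
	size_t size;

	if (read_u32(data) != MAGIC || read_u32(data + 4) != EQ_TABLE_TYPE ||
	    read_u32(data + 8) == 0)
		return 0;

	size   = read_u32(data + 8) + HDR_SZ;	/* equivalence table + header */
	total -= size;
	data  += size;

	while (total >= SEC_HDR_SZ) {
		uint32_t psize;

		if (read_u32(data) != PATCH_TYPE)
			break;
		psize = read_u32(data + 4);
		if (psize + SEC_HDR_SZ > total)	/* sanity check, as in the kernel */
			break;
		size  += psize + SEC_HDR_SZ;
		data  += psize + SEC_HDR_SZ;
		total -= psize + SEC_HDR_SZ;
	}
	return size;
}

int main(void)
{
	/* One 8-byte equivalence table followed by one 16-byte patch section. */
	uint8_t buf[64] = { 0 };
	uint32_t hdr[3] = { MAGIC, EQ_TABLE_TYPE, 8 };
	uint32_t sec[2] = { PATCH_TYPE, 16 };

	memcpy(buf, hdr, sizeof(hdr));
	memcpy(buf + HDR_SZ + 8, sec, sizeof(sec));

	printf("container size: %zu\n", container_size(buf, sizeof(buf)));	/* 44 */
	return 0;
}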
/*
@@ -85,23 +111,22 @@ static struct cpio_data __init find_ucode_in_initrd(void)
static void apply_ucode_in_initrd(void *ucode, size_t size)
{
struct equiv_cpu_entry *eq;
+ size_t *cont_sz;
u32 *header;
- u8 *data;
+ u8 *data, **cont;
u16 eq_id = 0;
int offset, left;
- u32 rev, eax;
+ u32 rev, eax, ebx, ecx, edx;
u32 *new_rev;
- unsigned long *uoffset;
- size_t *usize;
#ifdef CONFIG_X86_32
new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
- uoffset = (unsigned long *)__pa_nodebug(&ucode_offset);
- usize = (size_t *)__pa_nodebug(&ucode_size);
+ cont_sz = (size_t *)__pa_nodebug(&container_size);
+ cont = (u8 **)__pa_nodebug(&container);
#else
new_rev = &ucode_new_rev;
- uoffset = &ucode_offset;
- usize = &ucode_size;
+ cont_sz = &container_size;
+ cont = &container;
#endif
data = ucode;
@@ -109,23 +134,37 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
header = (u32 *)data;
/* find equiv cpu table */
-
- if (header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
+ if (header[0] != UCODE_MAGIC ||
+ header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
header[2] == 0) /* size */
return;
- eax = cpuid_eax(0x00000001);
+ eax = 0x00000001;
+ ecx = 0;
+ native_cpuid(&eax, &ebx, &ecx, &edx);
while (left > 0) {
eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);
+ *cont = data;
+
+ /* Advance past the container header */
offset = header[2] + CONTAINER_HDR_SZ;
data += offset;
left -= offset;
eq_id = find_equiv_id(eq, eax);
- if (eq_id)
+ if (eq_id) {
+ this_equiv_id = eq_id;
+ *cont_sz = compute_container_size(*cont, left + offset);
+
+ /*
+ * truncate how much we need to iterate over in the
+ * ucode update loop below
+ */
+ left = *cont_sz - offset;
break;
+ }
/*
* support multiple container files appended together. if this
@@ -145,19 +184,18 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
/* mark where the next microcode container file starts */
offset = data - (u8 *)ucode;
- *uoffset += offset;
- *usize -= offset;
ucode = data;
}
if (!eq_id) {
- *usize = 0;
+ *cont = NULL;
+ *cont_sz = 0;
return;
}
/* find ucode and update if needed */
- rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
+ native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
while (left > 0) {
struct microcode_amd *mc;
@@ -168,134 +206,190 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
break;
mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);
- if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id)
- if (__apply_microcode_amd(mc) == 0) {
+
+ if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {
+
+ if (!__apply_microcode_amd(mc)) {
rev = mc->hdr.patch_id;
*new_rev = rev;
+
+ /* save ucode patch */
+ memcpy(amd_ucode_patch, mc,
+ min_t(u32, header[1], PATCH_MAX_SIZE));
}
+ }
offset = header[1] + SECTION_HDR_SIZE;
data += offset;
left -= offset;
}
-
- /* mark where this microcode container file ends */
- offset = *usize - (data - (u8 *)ucode);
- *usize -= offset;
-
- if (!(*new_rev))
- *usize = 0;
}
void __init load_ucode_amd_bsp(void)
{
- struct cpio_data cd = find_ucode_in_initrd();
- if (!cd.data)
+ struct cpio_data cp;
+ void **data;
+ size_t *size;
+
+#ifdef CONFIG_X86_32
+ data = (void **)__pa_nodebug(&ucode_cpio.data);
+ size = (size_t *)__pa_nodebug(&ucode_cpio.size);
+#else
+ data = &ucode_cpio.data;
+ size = &ucode_cpio.size;
+#endif
+
+ cp = find_ucode_in_initrd();
+ if (!cp.data)
return;
- apply_ucode_in_initrd(cd.data, cd.size);
+ *data = cp.data;
+ *size = cp.size;
+
+ apply_ucode_in_initrd(cp.data, cp.size);
}
#ifdef CONFIG_X86_32
-u8 amd_bsp_mpb[MPB_MAX_SIZE];
-
/*
* On 32-bit, since AP's early load occurs before paging is turned on, we
* cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
* cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
- * save_microcode_in_initrd_amd() BSP's patch is copied to amd_bsp_mpb, which
- * is used upon resume from suspend.
+ * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
+ * which is used upon resume from suspend.
*/
void load_ucode_amd_ap(void)
{
struct microcode_amd *mc;
- unsigned long *initrd;
- unsigned long *uoffset;
size_t *usize;
- void *ucode;
+ void **ucode;
- mc = (struct microcode_amd *)__pa(amd_bsp_mpb);
+ mc = (struct microcode_amd *)__pa(amd_ucode_patch);
if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
__apply_microcode_amd(mc);
return;
}
- initrd = (unsigned long *)__pa(&initrd_start);
- uoffset = (unsigned long *)__pa(&ucode_offset);
- usize = (size_t *)__pa(&ucode_size);
+ ucode = (void *)__pa_nodebug(&container);
+ usize = (size_t *)__pa_nodebug(&container_size);
- if (!*usize || !*initrd)
+ if (!*ucode || !*usize)
return;
- ucode = (void *)((unsigned long)__pa(*initrd) + *uoffset);
- apply_ucode_in_initrd(ucode, *usize);
+ apply_ucode_in_initrd(*ucode, *usize);
}
static void __init collect_cpu_sig_on_bsp(void *arg)
{
unsigned int cpu = smp_processor_id();
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+
uci->cpu_sig.sig = cpuid_eax(0x00000001);
}
+
+static void __init get_bsp_sig(void)
+{
+ unsigned int bsp = boot_cpu_data.cpu_index;
+ struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
+
+ if (!uci->cpu_sig.sig)
+ smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
+}
#else
void load_ucode_amd_ap(void)
{
unsigned int cpu = smp_processor_id();
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+ struct equiv_cpu_entry *eq;
+ struct microcode_amd *mc;
u32 rev, eax;
+ u16 eq_id;
+
+ /* Exit if called on the BSP. */
+ if (!cpu)
+ return;
+
+ if (!container)
+ return;
rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
- eax = cpuid_eax(0x00000001);
uci->cpu_sig.rev = rev;
uci->cpu_sig.sig = eax;
- if (cpu && !ucode_loaded) {
- void *ucode;
+ eax = cpuid_eax(0x00000001);
+ eq = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);
+
+ eq_id = find_equiv_id(eq, eax);
+ if (!eq_id)
+ return;
- if (!ucode_size || !initrd_start)
- return;
+ if (eq_id == this_equiv_id) {
+ mc = (struct microcode_amd *)amd_ucode_patch;
+
+ if (mc && rev < mc->hdr.patch_id) {
+ if (!__apply_microcode_amd(mc))
+ ucode_new_rev = mc->hdr.patch_id;
+ }
- ucode = (void *)(initrd_start + ucode_offset);
- eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
- if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK)
+ } else {
+ if (!ucode_cpio.data)
return;
- ucode_loaded = true;
+ /*
+ * The AP has a different equivalence ID than the BSP; this looks like
+ * mixed-stepping silicon, so go through the ucode blob anew.
+ */
+ apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size);
}
-
- apply_microcode_amd(cpu);
}
#endif
int __init save_microcode_in_initrd_amd(void)
{
+ unsigned long cont;
enum ucode_state ret;
- void *ucode;
u32 eax;
-#ifdef CONFIG_X86_32
- unsigned int bsp = boot_cpu_data.cpu_index;
- struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
+ if (!container)
+ return -EINVAL;
- if (!uci->cpu_sig.sig)
- smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
+#ifdef CONFIG_X86_32
+ get_bsp_sig();
+ cont = (unsigned long)container;
+#else
+ /*
+ * We need the physical address of the container in both the 32-bit and
+ * 64-bit case, since boot_params.hdr.ramdisk_image is a physical address.
+ */
+ cont = __pa(container);
#endif
+
+ /*
+ * Take into account the fact that the ramdisk might get relocated and
+ * therefore we need to recompute the container's position in virtual
+ * memory space.
+ */
+ if (relocated_ramdisk)
+ container = (u8 *)(__va(relocated_ramdisk) +
+ (cont - boot_params.hdr.ramdisk_image));
+
if (ucode_new_rev)
pr_info("microcode: updated early to new patch_level=0x%08x\n",
ucode_new_rev);
- if (ucode_loaded || !ucode_size || !initrd_start)
- return 0;
-
- ucode = (void *)(initrd_start + ucode_offset);
eax = cpuid_eax(0x00000001);
eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
- ret = load_microcode_amd(eax, ucode, ucode_size);
+ ret = load_microcode_amd(eax, container, container_size);
if (ret != UCODE_OK)
return -EINVAL;
- ucode_loaded = true;
+ /*
+ * The initrd will be freed soon, so stash the patches for the current
+ * family here and switch to the patch cache for cpu hotplug etc. later.
+ */
+ container = NULL;
+ container_size = 0;
+
return 0;
}
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/cpu/microcode/core.c
index 15c987698b0f..15c987698b0f 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c
index be7f8514f577..be7f8514f577 100644
--- a/arch/x86/kernel/microcode_core_early.c
+++ b/arch/x86/kernel/cpu/microcode/core_early.c
diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 5fb2cebf556b..a276fa75d9b5 100644
--- a/arch/x86/kernel/microcode_intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -278,7 +278,7 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
sprintf(name, "intel-ucode/%02x-%02x-%02x",
c->x86, c->x86_model, c->x86_mask);
- if (request_firmware(&firmware, name, device)) {
+ if (request_firmware_direct(&firmware, name, device)) {
pr_debug("data file %s load failed\n", name);
return UCODE_NFOUND;
}
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c
index 1575deb2e636..18f739129e72 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/cpu/microcode/intel_early.c
@@ -365,16 +365,6 @@ out:
return state;
}
-#define native_rdmsr(msr, val1, val2) \
-do { \
- u64 __val = native_read_msr((msr)); \
- (void)((val1) = (u32)__val); \
- (void)((val2) = (u32)(__val >> 32)); \
-} while (0)
-
-#define native_wrmsr(msr, low, high) \
- native_write_msr(msr, low, high);
-
static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
unsigned int val[2];
diff --git a/arch/x86/kernel/microcode_intel_lib.c b/arch/x86/kernel/cpu/microcode/intel_lib.c
index ce69320d0179..ce69320d0179 100644
--- a/arch/x86/kernel/microcode_intel_lib.c
+++ b/arch/x86/kernel/cpu/microcode/intel_lib.c
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index ce2d0a2c3e4f..0e25a1bc5ab5 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -683,7 +683,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
}
/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
- count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
__flush_tlb();
/* Save MTRR state */
@@ -697,7 +697,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
static void post_set(void) __releases(set_atomicity_lock)
{
/* Flush TLBs (no need to flush caches - they are disabled) */
- count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
__flush_tlb();
/* Intel (P6) standard MTRRs */
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8e132931614d..b88645191fe5 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1883,21 +1883,27 @@ static struct pmu pmu = {
void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
{
+ struct cyc2ns_data *data;
+
userpg->cap_user_time = 0;
userpg->cap_user_time_zero = 0;
userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
userpg->pmc_width = x86_pmu.cntval_bits;
- if (!sched_clock_stable)
+ if (!sched_clock_stable())
return;
+ data = cyc2ns_read_begin();
+
userpg->cap_user_time = 1;
- userpg->time_mult = this_cpu_read(cyc2ns);
- userpg->time_shift = CYC2NS_SCALE_FACTOR;
- userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
+ userpg->time_mult = data->cyc2ns_mul;
+ userpg->time_shift = data->cyc2ns_shift;
+ userpg->time_offset = data->cyc2ns_offset - now;
userpg->cap_user_time_zero = 1;
- userpg->time_zero = this_cpu_read(cyc2ns_offset);
+ userpg->time_zero = data->cyc2ns_offset;
+
+ cyc2ns_read_end(data);
}
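A hedged user-space sketch of how the fields filled in above are meant to be consumed; it follows the conversion recipe documented for struct perf_event_mmap_page, and the function name and parameter list are local to the sketch.

        #include <stdint.h>

        /*
         * Nanoseconds since time_enabled for a TSC reading 'cyc'. The
         * quotient/remainder split avoids overflowing the 64-bit multiply.
         */
        static uint64_t mmap_page_cyc_to_ns(uint64_t cyc, uint32_t time_mult,
                                            uint16_t time_shift, uint64_t time_offset)
        {
                uint64_t quot = cyc >> time_shift;
                uint64_t rem  = cyc & (((uint64_t)1 << time_shift) - 1);

                return time_offset + quot * time_mult +
                       ((rem * time_mult) >> time_shift);
        }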
/*
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index fd00bb29425d..c1a861829d81 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -262,11 +262,20 @@ struct cpu_hw_events {
__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
-#define EVENT_CONSTRAINT_END \
- EVENT_CONSTRAINT(0, 0, 0)
+/*
+ * We define the end marker as having a weight of -1
+ * to enable blacklisting of events using a counter bitmask
+ * of zero and thus a weight of zero.
+ * The end marker has a weight that cannot possibly be
+ * obtained from counting the bits in the bitmask.
+ */
+#define EVENT_CONSTRAINT_END { .weight = -1 }
+/*
+ * Check for end marker with weight == -1
+ */
#define for_each_event_constraint(e, c) \
- for ((e) = (c); (e)->weight; (e)++)
+ for ((e) = (c); (e)->weight != -1; (e)++)
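An illustrative table (not part of this patch) showing what the new marker allows: an entry with an empty counter bitmask, i.e. weight 0, can now sit in a constraint table to blacklist an event, and iteration stops only at the -1 weight terminator. The event codes below are placeholders.

        static struct event_constraint demo_constraints[] __maybe_unused = {
                INTEL_EVENT_CONSTRAINT(0x3c, 0x3),                  /* allowed on counters 0-1 */
                EVENT_CONSTRAINT(0xd0, 0x0, INTEL_ARCH_EVENT_MASK), /* weight 0: blacklisted   */
                EVENT_CONSTRAINT_END                                /* weight -1: end of table */
        };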
/*
* Extra registers for specific events.
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index e09f0bfb7b8f..4b8e4d3cd6ea 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
+#include <linux/syscore_ops.h>
#include <asm/apic.h>
@@ -816,6 +817,18 @@ out:
return ret;
}
+static void ibs_eilvt_setup(void)
+{
+ /*
+ * Force LVT offset assignment for family 10h: The offsets are
+ * not assigned by the BIOS for this family, so the OS is
+ * responsible for doing it. If the OS assignment fails, fall
+ * back to BIOS settings and try to setup this.
+ */
+ if (boot_cpu_data.x86 == 0x10)
+ force_ibs_eilvt_setup();
+}
+
static inline int get_ibs_lvt_offset(void)
{
u64 val;
@@ -851,6 +864,36 @@ static void clear_APIC_ibs(void *dummy)
setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}
+#ifdef CONFIG_PM
+
+static int perf_ibs_suspend(void)
+{
+ clear_APIC_ibs(NULL);
+ return 0;
+}
+
+static void perf_ibs_resume(void)
+{
+ ibs_eilvt_setup();
+ setup_APIC_ibs(NULL);
+}
+
+static struct syscore_ops perf_ibs_syscore_ops = {
+ .resume = perf_ibs_resume,
+ .suspend = perf_ibs_suspend,
+};
+
+static void perf_ibs_pm_init(void)
+{
+ register_syscore_ops(&perf_ibs_syscore_ops);
+}
+
+#else
+
+static inline void perf_ibs_pm_init(void) { }
+
+#endif
+
static int
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
@@ -877,18 +920,12 @@ static __init int amd_ibs_init(void)
if (!caps)
return -ENODEV; /* ibs not supported by the cpu */
- /*
- * Force LVT offset assignment for family 10h: The offsets are
- * not assigned by the BIOS for this family, so the OS is
- * responsible for doing it. If the OS assignment fails, fall
- * back to BIOS settings and try to setup this.
- */
- if (boot_cpu_data.x86 == 0x10)
- force_ibs_eilvt_setup();
+ ibs_eilvt_setup();
if (!ibs_eilvt_valid())
goto out;
+ perf_ibs_pm_init();
get_online_cpus();
ibs_caps = caps;
/* make ibs_caps visible to other cpus: */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
new file mode 100644
index 000000000000..5ad35ad94d0f
--- /dev/null
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -0,0 +1,679 @@
+/*
+ * perf_event_intel_rapl.c: support Intel RAPL energy consumption counters
+ * Copyright (C) 2013 Google, Inc., Stephane Eranian
+ *
+ * Intel RAPL interface is specified in the IA-32 Manual Vol3b
+ * section 14.7.1 (September 2013)
+ *
+ * RAPL provides more controls than just reporting energy consumption
+ * however here we only expose the 3 energy consumption free running
+ * counters (pp0, pkg, dram).
+ *
+ * Each of those counters increments in a power unit defined by the
+ * RAPL_POWER_UNIT MSR. On SandyBridge, this unit is 1/(2^16) Joules
+ * but it can vary.
+ *
+ * Counter to rapl events mappings:
+ *
+ * pp0 counter: consumption of all physical cores (power plane 0)
+ * event: rapl_energy_cores
+ * perf code: 0x1
+ *
+ * pkg counter: consumption of the whole processor package
+ * event: rapl_energy_pkg
+ * perf code: 0x2
+ *
+ * dram counter: consumption of the dram domain (servers only)
+ * event: rapl_energy_dram
+ * perf code: 0x3
+ *
+ * gpu counter: consumption of the builtin-gpu domain (client only)
+ * event: rapl_energy_gpu
+ * perf code: 0x4
+ *
+ * We manage those counters as free running (read-only). They may be
+ * used simultaneously by other tools, such as turbostat.
+ *
+ * The events only support system-wide mode counting. There is no
+ * sampling support because it does not make sense and is not
+ * supported by the RAPL hardware.
+ *
+ * Because we want to avoid floating-point operations in the kernel,
+ * the events are all reported in fixed point arithmetic (32.32).
+ * Tools must adjust the counts to convert them to Watts using
+ * the duration of the measurement. Tools may use a function such as
+ * ldexp(raw_count, -32);
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/perf_event.h>
+#include <asm/cpu_device_id.h>
+#include "perf_event.h"
+
+/*
+ * RAPL energy status counters
+ */
+#define RAPL_IDX_PP0_NRG_STAT 0 /* all cores */
+#define INTEL_RAPL_PP0 0x1 /* pseudo-encoding */
+#define RAPL_IDX_PKG_NRG_STAT 1 /* entire package */
+#define INTEL_RAPL_PKG 0x2 /* pseudo-encoding */
+#define RAPL_IDX_RAM_NRG_STAT 2 /* DRAM */
+#define INTEL_RAPL_RAM 0x3 /* pseudo-encoding */
+#define RAPL_IDX_PP1_NRG_STAT 3 /* gpu */
+#define INTEL_RAPL_PP1 0x4 /* pseudo-encoding */
+
+/* Clients have PP0, PKG */
+#define RAPL_IDX_CLN (1<<RAPL_IDX_PP0_NRG_STAT|\
+ 1<<RAPL_IDX_PKG_NRG_STAT|\
+ 1<<RAPL_IDX_PP1_NRG_STAT)
+
+/* Servers have PP0, PKG, RAM */
+#define RAPL_IDX_SRV (1<<RAPL_IDX_PP0_NRG_STAT|\
+ 1<<RAPL_IDX_PKG_NRG_STAT|\
+ 1<<RAPL_IDX_RAM_NRG_STAT)
+
+/*
+ * event code: LSB 8 bits, passed in attr->config
+ * any other bit is reserved
+ */
+#define RAPL_EVENT_MASK 0xFFULL
+
+#define DEFINE_RAPL_FORMAT_ATTR(_var, _name, _format) \
+static ssize_t __rapl_##_var##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ char *page) \
+{ \
+ BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \
+ return sprintf(page, _format "\n"); \
+} \
+static struct kobj_attribute format_attr_##_var = \
+ __ATTR(_name, 0444, __rapl_##_var##_show, NULL)
+
+#define RAPL_EVENT_DESC(_name, _config) \
+{ \
+ .attr = __ATTR(_name, 0444, rapl_event_show, NULL), \
+ .config = _config, \
+}
+
+#define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */
+
+struct rapl_pmu {
+ spinlock_t lock;
+ int hw_unit; /* 1/2^hw_unit Joule */
+ int n_active; /* number of active events */
+ struct list_head active_list;
+ struct pmu *pmu; /* pointer to rapl_pmu_class */
+ ktime_t timer_interval; /* in ktime_t unit */
+ struct hrtimer hrtimer;
+};
+
+static struct pmu rapl_pmu_class;
+static cpumask_t rapl_cpu_mask;
+static int rapl_cntr_mask;
+
+static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu);
+static DEFINE_PER_CPU(struct rapl_pmu *, rapl_pmu_to_free);
+
+static inline u64 rapl_read_counter(struct perf_event *event)
+{
+ u64 raw;
+ rdmsrl(event->hw.event_base, raw);
+ return raw;
+}
+
+static inline u64 rapl_scale(u64 v)
+{
+ /*
+ * scale delta to smallest unit (1/2^32)
+ * users must then scale back: count * 1/2^32 to get Joules
+ * or use ldexp(count, -32).
+ * Watts = Joules/Time delta
+ */
+ return v << (32 - __get_cpu_var(rapl_pmu)->hw_unit);
+}
+
+static u64 rapl_event_update(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev_raw_count, new_raw_count;
+ s64 delta, sdelta;
+ int shift = RAPL_CNTR_WIDTH;
+
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ rdmsrl(event->hw.event_base, new_raw_count);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count) {
+ cpu_relax();
+ goto again;
+ }
+
+ /*
+ * Now we have the new raw value and have updated the prev
+ * timestamp already. We can now calculate the elapsed delta
+ * (event-)time and add that to the generic event.
+ *
+ * Careful, not all hw sign-extends above the physical width
+ * of the count.
+ */
+ delta = (new_raw_count << shift) - (prev_raw_count << shift);
+ delta >>= shift;
+
+ sdelta = rapl_scale(delta);
+
+ local64_add(sdelta, &event->count);
+
+ return new_raw_count;
+}
+
+static void rapl_start_hrtimer(struct rapl_pmu *pmu)
+{
+ __hrtimer_start_range_ns(&pmu->hrtimer,
+ pmu->timer_interval, 0,
+ HRTIMER_MODE_REL_PINNED, 0);
+}
+
+static void rapl_stop_hrtimer(struct rapl_pmu *pmu)
+{
+ hrtimer_cancel(&pmu->hrtimer);
+}
+
+static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
+{
+ struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+ struct perf_event *event;
+ unsigned long flags;
+
+ if (!pmu->n_active)
+ return HRTIMER_NORESTART;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ list_for_each_entry(event, &pmu->active_list, active_entry) {
+ rapl_event_update(event);
+ }
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ hrtimer_forward_now(hrtimer, pmu->timer_interval);
+
+ return HRTIMER_RESTART;
+}
+
+static void rapl_hrtimer_init(struct rapl_pmu *pmu)
+{
+ struct hrtimer *hr = &pmu->hrtimer;
+
+ hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hr->function = rapl_hrtimer_handle;
+}
+
+static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
+ struct perf_event *event)
+{
+ if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+ return;
+
+ event->hw.state = 0;
+
+ list_add_tail(&event->active_entry, &pmu->active_list);
+
+ local64_set(&event->hw.prev_count, rapl_read_counter(event));
+
+ pmu->n_active++;
+ if (pmu->n_active == 1)
+ rapl_start_hrtimer(pmu);
+}
+
+static void rapl_pmu_event_start(struct perf_event *event, int mode)
+{
+ struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+ unsigned long flags;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+ __rapl_pmu_event_start(pmu, event);
+ spin_unlock_irqrestore(&pmu->lock, flags);
+}
+
+static void rapl_pmu_event_stop(struct perf_event *event, int mode)
+{
+ struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ /* mark event as deactivated and stopped */
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ WARN_ON_ONCE(pmu->n_active <= 0);
+ pmu->n_active--;
+ if (pmu->n_active == 0)
+ rapl_stop_hrtimer(pmu);
+
+ list_del(&event->active_entry);
+
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+ }
+
+ /* check if update of sw counter is necessary */
+ if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+ /*
+ * Drain the remaining delta count out of an event
+ * that we are disabling:
+ */
+ rapl_event_update(event);
+ hwc->state |= PERF_HES_UPTODATE;
+ }
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+}
+
+static int rapl_pmu_event_add(struct perf_event *event, int mode)
+{
+ struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pmu->lock, flags);
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ if (mode & PERF_EF_START)
+ __rapl_pmu_event_start(pmu, event);
+
+ spin_unlock_irqrestore(&pmu->lock, flags);
+
+ return 0;
+}
+
+static void rapl_pmu_event_del(struct perf_event *event, int flags)
+{
+ rapl_pmu_event_stop(event, PERF_EF_UPDATE);
+}
+
+static int rapl_pmu_event_init(struct perf_event *event)
+{
+ u64 cfg = event->attr.config & RAPL_EVENT_MASK;
+ int bit, msr, ret = 0;
+
+ /* only look at RAPL events */
+ if (event->attr.type != rapl_pmu_class.type)
+ return -ENOENT;
+
+ /* check only supported bits are set */
+ if (event->attr.config & ~RAPL_EVENT_MASK)
+ return -EINVAL;
+
+ /*
+ * check event is known (determines counter)
+ */
+ switch (cfg) {
+ case INTEL_RAPL_PP0:
+ bit = RAPL_IDX_PP0_NRG_STAT;
+ msr = MSR_PP0_ENERGY_STATUS;
+ break;
+ case INTEL_RAPL_PKG:
+ bit = RAPL_IDX_PKG_NRG_STAT;
+ msr = MSR_PKG_ENERGY_STATUS;
+ break;
+ case INTEL_RAPL_RAM:
+ bit = RAPL_IDX_RAM_NRG_STAT;
+ msr = MSR_DRAM_ENERGY_STATUS;
+ break;
+ case INTEL_RAPL_PP1:
+ bit = RAPL_IDX_PP1_NRG_STAT;
+ msr = MSR_PP1_ENERGY_STATUS;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* check event supported */
+ if (!(rapl_cntr_mask & (1 << bit)))
+ return -EINVAL;
+
+ /* unsupported modes and filters */
+ if (event->attr.exclude_user ||
+ event->attr.exclude_kernel ||
+ event->attr.exclude_hv ||
+ event->attr.exclude_idle ||
+ event->attr.exclude_host ||
+ event->attr.exclude_guest ||
+ event->attr.sample_period) /* no sampling */
+ return -EINVAL;
+
+ /* must be done before validate_group */
+ event->hw.event_base = msr;
+ event->hw.config = cfg;
+ event->hw.idx = bit;
+
+ return ret;
+}
+
+static void rapl_pmu_event_read(struct perf_event *event)
+{
+ rapl_event_update(event);
+}
+
+static ssize_t rapl_get_attr_cpumask(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &rapl_cpu_mask);
+
+ buf[n++] = '\n';
+ buf[n] = '\0';
+ return n;
+}
+
+static DEVICE_ATTR(cpumask, S_IRUGO, rapl_get_attr_cpumask, NULL);
+
+static struct attribute *rapl_pmu_attrs[] = {
+ &dev_attr_cpumask.attr,
+ NULL,
+};
+
+static struct attribute_group rapl_pmu_attr_group = {
+ .attrs = rapl_pmu_attrs,
+};
+
+EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
+EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02");
+EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03");
+EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04");
+
+EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
+EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules");
+EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules");
+EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules");
+
+/*
+ * we report in 2^-32 Joule (~0.23 nJ) increments regardless of the MSR unit
+ */
+EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
+EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10");
+EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10");
+EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10");
+
+static struct attribute *rapl_events_srv_attr[] = {
+ EVENT_PTR(rapl_cores),
+ EVENT_PTR(rapl_pkg),
+ EVENT_PTR(rapl_ram),
+
+ EVENT_PTR(rapl_cores_unit),
+ EVENT_PTR(rapl_pkg_unit),
+ EVENT_PTR(rapl_ram_unit),
+
+ EVENT_PTR(rapl_cores_scale),
+ EVENT_PTR(rapl_pkg_scale),
+ EVENT_PTR(rapl_ram_scale),
+ NULL,
+};
+
+static struct attribute *rapl_events_cln_attr[] = {
+ EVENT_PTR(rapl_cores),
+ EVENT_PTR(rapl_pkg),
+ EVENT_PTR(rapl_gpu),
+
+ EVENT_PTR(rapl_cores_unit),
+ EVENT_PTR(rapl_pkg_unit),
+ EVENT_PTR(rapl_gpu_unit),
+
+ EVENT_PTR(rapl_cores_scale),
+ EVENT_PTR(rapl_pkg_scale),
+ EVENT_PTR(rapl_gpu_scale),
+ NULL,
+};
+
+static struct attribute_group rapl_pmu_events_group = {
+ .name = "events",
+ .attrs = NULL, /* patched at runtime */
+};
+
+DEFINE_RAPL_FORMAT_ATTR(event, event, "config:0-7");
+static struct attribute *rapl_formats_attr[] = {
+ &format_attr_event.attr,
+ NULL,
+};
+
+static struct attribute_group rapl_pmu_format_group = {
+ .name = "format",
+ .attrs = rapl_formats_attr,
+};
+
+const struct attribute_group *rapl_attr_groups[] = {
+ &rapl_pmu_attr_group,
+ &rapl_pmu_format_group,
+ &rapl_pmu_events_group,
+ NULL,
+};
+
+static struct pmu rapl_pmu_class = {
+ .attr_groups = rapl_attr_groups,
+ .task_ctx_nr = perf_invalid_context, /* system-wide only */
+ .event_init = rapl_pmu_event_init,
+ .add = rapl_pmu_event_add, /* must have */
+ .del = rapl_pmu_event_del, /* must have */
+ .start = rapl_pmu_event_start,
+ .stop = rapl_pmu_event_stop,
+ .read = rapl_pmu_event_read,
+};
+
+static void rapl_cpu_exit(int cpu)
+{
+ struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
+ int i, phys_id = topology_physical_package_id(cpu);
+ int target = -1;
+
+ /* find a new cpu on same package */
+ for_each_online_cpu(i) {
+ if (i == cpu)
+ continue;
+ if (phys_id == topology_physical_package_id(i)) {
+ target = i;
+ break;
+ }
+ }
+ /*
+ * Clear this cpu from the cpumask. If it was set and another cpu on
+ * the same package is still online, hand the role over to that cpu.
+ */
+ if (cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask) && target >= 0)
+ cpumask_set_cpu(target, &rapl_cpu_mask);
+
+ WARN_ON(cpumask_empty(&rapl_cpu_mask));
+ /*
+ * migrate events and context to new cpu
+ */
+ if (target >= 0)
+ perf_pmu_migrate_context(pmu->pmu, cpu, target);
+
+ /* cancel overflow polling timer for CPU */
+ rapl_stop_hrtimer(pmu);
+}
+
+static void rapl_cpu_init(int cpu)
+{
+ int i, phys_id = topology_physical_package_id(cpu);
+
+ /* check if phys_id is already covered */
+ for_each_cpu(i, &rapl_cpu_mask) {
+ if (phys_id == topology_physical_package_id(i))
+ return;
+ }
+ /* was not found, so add it */
+ cpumask_set_cpu(cpu, &rapl_cpu_mask);
+}
+
+static int rapl_cpu_prepare(int cpu)
+{
+ struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
+ int phys_id = topology_physical_package_id(cpu);
+ u64 ms;
+
+ if (pmu)
+ return 0;
+
+ if (phys_id < 0)
+ return -1;
+
+ pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+ if (!pmu)
+ return -1;
+
+ spin_lock_init(&pmu->lock);
+
+ INIT_LIST_HEAD(&pmu->active_list);
+
+ /*
+ * grab power unit as: 1/2^unit Joules
+ *
+ * we cache in local PMU instance
+ */
+ rdmsrl(MSR_RAPL_POWER_UNIT, pmu->hw_unit);
+ pmu->hw_unit = (pmu->hw_unit >> 8) & 0x1FULL;
+ pmu->pmu = &rapl_pmu_class;
+
+ /*
+ * Use a reference of 200 W (200 Joules/sec) to scale the polling
+ * timeout so that counter overflows are not missed, and halve the
+ * interval to avoid running in lockstep with the overflow; e.g. with
+ * hw_unit = 16 (1/65536 J) this gives roughly 164 seconds.
+ * If hw_unit is 32 or more, fall back to a 2 ms interval.
+ */
+ if (pmu->hw_unit < 32)
+ ms = (1000 / (2 * 100)) * (1ULL << (32 - pmu->hw_unit - 1));
+ else
+ ms = 2;
+
+ pmu->timer_interval = ms_to_ktime(ms);
+
+ rapl_hrtimer_init(pmu);
+
+ /* set RAPL pmu for this cpu for now */
+ per_cpu(rapl_pmu, cpu) = pmu;
+ per_cpu(rapl_pmu_to_free, cpu) = NULL;
+
+ return 0;
+}
+
+static void rapl_cpu_kfree(int cpu)
+{
+ struct rapl_pmu *pmu = per_cpu(rapl_pmu_to_free, cpu);
+
+ kfree(pmu);
+
+ per_cpu(rapl_pmu_to_free, cpu) = NULL;
+}
+
+static int rapl_cpu_dying(int cpu)
+{
+ struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu);
+
+ if (!pmu)
+ return 0;
+
+ per_cpu(rapl_pmu, cpu) = NULL;
+
+ per_cpu(rapl_pmu_to_free, cpu) = pmu;
+
+ return 0;
+}
+
+static int rapl_cpu_notifier(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (long)hcpu;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ rapl_cpu_prepare(cpu);
+ break;
+ case CPU_STARTING:
+ rapl_cpu_init(cpu);
+ break;
+ case CPU_UP_CANCELED:
+ case CPU_DYING:
+ rapl_cpu_dying(cpu);
+ break;
+ case CPU_ONLINE:
+ case CPU_DEAD:
+ rapl_cpu_kfree(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ rapl_cpu_exit(cpu);
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static const struct x86_cpu_id rapl_cpu_match[] = {
+ [0] = { .vendor = X86_VENDOR_INTEL, .family = 6 },
+ [1] = {},
+};
+
+static int __init rapl_pmu_init(void)
+{
+ struct rapl_pmu *pmu;
+ int cpu, ret;
+
+ /*
+ * check for Intel processor family 6
+ */
+ if (!x86_match_cpu(rapl_cpu_match))
+ return 0;
+
+ /* check supported CPU */
+ switch (boot_cpu_data.x86_model) {
+ case 42: /* Sandy Bridge */
+ case 58: /* Ivy Bridge */
+ case 60: /* Haswell */
+ case 69: /* Haswell-Celeron */
+ rapl_cntr_mask = RAPL_IDX_CLN;
+ rapl_pmu_events_group.attrs = rapl_events_cln_attr;
+ break;
+ case 45: /* Sandy Bridge-EP */
+ case 62: /* IvyTown */
+ rapl_cntr_mask = RAPL_IDX_SRV;
+ rapl_pmu_events_group.attrs = rapl_events_srv_attr;
+ break;
+
+ default:
+ /* unsupported */
+ return 0;
+ }
+ get_online_cpus();
+
+ for_each_online_cpu(cpu) {
+ rapl_cpu_prepare(cpu);
+ rapl_cpu_init(cpu);
+ }
+
+ perf_cpu_notifier(rapl_cpu_notifier);
+
+ ret = perf_pmu_register(&rapl_pmu_class, "power", -1);
+ if (WARN_ON(ret)) {
+ pr_info("RAPL PMU detected, registration failed (%d), RAPL PMU disabled\n", ret);
+ put_online_cpus();
+ return -1;
+ }
+
+ pmu = __get_cpu_var(rapl_pmu);
+
+ pr_info("RAPL PMU detected, hw unit 2^-%d Joules,"
+ " API unit is 2^-32 Joules,"
+ " %d fixed counters"
+ " %llu ms ovfl timer\n",
+ pmu->hw_unit,
+ hweight32(rapl_cntr_mask),
+ ktime_to_ms(pmu->timer_interval));
+
+ put_online_cpus();
+
+ return 0;
+}
+device_initcall(rapl_pmu_init);
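A hedged user-space sketch (not part of the patch) of reading the newly registered "power" PMU: the dynamic PMU type comes from sysfs, config 0x02 selects rapl_energy_pkg as listed above, and the raw count is scaled by 2^-32 to Joules. It needs sufficient perf privileges and -lm for ldexp().

        #define _GNU_SOURCE
        #include <stdio.h>
        #include <stdint.h>
        #include <string.h>
        #include <math.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/perf_event.h>

        int main(void)
        {
                struct perf_event_attr attr;
                uint64_t count;
                int type, fd;
                FILE *f;

                /* PMU type number published by perf_pmu_register(..., "power", -1) */
                f = fopen("/sys/bus/event_source/devices/power/type", "r");
                if (!f)
                        return 1;
                if (fscanf(f, "%d", &type) != 1) {
                        fclose(f);
                        return 1;
                }
                fclose(f);

                memset(&attr, 0, sizeof(attr));
                attr.type = type;
                attr.size = sizeof(attr);
                attr.config = 0x02;     /* rapl_energy_pkg, per the event list above */

                /* system-wide counting on CPU 0; sampling is rejected by the driver */
                fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
                if (fd < 0)
                        return 1;

                sleep(1);
                if (read(fd, &count, sizeof(count)) == sizeof(count))
                        printf("pkg energy over ~1s: %.6f Joules\n",
                               ldexp((double)count, -32));
                close(fd);
                return 0;
        }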
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
index 88db010845cb..384df5105fbc 100644
--- a/arch/x86/kernel/cpu/rdrand.c
+++ b/arch/x86/kernel/cpu/rdrand.c
@@ -31,20 +31,6 @@ static int __init x86_rdrand_setup(char *s)
}
__setup("nordrand", x86_rdrand_setup);
-/* We can't use arch_get_random_long() here since alternatives haven't run */
-static inline int rdrand_long(unsigned long *v)
-{
- int ok;
- asm volatile("1: " RDRAND_LONG "\n\t"
- "jc 2f\n\t"
- "decl %0\n\t"
- "jnz 1b\n\t"
- "2:"
- : "=r" (ok), "=a" (*v)
- : "0" (RDRAND_RETRY_LOOPS));
- return ok;
-}
-
/*
* Force a reseed cycle; we are architecturally guaranteed a reseed
* after no more than 512 128-bit chunks of random data. This also
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index aa0430d69b90..3fa0e5ad86b4 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -1,6 +1,5 @@
#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/init.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include "cpu.h"
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
index 75c5ad5d35cc..ef9c2a0078bd 100644
--- a/arch/x86/kernel/cpu/umc.c
+++ b/arch/x86/kernel/cpu/umc.c
@@ -1,5 +1,4 @@
#include <linux/kernel.h>
-#include <linux/init.h>
#include <asm/processor.h>
#include "cpu.h"
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 18677a90d6a3..a57902efe2d5 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -7,7 +7,6 @@
*
*/
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
index 5d3fe8d36e4a..f6dfd9334b67 100644
--- a/arch/x86/kernel/doublefault.c
+++ b/arch/x86/kernel/doublefault.c
@@ -1,6 +1,5 @@
#include <linux/mm.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/fs.h>
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 174da5fc5a7b..988c00a1f60d 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1120,7 +1120,7 @@ void __init memblock_find_dma_reserve(void)
nr_pages += end_pfn - start_pfn;
}
- for_each_free_mem_range(u, MAX_NUMNODES, &start, &end, NULL) {
+ for_each_free_mem_range(u, NUMA_NO_NODE, &start, &end, NULL) {
start_pfn = min_t(unsigned long, PFN_UP(start), MAX_DMA_PFN);
end_pfn = min_t(unsigned long, PFN_DOWN(end), MAX_DMA_PFN);
if (start_pfn < end_pfn)
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 51e2988c5728..a2a4f4697889 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1082,7 +1082,7 @@ ENTRY(ftrace_caller)
pushl $0 /* Pass NULL as regs pointer */
movl 4*4(%esp), %eax
movl 0x4(%ebp), %edx
- leal function_trace_op, %ecx
+ movl function_trace_op, %ecx
subl $MCOUNT_INSN_SIZE, %eax
.globl ftrace_call
@@ -1140,7 +1140,7 @@ ENTRY(ftrace_regs_caller)
movl 12*4(%esp), %eax /* Load ip (1st parameter) */
subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
- leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
+ movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
pushl %esp /* Save pt_regs as 4th parameter */
GLOBAL(ftrace_regs_call)
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index e21b0785a85b..1e96c3628bf2 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -88,7 +88,7 @@ END(function_hook)
MCOUNT_SAVE_FRAME \skip
/* Load the ftrace_ops into the 3rd parameter */
- leaq function_trace_op, %rdx
+ movq function_trace_op(%rip), %rdx
/* Load ip into the first parameter */
movq RIP(%rsp), %rdi
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index d4bdd253fea7..e6253195a301 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -77,8 +77,7 @@ within(unsigned long addr, unsigned long start, unsigned long end)
return addr >= start && addr < end;
}
-static int
-do_ftrace_mod_code(unsigned long ip, const void *new_code)
+static unsigned long text_ip_addr(unsigned long ip)
{
/*
* On x86_64, kernel text mappings are mapped read-only with
@@ -91,7 +90,7 @@ do_ftrace_mod_code(unsigned long ip, const void *new_code)
if (within(ip, (unsigned long)_text, (unsigned long)_etext))
ip = (unsigned long)__va(__pa_symbol(ip));
- return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
+ return ip;
}
static const unsigned char *ftrace_nop_replace(void)
@@ -123,8 +122,10 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
return -EINVAL;
+ ip = text_ip_addr(ip);
+
/* replace the text with the new text */
- if (do_ftrace_mod_code(ip, new_code))
+ if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
return -EPERM;
sync_core();
@@ -221,37 +222,51 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
return -EINVAL;
}
-int ftrace_update_ftrace_func(ftrace_func_t func)
+static unsigned long ftrace_update_func;
+
+static int update_ftrace_func(unsigned long ip, void *new)
{
- unsigned long ip = (unsigned long)(&ftrace_call);
- unsigned char old[MCOUNT_INSN_SIZE], *new;
+ unsigned char old[MCOUNT_INSN_SIZE];
int ret;
- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
- new = ftrace_call_replace(ip, (unsigned long)func);
+ memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
+
+ ftrace_update_func = ip;
+ /* Make sure the breakpoints see the ftrace_update_func update */
+ smp_wmb();
/* See comment above by declaration of modifying_ftrace_code */
atomic_inc(&modifying_ftrace_code);
ret = ftrace_modify_code(ip, old, new);
+ atomic_dec(&modifying_ftrace_code);
+
+ return ret;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned long ip = (unsigned long)(&ftrace_call);
+ unsigned char *new;
+ int ret;
+
+ new = ftrace_call_replace(ip, (unsigned long)func);
+ ret = update_ftrace_func(ip, new);
+
/* Also update the regs callback function */
if (!ret) {
ip = (unsigned long)(&ftrace_regs_call);
- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
new = ftrace_call_replace(ip, (unsigned long)func);
- ret = ftrace_modify_code(ip, old, new);
+ ret = update_ftrace_func(ip, new);
}
- atomic_dec(&modifying_ftrace_code);
-
return ret;
}
static int is_ftrace_caller(unsigned long ip)
{
- if (ip == (unsigned long)(&ftrace_call) ||
- ip == (unsigned long)(&ftrace_regs_call))
+ if (ip == ftrace_update_func)
return 1;
return 0;
@@ -677,45 +692,41 @@ int __init ftrace_dyn_arch_init(void *data)
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
-static int ftrace_mod_jmp(unsigned long ip,
- int old_offset, int new_offset)
+static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
- unsigned char code[MCOUNT_INSN_SIZE];
+ static union ftrace_code_union calc;
- if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
- return -EFAULT;
+ /* A jmp, not a call: the union's .e8 field holds the 0xe9 opcode here */
+ calc.e8 = 0xe9;
+ calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
- if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
- return -EINVAL;
+ /*
+ * ftrace external locks synchronize the access to the static variable.
+ */
+ return calc.code;
+}
- *(int *)(&code[1]) = new_offset;
+static int ftrace_mod_jmp(unsigned long ip, void *func)
+{
+ unsigned char *new;
- if (do_ftrace_mod_code(ip, &code))
- return -EPERM;
+ new = ftrace_jmp_replace(ip, (unsigned long)func);
- return 0;
+ return update_ftrace_func(ip, new);
}
int ftrace_enable_ftrace_graph_caller(void)
{
unsigned long ip = (unsigned long)(&ftrace_graph_call);
- int old_offset, new_offset;
- old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
- new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
-
- return ftrace_mod_jmp(ip, old_offset, new_offset);
+ return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}
int ftrace_disable_ftrace_graph_caller(void)
{
unsigned long ip = (unsigned long)(&ftrace_graph_call);
- int old_offset, new_offset;
-
- old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
- new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
- return ftrace_mod_jmp(ip, old_offset, new_offset);
+ return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* !CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index f66ff162dce8..a67b47c31314 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -38,7 +38,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <asm/hw_breakpoint.h>
diff --git a/arch/x86/kernel/iosf_mbi.c b/arch/x86/kernel/iosf_mbi.c
new file mode 100644
index 000000000000..c3aae6672843
--- /dev/null
+++ b/arch/x86/kernel/iosf_mbi.c
@@ -0,0 +1,226 @@
+/*
+ * IOSF-SB MailBox Interface Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ *
+ * The IOSF-SB is a fabric bus available on Atom-based SoCs that uses a
+ * mailbox interface (MBI) to communicate with multiple devices. This
+ * driver implements access to this interface for those platforms that can
+ * enumerate the device using PCI.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+
+#include <asm/iosf_mbi.h>
+
+static DEFINE_SPINLOCK(iosf_mbi_lock);
+
+static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
+{
+ return (op << 24) | (port << 16) | (offset << 8) | MBI_ENABLE;
+}
+
+static struct pci_dev *mbi_pdev; /* one mbi device */
+
+static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
+{
+ int result;
+
+ if (!mbi_pdev)
+ return -ENODEV;
+
+ if (mcrx) {
+ result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET,
+ mcrx);
+ if (result < 0)
+ goto fail_read;
+ }
+
+ result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr);
+ if (result < 0)
+ goto fail_read;
+
+ result = pci_read_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr);
+ if (result < 0)
+ goto fail_read;
+
+ return 0;
+
+fail_read:
+ dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
+ return result;
+}
+
+static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
+{
+ int result;
+
+ if (!mbi_pdev)
+ return -ENODEV;
+
+ result = pci_write_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr);
+ if (result < 0)
+ goto fail_write;
+
+ if (mcrx) {
+ result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET,
+ mcrx);
+ if (result < 0)
+ goto fail_write;
+ }
+
+ result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr);
+ if (result < 0)
+ goto fail_write;
+
+ return 0;
+
+fail_write:
+ dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
+ return result;
+}
+
+int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
+{
+ u32 mcr, mcrx;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ if (port == BT_MBI_UNIT_GFX) {
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
+ mcrx = offset & MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+ ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr);
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_read);
+
+int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
+{
+ u32 mcr, mcrx;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ if (port == BT_MBI_UNIT_GFX) {
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
+ mcrx = offset & MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+ ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr);
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_write);
+
+int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
+{
+ u32 mcr, mcrx;
+ u32 value;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ if (port == BT_MBI_UNIT_GFX) {
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
+ mcrx = offset & MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+
+ /* Read current mdr value */
+ ret = iosf_mbi_pci_read_mdr(mcrx, mcr & MBI_RD_MASK, &value);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+ return ret;
+ }
+
+ /* Apply mask */
+ value &= ~mask;
+ mdr &= mask;
+ value |= mdr;
+
+ /* Write back */
+ ret = iosf_mbi_pci_write_mdr(mcrx, mcr | MBI_WR_MASK, value);
+
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_modify);
+
+static int iosf_mbi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *unused)
+{
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "error: could not enable device\n");
+ return ret;
+ }
+
+ mbi_pdev = pci_dev_get(pdev);
+ return 0;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(iosf_mbi_pci_ids) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0F00) },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
+
+static struct pci_driver iosf_mbi_pci_driver = {
+ .name = "iosf_mbi_pci",
+ .probe = iosf_mbi_probe,
+ .id_table = iosf_mbi_pci_ids,
+};
+
+static int __init iosf_mbi_init(void)
+{
+ return pci_register_driver(&iosf_mbi_pci_driver);
+}
+
+static void __exit iosf_mbi_exit(void)
+{
+ pci_unregister_driver(&iosf_mbi_pci_driver);
+ if (mbi_pdev) {
+ pci_dev_put(mbi_pdev);
+ mbi_pdev = NULL;
+ }
+}
+
+module_init(iosf_mbi_init);
+module_exit(iosf_mbi_exit);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("IOSF Mailbox Interface accessor");
+MODULE_LICENSE("GPL v2");
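A minimal sketch of how another kernel driver might call the exported helpers. The port, opcode and register offset values below are placeholders only; real values come from <asm/iosf_mbi.h> and the datasheet of the unit being addressed.

        #include <asm/iosf_mbi.h>

        static int example_set_bit0(void)
        {
                u32 val;
                int ret;

                /* read a 32-bit register of a hypothetical unit */
                ret = iosf_mbi_read(0x04 /* port */, 0x06 /* read opcode */,
                                    0x30 /* offset */, &val);
                if (ret)
                        return ret;

                /* set bit 0 and write it back */
                return iosf_mbi_write(0x04, 0x07 /* write opcode */, 0x30, val | 0x1);
        }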
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 22d0687e7fda..d99f31d9a750 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -193,9 +193,13 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
if (!handle_irq(irq, regs)) {
ack_APIC_irq();
- if (printk_ratelimit())
- pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
- __func__, smp_processor_id(), vector, irq);
+ if (irq != VECTOR_RETRIGGERED) {
+ pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n",
+ __func__, smp_processor_id(),
+ vector, irq);
+ } else {
+ __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
+ }
}
irq_exit();
@@ -262,6 +266,83 @@ __visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
#ifdef CONFIG_HOTPLUG_CPU
+
+/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
+ * below, which is protected by stop_machine(). Putting them on the stack
+ * results in a stack frame overflow. Dynamically allocating could result in a
+ * failure so declare these two cpumasks as global.
+ */
+static struct cpumask affinity_new, online_new;
+
+/*
+ * This cpu is going to be removed and its vectors migrated to the remaining
+ * online cpus. Check to see if there are enough vectors in the remaining cpus.
+ * This function is protected by stop_machine().
+ */
+int check_irq_vectors_for_cpu_disable(void)
+{
+ int irq, cpu;
+ unsigned int this_cpu, vector, this_count, count;
+ struct irq_desc *desc;
+ struct irq_data *data;
+
+ this_cpu = smp_processor_id();
+ cpumask_copy(&online_new, cpu_online_mask);
+ cpu_clear(this_cpu, online_new);
+
+ this_count = 0;
+ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
+ irq = __this_cpu_read(vector_irq[vector]);
+ if (irq >= 0) {
+ desc = irq_to_desc(irq);
+ data = irq_desc_get_irq_data(desc);
+ cpumask_copy(&affinity_new, data->affinity);
+ cpu_clear(this_cpu, affinity_new);
+
+ /* Do not count inactive or per-cpu irqs. */
+ if (!irq_has_action(irq) || irqd_is_per_cpu(data))
+ continue;
+
+ /*
+ * A single irq may be mapped to multiple
+ * cpu's vector_irq[] (for example IOAPIC cluster
+ * mode). In this case we have two
+ * possibilities:
+ *
+ * 1) the resulting affinity mask is empty; that is,
+ * the down'd cpu is the last cpu in the irq's
+ * affinity mask, or
+ *
+ * 2) the resulting affinity mask is no longer
+ * a subset of the online cpus but the affinity
+ * mask is not zero; that is, the down'd cpu is the
+ * last online cpu in a user-set affinity mask.
+ */
+ if (cpumask_empty(&affinity_new) ||
+ !cpumask_subset(&affinity_new, &online_new))
+ this_count++;
+ }
+ }
+
+ count = 0;
+ for_each_online_cpu(cpu) {
+ if (cpu == this_cpu)
+ continue;
+ for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
+ vector++) {
+ if (per_cpu(vector_irq, cpu)[vector] < 0)
+ count++;
+ }
+ }
+
+ if (count < this_count) {
+ pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
+ this_cpu, this_count, count);
+ return -ERANGE;
+ }
+ return 0;
+}
+
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
@@ -344,7 +425,7 @@ void fixup_irqs(void)
for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
unsigned int irr;
- if (__this_cpu_read(vector_irq[vector]) < 0)
+ if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNDEFINED)
continue;
irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
@@ -355,11 +436,14 @@ void fixup_irqs(void)
data = irq_desc_get_irq_data(desc);
chip = irq_data_get_irq_chip(data);
raw_spin_lock(&desc->lock);
- if (chip->irq_retrigger)
+ if (chip->irq_retrigger) {
chip->irq_retrigger(data);
+ __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
+ }
raw_spin_unlock(&desc->lock);
}
- __this_cpu_write(vector_irq[vector], -1);
+ if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
+ __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
}
}
#endif
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index a2a1fbc594ff..7f50156542fb 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -52,7 +52,7 @@ static struct irqaction irq2 = {
};
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
- [0 ... NR_VECTORS - 1] = -1,
+ [0 ... NR_VECTORS - 1] = VECTOR_UNDEFINED,
};
int vector_used_by_percpu_irq(unsigned int vector)
@@ -60,7 +60,7 @@ int vector_used_by_percpu_irq(unsigned int vector)
int cpu;
for_each_online_cpu(cpu) {
- if (per_cpu(vector_irq, cpu)[vector] != -1)
+ if (per_cpu(vector_irq, cpu)[vector] > VECTOR_UNDEFINED)
return 1;
}
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 836f8322960e..7ec1d5f8d283 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -39,7 +39,6 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/kgdb.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <linux/hw_breakpoint.h>
diff --git a/arch/x86/kernel/ksysfs.c b/arch/x86/kernel/ksysfs.c
new file mode 100644
index 000000000000..c2bedaea11f7
--- /dev/null
+++ b/arch/x86/kernel/ksysfs.c
@@ -0,0 +1,340 @@
+/*
+ * Architecture specific sysfs attributes in /sys/kernel
+ *
+ * Copyright (C) 2007, Intel Corp.
+ * Huang Ying <ying.huang@intel.com>
+ * Copyright (C) 2013 Red Hat, Inc.
+ * Dave Young <dyoung@redhat.com>
+ *
+ * This file is released under the GPLv2
+ */
+
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <linux/stat.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/setup.h>
+
+static ssize_t version_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0x%04x\n", boot_params.hdr.version);
+}
+
+static struct kobj_attribute boot_params_version_attr = __ATTR_RO(version);
+
+static ssize_t boot_params_data_read(struct file *fp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
+{
+ memcpy(buf, (void *)&boot_params + off, count);
+ return count;
+}
+
+static struct bin_attribute boot_params_data_attr = {
+ .attr = {
+ .name = "data",
+ .mode = S_IRUGO,
+ },
+ .read = boot_params_data_read,
+ .size = sizeof(boot_params),
+};
+
+static struct attribute *boot_params_version_attrs[] = {
+ &boot_params_version_attr.attr,
+ NULL,
+};
+
+static struct bin_attribute *boot_params_data_attrs[] = {
+ &boot_params_data_attr,
+ NULL,
+};
+
+static struct attribute_group boot_params_attr_group = {
+ .attrs = boot_params_version_attrs,
+ .bin_attrs = boot_params_data_attrs,
+};
+
+static int kobj_to_setup_data_nr(struct kobject *kobj, int *nr)
+{
+ const char *name;
+
+ name = kobject_name(kobj);
+ return kstrtoint(name, 10, nr);
+}
+
+static int get_setup_data_paddr(int nr, u64 *paddr)
+{
+ int i = 0;
+ struct setup_data *data;
+ u64 pa_data = boot_params.hdr.setup_data;
+
+ while (pa_data) {
+ if (nr == i) {
+ *paddr = pa_data;
+ return 0;
+ }
+ data = ioremap_cache(pa_data, sizeof(*data));
+ if (!data)
+ return -ENOMEM;
+
+ pa_data = data->next;
+ iounmap(data);
+ i++;
+ }
+ return -EINVAL;
+}
+
+static int __init get_setup_data_size(int nr, size_t *size)
+{
+ int i = 0;
+ struct setup_data *data;
+ u64 pa_data = boot_params.hdr.setup_data;
+
+ while (pa_data) {
+ data = ioremap_cache(pa_data, sizeof(*data));
+ if (!data)
+ return -ENOMEM;
+ if (nr == i) {
+ *size = data->len;
+ iounmap(data);
+ return 0;
+ }
+
+ pa_data = data->next;
+ iounmap(data);
+ i++;
+ }
+ return -EINVAL;
+}
+
+static ssize_t type_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ int nr, ret;
+ u64 paddr;
+ struct setup_data *data;
+
+ ret = kobj_to_setup_data_nr(kobj, &nr);
+ if (ret)
+ return ret;
+
+ ret = get_setup_data_paddr(nr, &paddr);
+ if (ret)
+ return ret;
+ data = ioremap_cache(paddr, sizeof(*data));
+ if (!data)
+ return -ENOMEM;
+
+ ret = sprintf(buf, "0x%x\n", data->type);
+ iounmap(data);
+ return ret;
+}
+
+static ssize_t setup_data_data_read(struct file *fp,
+ struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf,
+ loff_t off, size_t count)
+{
+ int nr, ret = 0;
+ u64 paddr;
+ struct setup_data *data;
+ void *p;
+
+ ret = kobj_to_setup_data_nr(kobj, &nr);
+ if (ret)
+ return ret;
+
+ ret = get_setup_data_paddr(nr, &paddr);
+ if (ret)
+ return ret;
+ data = ioremap_cache(paddr, sizeof(*data));
+ if (!data)
+ return -ENOMEM;
+
+ if (off > data->len) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (count > data->len - off)
+ count = data->len - off;
+
+ if (!count)
+ goto out;
+
+ ret = count;
+ p = ioremap_cache(paddr + sizeof(*data), data->len);
+ if (!p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ memcpy(buf, p + off, count);
+ iounmap(p);
+out:
+ iounmap(data);
+ return ret;
+}
+
+static struct kobj_attribute type_attr = __ATTR_RO(type);
+
+static struct bin_attribute data_attr = {
+ .attr = {
+ .name = "data",
+ .mode = S_IRUGO,
+ },
+ .read = setup_data_data_read,
+};
+
+static struct attribute *setup_data_type_attrs[] = {
+ &type_attr.attr,
+ NULL,
+};
+
+static struct bin_attribute *setup_data_data_attrs[] = {
+ &data_attr,
+ NULL,
+};
+
+static struct attribute_group setup_data_attr_group = {
+ .attrs = setup_data_type_attrs,
+ .bin_attrs = setup_data_data_attrs,
+};
+
+static int __init create_setup_data_node(struct kobject *parent,
+ struct kobject **kobjp, int nr)
+{
+ int ret = 0;
+ size_t size;
+ struct kobject *kobj;
+ char name[16]; /* should be enough for setup_data node numbers */
+ snprintf(name, 16, "%d", nr);
+
+ kobj = kobject_create_and_add(name, parent);
+ if (!kobj)
+ return -ENOMEM;
+
+ ret = get_setup_data_size(nr, &size);
+ if (ret)
+ goto out_kobj;
+
+ data_attr.size = size;
+ ret = sysfs_create_group(kobj, &setup_data_attr_group);
+ if (ret)
+ goto out_kobj;
+ *kobjp = kobj;
+
+ return 0;
+out_kobj:
+ kobject_put(kobj);
+ return ret;
+}
+
+static void __init cleanup_setup_data_node(struct kobject *kobj)
+{
+ sysfs_remove_group(kobj, &setup_data_attr_group);
+ kobject_put(kobj);
+}
+
+static int __init get_setup_data_total_num(u64 pa_data, int *nr)
+{
+ int ret = 0;
+ struct setup_data *data;
+
+ *nr = 0;
+ while (pa_data) {
+ *nr += 1;
+ data = ioremap_cache(pa_data, sizeof(*data));
+ if (!data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pa_data = data->next;
+ iounmap(data);
+ }
+
+out:
+ return ret;
+}
+
+static int __init create_setup_data_nodes(struct kobject *parent)
+{
+ struct kobject *setup_data_kobj, **kobjp;
+ u64 pa_data;
+ int i, j, nr, ret = 0;
+
+ pa_data = boot_params.hdr.setup_data;
+ if (!pa_data)
+ return 0;
+
+ setup_data_kobj = kobject_create_and_add("setup_data", parent);
+ if (!setup_data_kobj) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = get_setup_data_total_num(pa_data, &nr);
+ if (ret)
+ goto out_setup_data_kobj;
+
+ kobjp = kmalloc(sizeof(*kobjp) * nr, GFP_KERNEL);
+ if (!kobjp) {
+ ret = -ENOMEM;
+ goto out_setup_data_kobj;
+ }
+
+ for (i = 0; i < nr; i++) {
+ ret = create_setup_data_node(setup_data_kobj, kobjp + i, i);
+ if (ret)
+ goto out_clean_nodes;
+ }
+
+ kfree(kobjp);
+ return 0;
+
+out_clean_nodes:
+ for (j = i - 1; j >= 0; j--)
+ cleanup_setup_data_node(*(kobjp + j));
+ kfree(kobjp);
+out_setup_data_kobj:
+ kobject_put(setup_data_kobj);
+out:
+ return ret;
+}
+
+static int __init boot_params_ksysfs_init(void)
+{
+ int ret;
+ struct kobject *boot_params_kobj;
+
+ boot_params_kobj = kobject_create_and_add("boot_params",
+ kernel_kobj);
+ if (!boot_params_kobj) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = sysfs_create_group(boot_params_kobj, &boot_params_attr_group);
+ if (ret)
+ goto out_boot_params_kobj;
+
+ ret = create_setup_data_nodes(boot_params_kobj);
+ if (ret)
+ goto out_create_group;
+
+ return 0;
+out_create_group:
+ sysfs_remove_group(boot_params_kobj, &boot_params_attr_group);
+out_boot_params_kobj:
+ kobject_put(boot_params_kobj);
+out:
+ return ret;
+}
+
+arch_initcall(boot_params_ksysfs_init);
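A small user-space sketch of consuming the attributes created above; the boot protocol version (and, per entry, each setup_data type) is exposed as a plain text file under /sys/kernel/boot_params/.

        #include <stdio.h>

        int main(void)
        {
                char line[32];
                FILE *f = fopen("/sys/kernel/boot_params/version", "r");

                if (!f)
                        return 1;
                if (fgets(line, sizeof(line), f))
                        printf("boot protocol version: %s", line);
                fclose(f);
                return 0;
        }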
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 6dd802c6d780..713f1b3bad52 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -500,6 +500,38 @@ void __init kvm_guest_init(void)
#endif
}
+static noinline uint32_t __kvm_cpuid_base(void)
+{
+ if (boot_cpu_data.cpuid_level < 0)
+ return 0; /* So we don't blow up on old processors */
+
+ if (cpu_has_hypervisor)
+ return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
+
+ return 0;
+}
+
+static inline uint32_t kvm_cpuid_base(void)
+{
+ static int kvm_cpuid_base = -1;
+
+ if (kvm_cpuid_base == -1)
+ kvm_cpuid_base = __kvm_cpuid_base();
+
+ return kvm_cpuid_base;
+}
+
+bool kvm_para_available(void)
+{
+ return kvm_cpuid_base() != 0;
+}
+EXPORT_SYMBOL_GPL(kvm_para_available);
+
+unsigned int kvm_arch_para_features(void)
+{
+ return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
+}
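Hedged usage sketch: callers elsewhere in the kernel gate paravirtual features on these helpers via kvm_para_has_feature(); the wrapper below is purely illustrative (KVM_FEATURE_STEAL_TIME is an existing feature bit, picked only as an example).

        static bool __maybe_unused example_has_steal_time(void)
        {
                return kvm_para_available() &&
                       kvm_para_has_feature(KVM_FEATURE_STEAL_TIME);
        }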
+
static uint32_t __init kvm_detect(void)
{
return kvm_cpuid_base();
@@ -673,7 +705,7 @@ static cpumask_t waiting_cpus;
/* Track spinlock on which a cpu is waiting */
static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);
-static void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
+__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
struct kvm_lock_waiting *w;
int cpu;
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 5b19e4d78b00..1667b1de8d5d 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -9,7 +9,6 @@
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/suspend.h>
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 871be4a84c7d..da15918d1c81 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -3,7 +3,6 @@
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
-#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/mm.h>
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 6f1236c29c4b..0de43e98ce08 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -24,7 +24,6 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
-#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 04ee1e2e4c02..7c6acd4b8995 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -571,3 +571,40 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
quirk_amd_nb_node);
#endif
+
+#ifdef CONFIG_PCI
+/*
+ * Processor does not ensure DRAM scrub read/write sequence
+ * is atomic wrt accesses to the CC6 save state area. Therefore,
+ * if a concurrent scrub read/write access is to the same address,
+ * the entry may appear as if it is not written. This quirk
+ * applies to Fam16h models 00h-0Fh.
+ *
+ * See "Revision Guide" for AMD F16h models 00h-0fh,
+ * document 51810 rev. 3.04, Nov 2013
+ */
+static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
+{
+ u32 val;
+
+ /*
+ * Suggested workaround:
+ * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
+ */
+ pci_read_config_dword(dev, 0x58, &val);
+ if (val & 0x1F) {
+ val &= ~(0x1F);
+ pci_write_config_dword(dev, 0x58, val);
+ }
+
+ pci_read_config_dword(dev, 0x5C, &val);
+ if (val & BIT(0)) {
+ val &= ~BIT(0);
+ pci_write_config_dword(dev, 0x5c, val);
+ }
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
+ amd_disable_seq_and_redirect_scrub);
+
+#endif
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index cb233bc9dee3..06853e670354 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -295,6 +295,8 @@ static void __init reserve_brk(void)
_brk_start = 0;
}
+u64 relocated_ramdisk;
+
#ifdef CONFIG_BLK_DEV_INITRD
static u64 __init get_ramdisk_image(void)
@@ -321,25 +323,24 @@ static void __init relocate_initrd(void)
u64 ramdisk_image = get_ramdisk_image();
u64 ramdisk_size = get_ramdisk_size();
u64 area_size = PAGE_ALIGN(ramdisk_size);
- u64 ramdisk_here;
unsigned long slop, clen, mapaddr;
char *p, *q;
/* We need to move the initrd down into directly mapped mem */
- ramdisk_here = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
- area_size, PAGE_SIZE);
+ relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
+ area_size, PAGE_SIZE);
- if (!ramdisk_here)
+ if (!relocated_ramdisk)
panic("Cannot find place for new RAMDISK of size %lld\n",
- ramdisk_size);
+ ramdisk_size);
/* Note: this includes all the mem currently occupied by
the initrd, we rely on that fact to keep the data intact. */
- memblock_reserve(ramdisk_here, area_size);
- initrd_start = ramdisk_here + PAGE_OFFSET;
+ memblock_reserve(relocated_ramdisk, area_size);
+ initrd_start = relocated_ramdisk + PAGE_OFFSET;
initrd_end = initrd_start + ramdisk_size;
printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
- ramdisk_here, ramdisk_here + ramdisk_size - 1);
+ relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
q = (char *)initrd_start;
@@ -363,7 +364,7 @@ static void __init relocate_initrd(void)
printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
" [mem %#010llx-%#010llx]\n",
ramdisk_image, ramdisk_image + ramdisk_size - 1,
- ramdisk_here, ramdisk_here + ramdisk_size - 1);
+ relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
}
static void __init early_reserve_initrd(void)
@@ -447,6 +448,9 @@ static void __init parse_setup_data(void)
case SETUP_DTB:
add_dtb(pa_data);
break;
+ case SETUP_EFI:
+ parse_efi_setup(pa_data, data_len);
+ break;
default:
break;
}
@@ -824,6 +828,20 @@ static void __init trim_low_memory_range(void)
}
/*
+ * Dump out kernel offset information on panic.
+ */
+static int
+dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
+{
+ pr_emerg("Kernel Offset: 0x%lx from 0x%lx "
+ "(relocation range: 0x%lx-0x%lx)\n",
+ (unsigned long)&_text - __START_KERNEL, __START_KERNEL,
+ __START_KERNEL_map, MODULES_VADDR-1);
+
+ return 0;
+}
+
+/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
* for initialization. Note, the efi init code path is determined by the
@@ -924,8 +942,6 @@ void __init setup_arch(char **cmdline_p)
iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
setup_memory_map();
parse_setup_data();
- /* update the e820_saved too */
- e820_reserve_setup_data();
copy_edd();
@@ -987,6 +1003,8 @@ void __init setup_arch(char **cmdline_p)
early_dump_pci_devices();
#endif
+ /* update the e820_saved too */
+ e820_reserve_setup_data();
finish_e820_parsing();
if (efi_enabled(EFI_BOOT))
@@ -1248,3 +1266,15 @@ void __init i386_reserve_resources(void)
}
#endif /* CONFIG_X86_32 */
+
+static struct notifier_block kernel_offset_notifier = {
+ .notifier_call = dump_kernel_offset
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &kernel_offset_notifier);
+ return 0;
+}
+__initcall(register_kernel_offset_dumper);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 85dc05a3aa02..a32da804252e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1312,6 +1312,12 @@ void cpu_disable_common(void)
int native_cpu_disable(void)
{
+ int ret;
+
+ ret = check_irq_vectors_for_cpu_disable();
+ if (ret)
+ return ret;
+
clear_local_APIC();
cpu_disable_common();
@@ -1417,7 +1423,9 @@ static inline void mwait_play_dead(void)
* The WBINVD is insufficient due to the spurious-wakeup
* case where we return around the loop.
*/
+ mb();
clflush(mwait_ptr);
+ mb();
__monitor(mwait_ptr, 0, 0);
mb();
__mwait(eax, 0);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index b857ed890b4c..57409f6b8c62 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -211,21 +211,17 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
exception_exit(prev_state); \
}
-DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
- regs->ip)
-DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
-DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
-DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
- regs->ip)
-DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
- coprocessor_segment_overrun)
-DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
-DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
+DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip )
+DO_ERROR (X86_TRAP_OF, SIGSEGV, "overflow", overflow )
+DO_ERROR (X86_TRAP_BR, SIGSEGV, "bounds", bounds )
+DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip )
+DO_ERROR (X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun )
+DO_ERROR (X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS )
+DO_ERROR (X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present )
#ifdef CONFIG_X86_32
-DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
+DO_ERROR (X86_TRAP_SS, SIGBUS, "stack segment", stack_segment )
#endif
-DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
- BUS_ADRALN, 0)
+DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0 )
#ifdef CONFIG_X86_64
/* Runs on IST stack */
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 930e5d48f560..acb3b606613e 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -11,6 +11,7 @@
#include <linux/clocksource.h>
#include <linux/percpu.h>
#include <linux/timex.h>
+#include <linux/static_key.h>
#include <asm/hpet.h>
#include <asm/timer.h>
@@ -37,13 +38,244 @@ static int __read_mostly tsc_unstable;
erroneous rdtsc usage on !cpu_has_tsc processors */
static int __read_mostly tsc_disabled = -1;
+static struct static_key __use_tsc = STATIC_KEY_INIT;
+
int tsc_clocksource_reliable;
+
+/*
+ * Use a ring-buffer like data structure, where a writer advances the head by
+ * writing a new data entry and a reader advances the tail when it observes a
+ * new entry.
+ *
+ * Writers are made to wait on readers until there's space to write a new
+ * entry.
+ *
+ * This means that we can always use an {offset, mul} pair to compute a ns
+ * value that is 'roughly' in the right direction, even if we're writing a new
+ * {offset, mul} pair during the clock read.
+ *
+ * The down-side is that we can no longer guarantee strict monotonicity
+ * (assuming the TSC was monotonic to begin with): while we compute the
+ * intersection point of the two clock slopes and make sure the time is
+ * continuous at the point of switching, we can no longer guarantee a reader is
+ * strictly before or after the switch point.
+ *
+ * It does mean a reader no longer needs to disable IRQs in order to avoid
+ * CPU-Freq updates messing with its readings, and similarly an NMI reader will
+ * no longer run the risk of hitting half-written state.
+ */
+
+struct cyc2ns {
+ struct cyc2ns_data data[2]; /* 0 + 2*24 = 48 */
+ struct cyc2ns_data *head; /* 48 + 8 = 56 */
+ struct cyc2ns_data *tail; /* 56 + 8 = 64 */
+}; /* exactly fits one cacheline */
+
+static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);
+
+struct cyc2ns_data *cyc2ns_read_begin(void)
+{
+ struct cyc2ns_data *head;
+
+ preempt_disable();
+
+ head = this_cpu_read(cyc2ns.head);
+ /*
+ * Ensure we observe the entry when we observe the pointer to it.
+ * Matches the smp_wmb() from cyc2ns_write_end().
+ */
+ smp_read_barrier_depends();
+ head->__count++;
+ barrier();
+
+ return head;
+}
+
+void cyc2ns_read_end(struct cyc2ns_data *head)
+{
+ barrier();
+ /*
+ * If we're the outermost nested read, update the tail pointer
+ * when we're done. This notifies possible pending writers
+ * that we've observed the head pointer and that the other
+ * entry is now free.
+ */
+ if (!--head->__count) {
+ /*
+ * x86-TSO does not reorder writes with older reads;
+ * therefore once this write becomes visible to another
+ * cpu, we must be finished reading the cyc2ns_data.
+ *
+ * matches with cyc2ns_write_begin().
+ */
+ this_cpu_write(cyc2ns.tail, head);
+ }
+ preempt_enable();
+}
+
+/*
+ * Begin writing a new @data entry for @cpu.
+ *
+ * Assumes some sort of write side lock; currently 'provided' by the assumption
+ * that cpufreq will call its notifiers sequentially.
+ */
+static struct cyc2ns_data *cyc2ns_write_begin(int cpu)
+{
+ struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
+ struct cyc2ns_data *data = c2n->data;
+
+ if (data == c2n->head)
+ data++;
+
+ /* XXX send an IPI to @cpu in order to guarantee a read? */
+
+ /*
+ * When we observe the tail write from cyc2ns_read_end(),
+ * the cpu must be done with that entry and it's safe
+ * to start writing to it.
+ */
+ while (c2n->tail == data)
+ cpu_relax();
+
+ return data;
+}
+
+static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
+{
+ struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
+
+ /*
+ * Ensure the @data writes are visible before we publish the
+ * entry. Matches the data dependency in cyc2ns_read_begin().
+ */
+ smp_wmb();
+
+ ACCESS_ONCE(c2n->head) = data;
+}
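+
A minimal sketch of the intended read side of this API (illustrative only; the in-tree fast path further down open-codes the same steps in cycles_2_ns()):

static u64 cyc2ns_sketch(u64 cyc)
{
	struct cyc2ns_data *data = cyc2ns_read_begin();
	u64 ns;

	ns = data->cyc2ns_offset +
	     mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);

	cyc2ns_read_end(data);

	return ns;
}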
+
+/*
+ * Accelerators for sched_clock()
+ * convert from cycles(64bits) => nanoseconds (64bits)
+ * basic equation:
+ * ns = cycles / (freq / ns_per_sec)
+ * ns = cycles * (ns_per_sec / freq)
+ * ns = cycles * (10^9 / (cpu_khz * 10^3))
+ * ns = cycles * (10^6 / cpu_khz)
+ *
+ * Then we use scaling math (suggested by george@mvista.com) to get:
+ * ns = cycles * (10^6 * SC / cpu_khz) / SC
+ * ns = cycles * cyc2ns_scale / SC
+ *
+ * And since SC is a constant power of two, we can convert the div
+ * into a shift.
+ *
+ * We can use khz divisor instead of mhz to keep a better precision, since
+ * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
+ * (mathieu.desnoyers@polymtl.ca)
+ *
+ * -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ */
+
+#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
+
+static void cyc2ns_data_init(struct cyc2ns_data *data)
+{
+ data->cyc2ns_mul = 0;
+ data->cyc2ns_shift = CYC2NS_SCALE_FACTOR;
+ data->cyc2ns_offset = 0;
+ data->__count = 0;
+}
+
+static void cyc2ns_init(int cpu)
+{
+ struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
+
+ cyc2ns_data_init(&c2n->data[0]);
+ cyc2ns_data_init(&c2n->data[1]);
+
+ c2n->head = c2n->data;
+ c2n->tail = c2n->data;
+}
+
+static inline unsigned long long cycles_2_ns(unsigned long long cyc)
+{
+ struct cyc2ns_data *data, *tail;
+ unsigned long long ns;
+
+ /*
+ * See cyc2ns_read_*() for details; replicated in order to avoid
+ * the few extra instructions that came with the abstraction.
+ * Notably, it allows us to only do the __count and tail update
+ * dance when it's actually needed.
+ */
+
+ preempt_disable_notrace();
+ data = this_cpu_read(cyc2ns.head);
+ tail = this_cpu_read(cyc2ns.tail);
+
+ if (likely(data == tail)) {
+ ns = data->cyc2ns_offset;
+ ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);
+ } else {
+ data->__count++;
+
+ barrier();
+
+ ns = data->cyc2ns_offset;
+ ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);
+
+ barrier();
+
+ if (!--data->__count)
+ this_cpu_write(cyc2ns.tail, data);
+ }
+ preempt_enable_notrace();
+
+ return ns;
+}
+
+/* XXX surely we already have this someplace in the kernel?! */
+#define DIV_ROUND(n, d) (((n) + ((d) / 2)) / (d))
+
+static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
+{
+ unsigned long long tsc_now, ns_now;
+ struct cyc2ns_data *data;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ sched_clock_idle_sleep_event();
+
+ if (!cpu_khz)
+ goto done;
+
+ data = cyc2ns_write_begin(cpu);
+
+ rdtscll(tsc_now);
+ ns_now = cycles_2_ns(tsc_now);
+
+ /*
+ * Compute a new multiplier as per the above comment and ensure our
+ * time function is continuous; see the comment near struct
+ * cyc2ns_data.
+ */
+ data->cyc2ns_mul = DIV_ROUND(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR, cpu_khz);
+ data->cyc2ns_shift = CYC2NS_SCALE_FACTOR;
+ data->cyc2ns_offset = ns_now -
+ mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);
+
+ cyc2ns_write_end(cpu, data);
+
+done:
+ sched_clock_idle_wakeup_event(0);
+ local_irq_restore(flags);
+}
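+
As a worked instance of the formula in set_cyc2ns_scale() (hypothetical 2 GHz part, i.e. cpu_khz == 2000000; the numbers are illustrative only):

/*
 *   cyc2ns_mul     = DIV_ROUND(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR, cpu_khz)
 *                  = DIV_ROUND(1000000 << 10, 2000000)
 *                  = 512
 *
 *   cycles_2_ns(c) ~= (c * 512) >> 10 = c / 2
 *
 * i.e. 0.5 ns per cycle, as expected for a 2 GHz TSC (plus cyc2ns_offset,
 * which is chosen above so the clock stays continuous across the update).
 */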
/*
* Scheduler clock - returns current time in nanosec units.
*/
u64 native_sched_clock(void)
{
- u64 this_offset;
+ u64 tsc_now;
/*
* Fall back to jiffies if there's no TSC available:
@@ -53,16 +285,16 @@ u64 native_sched_clock(void)
* very important for it to be as fast as the platform
* can achieve it. )
*/
- if (unlikely(tsc_disabled)) {
+ if (!static_key_false(&__use_tsc)) {
/* No locking but a rare wrong value is not a big deal: */
return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);
}
/* read the Time Stamp Counter: */
- rdtscll(this_offset);
+ rdtscll(tsc_now);
/* return the value in ns */
- return __cycles_2_ns(this_offset);
+ return cycles_2_ns(tsc_now);
}
/* We need to define a real function for sched_clock, to override the
@@ -419,6 +651,16 @@ unsigned long native_calibrate_tsc(void)
unsigned long flags, latch, ms, fast_calibrate;
int hpet = is_hpet_enabled(), i, loopmin;
+ /* Calibrate TSC using MSR for Intel Atom SoCs */
+ local_irq_save(flags);
+ i = try_msr_calibrate_tsc(&fast_calibrate);
+ local_irq_restore(flags);
+ if (i >= 0) {
+ if (i == 0)
+ pr_warn("Fast TSC calibration using MSR failed\n");
+ return fast_calibrate;
+ }
+
local_irq_save(flags);
fast_calibrate = quick_pit_calibrate();
local_irq_restore(flags);
@@ -589,61 +831,11 @@ int recalibrate_cpu_khz(void)
EXPORT_SYMBOL(recalibrate_cpu_khz);
-/* Accelerators for sched_clock()
- * convert from cycles(64bits) => nanoseconds (64bits)
- * basic equation:
- * ns = cycles / (freq / ns_per_sec)
- * ns = cycles * (ns_per_sec / freq)
- * ns = cycles * (10^9 / (cpu_khz * 10^3))
- * ns = cycles * (10^6 / cpu_khz)
- *
- * Then we use scaling math (suggested by george@mvista.com) to get:
- * ns = cycles * (10^6 * SC / cpu_khz) / SC
- * ns = cycles * cyc2ns_scale / SC
- *
- * And since SC is a constant power of two, we can convert the div
- * into a shift.
- *
- * We can use khz divisor instead of mhz to keep a better precision, since
- * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits.
- * (mathieu.desnoyers@polymtl.ca)
- *
- * -johnstul@us.ibm.com "math is hard, lets go shopping!"
- */
-
-DEFINE_PER_CPU(unsigned long, cyc2ns);
-DEFINE_PER_CPU(unsigned long long, cyc2ns_offset);
-
-static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
-{
- unsigned long long tsc_now, ns_now, *offset;
- unsigned long flags, *scale;
-
- local_irq_save(flags);
- sched_clock_idle_sleep_event();
-
- scale = &per_cpu(cyc2ns, cpu);
- offset = &per_cpu(cyc2ns_offset, cpu);
-
- rdtscll(tsc_now);
- ns_now = __cycles_2_ns(tsc_now);
-
- if (cpu_khz) {
- *scale = ((NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR) +
- cpu_khz / 2) / cpu_khz;
- *offset = ns_now - mult_frac(tsc_now, *scale,
- (1UL << CYC2NS_SCALE_FACTOR));
- }
-
- sched_clock_idle_wakeup_event(0);
- local_irq_restore(flags);
-}
-
static unsigned long long cyc2ns_suspend;
void tsc_save_sched_clock_state(void)
{
- if (!sched_clock_stable)
+ if (!sched_clock_stable())
return;
cyc2ns_suspend = sched_clock();
@@ -663,16 +855,26 @@ void tsc_restore_sched_clock_state(void)
unsigned long flags;
int cpu;
- if (!sched_clock_stable)
+ if (!sched_clock_stable())
return;
local_irq_save(flags);
- __this_cpu_write(cyc2ns_offset, 0);
+ /*
+ * We're coming out of suspend, there's no concurrency yet; don't
+ * bother being nice about the RCU stuff, just write to both
+ * data fields.
+ */
+
+ this_cpu_write(cyc2ns.data[0].cyc2ns_offset, 0);
+ this_cpu_write(cyc2ns.data[1].cyc2ns_offset, 0);
+
offset = cyc2ns_suspend - sched_clock();
- for_each_possible_cpu(cpu)
- per_cpu(cyc2ns_offset, cpu) = offset;
+ for_each_possible_cpu(cpu) {
+ per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset;
+ per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
+ }
local_irq_restore(flags);
}
@@ -795,7 +997,7 @@ void mark_tsc_unstable(char *reason)
{
if (!tsc_unstable) {
tsc_unstable = 1;
- sched_clock_stable = 0;
+ clear_sched_clock_stable();
disable_sched_clock_irqtime();
pr_info("Marking TSC unstable due to %s\n", reason);
/* Change only the rating, when not registered */
@@ -995,14 +1197,18 @@ void __init tsc_init(void)
* speed as the bootup CPU. (cpufreq notifiers will fix this
* up if their speed diverges)
*/
- for_each_possible_cpu(cpu)
+ for_each_possible_cpu(cpu) {
+ cyc2ns_init(cpu);
set_cyc2ns_scale(cpu_khz, cpu);
+ }
if (tsc_disabled > 0)
return;
/* now allow native_sched_clock() to use rdtsc */
+
tsc_disabled = 0;
+ static_key_slow_inc(&__use_tsc);
if (!no_sched_irq_time)
enable_sched_clock_irqtime();
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
new file mode 100644
index 000000000000..8b5434f4389f
--- /dev/null
+++ b/arch/x86/kernel/tsc_msr.c
@@ -0,0 +1,127 @@
+/*
+ * tsc_msr.c - MSR based TSC calibration on Intel Atom SoC platforms.
+ *
+ * The TSC in Intel Atom SoCs runs at a constant rate which can be computed
+ * with this formula:
+ * <maximum core-clock to bus-clock ratio> * <maximum resolved frequency>
+ * See the Intel 64 and IA-32 System Programming Guide, sections 16.12 and
+ * 30.11.5, for details.
+ * In particular, some Intel Atom SoCs don't have a PIT (i8254) or HPET, so
+ * MSR-based calibration is the only option.
+ *
+ *
+ * Copyright (C) 2013 Intel Corporation
+ * Author: Bin Gao <bin.gao@intel.com>
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/kernel.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/apic.h>
+#include <asm/param.h>
+
+/* CPU reference clock frequency: in KHz */
+#define FREQ_83 83200
+#define FREQ_100 99840
+#define FREQ_133 133200
+#define FREQ_166 166400
+
+#define MAX_NUM_FREQS 8
+
+/*
+ * According to Intel 64 and IA-32 System Programming Guide,
+ * if MSR_PERF_STAT[31] is set, the maximum resolved bus ratio can be
+ * read in MSR_PLATFORM_ID[12:8], otherwise in MSR_PERF_STAT[44:40].
+ * Unfortunately some Intel Atom SoCs aren't quite compliant with this,
+ * so we need to manually differentiate SoC families. This is what the
+ * field msr_plat does.
+ */
+struct freq_desc {
+ u8 x86_family; /* CPU family */
+ u8 x86_model; /* model */
+ u8 msr_plat; /* 1: use MSR_PLATFORM_INFO, 0: MSR_IA32_PERF_STATUS */
+ u32 freqs[MAX_NUM_FREQS];
+};
+
+static struct freq_desc freq_desc_tables[] = {
+ /* PNW */
+ { 6, 0x27, 0, { 0, 0, 0, 0, 0, FREQ_100, 0, FREQ_83 } },
+ /* CLV+ */
+ { 6, 0x35, 0, { 0, FREQ_133, 0, 0, 0, FREQ_100, 0, FREQ_83 } },
+ /* TNG */
+ { 6, 0x4a, 1, { 0, FREQ_100, FREQ_133, 0, 0, 0, 0, 0 } },
+ /* VLV2 */
+ { 6, 0x37, 1, { 0, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } },
+ /* ANN */
+ { 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } },
+};
+
+static int match_cpu(u8 family, u8 model)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(freq_desc_tables); i++) {
+ if ((family == freq_desc_tables[i].x86_family) &&
+ (model == freq_desc_tables[i].x86_model))
+ return i;
+ }
+
+ return -1;
+}
+
+/* Map CPU reference clock freq ID(0-7) to CPU reference clock freq(KHz) */
+#define id_to_freq(cpu_index, freq_id) \
+ (freq_desc_tables[cpu_index].freqs[freq_id])
+
+/*
+ * Do MSR calibration only for known/supported CPUs.
+ * Return values:
+ * -1: CPU is unknown/unsupported for MSR based calibration
+ * 0: CPU is known/supported, but calibration failed
+ * 1: CPU is known/supported, and calibration succeeded
+ */
+int try_msr_calibrate_tsc(unsigned long *fast_calibrate)
+{
+ int cpu_index;
+ u32 lo, hi, ratio, freq_id, freq;
+
+ cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model);
+ if (cpu_index < 0)
+ return -1;
+
+ *fast_calibrate = 0;
+
+ if (freq_desc_tables[cpu_index].msr_plat) {
+ rdmsr(MSR_PLATFORM_INFO, lo, hi);
+ ratio = (lo >> 8) & 0x1f;
+ } else {
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ ratio = (hi >> 8) & 0x1f;
+ }
+ pr_info("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio);
+
+ if (!ratio)
+ return 0;
+
+ /* Get FSB FREQ ID */
+ rdmsr(MSR_FSB_FREQ, lo, hi);
+ freq_id = lo & 0x7;
+ freq = id_to_freq(cpu_index, freq_id);
+ pr_info("Resolved frequency ID: %u, frequency: %u KHz\n",
+ freq_id, freq);
+ if (!freq)
+ return 0;
+
+ /* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
+ *fast_calibrate = freq * ratio;
+ pr_info("TSC runs at %lu KHz\n", *fast_calibrate);
+
+#ifdef CONFIG_X86_LOCAL_APIC
+ lapic_timer_frequency = (freq * 1000) / HZ;
+ pr_info("lapic_timer_frequency = %d\n", lapic_timer_frequency);
+#endif
+
+ return 1;
+}
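A worked example with hypothetical register values, to make the formula concrete (the ratio and freq_id below are made up; the table entries are the ones added above):

/*
 * Family 6, model 0x37 (VLV2 row, msr_plat = 1):
 *   MSR_PLATFORM_INFO[12:8] = 16  ->  ratio   = 16
 *   MSR_FSB_FREQ[2:0]       = 1   ->  freq_id = 1 -> FREQ_100 = 99840 kHz
 *
 *   *fast_calibrate = 99840 * 16 = 1597440 kHz  (~1.6 GHz TSC)
 */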
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index adfdf56a3714..26488487bc61 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -16,7 +16,6 @@
*/
#include <linux/spinlock.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index 992f890283e9..f6584a90aba3 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -33,7 +33,7 @@
* and vice versa.
*/
-static unsigned long vsmp_save_fl(void)
+asmlinkage unsigned long vsmp_save_fl(void)
{
unsigned long flags = native_save_fl();
@@ -43,7 +43,7 @@ static unsigned long vsmp_save_fl(void)
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
-static void vsmp_restore_fl(unsigned long flags)
+__visible void vsmp_restore_fl(unsigned long flags)
{
if (flags & X86_EFLAGS_IF)
flags &= ~X86_EFLAGS_AC;
@@ -53,7 +53,7 @@ static void vsmp_restore_fl(unsigned long flags)
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
-static void vsmp_irq_disable(void)
+asmlinkage void vsmp_irq_disable(void)
{
unsigned long flags = native_save_fl();
@@ -61,7 +61,7 @@ static void vsmp_irq_disable(void)
}
PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
-static void vsmp_irq_enable(void)
+asmlinkage void vsmp_irq_enable(void)
{
unsigned long flags = native_save_fl();
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 021783b1f46a..e48b674639cc 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -136,9 +136,9 @@ void arch_teardown_msi_irq(unsigned int irq)
x86_msi.teardown_msi_irq(irq);
}
-void arch_restore_msi_irqs(struct pci_dev *dev, int irq)
+void arch_restore_msi_irqs(struct pci_dev *dev)
{
- x86_msi.restore_msi_irqs(dev, irq);
+ x86_msi.restore_msi_irqs(dev);
}
u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index 422fd8223470..a4b451c6addf 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -562,6 +562,16 @@ static void __init xstate_enable_boot_cpu(void)
if (cpu_has_xsaveopt && eagerfpu != DISABLE)
eagerfpu = ENABLE;
+ if (pcntxt_mask & XSTATE_EAGER) {
+ if (eagerfpu == DISABLE) {
+ pr_err("eagerfpu not present, disabling some xstate features: 0x%llx\n",
+ pcntxt_mask & XSTATE_EAGER);
+ pcntxt_mask &= ~XSTATE_EAGER;
+ } else {
+ eagerfpu = ENABLE;
+ }
+ }
+
pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n",
pcntxt_mask, xstate_size);
}
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index b89c5db2b832..287e4c85fff9 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -80,7 +80,7 @@ config KVM_MMU_AUDIT
depends on KVM && TRACEPOINTS
---help---
This option adds a R/W kVM module parameter 'mmu_audit', which allows
- audit KVM MMU at runtime.
+ auditing of KVM MMU events at runtime.
config KVM_DEVICE_ASSIGNMENT
bool "KVM legacy PCI device assignment support"
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index f1e4895174b2..a2a1bb7ed8c1 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -72,4 +72,12 @@ static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
return best && (best->ecx & bit(X86_FEATURE_PCID));
}
+static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ return best && (best->ecx & bit(X86_FEATURE_X2APIC));
+}
+
#endif
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 412a5aa0ef94..518d86471b76 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -37,6 +37,7 @@
#include "irq.h"
#include "i8254.h"
+#include "x86.h"
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
@@ -349,6 +350,23 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
atomic_set(&ps->pending, 0);
ps->irq_ack = 1;
+ /*
+ * Do not allow the guest to program periodic timers with a small
+ * interval, since the hrtimers are not throttled by the host
+ * scheduler.
+ */
+ if (ps->is_periodic) {
+ s64 min_period = min_timer_period_us * 1000LL;
+
+ if (ps->period < min_period) {
+ pr_info_ratelimited(
+ "kvm: requested %lld ns "
+ "i8254 timer period limited to %lld ns\n",
+ ps->period, min_period);
+ ps->period = min_period;
+ }
+ }
+
hrtimer_start(&ps->timer, ktime_add_ns(ktime_get(), interval),
HRTIMER_MODE_ABS);
}
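With the default min_timer_period_us of 500 (see the arch/x86/kvm/x86.c hunk below), the clamp above works out as follows (the guest-programmed value is hypothetical):

/*
 *   min_period = 500 * 1000 = 500000 ns
 *   guest asks for a 100 us (100000 ns) periodic PIT tick
 *   -> ps->period is raised to 500000 ns, i.e. at most ~2000
 *      interrupts/second per PIT channel instead of 10000.
 */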
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index dec48bfaddb8..9736529ade08 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -71,9 +71,6 @@
#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)
-static unsigned int min_timer_period_us = 500;
-module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
-
static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
*((u32 *) (apic->regs + reg_off)) = val;
@@ -435,7 +432,7 @@ static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
u8 val;
if (pv_eoi_get_user(vcpu, &val) < 0)
apic_debug("Can't read EOI MSR value: 0x%llx\n",
- (unsigned long long)vcpi->arch.pv_eoi.msr_val);
+ (unsigned long long)vcpu->arch.pv_eoi.msr_val);
return val & 0x1;
}
@@ -443,7 +440,7 @@ static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
apic_debug("Can't set EOI MSR value: 0x%llx\n",
- (unsigned long long)vcpi->arch.pv_eoi.msr_val);
+ (unsigned long long)vcpu->arch.pv_eoi.msr_val);
return;
}
__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
@@ -453,7 +450,7 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
{
if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
apic_debug("Can't clear EOI MSR value: 0x%llx\n",
- (unsigned long long)vcpi->arch.pv_eoi.msr_val);
+ (unsigned long long)vcpu->arch.pv_eoi.msr_val);
return;
}
__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
@@ -1350,8 +1347,12 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
return;
}
+ if (!kvm_vcpu_is_bsp(apic->vcpu))
+ value &= ~MSR_IA32_APICBASE_BSP;
+ vcpu->arch.apic_base = value;
+
/* update jump label if enable bit changes */
- if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
+ if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
if (value & MSR_IA32_APICBASE_ENABLE)
static_key_slow_dec_deferred(&apic_hw_disabled);
else
@@ -1359,10 +1360,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
recalculate_apic_map(vcpu->kvm);
}
- if (!kvm_vcpu_is_bsp(apic->vcpu))
- value &= ~MSR_IA32_APICBASE_BSP;
-
- vcpu->arch.apic_base = value;
if ((old_value ^ value) & X2APIC_ENABLE) {
if (value & X2APIC_ENABLE) {
u32 id = kvm_apic_id(apic);
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index c8b0d0d2da5c..6a11845fd8b9 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -65,7 +65,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map);
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
-void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
+int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s);
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 40772ef0f2b1..e50425d0f5f7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2659,6 +2659,9 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
int emulate = 0;
gfn_t pseudo_gfn;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return 0;
+
for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
if (iterator.level == level) {
mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
@@ -2829,6 +2832,9 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
bool ret = false;
u64 spte = 0ull;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return false;
+
if (!page_fault_can_be_fast(error_code))
return false;
@@ -3224,6 +3230,9 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
struct kvm_shadow_walk_iterator iterator;
u64 spte = 0ull;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return spte;
+
walk_shadow_page_lockless_begin(vcpu);
for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
if (!is_shadow_present_pte(spte))
@@ -4510,6 +4519,9 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
u64 spte;
int nr_sptes = 0;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return nr_sptes;
+
walk_shadow_page_lockless_begin(vcpu);
for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
sptes[iterator.level-1] = spte;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index ad75d77999d0..cba218a2f08d 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -569,6 +569,9 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
if (FNAME(gpte_changed)(vcpu, gw, top_level))
goto out_gpte_changed;
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ goto out_gpte_changed;
+
for (shadow_walk_init(&it, vcpu, addr);
shadow_walk_okay(&it) && it.level > gw->level;
shadow_walk_next(&it)) {
@@ -820,6 +823,11 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
*/
mmu_topup_memory_caches(vcpu);
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+ WARN_ON(1);
+ return;
+ }
+
spin_lock(&vcpu->kvm->mmu_lock);
for_each_shadow_entry(vcpu, gva, iterator) {
level = iterator.level;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c7168a5cff1b..e81df8fce027 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1671,6 +1671,19 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
mark_dirty(svm->vmcb, VMCB_ASID);
}
+static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
+{
+ return to_svm(vcpu)->vmcb->save.dr6;
+}
+
+static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ svm->vmcb->save.dr6 = value;
+ mark_dirty(svm->vmcb, VMCB_DR);
+}
+
static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -4286,6 +4299,8 @@ static struct kvm_x86_ops svm_x86_ops = {
.set_idt = svm_set_idt,
.get_gdt = svm_get_gdt,
.set_gdt = svm_set_gdt,
+ .get_dr6 = svm_get_dr6,
+ .set_dr6 = svm_set_dr6,
.set_dr7 = svm_set_dr7,
.cache_reg = svm_cache_reg,
.get_rflags = svm_get_rflags,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b2fe1c252f35..a06f101ef64b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -418,6 +418,8 @@ struct vcpu_vmx {
u64 msr_host_kernel_gs_base;
u64 msr_guest_kernel_gs_base;
#endif
+ u32 vm_entry_controls_shadow;
+ u32 vm_exit_controls_shadow;
/*
* loaded_vmcs points to the VMCS currently used in this vcpu. For a
* non-nested (L1) guest, it always points to vmcs01. For a nested
@@ -1056,7 +1058,9 @@ static inline bool is_exception(u32 intr_info)
== (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
}
-static void nested_vmx_vmexit(struct kvm_vcpu *vcpu);
+static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
+ u32 exit_intr_info,
+ unsigned long exit_qualification);
static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12,
u32 reason, unsigned long qualification);
@@ -1326,6 +1330,62 @@ static void vmcs_set_bits(unsigned long field, u32 mask)
vmcs_writel(field, vmcs_readl(field) | mask);
}
+static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
+{
+ vmcs_write32(VM_ENTRY_CONTROLS, val);
+ vmx->vm_entry_controls_shadow = val;
+}
+
+static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
+{
+ if (vmx->vm_entry_controls_shadow != val)
+ vm_entry_controls_init(vmx, val);
+}
+
+static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
+{
+ return vmx->vm_entry_controls_shadow;
+}
+
+
+static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
+{
+ vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
+}
+
+static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
+{
+ vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
+}
+
+static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
+{
+ vmcs_write32(VM_EXIT_CONTROLS, val);
+ vmx->vm_exit_controls_shadow = val;
+}
+
+static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
+{
+ if (vmx->vm_exit_controls_shadow != val)
+ vm_exit_controls_init(vmx, val);
+}
+
+static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
+{
+ return vmx->vm_exit_controls_shadow;
+}
+
+
+static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
+{
+ vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
+}
+
+static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
+{
+ vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
+}
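+
The payoff of these shadow fields shows up in the callers converted later in this patch: toggling a bit becomes a read-modify-write of the cached value, and the VMWRITE is only issued when the value actually changes. A sketch of the before/after, mirroring the vmx_set_efer() hunk below:

/*
 *   before: vmcs_write32(VM_ENTRY_CONTROLS,
 *                        vmcs_read32(VM_ENTRY_CONTROLS) | VM_ENTRY_IA32E_MODE);
 *   after:  vm_entry_controls_setbit(vmx, VM_ENTRY_IA32E_MODE);
 */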
+
static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
vmx->segment_cache.bitmask = 0;
@@ -1410,11 +1470,11 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
vmcs_write32(EXCEPTION_BITMAP, eb);
}
-static void clear_atomic_switch_msr_special(unsigned long entry,
- unsigned long exit)
+static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+ unsigned long entry, unsigned long exit)
{
- vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
- vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
+ vm_entry_controls_clearbit(vmx, entry);
+ vm_exit_controls_clearbit(vmx, exit);
}
static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
@@ -1425,14 +1485,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
switch (msr) {
case MSR_EFER:
if (cpu_has_load_ia32_efer) {
- clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+ clear_atomic_switch_msr_special(vmx,
+ VM_ENTRY_LOAD_IA32_EFER,
VM_EXIT_LOAD_IA32_EFER);
return;
}
break;
case MSR_CORE_PERF_GLOBAL_CTRL:
if (cpu_has_load_perf_global_ctrl) {
- clear_atomic_switch_msr_special(
+ clear_atomic_switch_msr_special(vmx,
VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
return;
@@ -1453,14 +1514,15 @@ static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
}
-static void add_atomic_switch_msr_special(unsigned long entry,
- unsigned long exit, unsigned long guest_val_vmcs,
- unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
+static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
+ unsigned long entry, unsigned long exit,
+ unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
+ u64 guest_val, u64 host_val)
{
vmcs_write64(guest_val_vmcs, guest_val);
vmcs_write64(host_val_vmcs, host_val);
- vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
- vmcs_set_bits(VM_EXIT_CONTROLS, exit);
+ vm_entry_controls_setbit(vmx, entry);
+ vm_exit_controls_setbit(vmx, exit);
}
static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
@@ -1472,7 +1534,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
switch (msr) {
case MSR_EFER:
if (cpu_has_load_ia32_efer) {
- add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
+ add_atomic_switch_msr_special(vmx,
+ VM_ENTRY_LOAD_IA32_EFER,
VM_EXIT_LOAD_IA32_EFER,
GUEST_IA32_EFER,
HOST_IA32_EFER,
@@ -1482,7 +1545,7 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
break;
case MSR_CORE_PERF_GLOBAL_CTRL:
if (cpu_has_load_perf_global_ctrl) {
- add_atomic_switch_msr_special(
+ add_atomic_switch_msr_special(vmx,
VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
GUEST_IA32_PERF_GLOBAL_CTRL,
@@ -1906,7 +1969,9 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
if (!(vmcs12->exception_bitmap & (1u << nr)))
return 0;
- nested_vmx_vmexit(vcpu);
+ nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
+ vmcs_read32(VM_EXIT_INTR_INFO),
+ vmcs_readl(EXIT_QUALIFICATION));
return 1;
}
@@ -2279,6 +2344,7 @@ static __init void nested_vmx_setup_ctls_msrs(void)
rdmsr(MSR_IA32_VMX_MISC, nested_vmx_misc_low, nested_vmx_misc_high);
nested_vmx_misc_low &= VMX_MISC_PREEMPTION_TIMER_RATE_MASK |
VMX_MISC_SAVE_EFER_LMA;
+ nested_vmx_misc_low |= VMX_MISC_ACTIVITY_HLT;
nested_vmx_misc_high = 0;
}
@@ -2295,32 +2361,10 @@ static inline u64 vmx_control_msr(u32 low, u32 high)
return low | ((u64)high << 32);
}
-/*
- * If we allow our guest to use VMX instructions (i.e., nested VMX), we should
- * also let it use VMX-specific MSRs.
- * vmx_get_vmx_msr() and vmx_set_vmx_msr() return 1 when we handled a
- * VMX-specific MSR, or 0 when we haven't (and the caller should handle it
- * like all other MSRs).
- */
+/* Returns 0 on success, non-0 otherwise. */
static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
- if (!nested_vmx_allowed(vcpu) && msr_index >= MSR_IA32_VMX_BASIC &&
- msr_index <= MSR_IA32_VMX_TRUE_ENTRY_CTLS) {
- /*
- * According to the spec, processors which do not support VMX
- * should throw a #GP(0) when VMX capability MSRs are read.
- */
- kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
- return 1;
- }
-
switch (msr_index) {
- case MSR_IA32_FEATURE_CONTROL:
- if (nested_vmx_allowed(vcpu)) {
- *pdata = to_vmx(vcpu)->nested.msr_ia32_feature_control;
- break;
- }
- return 0;
case MSR_IA32_VMX_BASIC:
/*
* This MSR reports some information about VMX support. We
@@ -2387,34 +2431,9 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
*pdata = nested_vmx_ept_caps;
break;
default:
- return 0;
- }
-
- return 1;
-}
-
-static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
-{
- u32 msr_index = msr_info->index;
- u64 data = msr_info->data;
- bool host_initialized = msr_info->host_initiated;
-
- if (!nested_vmx_allowed(vcpu))
- return 0;
-
- if (msr_index == MSR_IA32_FEATURE_CONTROL) {
- if (!host_initialized &&
- to_vmx(vcpu)->nested.msr_ia32_feature_control
- & FEATURE_CONTROL_LOCKED)
- return 0;
- to_vmx(vcpu)->nested.msr_ia32_feature_control = data;
return 1;
}
- /*
- * No need to treat VMX capability MSRs specially: If we don't handle
- * them, handle_wrmsr will #GP(0), which is correct (they are readonly)
- */
return 0;
}
@@ -2460,13 +2479,20 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
case MSR_IA32_SYSENTER_ESP:
data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
+ case MSR_IA32_FEATURE_CONTROL:
+ if (!nested_vmx_allowed(vcpu))
+ return 1;
+ data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
+ break;
+ case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
+ if (!nested_vmx_allowed(vcpu))
+ return 1;
+ return vmx_get_vmx_msr(vcpu, msr_index, pdata);
case MSR_TSC_AUX:
if (!to_vmx(vcpu)->rdtscp_enabled)
return 1;
/* Otherwise falls through */
default:
- if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
- return 0;
msr = find_msr_entry(to_vmx(vcpu), msr_index);
if (msr) {
data = msr->data;
@@ -2479,6 +2505,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
return 0;
}
+static void vmx_leave_nested(struct kvm_vcpu *vcpu);
+
/*
* Writes msr value into into the appropriate "register".
* Returns 0 on success, non-0 otherwise.
@@ -2533,6 +2561,17 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_IA32_TSC_ADJUST:
ret = kvm_set_msr_common(vcpu, msr_info);
break;
+ case MSR_IA32_FEATURE_CONTROL:
+ if (!nested_vmx_allowed(vcpu) ||
+ (to_vmx(vcpu)->nested.msr_ia32_feature_control &
+ FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
+ return 1;
+ vmx->nested.msr_ia32_feature_control = data;
+ if (msr_info->host_initiated && data == 0)
+ vmx_leave_nested(vcpu);
+ break;
+ case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
+ return 1; /* they are read-only */
case MSR_TSC_AUX:
if (!vmx->rdtscp_enabled)
return 1;
@@ -2541,8 +2580,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
/* Otherwise falls through */
default:
- if (vmx_set_vmx_msr(vcpu, msr_info))
- break;
msr = find_msr_entry(vmx, msr_index);
if (msr) {
msr->data = data;
@@ -3182,14 +3219,10 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
vmx_load_host_state(to_vmx(vcpu));
vcpu->arch.efer = efer;
if (efer & EFER_LMA) {
- vmcs_write32(VM_ENTRY_CONTROLS,
- vmcs_read32(VM_ENTRY_CONTROLS) |
- VM_ENTRY_IA32E_MODE);
+ vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
msr->data = efer;
} else {
- vmcs_write32(VM_ENTRY_CONTROLS,
- vmcs_read32(VM_ENTRY_CONTROLS) &
- ~VM_ENTRY_IA32E_MODE);
+ vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
msr->data = efer & ~EFER_LME;
}
@@ -3217,9 +3250,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
static void exit_lmode(struct kvm_vcpu *vcpu)
{
- vmcs_write32(VM_ENTRY_CONTROLS,
- vmcs_read32(VM_ENTRY_CONTROLS)
- & ~VM_ENTRY_IA32E_MODE);
+ vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
}
@@ -4346,10 +4377,11 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
++vmx->nmsrs;
}
- vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
+
+ vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
/* 22.2.1, 20.8.1 */
- vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
+ vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
set_cr4_guest_host_mask(vmx);
@@ -4360,7 +4392,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- u64 msr;
+ struct msr_data apic_base_msr;
vmx->rmode.vm86_active = 0;
@@ -4368,10 +4400,11 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
kvm_set_cr8(&vmx->vcpu, 0);
- msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+ apic_base_msr.data = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
if (kvm_vcpu_is_bsp(&vmx->vcpu))
- msr |= MSR_IA32_APICBASE_BSP;
- kvm_set_apic_base(&vmx->vcpu, msr);
+ apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
+ apic_base_msr.host_initiated = true;
+ kvm_set_apic_base(&vmx->vcpu, &apic_base_msr);
vmx_segment_cache_clear(vmx);
@@ -4588,15 +4621,12 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu)) {
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
if (to_vmx(vcpu)->nested.nested_run_pending)
return 0;
if (nested_exit_on_nmi(vcpu)) {
- nested_vmx_vmexit(vcpu);
- vmcs12->vm_exit_reason = EXIT_REASON_EXCEPTION_NMI;
- vmcs12->vm_exit_intr_info = NMI_VECTOR |
- INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK;
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
+ NMI_VECTOR | INTR_TYPE_NMI_INTR |
+ INTR_INFO_VALID_MASK, 0);
/*
* The NMI-triggered VM exit counts as injection:
* clear this one and block further NMIs.
@@ -4618,15 +4648,11 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu)) {
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
if (to_vmx(vcpu)->nested.nested_run_pending)
return 0;
if (nested_exit_on_intr(vcpu)) {
- nested_vmx_vmexit(vcpu);
- vmcs12->vm_exit_reason =
- EXIT_REASON_EXTERNAL_INTERRUPT;
- vmcs12->vm_exit_intr_info = 0;
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT,
+ 0, 0);
/*
* fall through to normal code, but now in L1, not L2
*/
@@ -4812,7 +4838,8 @@ static int handle_exception(struct kvm_vcpu *vcpu)
dr6 = vmcs_readl(EXIT_QUALIFICATION);
if (!(vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
- vcpu->arch.dr6 = dr6 | DR6_FIXED_1;
+ vcpu->arch.dr6 &= ~15;
+ vcpu->arch.dr6 |= dr6;
kvm_queue_exception(vcpu, DB_VECTOR);
return 1;
}
@@ -5080,14 +5107,27 @@ static int handle_dr(struct kvm_vcpu *vcpu)
reg = DEBUG_REG_ACCESS_REG(exit_qualification);
if (exit_qualification & TYPE_MOV_FROM_DR) {
unsigned long val;
- if (!kvm_get_dr(vcpu, dr, &val))
- kvm_register_write(vcpu, reg, val);
+
+ if (kvm_get_dr(vcpu, dr, &val))
+ return 1;
+ kvm_register_write(vcpu, reg, val);
} else
- kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
+ if (kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]))
+ return 1;
+
skip_emulated_instruction(vcpu);
return 1;
}
+static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.dr6;
+}
+
+static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
+{
+}
+
static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
{
vmcs_writel(GUEST_DR7, val);
@@ -6460,11 +6500,8 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
int size;
u8 b;
- if (nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING))
- return 1;
-
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
- return 0;
+ return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -6628,6 +6665,13 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
u32 exit_reason = vmx->exit_reason;
+ trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
+ vmcs_readl(EXIT_QUALIFICATION),
+ vmx->idt_vectoring_info,
+ intr_info,
+ vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
+ KVM_ISA_VMX);
+
if (vmx->nested.nested_run_pending)
return 0;
@@ -6777,7 +6821,9 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
return handle_invalid_guest_state(vcpu);
if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
- nested_vmx_vmexit(vcpu);
+ nested_vmx_vmexit(vcpu, exit_reason,
+ vmcs_read32(VM_EXIT_INTR_INFO),
+ vmcs_readl(EXIT_QUALIFICATION));
return 1;
}
@@ -7332,8 +7378,8 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
free_vpid(vmx);
- free_nested(vmx);
free_loaded_vmcs(vmx->loaded_vmcs);
+ free_nested(vmx);
kfree(vmx->guest_msrs);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vmx);
@@ -7518,15 +7564,14 @@ static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{
- struct vmcs12 *vmcs12;
- nested_vmx_vmexit(vcpu);
- vmcs12 = get_vmcs12(vcpu);
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ u32 exit_reason;
if (fault->error_code & PFERR_RSVD_MASK)
- vmcs12->vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
+ exit_reason = EXIT_REASON_EPT_MISCONFIG;
else
- vmcs12->vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
- vmcs12->exit_qualification = vcpu->arch.exit_qualification;
+ exit_reason = EXIT_REASON_EPT_VIOLATION;
+ nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification);
vmcs12->guest_physical_address = fault->address;
}
@@ -7564,7 +7609,9 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
/* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
if (vmcs12->exception_bitmap & (1u << PF_VECTOR))
- nested_vmx_vmexit(vcpu);
+ nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
+ vmcs_read32(VM_EXIT_INTR_INFO),
+ vmcs_readl(EXIT_QUALIFICATION));
else
kvm_inject_page_fault(vcpu, fault);
}
@@ -7706,6 +7753,11 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
else
vmcs_write64(APIC_ACCESS_ADDR,
page_to_phys(vmx->nested.apic_access_page));
+ } else if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm)) {
+ exec_control |=
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+ vmcs_write64(APIC_ACCESS_ADDR,
+ page_to_phys(vcpu->kvm->arch.apic_access_page));
}
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
@@ -7759,12 +7811,12 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
exit_control = vmcs_config.vmexit_ctrl;
if (vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER)
exit_control |= VM_EXIT_SAVE_VMX_PREEMPTION_TIMER;
- vmcs_write32(VM_EXIT_CONTROLS, exit_control);
+ vm_exit_controls_init(vmx, exit_control);
/* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
* emulated by vmx_set_efer(), below.
*/
- vmcs_write32(VM_ENTRY_CONTROLS,
+ vm_entry_controls_init(vmx,
(vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
~VM_ENTRY_IA32E_MODE) |
(vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
@@ -7882,7 +7934,8 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
return 1;
}
- if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE) {
+ if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
+ vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) {
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
return 1;
}
@@ -7994,8 +8047,6 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
enter_guest_mode(vcpu);
- vmx->nested.nested_run_pending = 1;
-
vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
cpu = get_cpu();
@@ -8011,6 +8062,11 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
prepare_vmcs02(vcpu, vmcs12);
+ if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
+ return kvm_emulate_halt(vcpu);
+
+ vmx->nested.nested_run_pending = 1;
+
/*
* Note no nested_vmx_succeed or nested_vmx_fail here. At this point
* we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
@@ -8110,7 +8166,9 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
* exit-information fields only. Other fields are modified by L1 with VMWRITE,
* which already writes to vmcs12 directly.
*/
-static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
+static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ u32 exit_reason, u32 exit_intr_info,
+ unsigned long exit_qualification)
{
/* update guest state fields: */
vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
@@ -8162,6 +8220,10 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
vmcs12->guest_pending_dbg_exceptions =
vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
+ if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+ vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
+ else
+ vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
if ((vmcs12->pin_based_vm_exec_control & PIN_BASED_VMX_PREEMPTION_TIMER) &&
(vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER))
@@ -8186,7 +8248,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs12->vm_entry_controls =
(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
- (vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
+ (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
/* TODO: These cannot have changed unless we have MSR bitmaps and
* the relevant bit asks not to trap the change */
@@ -8201,10 +8263,10 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
/* update exit information fields: */
- vmcs12->vm_exit_reason = to_vmx(vcpu)->exit_reason;
- vmcs12->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ vmcs12->vm_exit_reason = exit_reason;
+ vmcs12->exit_qualification = exit_qualification;
- vmcs12->vm_exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+ vmcs12->vm_exit_intr_info = exit_intr_info;
if ((vmcs12->vm_exit_intr_info &
(INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
(INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK))
@@ -8283,8 +8345,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
kvm_set_cr4(vcpu, vmcs12->host_cr4);
- if (nested_cpu_has_ept(vmcs12))
- nested_ept_uninit_mmu_context(vcpu);
+ nested_ept_uninit_mmu_context(vcpu);
kvm_set_cr3(vcpu, vmcs12->host_cr3);
kvm_mmu_reset_context(vcpu);
@@ -8371,7 +8432,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
* and modify vmcs12 to make it see what it would expect to see there if
* L2 was its real guest. Must only be called when in L2 (is_guest_mode())
*/
-static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
+static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
+ u32 exit_intr_info,
+ unsigned long exit_qualification)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int cpu;
@@ -8381,7 +8444,15 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
WARN_ON_ONCE(vmx->nested.nested_run_pending);
leave_guest_mode(vcpu);
- prepare_vmcs12(vcpu, vmcs12);
+ prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
+ exit_qualification);
+
+ trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
+ vmcs12->exit_qualification,
+ vmcs12->idt_vectoring_info_field,
+ vmcs12->vm_exit_intr_info,
+ vmcs12->vm_exit_intr_error_code,
+ KVM_ISA_VMX);
cpu = get_cpu();
vmx->loaded_vmcs = &vmx->vmcs01;
@@ -8390,6 +8461,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
vcpu->cpu = cpu;
put_cpu();
+ vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
+ vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS));
vmx_segment_cache_clear(vmx);
/* if no vmcs02 cache requested, remove the one we used */
@@ -8425,6 +8498,16 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
}
/*
+ * Forcibly leave nested mode in order to be able to reset the VCPU later on.
+ */
+static void vmx_leave_nested(struct kvm_vcpu *vcpu)
+{
+ if (is_guest_mode(vcpu))
+ nested_vmx_vmexit(vcpu, -1, 0, 0);
+ free_nested(to_vmx(vcpu));
+}
+
+/*
* L1's failure to enter L2 is a subset of a normal exit, as explained in
* 23.7 "VM-entry failures during or after loading guest state" (this also
* lists the acceptable exit-reason and exit-qualification parameters).
@@ -8487,6 +8570,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
.set_idt = vmx_set_idt,
.get_gdt = vmx_get_gdt,
.set_gdt = vmx_set_gdt,
+ .get_dr6 = vmx_get_dr6,
+ .set_dr6 = vmx_set_dr6,
.set_dr7 = vmx_set_dr7,
.cache_reg = vmx_cache_reg,
.get_rflags = vmx_get_rflags,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5d004da1e35d..39c28f09dfd5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -94,6 +94,9 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops);
static bool ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
+unsigned int min_timer_period_us = 500;
+module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
+
bool kvm_has_tsc_control;
EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
u32 kvm_max_guest_tsc_khz;
@@ -254,10 +257,26 @@ u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);
-void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
-{
- /* TODO: reserve bits check */
- kvm_lapic_set_base(vcpu, data);
+int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+ u64 old_state = vcpu->arch.apic_base &
+ (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
+ u64 new_state = msr_info->data &
+ (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
+ u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) |
+ 0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE);
+
+ if (!msr_info->host_initiated &&
+ ((msr_info->data & reserved_bits) != 0 ||
+ new_state == X2APIC_ENABLE ||
+ (new_state == MSR_IA32_APICBASE_ENABLE &&
+ old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) ||
+ (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) &&
+ old_state == 0)))
+ return 1;
+
+ kvm_lapic_set_base(vcpu, msr_info->data);
+ return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);
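
The reworked kvm_set_apic_base() refuses APIC mode transitions the SDM forbids: the x2APIC bit may not be set without the global enable bit, x2APIC cannot be entered directly from the disabled state, and leaving x2APIC must pass through the disabled state rather than going straight back to xAPIC. A minimal user-space sketch of just that transition rule, with the two mode bits spelled out as local constants and the reserved-bit/CPUID checks omitted:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define APICBASE_ENABLE  (1ULL << 11)	/* xAPIC global enable */
#define APICBASE_X2APIC  (1ULL << 10)	/* x2APIC mode enable  */

/* Return true when the ENABLE/X2APIC transition old -> new is allowed. */
static bool apic_base_transition_ok(uint64_t old_base, uint64_t new_base)
{
	uint64_t old_state = old_base & (APICBASE_ENABLE | APICBASE_X2APIC);
	uint64_t new_state = new_base & (APICBASE_ENABLE | APICBASE_X2APIC);

	if (new_state == APICBASE_X2APIC)
		return false;	/* x2APIC bit without the xAPIC enable bit */
	if (new_state == APICBASE_ENABLE &&
	    old_state == (APICBASE_ENABLE | APICBASE_X2APIC))
		return false;	/* x2APIC -> xAPIC must pass through disabled */
	if (new_state == (APICBASE_ENABLE | APICBASE_X2APIC) && old_state == 0)
		return false;	/* disabled -> x2APIC skips xAPIC */
	return true;
}

int main(void)
{
	printf("%d\n", apic_base_transition_ok(0, APICBASE_ENABLE));	/* 1 */
	printf("%d\n", apic_base_transition_ok(0, APICBASE_ENABLE | APICBASE_X2APIC));	/* 0 */
	return 0;
}
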
@@ -719,6 +738,12 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);
+static void kvm_update_dr6(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
+ kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6);
+}
+
static void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
unsigned long dr7;
@@ -747,6 +772,7 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
if (val & 0xffffffff00000000ULL)
return -1; /* #GP */
vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
+ kvm_update_dr6(vcpu);
break;
case 5:
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
@@ -788,7 +814,10 @@ static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
return 1;
/* fall through */
case 6:
- *val = vcpu->arch.dr6;
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+ *val = vcpu->arch.dr6;
+ else
+ *val = kvm_x86_ops->get_dr6(vcpu);
break;
case 5:
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
@@ -836,11 +865,12 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
* kvm-specific. Those are put in the beginning of the list.
*/
-#define KVM_SAVE_MSRS_BEGIN 10
+#define KVM_SAVE_MSRS_BEGIN 12
static u32 msrs_to_save[] = {
MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
+ HV_X64_MSR_TIME_REF_COUNT, HV_X64_MSR_REFERENCE_TSC,
HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
MSR_KVM_PV_EOI_EN,
MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
@@ -1275,8 +1305,6 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
kvm->arch.last_tsc_write = data;
kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
- /* Reset of TSC must disable overshoot protection below */
- vcpu->arch.hv_clock.tsc_timestamp = 0;
vcpu->arch.last_guest_tsc = data;
/* Keep track of which generation this VCPU has synchronized to */
@@ -1484,7 +1512,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- s64 kernel_ns, max_kernel_ns;
+ s64 kernel_ns;
u64 tsc_timestamp, host_tsc;
struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
@@ -1543,37 +1571,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
if (!vcpu->pv_time_enabled)
return 0;
- /*
- * Time as measured by the TSC may go backwards when resetting the base
- * tsc_timestamp. The reason for this is that the TSC resolution is
- * higher than the resolution of the other clock scales. Thus, many
- * possible measurments of the TSC correspond to one measurement of any
- * other clock, and so a spread of values is possible. This is not a
- * problem for the computation of the nanosecond clock; with TSC rates
- * around 1GHZ, there can only be a few cycles which correspond to one
- * nanosecond value, and any path through this code will inevitably
- * take longer than that. However, with the kernel_ns value itself,
- * the precision may be much lower, down to HZ granularity. If the
- * first sampling of TSC against kernel_ns ends in the low part of the
- * range, and the second in the high end of the range, we can get:
- *
- * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
- *
- * As the sampling errors potentially range in the thousands of cycles,
- * it is possible such a time value has already been observed by the
- * guest. To protect against this, we must compute the system time as
- * observed by the guest and ensure the new system time is greater.
- */
- max_kernel_ns = 0;
- if (vcpu->hv_clock.tsc_timestamp) {
- max_kernel_ns = vcpu->last_guest_tsc -
- vcpu->hv_clock.tsc_timestamp;
- max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
- vcpu->hv_clock.tsc_to_system_mul,
- vcpu->hv_clock.tsc_shift);
- max_kernel_ns += vcpu->last_kernel_ns;
- }
-
if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
&vcpu->hv_clock.tsc_shift,
@@ -1581,14 +1578,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hw_tsc_khz = this_tsc_khz;
}
- /* with a master <monotonic time, tsc value> tuple,
- * pvclock clock reads always increase at the (scaled) rate
- * of guest TSC - no need to deal with sampling errors.
- */
- if (!use_master_clock) {
- if (max_kernel_ns > kernel_ns)
- kernel_ns = max_kernel_ns;
- }
/* With all the info we got, fill in the values */
vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
@@ -1826,6 +1815,8 @@ static bool kvm_hv_msr_partition_wide(u32 msr)
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
case HV_X64_MSR_HYPERCALL:
+ case HV_X64_MSR_REFERENCE_TSC:
+ case HV_X64_MSR_TIME_REF_COUNT:
r = true;
break;
}
@@ -1865,6 +1856,21 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
if (__copy_to_user((void __user *)addr, instructions, 4))
return 1;
kvm->arch.hv_hypercall = data;
+ mark_page_dirty(kvm, gfn);
+ break;
+ }
+ case HV_X64_MSR_REFERENCE_TSC: {
+ u64 gfn;
+ HV_REFERENCE_TSC_PAGE tsc_ref;
+ memset(&tsc_ref, 0, sizeof(tsc_ref));
+ kvm->arch.hv_tsc_page = data;
+ if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
+ break;
+ gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
+ if (kvm_write_guest(kvm, data,
+ &tsc_ref, sizeof(tsc_ref)))
+ return 1;
+ mark_page_dirty(kvm, gfn);
break;
}
default:
@@ -1879,19 +1885,21 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
switch (msr) {
case HV_X64_MSR_APIC_ASSIST_PAGE: {
+ u64 gfn;
unsigned long addr;
if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
vcpu->arch.hv_vapic = data;
break;
}
- addr = gfn_to_hva(vcpu->kvm, data >>
- HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
+ gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT;
+ addr = gfn_to_hva(vcpu->kvm, gfn);
if (kvm_is_error_hva(addr))
return 1;
if (__clear_user((void __user *)addr, PAGE_SIZE))
return 1;
vcpu->arch.hv_vapic = data;
+ mark_page_dirty(vcpu->kvm, gfn);
break;
}
case HV_X64_MSR_EOI:
@@ -2017,8 +2025,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case 0x200 ... 0x2ff:
return set_msr_mtrr(vcpu, msr, data);
case MSR_IA32_APICBASE:
- kvm_set_apic_base(vcpu, data);
- break;
+ return kvm_set_apic_base(vcpu, msr_info);
case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
return kvm_x2apic_msr_write(vcpu, msr, data);
case MSR_IA32_TSCDEADLINE:
@@ -2291,6 +2298,14 @@ static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case HV_X64_MSR_HYPERCALL:
data = kvm->arch.hv_hypercall;
break;
+ case HV_X64_MSR_TIME_REF_COUNT: {
+ data =
+ div_u64(get_kernel_ns() + kvm->arch.kvmclock_offset, 100);
+ break;
+ }
+ case HV_X64_MSR_REFERENCE_TSC:
+ data = kvm->arch.hv_tsc_page;
+ break;
default:
vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
return 1;
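
Hyper-V defines HV_X64_MSR_TIME_REF_COUNT as a partition-wide counter in 100 ns units, which is why the handler above scales the kvmclock-adjusted nanosecond time down by 100. A small stand-alone illustration of that scaling; the clock source and the zero offset here are placeholders for the example, not what KVM uses internally:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Convert a nanosecond timestamp into Hyper-V reference ticks (100 ns units),
 * mirroring div_u64(get_kernel_ns() + kvmclock_offset, 100) above. */
static uint64_t hv_ref_ticks(uint64_t ns, int64_t partition_offset_ns)
{
	return (ns + partition_offset_ns) / 100;
}

int main(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	uint64_t ns = (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;

	printf("reference count: %llu\n", (unsigned long long)hv_ref_ticks(ns, 0));
	return 0;
}
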
@@ -2601,6 +2616,7 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_GET_TSC_KHZ:
case KVM_CAP_KVMCLOCK_CTRL:
case KVM_CAP_READONLY_MEM:
+ case KVM_CAP_HYPERV_TIME:
#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
case KVM_CAP_ASSIGN_DEV_IRQ:
case KVM_CAP_PCI_2_3:
@@ -2972,8 +2988,11 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
struct kvm_debugregs *dbgregs)
{
+ unsigned long val;
+
memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
- dbgregs->dr6 = vcpu->arch.dr6;
+ _kvm_get_dr(vcpu, 6, &val);
+ dbgregs->dr6 = val;
dbgregs->dr7 = vcpu->arch.dr7;
dbgregs->flags = 0;
memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
@@ -2987,7 +3006,9 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
vcpu->arch.dr6 = dbgregs->dr6;
+ kvm_update_dr6(vcpu);
vcpu->arch.dr7 = dbgregs->dr7;
+ kvm_update_dr7(vcpu);
return 0;
}
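
The DR6 handling above splits ownership: while userspace debugs the guest with hardware breakpoints (KVM_GUESTDBG_USE_HW_BP), the guest's DR6 is kept in the software copy vcpu->arch.dr6; otherwise it is read back from the virtualization hardware via the new get_dr6 hook and pushed out through kvm_update_dr6(). A toy model of that dispatch, with the hardware register simulated by a plain variable:

#include <stdbool.h>
#include <stdio.h>

static unsigned long hw_dr6 = 0xffff0ff0;	/* stands in for vmx_get_dr6() */

/* Use the software copy while the host debugger owns the debug registers,
 * the (simulated) hardware value otherwise. */
static unsigned long get_guest_dr6(bool host_uses_hw_bp, unsigned long soft_dr6)
{
	return host_uses_hw_bp ? soft_dr6 : hw_dr6;
}

int main(void)
{
	printf("%#lx\n", get_guest_dr6(true, 0xffff4ff0));	/* software copy  */
	printf("%#lx\n", get_guest_dr6(false, 0xffff4ff0));	/* hardware value */
	return 0;
}
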
@@ -5834,6 +5855,11 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
kvm_apic_update_tmr(vcpu, tmr);
}
+/*
+ * Returns 1 to let __vcpu_run() continue the guest execution loop without
+ * exiting to userspace. Otherwise, the value will be returned to
+ * userspace.
+ */
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
int r;
@@ -6089,7 +6115,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
}
if (need_resched()) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
- kvm_resched(vcpu);
+ cond_resched();
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
}
}
@@ -6401,6 +6427,7 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
+ struct msr_data apic_base_msr;
int mmu_reset_needed = 0;
int pending_vec, max_bits, idx;
struct desc_ptr dt;
@@ -6424,7 +6451,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
kvm_x86_ops->set_efer(vcpu, sregs->efer);
- kvm_set_apic_base(vcpu, sregs->apic_base);
+ apic_base_msr.data = sregs->apic_base;
+ apic_base_msr.host_initiated = true;
+ kvm_set_apic_base(vcpu, &apic_base_msr);
mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
@@ -6717,6 +6746,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
vcpu->arch.dr6 = DR6_FIXED_1;
+ kvm_update_dr6(vcpu);
vcpu->arch.dr7 = DR7_FIXED_1;
kvm_update_dr7(vcpu);
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 587fb9ede436..8da5823bcde6 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -125,5 +125,7 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
#define KVM_SUPPORTED_XCR0 (XSTATE_FP | XSTATE_SSE | XSTATE_YMM)
extern u64 host_xcr0;
+extern unsigned int min_timer_period_us;
+
extern struct static_key kvm_no_apic_vcpu;
#endif
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index bdf8532494fe..ad1fb5f53925 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -233,13 +233,13 @@ static void lguest_end_context_switch(struct task_struct *next)
* flags word contains all kind of stuff, but in practice Linux only cares
* about the interrupt flag. Our "save_flags()" just returns that.
*/
-static unsigned long save_fl(void)
+asmlinkage unsigned long lguest_save_fl(void)
{
return lguest_data.irq_enabled;
}
/* Interrupts go off... */
-static void irq_disable(void)
+asmlinkage void lguest_irq_disable(void)
{
lguest_data.irq_enabled = 0;
}
@@ -253,8 +253,8 @@ static void irq_disable(void)
* PV_CALLEE_SAVE_REGS_THUNK(), which pushes %eax onto the stack, calls the
* C function, then restores it.
*/
-PV_CALLEE_SAVE_REGS_THUNK(save_fl);
-PV_CALLEE_SAVE_REGS_THUNK(irq_disable);
+PV_CALLEE_SAVE_REGS_THUNK(lguest_save_fl);
+PV_CALLEE_SAVE_REGS_THUNK(lguest_irq_disable);
/*:*/
/* These are in i386_head.S */
@@ -1291,9 +1291,9 @@ __init void lguest_init(void)
*/
/* Interrupt-related operations */
- pv_irq_ops.save_fl = PV_CALLEE_SAVE(save_fl);
+ pv_irq_ops.save_fl = PV_CALLEE_SAVE(lguest_save_fl);
pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(lg_restore_fl);
- pv_irq_ops.irq_disable = PV_CALLEE_SAVE(irq_disable);
+ pv_irq_ops.irq_disable = PV_CALLEE_SAVE(lguest_irq_disable);
pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(lg_irq_enable);
pv_irq_ops.safe_halt = lguest_safe_halt;
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index 992d63bb154f..eabcb6e6a900 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -24,7 +24,7 @@ lib-$(CONFIG_SMP) += rwlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o
-obj-y += msr.o msr-reg.o msr-reg-export.o
+obj-y += msr.o msr-reg.o msr-reg-export.o hash.o
ifeq ($(CONFIG_X86_32),y)
obj-y += atomic64_32.o
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index a30ca15be21c..dee945d55594 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -186,7 +186,7 @@ ENTRY(copy_user_generic_unrolled)
30: shll $6,%ecx
addl %ecx,%edx
jmp 60f
-40: lea (%rdx,%rcx,8),%rdx
+40: leal (%rdx,%rcx,8),%edx
jmp 60f
50: movl %ecx,%edx
60: jmp copy_user_handle_tail /* ecx is zerorest also */
@@ -236,8 +236,6 @@ ENDPROC(copy_user_generic_unrolled)
ENTRY(copy_user_generic_string)
CFI_STARTPROC
ASM_STAC
- andl %edx,%edx
- jz 4f
cmpl $8,%edx
jb 2f /* less than 8 bytes, go to byte copy loop */
ALIGN_DESTINATION
@@ -249,12 +247,12 @@ ENTRY(copy_user_generic_string)
2: movl %edx,%ecx
3: rep
movsb
-4: xorl %eax,%eax
+ xorl %eax,%eax
ASM_CLAC
ret
.section .fixup,"ax"
-11: lea (%rdx,%rcx,8),%rcx
+11: leal (%rdx,%rcx,8),%ecx
12: movl %ecx,%edx /* ecx is zerorest also */
jmp copy_user_handle_tail
.previous
@@ -279,12 +277,10 @@ ENDPROC(copy_user_generic_string)
ENTRY(copy_user_enhanced_fast_string)
CFI_STARTPROC
ASM_STAC
- andl %edx,%edx
- jz 2f
movl %edx,%ecx
1: rep
movsb
-2: xorl %eax,%eax
+ xorl %eax,%eax
ASM_CLAC
ret
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 7c3bee636e2f..39d6a3db0b96 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -16,7 +16,6 @@
#include <linux/timex.h>
#include <linux/preempt.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <asm/processor.h>
#include <asm/delay.h>
diff --git a/arch/x86/lib/hash.c b/arch/x86/lib/hash.c
new file mode 100644
index 000000000000..3056702e81fb
--- /dev/null
+++ b/arch/x86/lib/hash.c
@@ -0,0 +1,88 @@
+/*
+ * Some portions derived from code covered by the following notice:
+ *
+ * Copyright (c) 2010-2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/hash.h>
+
+#include <asm/processor.h>
+#include <asm/cpufeature.h>
+#include <asm/hash.h>
+
+static inline u32 crc32_u32(u32 crc, u32 val)
+{
+ asm ("crc32l %1,%0\n" : "+r" (crc) : "rm" (val));
+ return crc;
+}
+
+static u32 intel_crc4_2_hash(const void *data, u32 len, u32 seed)
+{
+ const u32 *p32 = (const u32 *) data;
+ u32 i, tmp = 0;
+
+ for (i = 0; i < len / 4; i++)
+ seed = crc32_u32(*p32++, seed);
+
+ switch (3 - (len & 0x03)) {
+ case 0:
+ tmp |= *((const u8 *) p32 + 2) << 16;
+ /* fallthrough */
+ case 1:
+ tmp |= *((const u8 *) p32 + 1) << 8;
+ /* fallthrough */
+ case 2:
+ tmp |= *((const u8 *) p32);
+ seed = crc32_u32(tmp, seed);
+ default:
+ break;
+ }
+
+ return seed;
+}
+
+static u32 intel_crc4_2_hash2(const u32 *data, u32 len, u32 seed)
+{
+ const u32 *p32 = (const u32 *) data;
+ u32 i;
+
+ for (i = 0; i < len; i++)
+ seed = crc32_u32(*p32++, seed);
+
+ return seed;
+}
+
+void setup_arch_fast_hash(struct fast_hash_ops *ops)
+{
+ if (cpu_has_xmm4_2) {
+ ops->hash = intel_crc4_2_hash;
+ ops->hash2 = intel_crc4_2_hash2;
+ }
+}
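
The new lib/hash.c folds its input through the SSE4.2 crc32l instruction one 32-bit word at a time and only installs itself when the CPU advertises SSE4.2. A user-space sketch of the same folding with simplified tail handling; function names are illustrative, build with gcc -msse4.2 and run on an SSE4.2-capable CPU:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Accumulate 32 bits of data into a CRC32C state with the crc32l instruction,
 * just like crc32_u32() above. */
static inline uint32_t crc32c_u32(uint32_t crc, uint32_t val)
{
	asm("crc32l %1, %0" : "+r" (crc) : "rm" (val));
	return crc;
}

/* Hash a buffer 32 bits at a time; trailing bytes are folded as one last word. */
static uint32_t crc32c_hash(const void *data, uint32_t len, uint32_t seed)
{
	const uint8_t *p = data;
	uint32_t word;

	while (len >= 4) {
		memcpy(&word, p, 4);	/* alignment-safe load */
		seed = crc32c_u32(seed, word);
		p += 4;
		len -= 4;
	}
	if (len) {
		word = 0;
		memcpy(&word, p, len);
		seed = crc32c_u32(seed, word);
	}
	return seed;
}

int main(void)
{
	static const char msg[] = "fast hash";

	printf("0x%08x\n", crc32c_hash(msg, sizeof(msg) - 1, 0));
	return 0;
}
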
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 533a85e3a07e..1a2be7c6895d 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -346,8 +346,8 @@ AVXcode: 1
17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
18: Grp16 (1A)
19:
-1a:
-1b:
+1a: BNDCL Ev,Gv | BNDCU Ev,Gv | BNDMOV Gv,Ev | BNDLDX Gv,Ev,Gv
+1b: BNDCN Ev,Gv | BNDMOV Ev,Gv | BNDMK Gv,Ev | BNDSTX Ev,GV,Gv
1c:
1d:
1e:
diff --git a/arch/x86/math-emu/errors.c b/arch/x86/math-emu/errors.c
index 59d353d2c599..a5449089cd9f 100644
--- a/arch/x86/math-emu/errors.c
+++ b/arch/x86/math-emu/errors.c
@@ -330,11 +330,6 @@ asmlinkage void FPU_exception(int n)
RE_ENTRANT_CHECK_OFF;
if ((~control_word & n & CW_Exceptions) || (n == EX_INTERNAL)) {
-#ifdef PRINT_MESSAGES
- /* My message from the sponsor */
- printk(FPU_VERSION " " __DATE__ " (C) W. Metzenthen.\n");
-#endif /* PRINT_MESSAGES */
-
/* Get a name string for error reporting */
for (i = 0; exception_names[i].type; i++)
if ((exception_names[i].type & n) ==
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9ff85bb8dd69..6dea040cc3a1 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -641,6 +641,20 @@ no_context(struct pt_regs *regs, unsigned long error_code,
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs)) {
+ /*
+ * Any interrupt that takes a fault gets the fixup. This makes
+		 * the below recursive fault logic only apply to faults from
+ * task context.
+ */
+ if (in_interrupt())
+ return;
+
+ /*
+ * Per the above we're !in_interrupt(), aka. task context.
+ *
+ * In this case we need to make sure we're not recursively
+ * faulting through the emulate_vsyscall() logic.
+ */
if (current_thread_info()->sig_on_uaccess_error && signal) {
tsk->thread.trap_nr = X86_TRAP_PF;
tsk->thread.error_code = error_code | PF_USER;
@@ -649,6 +663,10 @@ no_context(struct pt_regs *regs, unsigned long error_code,
/* XXX: hwpoison faults will set the wrong code. */
force_sig_info_fault(signal, si_code, address, tsk, 0);
}
+
+ /*
+ * Barring that, we can do the fixup and be happy.
+ */
return;
}
@@ -983,6 +1001,12 @@ static int fault_in_kernel_space(unsigned long address)
static inline bool smap_violation(int error_code, struct pt_regs *regs)
{
+ if (!IS_ENABLED(CONFIG_X86_SMAP))
+ return false;
+
+ if (!static_cpu_has(X86_FEATURE_SMAP))
+ return false;
+
if (error_code & PF_USER)
return false;
@@ -1069,11 +1093,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
if (unlikely(error_code & PF_RSVD))
pgtable_bad(regs, error_code, address);
- if (static_cpu_has(X86_FEATURE_SMAP)) {
- if (unlikely(smap_violation(error_code, regs))) {
- bad_area_nosemaphore(regs, error_code, address);
- return;
- }
+ if (unlikely(smap_violation(error_code, regs))) {
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
}
/*
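
Folding the IS_ENABLED(CONFIG_X86_SMAP) and static_cpu_has(X86_FEATURE_SMAP) tests into smap_violation() keeps the call site in __do_page_fault() unconditional while still letting the compiler (and, in the kernel, the alternatives machinery) discard the check when SMAP is absent. A toy version of that guard pattern, with the kernel's config and CPU-feature helpers replaced by plain stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_SMAP_ENABLED 1				/* stands in for IS_ENABLED() */
static bool cpu_has_smap(void) { return true; }		/* stands in for static_cpu_has() */

static bool smap_violation(bool user_mode_fault)
{
	if (!CONFIG_SMAP_ENABLED)
		return false;	/* compile-time constant: the whole body folds away */
	if (!cpu_has_smap())
		return false;	/* runtime CPU check */
	if (user_mode_fault)
		return false;	/* SMAP only guards kernel accesses to user pages */
	return true;		/* simplified: the real check also inspects EFLAGS.AC */
}

int main(void)
{
	printf("kernel-mode fault flagged: %d\n", smap_violation(false));
	return 0;
}
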
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index dd74e46828c0..207d9aef662d 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
pte_t pte = gup_get_pte(ptep);
struct page *page;
+ /* Similar to the PMD case, NUMA hinting must take slow path */
+ if (pte_numa(pte)) {
+ pte_unmap(ptep);
+ return 0;
+ }
+
if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
pte_unmap(ptep);
return 0;
@@ -102,8 +108,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
static inline void get_head_page_multiple(struct page *page, int nr)
{
- VM_BUG_ON(page != compound_head(page));
- VM_BUG_ON(page_count(page) == 0);
+ VM_BUG_ON_PAGE(page != compound_head(page), page);
+ VM_BUG_ON_PAGE(page_count(page) == 0, page);
atomic_add(nr, &page->_count);
SetPageReferenced(page);
}
@@ -129,7 +135,7 @@ static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
head = pte_page(pte);
page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
do {
- VM_BUG_ON(compound_head(page) != head);
+ VM_BUG_ON_PAGE(compound_head(page) != head, page);
pages[*nr] = page;
if (PageTail(page))
get_huge_page_tail(page);
@@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
if (pmd_none(pmd) || pmd_trans_splitting(pmd))
return 0;
if (unlikely(pmd_large(pmd))) {
+ /*
+ * NUMA hinting faults need to be handled in the GUP
+ * slowpath for accounting purposes and so that they
+ * can be serialised against THP migration.
+ */
+ if (pmd_numa(pmd))
+ return 0;
if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
return 0;
} else {
@@ -199,7 +212,7 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
head = pte_page(pte);
page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
do {
- VM_BUG_ON(compound_head(page) != head);
+ VM_BUG_ON_PAGE(compound_head(page) != head, page);
pages[*nr] = page;
if (PageTail(page))
get_huge_page_tail(page);
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index 9d980d88b747..8c9f647ff9e1 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -87,9 +87,7 @@ int pmd_huge_support(void)
}
#endif
-/* x86_64 also uses this file */
-
-#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
@@ -99,7 +97,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
info.flags = 0;
info.length = len;
- info.low_limit = TASK_UNMAPPED_BASE;
+ info.low_limit = current->mm->mmap_legacy_base;
info.high_limit = TASK_SIZE;
info.align_mask = PAGE_MASK & ~huge_page_mask(h);
info.align_offset = 0;
@@ -172,8 +170,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
return hugetlb_get_unmapped_area_topdown(file, addr, len,
pgoff, flags);
}
-
-#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
+#endif /* CONFIG_HUGETLB_PAGE */
#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 4287f1ffba7e..e39504878aec 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -665,7 +665,7 @@ void __init initmem_init(void)
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
- memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
+ memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
sparse_memory_present_with_active_regions(0);
#ifdef CONFIG_FLATMEM
@@ -806,6 +806,9 @@ void __init mem_init(void)
BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP
+#ifdef CONFIG_RANDOMIZE_BASE
+ BUILD_BUG_ON(CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE);
+#endif
#ifdef CONFIG_HIGHMEM
BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 104d56a9245f..f35c66c5959a 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -643,7 +643,7 @@ kernel_physical_mapping_init(unsigned long start,
#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
- memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
+ memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
}
#endif
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index e5d5e2ce9f77..637ab34ed632 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -11,7 +11,6 @@
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 8dabbed409ee..1e9da795767a 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -74,7 +74,7 @@ static void __init do_one_pass(u64 pattern, u64 start, u64 end)
u64 i;
phys_addr_t this_start, this_end;
- for_each_free_mem_range(i, MAX_NUMNODES, &this_start, &this_end, NULL) {
+ for_each_free_mem_range(i, NUMA_NO_NODE, &this_start, &this_end, NULL) {
this_start = clamp_t(phys_addr_t, this_start, start, end);
this_end = clamp_t(phys_addr_t, this_end, start, end);
if (this_start < this_end) {
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 24aec58d6afd..27aa0455fab3 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -211,9 +211,13 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
*/
nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
if (!nd_pa) {
- pr_err("Cannot find %zu bytes in node %d\n",
- nd_size, nid);
- return;
+ nd_pa = __memblock_alloc_base(nd_size, SMP_CACHE_BYTES,
+ MEMBLOCK_ALLOC_ACCESSIBLE);
+ if (!nd_pa) {
+ pr_err("Cannot find %zu bytes in node %d\n",
+ nd_size, nid);
+ return;
+ }
}
nd = __va(nd_pa);
@@ -487,7 +491,8 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
for (i = 0; i < mi->nr_blks; i++) {
struct numa_memblk *mb = &mi->blk[i];
- memblock_set_node(mb->start, mb->end - mb->start, mb->nid);
+ memblock_set_node(mb->start, mb->end - mb->start,
+ &memblock.memory, mb->nid);
}
/*
@@ -549,6 +554,41 @@ static void __init numa_init_array(void)
}
}
+static void __init numa_clear_kernel_node_hotplug(void)
+{
+ int i, nid;
+ nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
+ unsigned long start, end;
+ struct memblock_type *type = &memblock.reserved;
+
+ /*
+	 * At this time, all memory regions reserved by memblock are
+	 * used by the kernel. Setting the nid in memblock.reserved
+	 * marks all the nodes the kernel resides in.
+ */
+ for (i = 0; i < numa_meminfo.nr_blks; i++) {
+ struct numa_memblk *mb = &numa_meminfo.blk[i];
+ memblock_set_node(mb->start, mb->end - mb->start,
+ &memblock.reserved, mb->nid);
+ }
+
+ /* Mark all kernel nodes. */
+ for (i = 0; i < type->cnt; i++)
+ node_set(type->regions[i].nid, numa_kernel_nodes);
+
+ /* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
+ for (i = 0; i < numa_meminfo.nr_blks; i++) {
+ nid = numa_meminfo.blk[i].nid;
+ if (!node_isset(nid, numa_kernel_nodes))
+ continue;
+
+ start = numa_meminfo.blk[i].start;
+ end = numa_meminfo.blk[i].end;
+
+ memblock_clear_hotplug(start, end - start);
+ }
+}
+
static int __init numa_init(int (*init_func)(void))
{
int i;
@@ -561,7 +601,12 @@ static int __init numa_init(int (*init_func)(void))
nodes_clear(node_possible_map);
nodes_clear(node_online_map);
memset(&numa_meminfo, 0, sizeof(numa_meminfo));
- WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
+ WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.memory,
+ MAX_NUMNODES));
+ WARN_ON(memblock_set_node(0, ULLONG_MAX, &memblock.reserved,
+ MAX_NUMNODES));
+ /* In case that parsing SRAT failed. */
+ WARN_ON(memblock_clear_hotplug(0, ULLONG_MAX));
numa_reset_distance();
ret = init_func();
@@ -597,6 +642,16 @@ static int __init numa_init(int (*init_func)(void))
numa_clear_node(i);
}
numa_init_array();
+
+ /*
+	 * Very early on, the kernel has to use some memory, e.g. for
+	 * loading the kernel image. We cannot prevent this anyway. So any
+ * node the kernel resides in should be un-hotpluggable.
+ *
+ * And when we come here, numa_init() won't fail.
+ */
+ numa_clear_kernel_node_hotplug();
+
return 0;
}
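
numa_clear_kernel_node_hotplug() boils down to two passes: mark every node that owns a memblock.reserved region (the kernel lives there), then strip the hotplug attribute from memory on those nodes. A simplified stand-alone model of that logic over plain arrays; the region layout and node count are made up for the example:

#include <stdbool.h>
#include <stdio.h>

#define MAX_NODES 4

struct region { unsigned long start, end; int nid; bool hotplug; };

/* Any node that contains a reserved (kernel-used) region must not be
 * advertised as hot-pluggable. */
static void clear_kernel_node_hotplug(struct region *mem, int nr_mem,
				      const struct region *rsv, int nr_rsv)
{
	bool kernel_node[MAX_NODES] = { false };
	int i;

	for (i = 0; i < nr_rsv; i++)		/* pass 1: mark kernel nodes */
		kernel_node[rsv[i].nid] = true;

	for (i = 0; i < nr_mem; i++)		/* pass 2: clear hotplug there */
		if (kernel_node[mem[i].nid])
			mem[i].hotplug = false;
}

int main(void)
{
	struct region mem[] = {
		{ 0x00000000UL, 0x40000000UL, 0, true },
		{ 0x40000000UL, 0x80000000UL, 1, true },
	};
	struct region rsv[] = {
		{ 0x01000000UL, 0x02000000UL, 0, false },	/* kernel image, node 0 */
	};

	clear_kernel_node_hotplug(mem, 2, rsv, 1);
	printf("node0 hotplug=%d node1 hotplug=%d\n", mem[0].hotplug, mem[1].hotplug);
	return 0;
}
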
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 0342d27ca798..47b6436e41c2 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -52,6 +52,8 @@ void memory_present(int nid, unsigned long start, unsigned long end)
nid, start, end);
printk(KERN_DEBUG " Setting physnode_map array to node %d for pfns:\n", nid);
printk(KERN_DEBUG " ");
+ start = round_down(start, PAGES_PER_SECTION);
+ end = round_up(end, PAGES_PER_SECTION);
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
physnode_map[pfn / PAGES_PER_SECTION] = nid;
printk(KERN_CONT "%lx ", pfn);
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index d0b1773d9d2e..461bc8289024 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -8,7 +8,6 @@
#include <linux/kthread.h>
#include <linux/random.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index bb32480c2d71..b3b19f46c016 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -30,6 +30,7 @@
*/
struct cpa_data {
unsigned long *vaddr;
+ pgd_t *pgd;
pgprot_t mask_set;
pgprot_t mask_clr;
int numpages;
@@ -322,17 +323,9 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
return prot;
}
-/*
- * Lookup the page table entry for a virtual address. Return a pointer
- * to the entry and the level of the mapping.
- *
- * Note: We return pud and pmd either when the entry is marked large
- * or when the present bit is not set. Otherwise we would return a
- * pointer to a nonexisting mapping.
- */
-pte_t *lookup_address(unsigned long address, unsigned int *level)
+static pte_t *__lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ unsigned int *level)
{
- pgd_t *pgd = pgd_offset_k(address);
pud_t *pud;
pmd_t *pmd;
@@ -361,8 +354,31 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
return pte_offset_kernel(pmd, address);
}
+
+/*
+ * Lookup the page table entry for a virtual address. Return a pointer
+ * to the entry and the level of the mapping.
+ *
+ * Note: We return pud and pmd either when the entry is marked large
+ * or when the present bit is not set. Otherwise we would return a
+ * pointer to a nonexisting mapping.
+ */
+pte_t *lookup_address(unsigned long address, unsigned int *level)
+{
+ return __lookup_address_in_pgd(pgd_offset_k(address), address, level);
+}
EXPORT_SYMBOL_GPL(lookup_address);
+static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
+ unsigned int *level)
+{
+ if (cpa->pgd)
+ return __lookup_address_in_pgd(cpa->pgd + pgd_index(address),
+ address, level);
+
+ return lookup_address(address, level);
+}
+
/*
* This is necessary because __pa() does not work on some
* kinds of memory, like vmalloc() or the alloc_remap()
@@ -437,7 +453,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* Check for races, another CPU might have split this page
* up already:
*/
- tmp = lookup_address(address, &level);
+ tmp = _lookup_address_cpa(cpa, address, &level);
if (tmp != kpte)
goto out_unlock;
@@ -543,7 +559,8 @@ out_unlock:
}
static int
-__split_large_page(pte_t *kpte, unsigned long address, struct page *base)
+__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
+ struct page *base)
{
pte_t *pbase = (pte_t *)page_address(base);
unsigned long pfn, pfninc = 1;
@@ -556,7 +573,7 @@ __split_large_page(pte_t *kpte, unsigned long address, struct page *base)
* Check for races, another CPU might have split this page
* up for us already:
*/
- tmp = lookup_address(address, &level);
+ tmp = _lookup_address_cpa(cpa, address, &level);
if (tmp != kpte) {
spin_unlock(&pgd_lock);
return 1;
@@ -632,7 +649,8 @@ __split_large_page(pte_t *kpte, unsigned long address, struct page *base)
return 0;
}
-static int split_large_page(pte_t *kpte, unsigned long address)
+static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
+ unsigned long address)
{
struct page *base;
@@ -644,15 +662,390 @@ static int split_large_page(pte_t *kpte, unsigned long address)
if (!base)
return -ENOMEM;
- if (__split_large_page(kpte, address, base))
+ if (__split_large_page(cpa, kpte, address, base))
__free_page(base);
return 0;
}
+static bool try_to_free_pte_page(pte_t *pte)
+{
+ int i;
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ if (!pte_none(pte[i]))
+ return false;
+
+ free_page((unsigned long)pte);
+ return true;
+}
+
+static bool try_to_free_pmd_page(pmd_t *pmd)
+{
+ int i;
+
+ for (i = 0; i < PTRS_PER_PMD; i++)
+ if (!pmd_none(pmd[i]))
+ return false;
+
+ free_page((unsigned long)pmd);
+ return true;
+}
+
+static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
+{
+ pte_t *pte = pte_offset_kernel(pmd, start);
+
+ while (start < end) {
+ set_pte(pte, __pte(0));
+
+ start += PAGE_SIZE;
+ pte++;
+ }
+
+ if (try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd))) {
+ pmd_clear(pmd);
+ return true;
+ }
+ return false;
+}
+
+static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
+ unsigned long start, unsigned long end)
+{
+ if (unmap_pte_range(pmd, start, end))
+ if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
+ pud_clear(pud);
+}
+
+static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
+{
+ pmd_t *pmd = pmd_offset(pud, start);
+
+ /*
+ * Not on a 2MB page boundary?
+ */
+ if (start & (PMD_SIZE - 1)) {
+ unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
+ unsigned long pre_end = min_t(unsigned long, end, next_page);
+
+ __unmap_pmd_range(pud, pmd, start, pre_end);
+
+ start = pre_end;
+ pmd++;
+ }
+
+ /*
+ * Try to unmap in 2M chunks.
+ */
+ while (end - start >= PMD_SIZE) {
+ if (pmd_large(*pmd))
+ pmd_clear(pmd);
+ else
+ __unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
+
+ start += PMD_SIZE;
+ pmd++;
+ }
+
+ /*
+ * 4K leftovers?
+ */
+ if (start < end)
+ return __unmap_pmd_range(pud, pmd, start, end);
+
+ /*
+	 * Try again to free the PMD page if we haven't succeeded above.
+ */
+ if (!pud_none(*pud))
+ if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
+ pud_clear(pud);
+}
+
+static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
+{
+ pud_t *pud = pud_offset(pgd, start);
+
+ /*
+ * Not on a GB page boundary?
+ */
+ if (start & (PUD_SIZE - 1)) {
+ unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
+ unsigned long pre_end = min_t(unsigned long, end, next_page);
+
+ unmap_pmd_range(pud, start, pre_end);
+
+ start = pre_end;
+ pud++;
+ }
+
+ /*
+ * Try to unmap in 1G chunks?
+ */
+ while (end - start >= PUD_SIZE) {
+
+ if (pud_large(*pud))
+ pud_clear(pud);
+ else
+ unmap_pmd_range(pud, start, start + PUD_SIZE);
+
+ start += PUD_SIZE;
+ pud++;
+ }
+
+ /*
+ * 2M leftovers?
+ */
+ if (start < end)
+ unmap_pmd_range(pud, start, end);
+
+ /*
+ * No need to try to free the PUD page because we'll free it in
+ * populate_pgd's error path
+ */
+}
+
+static int alloc_pte_page(pmd_t *pmd)
+{
+ pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+ if (!pte)
+ return -1;
+
+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
+ return 0;
+}
+
+static int alloc_pmd_page(pud_t *pud)
+{
+ pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+ if (!pmd)
+ return -1;
+
+ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
+ return 0;
+}
+
+static void populate_pte(struct cpa_data *cpa,
+ unsigned long start, unsigned long end,
+ unsigned num_pages, pmd_t *pmd, pgprot_t pgprot)
+{
+ pte_t *pte;
+
+ pte = pte_offset_kernel(pmd, start);
+
+ while (num_pages-- && start < end) {
+
+ /* deal with the NX bit */
+ if (!(pgprot_val(pgprot) & _PAGE_NX))
+ cpa->pfn &= ~_PAGE_NX;
+
+ set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));
+
+ start += PAGE_SIZE;
+ cpa->pfn += PAGE_SIZE;
+ pte++;
+ }
+}
+
+static int populate_pmd(struct cpa_data *cpa,
+ unsigned long start, unsigned long end,
+ unsigned num_pages, pud_t *pud, pgprot_t pgprot)
+{
+ unsigned int cur_pages = 0;
+ pmd_t *pmd;
+
+ /*
+ * Not on a 2M boundary?
+ */
+ if (start & (PMD_SIZE - 1)) {
+ unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
+ unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
+
+ pre_end = min_t(unsigned long, pre_end, next_page);
+ cur_pages = (pre_end - start) >> PAGE_SHIFT;
+ cur_pages = min_t(unsigned int, num_pages, cur_pages);
+
+ /*
+ * Need a PTE page?
+ */
+ pmd = pmd_offset(pud, start);
+ if (pmd_none(*pmd))
+ if (alloc_pte_page(pmd))
+ return -1;
+
+ populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);
+
+ start = pre_end;
+ }
+
+ /*
+ * We mapped them all?
+ */
+ if (num_pages == cur_pages)
+ return cur_pages;
+
+ while (end - start >= PMD_SIZE) {
+
+ /*
+ * We cannot use a 1G page so allocate a PMD page if needed.
+ */
+ if (pud_none(*pud))
+ if (alloc_pmd_page(pud))
+ return -1;
+
+ pmd = pmd_offset(pud, start);
+
+ set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot)));
+
+ start += PMD_SIZE;
+ cpa->pfn += PMD_SIZE;
+ cur_pages += PMD_SIZE >> PAGE_SHIFT;
+ }
+
+ /*
+ * Map trailing 4K pages.
+ */
+ if (start < end) {
+ pmd = pmd_offset(pud, start);
+ if (pmd_none(*pmd))
+ if (alloc_pte_page(pmd))
+ return -1;
+
+ populate_pte(cpa, start, end, num_pages - cur_pages,
+ pmd, pgprot);
+ }
+ return num_pages;
+}
+
+static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
+ pgprot_t pgprot)
+{
+ pud_t *pud;
+ unsigned long end;
+ int cur_pages = 0;
+
+ end = start + (cpa->numpages << PAGE_SHIFT);
+
+ /*
+ * Not on a Gb page boundary? => map everything up to it with
+ * smaller pages.
+ */
+ if (start & (PUD_SIZE - 1)) {
+ unsigned long pre_end;
+ unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
+
+ pre_end = min_t(unsigned long, end, next_page);
+ cur_pages = (pre_end - start) >> PAGE_SHIFT;
+ cur_pages = min_t(int, (int)cpa->numpages, cur_pages);
+
+ pud = pud_offset(pgd, start);
+
+ /*
+ * Need a PMD page?
+ */
+ if (pud_none(*pud))
+ if (alloc_pmd_page(pud))
+ return -1;
+
+ cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
+ pud, pgprot);
+ if (cur_pages < 0)
+ return cur_pages;
+
+ start = pre_end;
+ }
+
+ /* We mapped them all? */
+ if (cpa->numpages == cur_pages)
+ return cur_pages;
+
+ pud = pud_offset(pgd, start);
+
+ /*
+ * Map everything starting from the Gb boundary, possibly with 1G pages
+ */
+ while (end - start >= PUD_SIZE) {
+ set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot)));
+
+ start += PUD_SIZE;
+ cpa->pfn += PUD_SIZE;
+ cur_pages += PUD_SIZE >> PAGE_SHIFT;
+ pud++;
+ }
+
+ /* Map trailing leftover */
+ if (start < end) {
+ int tmp;
+
+ pud = pud_offset(pgd, start);
+ if (pud_none(*pud))
+ if (alloc_pmd_page(pud))
+ return -1;
+
+ tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
+ pud, pgprot);
+ if (tmp < 0)
+ return cur_pages;
+
+ cur_pages += tmp;
+ }
+ return cur_pages;
+}
+
+/*
+ * Restrictions for the kernel page table do not necessarily apply when mapping
+ * in an alternate PGD.
+ * an alternate PGD.
+ */
+static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
+{
+ pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
+ bool allocd_pgd = false;
+ pgd_t *pgd_entry;
+ pud_t *pud = NULL; /* shut up gcc */
+ int ret;
+
+ pgd_entry = cpa->pgd + pgd_index(addr);
+
+ /*
+ * Allocate a PUD page and hand it down for mapping.
+ */
+ if (pgd_none(*pgd_entry)) {
+ pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+ if (!pud)
+ return -1;
+
+ set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE));
+ allocd_pgd = true;
+ }
+
+ pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
+ pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);
+
+ ret = populate_pud(cpa, addr, pgd_entry, pgprot);
+ if (ret < 0) {
+ unmap_pud_range(pgd_entry, addr,
+ addr + (cpa->numpages << PAGE_SHIFT));
+
+ if (allocd_pgd) {
+ /*
+ * If I allocated this PUD page, I can just as well
+ * free it in this error path.
+ */
+ pgd_clear(pgd_entry);
+ free_page((unsigned long)pud);
+ }
+ return ret;
+ }
+ cpa->numpages = ret;
+ return 0;
+}
+
static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
int primary)
{
+ if (cpa->pgd)
+ return populate_pgd(cpa, vaddr);
+
/*
* Ignore all non primary paths.
*/
@@ -697,7 +1090,7 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
else
address = *cpa->vaddr;
repeat:
- kpte = lookup_address(address, &level);
+ kpte = _lookup_address_cpa(cpa, address, &level);
if (!kpte)
return __cpa_process_fault(cpa, address, primary);
@@ -761,7 +1154,7 @@ repeat:
/*
* We have to split the large page:
*/
- err = split_large_page(kpte, address);
+ err = split_large_page(cpa, kpte, address);
if (!err) {
/*
* Do a global flush tlb after splitting the large page
@@ -910,6 +1303,8 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
int ret, cache, checkalias;
unsigned long baddr = 0;
+ memset(&cpa, 0, sizeof(cpa));
+
/*
* Check, if we are requested to change a not supported
* feature:
@@ -1356,6 +1751,7 @@ static int __set_pages_p(struct page *page, int numpages)
{
unsigned long tempaddr = (unsigned long) page_address(page);
struct cpa_data cpa = { .vaddr = &tempaddr,
+ .pgd = NULL,
.numpages = numpages,
.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
.mask_clr = __pgprot(0),
@@ -1374,6 +1770,7 @@ static int __set_pages_np(struct page *page, int numpages)
{
unsigned long tempaddr = (unsigned long) page_address(page);
struct cpa_data cpa = { .vaddr = &tempaddr,
+ .pgd = NULL,
.numpages = numpages,
.mask_set = __pgprot(0),
.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
@@ -1434,6 +1831,36 @@ bool kernel_page_present(struct page *page)
#endif /* CONFIG_DEBUG_PAGEALLOC */
+int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
+ unsigned numpages, unsigned long page_flags)
+{
+ int retval = -EINVAL;
+
+ struct cpa_data cpa = {
+ .vaddr = &address,
+ .pfn = pfn,
+ .pgd = pgd,
+ .numpages = numpages,
+ .mask_set = __pgprot(0),
+ .mask_clr = __pgprot(0),
+ .flags = 0,
+ };
+
+ if (!(__supported_pte_mask & _PAGE_NX))
+ goto out;
+
+ if (!(page_flags & _PAGE_NX))
+ cpa.mask_clr = __pgprot(_PAGE_NX);
+
+ cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);
+
+ retval = __change_page_attr_set_clr(&cpa, 0);
+ __flush_tlb_all();
+
+out:
+ return retval;
+}
+
/*
* The testcases use internal knowledge of the implementation that shouldn't
* be exposed to the rest of the kernel. Include these directly here.
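
populate_pud() and populate_pmd() above follow the familiar head/middle/tail split: map the unaligned head with smaller pages, the aligned middle with large pages, then the remainder with small pages again. A toy walker that only prints the resulting plan at 2 MB granularity; no page tables are touched and only the PMD_SIZE arithmetic is mirrored:

#include <stdio.h>

#define PMD_SHIFT 21
#define PMD_SIZE  (1UL << PMD_SHIFT)
#define PMD_MASK  (~(PMD_SIZE - 1))

static void plan_mapping(unsigned long start, unsigned long end)
{
	/* Unaligned head: 4K pages up to the next 2M boundary (or the end). */
	if (start & (PMD_SIZE - 1)) {
		unsigned long pre_end = (start + PMD_SIZE) & PMD_MASK;

		if (pre_end > end)
			pre_end = end;
		printf("4K pages: %#lx - %#lx\n", start, pre_end);
		start = pre_end;
	}

	/* Aligned middle: whole 2M pages. */
	while (end - start >= PMD_SIZE) {
		printf("2M page:  %#lx - %#lx\n", start, start + PMD_SIZE);
		start += PMD_SIZE;
	}

	/* Tail: whatever is left, again with 4K pages. */
	if (start < end)
		printf("4K pages: %#lx - %#lx\n", start, end);
}

int main(void)
{
	plan_mapping(0x1ff000UL, 0x600000UL);
	return 0;
}
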
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 266ca912f62e..1953e9c9391a 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -42,15 +42,25 @@ static __init inline int srat_disabled(void)
return acpi_numa < 0;
}
-/* Callback for SLIT parsing */
+/*
+ * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
+ * I/O localities since SRAT does not list them. I/O localities are
+ * not supported at this point.
+ */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
int i, j;
- for (i = 0; i < slit->locality_count; i++)
- for (j = 0; j < slit->locality_count; j++)
+ for (i = 0; i < slit->locality_count; i++) {
+ if (pxm_to_node(i) == NUMA_NO_NODE)
+ continue;
+ for (j = 0; j < slit->locality_count; j++) {
+ if (pxm_to_node(j) == NUMA_NO_NODE)
+ continue;
numa_set_distance(pxm_to_node(i), pxm_to_node(j),
slit->entry[slit->locality_count * i + j]);
+ }
+ }
}
/* Callback for Proximity Domain -> x2APIC mapping */
@@ -181,6 +191,11 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
(unsigned long long) start, (unsigned long long) end - 1,
hotpluggable ? " hotplug" : "");
+ /* Mark hotplug range in memblock. */
+ if (hotpluggable && memblock_mark_hotplug(start, ma->length))
+ pr_warn("SRAT: Failed to mark hotplug range [mem %#010Lx-%#010Lx] in memblock\n",
+ (unsigned long long)start, (unsigned long long)end - 1);
+
return 0;
out_err_bad_srat:
bad_srat();
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index ae699b3bbac8..dd8dda167a24 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -103,7 +103,7 @@ static void flush_tlb_func(void *info)
if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
return;
- count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
if (f->flush_end == TLB_FLUSH_ALL)
local_flush_tlb();
@@ -131,7 +131,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
info.flush_start = start;
info.flush_end = end;
- count_vm_event(NR_TLB_REMOTE_FLUSH);
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
if (is_uv_system()) {
unsigned int cpu;
@@ -151,44 +151,19 @@ void flush_tlb_current_task(void)
preempt_disable();
- count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
local_flush_tlb();
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
preempt_enable();
}
-/*
- * It can find out the THP large page, or
- * HUGETLB page in tlb_flush when THP disabled
- */
-static inline unsigned long has_large_page(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- unsigned long addr = ALIGN(start, HPAGE_SIZE);
- for (; addr < end; addr += HPAGE_SIZE) {
- pgd = pgd_offset(mm, addr);
- if (likely(!pgd_none(*pgd))) {
- pud = pud_offset(pgd, addr);
- if (likely(!pud_none(*pud))) {
- pmd = pmd_offset(pud, addr);
- if (likely(!pmd_none(*pmd)))
- if (pmd_large(*pmd))
- return addr;
- }
- }
- }
- return 0;
-}
-
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long vmflag)
{
unsigned long addr;
unsigned act_entries, tlb_entries = 0;
+ unsigned long nr_base_pages;
preempt_disable();
if (current->active_mm != mm)
@@ -210,21 +185,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
tlb_entries = tlb_lli_4k[ENTRIES];
else
tlb_entries = tlb_lld_4k[ENTRIES];
+
/* Assume all of TLB entries was occupied by this task */
- act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;
+ act_entries = tlb_entries >> tlb_flushall_shift;
+ act_entries = mm->total_vm > act_entries ? act_entries : mm->total_vm;
+ nr_base_pages = (end - start) >> PAGE_SHIFT;
/* tlb_flushall_shift is on balance point, details in commit log */
- if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) {
- count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ if (nr_base_pages > act_entries) {
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
local_flush_tlb();
} else {
- if (has_large_page(mm, start, end)) {
- local_flush_tlb();
- goto flush_all;
- }
/* flush range by one by one 'invlpg' */
for (addr = start; addr < end; addr += PAGE_SIZE) {
- count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
__flush_tlb_single(addr);
}
@@ -262,7 +236,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
static void do_flush_tlb_all(void *info)
{
- count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
__flush_tlb_all();
if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
leave_mm(smp_processor_id());
@@ -270,7 +244,7 @@ static void do_flush_tlb_all(void *info)
void flush_tlb_all(void)
{
- count_vm_event(NR_TLB_REMOTE_FLUSH);
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
on_each_cpu(do_flush_tlb_all, NULL, 1);
}
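
The rewritten flush_tlb_mm_range() heuristic compares the number of 4K pages in the range against the TLB capacity scaled down by tlb_flushall_shift (further clamped by the task's total_vm), instead of walking the range looking for huge pages. A small stand-alone reproduction of just that decision; the TLB parameters below are examples, not values detected from a real CPU:

#include <stdio.h>

/* Return 1 when a full local TLB flush is considered cheaper than per-page invlpg. */
static int want_full_flush(unsigned long nr_base_pages, unsigned long total_vm,
			   unsigned long tlb_entries, int tlb_flushall_shift)
{
	unsigned long act_entries = tlb_entries >> tlb_flushall_shift;

	if (total_vm < act_entries)
		act_entries = total_vm;
	return nr_base_pages > act_entries;
}

int main(void)
{
	/* e.g. 512 4K entries with shift 6: anything above 8 pages flushes everything */
	printf("%d\n", want_full_flush(4, 1024, 512, 6));	/* 0: invlpg loop */
	printf("%d\n", want_full_flush(64, 1024, 512, 6));	/* 1: full flush  */
	return 0;
}
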
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 26328e800869..4ed75dd81d05 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -359,15 +359,21 @@ void bpf_jit_compile(struct sk_filter *fp)
EMIT2(0x89, 0xd0); /* mov %edx,%eax */
break;
case BPF_S_ALU_MOD_K: /* A %= K; */
+ if (K == 1) {
+ CLEAR_A();
+ break;
+ }
EMIT2(0x31, 0xd2); /* xor %edx,%edx */
EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
EMIT2(0xf7, 0xf1); /* div %ecx */
EMIT2(0x89, 0xd0); /* mov %edx,%eax */
break;
- case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
- EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
- EMIT(K, 4);
- EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
+ case BPF_S_ALU_DIV_K: /* A /= K */
+ if (K == 1)
+ break;
+ EMIT2(0x31, 0xd2); /* xor %edx,%edx */
+ EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
+ EMIT2(0xf7, 0xf1); /* div %ecx */
break;
case BPF_S_ALU_AND_X:
seen |= SEEN_XREG;
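
Under the old scheme the divisor in K had already been replaced by a pre-computed 32-bit reciprocal at filter load time, so the JIT could emit a multiply-high, i.e. A = (A * R) >> 32; the new code emits a real div and short-circuits K == 1 (A /= 1 needs no code at all, A %= 1 just clears A). The sketch below contrasts the two schemes using the old lib/reciprocal_div formula as remembered here, so treat it as illustrative; note how it collapses to 0 for a divisor of 1, which is exactly the case the new JIT special-cases:

#include <stdint.h>
#include <stdio.h>

/* Old scheme: R = ceil(2^32 / K) truncated to 32 bits, computed at filter
 * load time, then A / K approximated as (A * R) >> 32. */
static uint32_t reciprocal_value(uint32_t k)
{
	return (uint32_t)(((1ULL << 32) + k - 1) / k);	/* truncates to 0 for k == 1 */
}

static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
	return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
	uint32_t a = 12345;

	printf("true div by 1:       %u\n", a / 1);			/* 12345 */
	printf("reciprocal div by 1: %u\n",
	       reciprocal_divide(a, reciprocal_value(1)));		/* 0     */
	return 0;
}
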
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index b046e070e088..bca9e85daaa5 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -5,7 +5,6 @@
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/vgaarb.h>
#include <asm/pci_x86.h>
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 51384ca727ad..84b9d672843d 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -31,6 +31,7 @@
#include <asm/pci_x86.h>
#include <asm/hw_irq.h>
#include <asm/io_apic.h>
+#include <asm/intel-mid.h>
#define PCIE_CAP_OFFSET 0x100
@@ -219,7 +220,10 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
irq_attr.ioapic = mp_find_ioapic(dev->irq);
irq_attr.ioapic_pin = dev->irq;
irq_attr.trigger = 1; /* level */
- irq_attr.polarity = 1; /* active low */
+ if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
+ irq_attr.polarity = 0; /* active high */
+ else
+ irq_attr.polarity = 1; /* active low */
io_apic_set_pci_routing(&dev->dev, dev->irq, &irq_attr);
return 0;
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 082e88129712..248642f4bab7 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -12,7 +12,6 @@
#include <linux/pci.h>
#include <linux/init.h>
-#include <linux/acpi.h>
#include <linux/sfi_acpi.h>
#include <linux/bitmap.h>
#include <linux/dmi.h>
diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
index 5c90975cdf0f..43984bc1665a 100644
--- a/arch/x86/pci/mmconfig_32.c
+++ b/arch/x86/pci/mmconfig_32.c
@@ -14,7 +14,6 @@
#include <linux/rcupdate.h>
#include <asm/e820.h>
#include <asm/pci_x86.h>
-#include <acpi/acpi.h>
/* Assume systems with more busses have correct MCFG */
#define mmcfg_virt_addr ((void __iomem *) fix_to_virt(FIX_PCIE_MCFG))
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 5eee4959785d..103e702ec5a7 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -337,7 +337,7 @@ out:
return ret;
}
-static void xen_initdom_restore_msi_irqs(struct pci_dev *dev, int irq)
+static void xen_initdom_restore_msi_irqs(struct pci_dev *dev)
{
int ret = 0;
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
index 7145ec63c520..f15103dff4b4 100644
--- a/arch/x86/platform/efi/efi-bgrt.c
+++ b/arch/x86/platform/efi/efi-bgrt.c
@@ -42,14 +42,15 @@ void __init efi_bgrt_init(void)
if (bgrt_tab->header.length < sizeof(*bgrt_tab))
return;
- if (bgrt_tab->version != 1)
+ if (bgrt_tab->version != 1 || bgrt_tab->status != 1)
return;
if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address)
return;
image = efi_lookup_mapped_addr(bgrt_tab->image_address);
if (!image) {
- image = ioremap(bgrt_tab->image_address, sizeof(bmp_header));
+ image = early_memremap(bgrt_tab->image_address,
+ sizeof(bmp_header));
ioremapped = true;
if (!image)
return;
@@ -57,7 +58,7 @@ void __init efi_bgrt_init(void)
memcpy_fromio(&bmp_header, image, sizeof(bmp_header));
if (ioremapped)
- iounmap(image);
+ early_iounmap(image, sizeof(bmp_header));
bgrt_image_size = bmp_header.size;
bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL);
@@ -65,7 +66,8 @@ void __init efi_bgrt_init(void)
return;
if (ioremapped) {
- image = ioremap(bgrt_tab->image_address, bmp_header.size);
+ image = early_memremap(bgrt_tab->image_address,
+ bmp_header.size);
if (!image) {
kfree(bgrt_image);
bgrt_image = NULL;
@@ -75,5 +77,5 @@ void __init efi_bgrt_init(void)
memcpy_fromio(bgrt_image, image, bgrt_image_size);
if (ioremapped)
- iounmap(image);
+ early_iounmap(image, bmp_header.size);
}
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index cceb813044ef..1a201ac7cef8 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -12,6 +12,8 @@
* Bibo Mao <bibo.mao@intel.com>
* Chandramouli Narayanan <mouli@linux.intel.com>
* Huang Ying <ying.huang@intel.com>
+ * Copyright (C) 2013 SuSE Labs
+ * Borislav Petkov <bp@suse.de> - runtime services VA mapping
*
* Copied from efi_32.c to eliminate the duplicated code between EFI
* 32/64 support code. --ying 2007-10-26
@@ -51,7 +53,7 @@
#include <asm/x86_init.h>
#include <asm/rtc.h>
-#define EFI_DEBUG 1
+#define EFI_DEBUG
#define EFI_MIN_RESERVE 5120
@@ -74,6 +76,8 @@ static __initdata efi_config_table_type_t arch_tables[] = {
{NULL_GUID, NULL, NULL},
};
+u64 efi_setup; /* efi setup_data physical address */
+
/*
* Returns 1 if 'facility' is enabled, 0 otherwise.
*/
@@ -110,7 +114,6 @@ static int __init setup_storage_paranoia(char *arg)
}
early_param("efi_no_storage_paranoia", setup_storage_paranoia);
-
static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
unsigned long flags;
@@ -398,9 +401,9 @@ int __init efi_memblock_x86_reserve_range(void)
return 0;
}
-#if EFI_DEBUG
static void __init print_efi_memmap(void)
{
+#ifdef EFI_DEBUG
efi_memory_desc_t *md;
void *p;
int i;
@@ -415,8 +418,8 @@ static void __init print_efi_memmap(void)
md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
(md->num_pages >> (20 - EFI_PAGE_SHIFT)));
}
-}
#endif /* EFI_DEBUG */
+}
void __init efi_reserve_boot_services(void)
{
@@ -436,7 +439,7 @@ void __init efi_reserve_boot_services(void)
* - Not within any part of the kernel
* - Not the bios reserved area
*/
- if ((start+size >= __pa_symbol(_text)
+ if ((start + size > __pa_symbol(_text)
&& start <= __pa_symbol(_end)) ||
!e820_all_mapped(start, start+size, E820_RAM) ||
memblock_is_region_reserved(start, size)) {
@@ -489,18 +492,27 @@ static int __init efi_systab_init(void *phys)
{
if (efi_enabled(EFI_64BIT)) {
efi_system_table_64_t *systab64;
+ struct efi_setup_data *data = NULL;
u64 tmp = 0;
+ if (efi_setup) {
+ data = early_memremap(efi_setup, sizeof(*data));
+ if (!data)
+ return -ENOMEM;
+ }
systab64 = early_ioremap((unsigned long)phys,
sizeof(*systab64));
if (systab64 == NULL) {
pr_err("Couldn't map the system table!\n");
+ if (data)
+ early_iounmap(data, sizeof(*data));
return -ENOMEM;
}
efi_systab.hdr = systab64->hdr;
- efi_systab.fw_vendor = systab64->fw_vendor;
- tmp |= systab64->fw_vendor;
+ efi_systab.fw_vendor = data ? (unsigned long)data->fw_vendor :
+ systab64->fw_vendor;
+ tmp |= data ? data->fw_vendor : systab64->fw_vendor;
efi_systab.fw_revision = systab64->fw_revision;
efi_systab.con_in_handle = systab64->con_in_handle;
tmp |= systab64->con_in_handle;
@@ -514,15 +526,20 @@ static int __init efi_systab_init(void *phys)
tmp |= systab64->stderr_handle;
efi_systab.stderr = systab64->stderr;
tmp |= systab64->stderr;
- efi_systab.runtime = (void *)(unsigned long)systab64->runtime;
- tmp |= systab64->runtime;
+ efi_systab.runtime = data ?
+ (void *)(unsigned long)data->runtime :
+ (void *)(unsigned long)systab64->runtime;
+ tmp |= data ? data->runtime : systab64->runtime;
efi_systab.boottime = (void *)(unsigned long)systab64->boottime;
tmp |= systab64->boottime;
efi_systab.nr_tables = systab64->nr_tables;
- efi_systab.tables = systab64->tables;
- tmp |= systab64->tables;
+ efi_systab.tables = data ? (unsigned long)data->tables :
+ systab64->tables;
+ tmp |= data ? data->tables : systab64->tables;
early_iounmap(systab64, sizeof(*systab64));
+ if (data)
+ early_iounmap(data, sizeof(*data));
#ifdef CONFIG_X86_32
if (tmp >> 32) {
pr_err("EFI data located above 4GB, disabling EFI.\n");
@@ -626,6 +643,62 @@ static int __init efi_memmap_init(void)
return 0;
}
+/*
+ * A number of config table entries get remapped to virtual addresses
+ * after entering EFI virtual mode. However, the kexec kernel requires
+ * their physical addresses, so we pass them via setup_data and
+ * correct those entries to their respective physical addresses here.
+ *
+ * Currently this only handles smbios, which is necessary for some firmware
+ * implementations.
+ */
+static int __init efi_reuse_config(u64 tables, int nr_tables)
+{
+ int i, sz, ret = 0;
+ void *p, *tablep;
+ struct efi_setup_data *data;
+
+ if (!efi_setup)
+ return 0;
+
+ if (!efi_enabled(EFI_64BIT))
+ return 0;
+
+ data = early_memremap(efi_setup, sizeof(*data));
+ if (!data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!data->smbios)
+ goto out_memremap;
+
+ sz = sizeof(efi_config_table_64_t);
+
+ p = tablep = early_memremap(tables, nr_tables * sz);
+ if (!p) {
+ pr_err("Could not map Configuration table!\n");
+ ret = -ENOMEM;
+ goto out_memremap;
+ }
+
+ for (i = 0; i < efi.systab->nr_tables; i++) {
+ efi_guid_t guid;
+
+ guid = ((efi_config_table_64_t *)p)->guid;
+
+ if (!efi_guidcmp(guid, SMBIOS_TABLE_GUID))
+ ((efi_config_table_64_t *)p)->table = data->smbios;
+ p += sz;
+ }
+ early_iounmap(tablep, nr_tables * sz);
+
+out_memremap:
+ early_iounmap(data, sizeof(*data));
+out:
+ return ret;
+}
+
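For orientation, the members of struct efi_setup_data consumed by this file are exactly the physical addresses referenced above and in efi_systab_init(); a minimal sketch of that shape follows (field order and any additional members are assumed here, they are not shown in this diff):

        /* sketch only, not the authoritative kernel definition */
        struct efi_setup_data {
                u64 fw_vendor;  /* phys addr of the firmware vendor string */
                u64 runtime;    /* phys addr of the runtime services table */
                u64 tables;     /* phys addr of the config table array */
                u64 smbios;     /* phys addr of the SMBIOS entry point */
                /* ... */
        };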
void __init efi_init(void)
{
efi_char16_t *c16;
@@ -651,6 +724,10 @@ void __init efi_init(void)
set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
+ efi.config_table = (unsigned long)efi.systab->tables;
+ efi.fw_vendor = (unsigned long)efi.systab->fw_vendor;
+ efi.runtime = (unsigned long)efi.systab->runtime;
+
/*
* Show what we know for posterity
*/
@@ -667,6 +744,9 @@ void __init efi_init(void)
efi.systab->hdr.revision >> 16,
efi.systab->hdr.revision & 0xffff, vendor);
+ if (efi_reuse_config(efi.systab->tables, efi.systab->nr_tables))
+ return;
+
if (efi_config_init(arch_tables))
return;
@@ -684,15 +764,12 @@ void __init efi_init(void)
return;
set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility);
}
-
if (efi_memmap_init())
return;
set_bit(EFI_MEMMAP, &x86_efi_facility);
-#if EFI_DEBUG
print_efi_memmap();
-#endif
}
void __init efi_late_init(void)
@@ -715,7 +792,7 @@ void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
set_memory_nx(addr, npages);
}
-static void __init runtime_code_page_mkexec(void)
+void __init runtime_code_page_mkexec(void)
{
efi_memory_desc_t *md;
void *p;
@@ -741,36 +818,38 @@ void efi_memory_uc(u64 addr, unsigned long size)
set_memory_uc(addr, npages);
}
-/*
- * This function will switch the EFI runtime services to virtual mode.
- * Essentially, look through the EFI memmap and map every region that
- * has the runtime attribute bit set in its memory descriptor and update
- * that memory descriptor with the virtual address obtained from ioremap().
- * This enables the runtime services to be called without having to
- * thunk back into physical mode for every invocation.
- */
-void __init efi_enter_virtual_mode(void)
+void __init old_map_region(efi_memory_desc_t *md)
{
- efi_memory_desc_t *md, *prev_md = NULL;
- efi_status_t status;
+ u64 start_pfn, end_pfn, end;
unsigned long size;
- u64 end, systab, start_pfn, end_pfn;
- void *p, *va, *new_memmap = NULL;
- int count = 0;
+ void *va;
- efi.systab = NULL;
+ start_pfn = PFN_DOWN(md->phys_addr);
+ size = md->num_pages << PAGE_SHIFT;
+ end = md->phys_addr + size;
+ end_pfn = PFN_UP(end);
- /*
- * We don't do virtual mode, since we don't do runtime services, on
- * non-native EFI
- */
+ if (pfn_range_is_mapped(start_pfn, end_pfn)) {
+ va = __va(md->phys_addr);
- if (!efi_is_native()) {
- efi_unmap_memmap();
- return;
- }
+ if (!(md->attribute & EFI_MEMORY_WB))
+ efi_memory_uc((u64)(unsigned long)va, size);
+ } else
+ va = efi_ioremap(md->phys_addr, size,
+ md->type, md->attribute);
+
+ md->virt_addr = (u64) (unsigned long) va;
+ if (!va)
+ pr_err("ioremap of 0x%llX failed!\n",
+ (unsigned long long)md->phys_addr);
+}
+
+/* Merge contiguous regions of the same type and attribute */
+static void __init efi_merge_regions(void)
+{
+ void *p;
+ efi_memory_desc_t *md, *prev_md = NULL;
- /* Merge contiguous regions of the same type and attribute */
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
u64 prev_size;
md = p;
@@ -796,6 +875,77 @@ void __init efi_enter_virtual_mode(void)
}
prev_md = md;
}
+}
+
+static void __init get_systab_virt_addr(efi_memory_desc_t *md)
+{
+ unsigned long size;
+ u64 end, systab;
+
+ size = md->num_pages << EFI_PAGE_SHIFT;
+ end = md->phys_addr + size;
+ systab = (u64)(unsigned long)efi_phys.systab;
+ if (md->phys_addr <= systab && systab < end) {
+ systab += md->virt_addr - md->phys_addr;
+ efi.systab = (efi_system_table_t *)(unsigned long)systab;
+ }
+}
+
+static int __init save_runtime_map(void)
+{
+ efi_memory_desc_t *md;
+ void *tmp, *p, *q = NULL;
+ int count = 0;
+
+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ md = p;
+
+ if (!(md->attribute & EFI_MEMORY_RUNTIME) ||
+ (md->type == EFI_BOOT_SERVICES_CODE) ||
+ (md->type == EFI_BOOT_SERVICES_DATA))
+ continue;
+ tmp = krealloc(q, (count + 1) * memmap.desc_size, GFP_KERNEL);
+ if (!tmp)
+ goto out;
+ q = tmp;
+
+ memcpy(q + count * memmap.desc_size, md, memmap.desc_size);
+ count++;
+ }
+
+ efi_runtime_map_setup(q, count, memmap.desc_size);
+
+ return 0;
+out:
+ kfree(q);
+ return -ENOMEM;
+}
+
+/*
+ * Map efi regions which were passed via setup_data. The virt_addr is a fixed
+ * address that was used in the first kernel of a kexec boot.
+ */
+static void __init efi_map_regions_fixed(void)
+{
+ void *p;
+ efi_memory_desc_t *md;
+
+ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+ md = p;
+ efi_map_region_fixed(md); /* FIXME: add error handling */
+ get_systab_virt_addr(md);
+ }
+
+}
+
+/*
+ * Map efi memory ranges for runtime services and update new_memmap with virtual
+ * addresses.
+ */
+static void * __init efi_map_regions(int *count)
+{
+ efi_memory_desc_t *md;
+ void *p, *tmp, *new_memmap = NULL;
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
@@ -807,53 +957,95 @@ void __init efi_enter_virtual_mode(void)
continue;
}
- size = md->num_pages << EFI_PAGE_SHIFT;
- end = md->phys_addr + size;
+ efi_map_region(md);
+ get_systab_virt_addr(md);
- start_pfn = PFN_DOWN(md->phys_addr);
- end_pfn = PFN_UP(end);
- if (pfn_range_is_mapped(start_pfn, end_pfn)) {
- va = __va(md->phys_addr);
+ tmp = krealloc(new_memmap, (*count + 1) * memmap.desc_size,
+ GFP_KERNEL);
+ if (!tmp)
+ goto out;
+ new_memmap = tmp;
+ memcpy(new_memmap + (*count * memmap.desc_size), md,
+ memmap.desc_size);
+ (*count)++;
+ }
- if (!(md->attribute & EFI_MEMORY_WB))
- efi_memory_uc((u64)(unsigned long)va, size);
- } else
- va = efi_ioremap(md->phys_addr, size,
- md->type, md->attribute);
+ return new_memmap;
+out:
+ kfree(new_memmap);
+ return NULL;
+}
+
+/*
+ * This function will switch the EFI runtime services to virtual mode.
+ * Essentially, we look through the EFI memmap and map every region that
+ * has the runtime attribute bit set in its memory descriptor into the
+ * ->trampoline_pgd page table using a top-down VA allocation scheme.
+ *
+ * The old method, which used to update that memory descriptor with the
+ * virtual address obtained from ioremap(), is still supported when the
+ * kernel is booted with efi=old_map on its command line. That old
+ * method enabled the runtime services to be called without having to
+ * thunk back into physical mode for every invocation.
+ *
+ * The new method does a pagetable switch in a preemption-safe manner
+ * so that we're in a different address space when calling a runtime
+ * function. For passing function arguments we copy the PGDs of the
+ * kernel page table into ->trampoline_pgd prior to each call.
+ *
+ * Specifically for kexec boot, the efi runtime maps from the previous kernel
+ * should be passed in via setup_data. In that case the runtime ranges will be
+ * mapped to the same virtual addresses as in the first kernel.
+ */
+void __init efi_enter_virtual_mode(void)
+{
+ efi_status_t status;
+ void *new_memmap = NULL;
+ int err, count = 0;
- md->virt_addr = (u64) (unsigned long) va;
+ efi.systab = NULL;
- if (!va) {
- pr_err("ioremap of 0x%llX failed!\n",
- (unsigned long long)md->phys_addr);
- continue;
- }
+ /*
+ * We don't do virtual mode, since we don't do runtime services, on
+ * non-native EFI
+ */
+ if (!efi_is_native()) {
+ efi_unmap_memmap();
+ return;
+ }
- systab = (u64) (unsigned long) efi_phys.systab;
- if (md->phys_addr <= systab && systab < end) {
- systab += md->virt_addr - md->phys_addr;
- efi.systab = (efi_system_table_t *) (unsigned long) systab;
+ if (efi_setup) {
+ efi_map_regions_fixed();
+ } else {
+ efi_merge_regions();
+ new_memmap = efi_map_regions(&count);
+ if (!new_memmap) {
+ pr_err("Error reallocating memory, EFI runtime non-functional!\n");
+ return;
}
- new_memmap = krealloc(new_memmap,
- (count + 1) * memmap.desc_size,
- GFP_KERNEL);
- memcpy(new_memmap + (count * memmap.desc_size), md,
- memmap.desc_size);
- count++;
}
+ err = save_runtime_map();
+ if (err)
+ pr_err("Error saving runtime map, efi runtime on kexec non-functional!!\n");
+
BUG_ON(!efi.systab);
- status = phys_efi_set_virtual_address_map(
- memmap.desc_size * count,
- memmap.desc_size,
- memmap.desc_version,
- (efi_memory_desc_t *)__pa(new_memmap));
+ efi_setup_page_tables();
+ efi_sync_low_kernel_mappings();
- if (status != EFI_SUCCESS) {
- pr_alert("Unable to switch EFI into virtual mode "
- "(status=%lx)!\n", status);
- panic("EFI call to SetVirtualAddressMap() failed!");
+ if (!efi_setup) {
+ status = phys_efi_set_virtual_address_map(
+ memmap.desc_size * count,
+ memmap.desc_size,
+ memmap.desc_version,
+ (efi_memory_desc_t *)__pa(new_memmap));
+
+ if (status != EFI_SUCCESS) {
+ pr_alert("Unable to switch EFI into virtual mode (status=%lx)!\n",
+ status);
+ panic("EFI call to SetVirtualAddressMap() failed!");
+ }
}
/*
@@ -876,8 +1068,8 @@ void __init efi_enter_virtual_mode(void)
efi.query_variable_info = virt_efi_query_variable_info;
efi.update_capsule = virt_efi_update_capsule;
efi.query_capsule_caps = virt_efi_query_capsule_caps;
- if (__supported_pte_mask & _PAGE_NX)
- runtime_code_page_mkexec();
+
+ efi_runtime_mkexec();
kfree(new_memmap);
@@ -1006,3 +1198,15 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
return EFI_SUCCESS;
}
EXPORT_SYMBOL_GPL(efi_query_variable_store);
+
+static int __init parse_efi_cmdline(char *str)
+{
+ if (*str == '=')
+ str++;
+
+ if (!strncmp(str, "old_map", 7))
+ set_bit(EFI_OLD_MEMMAP, &x86_efi_facility);
+
+ return 0;
+}
+early_param("efi", parse_efi_cmdline);
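Taken together with the EFI_OLD_MEMMAP checks added to efi_64.c further down, the handler above lets the legacy ioremap()-based runtime mapping be selected from the boot loader. A hypothetical command line (kernel image name and root device invented for illustration):

        linux /boot/vmlinuz-3.14-rc1 root=/dev/sda2 ro efi=old_map

Only the first seven characters are compared, so any argument beginning with "old_map" selects the old path.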
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 40e446941dd7..0b74cdf7f816 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -37,9 +37,19 @@
* claim EFI runtime service handler exclusively and to duplicate a memory in
* low memory space say 0 - 3G.
*/
-
static unsigned long efi_rt_eflags;
+void efi_sync_low_kernel_mappings(void) {}
+void efi_setup_page_tables(void) {}
+
+void __init efi_map_region(efi_memory_desc_t *md)
+{
+ old_map_region(md);
+}
+
+void __init efi_map_region_fixed(efi_memory_desc_t *md) {}
+void __init parse_efi_setup(u64 phys_addr, u32 data_len) {}
+
void efi_call_phys_prelog(void)
{
struct desc_ptr gdt_descr;
@@ -67,3 +77,9 @@ void efi_call_phys_epilog(void)
local_irq_restore(efi_rt_eflags);
}
+
+void __init efi_runtime_mkexec(void)
+{
+ if (__supported_pte_mask & _PAGE_NX)
+ runtime_code_page_mkexec();
+}
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 39a0e7f1f0a3..0c2a234fef1e 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -38,10 +38,28 @@
#include <asm/efi.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
+#include <asm/realmode.h>
static pgd_t *save_pgd __initdata;
static unsigned long efi_flags __initdata;
+/*
+ * We allocate runtime services regions top-down, starting from -4G, i.e.
+ * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
+ */
+static u64 efi_va = -4 * (1UL << 30);
+#define EFI_VA_END (-68 * (1UL << 30))
+
+/*
+ * Scratch space used for switching the pagetable in the EFI stub
+ */
+struct efi_scratch {
+ u64 r15;
+ u64 prev_cr3;
+ pgd_t *efi_pgt;
+ bool use_pgd;
+};
+
static void __init early_code_mapping_set_exec(int executable)
{
efi_memory_desc_t *md;
@@ -65,6 +83,9 @@ void __init efi_call_phys_prelog(void)
int pgd;
int n_pgds;
+ if (!efi_enabled(EFI_OLD_MEMMAP))
+ return;
+
early_code_mapping_set_exec(1);
local_irq_save(efi_flags);
@@ -86,6 +107,10 @@ void __init efi_call_phys_epilog(void)
*/
int pgd;
int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
+
+ if (!efi_enabled(EFI_OLD_MEMMAP))
+ return;
+
for (pgd = 0; pgd < n_pgds; pgd++)
set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
kfree(save_pgd);
@@ -94,6 +119,96 @@ void __init efi_call_phys_epilog(void)
early_code_mapping_set_exec(0);
}
+/*
+ * Add low kernel mappings for passing arguments to EFI functions.
+ */
+void efi_sync_low_kernel_mappings(void)
+{
+ unsigned num_pgds;
+ pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+
+ if (efi_enabled(EFI_OLD_MEMMAP))
+ return;
+
+ num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET);
+
+ memcpy(pgd + pgd_index(PAGE_OFFSET),
+ init_mm.pgd + pgd_index(PAGE_OFFSET),
+ sizeof(pgd_t) * num_pgds);
+}
+
+void efi_setup_page_tables(void)
+{
+ efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
+
+ if (!efi_enabled(EFI_OLD_MEMMAP))
+ efi_scratch.use_pgd = true;
+}
+
+static void __init __map_region(efi_memory_desc_t *md, u64 va)
+{
+ pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+ unsigned long pf = 0;
+
+ if (!(md->attribute & EFI_MEMORY_WB))
+ pf |= _PAGE_PCD;
+
+ if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
+ pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
+ md->phys_addr, va);
+}
+
+void __init efi_map_region(efi_memory_desc_t *md)
+{
+ unsigned long size = md->num_pages << PAGE_SHIFT;
+ u64 pa = md->phys_addr;
+
+ if (efi_enabled(EFI_OLD_MEMMAP))
+ return old_map_region(md);
+
+ /*
+ * Make sure the 1:1 mappings are present as a catch-all for b0rked
+ * firmware which doesn't update all internal pointers after switching
+ * to virtual mode and would otherwise crap on us.
+ */
+ __map_region(md, md->phys_addr);
+
+ efi_va -= size;
+
+ /* Is PA 2M-aligned? */
+ if (!(pa & (PMD_SIZE - 1))) {
+ efi_va &= PMD_MASK;
+ } else {
+ u64 pa_offset = pa & (PMD_SIZE - 1);
+ u64 prev_va = efi_va;
+
+ /* get us the same offset within this 2M page */
+ efi_va = (efi_va & PMD_MASK) + pa_offset;
+
+ if (efi_va > prev_va)
+ efi_va -= PMD_SIZE;
+ }
+
+ if (efi_va < EFI_VA_END) {
+ pr_warn(FW_WARN "VA address range overflow!\n");
+ return;
+ }
+
+ /* Do the VA map */
+ __map_region(md, efi_va);
+ md->virt_addr = efi_va;
+}
+
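To make the offset-preserving branch above concrete, here is the arithmetic with assumed example values (PMD_SIZE = 0x200000; the region size and physical address are invented):

        efi_va  = 0xffffffff00000000;             /* initial value, -4G */
        size    = 0x3000;                         /* 3 pages */
        pa      = 0x7b651000;                     /* not 2M-aligned */

        efi_va -= size;                           /* 0xfffffffeffffd000 */
        pa_offset = pa & (PMD_SIZE - 1);          /* 0x51000 */
        efi_va = (efi_va & PMD_MASK) + pa_offset; /* 0xfffffffeffe51000 */

The result keeps the same offset inside its 2M page as the physical address, so both sides of the mapping share large-page alignment; the extra PMD_SIZE subtraction only kicks in when adding the offset would move efi_va upwards.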
+/*
+ * The kexec kernel uses efi_map_region_fixed to map efi runtime memory ranges.
+ * md->virt_addr is the original virtual address which had been mapped in the
+ * first kernel of the kexec boot.
+ */
+void __init efi_map_region_fixed(efi_memory_desc_t *md)
+{
+ __map_region(md, md->virt_addr);
+}
+
void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
u32 type, u64 attribute)
{
@@ -113,3 +228,17 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
return (void __iomem *)__va(phys_addr);
}
+
+void __init parse_efi_setup(u64 phys_addr, u32 data_len)
+{
+ efi_setup = phys_addr + sizeof(struct setup_data);
+}
+
+void __init efi_runtime_mkexec(void)
+{
+ if (!efi_enabled(EFI_OLD_MEMMAP))
+ return;
+
+ if (__supported_pte_mask & _PAGE_NX)
+ runtime_code_page_mkexec();
+}
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index 4c07ccab8146..88073b140298 100644
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -34,10 +34,47 @@
mov %rsi, %cr0; \
mov (%rsp), %rsp
+ /* stolen from gcc */
+ .macro FLUSH_TLB_ALL
+ movq %r15, efi_scratch(%rip)
+ movq %r14, efi_scratch+8(%rip)
+ movq %cr4, %r15
+ movq %r15, %r14
+ andb $0x7f, %r14b
+ movq %r14, %cr4
+ movq %r15, %cr4
+ movq efi_scratch+8(%rip), %r14
+ movq efi_scratch(%rip), %r15
+ .endm
+
+ .macro SWITCH_PGT
+ cmpb $0, efi_scratch+24(%rip)
+ je 1f
+ movq %r15, efi_scratch(%rip) # r15
+ # save previous CR3
+ movq %cr3, %r15
+ movq %r15, efi_scratch+8(%rip) # prev_cr3
+ movq efi_scratch+16(%rip), %r15 # EFI pgt
+ movq %r15, %cr3
+ 1:
+ .endm
+
+ .macro RESTORE_PGT
+ cmpb $0, efi_scratch+24(%rip)
+ je 2f
+ movq efi_scratch+8(%rip), %r15
+ movq %r15, %cr3
+ movq efi_scratch(%rip), %r15
+ FLUSH_TLB_ALL
+ 2:
+ .endm
+
ENTRY(efi_call0)
SAVE_XMM
subq $32, %rsp
+ SWITCH_PGT
call *%rdi
+ RESTORE_PGT
addq $32, %rsp
RESTORE_XMM
ret
@@ -47,7 +84,9 @@ ENTRY(efi_call1)
SAVE_XMM
subq $32, %rsp
mov %rsi, %rcx
+ SWITCH_PGT
call *%rdi
+ RESTORE_PGT
addq $32, %rsp
RESTORE_XMM
ret
@@ -57,7 +96,9 @@ ENTRY(efi_call2)
SAVE_XMM
subq $32, %rsp
mov %rsi, %rcx
+ SWITCH_PGT
call *%rdi
+ RESTORE_PGT
addq $32, %rsp
RESTORE_XMM
ret
@@ -68,7 +109,9 @@ ENTRY(efi_call3)
subq $32, %rsp
mov %rcx, %r8
mov %rsi, %rcx
+ SWITCH_PGT
call *%rdi
+ RESTORE_PGT
addq $32, %rsp
RESTORE_XMM
ret
@@ -80,7 +123,9 @@ ENTRY(efi_call4)
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
+ SWITCH_PGT
call *%rdi
+ RESTORE_PGT
addq $32, %rsp
RESTORE_XMM
ret
@@ -93,7 +138,9 @@ ENTRY(efi_call5)
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
+ SWITCH_PGT
call *%rdi
+ RESTORE_PGT
addq $48, %rsp
RESTORE_XMM
ret
@@ -109,8 +156,15 @@ ENTRY(efi_call6)
mov %r8, %r9
mov %rcx, %r8
mov %rsi, %rcx
+ SWITCH_PGT
call *%rdi
+ RESTORE_PGT
addq $48, %rsp
RESTORE_XMM
ret
ENDPROC(efi_call6)
+
+ .data
+ENTRY(efi_scratch)
+ .fill 3,8,0
+ .byte 0
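The magic offsets used by SWITCH_PGT/RESTORE_PGT above (efi_scratch, +8, +16, +24) mirror the struct efi_scratch layout defined in efi_64.c earlier in this diff. A purely illustrative check, not part of the patch, that would catch any drift between the two files:

        /* offsets consumed by efi_stub_64.S */
        _Static_assert(offsetof(struct efi_scratch, r15)      == 0,  "r15");
        _Static_assert(offsetof(struct efi_scratch, prev_cr3) == 8,  "prev_cr3");
        _Static_assert(offsetof(struct efi_scratch, efi_pgt)  == 16, "efi_pgt");
        _Static_assert(offsetof(struct efi_scratch, use_pgd)  == 24, "use_pgd");

The trailing ".fill 3,8,0" plus ".byte 0" reserves exactly those 25 bytes in .data.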
diff --git a/arch/x86/platform/intel-mid/Makefile b/arch/x86/platform/intel-mid/Makefile
index 01cc29ea5ff7..0a8ee703b9fa 100644
--- a/arch/x86/platform/intel-mid/Makefile
+++ b/arch/x86/platform/intel-mid/Makefile
@@ -1,6 +1,6 @@
-obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o
-obj-$(CONFIG_X86_INTEL_MID) += intel_mid_vrtc.o
+obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o intel_mid_vrtc.o mfld.o mrfl.o
obj-$(CONFIG_EARLY_PRINTK_INTEL_MID) += early_printk_intel_mid.o
+
# SFI specific code
ifdef CONFIG_X86_INTEL_MID
obj-$(CONFIG_SFI) += sfi.o device_libs/
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c
index 0d942c1d26d5..69a783689d21 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_emc1403.c
@@ -22,7 +22,9 @@ static void __init *emc1403_platform_data(void *info)
int intr = get_gpio_by_name("thermal_int");
int intr2nd = get_gpio_by_name("thermal_alert");
- if (intr == -1 || intr2nd == -1)
+ if (intr < 0)
+ return NULL;
+ if (intr2nd < 0)
return NULL;
i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
index a013a4834bbe..dccae6b0413f 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_gpio_keys.c
@@ -66,7 +66,7 @@ static int __init pb_keys_init(void)
gb[i].gpio = get_gpio_by_name(gb[i].desc);
pr_debug("info[%2d]: name = %s, gpio = %d\n", i, gb[i].desc,
gb[i].gpio);
- if (gb[i].gpio == -1)
+ if (gb[i].gpio < 0)
continue;
if (i != good)
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_ipc.h b/arch/x86/platform/intel-mid/device_libs/platform_ipc.h
index 8f568dd79605..79bb09d4f718 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_ipc.h
+++ b/arch/x86/platform/intel-mid/device_libs/platform_ipc.h
@@ -12,6 +12,7 @@
#ifndef _PLATFORM_IPC_H_
#define _PLATFORM_IPC_H_
-extern void __init ipc_device_handler(struct sfi_device_table_entry *pentry,
- struct devs_id *dev) __attribute__((weak));
+void __init
+ipc_device_handler(struct sfi_device_table_entry *pentry, struct devs_id *dev);
+
#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_lis331.c b/arch/x86/platform/intel-mid/device_libs/platform_lis331.c
index 15278c11f714..54226de7541a 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_lis331.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_lis331.c
@@ -21,7 +21,9 @@ static void __init *lis331dl_platform_data(void *info)
int intr = get_gpio_by_name("accel_int");
int intr2nd = get_gpio_by_name("accel_2");
- if (intr == -1 || intr2nd == -1)
+ if (intr < 0)
+ return NULL;
+ if (intr2nd < 0)
return NULL;
i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
index 94ade10024ae..2c8acbc1e9ad 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_max7315.c
@@ -48,7 +48,7 @@ static void __init *max7315_platform_data(void *info)
gpio_base = get_gpio_by_name(base_pin_name);
intr = get_gpio_by_name(intr_pin_name);
- if (gpio_base == -1)
+ if (gpio_base < 0)
return NULL;
max7315->gpio_base = gpio_base;
if (intr != -1) {
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c
index dd28d63c84fb..cfe9a47a1e87 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mpu3050.c
@@ -19,7 +19,7 @@ static void *mpu3050_platform_data(void *info)
struct i2c_board_info *i2c_info = info;
int intr = get_gpio_by_name("mpu3050_int");
- if (intr == -1)
+ if (intr < 0)
return NULL;
i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_msic.h b/arch/x86/platform/intel-mid/device_libs/platform_msic.h
index 917eb56d77da..b7be1d041da2 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_msic.h
+++ b/arch/x86/platform/intel-mid/device_libs/platform_msic.h
@@ -14,6 +14,6 @@
extern struct intel_msic_platform_data msic_pdata;
-extern void *msic_generic_platform_data(void *info,
- enum intel_msic_block block) __attribute__((weak));
+void *msic_generic_platform_data(void *info, enum intel_msic_block block);
+
#endif
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c
index d87182a09263..65c2a9a19db4 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_pmic_gpio.c
@@ -26,7 +26,7 @@ static void __init *pmic_gpio_platform_data(void *info)
static struct intel_pmic_gpio_platform_data pmic_gpio_pdata;
int gpio_base = get_gpio_by_name("pmic_gpio_base");
- if (gpio_base == -1)
+ if (gpio_base < 0)
gpio_base = 64;
pmic_gpio_pdata.gpio_base = gpio_base;
pmic_gpio_pdata.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c
index 22881c9a6737..33be0b3be6e1 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_tca6416.c
@@ -34,10 +34,10 @@ static void *tca6416_platform_data(void *info)
gpio_base = get_gpio_by_name(base_pin_name);
intr = get_gpio_by_name(intr_pin_name);
- if (gpio_base == -1)
+ if (gpio_base < 0)
return NULL;
tca6416.gpio_base = gpio_base;
- if (intr != -1) {
+ if (intr >= 0) {
i2c_info->irq = intr + INTEL_MID_IRQ_OFFSET;
tca6416.irq_base = gpio_base + INTEL_MID_IRQ_OFFSET;
} else {
diff --git a/arch/x86/platform/intel-mid/early_printk_intel_mid.c b/arch/x86/platform/intel-mid/early_printk_intel_mid.c
index 4f702f554f6e..e0bd082a80e0 100644
--- a/arch/x86/platform/intel-mid/early_printk_intel_mid.c
+++ b/arch/x86/platform/intel-mid/early_printk_intel_mid.c
@@ -22,7 +22,6 @@
#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <asm/fixmap.h>
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index f90e290f689f..1bbedc4b0f88 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -35,6 +35,8 @@
#include <asm/apb_timer.h>
#include <asm/reboot.h>
+#include "intel_mid_weak_decls.h"
+
/*
* the clockevent devices on Moorestown/Medfield can be APBT or LAPIC clock,
* cmdline option x86_intel_mid_timer can be used to override the configuration
@@ -58,12 +60,16 @@
enum intel_mid_timer_options intel_mid_timer_options;
+/* intel_mid_ops to store sub arch ops */
+struct intel_mid_ops *intel_mid_ops;
+/* getter function for sub arch ops*/
+static void *(*get_intel_mid_ops[])(void) = INTEL_MID_OPS_INIT;
enum intel_mid_cpu_type __intel_mid_cpu_chip;
EXPORT_SYMBOL_GPL(__intel_mid_cpu_chip);
static void intel_mid_power_off(void)
{
-}
+};
static void intel_mid_reboot(void)
{
@@ -72,32 +78,6 @@ static void intel_mid_reboot(void)
static unsigned long __init intel_mid_calibrate_tsc(void)
{
- unsigned long fast_calibrate;
- u32 lo, hi, ratio, fsb;
-
- rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
- pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi);
- ratio = (hi >> 8) & 0x1f;
- pr_debug("ratio is %d\n", ratio);
- if (!ratio) {
- pr_err("read a zero ratio, should be incorrect!\n");
- pr_err("force tsc ratio to 16 ...\n");
- ratio = 16;
- }
- rdmsr(MSR_FSB_FREQ, lo, hi);
- if ((lo & 0x7) == 0x7)
- fsb = PENWELL_FSB_FREQ_83SKU;
- else
- fsb = PENWELL_FSB_FREQ_100SKU;
- fast_calibrate = ratio * fsb;
- pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
- lapic_timer_frequency = fsb * 1000 / HZ;
- /* mark tsc clocksource as reliable */
- set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
-
- if (fast_calibrate)
- return fast_calibrate;
-
return 0;
}
@@ -125,13 +105,37 @@ static void __init intel_mid_time_init(void)
static void intel_mid_arch_setup(void)
{
- if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27)
- __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL;
- else {
+ if (boot_cpu_data.x86 != 6) {
pr_err("Unknown Intel MID CPU (%d:%d), default to Penwell\n",
boot_cpu_data.x86, boot_cpu_data.x86_model);
__intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL;
+ goto out;
}
+
+ switch (boot_cpu_data.x86_model) {
+ case 0x35:
+ __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_CLOVERVIEW;
+ break;
+ case 0x3C:
+ case 0x4A:
+ __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_TANGIER;
+ break;
+ case 0x27:
+ default:
+ __intel_mid_cpu_chip = INTEL_MID_CPU_CHIP_PENWELL;
+ break;
+ }
+
+ if (__intel_mid_cpu_chip < MAX_CPU_OPS(get_intel_mid_ops))
+ intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
+ else {
+ intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
+ pr_info("ARCH: Uknown SoC, assuming PENWELL!\n");
+ }
+
+out:
+ if (intel_mid_ops->arch_setup)
+ intel_mid_ops->arch_setup();
}
/* MID systems don't have i8042 controller */
diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
new file mode 100644
index 000000000000..46aa25c8ce06
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
@@ -0,0 +1,19 @@
+/*
+ * intel_mid_weak_decls.h: Weak declarations of intel-mid.c
+ *
+ * (C) Copyright 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+
+/* __attribute__((weak)) makes these declarations overridable */
+/* For every CPU addition a new get_<cpuname>_ops interface needs
+ * to be added.
+ */
+extern void *get_penwell_ops(void) __attribute__((weak));
+extern void *get_cloverview_ops(void) __attribute__((weak));
+extern void *get_tangier_ops(void) __attribute__((weak));
diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c
new file mode 100644
index 000000000000..23381d2174ae
--- /dev/null
+++ b/arch/x86/platform/intel-mid/mfld.c
@@ -0,0 +1,75 @@
+/*
+ * mfld.c: Intel Medfield platform setup code
+ *
+ * (C) Copyright 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+
+#include <asm/apic.h>
+#include <asm/intel-mid.h>
+#include <asm/intel_mid_vrtc.h>
+
+#include "intel_mid_weak_decls.h"
+
+static void penwell_arch_setup(void);
+/* penwell arch ops */
+static struct intel_mid_ops penwell_ops = {
+ .arch_setup = penwell_arch_setup,
+};
+
+static void mfld_power_off(void)
+{
+}
+
+static unsigned long __init mfld_calibrate_tsc(void)
+{
+ unsigned long fast_calibrate;
+ u32 lo, hi, ratio, fsb;
+
+ rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
+ pr_debug("IA32 perf status is 0x%x, 0x%0x\n", lo, hi);
+ ratio = (hi >> 8) & 0x1f;
+ pr_debug("ratio is %d\n", ratio);
+ if (!ratio) {
+ pr_err("read a zero ratio, should be incorrect!\n");
+ pr_err("force tsc ratio to 16 ...\n");
+ ratio = 16;
+ }
+ rdmsr(MSR_FSB_FREQ, lo, hi);
+ if ((lo & 0x7) == 0x7)
+ fsb = FSB_FREQ_83SKU;
+ else
+ fsb = FSB_FREQ_100SKU;
+ fast_calibrate = ratio * fsb;
+ pr_debug("read penwell tsc %lu khz\n", fast_calibrate);
+ lapic_timer_frequency = fsb * 1000 / HZ;
+ /* mark tsc clocksource as reliable */
+ set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
+
+ if (fast_calibrate)
+ return fast_calibrate;
+
+ return 0;
+}
+
+static void __init penwell_arch_setup(void)
+{
+ x86_platform.calibrate_tsc = mfld_calibrate_tsc;
+ pm_power_off = mfld_power_off;
+}
+
+void *get_penwell_ops(void)
+{
+ return &penwell_ops;
+}
+
+void *get_cloverview_ops(void)
+{
+ return &penwell_ops;
+}
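As a rough illustration of the calibration math above (the MSR readings and the SKU frequency are assumed, not measured): if the ratio field reads 16 and the 100 MHz SKU is selected, with FSB_FREQ_100SKU on the order of 100000 kHz, then

        fast_calibrate        = 16 * 100000        = 1,600,000 kHz   (~1.6 GHz TSC)
        lapic_timer_frequency = 100000 * 1000 / HZ = 1,000,000       (with HZ = 100, assumed)

i.e. the TSC frequency is reported in kHz and the LAPIC timer value is bus clocks per tick.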
diff --git a/arch/x86/platform/intel-mid/mrfl.c b/arch/x86/platform/intel-mid/mrfl.c
new file mode 100644
index 000000000000..aaca91753d32
--- /dev/null
+++ b/arch/x86/platform/intel-mid/mrfl.c
@@ -0,0 +1,103 @@
+/*
+ * mrfl.c: Intel Merrifield platform specific setup code
+ *
+ * (C) Copyright 2013 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+
+#include <asm/apic.h>
+#include <asm/intel-mid.h>
+
+#include "intel_mid_weak_decls.h"
+
+static unsigned long __init tangier_calibrate_tsc(void)
+{
+ unsigned long fast_calibrate;
+ u32 lo, hi, ratio, fsb, bus_freq;
+
+ /* *********************** */
+ /* Compute TSC:Ratio * FSB */
+ /* *********************** */
+
+ /* Compute Ratio */
+ rdmsr(MSR_PLATFORM_INFO, lo, hi);
+ pr_debug("IA32 PLATFORM_INFO is 0x%x : %x\n", hi, lo);
+
+ ratio = (lo >> 8) & 0xFF;
+ pr_debug("ratio is %d\n", ratio);
+ if (!ratio) {
+ pr_err("Read a zero ratio, force tsc ratio to 4 ...\n");
+ ratio = 4;
+ }
+
+ /* Compute FSB */
+ rdmsr(MSR_FSB_FREQ, lo, hi);
+ pr_debug("Actual FSB frequency detected by SOC 0x%x : %x\n",
+ hi, lo);
+
+ bus_freq = lo & 0x7;
+ pr_debug("bus_freq = 0x%x\n", bus_freq);
+
+ if (bus_freq == 0)
+ fsb = FSB_FREQ_100SKU;
+ else if (bus_freq == 1)
+ fsb = FSB_FREQ_100SKU;
+ else if (bus_freq == 2)
+ fsb = FSB_FREQ_133SKU;
+ else if (bus_freq == 3)
+ fsb = FSB_FREQ_167SKU;
+ else if (bus_freq == 4)
+ fsb = FSB_FREQ_83SKU;
+ else if (bus_freq == 5)
+ fsb = FSB_FREQ_400SKU;
+ else if (bus_freq == 6)
+ fsb = FSB_FREQ_267SKU;
+ else if (bus_freq == 7)
+ fsb = FSB_FREQ_333SKU;
+ else {
+ BUG();
+ pr_err("Invalid bus_freq! Setting to minimal value!\n");
+ fsb = FSB_FREQ_100SKU;
+ }
+
+ /* TSC = FSB Freq * Resolved HFM Ratio */
+ fast_calibrate = ratio * fsb;
+ pr_debug("calculate tangier tsc %lu KHz\n", fast_calibrate);
+
+ /* ************************************ */
+ /* Calculate Local APIC Timer Frequency */
+ /* ************************************ */
+ lapic_timer_frequency = (fsb * 1000) / HZ;
+
+ pr_debug("Setting lapic_timer_frequency = %d\n",
+ lapic_timer_frequency);
+
+ /* mark tsc clocksource as reliable */
+ set_cpu_cap(&boot_cpu_data, X86_FEATURE_TSC_RELIABLE);
+
+ if (fast_calibrate)
+ return fast_calibrate;
+
+ return 0;
+}
+
+static void __init tangier_arch_setup(void)
+{
+ x86_platform.calibrate_tsc = tangier_calibrate_tsc;
+}
+
+/* tangier arch ops */
+static struct intel_mid_ops tangier_ops = {
+ .arch_setup = tangier_arch_setup,
+};
+
+void *get_tangier_ops(void)
+{
+ return &tangier_ops;
+}
diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c
index c84c1ca396bf..994c40bd7cb7 100644
--- a/arch/x86/platform/intel-mid/sfi.c
+++ b/arch/x86/platform/intel-mid/sfi.c
@@ -224,7 +224,7 @@ int get_gpio_by_name(const char *name)
if (!strncmp(name, pentry->pin_name, SFI_NAME_LEN))
return pentry->pin_no;
}
- return -1;
+ return -EINVAL;
}
void __init intel_scu_device_register(struct platform_device *pdev)
@@ -250,7 +250,7 @@ static void __init intel_scu_spi_device_register(struct spi_board_info *sdev)
sdev->modalias);
return;
}
- memcpy(new_dev, sdev, sizeof(*sdev));
+ *new_dev = *sdev;
spi_devs[spi_next_dev++] = new_dev;
}
@@ -271,7 +271,7 @@ static void __init intel_scu_i2c_device_register(int bus,
idev->type);
return;
}
- memcpy(new_dev, idev, sizeof(*idev));
+ *new_dev = *idev;
i2c_bus[i2c_next_dev] = bus;
i2c_devs[i2c_next_dev++] = new_dev;
@@ -337,6 +337,8 @@ static void __init sfi_handle_ipc_dev(struct sfi_device_table_entry *pentry,
pr_debug("IPC bus, name = %16.16s, irq = 0x%2x\n",
pentry->name, pentry->irq);
pdata = intel_mid_sfi_get_pdata(dev, pentry);
+ if (IS_ERR(pdata))
+ return;
pdev = platform_device_alloc(pentry->name, 0);
if (pdev == NULL) {
@@ -370,6 +372,8 @@ static void __init sfi_handle_spi_dev(struct sfi_device_table_entry *pentry,
spi_info.chip_select);
pdata = intel_mid_sfi_get_pdata(dev, &spi_info);
+ if (IS_ERR(pdata))
+ return;
spi_info.platform_data = pdata;
if (dev->delay)
@@ -395,6 +399,8 @@ static void __init sfi_handle_i2c_dev(struct sfi_device_table_entry *pentry,
i2c_info.addr);
pdata = intel_mid_sfi_get_pdata(dev, &i2c_info);
i2c_info.platform_data = pdata;
+ if (IS_ERR(pdata))
+ return;
if (dev->delay)
intel_scu_i2c_device_register(pentry->host_num, &i2c_info);
@@ -443,13 +449,35 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
* so we have to enable them one by one here
*/
ioapic = mp_find_ioapic(irq);
- irq_attr.ioapic = ioapic;
- irq_attr.ioapic_pin = irq;
- irq_attr.trigger = 1;
- irq_attr.polarity = 1;
- io_apic_set_pci_routing(NULL, irq, &irq_attr);
- } else
+ if (ioapic >= 0) {
+ irq_attr.ioapic = ioapic;
+ irq_attr.ioapic_pin = irq;
+ irq_attr.trigger = 1;
+ if (intel_mid_identify_cpu() ==
+ INTEL_MID_CPU_CHIP_TANGIER) {
+ if (!strncmp(pentry->name,
+ "r69001-ts-i2c", 13))
+ /* active low */
+ irq_attr.polarity = 1;
+ else if (!strncmp(pentry->name,
+ "synaptics_3202", 14))
+ /* active low */
+ irq_attr.polarity = 1;
+ else if (irq == 41)
+ /* fast_int_1 */
+ irq_attr.polarity = 1;
+ else
+ /* active high */
+ irq_attr.polarity = 0;
+ } else {
+ /* PNW and CLV go with active low */
+ irq_attr.polarity = 1;
+ }
+ io_apic_set_pci_routing(NULL, irq, &irq_attr);
+ }
+ } else {
irq = 0; /* No irq */
+ }
dev = get_device_id(pentry->type, pentry->name);
diff --git a/arch/x86/platform/iris/iris.c b/arch/x86/platform/iris/iris.c
index e6cb80f620af..4d171e8640ef 100644
--- a/arch/x86/platform/iris/iris.c
+++ b/arch/x86/platform/iris/iris.c
@@ -27,7 +27,6 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/pm.h>
#include <asm/io.h>
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
index 649a12befba9..08e350e757dc 100644
--- a/arch/x86/platform/olpc/olpc-xo15-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
@@ -15,8 +15,7 @@
#include <linux/power_supply.h>
#include <linux/olpc-ec.h>
-#include <acpi/acpi_bus.h>
-#include <acpi/acpi_drivers.h>
+#include <linux/acpi.h>
#include <asm/olpc.h>
#define DRV_NAME "olpc-xo15-sci"
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index efe4d7220397..dfe605ac1bcd 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -433,15 +433,49 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
return;
}
-static inline unsigned long cycles_2_us(unsigned long long cyc)
+/*
+ * Not to be confused with cycles_2_ns() from tsc.c; this gives a relative
+ * number, not an absolute. It converts a duration in cycles to a duration in
+ * ns.
+ */
+static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
+ struct cyc2ns_data *data = cyc2ns_read_begin();
unsigned long long ns;
- unsigned long us;
- int cpu = smp_processor_id();
- ns = (cyc * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR;
- us = ns / 1000;
- return us;
+ ns = mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
+
+ cyc2ns_read_end(data);
+ return ns;
+}
+
+/*
+ * The reverse of the above; converts a duration in ns to a duration in cycles.
+ */
+static inline unsigned long long ns_2_cycles(unsigned long long ns)
+{
+ struct cyc2ns_data *data = cyc2ns_read_begin();
+ unsigned long long cyc;
+
+ cyc = (ns << data->cyc2ns_shift) / data->cyc2ns_mul;
+
+ cyc2ns_read_end(data);
+ return cyc;
+}
+
+static inline unsigned long cycles_2_us(unsigned long long cyc)
+{
+ return cycles_2_ns(cyc) / NSEC_PER_USEC;
+}
+
+static inline cycles_t sec_2_cycles(unsigned long sec)
+{
+ return ns_2_cycles(sec * NSEC_PER_SEC);
+}
+
+static inline unsigned long long usec_2_cycles(unsigned long usec)
+{
+ return ns_2_cycles(usec * NSEC_PER_USEC);
}
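For a feel of what cyc2ns_read_begin() hands back, here is a toy version of the multiply-shift conversion with assumed constants for a 2.4 GHz TSC (the real mul/shift come from the per-cpu cyc2ns data, not from these numbers):

        /* ns = cyc * mul >> shift, with mul/2^shift ~= 1e6 / tsc_khz */
        static unsigned long long toy_cycles_2_ns(unsigned long long cyc)
        {
                const unsigned int mul = 427, shift = 10;  /* ~0.417 ns/cycle */
                return (cyc * mul) >> shift;               /* 2400 cycles -> ~1000 ns */
        }

mul_u64_u32_shr() performs the same multiply-shift without risking overflow in the intermediate product.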
/*
@@ -668,16 +702,6 @@ static int wait_completion(struct bau_desc *bau_desc,
bcp, try);
}
-static inline cycles_t sec_2_cycles(unsigned long sec)
-{
- unsigned long ns;
- cycles_t cyc;
-
- ns = sec * 1000000000;
- cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
- return cyc;
-}
-
/*
* Our retries are blocked by all destination sw ack resources being
* in use, and a timeout is pending. In that case hardware immediately
@@ -1327,16 +1351,6 @@ static void ptc_seq_stop(struct seq_file *file, void *data)
{
}
-static inline unsigned long long usec_2_cycles(unsigned long microsec)
-{
- unsigned long ns;
- unsigned long long cyc;
-
- ns = microsec * 1000;
- cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
- return cyc;
-}
-
/*
* Display the statistics thru /proc/sgi_uv/ptc_statistics
* 'data' points to the cpu number
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index 8eeccba73130..be27da60dc8f 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -74,7 +74,6 @@ static atomic_t uv_in_nmi;
static atomic_t uv_nmi_cpu = ATOMIC_INIT(-1);
static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1);
static atomic_t uv_nmi_slave_continue;
-static atomic_t uv_nmi_kexec_failed;
static cpumask_var_t uv_nmi_cpu_mask;
/* Values for uv_nmi_slave_continue */
@@ -149,7 +148,8 @@ module_param_named(retry_count, uv_nmi_retry_count, int, 0644);
* "dump" - dump process stack for each cpu
* "ips" - dump IP info for each cpu
* "kdump" - do crash dump
- * "kdb" - enter KDB/KGDB (default)
+ * "kdb" - enter KDB (default)
+ * "kgdb" - enter KGDB
*/
static char uv_nmi_action[8] = "kdb";
module_param_string(action, uv_nmi_action, sizeof(uv_nmi_action), 0644);
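With the KGDB support added below, switching the NMI action at runtime is just a write to this module parameter; for a built-in uv_nmi the sysfs path would presumably be:

        # echo kgdb > /sys/module/uv_nmi/parameters/action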
@@ -504,6 +504,7 @@ static void uv_nmi_touch_watchdogs(void)
}
#if defined(CONFIG_KEXEC)
+static atomic_t uv_nmi_kexec_failed;
static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
{
/* Call crash to dump system state */
@@ -537,18 +538,45 @@ static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
}
#endif /* !CONFIG_KEXEC */
+#ifdef CONFIG_KGDB
#ifdef CONFIG_KGDB_KDB
-/* Call KDB from NMI handler */
-static void uv_call_kdb(int cpu, struct pt_regs *regs, int master)
+static inline int uv_nmi_kdb_reason(void)
{
- int ret;
+ return KDB_REASON_SYSTEM_NMI;
+}
+#else /* !CONFIG_KGDB_KDB */
+static inline int uv_nmi_kdb_reason(void)
+{
+ /* Ensure the user is expecting to attach a gdb remote */
+ if (uv_nmi_action_is("kgdb"))
+ return 0;
+
+ pr_err("UV: NMI error: KDB is not enabled in this kernel\n");
+ return -1;
+}
+#endif /* CONFIG_KGDB_KDB */
+/*
+ * Call KGDB/KDB from NMI handler
+ *
+ * Note that if both KGDB and KDB are configured, then the action of 'kgdb' or
+ * 'kdb' has no effect on which is used. See the KGDB documentation for further
+ * information.
+ */
+static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
+{
if (master) {
+ int reason = uv_nmi_kdb_reason();
+ int ret;
+
+ if (reason < 0)
+ return;
+
/* call KGDB NMI handler as MASTER */
- ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs,
- &uv_nmi_slave_continue);
+ ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
+ &uv_nmi_slave_continue);
if (ret) {
- pr_alert("KDB returned error, is kgdboc set?\n");
+ pr_alert("KGDB returned error, is kgdboc set?\n");
atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
}
} else {
@@ -567,12 +595,12 @@ static void uv_call_kdb(int cpu, struct pt_regs *regs, int master)
uv_nmi_sync_exit(master);
}
-#else /* !CONFIG_KGDB_KDB */
-static inline void uv_call_kdb(int cpu, struct pt_regs *regs, int master)
+#else /* !CONFIG_KGDB */
+static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
{
- pr_err("UV: NMI error: KGDB/KDB is not enabled in this kernel\n");
+ pr_err("UV: NMI error: KGDB is not enabled in this kernel\n");
}
-#endif /* !CONFIG_KGDB_KDB */
+#endif /* !CONFIG_KGDB */
/*
* UV NMI handler
@@ -606,9 +634,9 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump"))
uv_nmi_dump_state(cpu, regs, master);
- /* Call KDB if enabled */
- else if (uv_nmi_action_is("kdb"))
- uv_call_kdb(cpu, regs, master);
+ /* Call KGDB/KDB if enabled */
+ else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb"))
+ uv_call_kgdb_kdb(cpu, regs, master);
/* Clear per_cpu "in nmi" flag */
atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_OUT);
@@ -634,7 +662,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
/*
* NMI handler for pulling in CPUs when perf events are grabbing our NMI
*/
-int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
+static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
{
int ret;
@@ -651,7 +679,7 @@ int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
return ret;
}
-void uv_register_nmi_notifier(void)
+static void uv_register_nmi_notifier(void)
{
if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
pr_warn("UV: NMI handler failed to register\n");
@@ -695,6 +723,5 @@ void uv_nmi_setup(void)
uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
}
BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
+ uv_register_nmi_notifier();
}
-
-
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index a44f457e70a1..bad628a620c4 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -29,12 +29,10 @@ void __init reserve_real_mode(void)
void __init setup_real_mode(void)
{
u16 real_mode_seg;
- u32 *rel;
+ const u32 *rel;
u32 count;
- u32 *ptr;
- u16 *seg;
- int i;
unsigned char *base;
+ unsigned long phys_base;
struct trampoline_header *trampoline_header;
size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
#ifdef CONFIG_X86_64
@@ -46,23 +44,23 @@ void __init setup_real_mode(void)
memcpy(base, real_mode_blob, size);
- real_mode_seg = __pa(base) >> 4;
+ phys_base = __pa(base);
+ real_mode_seg = phys_base >> 4;
+
rel = (u32 *) real_mode_relocs;
/* 16-bit segment relocations. */
- count = rel[0];
- rel = &rel[1];
- for (i = 0; i < count; i++) {
- seg = (u16 *) (base + rel[i]);
+ count = *rel++;
+ while (count--) {
+ u16 *seg = (u16 *) (base + *rel++);
*seg = real_mode_seg;
}
/* 32-bit linear relocations. */
- count = rel[i];
- rel = &rel[i + 1];
- for (i = 0; i < count; i++) {
- ptr = (u32 *) (base + rel[i]);
- *ptr += __pa(base);
+ count = *rel++;
+ while (count--) {
+ u32 *ptr = (u32 *) (base + *rel++);
+ *ptr += phys_base;
}
/* Must be perfomed *after* relocation. */
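The two relocation loops above walk a blob whose layout is implied by the code; the following is only a reading aid inferred from those loops, not a definition taken from the patch:

        /* real_mode_relocs, as consumed by setup_real_mode() */
        u32 nr_seg;              /* number of 16-bit segment relocations */
        u32 seg_off[nr_seg];     /* offsets of u16 fields set to real_mode_seg */
        u32 nr_lin;              /* number of 32-bit linear relocations */
        u32 lin_off[nr_lin];     /* offsets of u32 fields biased by phys_base */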
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 9cac82588cbc..3497f14e4dea 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -64,20 +64,7 @@ $(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
# ---------------------------------------------------------------------------
-# How to compile the 16-bit code. Note we always compile for -march=i386,
-# that way we can complain to the user if the CPU is insufficient.
-KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
- -I$(srctree)/arch/x86/boot \
- -DDISABLE_BRANCH_PROFILING \
- -Wall -Wstrict-prototypes \
- -march=i386 -mregparm=3 \
- -include $(srctree)/$(src)/../../boot/code16gcc.h \
- -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
- -mno-mmx -mno-sse \
- $(call cc-option, -ffreestanding) \
- $(call cc-option, -fno-toplevel-reorder,\
- $(call cc-option, -fno-unit-at-a-time)) \
- $(call cc-option, -fno-stack-protector) \
- $(call cc-option, -mpreferred-stack-boundary=2)
+KBUILD_CFLAGS := $(LINUXINCLUDE) $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
+ -I$(srctree)/arch/x86/boot
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
index f932ea61d1c8..d66c607bdc58 100644
--- a/arch/x86/realmode/rm/reboot.S
+++ b/arch/x86/realmode/rm/reboot.S
@@ -1,5 +1,4 @@
#include <linux/linkage.h>
-#include <linux/init.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/processor-flags.h>
diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
index c1b2791183e7..48ddd76bc4c3 100644
--- a/arch/x86/realmode/rm/trampoline_32.S
+++ b/arch/x86/realmode/rm/trampoline_32.S
@@ -20,7 +20,6 @@
*/
#include <linux/linkage.h>
-#include <linux/init.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include "realmode.h"
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index bb360dc39d21..dac7b20d2f9d 100644
--- a/arch/x86/realmode/rm/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -25,7 +25,6 @@
*/
#include <linux/linkage.h>
-#include <linux/init.h>
#include <asm/pgtable_types.h>
#include <asm/page_types.h>
#include <asm/msr.h>
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index aabfb8380a1c..96bc506ac6de 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -357,3 +357,5 @@
348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
349 i386 kcmp sys_kcmp
350 i386 finit_module sys_finit_module
+351 i386 sched_setattr sys_sched_setattr
+352 i386 sched_getattr sys_sched_getattr
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index 38ae65dfd14f..a12bddc7ccea 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -320,6 +320,8 @@
311 64 process_vm_writev sys_process_vm_writev
312 common kcmp sys_kcmp
313 common finit_module sys_finit_module
+314 common sched_setattr sys_sched_setattr
+315 common sched_getattr sys_sched_getattr
#
# x32-specific system call numbers start at 512 to avoid cache impact
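The two entries above only wire up the syscall numbers; for a quick smoke test one can invoke the x86-64 number (314) directly. The sched_attr layout below is an assumption for illustration; it is defined in the uapi headers, not in this diff:

        #define _GNU_SOURCE
        #include <stdint.h>
        #include <unistd.h>
        #include <sys/syscall.h>

        struct sched_attr {                     /* assumed layout */
                uint32_t size;
                uint32_t sched_policy;
                uint64_t sched_flags;
                int32_t  sched_nice;
                uint32_t sched_priority;
                uint64_t sched_runtime;
                uint64_t sched_deadline;
                uint64_t sched_period;
        };

        int main(void)
        {
                struct sched_attr attr = {
                        .size         = sizeof(attr),
                        .sched_policy = 0,      /* SCHED_NORMAL */
                        .sched_nice   = 5,
                };
                /* pid 0 == calling thread; flags must be 0 */
                return syscall(314, 0, &attr, 0) ? 1 : 0;
        }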
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index f7bab68a4b83..cfbdbdb4e173 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -722,15 +722,25 @@ static void percpu_init(void)
/*
* Check to see if a symbol lies in the .data..percpu section.
- * For some as yet not understood reason the "__init_begin"
- * symbol which immediately preceeds the .data..percpu section
- * also shows up as it it were part of it so we do an explict
- * check for that symbol name and ignore it.
+ *
+ * The linker incorrectly associates some symbols with the
+ * .data..percpu section so we also need to check the symbol
+ * name to make sure that we classify the symbol correctly.
+ *
+ * The GNU linker incorrectly associates:
+ * __init_begin
+ * __per_cpu_load
+ *
+ * The "gold" linker incorrectly associates:
+ * init_per_cpu__irq_stack_union
+ * init_per_cpu__gdt_page
*/
static int is_percpu_sym(ElfW(Sym) *sym, const char *symname)
{
return (sym->st_shndx == per_cpu_shndx) &&
- strcmp(symname, "__init_begin");
+ strcmp(symname, "__init_begin") &&
+ strcmp(symname, "__per_cpu_load") &&
+ strncmp(symname, "init_per_cpu_", 13);
}
@@ -1015,6 +1025,29 @@ static void emit_relocs(int as_text, int use_real_mode)
}
}
+/*
+ * As an aid to debugging problems with different linkers,
+ * print summary information about the relocs.
+ * Since different linkers tend to emit the sections in
+ * different orders, we use the section names in the output.
+ */
+static int do_reloc_info(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
+ const char *symname)
+{
+ printf("%s\t%s\t%s\t%s\n",
+ sec_name(sec->shdr.sh_info),
+ rel_type(ELF_R_TYPE(rel->r_info)),
+ symname,
+ sec_name(sym->st_shndx));
+ return 0;
+}
+
+static void print_reloc_info(void)
+{
+ printf("reloc section\treloc type\tsymbol\tsymbol section\n");
+ walk_relocs(do_reloc_info);
+}
+
#if ELF_BITS == 64
# define process process_64
#else
@@ -1022,7 +1055,8 @@ static void emit_relocs(int as_text, int use_real_mode)
#endif
void process(FILE *fp, int use_real_mode, int as_text,
- int show_absolute_syms, int show_absolute_relocs)
+ int show_absolute_syms, int show_absolute_relocs,
+ int show_reloc_info)
{
regex_init(use_real_mode);
read_ehdr(fp);
@@ -1040,5 +1074,9 @@ void process(FILE *fp, int use_real_mode, int as_text,
print_absolute_relocs();
return;
}
+ if (show_reloc_info) {
+ print_reloc_info();
+ return;
+ }
emit_relocs(as_text, use_real_mode);
}
diff --git a/arch/x86/tools/relocs.h b/arch/x86/tools/relocs.h
index 07cdb1eca4fa..f59590645b68 100644
--- a/arch/x86/tools/relocs.h
+++ b/arch/x86/tools/relocs.h
@@ -29,8 +29,9 @@ enum symtype {
};
void process_32(FILE *fp, int use_real_mode, int as_text,
- int show_absolute_syms, int show_absolute_relocs);
+ int show_absolute_syms, int show_absolute_relocs,
+ int show_reloc_info);
void process_64(FILE *fp, int use_real_mode, int as_text,
- int show_absolute_syms, int show_absolute_relocs);
-
+ int show_absolute_syms, int show_absolute_relocs,
+ int show_reloc_info);
#endif /* RELOCS_H */
diff --git a/arch/x86/tools/relocs_common.c b/arch/x86/tools/relocs_common.c
index 44d396823a53..acab636bcb34 100644
--- a/arch/x86/tools/relocs_common.c
+++ b/arch/x86/tools/relocs_common.c
@@ -11,12 +11,13 @@ void die(char *fmt, ...)
static void usage(void)
{
- die("relocs [--abs-syms|--abs-relocs|--text|--realmode] vmlinux\n");
+ die("relocs [--abs-syms|--abs-relocs|--reloc-info|--text|--realmode]" \
+ " vmlinux\n");
}
int main(int argc, char **argv)
{
- int show_absolute_syms, show_absolute_relocs;
+ int show_absolute_syms, show_absolute_relocs, show_reloc_info;
int as_text, use_real_mode;
const char *fname;
FILE *fp;
@@ -25,6 +26,7 @@ int main(int argc, char **argv)
show_absolute_syms = 0;
show_absolute_relocs = 0;
+ show_reloc_info = 0;
as_text = 0;
use_real_mode = 0;
fname = NULL;
@@ -39,6 +41,10 @@ int main(int argc, char **argv)
show_absolute_relocs = 1;
continue;
}
+ if (strcmp(arg, "--reloc-info") == 0) {
+ show_reloc_info = 1;
+ continue;
+ }
if (strcmp(arg, "--text") == 0) {
as_text = 1;
continue;
@@ -67,10 +73,12 @@ int main(int argc, char **argv)
rewind(fp);
if (e_ident[EI_CLASS] == ELFCLASS64)
process_64(fp, use_real_mode, as_text,
- show_absolute_syms, show_absolute_relocs);
+ show_absolute_syms, show_absolute_relocs,
+ show_reloc_info);
else
process_32(fp, use_real_mode, as_text,
- show_absolute_syms, show_absolute_relocs);
+ show_absolute_syms, show_absolute_relocs,
+ show_reloc_info);
fclose(fp);
return 0;
}
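The new option can be exercised against a freshly built kernel, for example (the tool path is where the host binary normally lands; output rows elided):

        $ arch/x86/tools/relocs --reloc-info vmlinux | head
        reloc section	reloc type	symbol	symbol section
        ...

which is the summary format emitted by print_reloc_info() above.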
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 2ada505067cc..eb5d7a56f8d4 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -178,7 +178,7 @@ notrace static int __always_inline do_realtime(struct timespec *ts)
ts->tv_nsec = 0;
do {
- seq = read_seqcount_begin_no_lockdep(&gtod->seq);
+ seq = raw_read_seqcount_begin(&gtod->seq);
mode = gtod->clock.vclock_mode;
ts->tv_sec = gtod->wall_time_sec;
ns = gtod->wall_time_snsec;
@@ -198,7 +198,7 @@ notrace static int do_monotonic(struct timespec *ts)
ts->tv_nsec = 0;
do {
- seq = read_seqcount_begin_no_lockdep(&gtod->seq);
+ seq = raw_read_seqcount_begin(&gtod->seq);
mode = gtod->clock.vclock_mode;
ts->tv_sec = gtod->monotonic_time_sec;
ns = gtod->monotonic_time_snsec;
@@ -214,7 +214,7 @@ notrace static int do_realtime_coarse(struct timespec *ts)
{
unsigned long seq;
do {
- seq = read_seqcount_begin_no_lockdep(&gtod->seq);
+ seq = raw_read_seqcount_begin(&gtod->seq);
ts->tv_sec = gtod->wall_time_coarse.tv_sec;
ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
@@ -225,7 +225,7 @@ notrace static int do_monotonic_coarse(struct timespec *ts)
{
unsigned long seq;
do {
- seq = read_seqcount_begin_no_lockdep(&gtod->seq);
+ seq = raw_read_seqcount_begin(&gtod->seq);
ts->tv_sec = gtod->monotonic_time_coarse.tv_sec;
ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec;
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
diff --git a/arch/x86/vdso/vdso.S b/arch/x86/vdso/vdso.S
index 01f5e3b4613c..1e13eb8c9656 100644
--- a/arch/x86/vdso/vdso.S
+++ b/arch/x86/vdso/vdso.S
@@ -1,6 +1,5 @@
#include <asm/page_types.h>
#include <linux/linkage.h>
-#include <linux/init.h>
__PAGE_ALIGNED_DATA
diff --git a/arch/x86/vdso/vdsox32.S b/arch/x86/vdso/vdsox32.S
index d6b9a7f42a8a..295f1c7543d8 100644
--- a/arch/x86/vdso/vdsox32.S
+++ b/arch/x86/vdso/vdsox32.S
@@ -1,6 +1,5 @@
#include <asm/page_types.h>
#include <linux/linkage.h>
-#include <linux/init.h>
__PAGE_ALIGNED_DATA
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 1a3c76505649..01b90261fa38 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -51,3 +51,7 @@ config XEN_DEBUG_FS
Enable statistics output and various tuning options in debugfs.
Enabling this option may incur a significant performance overhead.
+config XEN_PVH
+ bool "Support for running as a PVH guest"
+ depends on X86_64 && XEN && XEN_PVHVM
+ def_bool n
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index fa6ade76ef3f..201d09a7c46b 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -262,8 +262,9 @@ static void __init xen_banner(void)
struct xen_extraversion extra;
HYPERVISOR_xen_version(XENVER_extraversion, &extra);
- printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
- pv_info.name);
+ pr_info("Booting paravirtualized kernel %son %s\n",
+ xen_feature(XENFEAT_auto_translated_physmap) ?
+ "with PVH extensions " : "", pv_info.name);
printk(KERN_INFO "Xen version: %d.%d%s%s\n",
version >> 16, version & 0xffff, extra.extraversion,
xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
@@ -433,7 +434,7 @@ static void __init xen_init_cpuid_mask(void)
ax = 1;
cx = 0;
- xen_cpuid(&ax, &bx, &cx, &dx);
+ cpuid(1, &ax, &bx, &cx, &dx);
xsave_mask =
(1 << (X86_FEATURE_XSAVE % 32)) |
@@ -1142,8 +1143,9 @@ void xen_setup_vcpu_info_placement(void)
xen_vcpu_setup(cpu);
/* xen_vcpu_setup managed to place the vcpu_info within the
- percpu area for all cpus, so make use of it */
- if (have_vcpu_info_placement) {
+ * percpu area for all cpus, so make use of it. Note that for
+ * PVH we want to use the native IRQ mechanism. */
+ if (have_vcpu_info_placement && !xen_pvh_domain()) {
pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
@@ -1407,9 +1409,49 @@ static void __init xen_boot_params_init_edd(void)
* Set up the GDT and segment registers for -fstack-protector. Until
* we do this, we have to be careful not to call any stack-protected
* function, which is most of the kernel.
+ *
+ * Note that it is __ref because the only caller of this after init
+ * is PVH, which is not going to use xen_load_gdt_boot or other
+ * __init functions.
*/
-static void __init xen_setup_stackprotector(void)
+static void __ref xen_setup_gdt(int cpu)
{
+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
+#ifdef CONFIG_X86_64
+ unsigned long dummy;
+
+ load_percpu_segment(cpu); /* We need to access per-cpu area */
+ switch_to_new_gdt(cpu); /* GDT and GS set */
+
+ /* We are switching from the Xen provided GDT to our HVM mode
+ * GDT. The new GDT has __KERNEL_CS with CS.L = 1
+ * and we are jumping to reload it.
+ */
+ asm volatile ("pushq %0\n"
+ "leaq 1f(%%rip),%0\n"
+ "pushq %0\n"
+ "lretq\n"
+ "1:\n"
+ : "=&r" (dummy) : "0" (__KERNEL_CS));
+
+ /*
+ * While not needed, we also set the %es, %ds, and %fs
+ * to zero. We don't care about %ss as it is NULL.
+ * Strictly speaking this is not needed as Xen zeros those
+ * out (and also MSR_FS_BASE, MSR_GS_BASE, MSR_KERNEL_GS_BASE)
+ *
+ * Linux zeros them in cpu_init() and in secondary_startup_64
+ * (for BSP).
+ */
+ loadsegment(es, 0);
+ loadsegment(ds, 0);
+ loadsegment(fs, 0);
+#else
+ /* PVH: TODO Implement. */
+ BUG();
+#endif
+ return; /* PVH does not need any PV GDT ops. */
+ }
pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
pv_cpu_ops.load_gdt = xen_load_gdt_boot;
@@ -1420,6 +1462,58 @@ static void __init xen_setup_stackprotector(void)
pv_cpu_ops.load_gdt = xen_load_gdt;
}
+/*
+ * A PV guest starts with default flags that are not set for PVH, set them
+ * here asap.
+ */
+static void xen_pvh_set_cr_flags(int cpu)
+{
+
+ /* Some of these are set up in 'secondary_startup_64'. The others:
+ * X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests
+ * (whose code paths PVH shares), while X86_CR0_PG is for PVH. */
+ write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM);
+
+ if (!cpu)
+ return;
+ /*
+	 * For the BSP, PSE and PGE are set in probe_page_size_mask(); for APs
+	 * we set them here. For all CPUs, OSFXSR and OSXMMEXCPT are set in fpu_init().
+ */
+ if (cpu_has_pse)
+ set_in_cr4(X86_CR4_PSE);
+
+ if (cpu_has_pge)
+ set_in_cr4(X86_CR4_PGE);
+}
+
+/*
+ * Note that it is __ref because the only caller of this after init
+ * is PVH, which is not going to use xen_load_gdt_boot or other
+ * __init functions.
+ */
+void __ref xen_pvh_secondary_vcpu_init(int cpu)
+{
+ xen_setup_gdt(cpu);
+ xen_pvh_set_cr_flags(cpu);
+}
+
+static void __init xen_pvh_early_guest_init(void)
+{
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ return;
+
+ if (!xen_feature(XENFEAT_hvm_callback_vector))
+ return;
+
+ xen_have_vector_callback = 1;
+ xen_pvh_set_cr_flags(0);
+
+#ifdef CONFIG_X86_32
+ BUG(); /* PVH: Implement proper support. */
+#endif
+}
+
/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
@@ -1431,13 +1525,16 @@ asmlinkage void __init xen_start_kernel(void)
xen_domain_type = XEN_PV_DOMAIN;
+ xen_setup_features();
+ xen_pvh_early_guest_init();
xen_setup_machphys_mapping();
/* Install Xen paravirt ops */
pv_info = xen_info;
pv_init_ops = xen_init_ops;
- pv_cpu_ops = xen_cpu_ops;
pv_apic_ops = xen_apic_ops;
+ if (!xen_pvh_domain())
+ pv_cpu_ops = xen_cpu_ops;
x86_init.resources.memory_setup = xen_memory_setup;
x86_init.oem.arch_setup = xen_arch_setup;
@@ -1469,17 +1566,14 @@ asmlinkage void __init xen_start_kernel(void)
/* Work out if we support NX */
x86_configure_nx();
- xen_setup_features();
-
/* Get mfn list */
- if (!xen_feature(XENFEAT_auto_translated_physmap))
- xen_build_dynamic_phys_to_machine();
+ xen_build_dynamic_phys_to_machine();
/*
* Set up kernel GDT and segment registers, mainly so that
* -fstack-protector code can be executed.
*/
- xen_setup_stackprotector();
+ xen_setup_gdt(0);
xen_init_irq_ops();
xen_init_cpuid_mask();
@@ -1548,14 +1642,18 @@ asmlinkage void __init xen_start_kernel(void)
/* set the limit of our address space */
xen_reserve_top();
- /* We used to do this in xen_arch_setup, but that is too late on AMD
- * were early_cpu_init (run before ->arch_setup()) calls early_amd_init
- * which pokes 0xcf8 port.
- */
- set_iopl.iopl = 1;
- rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
- if (rc != 0)
- xen_raw_printk("physdev_op failed %d\n", rc);
+ /* PVH: runs at default kernel iopl of 0 */
+ if (!xen_pvh_domain()) {
+ /*
+ * We used to do this in xen_arch_setup, but that is too late
+		 * on AMD where early_cpu_init (run before ->arch_setup()) calls
+ * early_amd_init which pokes 0xcf8 port.
+ */
+ set_iopl.iopl = 1;
+ rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
+ if (rc != 0)
+ xen_raw_printk("physdev_op failed %d\n", rc);
+ }
#ifdef CONFIG_X86_32
/* set up basic CPUID stuff */
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c
index 3a5f55d51907..c98583588580 100644
--- a/arch/x86/xen/grant-table.c
+++ b/arch/x86/xen/grant-table.c
@@ -125,3 +125,67 @@ void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
apply_to_page_range(&init_mm, (unsigned long)shared,
PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL);
}
+#ifdef CONFIG_XEN_PVH
+#include <xen/balloon.h>
+#include <xen/events.h>
+#include <xen/xen.h>
+#include <linux/slab.h>
+static int __init xlated_setup_gnttab_pages(void)
+{
+ struct page **pages;
+ xen_pfn_t *pfns;
+ int rc;
+ unsigned int i;
+ unsigned long nr_grant_frames = gnttab_max_grant_frames();
+
+ BUG_ON(nr_grant_frames == 0);
+ pages = kcalloc(nr_grant_frames, sizeof(pages[0]), GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);
+ if (!pfns) {
+ kfree(pages);
+ return -ENOMEM;
+ }
+ rc = alloc_xenballooned_pages(nr_grant_frames, pages, 0 /* lowmem */);
+ if (rc) {
+ pr_warn("%s Couldn't balloon alloc %ld pfns rc:%d\n", __func__,
+ nr_grant_frames, rc);
+ kfree(pages);
+ kfree(pfns);
+ return rc;
+ }
+ for (i = 0; i < nr_grant_frames; i++)
+ pfns[i] = page_to_pfn(pages[i]);
+
+ rc = arch_gnttab_map_shared(pfns, nr_grant_frames, nr_grant_frames,
+ &xen_auto_xlat_grant_frames.vaddr);
+
+ if (rc) {
+ pr_warn("%s Couldn't map %ld pfns rc:%d\n", __func__,
+ nr_grant_frames, rc);
+ free_xenballooned_pages(nr_grant_frames, pages);
+ kfree(pages);
+ kfree(pfns);
+ return rc;
+ }
+ kfree(pages);
+
+ xen_auto_xlat_grant_frames.pfn = pfns;
+ xen_auto_xlat_grant_frames.count = nr_grant_frames;
+
+ return 0;
+}
+
+static int __init xen_pvh_gnttab_setup(void)
+{
+ if (!xen_pvh_domain())
+ return -ENODEV;
+
+ return xlated_setup_gnttab_pages();
+}
+/* Call it _before_ __gnttab_init as we need to initialize the
+ * xen_auto_xlat_grant_frames first. */
+core_initcall(xen_pvh_gnttab_setup);
+#endif
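
The error handling in xlated_setup_gnttab_pages() above frees pages and pfns on
each failure path separately; expressed with the kernel's usual goto-unwind style,
the same logic would look roughly like the sketch below (an illustration only, not
the code that was merged):

    static int __init xlated_setup_gnttab_pages(void)
    {
            unsigned long nr = gnttab_max_grant_frames();
            struct page **pages;
            xen_pfn_t *pfns;
            unsigned int i;
            int rc = -ENOMEM;

            BUG_ON(nr == 0);
            pages = kcalloc(nr, sizeof(pages[0]), GFP_KERNEL);
            pfns = kcalloc(nr, sizeof(pfns[0]), GFP_KERNEL);
            if (!pages || !pfns)
                    goto out_free;

            rc = alloc_xenballooned_pages(nr, pages, 0 /* lowmem */);
            if (rc)
                    goto out_free;

            for (i = 0; i < nr; i++)
                    pfns[i] = page_to_pfn(pages[i]);

            rc = arch_gnttab_map_shared(pfns, nr, nr,
                                        &xen_auto_xlat_grant_frames.vaddr);
            if (rc)
                    goto out_unballoon;

            /* pfns stays live: it is handed over to the grant-table core. */
            xen_auto_xlat_grant_frames.pfn = pfns;
            xen_auto_xlat_grant_frames.count = nr;
            kfree(pages);
            return 0;

    out_unballoon:
            free_xenballooned_pages(nr, pages);
    out_free:
            kfree(pages);
            kfree(pfns);
            return rc;
    }
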
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 0da7f863056f..08f763de26fe 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -5,6 +5,7 @@
#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
+#include <xen/features.h>
#include <xen/events.h>
#include <asm/xen/hypercall.h>
@@ -22,7 +23,7 @@ void xen_force_evtchn_callback(void)
(void)HYPERVISOR_xen_version(0, NULL);
}
-static unsigned long xen_save_fl(void)
+asmlinkage unsigned long xen_save_fl(void)
{
struct vcpu_info *vcpu;
unsigned long flags;
@@ -40,7 +41,7 @@ static unsigned long xen_save_fl(void)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
-static void xen_restore_fl(unsigned long flags)
+__visible void xen_restore_fl(unsigned long flags)
{
struct vcpu_info *vcpu;
@@ -62,7 +63,7 @@ static void xen_restore_fl(unsigned long flags)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
-static void xen_irq_disable(void)
+asmlinkage void xen_irq_disable(void)
{
/* There's a one instruction preempt window here. We need to
make sure we're don't switch CPUs between getting the vcpu
@@ -73,7 +74,7 @@ static void xen_irq_disable(void)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
-static void xen_irq_enable(void)
+asmlinkage void xen_irq_enable(void)
{
struct vcpu_info *vcpu;
@@ -128,6 +129,8 @@ static const struct pv_irq_ops xen_irq_ops __initconst = {
void __init xen_init_irq_ops(void)
{
- pv_irq_ops = xen_irq_ops;
+ /* For PVH we use default pv_irq_ops settings. */
+ if (!xen_feature(XENFEAT_hvm_callback_vector))
+ pv_irq_ops = xen_irq_ops;
x86_init.irqs.intr_init = xen_init_IRQ;
}
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index ce563be09cc1..256282e7888b 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
- if (val & _PAGE_PRESENT) {
+ if (pteval_present(val)) {
unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
unsigned long pfn = mfn_to_pfn(mfn);
@@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
static pteval_t pte_pfn_to_mfn(pteval_t val)
{
- if (val & _PAGE_PRESENT) {
+ if (pteval_present(val)) {
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
pteval_t flags = val & PTE_FLAGS_MASK;
unsigned long mfn;
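
pteval_present() used above is a small helper (added in asm/pgtable.h, which this
series also touches) whose intent is to treat protnone/NUMA-hinting PTEs as present
for these pfn<->mfn conversions; roughly, as a sketch of its intent rather than a
verbatim copy:

    static inline int pteval_present(pteval_t pteval)
    {
            /*
             * Protnone and NUMA-hinting ptes are considered present for
             * the purposes of these conversions even though _PAGE_PRESENT
             * itself is clear in them.
             */
            return pteval & (_PAGE_PROTNONE | _PAGE_NUMA | _PAGE_PRESENT);
    }
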
@@ -431,7 +431,7 @@ static pteval_t iomap_pte(pteval_t val)
return val;
}
-static pteval_t xen_pte_val(pte_t pte)
+__visible pteval_t xen_pte_val(pte_t pte)
{
pteval_t pteval = pte.pte;
#if 0
@@ -448,7 +448,7 @@ static pteval_t xen_pte_val(pte_t pte)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
-static pgdval_t xen_pgd_val(pgd_t pgd)
+__visible pgdval_t xen_pgd_val(pgd_t pgd)
{
return pte_mfn_to_pfn(pgd.pgd);
}
@@ -479,7 +479,7 @@ void xen_set_pat(u64 pat)
WARN_ON(pat != 0x0007010600070106ull);
}
-static pte_t xen_make_pte(pteval_t pte)
+__visible pte_t xen_make_pte(pteval_t pte)
{
phys_addr_t addr = (pte & PTE_PFN_MASK);
#if 0
@@ -514,14 +514,14 @@ static pte_t xen_make_pte(pteval_t pte)
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
-static pgd_t xen_make_pgd(pgdval_t pgd)
+__visible pgd_t xen_make_pgd(pgdval_t pgd)
{
pgd = pte_pfn_to_mfn(pgd);
return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
-static pmdval_t xen_pmd_val(pmd_t pmd)
+__visible pmdval_t xen_pmd_val(pmd_t pmd)
{
return pte_mfn_to_pfn(pmd.pmd);
}
@@ -580,7 +580,7 @@ static void xen_pmd_clear(pmd_t *pmdp)
}
#endif /* CONFIG_X86_PAE */
-static pmd_t xen_make_pmd(pmdval_t pmd)
+__visible pmd_t xen_make_pmd(pmdval_t pmd)
{
pmd = pte_pfn_to_mfn(pmd);
return native_make_pmd(pmd);
@@ -588,13 +588,13 @@ static pmd_t xen_make_pmd(pmdval_t pmd)
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
#if PAGETABLE_LEVELS == 4
-static pudval_t xen_pud_val(pud_t pud)
+__visible pudval_t xen_pud_val(pud_t pud)
{
return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
-static pud_t xen_make_pud(pudval_t pud)
+__visible pud_t xen_make_pud(pudval_t pud)
{
pud = pte_pfn_to_mfn(pud);
@@ -1198,44 +1198,40 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
* instead of somewhere later and be confusing. */
xen_mc_flush();
}
-#endif
-static void __init xen_pagetable_init(void)
+static void __init xen_pagetable_p2m_copy(void)
{
-#ifdef CONFIG_X86_64
unsigned long size;
unsigned long addr;
-#endif
- paging_init();
- xen_setup_shared_info();
-#ifdef CONFIG_X86_64
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- unsigned long new_mfn_list;
-
- size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
-
- /* On 32-bit, we get zero so this never gets executed. */
- new_mfn_list = xen_revector_p2m_tree();
- if (new_mfn_list && new_mfn_list != xen_start_info->mfn_list) {
- /* using __ka address and sticking INVALID_P2M_ENTRY! */
- memset((void *)xen_start_info->mfn_list, 0xff, size);
-
- /* We should be in __ka space. */
- BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
- addr = xen_start_info->mfn_list;
- /* We roundup to the PMD, which means that if anybody at this stage is
- * using the __ka address of xen_start_info or xen_start_info->shared_info
- * they are in going to crash. Fortunatly we have already revectored
- * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
- size = roundup(size, PMD_SIZE);
- xen_cleanhighmap(addr, addr + size);
-
- size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
- memblock_free(__pa(xen_start_info->mfn_list), size);
- /* And revector! Bye bye old array */
- xen_start_info->mfn_list = new_mfn_list;
- } else
- goto skip;
- }
+ unsigned long new_mfn_list;
+
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return;
+
+ size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
+
+ new_mfn_list = xen_revector_p2m_tree();
+ /* No memory or already called. */
+ if (!new_mfn_list || new_mfn_list == xen_start_info->mfn_list)
+ return;
+
+ /* using __ka address and sticking INVALID_P2M_ENTRY! */
+ memset((void *)xen_start_info->mfn_list, 0xff, size);
+
+ /* We should be in __ka space. */
+ BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
+ addr = xen_start_info->mfn_list;
+	/* We round up to the PMD, which means that if anybody at this stage is
+	 * using the __ka address of xen_start_info or xen_start_info->shared_info
+	 * they are going to crash. Fortunately we have already revectored
+	 * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
+ size = roundup(size, PMD_SIZE);
+ xen_cleanhighmap(addr, addr + size);
+
+ size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
+ memblock_free(__pa(xen_start_info->mfn_list), size);
+ /* And revector! Bye bye old array */
+ xen_start_info->mfn_list = new_mfn_list;
+
/* At this stage, cleanup_highmap has already cleaned __ka space
* from _brk_limit way up to the max_pfn_mapped (which is the end of
* the ramdisk). We continue on, erasing PMD entries that point to page
@@ -1255,7 +1251,15 @@ static void __init xen_pagetable_init(void)
* anything at this stage. */
xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
#endif
-skip:
+}
+#endif
+
+static void __init xen_pagetable_init(void)
+{
+ paging_init();
+ xen_setup_shared_info();
+#ifdef CONFIG_X86_64
+ xen_pagetable_p2m_copy();
#endif
xen_post_allocator_init();
}
@@ -1753,6 +1757,10 @@ static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
pte_t pte = pfn_pte(pfn, prot);
+ /* For PVH no need to set R/O or R/W to pin them or unpin them. */
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return;
+
if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
BUG();
}
@@ -1863,6 +1871,7 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
* but that's enough to get __va working. We need to fill in the rest
* of the physical mapping once some sort of allocator has been set
* up.
+ * NOTE: for PVH, the page tables are native.
*/
void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
{
@@ -1884,17 +1893,18 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
/* Zap identity mapping */
init_level4_pgt[0] = __pgd(0);
- /* Pre-constructed entries are in pfn, so convert to mfn */
- /* L4[272] -> level3_ident_pgt
- * L4[511] -> level3_kernel_pgt */
- convert_pfn_mfn(init_level4_pgt);
-
- /* L3_i[0] -> level2_ident_pgt */
- convert_pfn_mfn(level3_ident_pgt);
- /* L3_k[510] -> level2_kernel_pgt
- * L3_i[511] -> level2_fixmap_pgt */
- convert_pfn_mfn(level3_kernel_pgt);
-
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* Pre-constructed entries are in pfn, so convert to mfn */
+ /* L4[272] -> level3_ident_pgt
+ * L4[511] -> level3_kernel_pgt */
+ convert_pfn_mfn(init_level4_pgt);
+
+ /* L3_i[0] -> level2_ident_pgt */
+ convert_pfn_mfn(level3_ident_pgt);
+ /* L3_k[510] -> level2_kernel_pgt
+ * L3_i[511] -> level2_fixmap_pgt */
+ convert_pfn_mfn(level3_kernel_pgt);
+ }
/* We get [511][511] and have Xen's version of level2_kernel_pgt */
l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
@@ -1918,31 +1928,33 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
copy_page(level2_fixmap_pgt, l2);
/* Note that we don't do anything with level1_fixmap_pgt which
* we don't need. */
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ /* Make pagetable pieces RO */
+ set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
+ set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+ set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+
+ /* Pin down new L4 */
+ pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
+ PFN_DOWN(__pa_symbol(init_level4_pgt)));
+
+ /* Unpin Xen-provided one */
+ pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
- /* Make pagetable pieces RO */
- set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
- set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
- set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
- set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
- set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
- set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
- set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
-
- /* Pin down new L4 */
- pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
- PFN_DOWN(__pa_symbol(init_level4_pgt)));
-
- /* Unpin Xen-provided one */
- pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
-
- /*
- * At this stage there can be no user pgd, and no page
- * structure to attach it to, so make sure we just set kernel
- * pgd.
- */
- xen_mc_batch();
- __xen_write_cr3(true, __pa(init_level4_pgt));
- xen_mc_issue(PARAVIRT_LAZY_CPU);
+ /*
+ * At this stage there can be no user pgd, and no page
+ * structure to attach it to, so make sure we just set kernel
+ * pgd.
+ */
+ xen_mc_batch();
+ __xen_write_cr3(true, __pa(init_level4_pgt));
+ xen_mc_issue(PARAVIRT_LAZY_CPU);
+ } else
+ native_write_cr3(__pa(init_level4_pgt));
/* We can't that easily rip out L3 and L2, as the Xen pagetables are
* set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for
@@ -2103,6 +2115,9 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
static void __init xen_post_allocator_init(void)
{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return;
+
pv_mmu_ops.set_pte = xen_set_pte;
pv_mmu_ops.set_pmd = xen_set_pmd;
pv_mmu_ops.set_pud = xen_set_pud;
@@ -2207,6 +2222,15 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
void __init xen_init_mmu_ops(void)
{
x86_init.paging.pagetable_init = xen_pagetable_init;
+
+	/* Optimization - we could use the native flush (what plain HVM guests
+	 * use), but it has no idea which VCPUs are descheduled and would
+	 * needlessly IPI them. Xen knows, so let it do the job.
+ */
+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
+ pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
+ return;
+ }
pv_mmu_ops = xen_mmu_ops;
memset(dummy_mapping, 0xff, PAGE_SIZE);
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 2ae8699e8767..696c694986d0 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -280,6 +280,9 @@ void __ref xen_build_mfn_list_list(void)
{
unsigned long pfn;
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return;
+
/* Pre-initialize p2m_top_mfn to be completely missing */
if (p2m_top_mfn == NULL) {
p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
@@ -336,6 +339,9 @@ void __ref xen_build_mfn_list_list(void)
void xen_setup_mfn_list_list(void)
{
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return;
+
BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
@@ -346,10 +352,15 @@ void xen_setup_mfn_list_list(void)
/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
- unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
- unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
+ unsigned long *mfn_list;
+ unsigned long max_pfn;
unsigned long pfn;
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return;
+
+ mfn_list = (unsigned long *)xen_start_info->mfn_list;
+ max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
xen_max_p2m_pfn = max_pfn;
p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
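
The early returns added throughout p2m.c reflect that an auto-translated (PVH)
guest keeps no software P2M at all: second-stage paging (EPT/NPT) does the real
translation, so the pfn<->mfn helpers collapse to the identity. A sketch of the
assumption behind these bail-outs (not code from the patch):

    /* For XENFEAT_auto_translated_physmap guests, conceptually: */
    static inline unsigned long pvh_pfn_to_mfn(unsigned long pfn)
    {
            return pfn;     /* the hardware does the real translation */
    }

    static inline unsigned long pvh_mfn_to_pfn(unsigned long mfn)
    {
            return mfn;
    }
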
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index 0a7852483ffe..a8261716d58d 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -30,10 +30,9 @@
#define XEN_PLATFORM_ERR_PROTOCOL -2
#define XEN_PLATFORM_ERR_BLACKLIST -3
-/* store the value of xen_emul_unplug after the unplug is done */
-int xen_platform_pci_unplug;
-EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);
#ifdef CONFIG_XEN_PVHVM
+/* store the value of xen_emul_unplug after the unplug is done */
+static int xen_platform_pci_unplug;
static int xen_emul_unplug;
static int check_platform_magic(void)
@@ -69,6 +68,80 @@ static int check_platform_magic(void)
return 0;
}
+bool xen_has_pv_devices(void)
+{
+ if (!xen_domain())
+ return false;
+
+ /* PV domains always have them. */
+ if (xen_pv_domain())
+ return true;
+
+	/* And the user has xen_platform_pci=0 set in the guest config, as
+	 * the driver did not modify the value. */
+ if (xen_platform_pci_unplug == 0)
+ return false;
+
+ if (xen_platform_pci_unplug & XEN_UNPLUG_NEVER)
+ return false;
+
+ if (xen_platform_pci_unplug & XEN_UNPLUG_ALL)
+ return true;
+
+ /* This is an odd one - we are going to run legacy
+ * and PV drivers at the same time. */
+ if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY)
+ return true;
+
+ /* And the caller has to follow with xen_pv_{disk,nic}_devices
+ * to be certain which driver can load. */
+ return false;
+}
+EXPORT_SYMBOL_GPL(xen_has_pv_devices);
+
+static bool __xen_has_pv_device(int state)
+{
+ /* HVM domains might or might not */
+ if (xen_hvm_domain() && (xen_platform_pci_unplug & state))
+ return true;
+
+ return xen_has_pv_devices();
+}
+
+bool xen_has_pv_nic_devices(void)
+{
+ return __xen_has_pv_device(XEN_UNPLUG_ALL_NICS | XEN_UNPLUG_ALL);
+}
+EXPORT_SYMBOL_GPL(xen_has_pv_nic_devices);
+
+bool xen_has_pv_disk_devices(void)
+{
+ return __xen_has_pv_device(XEN_UNPLUG_ALL_IDE_DISKS |
+ XEN_UNPLUG_AUX_IDE_DISKS | XEN_UNPLUG_ALL);
+}
+EXPORT_SYMBOL_GPL(xen_has_pv_disk_devices);
+
+/*
+ * This one is odd - it determines whether you want to run PV _and_
+ * legacy (IDE) drivers together. This combination is only possible
+ * under HVM.
+ */
+bool xen_has_pv_and_legacy_disk_devices(void)
+{
+ if (!xen_domain())
+ return false;
+
+ /* N.B. This is only ever used in HVM mode */
+ if (xen_pv_domain())
+ return false;
+
+ if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(xen_has_pv_and_legacy_disk_devices);
+
void xen_unplug_emulated_devices(void)
{
int r;
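
The new xen_has_pv_*() predicates are meant to be called by PV frontend drivers
before they register themselves. A hypothetical block-frontend init illustrating
the intended use (my_blkfront_init and my_blkfront_driver are made-up names, not
part of this patch):

    static int __init my_blkfront_init(void)
    {
            if (!xen_domain())
                    return -ENODEV;

            /* Bail out if the emulated IDE disks are still in use. */
            if (!xen_has_pv_disk_devices())
                    return -ENODEV;

            return xenbus_register_frontend(&my_blkfront_driver);
    }
    module_init(my_blkfront_init);
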
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 68c054f59de6..0982233b9b84 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -27,6 +27,7 @@
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
+#include "mmu.h"
#include "xen-ops.h"
#include "vdso.h"
@@ -34,7 +35,7 @@
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
#ifdef CONFIG_X86_64
-extern const char nmi[];
+extern asmlinkage void nmi(void);
#endif
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
@@ -81,6 +82,9 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
memblock_reserve(start, size);
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return;
+
xen_max_p2m_pfn = PFN_DOWN(start + size);
for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn);
@@ -103,6 +107,7 @@ static unsigned long __init xen_do_chunk(unsigned long start,
.domid = DOMID_SELF
};
unsigned long len = 0;
+ int xlated_phys = xen_feature(XENFEAT_auto_translated_physmap);
unsigned long pfn;
int ret;
@@ -116,7 +121,7 @@ static unsigned long __init xen_do_chunk(unsigned long start,
continue;
frame = mfn;
} else {
- if (mfn != INVALID_P2M_ENTRY)
+ if (!xlated_phys && mfn != INVALID_P2M_ENTRY)
continue;
frame = pfn;
}
@@ -154,6 +159,13 @@ static unsigned long __init xen_do_chunk(unsigned long start,
static unsigned long __init xen_release_chunk(unsigned long start,
unsigned long end)
{
+ /*
+	 * Xen already ballooned out the E820 non-RAM regions for us
+ * and set them up properly in EPT.
+ */
+ if (xen_feature(XENFEAT_auto_translated_physmap))
+ return end - start;
+
return xen_do_chunk(start, end, true);
}
@@ -222,7 +234,13 @@ static void __init xen_set_identity_and_release_chunk(
* (except for the ISA region which must be 1:1 mapped) to
* release the refcounts (in Xen) on the original frames.
*/
- for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
+
+ /*
+ * PVH E820 matches the hypervisor's P2M which means we need to
+ * account for the proper values of *release and *identity.
+ */
+ for (pfn = start_pfn; !xen_feature(XENFEAT_auto_translated_physmap) &&
+ pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
pte_t pte = __pte_ma(0);
if (pfn < PFN_UP(ISA_END_ADDRESS))
@@ -559,20 +577,17 @@ void xen_enable_syscall(void)
void xen_enable_nmi(void)
{
#ifdef CONFIG_X86_64
- if (register_callback(CALLBACKTYPE_nmi, nmi))
+ if (register_callback(CALLBACKTYPE_nmi, (char *)nmi))
BUG();
#endif
}
-void __init xen_arch_setup(void)
+void __init xen_pvmmu_arch_setup(void)
{
- xen_panic_handler_init();
-
HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
- if (!xen_feature(XENFEAT_auto_translated_physmap))
- HYPERVISOR_vm_assist(VMASST_CMD_enable,
- VMASST_TYPE_pae_extended_cr3);
+ HYPERVISOR_vm_assist(VMASST_CMD_enable,
+ VMASST_TYPE_pae_extended_cr3);
if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
@@ -581,6 +596,15 @@ void __init xen_arch_setup(void)
xen_enable_sysenter();
xen_enable_syscall();
xen_enable_nmi();
+}
+
+/* This function is not called for HVM domains */
+void __init xen_arch_setup(void)
+{
+ xen_panic_handler_init();
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ xen_pvmmu_arch_setup();
+
#ifdef CONFIG_ACPI
if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index c36b325abd83..a18eadd8bb40 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -73,9 +73,11 @@ static void cpu_bringup(void)
touch_softlockup_watchdog();
preempt_disable();
- xen_enable_sysenter();
- xen_enable_syscall();
-
+ /* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
+ if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
+ xen_enable_sysenter();
+ xen_enable_syscall();
+ }
cpu = smp_processor_id();
smp_store_cpu_info(cpu);
cpu_data(cpu).x86_max_cores = 1;
@@ -97,8 +99,14 @@ static void cpu_bringup(void)
wmb(); /* make sure everything is out */
}
-static void cpu_bringup_and_idle(void)
+/* Note: cpu parameter is only relevant for PVH */
+static void cpu_bringup_and_idle(int cpu)
{
+#ifdef CONFIG_X86_64
+ if (xen_feature(XENFEAT_auto_translated_physmap) &&
+ xen_feature(XENFEAT_supervisor_mode_kernel))
+ xen_pvh_secondary_vcpu_init(cpu);
+#endif
cpu_bringup();
cpu_startup_entry(CPUHP_ONLINE);
}
@@ -274,9 +282,10 @@ static void __init xen_smp_prepare_boot_cpu(void)
native_smp_prepare_boot_cpu();
if (xen_pv_domain()) {
- /* We've switched to the "real" per-cpu gdt, so make sure the
- old memory can be recycled */
- make_lowmem_page_readwrite(xen_initial_gdt);
+ if (!xen_feature(XENFEAT_writable_page_tables))
+ /* We've switched to the "real" per-cpu gdt, so make
+ * sure the old memory can be recycled. */
+ make_lowmem_page_readwrite(xen_initial_gdt);
#ifdef CONFIG_X86_32
/*
@@ -360,22 +369,21 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
gdt = get_cpu_gdt_table(cpu);
- ctxt->flags = VGCF_IN_KERNEL;
- ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
+ /* Note: PVH is not yet supported on x86_32. */
ctxt->user_regs.fs = __KERNEL_PERCPU;
ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
-#else
- ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));
- {
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ ctxt->flags = VGCF_IN_KERNEL;
ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
ctxt->user_regs.ds = __USER_DS;
ctxt->user_regs.es = __USER_DS;
+ ctxt->user_regs.ss = __KERNEL_DS;
xen_copy_trap_info(ctxt->trap_ctxt);
@@ -396,18 +404,27 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
#ifdef CONFIG_X86_32
ctxt->event_callback_cs = __KERNEL_CS;
ctxt->failsafe_callback_cs = __KERNEL_CS;
+#else
+ ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
ctxt->event_callback_eip =
(unsigned long)xen_hypervisor_callback;
ctxt->failsafe_callback_eip =
(unsigned long)xen_failsafe_callback;
+ ctxt->user_regs.cs = __KERNEL_CS;
+ per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
+#ifdef CONFIG_X86_32
}
- ctxt->user_regs.cs = __KERNEL_CS;
+#else
+ } else
+	/* N.B. user_regs.eip (cpu_bringup_and_idle) is invoked with
+	 * %rdi holding the cpu number, which means we pass the cpu in
+	 * as the first parameter. Subtle!
+ */
+ ctxt->user_regs.rdi = cpu;
+#endif
ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
-
- per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
-
if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
BUG();
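
Collapsed, the PVH branch of cpu_initialize_context() above ends up filling in only
the entry point, the cpu number in %rdi, the stack and cr3 before handing the
context to the hypervisor; a simplified view of the net effect (a sketch, not a
literal excerpt):

    /* PVH vCPU bring-up, once the #ifdef branches above are folded in: */
    ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
    ctxt->user_regs.rdi = cpu;  /* first argument to cpu_bringup_and_idle() */
    ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
    ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
    if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
            BUG();
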
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 0e36cde12f7e..581521c843a5 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -106,7 +106,7 @@ static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
static cpumask_t waiting_cpus;
static bool xen_pvspin = true;
-static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
+__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
int irq = __this_cpu_read(lock_kicker_irq);
struct xen_lock_waiting *w = &__get_cpu_var(lock_waiting);
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 12a1ca707b94..7b78f88c1707 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -446,6 +446,7 @@ void xen_setup_timer(int cpu)
IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
IRQF_FORCE_RESUME,
name, NULL);
+ (void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
memcpy(evt, xen_clockevent, sizeof(*evt));
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 7faed5869e5b..485b69585540 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -11,8 +11,28 @@
#include <asm/page_types.h>
#include <xen/interface/elfnote.h>
+#include <xen/interface/features.h>
#include <asm/xen/interface.h>
+#ifdef CONFIG_XEN_PVH
+#define PVH_FEATURES_STR "|writable_descriptor_tables|auto_translated_physmap|supervisor_mode_kernel"
+/* Note the lack of 'hvm_callback_vector'. Older hypervisors will
+ * balk at this being part of XEN_ELFNOTE_FEATURES, so we put it in
+ * XEN_ELFNOTE_SUPPORTED_FEATURES, which older hypervisors will ignore.
+ */
+#define PVH_FEATURES ((1 << XENFEAT_writable_page_tables) | \
+ (1 << XENFEAT_auto_translated_physmap) | \
+ (1 << XENFEAT_supervisor_mode_kernel) | \
+ (1 << XENFEAT_hvm_callback_vector))
+/* XENFEAT_writable_page_tables is not strictly necessary, as we set that
+ * up regardless of whether this CONFIG option is enabled, but it
+ * clarifies what the right flags need to be.
+ */
+#else
+#define PVH_FEATURES_STR ""
+#define PVH_FEATURES (0)
+#endif
+
__INIT
ENTRY(startup_xen)
cld
@@ -95,7 +115,10 @@ NEXT_HYPERCALL(arch_6)
#endif
ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, _ASM_PTR startup_xen)
ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page)
- ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz "!writable_page_tables|pae_pgdir_above_4gb")
+ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .ascii "!writable_page_tables|pae_pgdir_above_4gb"; .asciz PVH_FEATURES_STR)
+ ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES, .long (PVH_FEATURES) |
+ (1 << XENFEAT_writable_page_tables) |
+ (1 << XENFEAT_dom0))
ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz "yes")
ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic")
ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID,
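
The net effect of the two ELF notes above: the string XEN_ELFNOTE_FEATURES note
deliberately omits 'hvm_callback_vector' (older hypervisors reject it there), while
the numeric XEN_ELFNOTE_SUPPORTED_FEATURES note, which older hypervisors ignore,
carries the full set. With CONFIG_XEN_PVH=y the numeric note expands to roughly the
following bitmask (XENFEAT_* bit numbers come from xen/interface/features.h):

    unsigned int supported_features =
            (1u << XENFEAT_writable_page_tables)    |
            (1u << XENFEAT_auto_translated_physmap) |
            (1u << XENFEAT_supervisor_mode_kernel)  |
            (1u << XENFEAT_hvm_callback_vector)     |
            (1u << XENFEAT_dom0);
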
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 95f8c6142328..1cb6f4c37300 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -123,4 +123,5 @@ __visible void xen_adjust_exception_frame(void);
extern int xen_panic_handler_init(void);
+void xen_pvh_secondary_vcpu_init(int cpu);
#endif /* XEN_OPS_H */