author     Ingo Molnar <mingo@kernel.org>  2015-10-14 16:05:18 +0200
committer  Ingo Molnar <mingo@kernel.org>  2015-10-14 16:05:18 +0200
commit     c7d77a7980e434c3af17de19e3348157f9b9ccce (patch)
tree       b32c5988ce8239b80c83e94c22d68f5eb0fb84da /arch/arm64
parent     efi: Use the generic efi.memmap instead of 'memmap' (diff)
parent     x86/efi: Fix multiple GOP device support (diff)
Merge branch 'x86/urgent' into core/efi, to pick up a pending EFI fix
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig | 209
-rw-r--r--  arch/arm64/Kconfig.platforms | 125
-rw-r--r--  arch/arm64/Makefile | 22
-rw-r--r--  arch/arm64/boot/Makefile | 12
-rw-r--r--  arch/arm64/boot/dts/Makefile | 3
-rw-r--r--  arch/arm64/boot/dts/apm/apm-storm.dtsi | 23
-rw-r--r--  arch/arm64/boot/dts/arm/juno-motherboard.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi | 2
-rw-r--r--  arch/arm64/boot/dts/broadcom/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/broadcom/ns2-svk.dts | 59
-rw-r--r--  arch/arm64/boot/dts/broadcom/ns2.dtsi | 118
-rw-r--r--  arch/arm64/boot/dts/marvell/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/marvell/berlin4ct-dmp.dts | 66
-rw-r--r--  arch/arm64/boot/dts/marvell/berlin4ct.dtsi | 164
-rw-r--r--  arch/arm64/boot/dts/mediatek/Makefile | 1
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt6795-evb.dts | 41
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt6795.dtsi | 175
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8173-evb.dts | 353
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt8173.dtsi | 327
-rw-r--r--  arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi | 34
-rw-r--r--  arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi | 14
-rw-r--r--  arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi | 51
-rw-r--r--  arch/arm64/boot/dts/qcom/msm8916-pins.dtsi | 430
-rw-r--r--  arch/arm64/boot/dts/qcom/msm8916.dtsi | 227
-rw-r--r--  arch/arm64/boot/dts/rockchip/Makefile | 5
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3368-r88.dts | 354
-rw-r--r--  arch/arm64/boot/dts/rockchip/rk3368.dtsi | 900
-rw-r--r--  arch/arm64/boot/dts/sprd/sc9836.dtsi | 99
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts | 89
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp.dtsi | 233
-rw-r--r--  arch/arm64/configs/defconfig | 4
-rw-r--r--  arch/arm64/crypto/aes-ce-ccm-glue.c | 68
-rw-r--r--  arch/arm64/include/asm/acpi.h | 4
-rw-r--r--  arch/arm64/include/asm/alternative.h | 78
-rw-r--r--  arch/arm64/include/asm/assembler.h | 14
-rw-r--r--  arch/arm64/include/asm/atomic.h | 265
-rw-r--r--  arch/arm64/include/asm/atomic_ll_sc.h | 247
-rw-r--r--  arch/arm64/include/asm/atomic_lse.h | 391
-rw-r--r--  arch/arm64/include/asm/barrier.h | 24
-rw-r--r--  arch/arm64/include/asm/bug.h | 64
-rw-r--r--  arch/arm64/include/asm/cmpxchg.h | 192
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 18
-rw-r--r--  arch/arm64/include/asm/cputype.h | 3
-rw-r--r--  arch/arm64/include/asm/debug-monitors.h | 44
-rw-r--r--  arch/arm64/include/asm/dma-mapping.h | 69
-rw-r--r--  arch/arm64/include/asm/esr.h | 9
-rw-r--r--  arch/arm64/include/asm/exception.h | 6
-rw-r--r--  arch/arm64/include/asm/fixmap.h | 2
-rw-r--r--  arch/arm64/include/asm/futex.h | 10
-rw-r--r--  arch/arm64/include/asm/hardirq.h | 9
-rw-r--r--  arch/arm64/include/asm/hugetlb.h | 4
-rw-r--r--  arch/arm64/include/asm/hw_breakpoint.h | 14
-rw-r--r--  arch/arm64/include/asm/irq_work.h | 11
-rw-r--r--  arch/arm64/include/asm/jump_label.h | 18
-rw-r--r--  arch/arm64/include/asm/kvm_arm.h | 16
-rw-r--r--  arch/arm64/include/asm/kvm_asm.h | 24
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 52
-rw-r--r--  arch/arm64/include/asm/lse.h | 53
-rw-r--r--  arch/arm64/include/asm/memory.h | 14
-rw-r--r--  arch/arm64/include/asm/mmu.h | 1
-rw-r--r--  arch/arm64/include/asm/percpu.h | 8
-rw-r--r--  arch/arm64/include/asm/perf_event.h | 2
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h | 3
-rw-r--r--  arch/arm64/include/asm/pgtable.h | 169
-rw-r--r--  arch/arm64/include/asm/processor.h | 2
-rw-r--r--  arch/arm64/include/asm/psci.h | 28
-rw-r--r--  arch/arm64/include/asm/ptrace.h | 4
-rw-r--r--  arch/arm64/include/asm/smp.h | 4
-rw-r--r--  arch/arm64/include/asm/smp_plat.h | 2
-rw-r--r--  arch/arm64/include/asm/spinlock.h | 147
-rw-r--r--  arch/arm64/include/asm/spinlock_types.h | 2
-rw-r--r--  arch/arm64/include/asm/sysreg.h | 40
-rw-r--r--  arch/arm64/include/asm/tlb.h | 7
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 76
-rw-r--r--  arch/arm64/include/asm/topology.h | 9
-rw-r--r--  arch/arm64/include/asm/traps.h | 23
-rw-r--r--  arch/arm64/include/asm/uaccess.h | 11
-rw-r--r--  arch/arm64/include/asm/xen/events.h | 6
-rw-r--r--  arch/arm64/include/uapi/asm/hwcap.h | 1
-rw-r--r--  arch/arm64/include/uapi/asm/kvm.h | 37
-rw-r--r--  arch/arm64/include/uapi/asm/ptrace.h | 1
-rw-r--r--  arch/arm64/kernel/Makefile | 6
-rw-r--r--  arch/arm64/kernel/alternative.c | 30
-rw-r--r--  arch/arm64/kernel/armv8_deprecated.c | 19
-rw-r--r--  arch/arm64/kernel/asm-offsets.c | 9
-rw-r--r--  arch/arm64/kernel/cpu_ops.c | 2
-rw-r--r--  arch/arm64/kernel/cpufeature.c | 53
-rw-r--r--  arch/arm64/kernel/debug-monitors.c | 29
-rw-r--r--  arch/arm64/kernel/efi-stub.c | 41
-rw-r--r--  arch/arm64/kernel/efi.c | 3
-rw-r--r--  arch/arm64/kernel/entry-ftrace.S | 22
-rw-r--r--  arch/arm64/kernel/entry.S | 46
-rw-r--r--  arch/arm64/kernel/fpsimd.c | 1
-rw-r--r--  arch/arm64/kernel/head.S | 20
-rw-r--r--  arch/arm64/kernel/hw_breakpoint.c | 20
-rw-r--r--  arch/arm64/kernel/insn.c | 11
-rw-r--r--  arch/arm64/kernel/irq.c | 2
-rw-r--r--  arch/arm64/kernel/jump_label.c | 2
-rw-r--r--  arch/arm64/kernel/kgdb.c | 12
-rw-r--r--  arch/arm64/kernel/module.c | 2
-rw-r--r--  arch/arm64/kernel/pci.c | 13
-rw-r--r--  arch/arm64/kernel/perf_callchain.c | 196
-rw-r--r--  arch/arm64/kernel/perf_event.c | 310
-rw-r--r--  arch/arm64/kernel/psci.c | 366
-rw-r--r--  arch/arm64/kernel/ptrace.c | 92
-rw-r--r--  arch/arm64/kernel/setup.c | 152
-rw-r--r--  arch/arm64/kernel/signal32.c | 47
-rw-r--r--  arch/arm64/kernel/sleep.S | 14
-rw-r--r--  arch/arm64/kernel/smp.c | 15
-rw-r--r--  arch/arm64/kernel/time.c | 2
-rw-r--r--  arch/arm64/kernel/topology.c | 2
-rw-r--r--  arch/arm64/kernel/traps.c | 94
-rw-r--r--  arch/arm64/kernel/vdso.c | 7
-rw-r--r--  arch/arm64/kvm/Kconfig | 11
-rw-r--r--  arch/arm64/kvm/Makefile | 2
-rw-r--r--  arch/arm64/kvm/debug.c | 217
-rw-r--r--  arch/arm64/kvm/guest.c | 43
-rw-r--r--  arch/arm64/kvm/handle_exit.c | 44
-rw-r--r--  arch/arm64/kvm/hyp.S | 656
-rw-r--r--  arch/arm64/kvm/inject_fault.c | 12
-rw-r--r--  arch/arm64/kvm/reset.c | 20
-rw-r--r--  arch/arm64/kvm/sys_regs.c | 298
-rw-r--r--  arch/arm64/kvm/sys_regs.h | 6
-rw-r--r--  arch/arm64/kvm/sys_regs_generic_v8.c | 2
-rw-r--r--  arch/arm64/kvm/trace.h | 123
-rw-r--r--  arch/arm64/lib/Makefile | 13
-rw-r--r--  arch/arm64/lib/atomic_ll_sc.c | 3
-rw-r--r--  arch/arm64/lib/bitops.S | 45
-rw-r--r--  arch/arm64/lib/clear_user.S | 8
-rw-r--r--  arch/arm64/lib/copy_from_user.S | 25
-rw-r--r--  arch/arm64/lib/copy_in_user.S | 25
-rw-r--r--  arch/arm64/lib/copy_to_user.S | 25
-rw-r--r--  arch/arm64/mm/cache.S | 7
-rw-r--r--  arch/arm64/mm/context.c | 16
-rw-r--r--  arch/arm64/mm/dma-mapping.c | 35
-rw-r--r--  arch/arm64/mm/fault.c | 29
-rw-r--r--  arch/arm64/mm/flush.c | 4
-rw-r--r--  arch/arm64/mm/hugetlbpage.c | 4
-rw-r--r--  arch/arm64/mm/init.c | 4
-rw-r--r--  arch/arm64/mm/mmu.c | 13
-rw-r--r--  arch/arm64/mm/proc.S | 21
141 files changed, 7371 insertions, 2438 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 318175f62c24..07d1811aa03f 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -20,6 +20,7 @@ config ARM64
select ARM_GIC_V2M if PCI_MSI
select ARM_GIC_V3
select ARM_GIC_V3_ITS if PCI_MSI
+ select ARM_PSCI_FW
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
@@ -28,9 +29,10 @@ config ARM64
select EDAC_SUPPORT
select GENERIC_ALLOCATOR
select GENERIC_CLOCKEVENTS
- select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+ select GENERIC_CLOCKEVENTS_BROADCAST
select GENERIC_CPU_AUTOPROBE
select GENERIC_EARLY_IOREMAP
+ select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_IRQ_SHOW_LEVEL
@@ -53,6 +55,7 @@ config ARM64
select HAVE_C_RECORDMCOUNT
select HAVE_CC_STACKPROTECTOR
select HAVE_CMPXCHG_DOUBLE
+ select HAVE_CMPXCHG_LOCAL
select HAVE_DEBUG_BUGVERBOSE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
@@ -104,6 +107,10 @@ config NO_IOPORT_MAP
config STACKTRACE_SUPPORT
def_bool y
+config ILLEGAL_POINTER_VALUE
+ hex
+ default 0xdead000000000000
+
config LOCKDEP_SUPPORT
def_bool y
@@ -113,6 +120,14 @@ config TRACE_IRQFLAGS_SUPPORT
config RWSEM_XCHGADD_ALGORITHM
def_bool y
+config GENERIC_BUG
+ def_bool y
+ depends on BUG
+
+config GENERIC_BUG_RELATIVE_POINTERS
+ def_bool y
+ depends on GENERIC_BUG
+
config GENERIC_HWEIGHT
def_bool y
@@ -137,6 +152,9 @@ config NEED_DMA_MAP_STATE
config NEED_SG_DMA_LENGTH
def_bool y
+config SMP
+ def_bool y
+
config SWIOTLB
def_bool y
@@ -160,110 +178,7 @@ source "init/Kconfig"
source "kernel/Kconfig.freezer"
-menu "Platform selection"
-
-config ARCH_EXYNOS
- bool
- help
- This enables support for Samsung Exynos SoC family
-
-config ARCH_EXYNOS7
- bool "ARMv8 based Samsung Exynos7"
- select ARCH_EXYNOS
- select COMMON_CLK_SAMSUNG
- select HAVE_S3C2410_WATCHDOG if WATCHDOG
- select HAVE_S3C_RTC if RTC_CLASS
- select PINCTRL
- select PINCTRL_EXYNOS
-
- help
- This enables support for Samsung Exynos7 SoC family
-
-config ARCH_FSL_LS2085A
- bool "Freescale LS2085A SOC"
- help
- This enables support for Freescale LS2085A SOC.
-
-config ARCH_HISI
- bool "Hisilicon SoC Family"
- help
- This enables support for Hisilicon ARMv8 SoC family
-
-config ARCH_MEDIATEK
- bool "Mediatek MT65xx & MT81xx ARMv8 SoC"
- select ARM_GIC
- select PINCTRL
- help
- Support for Mediatek MT65xx & MT81xx ARMv8 SoCs
-
-config ARCH_QCOM
- bool "Qualcomm Platforms"
- select PINCTRL
- help
- This enables support for the ARMv8 based Qualcomm chipsets.
-
-config ARCH_SEATTLE
- bool "AMD Seattle SoC Family"
- help
- This enables support for AMD Seattle SOC Family
-
-config ARCH_TEGRA
- bool "NVIDIA Tegra SoC Family"
- select ARCH_HAS_RESET_CONTROLLER
- select ARCH_REQUIRE_GPIOLIB
- select CLKDEV_LOOKUP
- select CLKSRC_MMIO
- select CLKSRC_OF
- select GENERIC_CLOCKEVENTS
- select HAVE_CLK
- select PINCTRL
- select RESET_CONTROLLER
- help
- This enables support for the NVIDIA Tegra SoC family.
-
-config ARCH_TEGRA_132_SOC
- bool "NVIDIA Tegra132 SoC"
- depends on ARCH_TEGRA
- select PINCTRL_TEGRA124
- select USB_ULPI if USB_PHY
- select USB_ULPI_VIEWPORT if USB_PHY
- help
- Enable support for NVIDIA Tegra132 SoC, based on the Denver
- ARMv8 CPU. The Tegra132 SoC is similar to the Tegra124 SoC,
- but contains an NVIDIA Denver CPU complex in place of
- Tegra124's "4+1" Cortex-A15 CPU complex.
-
-config ARCH_SPRD
- bool "Spreadtrum SoC platform"
- help
- Support for Spreadtrum ARM based SoCs
-
-config ARCH_THUNDER
- bool "Cavium Inc. Thunder SoC Family"
- help
- This enables support for Cavium's Thunder Family of SoCs.
-
-config ARCH_VEXPRESS
- bool "ARMv8 software model (Versatile Express)"
- select ARCH_REQUIRE_GPIOLIB
- select COMMON_CLK_VERSATILE
- select POWER_RESET_VEXPRESS
- select VEXPRESS_CONFIG
- help
- This enables support for the ARMv8 software model (Versatile
- Express).
-
-config ARCH_XGENE
- bool "AppliedMicro X-Gene SOC Family"
- help
- This enables support for AppliedMicro X-Gene SOC Family
-
-config ARCH_ZYNQMP
- bool "Xilinx ZynqMP Family"
- help
- This enables support for Xilinx ZynqMP Family
-
-endmenu
+source "arch/arm64/Kconfig.platforms"
menu "Bus support"
@@ -417,6 +332,22 @@ config ARM64_ERRATUM_845719
If unsure, say Y.
+config ARM64_ERRATUM_843419
+ bool "Cortex-A53: 843419: A load or store might access an incorrect address"
+ depends on MODULES
+ default y
+ help
+ This option builds kernel modules using the large memory model in
+ order to avoid the use of the ADRP instruction, which can cause
+ a subsequent memory access to use an incorrect address on Cortex-A53
+ parts up to r0p4.
+
+ Note that the kernel itself must be linked with a version of ld
+ which fixes potentially affected ADRP instructions through the
+ use of veneers.
+
+ If unsure, say Y.
+
endmenu
@@ -474,22 +405,8 @@ config CPU_BIG_ENDIAN
help
Say Y if you plan on running a kernel in big-endian mode.
-config SMP
- bool "Symmetric Multi-Processing"
- help
- This enables support for systems with more than one CPU. If
- you say N here, the kernel will run on single and
- multiprocessor machines, but will use only one CPU of a
- multiprocessor machine. If you say Y here, the kernel will run
- on many, but not all, single processor machines. On a single
- processor machine, the kernel will run faster if you say N
- here.
-
- If you don't know what to do here, say N.
-
config SCHED_MC
bool "Multi-core scheduler support"
- depends on SMP
help
Multi-core scheduler support improves the CPU scheduler's decision
making when dealing with multi-core CPU chips at a cost of slightly
@@ -497,7 +414,6 @@ config SCHED_MC
config SCHED_SMT
bool "SMT scheduler support"
- depends on SMP
help
Improves the CPU scheduler's decision making when dealing with
MultiThreading at a cost of slightly increased overhead in some
@@ -506,23 +422,17 @@ config SCHED_SMT
config NR_CPUS
int "Maximum number of CPUs (2-4096)"
range 2 4096
- depends on SMP
# These have to remain sorted largest to smallest
default "64"
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
- depends on SMP
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.
source kernel/Kconfig.preempt
-config UP_LATE_INIT
- def_bool y
- depends on !SMP
-
config HZ
int
default 100
@@ -664,6 +574,53 @@ config SETEND_EMULATION
If unsure, say Y
endif
+menu "ARMv8.1 architectural features"
+
+config ARM64_HW_AFDBM
+ bool "Support for hardware updates of the Access and Dirty page flags"
+ default y
+ help
+ The ARMv8.1 architecture extensions introduce support for
+ hardware updates of the access and dirty information in page
+ table entries. When enabled in TCR_EL1 (HA and HD bits) on
+ capable processors, accesses to pages with PTE_AF cleared will
+ set this bit instead of raising an access flag fault.
+ Similarly, writes to read-only pages with the DBM bit set will
+ clear the read-only bit (AP[2]) instead of raising a
+ permission fault.
+
+ Kernels built with this configuration option enabled continue
+ to work on pre-ARMv8.1 hardware and the performance impact is
+ minimal. If unsure, say Y.
+
+config ARM64_PAN
+ bool "Enable support for Privileged Access Never (PAN)"
+ default y
+ help
+ Privileged Access Never (PAN; part of the ARMv8.1 Extensions)
+ prevents the kernel or hypervisor from accessing user-space (EL0)
+ memory directly.
+
+ Choosing this option will cause any unprotected (not using
+ copy_to_user et al) memory access to fail with a permission fault.
+
+ The feature is detected at runtime, and will remain as a 'nop'
+ instruction if the cpu does not implement the feature.
+
+config ARM64_LSE_ATOMICS
+ bool "Atomic instructions"
+ help
+ As part of the Large System Extensions, ARMv8.1 introduces new
+ atomic instructions that are designed specifically to scale in
+ very large systems.
+
+ Say Y here to make use of these instructions for the in-kernel
+ atomic routines. This incurs a small overhead on CPUs that do
+ not support these instructions and requires the kernel to be
+ built with binutils >= 2.25.
+
+endmenu
+
endmenu
menu "Boot options"
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
new file mode 100644
index 000000000000..23800a19a7bc
--- /dev/null
+++ b/arch/arm64/Kconfig.platforms
@@ -0,0 +1,125 @@
+menu "Platform selection"
+
+config ARCH_BCM_IPROC
+ bool "Broadcom iProc SoC Family"
+ help
+ This enables support for Broadcom iProc based SoCs
+
+config ARCH_BERLIN
+ bool "Marvell Berlin SoC Family"
+ select DW_APB_ICTL
+ help
+ This enables support for Marvell Berlin SoC Family
+
+config ARCH_EXYNOS
+ bool
+ help
+ This enables support for Samsung Exynos SoC family
+
+config ARCH_EXYNOS7
+ bool "ARMv8 based Samsung Exynos7"
+ select ARCH_EXYNOS
+ select COMMON_CLK_SAMSUNG
+ select HAVE_S3C2410_WATCHDOG if WATCHDOG
+ select HAVE_S3C_RTC if RTC_CLASS
+ select PINCTRL
+ select PINCTRL_EXYNOS
+
+ help
+ This enables support for Samsung Exynos7 SoC family
+
+config ARCH_FSL_LS2085A
+ bool "Freescale LS2085A SOC"
+ help
+ This enables support for Freescale LS2085A SOC.
+
+config ARCH_HISI
+ bool "Hisilicon SoC Family"
+ help
+ This enables support for Hisilicon ARMv8 SoC family
+
+config ARCH_MEDIATEK
+ bool "Mediatek MT65xx & MT81xx ARMv8 SoC"
+ select ARM_GIC
+ select PINCTRL
+ help
+ Support for Mediatek MT65xx & MT81xx ARMv8 SoCs
+
+config ARCH_QCOM
+ bool "Qualcomm Platforms"
+ select PINCTRL
+ help
+ This enables support for the ARMv8 based Qualcomm chipsets.
+
+config ARCH_ROCKCHIP
+ bool "Rockchip Platforms"
+ select ARCH_HAS_RESET_CONTROLLER
+ select ARCH_REQUIRE_GPIOLIB
+ select PINCTRL
+ select PINCTRL_ROCKCHIP
+ help
+ This enables support for the ARMv8 based Rockchip chipsets,
+ like the RK3368.
+
+config ARCH_SEATTLE
+ bool "AMD Seattle SoC Family"
+ help
+ This enables support for AMD Seattle SOC Family
+
+config ARCH_TEGRA
+ bool "NVIDIA Tegra SoC Family"
+ select ARCH_HAS_RESET_CONTROLLER
+ select ARCH_REQUIRE_GPIOLIB
+ select CLKDEV_LOOKUP
+ select CLKSRC_MMIO
+ select CLKSRC_OF
+ select GENERIC_CLOCKEVENTS
+ select HAVE_CLK
+ select PINCTRL
+ select RESET_CONTROLLER
+ help
+ This enables support for the NVIDIA Tegra SoC family.
+
+config ARCH_TEGRA_132_SOC
+ bool "NVIDIA Tegra132 SoC"
+ depends on ARCH_TEGRA
+ select PINCTRL_TEGRA124
+ select USB_ULPI if USB_PHY
+ select USB_ULPI_VIEWPORT if USB_PHY
+ help
+ Enable support for NVIDIA Tegra132 SoC, based on the Denver
+ ARMv8 CPU. The Tegra132 SoC is similar to the Tegra124 SoC,
+ but contains an NVIDIA Denver CPU complex in place of
+ Tegra124's "4+1" Cortex-A15 CPU complex.
+
+config ARCH_SPRD
+ bool "Spreadtrum SoC platform"
+ help
+ Support for Spreadtrum ARM based SoCs
+
+config ARCH_THUNDER
+ bool "Cavium Inc. Thunder SoC Family"
+ help
+ This enables support for Cavium's Thunder Family of SoCs.
+
+config ARCH_VEXPRESS
+ bool "ARMv8 software model (Versatile Express)"
+ select ARCH_REQUIRE_GPIOLIB
+ select COMMON_CLK_VERSATILE
+ select POWER_RESET_VEXPRESS
+ select VEXPRESS_CONFIG
+ help
+ This enables support for the ARMv8 software model (Versatile
+ Express).
+
+config ARCH_XGENE
+ bool "AppliedMicro X-Gene SOC Family"
+ help
+ This enables support for AppliedMicro X-Gene SOC Family
+
+config ARCH_ZYNQMP
+ bool "Xilinx ZynqMP Family"
+ help
+ This enables support for Xilinx ZynqMP Family
+
+endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 4d2a925998f9..f9914d7c1bb0 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -17,7 +17,18 @@ GZFLAGS :=-9
KBUILD_DEFCONFIG := defconfig
-KBUILD_CFLAGS += -mgeneral-regs-only
+# Check for binutils support for specific extensions
+lseinstr := $(call as-instr,.arch_extension lse,-DCONFIG_AS_LSE=1)
+
+ifeq ($(CONFIG_ARM64_LSE_ATOMICS), y)
+ ifeq ($(lseinstr),)
+$(warning LSE atomics not supported by binutils)
+ endif
+endif
+
+KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr)
+KBUILD_AFLAGS += $(lseinstr)
+
ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
AS += -EB
@@ -30,6 +41,10 @@ endif
CHECKFLAGS += -D__aarch64__
+ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
+CFLAGS_MODULE += -mcmodel=large
+endif
+
# Default value
head-y := arch/arm64/kernel/head.o
@@ -58,7 +73,10 @@ all: $(KBUILD_IMAGE) $(KBUILD_DTBS)
boot := arch/arm64/boot
-Image Image.gz: vmlinux
+Image: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+Image.%: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
zinstall install: vmlinux
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index 5a0e3ab854a5..abcbba2f01ba 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -19,9 +19,21 @@ targets := Image Image.gz
$(obj)/Image: vmlinux FORCE
$(call if_changed,objcopy)
+$(obj)/Image.bz2: $(obj)/Image FORCE
+ $(call if_changed,bzip2)
+
$(obj)/Image.gz: $(obj)/Image FORCE
$(call if_changed,gzip)
+$(obj)/Image.lz4: $(obj)/Image FORCE
+ $(call if_changed,lz4)
+
+$(obj)/Image.lzma: $(obj)/Image FORCE
+ $(call if_changed,lzma)
+
+$(obj)/Image.lzo: $(obj)/Image FORCE
+ $(call if_changed,lzo)
+
install: $(obj)/Image
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
$(obj)/Image System.map "$(INSTALL_PATH)"
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 38913be23695..d9f88330e7b0 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -1,12 +1,15 @@
dts-dirs += amd
dts-dirs += apm
dts-dirs += arm
+dts-dirs += broadcom
dts-dirs += cavium
dts-dirs += exynos
dts-dirs += freescale
dts-dirs += hisilicon
+dts-dirs += marvell
dts-dirs += mediatek
dts-dirs += qcom
+dts-dirs += rockchip
dts-dirs += sprd
dts-dirs += xilinx
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index 58093edeea2e..d831bc2ac204 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -490,7 +490,8 @@
0xe0 0xd0000000 0x0 0x00040000>; /* PCI config space */
reg-names = "csr", "cfg";
ranges = <0x01000000 0x00 0x00000000 0xe0 0x10000000 0x00 0x00010000 /* io */
- 0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000>; /* mem */
+ 0x02000000 0x00 0x80000000 0xe1 0x80000000 0x00 0x80000000 /* mem */
+ 0x43000000 0xf0 0x00000000 0xf0 0x00000000 0x10 0x00000000>; /* mem */
dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
@@ -513,8 +514,9 @@
reg = < 0x00 0x1f2c0000 0x0 0x00010000 /* Controller registers */
0xd0 0xd0000000 0x0 0x00040000>; /* PCI config space */
reg-names = "csr", "cfg";
- ranges = <0x01000000 0x0 0x00000000 0xd0 0x10000000 0x00 0x00010000 /* io */
- 0x02000000 0x0 0x80000000 0xd1 0x80000000 0x00 0x80000000>; /* mem */
+ ranges = <0x01000000 0x00 0x00000000 0xd0 0x10000000 0x00 0x00010000 /* io */
+ 0x02000000 0x00 0x80000000 0xd1 0x80000000 0x00 0x80000000 /* mem */
+ 0x43000000 0xd8 0x00000000 0xd8 0x00000000 0x08 0x00000000>; /* mem */
dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
@@ -537,8 +539,9 @@
reg = < 0x00 0x1f2d0000 0x0 0x00010000 /* Controller registers */
0x90 0xd0000000 0x0 0x00040000>; /* PCI config space */
reg-names = "csr", "cfg";
- ranges = <0x01000000 0x0 0x00000000 0x90 0x10000000 0x0 0x00010000 /* io */
- 0x02000000 0x0 0x80000000 0x91 0x80000000 0x0 0x80000000>; /* mem */
+ ranges = <0x01000000 0x00 0x00000000 0x90 0x10000000 0x00 0x00010000 /* io */
+ 0x02000000 0x00 0x80000000 0x91 0x80000000 0x00 0x80000000 /* mem */
+ 0x43000000 0x94 0x00000000 0x94 0x00000000 0x04 0x00000000>; /* mem */
dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
@@ -561,8 +564,9 @@
reg = < 0x00 0x1f500000 0x0 0x00010000 /* Controller registers */
0xa0 0xd0000000 0x0 0x00040000>; /* PCI config space */
reg-names = "csr", "cfg";
- ranges = <0x01000000 0x0 0x00000000 0xa0 0x10000000 0x0 0x00010000 /* io */
- 0x02000000 0x0 0x80000000 0xa1 0x80000000 0x0 0x80000000>; /* mem */
+ ranges = <0x01000000 0x00 0x00000000 0xa0 0x10000000 0x00 0x00010000 /* io */
+ 0x02000000 0x00 0x80000000 0xa1 0x80000000 0x00 0x80000000 /* mem */
+ 0x43000000 0xb0 0x00000000 0xb0 0x00000000 0x10 0x00000000>; /* mem */
dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
@@ -585,8 +589,9 @@
reg = < 0x00 0x1f510000 0x0 0x00010000 /* Controller registers */
0xc0 0xd0000000 0x0 0x00200000>; /* PCI config space */
reg-names = "csr", "cfg";
- ranges = <0x01000000 0x0 0x00000000 0xc0 0x10000000 0x0 0x00010000 /* io */
- 0x02000000 0x0 0x80000000 0xc1 0x80000000 0x0 0x80000000>; /* mem */
+ ranges = <0x01000000 0x00 0x00000000 0xc0 0x10000000 0x00 0x00010000 /* io */
+ 0x02000000 0x00 0x80000000 0xc1 0x80000000 0x00 0x80000000 /* mem */
+ 0x43000000 0xc8 0x00000000 0xc8 0x00000000 0x08 0x00000000>; /* mem */
dma-ranges = <0x42000000 0x80 0x00000000 0x80 0x00000000 0x00 0x80000000
0x42000000 0x00 0x00000000 0x00 0x00000000 0x80 0x00000000>;
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
diff --git a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
index 021e0f40f419..637e046f0e36 100644
--- a/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/juno-motherboard.dtsi
@@ -136,6 +136,8 @@
clock-names = "refclk", "timclk", "apb_pclk";
#clock-cells = <1>;
clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
+ assigned-clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&v2m_sysctl 3>, <&v2m_sysctl 3>;
+ assigned-clock-parents = <&v2m_refclk1mhz>, <&v2m_refclk1mhz>, <&v2m_refclk1mhz>, <&v2m_refclk1mhz>;
};
apbregs@010000 {
diff --git a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
index c46cbb29f3c6..88a7583ed7a7 100644
--- a/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
+++ b/arch/arm64/boot/dts/arm/rtsm_ve-motherboard.dtsi
@@ -74,6 +74,8 @@
clock-names = "refclk", "timclk", "apb_pclk";
#clock-cells = <1>;
clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
+ assigned-clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&v2m_sysctl 3>, <&v2m_sysctl 3>;
+ assigned-clock-parents = <&v2m_refclk1mhz>, <&v2m_refclk1mhz>, <&v2m_refclk1mhz>, <&v2m_refclk1mhz>;
};
aaci@040000 {
diff --git a/arch/arm64/boot/dts/broadcom/Makefile b/arch/arm64/boot/dts/broadcom/Makefile
new file mode 100644
index 000000000000..e21fe66f1837
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/Makefile
@@ -0,0 +1,5 @@
+dtb-$(CONFIG_ARCH_BCM_IPROC) += ns2-svk.dtb
+
+always := $(dtb-y)
+subdir-y := $(dts-dirs)
+clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/broadcom/ns2-svk.dts b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
new file mode 100644
index 000000000000..244baf879dc9
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/ns2-svk.dts
@@ -0,0 +1,59 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Broadcom Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/dts-v1/;
+
+#include "ns2.dtsi"
+
+/ {
+ model = "Broadcom NS2 SVK";
+ compatible = "brcm,ns2-svk", "brcm,ns2";
+
+ aliases {
+ serial0 = &uart3;
+ };
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x000000000 0x80000000 0x00000000 0x40000000>;
+ };
+
+ soc: soc {
+ uart3: serial@66130000 {
+ status = "ok";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi
new file mode 100644
index 000000000000..3c92d92278e5
--- /dev/null
+++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi
@@ -0,0 +1,118 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2015 Broadcom Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/memreserve/ 0x84b00000 0x00000008;
+
+/ {
+ compatible = "brcm,ns2";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57", "arm,armv8";
+ reg = <0 0>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0x84b00000>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57", "arm,armv8";
+ reg = <0 1>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0x84b00000>;
+ };
+
+ cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57", "arm,armv8";
+ reg = <0 2>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0x84b00000>;
+ };
+
+ cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a57", "arm,armv8";
+ reg = <0 3>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0 0x84b00000>;
+ };
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_RAW(0xff) |
+ IRQ_TYPE_EDGE_RISING)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_RAW(0xff) |
+ IRQ_TYPE_EDGE_RISING)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_RAW(0xff) |
+ IRQ_TYPE_EDGE_RISING)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_RAW(0xff) |
+ IRQ_TYPE_EDGE_RISING)>;
+ };
+
+ soc: soc {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+
+ gic: interrupt-controller@65210000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x65210000 0x1000>,
+ <0x65220000 0x1000>,
+ <0x65240000 0x2000>,
+ <0x65260000 0x1000>;
+ };
+
+ uart3: serial@66130000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0x66130000 0x100>;
+ interrupts = <GIC_SPI 393 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ clock-frequency = <23961600>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/marvell/Makefile b/arch/arm64/boot/dts/marvell/Makefile
new file mode 100644
index 000000000000..e2f6afa7f849
--- /dev/null
+++ b/arch/arm64/boot/dts/marvell/Makefile
@@ -0,0 +1,5 @@
+dtb-$(CONFIG_ARCH_BERLIN) += berlin4ct-dmp.dtb
+
+always := $(dtb-y)
+subdir-y := $(dts-dirs)
+clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/marvell/berlin4ct-dmp.dts b/arch/arm64/boot/dts/marvell/berlin4ct-dmp.dts
new file mode 100644
index 000000000000..0d70d39fa8d2
--- /dev/null
+++ b/arch/arm64/boot/dts/marvell/berlin4ct-dmp.dts
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2015 Marvell Technology Group Ltd.
+ *
+ * Author: Jisheng Zhang <jszhang@marvell.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "berlin4ct.dtsi"
+
+/ {
+ model = "Marvell BG4CT DMP board";
+ compatible = "marvell,berlin4ct-dmp", "marvell,berlin4ct", "marvell,berlin";
+
+ chosen {
+ stdout-path = "serial0:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ /* the first 16MB is for firmwares' usage */
+ reg = <0 0x01000000 0 0x7f000000>;
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/marvell/berlin4ct.dtsi b/arch/arm64/boot/dts/marvell/berlin4ct.dtsi
new file mode 100644
index 000000000000..dd4a10d605d9
--- /dev/null
+++ b/arch/arm64/boot/dts/marvell/berlin4ct.dtsi
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2015 Marvell Technology Group Ltd.
+ *
+ * Author: Jisheng Zhang <jszhang@marvell.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPLv2 or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ compatible = "marvell,berlin4ct", "marvell,berlin";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ device_type = "cpu";
+ reg = <0x0>;
+ enable-method = "psci";
+ };
+
+ cpu1: cpu@1 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ device_type = "cpu";
+ reg = <0x1>;
+ enable-method = "psci";
+ };
+
+ cpu2: cpu@2 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ device_type = "cpu";
+ reg = <0x2>;
+ enable-method = "psci";
+ };
+
+ cpu3: cpu@3 {
+ compatible = "arm,cortex-a53", "arm,armv8";
+ device_type = "cpu";
+ reg = <0x3>;
+ enable-method = "psci";
+ };
+ };
+
+ osc: osc {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
+ };
+
+ pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu0>,
+ <&cpu1>,
+ <&cpu2>,
+ <&cpu3>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0 0xf7000000 0x1000000>;
+
+ gic: interrupt-controller@901000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x901000 0x1000>,
+ <0x902000 0x2000>,
+ <0x904000 0x2000>,
+ <0x906000 0x2000>;
+ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ apb@fc0000 {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 0xfc0000 0x10000>;
+ interrupt-parent = <&sic>;
+
+ sic: interrupt-controller@1000 {
+ compatible = "snps,dw-apb-ictl";
+ reg = <0x1000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ uart0: uart@d000 {
+ compatible = "snps,dw-apb-uart";
+ reg = <0xd000 0x100>;
+ interrupts = <8>;
+ clocks = <&osc>;
+ reg-shift = <2>;
+ status = "disabled";
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/Makefile b/arch/arm64/boot/dts/mediatek/Makefile
index 3ce24622b231..e0a4bff2fc17 100644
--- a/arch/arm64/boot/dts/mediatek/Makefile
+++ b/arch/arm64/boot/dts/mediatek/Makefile
@@ -1,3 +1,4 @@
+dtb-$(CONFIG_ARCH_MEDIATEK) += mt6795-evb.dtb
dtb-$(CONFIG_ARCH_MEDIATEK) += mt8173-evb.dtb
always := $(dtb-y)
diff --git a/arch/arm64/boot/dts/mediatek/mt6795-evb.dts b/arch/arm64/boot/dts/mediatek/mt6795-evb.dts
new file mode 100644
index 000000000000..ad665f5835f0
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt6795-evb.dts
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Mars.C <mars.cheng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+#include "mt6795.dtsi"
+
+/ {
+ model = "MediaTek MT6795 Evaluation Board";
+ compatible = "mediatek,mt6795-evb", "mediatek,mt6795";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &uart2;
+ serial3 = &uart3;
+ };
+
+ memory@40000000 {
+ device_type = "memory";
+ reg = <0 0x40000000 0 0x1e800000>;
+ };
+
+ chosen {
+ stdout-path = "serial0:921600n8";
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt6795.dtsi b/arch/arm64/boot/dts/mediatek/mt6795.dtsi
new file mode 100644
index 000000000000..c85659d0ff5d
--- /dev/null
+++ b/arch/arm64/boot/dts/mediatek/mt6795.dtsi
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2015 MediaTek Inc.
+ * Author: Mars.C <mars.cheng@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+/ {
+ compatible = "mediatek,mt6795";
+ interrupt-parent = <&sysirq>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x000>;
+ };
+
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x001>;
+ };
+
+ cpu2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x002>;
+ };
+
+ cpu3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x003>;
+ };
+
+ cpu4: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x100>;
+ };
+
+ cpu5: cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x101>;
+ };
+
+ cpu6: cpu@102 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x102>;
+ };
+
+ cpu7: cpu@103 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53";
+ enable-method = "psci";
+ reg = <0x103>;
+ };
+ };
+
+ system_clk: dummy13m {
+ compatible = "fixed-clock";
+ clock-frequency = <13000000>;
+ #clock-cells = <0>;
+ };
+
+ rtc_clk: dummy32k {
+ compatible = "fixed-clock";
+ clock-frequency = <32000>;
+ #clock-cells = <0>;
+ };
+
+ uart_clk: dummy26m {
+ compatible = "fixed-clock";
+ clock-frequency = <26000000>;
+ #clock-cells = <0>;
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_PPI 13
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 14
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 11
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+ <GIC_PPI 10
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+ };
+
+ sysirq: intpol-controller@10200620 {
+ compatible = "mediatek,mt6795-sysirq",
+ "mediatek,mt6577-sysirq";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
+ reg = <0 0x10200620 0 0x20>;
+ };
+
+ gic: interrupt-controller@10221000 {
+ compatible = "arm,gic-400";
+ #interrupt-cells = <3>;
+ interrupt-parent = <&gic>;
+ interrupt-controller;
+ reg = <0 0x10221000 0 0x1000>,
+ <0 0x10222000 0 0x2000>,
+ <0 0x10224000 0 0x2000>,
+ <0 0x10226000 0 0x2000>;
+ };
+
+ uart0: serial@11002000 {
+ compatible = "mediatek,mt6795-uart",
+ "mediatek,mt6577-uart";
+ reg = <0 0x11002000 0 0x400>;
+ interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart1: serial@11003000 {
+ compatible = "mediatek,mt6795-uart",
+ "mediatek,mt6577-uart";
+ reg = <0 0x11003000 0 0x400>;
+ interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart2: serial@11004000 {
+ compatible = "mediatek,mt6795-uart",
+ "mediatek,mt6577-uart";
+ reg = <0 0x11004000 0 0x400>;
+ interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart3: serial@11005000 {
+ compatible = "mediatek,mt6795-uart",
+ "mediatek,mt6577-uart";
+ reg = <0 0x11005000 0 0x400>;
+ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
index d0ab012fa379..4be66cadbc7c 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
+++ b/arch/arm64/boot/dts/mediatek/mt8173-evb.dts
@@ -34,6 +34,359 @@
chosen { };
};
+&i2c1 {
+ status = "okay";
+
+ buck: da9211@68 {
+ compatible = "dlg,da9211";
+ reg = <0x68>;
+
+ regulators {
+ da9211_vcpu_reg: BUCKA {
+ regulator-name = "VBUCKA";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1310000>;
+ regulator-min-microamp = <2000000>;
+ regulator-max-microamp = <4400000>;
+ regulator-ramp-delay = <10000>;
+ regulator-always-on;
+ };
+
+ da9211_vgpu_reg: BUCKB {
+ regulator-name = "VBUCKB";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1310000>;
+ regulator-min-microamp = <2000000>;
+ regulator-max-microamp = <3000000>;
+ regulator-ramp-delay = <10000>;
+ };
+ };
+ };
+};
+
+&mmc0 {
+ status = "okay";
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc0_pins_default>;
+ pinctrl-1 = <&mmc0_pins_uhs>;
+ bus-width = <8>;
+ max-frequency = <50000000>;
+ cap-mmc-highspeed;
+ vmmc-supply = <&mt6397_vemc_3v3_reg>;
+ vqmmc-supply = <&mt6397_vio18_reg>;
+ non-removable;
+};
+
+&mmc1 {
+ status = "okay";
+ pinctrl-names = "default", "state_uhs";
+ pinctrl-0 = <&mmc1_pins_default>;
+ pinctrl-1 = <&mmc1_pins_uhs>;
+ bus-width = <4>;
+ max-frequency = <50000000>;
+ cap-sd-highspeed;
+ sd-uhs-sdr25;
+ cd-gpios = <&pio 132 0>;
+ vmmc-supply = <&mt6397_vmch_reg>;
+ vqmmc-supply = <&mt6397_vmc_reg>;
+};
+
+&pio {
+ mmc0_pins_default: mmc0default {
+ pins_cmd_dat {
+ pinmux = <MT8173_PIN_57_MSDC0_DAT0__FUNC_MSDC0_DAT0>,
+ <MT8173_PIN_58_MSDC0_DAT1__FUNC_MSDC0_DAT1>,
+ <MT8173_PIN_59_MSDC0_DAT2__FUNC_MSDC0_DAT2>,
+ <MT8173_PIN_60_MSDC0_DAT3__FUNC_MSDC0_DAT3>,
+ <MT8173_PIN_61_MSDC0_DAT4__FUNC_MSDC0_DAT4>,
+ <MT8173_PIN_62_MSDC0_DAT5__FUNC_MSDC0_DAT5>,
+ <MT8173_PIN_63_MSDC0_DAT6__FUNC_MSDC0_DAT6>,
+ <MT8173_PIN_64_MSDC0_DAT7__FUNC_MSDC0_DAT7>,
+ <MT8173_PIN_66_MSDC0_CMD__FUNC_MSDC0_CMD>;
+ input-enable;
+ bias-pull-up;
+ };
+
+ pins_clk {
+ pinmux = <MT8173_PIN_65_MSDC0_CLK__FUNC_MSDC0_CLK>;
+ bias-pull-down;
+ };
+
+ pins_rst {
+ pinmux = <MT8173_PIN_68_MSDC0_RST___FUNC_MSDC0_RSTB>;
+ bias-pull-up;
+ };
+ };
+
+ mmc1_pins_default: mmc1default {
+ pins_cmd_dat {
+ pinmux = <MT8173_PIN_73_MSDC1_DAT0__FUNC_MSDC1_DAT0>,
+ <MT8173_PIN_74_MSDC1_DAT1__FUNC_MSDC1_DAT1>,
+ <MT8173_PIN_75_MSDC1_DAT2__FUNC_MSDC1_DAT2>,
+ <MT8173_PIN_76_MSDC1_DAT3__FUNC_MSDC1_DAT3>,
+ <MT8173_PIN_78_MSDC1_CMD__FUNC_MSDC1_CMD>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_4mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins_clk {
+ pinmux = <MT8173_PIN_77_MSDC1_CLK__FUNC_MSDC1_CLK>;
+ bias-pull-down;
+ drive-strength = <MTK_DRIVE_4mA>;
+ };
+
+ pins_insert {
+ pinmux = <MT8173_PIN_132_I2S0_DATA1__FUNC_GPIO132>;
+ bias-pull-up;
+ };
+ };
+
+ mmc0_pins_uhs: mmc0 {
+ pins_cmd_dat {
+ pinmux = <MT8173_PIN_57_MSDC0_DAT0__FUNC_MSDC0_DAT0>,
+ <MT8173_PIN_58_MSDC0_DAT1__FUNC_MSDC0_DAT1>,
+ <MT8173_PIN_59_MSDC0_DAT2__FUNC_MSDC0_DAT2>,
+ <MT8173_PIN_60_MSDC0_DAT3__FUNC_MSDC0_DAT3>,
+ <MT8173_PIN_61_MSDC0_DAT4__FUNC_MSDC0_DAT4>,
+ <MT8173_PIN_62_MSDC0_DAT5__FUNC_MSDC0_DAT5>,
+ <MT8173_PIN_63_MSDC0_DAT6__FUNC_MSDC0_DAT6>,
+ <MT8173_PIN_64_MSDC0_DAT7__FUNC_MSDC0_DAT7>,
+ <MT8173_PIN_66_MSDC0_CMD__FUNC_MSDC0_CMD>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_2mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins_clk {
+ pinmux = <MT8173_PIN_65_MSDC0_CLK__FUNC_MSDC0_CLK>;
+ drive-strength = <MTK_DRIVE_2mA>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_01>;
+ };
+
+ pins_rst {
+ pinmux = <MT8173_PIN_68_MSDC0_RST___FUNC_MSDC0_RSTB>;
+ bias-pull-up;
+ };
+ };
+
+ mmc1_pins_uhs: mmc1 {
+ pins_cmd_dat {
+ pinmux = <MT8173_PIN_73_MSDC1_DAT0__FUNC_MSDC1_DAT0>,
+ <MT8173_PIN_74_MSDC1_DAT1__FUNC_MSDC1_DAT1>,
+ <MT8173_PIN_75_MSDC1_DAT2__FUNC_MSDC1_DAT2>,
+ <MT8173_PIN_76_MSDC1_DAT3__FUNC_MSDC1_DAT3>,
+ <MT8173_PIN_78_MSDC1_CMD__FUNC_MSDC1_CMD>;
+ input-enable;
+ drive-strength = <MTK_DRIVE_4mA>;
+ bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
+ };
+
+ pins_clk {
+ pinmux = <MT8173_PIN_77_MSDC1_CLK__FUNC_MSDC1_CLK>;
+ drive-strength = <MTK_DRIVE_4mA>;
+ bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
+ };
+ };
+};
+
+&pwrap {
+ pmic: mt6397 {
+ compatible = "mediatek,mt6397";
+ interrupt-parent = <&pio>;
+ interrupts = <11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ mt6397regulator: mt6397regulator {
+ compatible = "mediatek,mt6397-regulator";
+
+ mt6397_vpca15_reg: buck_vpca15 {
+ regulator-compatible = "buck_vpca15";
+ regulator-name = "vpca15";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+ };
+
+ mt6397_vpca7_reg: buck_vpca7 {
+ regulator-compatible = "buck_vpca7";
+ regulator-name = "vpca7";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <115>;
+ };
+
+ mt6397_vsramca15_reg: buck_vsramca15 {
+ regulator-compatible = "buck_vsramca15";
+ regulator-name = "vsramca15";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+ };
+
+ mt6397_vsramca7_reg: buck_vsramca7 {
+ regulator-compatible = "buck_vsramca7";
+ regulator-name = "vsramca7";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+ };
+
+ mt6397_vcore_reg: buck_vcore {
+ regulator-compatible = "buck_vcore";
+ regulator-name = "vcore";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+ };
+
+ mt6397_vgpu_reg: buck_vgpu {
+ regulator-compatible = "buck_vgpu";
+ regulator-name = "vgpu";
+ regulator-min-microvolt = < 700000>;
+ regulator-max-microvolt = <1350000>;
+ regulator-ramp-delay = <12500>;
+ regulator-enable-ramp-delay = <115>;
+ };
+
+ mt6397_vdrm_reg: buck_vdrm {
+ regulator-compatible = "buck_vdrm";
+ regulator-name = "vdrm";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1400000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+ };
+
+ mt6397_vio18_reg: buck_vio18 {
+ regulator-compatible = "buck_vio18";
+ regulator-name = "vio18";
+ regulator-min-microvolt = <1620000>;
+ regulator-max-microvolt = <1980000>;
+ regulator-ramp-delay = <12500>;
+ regulator-always-on;
+ };
+
+ mt6397_vtcxo_reg: ldo_vtcxo {
+ regulator-compatible = "ldo_vtcxo";
+ regulator-name = "vtcxo";
+ regulator-always-on;
+ };
+
+ mt6397_va28_reg: ldo_va28 {
+ regulator-compatible = "ldo_va28";
+ regulator-name = "va28";
+ regulator-always-on;
+ };
+
+ mt6397_vcama_reg: ldo_vcama {
+ regulator-compatible = "ldo_vcama";
+ regulator-name = "vcama";
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <2800000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vio28_reg: ldo_vio28 {
+ regulator-compatible = "ldo_vio28";
+ regulator-name = "vio28";
+ regulator-always-on;
+ };
+
+ mt6397_vusb_reg: ldo_vusb {
+ regulator-compatible = "ldo_vusb";
+ regulator-name = "vusb";
+ };
+
+ mt6397_vmc_reg: ldo_vmc {
+ regulator-compatible = "ldo_vmc";
+ regulator-name = "vmc";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vmch_reg: ldo_vmch {
+ regulator-compatible = "ldo_vmch";
+ regulator-name = "vmch";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vemc_3v3_reg: ldo_vemc3v3 {
+ regulator-compatible = "ldo_vemc3v3";
+ regulator-name = "vemc_3v3";
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp1_reg: ldo_vgp1 {
+ regulator-compatible = "ldo_vgp1";
+ regulator-name = "vcamd";
+ regulator-min-microvolt = <1220000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <240>;
+ };
+
+ mt6397_vgp2_reg: ldo_vgp2 {
+ regulator-compatible = "ldo_vgp2";
+ regulator-name = "vcamio";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp3_reg: ldo_vgp3 {
+ regulator-compatible = "ldo_vgp3";
+ regulator-name = "vcamaf";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp4_reg: ldo_vgp4 {
+ regulator-compatible = "ldo_vgp4";
+ regulator-name = "vgp4";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp5_reg: ldo_vgp5 {
+ regulator-compatible = "ldo_vgp5";
+ regulator-name = "vgp5";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vgp6_reg: ldo_vgp6 {
+ regulator-compatible = "ldo_vgp6";
+ regulator-name = "vgp6";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+
+ mt6397_vibr_reg: ldo_vibr {
+ regulator-compatible = "ldo_vibr";
+ regulator-name = "vibr";
+ regulator-min-microvolt = <1300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-enable-ramp-delay = <218>;
+ };
+ };
+ };
+};
+
&uart0 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt8173.dtsi b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
index 27237a1c1a87..06a15644be38 100644
--- a/arch/arm64/boot/dts/mediatek/mt8173.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8173.dtsi
@@ -11,8 +11,11 @@
* GNU General Public License for more details.
*/
+#include <dt-bindings/clock/mt8173-clk.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/power/mt8173-power.h>
+#include <dt-bindings/reset-controller/mt8173-resets.h>
#include "mt8173-pinfunc.h"
/ {
@@ -49,6 +52,8 @@
device_type = "cpu";
compatible = "arm,cortex-a53";
reg = <0x000>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0>;
};
cpu1: cpu@1 {
@@ -56,6 +61,7 @@
compatible = "arm,cortex-a53";
reg = <0x001>;
enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0>;
};
cpu2: cpu@100 {
@@ -63,6 +69,7 @@
compatible = "arm,cortex-a57";
reg = <0x100>;
enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0>;
};
cpu3: cpu@101 {
@@ -70,6 +77,20 @@
compatible = "arm,cortex-a57";
reg = <0x101>;
enable-method = "psci";
+ cpu-idle-states = <&CPU_SLEEP_0>;
+ };
+
+ idle-states {
+ entry-method = "psci";
+
+ CPU_SLEEP_0: cpu-sleep-0 {
+ compatible = "arm,idle-state";
+ local-timer-stop;
+ entry-latency-us = <639>;
+ exit-latency-us = <680>;
+ min-residency-us = <1088>;
+ arm,psci-suspend-param = <0x0010000>;
+ };
};
};
@@ -81,10 +102,18 @@
cpu_on = <0x84000003>;
};
- uart_clk: dummy26m {
+ clk26m: oscillator@0 {
compatible = "fixed-clock";
+ #clock-cells = <0>;
clock-frequency = <26000000>;
+ clock-output-names = "clk26m";
+ };
+
+ clk32k: oscillator@1 {
+ compatible = "fixed-clock";
#clock-cells = <0>;
+ clock-frequency = <32000>;
+ clock-output-names = "clk32k";
};
timer {
@@ -106,11 +135,32 @@
compatible = "simple-bus";
ranges;
- /*
- * Pinctrl access register at 0x10005000 through regmap.
- * Register 0x1000b000 is used by EINT.
- */
- pio: pinctrl@10005000 {
+ topckgen: clock-controller@10000000 {
+ compatible = "mediatek,mt8173-topckgen";
+ reg = <0 0x10000000 0 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ infracfg: power-controller@10001000 {
+ compatible = "mediatek,mt8173-infracfg", "syscon";
+ reg = <0 0x10001000 0 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ pericfg: power-controller@10003000 {
+ compatible = "mediatek,mt8173-pericfg", "syscon";
+ reg = <0 0x10003000 0 0x1000>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ syscfg_pctl_a: syscfg_pctl_a@10005000 {
+ compatible = "mediatek,mt8173-pctl-a-syscfg", "syscon";
+ reg = <0 0x10005000 0 0x1000>;
+ };
+
+ pio: pinctrl@1000b000 {
compatible = "mediatek,mt8173-pinctrl";
reg = <0 0x1000b000 0 0x1000>;
mediatek,pctl-regmap = <&syscfg_pctl_a>;
@@ -122,11 +172,81 @@
interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+
+ i2c0_pins_a: i2c0 {
+ pins1 {
+ pinmux = <MT8173_PIN_45_SDA0__FUNC_SDA0>,
+ <MT8173_PIN_46_SCL0__FUNC_SCL0>;
+ bias-disable;
+ };
+ };
+
+ i2c1_pins_a: i2c1 {
+ pins1 {
+ pinmux = <MT8173_PIN_125_SDA1__FUNC_SDA1>,
+ <MT8173_PIN_126_SCL1__FUNC_SCL1>;
+ bias-disable;
+ };
+ };
+
+ i2c2_pins_a: i2c2 {
+ pins1 {
+ pinmux = <MT8173_PIN_43_SDA2__FUNC_SDA2>,
+ <MT8173_PIN_44_SCL2__FUNC_SCL2>;
+ bias-disable;
+ };
+ };
+
+ i2c3_pins_a: i2c3 {
+ pins1 {
+ pinmux = <MT8173_PIN_106_SDA3__FUNC_SDA3>,
+ <MT8173_PIN_107_SCL3__FUNC_SCL3>;
+ bias-disable;
+ };
+ };
+
+ i2c4_pins_a: i2c4 {
+ pins1 {
+ pinmux = <MT8173_PIN_133_SDA4__FUNC_SDA4>,
+ <MT8173_PIN_134_SCL4__FUNC_SCL4>;
+ bias-disable;
+ };
+ };
+
+ i2c6_pins_a: i2c6 {
+ pins1 {
+ pinmux = <MT8173_PIN_100_MSDC2_DAT0__FUNC_SDA5>,
+ <MT8173_PIN_101_MSDC2_DAT1__FUNC_SCL5>;
+ bias-disable;
+ };
+ };
};
- syscfg_pctl_a: syscfg_pctl_a@10005000 {
- compatible = "mediatek,mt8173-pctl-a-syscfg", "syscon";
- reg = <0 0x10005000 0 0x1000>;
+ scpsys: scpsys@10006000 {
+ compatible = "mediatek,mt8173-scpsys";
+ #power-domain-cells = <1>;
+ reg = <0 0x10006000 0 0x1000>;
+ clocks = <&clk26m>,
+ <&topckgen CLK_TOP_MM_SEL>;
+ clock-names = "mfg", "mm";
+ infracfg = <&infracfg>;
+ };
+
+ watchdog: watchdog@10007000 {
+ compatible = "mediatek,mt8173-wdt",
+ "mediatek,mt6589-wdt";
+ reg = <0 0x10007000 0 0x100>;
+ };
+
+ pwrap: pwrap@1000d000 {
+ compatible = "mediatek,mt8173-pwrap";
+ reg = <0 0x1000d000 0 0x1000>;
+ reg-names = "pwrap";
+ interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+ resets = <&infracfg MT8173_INFRA_PMIC_WRAP_RST>;
+ reset-names = "pwrap";
+ clocks = <&infracfg CLK_INFRA_PMICSPI>, <&infracfg CLK_INFRA_PMICWRAP>;
+ clock-names = "spi", "wrap";
};
sysirq: intpol-controller@10200620 {
@@ -138,6 +258,12 @@
reg = <0 0x10200620 0 0x20>;
};
+ apmixedsys: clock-controller@10209000 {
+ compatible = "mediatek,mt8173-apmixedsys";
+ reg = <0 0x10209000 0 0x1000>;
+ #clock-cells = <1>;
+ };
+
gic: interrupt-controller@10220000 {
compatible = "arm,gic-400";
#interrupt-cells = <3>;
@@ -156,7 +282,8 @@
"mediatek,mt6577-uart";
reg = <0 0x11002000 0 0x400>;
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_LOW>;
- clocks = <&uart_clk>;
+ clocks = <&pericfg CLK_PERI_UART0_SEL>, <&pericfg CLK_PERI_UART0>;
+ clock-names = "baud", "bus";
status = "disabled";
};
@@ -165,7 +292,8 @@
"mediatek,mt6577-uart";
reg = <0 0x11003000 0 0x400>;
interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_LOW>;
- clocks = <&uart_clk>;
+ clocks = <&pericfg CLK_PERI_UART1_SEL>, <&pericfg CLK_PERI_UART1>;
+ clock-names = "baud", "bus";
status = "disabled";
};
@@ -174,7 +302,8 @@
"mediatek,mt6577-uart";
reg = <0 0x11004000 0 0x400>;
interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_LOW>;
- clocks = <&uart_clk>;
+ clocks = <&pericfg CLK_PERI_UART2_SEL>, <&pericfg CLK_PERI_UART2>;
+ clock-names = "baud", "bus";
status = "disabled";
};
@@ -183,7 +312,179 @@
"mediatek,mt6577-uart";
reg = <0 0x11005000 0 0x400>;
interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_LOW>;
- clocks = <&uart_clk>;
+ clocks = <&pericfg CLK_PERI_UART3_SEL>, <&pericfg CLK_PERI_UART3>;
+ clock-names = "baud", "bus";
+ status = "disabled";
+ };
+
+ i2c0: i2c@11007000 {
+ compatible = "mediatek,mt8173-i2c";
+ reg = <0 0x11007000 0 0x70>,
+ <0 0x11000100 0 0x80>;
+ interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_LOW>;
+ clock-div = <16>;
+ clocks = <&pericfg CLK_PERI_I2C0>,
+ <&pericfg CLK_PERI_AP_DMA>;
+ clock-names = "main", "dma";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_pins_a>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@11008000 {
+ compatible = "mediatek,mt8173-i2c";
+ reg = <0 0x11008000 0 0x70>,
+ <0 0x11000180 0 0x80>;
+ interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_LOW>;
+ clock-div = <16>;
+ clocks = <&pericfg CLK_PERI_I2C1>,
+ <&pericfg CLK_PERI_AP_DMA>;
+ clock-names = "main", "dma";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_pins_a>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@11009000 {
+ compatible = "mediatek,mt8173-i2c";
+ reg = <0 0x11009000 0 0x70>,
+ <0 0x11000200 0 0x80>;
+ interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_LOW>;
+ clock-div = <16>;
+ clocks = <&pericfg CLK_PERI_I2C2>,
+ <&pericfg CLK_PERI_AP_DMA>;
+ clock-names = "main", "dma";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_pins_a>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c3: i2c3@11010000 {
+ compatible = "mediatek,mt8173-i2c";
+ reg = <0 0x11010000 0 0x70>,
+ <0 0x11000280 0 0x80>;
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_LOW>;
+ clock-div = <16>;
+ clocks = <&pericfg CLK_PERI_I2C3>,
+ <&pericfg CLK_PERI_AP_DMA>;
+ clock-names = "main", "dma";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_pins_a>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c4: i2c4@11011000 {
+ compatible = "mediatek,mt8173-i2c";
+ reg = <0 0x11011000 0 0x70>,
+ <0 0x11000300 0 0x80>;
+ interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_LOW>;
+ clock-div = <16>;
+ clocks = <&pericfg CLK_PERI_I2C4>,
+ <&pericfg CLK_PERI_AP_DMA>;
+ clock-names = "main", "dma";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_pins_a>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c6: i2c6@11013000 {
+ compatible = "mediatek,mt8173-i2c";
+ reg = <0 0x11013000 0 0x70>,
+ <0 0x11000080 0 0x80>;
+ interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_LOW>;
+ clock-div = <16>;
+ clocks = <&pericfg CLK_PERI_I2C6>,
+ <&pericfg CLK_PERI_AP_DMA>;
+ clock-names = "main", "dma";
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c6_pins_a>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ afe: audio-controller@11220000 {
+ compatible = "mediatek,mt8173-afe-pcm";
+ reg = <0 0x11220000 0 0x1000>;
+ interrupts = <GIC_SPI 134 IRQ_TYPE_EDGE_FALLING>;
+ power-domains = <&scpsys MT8173_POWER_DOMAIN_AUDIO>;
+ clocks = <&infracfg CLK_INFRA_AUDIO>,
+ <&topckgen CLK_TOP_AUDIO_SEL>,
+ <&topckgen CLK_TOP_AUD_INTBUS_SEL>,
+ <&topckgen CLK_TOP_APLL1_DIV0>,
+ <&topckgen CLK_TOP_APLL2_DIV0>,
+ <&topckgen CLK_TOP_I2S0_M_SEL>,
+ <&topckgen CLK_TOP_I2S1_M_SEL>,
+ <&topckgen CLK_TOP_I2S2_M_SEL>,
+ <&topckgen CLK_TOP_I2S3_M_SEL>,
+ <&topckgen CLK_TOP_I2S3_B_SEL>;
+ clock-names = "infra_sys_audio_clk",
+ "top_pdn_audio",
+ "top_pdn_aud_intbus",
+ "bck0",
+ "bck1",
+ "i2s0_m",
+ "i2s1_m",
+ "i2s2_m",
+ "i2s3_m",
+ "i2s3_b";
+ assigned-clocks = <&topckgen CLK_TOP_AUD_1_SEL>,
+ <&topckgen CLK_TOP_AUD_2_SEL>;
+ assigned-clock-parents = <&topckgen CLK_TOP_APLL1>,
+ <&topckgen CLK_TOP_APLL2>;
+ };
+
+ mmc0: mmc@11230000 {
+ compatible = "mediatek,mt8173-mmc",
+ "mediatek,mt8135-mmc";
+ reg = <0 0x11230000 0 0x1000>;
+ interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&pericfg CLK_PERI_MSDC30_0>,
+ <&topckgen CLK_TOP_MSDC50_0_H_SEL>;
+ clock-names = "source", "hclk";
+ status = "disabled";
+ };
+
+ mmc1: mmc@11240000 {
+ compatible = "mediatek,mt8173-mmc",
+ "mediatek,mt8135-mmc";
+ reg = <0 0x11240000 0 0x1000>;
+ interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&pericfg CLK_PERI_MSDC30_1>,
+ <&topckgen CLK_TOP_AXI_SEL>;
+ clock-names = "source", "hclk";
+ status = "disabled";
+ };
+
+ mmc2: mmc@11250000 {
+ compatible = "mediatek,mt8173-mmc",
+ "mediatek,mt8135-mmc";
+ reg = <0 0x11250000 0 0x1000>;
+ interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&pericfg CLK_PERI_MSDC30_2>,
+ <&topckgen CLK_TOP_AXI_SEL>;
+ clock-names = "source", "hclk";
+ status = "disabled";
+ };
+
+ mmc3: mmc@11260000 {
+ compatible = "mediatek,mt8173-mmc",
+ "mediatek,mt8135-mmc";
+ reg = <0 0x11260000 0 0x1000>;
+ interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_LOW>;
+ clocks = <&pericfg CLK_PERI_MSDC30_3>,
+ <&topckgen CLK_TOP_MSDC50_2_H_SEL>;
+ clock-names = "source", "hclk";
status = "disabled";
};
};
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
index 535532b9287f..e03c11d9d834 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc-pmic-pins.dtsi
@@ -2,27 +2,37 @@
&pm8916_gpios {
- pinctrl-names = "default";
- pinctrl-0 = <&pm8916_gpios_default>;
-
- pm8916_gpios_default: default {
- usb_hub_reset_pm {
- pins = "gpio1";
+ usb_hub_reset_pm: usb_hub_reset_pm {
+ pinconf {
+ pins = "gpio3";
function = PMIC_GPIO_FUNC_NORMAL;
output-low;
};
- usb_sw_sel_pm {
- pins = "gpio2";
+ };
+
+ usb_sw_sel_pm: usb_sw_sel_pm {
+ pinconf {
+ pins = "gpio4";
function = PMIC_GPIO_FUNC_NORMAL;
+ power-source = <PM8916_GPIO_VPH>;
input-disable;
};
- usr_led_3_ctrl {
- pins = "gpio3";
+ };
+
+ pm8916_gpios_leds: pm8916_gpios_leds {
+ pinconf {
+ pins = "gpio1", "gpio2";
function = PMIC_GPIO_FUNC_NORMAL;
output-low;
};
- usr_led_4_ctrl {
- pins = "gpio4";
+ };
+};
+
+&pm8916_mpps {
+
+ pm8916_mpps_leds: pm8916_mpps_leds {
+ pinconf {
+ pins = "mpp2", "mpp3";
function = PMIC_GPIO_FUNC_NORMAL;
output-low;
};
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
index 5f7023f90df7..cbeee0bcdf52 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc-soc-pins.dtsi
@@ -3,17 +3,9 @@
&msmgpio {
- pinctrl-names = "default";
- pinctrl-0 = <&soc_gpios_default>;
-
- soc_gpios_default: default {
- usr_led_1_ctrl_default: usr_led_1_ctrl_default {
- pins = "gpio21";
- function = "gpio";
- output-low;
- };
- usr_led_2_ctrl_default: usr_led_2_ctrl_default {
- pins = "gpio120";
+ msmgpio_leds: msmgpio_leds {
+ pinconf {
+ pins = "gpio21", "gpio120";
function = "gpio";
output-low;
};
diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
index 98abece6b233..66804ffbc6d2 100644
--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
@@ -32,5 +32,56 @@
pinctrl-0 = <&blsp1_uart2_default>;
pinctrl-1 = <&blsp1_uart2_sleep>;
};
+
+ leds {
+ pinctrl-names = "default";
+ pinctrl-0 = <&msmgpio_leds>,
+ <&pm8916_gpios_leds>,
+ <&pm8916_mpps_leds>;
+
+ compatible = "gpio-leds";
+
+ led@1 {
+ label = "apq8016-sbc:green:user1";
+ gpios = <&msmgpio 21 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "off";
+ };
+
+ led@2 {
+ label = "apq8016-sbc:green:user2";
+ gpios = <&msmgpio 120 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc0";
+ default-state = "off";
+ };
+
+ led@3 {
+ label = "apq8016-sbc:green:user3";
+ gpios = <&pm8916_gpios 1 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc1";
+ default-state = "off";
+ };
+
+ led@4 {
+ label = "apq8016-sbc:green:user4";
+ gpios = <&pm8916_gpios 2 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "none";
+ default-state = "off";
+ };
+
+ led@5 {
+ label = "apq8016-sbc:yellow:wlan";
+ gpios = <&pm8916_mpps 2 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "wlan";
+ default-state = "off";
+ };
+
+ led@6 {
+ label = "apq8016-sbc:blue:bt";
+ gpios = <&pm8916_mpps 3 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "bt";
+ default-state = "off";
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
new file mode 100644
index 000000000000..568956859088
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&msmgpio {
+
+ blsp1_uart2_default: blsp1_uart2_default {
+ pinmux {
+ function = "blsp_uart2";
+ pins = "gpio4", "gpio5";
+ };
+ pinconf {
+ pins = "gpio4", "gpio5";
+ drive-strength = <16>;
+ bias-disable;
+ };
+ };
+
+ blsp1_uart2_sleep: blsp1_uart2_sleep {
+ pinmux {
+ function = "blsp_uart2";
+ pins = "gpio4", "gpio5";
+ };
+ pinconf {
+ pins = "gpio4", "gpio5";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ spi1_default: spi1_default {
+ pinmux {
+ function = "blsp_spi1";
+ pins = "gpio0", "gpio1", "gpio3";
+ };
+ pinmux_cs {
+ function = "gpio";
+ pins = "gpio2";
+ };
+ pinconf {
+ pins = "gpio0", "gpio1", "gpio3";
+ drive-strength = <12>;
+ bias-disable;
+ };
+ pinconf_cs {
+ pins = "gpio2";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ spi1_sleep: spi1_sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ };
+ pinconf {
+ pins = "gpio0", "gpio1", "gpio2", "gpio3";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ spi2_default: spi2_default {
+ pinmux {
+ function = "blsp_spi2";
+ pins = "gpio4", "gpio5", "gpio7";
+ };
+ pinmux_cs {
+ function = "gpio";
+ pins = "gpio6";
+ };
+ pinconf {
+ pins = "gpio4", "gpio5", "gpio6", "gpio7";
+ drive-strength = <12>;
+ bias-disable;
+ };
+ pinconf_cs {
+ pins = "gpio6";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ spi2_sleep: spi2_sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio4", "gpio5", "gpio6", "gpio7";
+ };
+ pinconf {
+ pins = "gpio4", "gpio5", "gpio6", "gpio7";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ spi3_default: spi3_default {
+ pinmux {
+ function = "blsp_spi3";
+ pins = "gpio8", "gpio9", "gpio11";
+ };
+ pinmux_cs {
+ function = "gpio";
+ pins = "gpio10";
+ };
+ pinconf {
+ pins = "gpio8", "gpio9", "gpio10", "gpio11";
+ drive-strength = <12>;
+ bias-disable;
+ };
+ pinconf_cs {
+ pins = "gpio10";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ spi3_sleep: spi3_sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio8", "gpio9", "gpio10", "gpio11";
+ };
+ pinconf {
+ pins = "gpio8", "gpio9", "gpio10", "gpio11";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ spi4_default: spi4_default {
+ pinmux {
+ function = "blsp_spi4";
+ pins = "gpio12", "gpio13", "gpio15";
+ };
+ pinmux_cs {
+ function = "gpio";
+ pins = "gpio14";
+ };
+ pinconf {
+ pins = "gpio12", "gpio13", "gpio14", "gpio15";
+ drive-strength = <12>;
+ bias-disable;
+ };
+ pinconf_cs {
+ pins = "gpio14";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ spi4_sleep: spi4_sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio12", "gpio13", "gpio14", "gpio15";
+ };
+ pinconf {
+ pins = "gpio12", "gpio13", "gpio14", "gpio15";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ spi5_default: spi5_default {
+ pinmux {
+ function = "blsp_spi5";
+ pins = "gpio16", "gpio17", "gpio19";
+ };
+ pinmux_cs {
+ function = "gpio";
+ pins = "gpio18";
+ };
+ pinconf {
+ pins = "gpio16", "gpio17", "gpio18", "gpio19";
+ drive-strength = <12>;
+ bias-disable;
+ };
+ pinconf_cs {
+ pins = "gpio18";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ spi5_sleep: spi5_sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio16", "gpio17", "gpio18", "gpio19";
+ };
+ pinconf {
+ pins = "gpio16", "gpio17", "gpio18", "gpio19";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ spi6_default: spi6_default {
+ pinmux {
+ function = "blsp_spi6";
+ pins = "gpio20", "gpio21", "gpio23";
+ };
+ pinmux_cs {
+ function = "gpio";
+ pins = "gpio22";
+ };
+ pinconf {
+ pins = "gpio20", "gpio21", "gpio22", "gpio23";
+ drive-strength = <12>;
+ bias-disable;
+ };
+ pinconf_cs {
+ pins = "gpio22";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ spi6_sleep: spi6_sleep {
+ pinmux {
+ function = "gpio";
+ pins = "gpio20", "gpio21", "gpio22", "gpio23";
+ };
+ pinconf {
+ pins = "gpio20", "gpio21", "gpio22", "gpio23";
+ drive-strength = <2>;
+ bias-pull-down;
+ };
+ };
+
+ i2c4_default: i2c4_default {
+ pinmux {
+ function = "blsp_i2c4";
+ pins = "gpio14", "gpio15";
+ };
+ pinconf {
+ pins = "gpio14", "gpio15";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ i2c4_sleep: i2c4_sleep {
+ pinmux {
+ function = "blsp_i2c4";
+ pins = "gpio14", "gpio15";
+ };
+ pinconf {
+ pins = "gpio14", "gpio15";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+
+ sdhc2_cd_pin {
+ sdc2_cd_on: cd_on {
+ pinmux {
+ function = "gpio";
+ pins = "gpio38";
+ };
+ pinconf {
+ pins = "gpio38";
+ drive-strength = <2>;
+ bias-pull-up;
+ };
+ };
+ sdc2_cd_off: cd_off {
+ pinmux {
+ function = "gpio";
+ pins = "gpio38";
+ };
+ pinconf {
+ pins = "gpio38";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
+ pmx_sdc1_clk {
+ sdc1_clk_on: clk_on {
+ pinmux {
+ pins = "sdc1_clk";
+ };
+ pinconf {
+ pins = "sdc1_clk";
+ bias-disable;
+ drive-strength = <16>;
+ };
+ };
+ sdc1_clk_off: clk_off {
+ pinmux {
+ pins = "sdc1_clk";
+ };
+ pinconf {
+ pins = "sdc1_clk";
+ bias-disable;
+ drive-strength = <2>;
+ };
+ };
+ };
+
+ pmx_sdc1_cmd {
+ sdc1_cmd_on: cmd_on {
+ pinmux {
+ pins = "sdc1_cmd";
+ };
+ pinconf {
+ pins = "sdc1_cmd";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+ };
+ sdc1_cmd_off: cmd_off {
+ pinmux {
+ pins = "sdc1_cmd";
+ };
+ pinconf {
+ pins = "sdc1_cmd";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+ };
+ };
+
+ pmx_sdc1_data {
+ sdc1_data_on: data_on {
+ pinmux {
+ pins = "sdc1_data";
+ };
+ pinconf {
+ pins = "sdc1_data";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+ };
+ sdc1_data_off: data_off {
+ pinmux {
+ pins = "sdc1_data";
+ };
+ pinconf {
+ pins = "sdc1_data";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+ };
+ };
+
+ pmx_sdc2_clk {
+ sdc2_clk_on: clk_on {
+ pinmux {
+ pins = "sdc2_clk";
+ };
+ pinconf {
+ pins = "sdc2_clk";
+ bias-disable;
+ drive-strength = <16>;
+ };
+ };
+ sdc2_clk_off: clk_off {
+ pinmux {
+ pins = "sdc2_clk";
+ };
+ pinconf {
+ pins = "sdc2_clk";
+ bias-disable;
+ drive-strength = <2>;
+ };
+ };
+ };
+
+ pmx_sdc2_cmd {
+ sdc2_cmd_on: cmd_on {
+ pinmux {
+ pins = "sdc2_cmd";
+ };
+ pinconf {
+ pins = "sdc2_cmd";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+ };
+ sdc2_cmd_off: cmd_off {
+ pinmux {
+ pins = "sdc2_cmd";
+ };
+ pinconf {
+ pins = "sdc2_cmd";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+ };
+ };
+
+ pmx_sdc2_data {
+ sdc2_data_on: data_on {
+ pinmux {
+ pins = "sdc2_data";
+ };
+ pinconf {
+ pins = "sdc2_data";
+ bias-pull-up;
+ drive-strength = <10>;
+ };
+ };
+ sdc2_data_off: data_off {
+ pinmux {
+ pins = "sdc2_data";
+ };
+ pinconf {
+ pins = "sdc2_data";
+ bias-pull-up;
+ drive-strength = <2>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 0f49ebd0aa8b..5911de008dd5 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -24,7 +24,10 @@
#address-cells = <2>;
#size-cells = <2>;
- aliases { };
+ aliases {
+ sdhc1 = &sdhc_1; /* SDC1 eMMC slot */
+ sdhc2 = &sdhc_2; /* SDC2 SD card slot */
+ };
chosen { };
@@ -90,30 +93,6 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
-
- blsp1_uart2_default: blsp1_uart2_default {
- pinmux {
- function = "blsp_uart2";
- pins = "gpio4", "gpio5";
- };
- pinconf {
- pins = "gpio4", "gpio5";
- drive-strength = <16>;
- bias-disable;
- };
- };
-
- blsp1_uart2_sleep: blsp1_uart2_sleep {
- pinmux {
- function = "blsp_uart2";
- pins = "gpio4", "gpio5";
- };
- pinconf {
- pins = "gpio4", "gpio5";
- drive-strength = <2>;
- bias-pull-down;
- };
- };
};
gcc: qcom,gcc@1800000 {
@@ -132,6 +111,202 @@
status = "disabled";
};
+ blsp_dma: dma@7884000 {
+ compatible = "qcom,bam-v1.7.0";
+ reg = <0x07884000 0x23000>;
+ interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "bam_clk";
+ #dma-cells = <1>;
+ qcom,ee = <0>;
+ status = "disabled";
+ };
+
+ blsp_spi1: spi@78b5000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x078b5000 0x600>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP1_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp_dma 5>, <&blsp_dma 4>;
+ dma-names = "rx", "tx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&spi1_default>;
+ pinctrl-1 = <&spi1_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp_spi2: spi@78b6000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x078b6000 0x600>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP2_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp_dma 7>, <&blsp_dma 6>;
+ dma-names = "rx", "tx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&spi2_default>;
+ pinctrl-1 = <&spi2_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp_spi3: spi@78b7000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x078b7000 0x600>;
+ interrupts = <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP3_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp_dma 9>, <&blsp_dma 8>;
+ dma-names = "rx", "tx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&spi3_default>;
+ pinctrl-1 = <&spi3_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp_spi4: spi@78b8000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x078b8000 0x600>;
+ interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP4_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp_dma 11>, <&blsp_dma 10>;
+ dma-names = "rx", "tx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&spi4_default>;
+ pinctrl-1 = <&spi4_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp_spi5: spi@78b9000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x078b9000 0x600>;
+ interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP5_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp_dma 13>, <&blsp_dma 12>;
+ dma-names = "rx", "tx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&spi5_default>;
+ pinctrl-1 = <&spi5_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp_spi6: spi@78ba000 {
+ compatible = "qcom,spi-qup-v2.2.1";
+ reg = <0x078ba000 0x600>;
+ interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&gcc GCC_BLSP1_QUP6_SPI_APPS_CLK>,
+ <&gcc GCC_BLSP1_AHB_CLK>;
+ clock-names = "core", "iface";
+ dmas = <&blsp_dma 15>, <&blsp_dma 14>;
+ dma-names = "rx", "tx";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&spi6_default>;
+ pinctrl-1 = <&spi6_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ blsp_i2c4: i2c@78b8000 {
+ compatible = "qcom,i2c-qup-v2.2.1";
+ reg = <0x78b8000 0x1000>;
+ interrupts = <GIC_SPI 98 0>;
+ clocks = <&gcc GCC_BLSP1_AHB_CLK>,
+ <&gcc GCC_BLSP1_QUP4_I2C_APPS_CLK>;
+ clock-names = "iface", "core";
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&i2c4_default>;
+ pinctrl-1 = <&i2c4_sleep>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ sdhc_1: sdhci@07824000 {
+ compatible = "qcom,sdhci-msm-v4";
+ reg = <0x07824900 0x11c>, <0x07824000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 123 0>, <0 138 0>;
+ interrupt-names = "hc_irq", "pwr_irq";
+ clocks = <&gcc GCC_SDCC1_APPS_CLK>,
+ <&gcc GCC_SDCC1_AHB_CLK>;
+ clock-names = "core", "iface";
+ bus-width = <8>;
+ non-removable;
+ status = "disabled";
+ };
+
+ sdhc_2: sdhci@07864000 {
+ compatible = "qcom,sdhci-msm-v4";
+ reg = <0x07864900 0x11c>, <0x07864000 0x800>;
+ reg-names = "hc_mem", "core_mem";
+
+ interrupts = <0 125 0>, <0 221 0>;
+ interrupt-names = "hc_irq", "pwr_irq";
+ clocks = <&gcc GCC_SDCC2_APPS_CLK>,
+ <&gcc GCC_SDCC2_AHB_CLK>;
+ clock-names = "core", "iface";
+ bus-width = <4>;
+ status = "disabled";
+ };
+
+ usb_dev: usb@78d9000 {
+ compatible = "qcom,ci-hdrc";
+ reg = <0x78d9000 0x400>;
+ dr_mode = "peripheral";
+ interrupts = <GIC_SPI 134 IRQ_TYPE_NONE>;
+ usb-phy = <&usb_otg>;
+ status = "disabled";
+ };
+
+ usb_host: ehci@78d9000 {
+ compatible = "qcom,ehci-host";
+ reg = <0x78d9000 0x400>;
+ interrupts = <GIC_SPI 134 IRQ_TYPE_NONE>;
+ usb-phy = <&usb_otg>;
+ status = "disabled";
+ };
+
+ usb_otg: phy@78d9000 {
+ compatible = "qcom,usb-otg-snps";
+ reg = <0x78d9000 0x400>;
+ interrupts = <GIC_SPI 134 IRQ_TYPE_EDGE_BOTH>,
+ <GIC_SPI 140 IRQ_TYPE_EDGE_RISING>;
+
+ qcom,vdd-levels = <1 5 7>;
+ qcom,phy-init-sequence = <0x44 0x6B 0x24 0x13>;
+ dr_mode = "peripheral";
+ qcom,otg-control = <2>; // PMIC
+
+ clocks = <&gcc GCC_USB_HS_AHB_CLK>,
+ <&gcc GCC_USB_HS_SYSTEM_CLK>,
+ <&gcc GCC_USB2A_PHY_SLEEP_CLK>;
+ clock-names = "iface", "core", "sleep";
+
+ resets = <&gcc GCC_USB2A_PHY_BCR>,
+ <&gcc GCC_USB_HS_BCR>;
+ reset-names = "phy", "link";
+ status = "disabled";
+ };
+
intc: interrupt-controller@b000000 {
compatible = "qcom,msm-qgic2";
interrupt-controller;
@@ -217,3 +392,5 @@
};
};
};
+
+#include "msm8916-pins.dtsi"
diff --git a/arch/arm64/boot/dts/rockchip/Makefile b/arch/arm64/boot/dts/rockchip/Makefile
new file mode 100644
index 000000000000..601e6a236c1d
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/Makefile
@@ -0,0 +1,5 @@
+dtb-$(CONFIG_ARCH_ROCKCHIP) += rk3368-r88.dtb
+
+always := $(dtb-y)
+subdir-y := $(dts-dirs)
+clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/rockchip/rk3368-r88.dts b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
new file mode 100644
index 000000000000..401a81231eb9
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3368-r88.dts
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2015 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "rk3368.dtsi"
+
+/ {
+ model = "Rockchip R88";
+ compatible = "rockchip,r88", "rockchip,rk3368";
+
+ chosen {
+ stdout-path = "serial2:115200n8";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x0 0x0 0x40000000>;
+ };
+
+ emmc_pwrseq: emmc-pwrseq {
+ compatible = "mmc-pwrseq-emmc";
+ pinctrl-0 = <&emmc_reset>;
+ pinctrl-names = "default";
+ reset-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
+ };
+
+ keys: gpio-keys {
+ compatible = "gpio-keys";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pwr_key>;
+
+ button@0 {
+ gpio-key,wakeup = <1>;
+ gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
+ label = "GPIO Power";
+ linux,code = <116>;
+ };
+ };
+
+ leds: gpio-leds {
+ compatible = "gpio-leds";
+
+ work {
+ gpios = <&gpio3 29 GPIO_ACTIVE_HIGH>;
+ label = "r88:green:led";
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_ctl>;
+ };
+ };
+
+ ir: ir-receiver {
+ compatible = "gpio-ir-receiver";
+ gpios = <&gpio3 30 GPIO_ACTIVE_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&ir_int>;
+ };
+
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&hym8563>;
+ clock-names = "ext_clock";
+ pinctrl-names = "default";
+ pinctrl-0 = <&bt_rst>, <&wifi_reg_on>;
+
+ reset-gpios =
+ /* BT_RST_N */
+ <&gpio3 5 GPIO_ACTIVE_LOW>,
+
+ /* WL_REG_ON */
+ <&gpio3 4 GPIO_ACTIVE_LOW>;
+ };
+
+ vcc_18: vcc18-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_18";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_sys>;
+ };
+
+ /* supplies both host and otg */
+ vcc_host: vcc-host-regulator {
+ compatible = "regulator-fixed";
+ enable-active-high;
+ gpio = <&gpio0 4 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&host_vbus_drv>;
+ regulator-name = "vcc_host";
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_sys>;
+ };
+
+ vcc_io: vcc-io-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_io";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_sys>;
+ };
+
+ vcc_lan: vcc-lan-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_lan";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_io>;
+ };
+
+ vcc_sys: vcc-sys-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_sys";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vccio_wl: vccio-wl-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vccio_wl";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_io>;
+ };
+
+ vdd_10: vdd-10-regulator {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd_10";
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <1000000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_sys>;
+ };
+};
+
+&emmc {
+ broken-cd;
+ bus-width = <8>;
+ cap-mmc-highspeed;
+ disable-wp;
+ mmc-pwrseq = <&emmc_pwrseq>;
+ non-removable;
+ num-slots = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>;
+ status = "okay";
+};
+
+&gmac {
+ phy-supply = <&vcc_lan>;
+ phy-mode = "rmii";
+ clock_in_out = "output";
+ snps,reset-gpio = <&gpio3 12 0>;
+ snps,reset-active-low;
+ snps,reset-delays-us = <0 10000 1000000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&rmii_pins>;
+ tx_delay = <0x30>;
+ rx_delay = <0x10>;
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+
+ vdd_cpu: syr827@40 {
+ compatible = "silergy,syr827";
+ reg = <0x40>;
+ fcs,suspend-voltage-selector = <1>;
+ regulator-name = "vdd_cpu";
+ regulator-enable-ramp-delay = <300>;
+ regulator-min-microvolt = <712500>;
+ regulator-max-microvolt = <1500000>;
+ regulator-ramp-delay = <8000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&vcc_sys>;
+ };
+
+ hym8563: hym8563@51 {
+ compatible = "haoyu,hym8563";
+ reg = <0x51>;
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "xin32k";
+ /* rtc_int is not connected */
+ };
+};
+
+&sdio0 {
+ assigned-clocks = <&cru SCLK_SDIO0>;
+ assigned-clock-parents = <&cru PLL_CPLL>;
+ broken-cd;
+ bus-width = <4>;
+ cap-sd-highspeed;
+ cap-sdio-irq;
+ keep-power-in-suspend;
+ mmc-pwrseq = <&sdio_pwrseq>;
+ non-removable;
+ num-slots = <1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sdio0_clk &sdio0_cmd &sdio0_bus4>;
+ vmmc-supply = <&vcc_io>;
+ vqmmc-supply = <&vccio_wl>;
+ status = "okay";
+};
+
+&pinctrl {
+ pcfg_pull_none_drv_8ma: pcfg-pull-none-drv-8ma {
+ bias-disable;
+ drive-strength = <8>;
+ };
+
+ pcfg_pull_up_drv_8ma: pcfg-pull-up-drv-8ma {
+ bias-pull-up;
+ drive-strength = <8>;
+ };
+
+ emmc {
+ emmc_bus8: emmc-bus8 {
+ rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+ <1 19 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+ <1 20 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+ <1 21 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+ <1 22 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+ <1 23 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+ <1 24 RK_FUNC_2 &pcfg_pull_up_drv_8ma>,
+ <1 25 RK_FUNC_2 &pcfg_pull_up_drv_8ma>;
+ };
+
+ emmc-clk {
+ rockchip,pins = <2 4 RK_FUNC_2 &pcfg_pull_none_drv_8ma>;
+ };
+
+ emmc-cmd {
+ rockchip,pins = <1 26 RK_FUNC_2 &pcfg_pull_up_drv_8ma>;
+ };
+
+ emmc_reset: emmc-reset {
+ rockchip,pins = <2 3 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ ir {
+ ir_int: ir-int {
+ rockchip,pins = <3 30 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ keys {
+ pwr_key: pwr-key {
+ rockchip,pins = <0 2 RK_FUNC_GPIO &pcfg_pull_up>;
+ };
+ };
+
+ leds {
+ stby_pwren: stby-pwren {
+ rockchip,pins = <0 12 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ led_ctl: led-ctl {
+ rockchip,pins = <3 29 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ sdio {
+ wifi_reg_on: wifi-reg-on {
+ rockchip,pins = <3 4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+
+ bt_rst: bt-rst {
+ rockchip,pins = <3 5 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+
+ usb {
+ host_vbus_drv: host-vbus-drv {
+ rockchip,pins = <0 4 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+ };
+};
+
+&saradc {
+ vref-supply = <&vcc_18>;
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&usb_host0_ehci {
+ status = "okay";
+};
+
+&usb_otg {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&wdt {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
new file mode 100644
index 000000000000..cc093a482aa4
--- /dev/null
+++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi
@@ -0,0 +1,900 @@
+/*
+ * Copyright (c) 2015 Heiko Stuebner <heiko@sntech.de>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/clock/rk3368-cru.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/pinctrl/rockchip.h>
+
+/ {
+ compatible = "rockchip,rk3368";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ aliases {
+ i2c0 = &i2c0;
+ i2c1 = &i2c1;
+ i2c2 = &i2c2;
+ i2c3 = &i2c3;
+ i2c4 = &i2c4;
+ i2c5 = &i2c5;
+ serial0 = &uart0;
+ serial1 = &uart1;
+ serial2 = &uart2;
+ serial3 = &uart3;
+ serial4 = &uart4;
+ spi0 = &spi0;
+ spi1 = &spi1;
+ spi2 = &spi2;
+ };
+
+ cpus {
+ #address-cells = <0x2>;
+ #size-cells = <0x0>;
+
+ cpu-map {
+ cluster0 {
+ core0 {
+ cpu = <&cpu_b0>;
+ };
+ core1 {
+ cpu = <&cpu_b1>;
+ };
+ core2 {
+ cpu = <&cpu_b2>;
+ };
+ core3 {
+ cpu = <&cpu_b3>;
+ };
+ };
+
+ cluster1 {
+ core0 {
+ cpu = <&cpu_l0>;
+ };
+ core1 {
+ cpu = <&cpu_l1>;
+ };
+ core2 {
+ cpu = <&cpu_l2>;
+ };
+ core3 {
+ cpu = <&cpu_l3>;
+ };
+ };
+ };
+
+ idle-states {
+ entry-method = "psci";
+
+ cpu_sleep: cpu-sleep-0 {
+ compatible = "arm,idle-state";
+ arm,psci-suspend-param = <0x1010000>;
+ entry-latency-us = <0x3fffffff>;
+ exit-latency-us = <0x40000000>;
+ min-residency-us = <0xffffffff>;
+ };
+ };
+
+ cpu_l0: cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x0>;
+ cpu-idle-states = <&cpu_sleep>;
+ enable-method = "psci";
+ };
+
+ cpu_l1: cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x1>;
+ cpu-idle-states = <&cpu_sleep>;
+ enable-method = "psci";
+ };
+
+ cpu_l2: cpu@2 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x2>;
+ cpu-idle-states = <&cpu_sleep>;
+ enable-method = "psci";
+ };
+
+ cpu_l3: cpu@3 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x3>;
+ cpu-idle-states = <&cpu_sleep>;
+ enable-method = "psci";
+ };
+
+ cpu_b0: cpu@100 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x100>;
+ cpu-idle-states = <&cpu_sleep>;
+ enable-method = "psci";
+ };
+
+ cpu_b1: cpu@101 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x101>;
+ cpu-idle-states = <&cpu_sleep>;
+ enable-method = "psci";
+ };
+
+ cpu_b2: cpu@102 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x102>;
+ cpu-idle-states = <&cpu_sleep>;
+ enable-method = "psci";
+ };
+
+ cpu_b3: cpu@103 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a53", "arm,armv8";
+ reg = <0x0 0x103>;
+ cpu-idle-states = <&cpu_sleep>;
+ enable-method = "psci";
+ };
+ };
+
+ arm-pmu {
+ compatible = "arm,armv8-pmuv3";
+ interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-affinity = <&cpu_l0>, <&cpu_l1>, <&cpu_l2>,
+ <&cpu_l3>, <&cpu_b0>, <&cpu_b1>,
+ <&cpu_b2>, <&cpu_b3>;
+ };
+
+ psci {
+ compatible = "arm,psci-0.2";
+ method = "smc";
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <GIC_PPI 13
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>,
+ <GIC_PPI 14
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>,
+ <GIC_PPI 11
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>,
+ <GIC_PPI 10
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ xin24m: oscillator {
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ clock-output-names = "xin24m";
+ #clock-cells = <0>;
+ };
+
+ sdmmc: dwmmc@ff0c0000 {
+ compatible = "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc";
+ reg = <0x0 0xff0c0000 0x0 0x4000>;
+ clock-freq-min-max = <400000 150000000>;
+ clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>;
+ clock-names = "biu", "ciu";
+ fifo-depth = <0x100>;
+ interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ sdio0: dwmmc@ff0d0000 {
+ compatible = "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc";
+ reg = <0x0 0xff0d0000 0x0 0x4000>;
+ clock-freq-min-max = <400000 150000000>;
+ clocks = <&cru HCLK_SDIO0>, <&cru SCLK_SDIO0>,
+ <&cru SCLK_SDIO0_DRV>, <&cru SCLK_SDIO0_SAMPLE>;
+ clock-names = "biu", "ciu", "ciu_drv", "ciu_sample";
+ fifo-depth = <0x100>;
+ interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ emmc: dwmmc@ff0f0000 {
+ compatible = "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc";
+ reg = <0x0 0xff0f0000 0x0 0x4000>;
+ clock-freq-min-max = <400000 150000000>;
+ clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>;
+ clock-names = "biu", "ciu";
+ fifo-depth = <0x100>;
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ saradc: saradc@ff100000 {
+ compatible = "rockchip,saradc";
+ reg = <0x0 0xff100000 0x0 0x100>;
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ #io-channel-cells = <1>;
+ clocks = <&cru SCLK_SARADC>, <&cru PCLK_SARADC>;
+ clock-names = "saradc", "apb_pclk";
+ status = "disabled";
+ };
+
+ spi0: spi@ff110000 {
+ compatible = "rockchip,rk3368-spi", "rockchip,rk3066-spi";
+ reg = <0x0 0xff110000 0x0 0x1000>;
+ clocks = <&cru SCLK_SPI0>, <&cru PCLK_SPI0>;
+ clock-names = "spiclk", "apb_pclk";
+ interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi0_clk &spi0_tx &spi0_rx &spi0_cs0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi1: spi@ff120000 {
+ compatible = "rockchip,rk3368-spi", "rockchip,rk3066-spi";
+ reg = <0x0 0xff120000 0x0 0x1000>;
+ clocks = <&cru SCLK_SPI1>, <&cru PCLK_SPI1>;
+ clock-names = "spiclk", "apb_pclk";
+ interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi1_clk &spi1_tx &spi1_rx &spi1_cs0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ spi2: spi@ff130000 {
+ compatible = "rockchip,rk3368-spi", "rockchip,rk3066-spi";
+ reg = <0x0 0xff130000 0x0 0x1000>;
+ clocks = <&cru SCLK_SPI2>, <&cru PCLK_SPI2>;
+ clock-names = "spiclk", "apb_pclk";
+ interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi2_clk &spi2_tx &spi2_rx &spi2_cs0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c1: i2c@ff140000 {
+ compatible = "rockchip,rk3368-i2c", "rockchip,rk3288-i2c";
+ reg = <0x0 0xff140000 0x0 0x1000>;
+ interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "i2c";
+ clocks = <&cru PCLK_I2C1>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c1_xfer>;
+ status = "disabled";
+ };
+
+ i2c3: i2c@ff150000 {
+ compatible = "rockchip,rk3368-i2c", "rockchip,rk3288-i2c";
+ reg = <0x0 0xff150000 0x0 0x1000>;
+ interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "i2c";
+ clocks = <&cru PCLK_I2C3>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c3_xfer>;
+ status = "disabled";
+ };
+
+ i2c4: i2c@ff160000 {
+ compatible = "rockchip,rk3368-i2c", "rockchip,rk3288-i2c";
+ reg = <0x0 0xff160000 0x0 0x1000>;
+ interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "i2c";
+ clocks = <&cru PCLK_I2C4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c4_xfer>;
+ status = "disabled";
+ };
+
+ i2c5: i2c@ff170000 {
+ compatible = "rockchip,rk3368-i2c", "rockchip,rk3288-i2c";
+ reg = <0x0 0xff170000 0x0 0x1000>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "i2c";
+ clocks = <&cru PCLK_I2C5>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c5_xfer>;
+ status = "disabled";
+ };
+
+ uart0: serial@ff180000 {
+ compatible = "rockchip,rk3368-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff180000 0x0 0x100>;
+ clock-frequency = <24000000>;
+ clocks = <&cru SCLK_UART0>, <&cru PCLK_UART0>;
+ clock-names = "baudclk", "apb_pclk";
+ interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart1: serial@ff190000 {
+ compatible = "rockchip,rk3368-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff190000 0x0 0x100>;
+ clock-frequency = <24000000>;
+ clocks = <&cru SCLK_UART1>, <&cru PCLK_UART1>;
+ clock-names = "baudclk", "apb_pclk";
+ interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart3: serial@ff1b0000 {
+ compatible = "rockchip,rk3368-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff1b0000 0x0 0x100>;
+ clock-frequency = <24000000>;
+ clocks = <&cru SCLK_UART3>, <&cru PCLK_UART3>;
+ clock-names = "baudclk", "apb_pclk";
+ interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ uart4: serial@ff1c0000 {
+ compatible = "rockchip,rk3368-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff1c0000 0x0 0x100>;
+ clock-frequency = <24000000>;
+ clocks = <&cru SCLK_UART4>, <&cru PCLK_UART4>;
+ clock-names = "baudclk", "apb_pclk";
+ interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ gmac: ethernet@ff290000 {
+ compatible = "rockchip,rk3368-gmac";
+ reg = <0x0 0xff290000 0x0 0x10000>;
+ interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ rockchip,grf = <&grf>;
+ clocks = <&cru SCLK_MAC>,
+ <&cru SCLK_MAC_RX>, <&cru SCLK_MAC_TX>,
+ <&cru SCLK_MACREF>, <&cru SCLK_MACREF_OUT>,
+ <&cru ACLK_GMAC>, <&cru PCLK_GMAC>;
+ clock-names = "stmmaceth",
+ "mac_clk_rx", "mac_clk_tx",
+ "clk_mac_ref", "clk_mac_refout",
+ "aclk_mac", "pclk_mac";
+ status = "disabled";
+ };
+
+ usb_host0_ehci: usb@ff500000 {
+ compatible = "generic-ehci";
+ reg = <0x0 0xff500000 0x0 0x100>;
+ interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_HOST0>;
+ clock-names = "usbhost";
+ status = "disabled";
+ };
+
+ usb_otg: usb@ff580000 {
+ compatible = "rockchip,rk3368-usb", "rockchip,rk3066-usb",
+ "snps,dwc2";
+ reg = <0x0 0xff580000 0x0 0x40000>;
+ interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru HCLK_OTG0>;
+ clock-names = "otg";
+ dr_mode = "otg";
+ g-np-tx-fifo-size = <16>;
+ g-rx-fifo-size = <275>;
+ g-tx-fifo-size = <256 128 128 64 64 32>;
+ g-use-dma;
+ status = "disabled";
+ };
+
+ i2c0: i2c@ff650000 {
+ compatible = "rockchip,rk3368-i2c", "rockchip,rk3288-i2c";
+ reg = <0x0 0xff650000 0x0 0x1000>;
+ clocks = <&cru PCLK_I2C0>;
+ clock-names = "i2c";
+ interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c0_xfer>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+
+ i2c2: i2c@ff660000 {
+ compatible = "rockchip,rk3368-i2c", "rockchip,rk3288-i2c";
+ reg = <0x0 0xff660000 0x0 0x1000>;
+ interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-names = "i2c";
+ clocks = <&cru PCLK_I2C2>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&i2c2_xfer>;
+ status = "disabled";
+ };
+
+ uart2: serial@ff690000 {
+ compatible = "rockchip,rk3368-uart", "snps,dw-apb-uart";
+ reg = <0x0 0xff690000 0x0 0x100>;
+ clocks = <&cru SCLK_UART2>, <&cru PCLK_UART2>;
+ clock-names = "baudclk", "apb_pclk";
+ interrupts = <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart2_xfer>;
+ reg-shift = <2>;
+ reg-io-width = <4>;
+ status = "disabled";
+ };
+
+ pmugrf: syscon@ff738000 {
+ compatible = "rockchip,rk3368-pmugrf", "syscon";
+ reg = <0x0 0xff738000 0x0 0x1000>;
+ };
+
+ cru: clock-controller@ff760000 {
+ compatible = "rockchip,rk3368-cru";
+ reg = <0x0 0xff760000 0x0 0x1000>;
+ rockchip,grf = <&grf>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
+ grf: syscon@ff770000 {
+ compatible = "rockchip,rk3368-grf", "syscon";
+ reg = <0x0 0xff770000 0x0 0x1000>;
+ };
+
+ wdt: watchdog@ff800000 {
+ compatible = "rockchip,rk3368-wdt", "snps,dw-wdt";
+ reg = <0x0 0xff800000 0x0 0x100>;
+ clocks = <&cru PCLK_WDT>;
+ interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+ gic: interrupt-controller@ffb71000 {
+ compatible = "arm,gic-400";
+ interrupt-controller;
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+
+ reg = <0x0 0xffb71000 0x0 0x1000>,
+ <0x0 0xffb72000 0x0 0x1000>,
+ <0x0 0xffb74000 0x0 0x2000>,
+ <0x0 0xffb76000 0x0 0x2000>;
+ interrupts = <GIC_PPI 9
+ (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
+ };
+
+ pinctrl: pinctrl {
+ compatible = "rockchip,rk3368-pinctrl";
+ rockchip,grf = <&grf>;
+ rockchip,pmu = <&pmugrf>;
+ #address-cells = <0x2>;
+ #size-cells = <0x2>;
+ ranges;
+
+ gpio0: gpio0@ff750000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xff750000 0x0 0x100>;
+ clocks = <&cru PCLK_GPIO0>;
+ interrupts = <GIC_SPI 0x51 IRQ_TYPE_LEVEL_HIGH>;
+
+ gpio-controller;
+ #gpio-cells = <0x2>;
+
+ interrupt-controller;
+ #interrupt-cells = <0x2>;
+ };
+
+ gpio1: gpio1@ff780000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xff780000 0x0 0x100>;
+ clocks = <&cru PCLK_GPIO1>;
+ interrupts = <GIC_SPI 0x52 IRQ_TYPE_LEVEL_HIGH>;
+
+ gpio-controller;
+ #gpio-cells = <0x2>;
+
+ interrupt-controller;
+ #interrupt-cells = <0x2>;
+ };
+
+ gpio2: gpio2@ff790000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xff790000 0x0 0x100>;
+ clocks = <&cru PCLK_GPIO2>;
+ interrupts = <GIC_SPI 0x53 IRQ_TYPE_LEVEL_HIGH>;
+
+ gpio-controller;
+ #gpio-cells = <0x2>;
+
+ interrupt-controller;
+ #interrupt-cells = <0x2>;
+ };
+
+ gpio3: gpio3@ff7a0000 {
+ compatible = "rockchip,gpio-bank";
+ reg = <0x0 0xff7a0000 0x0 0x100>;
+ clocks = <&cru PCLK_GPIO3>;
+ interrupts = <GIC_SPI 0x54 IRQ_TYPE_LEVEL_HIGH>;
+
+ gpio-controller;
+ #gpio-cells = <0x2>;
+
+ interrupt-controller;
+ #interrupt-cells = <0x2>;
+ };
+
+ pcfg_pull_up: pcfg-pull-up {
+ bias-pull-up;
+ };
+
+ pcfg_pull_down: pcfg-pull-down {
+ bias-pull-down;
+ };
+
+ pcfg_pull_none: pcfg-pull-none {
+ bias-disable;
+ };
+
+ pcfg_pull_none_12ma: pcfg-pull-none-12ma {
+ bias-disable;
+ drive-strength = <12>;
+ };
+
+ emmc {
+ emmc_clk: emmc-clk {
+ rockchip,pins = <2 4 RK_FUNC_2 &pcfg_pull_none>;
+ };
+
+ emmc_cmd: emmc-cmd {
+ rockchip,pins = <1 26 RK_FUNC_2 &pcfg_pull_up>;
+ };
+
+ emmc_pwr: emmc-pwr {
+ rockchip,pins = <1 27 RK_FUNC_2 &pcfg_pull_up>;
+ };
+
+ emmc_bus1: emmc-bus1 {
+ rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_up>;
+ };
+
+ emmc_bus4: emmc-bus4 {
+ rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_up>,
+ <1 19 RK_FUNC_2 &pcfg_pull_up>,
+ <1 20 RK_FUNC_2 &pcfg_pull_up>,
+ <1 21 RK_FUNC_2 &pcfg_pull_up>;
+ };
+
+ emmc_bus8: emmc-bus8 {
+ rockchip,pins = <1 18 RK_FUNC_2 &pcfg_pull_up>,
+ <1 19 RK_FUNC_2 &pcfg_pull_up>,
+ <1 20 RK_FUNC_2 &pcfg_pull_up>,
+ <1 21 RK_FUNC_2 &pcfg_pull_up>,
+ <1 22 RK_FUNC_2 &pcfg_pull_up>,
+ <1 23 RK_FUNC_2 &pcfg_pull_up>,
+ <1 24 RK_FUNC_2 &pcfg_pull_up>,
+ <1 25 RK_FUNC_2 &pcfg_pull_up>;
+ };
+ };
+
+ gmac {
+ rgmii_pins: rgmii-pins {
+ rockchip,pins = <3 22 RK_FUNC_1 &pcfg_pull_none>,
+ <3 24 RK_FUNC_1 &pcfg_pull_none>,
+ <3 19 RK_FUNC_1 &pcfg_pull_none>,
+ <3 8 RK_FUNC_1 &pcfg_pull_none_12ma>,
+ <3 9 RK_FUNC_1 &pcfg_pull_none_12ma>,
+ <3 10 RK_FUNC_1 &pcfg_pull_none_12ma>,
+ <3 14 RK_FUNC_1 &pcfg_pull_none_12ma>,
+ <3 28 RK_FUNC_1 &pcfg_pull_none_12ma>,
+ <3 13 RK_FUNC_1 &pcfg_pull_none_12ma>,
+ <3 15 RK_FUNC_1 &pcfg_pull_none>,
+ <3 16 RK_FUNC_1 &pcfg_pull_none>,
+ <3 17 RK_FUNC_1 &pcfg_pull_none>,
+ <3 18 RK_FUNC_1 &pcfg_pull_none>,
+ <3 25 RK_FUNC_1 &pcfg_pull_none>,
+ <3 20 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ rmii_pins: rmii-pins {
+ rockchip,pins = <3 22 RK_FUNC_1 &pcfg_pull_none>,
+ <3 24 RK_FUNC_1 &pcfg_pull_none>,
+ <3 19 RK_FUNC_1 &pcfg_pull_none>,
+ <3 8 RK_FUNC_1 &pcfg_pull_none_12ma>,
+ <3 9 RK_FUNC_1 &pcfg_pull_none_12ma>,
+ <3 13 RK_FUNC_1 &pcfg_pull_none_12ma>,
+ <3 15 RK_FUNC_1 &pcfg_pull_none>,
+ <3 16 RK_FUNC_1 &pcfg_pull_none>,
+ <3 20 RK_FUNC_1 &pcfg_pull_none>,
+ <3 21 RK_FUNC_1 &pcfg_pull_none>;
+ };
+ };
+
+ i2c0 {
+ i2c0_xfer: i2c0-xfer {
+ rockchip,pins = <0 6 RK_FUNC_1 &pcfg_pull_none>,
+ <0 7 RK_FUNC_1 &pcfg_pull_none>;
+ };
+ };
+
+ i2c1 {
+ i2c1_xfer: i2c1-xfer {
+ rockchip,pins = <2 21 RK_FUNC_1 &pcfg_pull_none>,
+ <2 22 RK_FUNC_1 &pcfg_pull_none>;
+ };
+ };
+
+ i2c2 {
+ i2c2_xfer: i2c2-xfer {
+ rockchip,pins = <0 9 RK_FUNC_2 &pcfg_pull_none>,
+ <3 31 RK_FUNC_2 &pcfg_pull_none>;
+ };
+ };
+
+ i2c3 {
+ i2c3_xfer: i2c3-xfer {
+ rockchip,pins = <1 16 RK_FUNC_1 &pcfg_pull_none>,
+ <1 17 RK_FUNC_1 &pcfg_pull_none>;
+ };
+ };
+
+ i2c4 {
+ i2c4_xfer: i2c4-xfer {
+ rockchip,pins = <3 24 RK_FUNC_2 &pcfg_pull_none>,
+ <3 25 RK_FUNC_2 &pcfg_pull_none>;
+ };
+ };
+
+ i2c5 {
+ i2c5_xfer: i2c5-xfer {
+ rockchip,pins = <3 26 RK_FUNC_2 &pcfg_pull_none>,
+ <3 27 RK_FUNC_2 &pcfg_pull_none>;
+ };
+ };
+
+ sdio0 {
+ sdio0_bus1: sdio0-bus1 {
+ rockchip,pins = <2 28 RK_FUNC_1 &pcfg_pull_up>;
+ };
+
+ sdio0_bus4: sdio0-bus4 {
+ rockchip,pins = <2 28 RK_FUNC_1 &pcfg_pull_up>,
+ <2 29 RK_FUNC_1 &pcfg_pull_up>,
+ <2 30 RK_FUNC_1 &pcfg_pull_up>,
+ <2 31 RK_FUNC_1 &pcfg_pull_up>;
+ };
+
+ sdio0_cmd: sdio0-cmd {
+ rockchip,pins = <3 0 RK_FUNC_1 &pcfg_pull_up>;
+ };
+
+ sdio0_clk: sdio0-clk {
+ rockchip,pins = <3 1 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ sdio0_cd: sdio0-cd {
+ rockchip,pins = <3 2 RK_FUNC_1 &pcfg_pull_up>;
+ };
+
+ sdio0_wp: sdio0-wp {
+ rockchip,pins = <3 3 RK_FUNC_1 &pcfg_pull_up>;
+ };
+
+ sdio0_pwr: sdio0-pwr {
+ rockchip,pins = <3 4 RK_FUNC_1 &pcfg_pull_up>;
+ };
+
+ sdio0_bkpwr: sdio0-bkpwr {
+ rockchip,pins = <3 5 RK_FUNC_1 &pcfg_pull_up>;
+ };
+
+ sdio0_int: sdio0-int {
+ rockchip,pins = <3 6 RK_FUNC_1 &pcfg_pull_up>;
+ };
+ };
+
+ sdmmc {
+ sdmmc_clk: sdmmc-clk {
+ rockchip,pins = <2 9 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ sdmmc_cmd: sdmmc-cmd {
+ rockchip,pins = <2 10 RK_FUNC_1 &pcfg_pull_up>;
+ };
+
+ sdmmc_cd: sdmmc-cd {
+ rockchip,pins = <2 11 RK_FUNC_1 &pcfg_pull_up>;
+ };
+
+ sdmmc_bus1: sdmmc-bus1 {
+ rockchip,pins = <2 5 RK_FUNC_1 &pcfg_pull_up>;
+ };
+
+ sdmmc_bus4: sdmmc-bus4 {
+ rockchip,pins = <2 5 RK_FUNC_1 &pcfg_pull_up>,
+ <2 6 RK_FUNC_1 &pcfg_pull_up>,
+ <2 7 RK_FUNC_1 &pcfg_pull_up>,
+ <2 8 RK_FUNC_1 &pcfg_pull_up>;
+ };
+ };
+
+ spi0 {
+ spi0_clk: spi0-clk {
+ rockchip,pins = <1 29 RK_FUNC_2 &pcfg_pull_up>;
+ };
+ spi0_cs0: spi0-cs0 {
+ rockchip,pins = <1 24 RK_FUNC_3 &pcfg_pull_up>;
+ };
+ spi0_cs1: spi0-cs1 {
+ rockchip,pins = <1 25 RK_FUNC_3 &pcfg_pull_up>;
+ };
+ spi0_tx: spi0-tx {
+ rockchip,pins = <1 23 RK_FUNC_3 &pcfg_pull_up>;
+ };
+ spi0_rx: spi0-rx {
+ rockchip,pins = <1 22 RK_FUNC_3 &pcfg_pull_up>;
+ };
+ };
+
+ spi1 {
+ spi1_clk: spi1-clk {
+ rockchip,pins = <1 14 RK_FUNC_2 &pcfg_pull_up>;
+ };
+ spi1_cs0: spi1-cs0 {
+ rockchip,pins = <1 15 RK_FUNC_2 &pcfg_pull_up>;
+ };
+ spi1_cs1: spi1-cs1 {
+ rockchip,pins = <3 28 RK_FUNC_2 &pcfg_pull_up>;
+ };
+ spi1_rx: spi1-rx {
+ rockchip,pins = <1 16 RK_FUNC_2 &pcfg_pull_up>;
+ };
+ spi1_tx: spi1-tx {
+ rockchip,pins = <1 17 RK_FUNC_2 &pcfg_pull_up>;
+ };
+ };
+
+ spi2 {
+ spi2_clk: spi2-clk {
+ rockchip,pins = <0 12 RK_FUNC_2 &pcfg_pull_up>;
+ };
+ spi2_cs0: spi2-cs0 {
+ rockchip,pins = <0 13 RK_FUNC_2 &pcfg_pull_up>;
+ };
+ spi2_rx: spi2-rx {
+ rockchip,pins = <0 10 RK_FUNC_2 &pcfg_pull_up>;
+ };
+ spi2_tx: spi2-tx {
+ rockchip,pins = <0 11 RK_FUNC_2 &pcfg_pull_up>;
+ };
+ };
+
+ uart0 {
+ uart0_xfer: uart0-xfer {
+ rockchip,pins = <2 24 RK_FUNC_1 &pcfg_pull_up>,
+ <2 25 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ uart0_cts: uart0-cts {
+ rockchip,pins = <2 26 RK_FUNC_1 &pcfg_pull_none>;
+ };
+
+ uart0_rts: uart0-rts {
+ rockchip,pins = <2 27 RK_FUNC_1 &pcfg_pull_none>;
+ };
+ };
+
+ uart1 {
+ uart1_xfer: uart1-xfer {
+ rockchip,pins = <0 20 RK_FUNC_3 &pcfg_pull_up>,
+ <0 21 RK_FUNC_3 &pcfg_pull_none>;
+ };
+
+ uart1_cts: uart1-cts {
+ rockchip,pins = <0 22 RK_FUNC_3 &pcfg_pull_none>;
+ };
+
+ uart1_rts: uart1-rts {
+ rockchip,pins = <0 23 RK_FUNC_3 &pcfg_pull_none>;
+ };
+ };
+
+ uart2 {
+ uart2_xfer: uart2-xfer {
+ rockchip,pins = <2 6 RK_FUNC_2 &pcfg_pull_up>,
+ <2 5 RK_FUNC_2 &pcfg_pull_none>;
+ };
+ /* no rts / cts for uart2 */
+ };
+
+ uart3 {
+ uart3_xfer: uart3-xfer {
+ rockchip,pins = <3 29 RK_FUNC_2 &pcfg_pull_up>,
+ <3 30 RK_FUNC_3 &pcfg_pull_none>;
+ };
+
+ uart3_cts: uart3-cts {
+ rockchip,pins = <3 16 RK_FUNC_2 &pcfg_pull_none>;
+ };
+
+ uart3_rts: uart3-rts {
+ rockchip,pins = <3 17 RK_FUNC_2 &pcfg_pull_none>;
+ };
+ };
+
+ uart4 {
+ uart4_xfer: uart4-xfer {
+ rockchip,pins = <0 27 RK_FUNC_3 &pcfg_pull_up>,
+ <0 26 RK_FUNC_3 &pcfg_pull_none>;
+ };
+
+ uart4_cts: uart4-cts {
+ rockchip,pins = <0 24 RK_FUNC_3 &pcfg_pull_none>;
+ };
+
+ uart4_rts: uart4-rts {
+ rockchip,pins = <0 25 RK_FUNC_3 &pcfg_pull_none>;
+ };
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/sprd/sc9836.dtsi b/arch/arm64/boot/dts/sprd/sc9836.dtsi
index ee34e1a36e03..63894c456969 100644
--- a/arch/arm64/boot/dts/sprd/sc9836.dtsi
+++ b/arch/arm64/boot/dts/sprd/sc9836.dtsi
@@ -16,28 +16,28 @@
#address-cells = <2>;
#size-cells = <0>;
- cpu@0 {
+ cpu0: cpu@0 {
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x0>;
enable-method = "psci";
};
- cpu@1 {
+ cpu1: cpu@1 {
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x1>;
enable-method = "psci";
};
- cpu@2 {
+ cpu2: cpu@2 {
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x2>;
enable-method = "psci";
};
- cpu@3 {
+ cpu3: cpu@3 {
device_type = "cpu";
compatible = "arm,cortex-a53", "arm,armv8";
reg = <0x0 0x3>;
@@ -75,14 +75,103 @@
};
};
- /* funnel input port 0~3 is reserved for ETMs */
+ /* funnel input ports 0-4 */
port@1 {
+ reg = <0>;
+ funnel_in_port0: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm0_out>;
+ };
+ };
+
+ port@2 {
+ reg = <1>;
+ funnel_in_port1: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm1_out>;
+ };
+ };
+
+ port@3 {
+ reg = <2>;
+ funnel_in_port2: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm2_out>;
+ };
+ };
+
+ port@4 {
+ reg = <3>;
+ funnel_in_port3: endpoint {
+ slave-mode;
+ remote-endpoint = <&etm3_out>;
+ };
+ };
+
+ port@5 {
reg = <4>;
funnel_in_port4: endpoint {
slave-mode;
remote-endpoint = <&stm_out>;
};
};
+ /* The remaining input ports are not connected */
+ };
+ };
+
+ etm@10440000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x10440000 0 0x1000>;
+
+ cpu = <&cpu0>;
+ clocks = <&clk26mhz>;
+ clock-names = "apb_pclk";
+ port {
+ etm0_out: endpoint {
+ remote-endpoint = <&funnel_in_port0>;
+ };
+ };
+ };
+
+ etm@10540000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x10540000 0 0x1000>;
+
+ cpu = <&cpu1>;
+ clocks = <&clk26mhz>;
+ clock-names = "apb_pclk";
+ port {
+ etm1_out: endpoint {
+ remote-endpoint = <&funnel_in_port1>;
+ };
+ };
+ };
+
+ etm@10640000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x10640000 0 0x1000>;
+
+ cpu = <&cpu2>;
+ clocks = <&clk26mhz>;
+ clock-names = "apb_pclk";
+ port {
+ etm2_out: endpoint {
+ remote-endpoint = <&funnel_in_port2>;
+ };
+ };
+ };
+
+ etm@10740000 {
+ compatible = "arm,coresight-etm4x", "arm,primecell";
+ reg = <0 0x10740000 0 0x1000>;
+
+ cpu = <&cpu3>;
+ clocks = <&clk26mhz>;
+ clock-names = "apb_pclk";
+ port {
+ etm3_out: endpoint {
+ remote-endpoint = <&funnel_in_port3>;
+ };
};
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
index 0a3f40ecd06d..ce5d848251fa 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
@@ -32,6 +32,10 @@
};
};
+&can0 {
+ status = "okay";
+};
+
&gem0 {
status = "okay";
phy-handle = <&phy0>;
@@ -42,6 +46,91 @@
};
};
+&gpio {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+ clock-frequency = <400000>;
+ eeprom@54 {
+ compatible = "at,24c64";
+ reg = <0x54>;
+ };
+};
+
+&i2c1 {
+ status = "okay";
+ clock-frequency = <400000>;
+ eeprom@55 {
+ compatible = "at,24c64";
+ reg = <0x55>;
+ };
+};
+
+&sata {
+ status = "okay";
+ ceva,broken-gen2;
+};
+
+&sdhci0 {
+ status = "okay";
+};
+
+&sdhci1 {
+ status = "okay";
+};
+
+&spi0 {
+ status = "okay";
+ num-cs = <1>;
+ spi0_flash0: spi0_flash0@0 {
+ compatible = "m25p80";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ spi-max-frequency = <50000000>;
+ reg = <0>;
+
+ spi0_flash0@00000000 {
+ label = "spi0_flash0";
+ reg = <0x0 0x100000>;
+ };
+ };
+};
+
+&spi1 {
+ status = "okay";
+ num-cs = <1>;
+ spi1_flash0: spi1_flash0@0 {
+ compatible = "m25p80";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ spi-max-frequency = <50000000>;
+ reg = <0>;
+
+ spi1_flash0@00000000 {
+ label = "spi1_flash0";
+ reg = <0x0 0x100000>;
+ };
+ };
+};
+
&uart0 {
status = "okay";
};
+
+&usb0 {
+ status = "okay";
+ dr_mode = "peripheral";
+ maximum-speed = "high-speed";
+};
+
+&usb1 {
+ status = "okay";
+ dr_mode = "host";
+ maximum-speed = "high-speed";
+};
+
+&watchdog0 {
+ status = "okay";
+};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 11e0b00045cf..857eda5c7217 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -96,74 +96,38 @@
#size-cells = <1>;
ranges;
- misc_clk: misc_clk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <25000000>;
- };
-
- ttc0: timer@ff110000 {
- compatible = "cdns,ttc";
- status = "disabled";
- interrupt-parent = <&gic>;
- interrupts = <0 36 4>, <0 37 4>, <0 38 4>;
- reg = <0x0 0xff110000 0x1000>;
- clocks = <&misc_clk>;
- timer-width = <32>;
- };
-
- ttc1: timer@ff120000 {
- compatible = "cdns,ttc";
- status = "disabled";
- interrupt-parent = <&gic>;
- interrupts = <0 39 4>, <0 40 4>, <0 41 4>;
- reg = <0x0 0xff120000 0x1000>;
- clocks = <&misc_clk>;
- timer-width = <32>;
- };
-
- ttc2: timer@ff130000 {
- compatible = "cdns,ttc";
- status = "disabled";
- interrupt-parent = <&gic>;
- interrupts = <0 42 4>, <0 43 4>, <0 44 4>;
- reg = <0x0 0xff130000 0x1000>;
- clocks = <&misc_clk>;
- timer-width = <32>;
- };
-
- ttc3: timer@ff140000 {
- compatible = "cdns,ttc";
+ can0: can@ff060000 {
+ compatible = "xlnx,zynq-can-1.0";
status = "disabled";
+ clocks = <&misc_clk &misc_clk>;
+ clock-names = "can_clk", "pclk";
+ reg = <0x0 0xff060000 0x1000>;
+ interrupts = <0 23 4>;
interrupt-parent = <&gic>;
- interrupts = <0 45 4>, <0 46 4>, <0 47 4>;
- reg = <0x0 0xff140000 0x1000>;
- clocks = <&misc_clk>;
- timer-width = <32>;
+ tx-fifo-depth = <0x40>;
+ rx-fifo-depth = <0x40>;
};
- uart0: serial@ff000000 {
- compatible = "cdns,uart-r1p8";
+ can1: can@ff070000 {
+ compatible = "xlnx,zynq-can-1.0";
status = "disabled";
- interrupt-parent = <&gic>;
- interrupts = <0 21 4>;
- reg = <0x0 0xff000000 0x1000>;
- clock-names = "uart_clk", "pclk";
clocks = <&misc_clk &misc_clk>;
+ clock-names = "can_clk", "pclk";
+ reg = <0x0 0xff070000 0x1000>;
+ interrupts = <0 24 4>;
+ interrupt-parent = <&gic>;
+ tx-fifo-depth = <0x40>;
+ rx-fifo-depth = <0x40>;
};
- uart1: serial@ff010000 {
- compatible = "cdns,uart-r1p8";
- status = "disabled";
- interrupt-parent = <&gic>;
- interrupts = <0 22 4>;
- reg = <0x0 0xff010000 0x1000>;
- clock-names = "uart_clk", "pclk";
- clocks = <&misc_clk &misc_clk>;
+ misc_clk: misc_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <25000000>;
};
gpio: gpio@ff0a0000 {
- compatible = "xlnx,zynq-gpio-1.0";
+ compatible = "xlnx,zynqmp-gpio-1.0";
status = "disabled";
#gpio-cells = <0x2>;
clocks = <&misc_clk>;
@@ -220,30 +184,6 @@
#size-cells = <0>;
};
- spi0: spi@ff040000 {
- compatible = "cdns,spi-r1p6";
- status = "disabled";
- interrupt-parent = <&gic>;
- interrupts = <0 19 4>;
- reg = <0x0 0xff040000 0x1000>;
- clock-names = "ref_clk", "pclk";
- clocks = <&misc_clk &misc_clk>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
- spi1: spi@ff050000 {
- compatible = "cdns,spi-r1p6";
- status = "disabled";
- interrupt-parent = <&gic>;
- interrupts = <0 20 4>;
- reg = <0x0 0xff050000 0x1000>;
- clock-names = "ref_clk", "pclk";
- clocks = <&misc_clk &misc_clk>;
- #address-cells = <1>;
- #size-cells = <0>;
- };
-
i2c_clk: i2c_clk {
compatible = "fixed-clock";
#clock-cells = <0x0>;
@@ -272,6 +212,21 @@
#size-cells = <0>;
};
+ sata_clk: sata_clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <75000000>;
+ };
+
+ sata: ahci@fd0c0000 {
+ compatible = "ceva,ahci-1v84";
+ status = "disabled";
+ reg = <0x0 0xfd0c0000 0x2000>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 133 4>;
+ clocks = <&sata_clk>;
+ };
+
sdhci0: sdhci@ff160000 {
compatible = "arasan,sdhci-8.9a";
status = "disabled";
@@ -292,6 +247,122 @@
clocks = <&misc_clk>, <&misc_clk>;
};
+ smmu: smmu@fd800000 {
+ compatible = "arm,mmu-500";
+ reg = <0x0 0xfd800000 0x20000>;
+ #global-interrupts = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 157 4>,
+ <0 157 4>, <0 157 4>, <0 157 4>, <0 157 4>,
+ <0 157 4>, <0 157 4>, <0 157 4>, <0 157 4>,
+ <0 157 4>, <0 157 4>, <0 157 4>, <0 157 4>,
+ <0 157 4>, <0 157 4>, <0 157 4>, <0 157 4>;
+ };
+
+ spi0: spi@ff040000 {
+ compatible = "cdns,spi-r1p6";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 19 4>;
+ reg = <0x0 0xff040000 0x1000>;
+ clock-names = "ref_clk", "pclk";
+ clocks = <&misc_clk &misc_clk>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ spi1: spi@ff050000 {
+ compatible = "cdns,spi-r1p6";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 20 4>;
+ reg = <0x0 0xff050000 0x1000>;
+ clock-names = "ref_clk", "pclk";
+ clocks = <&misc_clk &misc_clk>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ ttc0: timer@ff110000 {
+ compatible = "cdns,ttc";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 36 4>, <0 37 4>, <0 38 4>;
+ reg = <0x0 0xff110000 0x1000>;
+ clocks = <&misc_clk>;
+ timer-width = <32>;
+ };
+
+ ttc1: timer@ff120000 {
+ compatible = "cdns,ttc";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 39 4>, <0 40 4>, <0 41 4>;
+ reg = <0x0 0xff120000 0x1000>;
+ clocks = <&misc_clk>;
+ timer-width = <32>;
+ };
+
+ ttc2: timer@ff130000 {
+ compatible = "cdns,ttc";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 42 4>, <0 43 4>, <0 44 4>;
+ reg = <0x0 0xff130000 0x1000>;
+ clocks = <&misc_clk>;
+ timer-width = <32>;
+ };
+
+ ttc3: timer@ff140000 {
+ compatible = "cdns,ttc";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 45 4>, <0 46 4>, <0 47 4>;
+ reg = <0x0 0xff140000 0x1000>;
+ clocks = <&misc_clk>;
+ timer-width = <32>;
+ };
+
+ uart0: serial@ff000000 {
+ compatible = "cdns,uart-r1p8";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 21 4>;
+ reg = <0x0 0xff000000 0x1000>;
+ clock-names = "uart_clk", "pclk";
+ clocks = <&misc_clk &misc_clk>;
+ };
+
+ uart1: serial@ff010000 {
+ compatible = "cdns,uart-r1p8";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 22 4>;
+ reg = <0x0 0xff010000 0x1000>;
+ clock-names = "uart_clk", "pclk";
+ clocks = <&misc_clk &misc_clk>;
+ };
+
+ usb0: usb@fe200000 {
+ compatible = "snps,dwc3";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 65 4>;
+ reg = <0x0 0xfe200000 0x40000>;
+ clock-names = "clk_xin", "clk_ahb";
+ clocks = <&misc_clk>, <&misc_clk>;
+ };
+
+ usb1: usb@fe300000 {
+ compatible = "snps,dwc3";
+ status = "disabled";
+ interrupt-parent = <&gic>;
+ interrupts = <0 70 4>;
+ reg = <0x0 0xfe300000 0x40000>;
+ clock-names = "clk_xin", "clk_ahb";
+ clocks = <&misc_clk>, <&misc_clk>;
+ };
+
watchdog0: watchdog@fd4d0000 {
compatible = "cdns,wdt-r1p2";
status = "disabled";
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 4e17e7ede33d..34d71dd86781 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -31,10 +31,13 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_BCM_IPROC=y
+CONFIG_ARCH_BERLIN=y
CONFIG_ARCH_EXYNOS7=y
CONFIG_ARCH_FSL_LS2085A=y
CONFIG_ARCH_HISI=y
CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_ROCKCHIP=y
CONFIG_ARCH_SEATTLE=y
CONFIG_ARCH_TEGRA=y
CONFIG_ARCH_TEGRA_132_SOC=y
@@ -102,6 +105,7 @@ CONFIG_SERIO_AMBAKMI=y
CONFIG_LEGACY_PTY_COUNT=16
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_8250_MT6577=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
diff --git a/arch/arm64/crypto/aes-ce-ccm-glue.c b/arch/arm64/crypto/aes-ce-ccm-glue.c
index 3303e8a7b837..f4bf2f2a014c 100644
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -124,7 +124,7 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
ce_aes_ccm_auth_data(mac, (u8 *)&ltag, ltag.len, &macp, ctx->key_enc,
num_rounds(ctx));
- scatterwalk_start(&walk, req->assoc);
+ scatterwalk_start(&walk, req->src);
do {
u32 n = scatterwalk_clamp(&walk, len);
@@ -151,6 +151,10 @@ static int ccm_encrypt(struct aead_request *req)
struct crypto_aes_ctx *ctx = crypto_aead_ctx(aead);
struct blkcipher_desc desc = { .info = req->iv };
struct blkcipher_walk walk;
+ struct scatterlist srcbuf[2];
+ struct scatterlist dstbuf[2];
+ struct scatterlist *src;
+ struct scatterlist *dst;
u8 __aligned(8) mac[AES_BLOCK_SIZE];
u8 buf[AES_BLOCK_SIZE];
u32 len = req->cryptlen;
@@ -168,7 +172,12 @@ static int ccm_encrypt(struct aead_request *req)
/* preserve the original iv for the final round */
memcpy(buf, req->iv, AES_BLOCK_SIZE);
- blkcipher_walk_init(&walk, req->dst, req->src, len);
+ src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst)
+ dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen);
+
+ blkcipher_walk_init(&walk, dst, src, len);
err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
AES_BLOCK_SIZE);
@@ -194,7 +203,7 @@ static int ccm_encrypt(struct aead_request *req)
return err;
/* copy authtag to end of dst */
- scatterwalk_map_and_copy(mac, req->dst, req->cryptlen,
+ scatterwalk_map_and_copy(mac, dst, req->cryptlen,
crypto_aead_authsize(aead), 1);
return 0;
@@ -207,6 +216,10 @@ static int ccm_decrypt(struct aead_request *req)
unsigned int authsize = crypto_aead_authsize(aead);
struct blkcipher_desc desc = { .info = req->iv };
struct blkcipher_walk walk;
+ struct scatterlist srcbuf[2];
+ struct scatterlist dstbuf[2];
+ struct scatterlist *src;
+ struct scatterlist *dst;
u8 __aligned(8) mac[AES_BLOCK_SIZE];
u8 buf[AES_BLOCK_SIZE];
u32 len = req->cryptlen - authsize;
@@ -224,7 +237,12 @@ static int ccm_decrypt(struct aead_request *req)
/* preserve the original iv for the final round */
memcpy(buf, req->iv, AES_BLOCK_SIZE);
- blkcipher_walk_init(&walk, req->dst, req->src, len);
+ src = scatterwalk_ffwd(srcbuf, req->src, req->assoclen);
+ dst = src;
+ if (req->src != req->dst)
+ dst = scatterwalk_ffwd(dstbuf, req->dst, req->assoclen);
+
+ blkcipher_walk_init(&walk, dst, src, len);
err = blkcipher_aead_walk_virt_block(&desc, &walk, aead,
AES_BLOCK_SIZE);
@@ -250,44 +268,42 @@ static int ccm_decrypt(struct aead_request *req)
return err;
/* compare calculated auth tag with the stored one */
- scatterwalk_map_and_copy(buf, req->src, req->cryptlen - authsize,
+ scatterwalk_map_and_copy(buf, src, req->cryptlen - authsize,
authsize, 0);
- if (memcmp(mac, buf, authsize))
+ if (crypto_memneq(mac, buf, authsize))
return -EBADMSG;
return 0;
}
-static struct crypto_alg ccm_aes_alg = {
- .cra_name = "ccm(aes)",
- .cra_driver_name = "ccm-aes-ce",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_AEAD,
- .cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct crypto_aes_ctx),
- .cra_alignmask = 7,
- .cra_type = &crypto_aead_type,
- .cra_module = THIS_MODULE,
- .cra_aead = {
- .ivsize = AES_BLOCK_SIZE,
- .maxauthsize = AES_BLOCK_SIZE,
- .setkey = ccm_setkey,
- .setauthsize = ccm_setauthsize,
- .encrypt = ccm_encrypt,
- .decrypt = ccm_decrypt,
- }
+static struct aead_alg ccm_aes_alg = {
+ .base = {
+ .cra_name = "ccm(aes)",
+ .cra_driver_name = "ccm-aes-ce",
+ .cra_priority = 300,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct crypto_aes_ctx),
+ .cra_alignmask = 7,
+ .cra_module = THIS_MODULE,
+ },
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = AES_BLOCK_SIZE,
+ .setkey = ccm_setkey,
+ .setauthsize = ccm_setauthsize,
+ .encrypt = ccm_encrypt,
+ .decrypt = ccm_decrypt,
};
static int __init aes_mod_init(void)
{
if (!(elf_hwcap & HWCAP_AES))
return -ENODEV;
- return crypto_register_alg(&ccm_aes_alg);
+ return crypto_register_aead(&ccm_aes_alg);
}
static void __exit aes_mod_exit(void)
{
- crypto_unregister_alg(&ccm_aes_alg);
+ crypto_unregister_aead(&ccm_aes_alg);
}
module_init(aes_mod_init);
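
The glue code above moves ccm(aes) from the old cra_aead interface to the new aead_alg one, where the associated data sits at the front of req->src and is skipped with scatterwalk_ffwd(). A caller-side sketch of that layout, using only the generic kernel crypto API; the helper name, key handling and scatterlist setup are illustrative and not part of this patch:

#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Encrypt cryptlen bytes that sit in sg right after assoclen bytes of AD.
 * sg must also have room for the authentication tag after the ciphertext. */
static int ccm_encrypt_sketch(struct scatterlist *sg, unsigned int assoclen,
                              unsigned int cryptlen, u8 *iv,
                              const u8 *key, unsigned int keylen)
{
        struct crypto_aead *tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
        struct aead_request *req;
        int err;

        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_aead_setkey(tfm, key, keylen);
        if (err)
                goto out_free_tfm;

        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        aead_request_set_callback(req, 0, NULL, NULL);
        /* AD first, plaintext second: the driver skips the AD internally,
         * as in scatterwalk_ffwd(srcbuf, req->src, req->assoclen) above. */
        aead_request_set_ad(req, assoclen);
        aead_request_set_crypt(req, sg, sg, cryptlen, iv);
        err = crypto_aead_encrypt(req);

        aead_request_free(req);
out_free_tfm:
        crypto_free_aead(tfm);
        return err;
}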
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 5aa892a12a0d..5f8a38dee274 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -12,11 +12,11 @@
#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H
-#include <linux/mm.h>
#include <linux/irqchip/arm-gic-acpi.h>
+#include <linux/mm.h>
+#include <linux/psci.h>
#include <asm/cputype.h>
-#include <asm/psci.h>
#include <asm/smp_plat.h>
/* Macros for consistency checks of the GICC subtable of MADT */
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index c385a0c4057f..d56ec0715157 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -3,6 +3,8 @@
#ifndef __ASSEMBLY__
+#include <linux/init.h>
+#include <linux/kconfig.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/stringify.h>
@@ -15,7 +17,7 @@ struct alt_instr {
u8 alt_len; /* size of new instruction(s), <= orig_len */
};
-void apply_alternatives_all(void);
+void __init apply_alternatives_all(void);
void apply_alternatives(void *start, size_t length);
void free_alternatives_memory(void);
@@ -40,7 +42,8 @@ void free_alternatives_memory(void);
* be fixed in a binutils release posterior to 2.25.51.0.2 (anything
* containing commit 4e4d08cf7399b606 or c1baaddf8861).
*/
-#define ALTERNATIVE(oldinstr, newinstr, feature) \
+#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled) \
+ ".if "__stringify(cfg_enabled)" == 1\n" \
"661:\n\t" \
oldinstr "\n" \
"662:\n" \
@@ -53,7 +56,11 @@ void free_alternatives_memory(void);
"664:\n\t" \
".popsection\n\t" \
".org . - (664b-663b) + (662b-661b)\n\t" \
- ".org . - (662b-661b) + (664b-663b)\n"
+ ".org . - (662b-661b) + (664b-663b)\n" \
+ ".endif\n"
+
+#define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...) \
+ __ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
#else
@@ -65,7 +72,8 @@ void free_alternatives_memory(void);
.byte \alt_len
.endm
-.macro alternative_insn insn1 insn2 cap
+.macro alternative_insn insn1, insn2, cap, enable = 1
+ .if \enable
661: \insn1
662: .pushsection .altinstructions, "a"
altinstruction_entry 661b, 663f, \cap, 662b-661b, 664f-663f
@@ -75,8 +83,70 @@ void free_alternatives_memory(void);
664: .popsection
.org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
+ .endif
+.endm
+
+/*
+ * Begin an alternative code sequence.
+ *
+ * The code that follows this macro will be assembled and linked as
+ * normal. There are no restrictions on this code.
+ */
+.macro alternative_if_not cap, enable = 1
+ .if \enable
+ .pushsection .altinstructions, "a"
+ altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
+ .popsection
+661:
+ .endif
+.endm
+
+/*
+ * Provide the alternative code sequence.
+ *
+ * The code that follows this macro is assembled into a special
+ * section to be used for dynamic patching. Code that follows this
+ * macro must:
+ *
+ * 1. Be exactly the same length (in bytes) as the default code
+ * sequence.
+ *
+ * 2. Not contain a branch target that is used outside of the
+ * alternative sequence it is defined in (branches into an
+ * alternative sequence are not fixed up).
+ */
+.macro alternative_else, enable = 1
+ .if \enable
+662: .pushsection .altinstr_replacement, "ax"
+663:
+ .endif
.endm
+/*
+ * Complete an alternative code sequence.
+ */
+.macro alternative_endif, enable = 1
+ .if \enable
+664: .popsection
+ .org . - (664b-663b) + (662b-661b)
+ .org . - (662b-661b) + (664b-663b)
+ .endif
+.endm
+
+#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
+ alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
+
+
#endif /* __ASSEMBLY__ */
+/*
+ * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature));
+ *
+ * Usage: asm(ALTERNATIVE(oldinstr, newinstr, feature, CONFIG_FOO));
+ * N.B. If CONFIG_FOO is specified, but not selected, the whole block
+ * will be omitted, including oldinstr.
+ */
+#define ALTERNATIVE(oldinstr, newinstr, ...) \
+ _ALTERNATIVE_CFG(oldinstr, newinstr, __VA_ARGS__, 1)
+
#endif /* __ASM_ALTERNATIVE_H */
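
For reference, both forms of the C-side macro in use. This is a minimal sketch only: the instruction strings and CONFIG_ARM64_PAN are placeholders, while ARM64_HAS_PAN is the capability added in cpufeature.h further down.

#include <asm/alternative.h>
#include <asm/cpufeature.h>

static inline void alternative_usage_sketch(void)
{
        /* Three arguments: the first sequence runs until the boot-time
         * patcher finds the CPU has the capability, then the second is
         * written over it.  Both must assemble to the same size. */
        asm(ALTERNATIVE("nop", "nop", ARM64_HAS_PAN));

        /* Four arguments: if CONFIG_ARM64_PAN is not selected, the whole
         * block, including the default instruction, is omitted at build
         * time, exactly as the usage comment above describes. */
        asm(ALTERNATIVE("nop", "nop", ARM64_HAS_PAN, CONFIG_ARM64_PAN));
}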
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 144b64ad96c3..b51f2cc22ca9 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -50,18 +50,6 @@
.endm
/*
- * Save/disable and restore interrupts.
- */
- .macro save_and_disable_irqs, olddaif
- mrs \olddaif, daif
- disable_irq
- .endm
-
- .macro restore_irqs, olddaif
- msr daif, \olddaif
- .endm
-
-/*
* Enable and disable debug exceptions.
*/
.macro disable_dbg
@@ -103,9 +91,7 @@
* SMP data memory barrier
*/
.macro smp_dmb, opt
-#ifdef CONFIG_SMP
dmb \opt
-#endif
.endm
#define USER(l, x...) \
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 7047051ded40..35a67783cfa0 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -24,233 +24,72 @@
#include <linux/types.h>
#include <asm/barrier.h>
-#include <asm/cmpxchg.h>
-
-#define ATOMIC_INIT(i) { (i) }
+#include <asm/lse.h>
#ifdef __KERNEL__
-/*
- * On ARM, ordinary assignment (str instruction) doesn't clear the local
- * strex/ldrex monitor on some implementations. The reason we can use it for
- * atomic_set() is the clrex or dummy strex done on every exception return.
- */
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
-#define atomic_set(v,i) (((v)->counter) = (i))
-
-/*
- * AArch64 UP and SMP safe atomic ops. We use load exclusive and
- * store exclusive to ensure that these are atomic. We may loop
- * to ensure that the update happens.
- */
-
-#define ATOMIC_OP(op, asm_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
-{ \
- unsigned long tmp; \
- int result; \
- \
- asm volatile("// atomic_" #op "\n" \
-"1: ldxr %w0, %2\n" \
-" " #asm_op " %w0, %w0, %w3\n" \
-" stxr %w1, %w0, %2\n" \
-" cbnz %w1, 1b" \
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i)); \
-} \
+#define __ARM64_IN_ATOMIC_IMPL
-#define ATOMIC_OP_RETURN(op, asm_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
-{ \
- unsigned long tmp; \
- int result; \
- \
- asm volatile("// atomic_" #op "_return\n" \
-"1: ldxr %w0, %2\n" \
-" " #asm_op " %w0, %w0, %w3\n" \
-" stlxr %w1, %w0, %2\n" \
-" cbnz %w1, 1b" \
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i) \
- : "memory"); \
- \
- smp_mb(); \
- return result; \
-}
-
-#define ATOMIC_OPS(op, asm_op) \
- ATOMIC_OP(op, asm_op) \
- ATOMIC_OP_RETURN(op, asm_op)
-
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, sub)
-
-#undef ATOMIC_OPS
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
-{
- unsigned long tmp;
- int oldval;
-
- smp_mb();
-
- asm volatile("// atomic_cmpxchg\n"
-"1: ldxr %w1, %2\n"
-" cmp %w1, %w3\n"
-" b.ne 2f\n"
-" stxr %w0, %w4, %2\n"
-" cbnz %w0, 1b\n"
-"2:"
- : "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
- : "Ir" (old), "r" (new)
- : "cc");
-
- smp_mb();
- return oldval;
-}
-
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE)
+#include <asm/atomic_lse.h>
+#else
+#include <asm/atomic_ll_sc.h>
+#endif
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
+#undef __ARM64_IN_ATOMIC_IMPL
- c = atomic_read(v);
- while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
- c = old;
- return c;
-}
+#include <asm/cmpxchg.h>
-#define atomic_inc(v) atomic_add(1, v)
-#define atomic_dec(v) atomic_sub(1, v)
+#define ___atomic_add_unless(v, a, u, sfx) \
+({ \
+ typeof((v)->counter) c, old; \
+ \
+ c = atomic##sfx##_read(v); \
+ while (c != (u) && \
+ (old = atomic##sfx##_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c; \
+ })
-#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v) (atomic_add_return(1, v))
-#define atomic_dec_return(v) (atomic_sub_return(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+#define ATOMIC_INIT(i) { (i) }
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+#define atomic_read(v) READ_ONCE((v)->counter)
+#define atomic_set(v, i) (((v)->counter) = (i))
+#define atomic_xchg(v, new) xchg(&((v)->counter), (new))
+#define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new))
+
+#define atomic_inc(v) atomic_add(1, (v))
+#define atomic_dec(v) atomic_sub(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
+#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
+#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_add_negative(i, v) (atomic_add_return((i), (v)) < 0)
+#define __atomic_add_unless(v, a, u) ___atomic_add_unless(v, a, u,)
+#define atomic_andnot atomic_andnot
/*
* 64-bit atomic operations.
*/
-#define ATOMIC64_INIT(i) { (i) }
-
-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
-#define atomic64_set(v,i) (((v)->counter) = (i))
-
-#define ATOMIC64_OP(op, asm_op) \
-static inline void atomic64_##op(long i, atomic64_t *v) \
-{ \
- long result; \
- unsigned long tmp; \
- \
- asm volatile("// atomic64_" #op "\n" \
-"1: ldxr %0, %2\n" \
-" " #asm_op " %0, %0, %3\n" \
-" stxr %w1, %0, %2\n" \
-" cbnz %w1, 1b" \
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i)); \
-} \
-
-#define ATOMIC64_OP_RETURN(op, asm_op) \
-static inline long atomic64_##op##_return(long i, atomic64_t *v) \
-{ \
- long result; \
- unsigned long tmp; \
- \
- asm volatile("// atomic64_" #op "_return\n" \
-"1: ldxr %0, %2\n" \
-" " #asm_op " %0, %0, %3\n" \
-" stlxr %w1, %0, %2\n" \
-" cbnz %w1, 1b" \
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
- : "Ir" (i) \
- : "memory"); \
- \
- smp_mb(); \
- return result; \
-}
-
-#define ATOMIC64_OPS(op, asm_op) \
- ATOMIC64_OP(op, asm_op) \
- ATOMIC64_OP_RETURN(op, asm_op)
-
-ATOMIC64_OPS(add, add)
-ATOMIC64_OPS(sub, sub)
-
-#undef ATOMIC64_OPS
-#undef ATOMIC64_OP_RETURN
-#undef ATOMIC64_OP
-
-static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
-{
- long oldval;
- unsigned long res;
-
- smp_mb();
-
- asm volatile("// atomic64_cmpxchg\n"
-"1: ldxr %1, %2\n"
-" cmp %1, %3\n"
-" b.ne 2f\n"
-" stxr %w0, %4, %2\n"
-" cbnz %w0, 1b\n"
-"2:"
- : "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
- : "Ir" (old), "r" (new)
- : "cc");
-
- smp_mb();
- return oldval;
-}
-
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
-
-static inline long atomic64_dec_if_positive(atomic64_t *v)
-{
- long result;
- unsigned long tmp;
-
- asm volatile("// atomic64_dec_if_positive\n"
-"1: ldxr %0, %2\n"
-" subs %0, %0, #1\n"
-" b.mi 2f\n"
-" stlxr %w1, %0, %2\n"
-" cbnz %w1, 1b\n"
-" dmb ish\n"
-"2:"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- :
- : "cc", "memory");
-
- return result;
-}
-
-static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
-{
- long c, old;
-
- c = atomic64_read(v);
- while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
- c = old;
-
- return c != u;
-}
-
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v) atomic64_add(1LL, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
+#define ATOMIC64_INIT ATOMIC_INIT
+#define atomic64_read atomic_read
+#define atomic64_set atomic_set
+#define atomic64_xchg atomic_xchg
+#define atomic64_cmpxchg atomic_cmpxchg
+
+#define atomic64_inc(v) atomic64_add(1, (v))
+#define atomic64_dec(v) atomic64_sub(1, (v))
+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v) atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+#define atomic64_dec_and_test(v) (atomic64_dec_return(v) == 0)
+#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
+#define atomic64_add_negative(i, v) (atomic64_add_return((i), (v)) < 0)
+#define atomic64_add_unless(v, a, u) (___atomic_add_unless(v, a, u, 64) != u)
+#define atomic64_andnot atomic64_andnot
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#endif
#endif
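
___atomic_add_unless() now generates both the 32-bit and 64-bit flavours from one body. Written out long-hand for readability, the 32-bit expansion is equivalent to the loop that the old open-coded __atomic_add_unless() used:

static inline int atomic_add_unless_sketch(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
                c = old;        /* lost the race: retry with the value we saw */

        return c;               /* old value; callers compare against u */
}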
diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h
new file mode 100644
index 000000000000..b3b5c4ae3800
--- /dev/null
+++ b/arch/arm64/include/asm/atomic_ll_sc.h
@@ -0,0 +1,247 @@
+/*
+ * Based on arch/arm/include/asm/atomic.h
+ *
+ * Copyright (C) 1996 Russell King.
+ * Copyright (C) 2002 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ATOMIC_LL_SC_H
+#define __ASM_ATOMIC_LL_SC_H
+
+#ifndef __ARM64_IN_ATOMIC_IMPL
+#error "please don't include this file directly"
+#endif
+
+/*
+ * AArch64 UP and SMP safe atomic ops. We use load exclusive and
+ * store exclusive to ensure that these are atomic. We may loop
+ * to ensure that the update happens.
+ *
+ * NOTE: these functions do *not* follow the PCS and must explicitly
+ * save any clobbered registers other than x0 (regardless of return
+ * value). This is achieved through -fcall-saved-* compiler flags for
+ * this file, which unfortunately don't work on a per-function basis
+ * (the optimize attribute silently ignores these options).
+ */
+
+#define ATOMIC_OP(op, asm_op) \
+__LL_SC_INLINE void \
+__LL_SC_PREFIX(atomic_##op(int i, atomic_t *v)) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ asm volatile("// atomic_" #op "\n" \
+" prfm pstl1strm, %2\n" \
+"1: ldxr %w0, %2\n" \
+" " #asm_op " %w0, %w0, %w3\n" \
+" stxr %w1, %w0, %2\n" \
+" cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i)); \
+} \
+__LL_SC_EXPORT(atomic_##op);
+
+#define ATOMIC_OP_RETURN(op, asm_op) \
+__LL_SC_INLINE int \
+__LL_SC_PREFIX(atomic_##op##_return(int i, atomic_t *v)) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ asm volatile("// atomic_" #op "_return\n" \
+" prfm pstl1strm, %2\n" \
+"1: ldxr %w0, %2\n" \
+" " #asm_op " %w0, %w0, %w3\n" \
+" stlxr %w1, %w0, %2\n" \
+" cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i) \
+ : "memory"); \
+ \
+ smp_mb(); \
+ return result; \
+} \
+__LL_SC_EXPORT(atomic_##op##_return);
+
+#define ATOMIC_OPS(op, asm_op) \
+ ATOMIC_OP(op, asm_op) \
+ ATOMIC_OP_RETURN(op, asm_op)
+
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
+
+ATOMIC_OP(and, and)
+ATOMIC_OP(andnot, bic)
+ATOMIC_OP(or, orr)
+ATOMIC_OP(xor, eor)
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#define ATOMIC64_OP(op, asm_op) \
+__LL_SC_INLINE void \
+__LL_SC_PREFIX(atomic64_##op(long i, atomic64_t *v)) \
+{ \
+ long result; \
+ unsigned long tmp; \
+ \
+ asm volatile("// atomic64_" #op "\n" \
+" prfm pstl1strm, %2\n" \
+"1: ldxr %0, %2\n" \
+" " #asm_op " %0, %0, %3\n" \
+" stxr %w1, %0, %2\n" \
+" cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i)); \
+} \
+__LL_SC_EXPORT(atomic64_##op);
+
+#define ATOMIC64_OP_RETURN(op, asm_op) \
+__LL_SC_INLINE long \
+__LL_SC_PREFIX(atomic64_##op##_return(long i, atomic64_t *v)) \
+{ \
+ long result; \
+ unsigned long tmp; \
+ \
+ asm volatile("// atomic64_" #op "_return\n" \
+" prfm pstl1strm, %2\n" \
+"1: ldxr %0, %2\n" \
+" " #asm_op " %0, %0, %3\n" \
+" stlxr %w1, %0, %2\n" \
+" cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i) \
+ : "memory"); \
+ \
+ smp_mb(); \
+ return result; \
+} \
+__LL_SC_EXPORT(atomic64_##op##_return);
+
+#define ATOMIC64_OPS(op, asm_op) \
+ ATOMIC64_OP(op, asm_op) \
+ ATOMIC64_OP_RETURN(op, asm_op)
+
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, sub)
+
+ATOMIC64_OP(and, and)
+ATOMIC64_OP(andnot, bic)
+ATOMIC64_OP(or, orr)
+ATOMIC64_OP(xor, eor)
+
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
+
+__LL_SC_INLINE long
+__LL_SC_PREFIX(atomic64_dec_if_positive(atomic64_t *v))
+{
+ long result;
+ unsigned long tmp;
+
+ asm volatile("// atomic64_dec_if_positive\n"
+" prfm pstl1strm, %2\n"
+"1: ldxr %0, %2\n"
+" subs %0, %0, #1\n"
+" b.lt 2f\n"
+" stlxr %w1, %0, %2\n"
+" cbnz %w1, 1b\n"
+" dmb ish\n"
+"2:"
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
+ :
+ : "cc", "memory");
+
+ return result;
+}
+__LL_SC_EXPORT(atomic64_dec_if_positive);
+
+#define __CMPXCHG_CASE(w, sz, name, mb, rel, cl) \
+__LL_SC_INLINE unsigned long \
+__LL_SC_PREFIX(__cmpxchg_case_##name(volatile void *ptr, \
+ unsigned long old, \
+ unsigned long new)) \
+{ \
+ unsigned long tmp, oldval; \
+ \
+ asm volatile( \
+ " prfm pstl1strm, %[v]\n" \
+ "1: ldxr" #sz "\t%" #w "[oldval], %[v]\n" \
+ " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
+ " cbnz %" #w "[tmp], 2f\n" \
+ " st" #rel "xr" #sz "\t%w[tmp], %" #w "[new], %[v]\n" \
+ " cbnz %w[tmp], 1b\n" \
+ " " #mb "\n" \
+ " mov %" #w "[oldval], %" #w "[old]\n" \
+ "2:" \
+ : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
+ [v] "+Q" (*(unsigned long *)ptr) \
+ : [old] "Lr" (old), [new] "r" (new) \
+ : cl); \
+ \
+ return oldval; \
+} \
+__LL_SC_EXPORT(__cmpxchg_case_##name);
+
+__CMPXCHG_CASE(w, b, 1, , , )
+__CMPXCHG_CASE(w, h, 2, , , )
+__CMPXCHG_CASE(w, , 4, , , )
+__CMPXCHG_CASE( , , 8, , , )
+__CMPXCHG_CASE(w, b, mb_1, dmb ish, l, "memory")
+__CMPXCHG_CASE(w, h, mb_2, dmb ish, l, "memory")
+__CMPXCHG_CASE(w, , mb_4, dmb ish, l, "memory")
+__CMPXCHG_CASE( , , mb_8, dmb ish, l, "memory")
+
+#undef __CMPXCHG_CASE
+
+#define __CMPXCHG_DBL(name, mb, rel, cl) \
+__LL_SC_INLINE int \
+__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1, \
+ unsigned long old2, \
+ unsigned long new1, \
+ unsigned long new2, \
+ volatile void *ptr)) \
+{ \
+ unsigned long tmp, ret; \
+ \
+ asm volatile("// __cmpxchg_double" #name "\n" \
+ " prfm pstl1strm, %2\n" \
+ "1: ldxp %0, %1, %2\n" \
+ " eor %0, %0, %3\n" \
+ " eor %1, %1, %4\n" \
+ " orr %1, %0, %1\n" \
+ " cbnz %1, 2f\n" \
+ " st" #rel "xp %w0, %5, %6, %2\n" \
+ " cbnz %w0, 1b\n" \
+ " " #mb "\n" \
+ "2:" \
+ : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
+ : "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
+ : cl); \
+ \
+ return ret; \
+} \
+__LL_SC_EXPORT(__cmpxchg_double##name);
+
+__CMPXCHG_DBL( , , , )
+__CMPXCHG_DBL(_mb, dmb ish, l, "memory")
+
+#undef __CMPXCHG_DBL
+
+#endif /* __ASM_ATOMIC_LL_SC_H */
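
As a reading aid, roughly what ATOMIC_OP(and, and) above expands to once the __LL_SC_INLINE/__LL_SC_PREFIX/__LL_SC_EXPORT wrappers (supplied by asm/lse.h, not shown in this hunk) are reduced to a plain static inline:

static inline void atomic_and(int i, atomic_t *v)
{
        unsigned long tmp;
        int result;

        asm volatile("// atomic_and\n"
        "       prfm    pstl1strm, %2\n"        /* prefetch the line for store */
        "1:     ldxr    %w0, %2\n"              /* load-exclusive the counter  */
        "       and     %w0, %w0, %w3\n"        /* apply the operation         */
        "       stxr    %w1, %w0, %2\n"         /* store-exclusive             */
        "       cbnz    %w1, 1b"                /* retry if exclusivity lost   */
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        : "Ir" (i));
}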
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
new file mode 100644
index 000000000000..55d740e63459
--- /dev/null
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -0,0 +1,391 @@
+/*
+ * Based on arch/arm/include/asm/atomic.h
+ *
+ * Copyright (C) 1996 Russell King.
+ * Copyright (C) 2002 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ATOMIC_LSE_H
+#define __ASM_ATOMIC_LSE_H
+
+#ifndef __ARM64_IN_ATOMIC_IMPL
+#error "please don't include this file directly"
+#endif
+
+#define __LL_SC_ATOMIC(op) __LL_SC_CALL(atomic_##op)
+
+static inline void atomic_andnot(int i, atomic_t *v)
+{
+ register int w0 asm ("w0") = i;
+ register atomic_t *x1 asm ("x1") = v;
+
+ asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(andnot),
+ " stclr %w[i], %[v]\n")
+ : [i] "+r" (w0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : "x30");
+}
+
+static inline void atomic_or(int i, atomic_t *v)
+{
+ register int w0 asm ("w0") = i;
+ register atomic_t *x1 asm ("x1") = v;
+
+ asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(or),
+ " stset %w[i], %[v]\n")
+ : [i] "+r" (w0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : "x30");
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+ register int w0 asm ("w0") = i;
+ register atomic_t *x1 asm ("x1") = v;
+
+ asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(xor),
+ " steor %w[i], %[v]\n")
+ : [i] "+r" (w0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : "x30");
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ register int w0 asm ("w0") = i;
+ register atomic_t *x1 asm ("x1") = v;
+
+ asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(add),
+ " stadd %w[i], %[v]\n")
+ : [i] "+r" (w0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : "x30");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ register int w0 asm ("w0") = i;
+ register atomic_t *x1 asm ("x1") = v;
+
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+ " nop\n"
+ __LL_SC_ATOMIC(add_return),
+ /* LSE atomics */
+ " ldaddal %w[i], w30, %[v]\n"
+ " add %w[i], %w[i], w30")
+ : [i] "+r" (w0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : "x30", "memory");
+
+ return w0;
+}
+
+static inline void atomic_and(int i, atomic_t *v)
+{
+ register int w0 asm ("w0") = i;
+ register atomic_t *x1 asm ("x1") = v;
+
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+ " nop\n"
+ __LL_SC_ATOMIC(and),
+ /* LSE atomics */
+ " mvn %w[i], %w[i]\n"
+ " stclr %w[i], %[v]")
+ : [i] "+r" (w0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : "x30");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ register int w0 asm ("w0") = i;
+ register atomic_t *x1 asm ("x1") = v;
+
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+ " nop\n"
+ __LL_SC_ATOMIC(sub),
+ /* LSE atomics */
+ " neg %w[i], %w[i]\n"
+ " stadd %w[i], %[v]")
+ : [i] "+r" (w0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : "x30");
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ register int w0 asm ("w0") = i;
+ register atomic_t *x1 asm ("x1") = v;
+
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+ " nop\n"
+ __LL_SC_ATOMIC(sub_return)
+ " nop",
+ /* LSE atomics */
+ " neg %w[i], %w[i]\n"
+ " ldaddal %w[i], w30, %[v]\n"
+ " add %w[i], %w[i], w30")
+ : [i] "+r" (w0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : "x30", "memory");
+
+ return w0;
+}
+
+#undef __LL_SC_ATOMIC
+
+#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(atomic64_##op)
+
+static inline void atomic64_andnot(long i, atomic64_t *v)
+{
+ register long x0 asm ("x0") = i;
+ register atomic64_t *x1 asm ("x1") = v;
+
+ asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(andnot),
+ " stclr %[i], %[v]\n")
+ : [i] "+r" (x0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : "x30");
+}
+
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+ register long x0 asm ("x0") = i;
+ register atomic64_t *x1 asm ("x1") = v;
+
+ asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(or),
+ " stset %[i], %[v]\n")
+ : [i] "+r" (x0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : "x30");
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+ register long x0 asm ("x0") = i;
+ register atomic64_t *x1 asm ("x1") = v;
+
+ asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(xor),
+ " steor %[i], %[v]\n")
+ : [i] "+r" (x0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : "x30");
+}
+
+static inline void atomic64_add(long i, atomic64_t *v)
+{
+ register long x0 asm ("x0") = i;
+ register atomic64_t *x1 asm ("x1") = v;
+
+ asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(add),
+ " stadd %[i], %[v]\n")
+ : [i] "+r" (x0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : "x30");
+}
+
+static inline long atomic64_add_return(long i, atomic64_t *v)
+{
+ register long x0 asm ("x0") = i;
+ register atomic64_t *x1 asm ("x1") = v;
+
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+ " nop\n"
+ __LL_SC_ATOMIC64(add_return),
+ /* LSE atomics */
+ " ldaddal %[i], x30, %[v]\n"
+ " add %[i], %[i], x30")
+ : [i] "+r" (x0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : "x30", "memory");
+
+ return x0;
+}
+
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+ register long x0 asm ("x0") = i;
+ register atomic64_t *x1 asm ("x1") = v;
+
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+ " nop\n"
+ __LL_SC_ATOMIC64(and),
+ /* LSE atomics */
+ " mvn %[i], %[i]\n"
+ " stclr %[i], %[v]")
+ : [i] "+r" (x0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : "x30");
+}
+
+static inline void atomic64_sub(long i, atomic64_t *v)
+{
+ register long x0 asm ("x0") = i;
+ register atomic64_t *x1 asm ("x1") = v;
+
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+ " nop\n"
+ __LL_SC_ATOMIC64(sub),
+ /* LSE atomics */
+ " neg %[i], %[i]\n"
+ " stadd %[i], %[v]")
+ : [i] "+r" (x0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : "x30");
+}
+
+static inline long atomic64_sub_return(long i, atomic64_t *v)
+{
+ register long x0 asm ("x0") = i;
+ register atomic64_t *x1 asm ("x1") = v;
+
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+ " nop\n"
+ __LL_SC_ATOMIC64(sub_return)
+ " nop",
+ /* LSE atomics */
+ " neg %[i], %[i]\n"
+ " ldaddal %[i], x30, %[v]\n"
+ " add %[i], %[i], x30")
+ : [i] "+r" (x0), [v] "+Q" (v->counter)
+ : "r" (x1)
+ : "x30", "memory");
+
+ return x0;
+}
+
+static inline long atomic64_dec_if_positive(atomic64_t *v)
+{
+ register long x0 asm ("x0") = (long)v;
+
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+ " nop\n"
+ __LL_SC_ATOMIC64(dec_if_positive)
+ " nop\n"
+ " nop\n"
+ " nop\n"
+ " nop\n"
+ " nop",
+ /* LSE atomics */
+ "1: ldr x30, %[v]\n"
+ " subs %[ret], x30, #1\n"
+ " b.lt 2f\n"
+ " casal x30, %[ret], %[v]\n"
+ " sub x30, x30, #1\n"
+ " sub x30, x30, %[ret]\n"
+ " cbnz x30, 1b\n"
+ "2:")
+ : [ret] "+&r" (x0), [v] "+Q" (v->counter)
+ :
+ : "x30", "cc", "memory");
+
+ return x0;
+}
+
+#undef __LL_SC_ATOMIC64
+
+#define __LL_SC_CMPXCHG(op) __LL_SC_CALL(__cmpxchg_case_##op)
+
+#define __CMPXCHG_CASE(w, sz, name, mb, cl...) \
+static inline unsigned long __cmpxchg_case_##name(volatile void *ptr, \
+ unsigned long old, \
+ unsigned long new) \
+{ \
+ register unsigned long x0 asm ("x0") = (unsigned long)ptr; \
+ register unsigned long x1 asm ("x1") = old; \
+ register unsigned long x2 asm ("x2") = new; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ " nop\n" \
+ __LL_SC_CMPXCHG(name) \
+ " nop", \
+ /* LSE atomics */ \
+ " mov " #w "30, %" #w "[old]\n" \
+ " cas" #mb #sz "\t" #w "30, %" #w "[new], %[v]\n" \
+ " mov %" #w "[ret], " #w "30") \
+ : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \
+ : [old] "r" (x1), [new] "r" (x2) \
+ : "x30" , ##cl); \
+ \
+ return x0; \
+}
+
+__CMPXCHG_CASE(w, b, 1, )
+__CMPXCHG_CASE(w, h, 2, )
+__CMPXCHG_CASE(w, , 4, )
+__CMPXCHG_CASE(x, , 8, )
+__CMPXCHG_CASE(w, b, mb_1, al, "memory")
+__CMPXCHG_CASE(w, h, mb_2, al, "memory")
+__CMPXCHG_CASE(w, , mb_4, al, "memory")
+__CMPXCHG_CASE(x, , mb_8, al, "memory")
+
+#undef __LL_SC_CMPXCHG
+#undef __CMPXCHG_CASE
+
+#define __LL_SC_CMPXCHG_DBL(op) __LL_SC_CALL(__cmpxchg_double##op)
+
+#define __CMPXCHG_DBL(name, mb, cl...) \
+static inline int __cmpxchg_double##name(unsigned long old1, \
+ unsigned long old2, \
+ unsigned long new1, \
+ unsigned long new2, \
+ volatile void *ptr) \
+{ \
+ unsigned long oldval1 = old1; \
+ unsigned long oldval2 = old2; \
+ register unsigned long x0 asm ("x0") = old1; \
+ register unsigned long x1 asm ("x1") = old2; \
+ register unsigned long x2 asm ("x2") = new1; \
+ register unsigned long x3 asm ("x3") = new2; \
+ register unsigned long x4 asm ("x4") = (unsigned long)ptr; \
+ \
+ asm volatile(ARM64_LSE_ATOMIC_INSN( \
+ /* LL/SC */ \
+ " nop\n" \
+ " nop\n" \
+ " nop\n" \
+ __LL_SC_CMPXCHG_DBL(name), \
+ /* LSE atomics */ \
+ " casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
+ " eor %[old1], %[old1], %[oldval1]\n" \
+ " eor %[old2], %[old2], %[oldval2]\n" \
+ " orr %[old1], %[old1], %[old2]") \
+ : [old1] "+r" (x0), [old2] "+r" (x1), \
+ [v] "+Q" (*(unsigned long *)ptr) \
+ : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
+ [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
+ : "x30" , ##cl); \
+ \
+ return x0; \
+}
+
+__CMPXCHG_DBL( , )
+__CMPXCHG_DBL(_mb, al, "memory")
+
+#undef __LL_SC_CMPXCHG_DBL
+#undef __CMPXCHG_DBL
+
+#endif /* __ASM_ATOMIC_LSE_H */
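
The LSE instructions and the out-of-line LL/SC calls above are switched between through the alternatives framework. The ARM64_LSE_ATOMIC_INSN() wrapper comes from asm/lse.h, which is part of this series but not shown in this hunk; it is assumed to boil down to something like the following, keyed on the same config guard used in atomic.h:

/* Assumed shape only; see asm/lse.h in this series for the real definition. */
#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE)
#define ARM64_LSE_ATOMIC_INSN(llsc, lse)                                \
        ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
#else
#define ARM64_LSE_ATOMIC_INSN(llsc, lse)        llsc
#endif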
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 0fa47c4275cb..624f9679f4b0 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -35,28 +35,6 @@
#define dma_rmb() dmb(oshld)
#define dma_wmb() dmb(oshst)
-#ifndef CONFIG_SMP
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-
-#define smp_store_release(p, v) \
-do { \
- compiletime_assert_atomic_type(*p); \
- barrier(); \
- ACCESS_ONCE(*p) = (v); \
-} while (0)
-
-#define smp_load_acquire(p) \
-({ \
- typeof(*p) ___p1 = ACCESS_ONCE(*p); \
- compiletime_assert_atomic_type(*p); \
- barrier(); \
- ___p1; \
-})
-
-#else
-
#define smp_mb() dmb(ish)
#define smp_rmb() dmb(ishld)
#define smp_wmb() dmb(ishst)
@@ -109,8 +87,6 @@ do { \
___p1; \
})
-#endif
-
#define read_barrier_depends() do { } while(0)
#define smp_read_barrier_depends() do { } while(0)
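
With the !CONFIG_SMP definitions removed, the dmb-based barriers above apply to every configuration. The message-passing pattern these primitives exist for, as a small usage sketch (all names illustrative):

#include <asm/barrier.h>

struct message {
        int payload;
};

static struct message msg;
static int msg_ready;

static void producer(void)
{
        msg.payload = 42;
        smp_store_release(&msg_ready, 1);  /* payload is visible before the flag */
}

static int consumer(void)
{
        if (smp_load_acquire(&msg_ready))  /* flag is read before the payload */
                return msg.payload;
        return -1;
}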
diff --git a/arch/arm64/include/asm/bug.h b/arch/arm64/include/asm/bug.h
new file mode 100644
index 000000000000..4a748ce9ba1a
--- /dev/null
+++ b/arch/arm64/include/asm/bug.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2015 ARM Limited
+ * Author: Dave Martin <Dave.Martin@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ARCH_ARM64_ASM_BUG_H
+#define _ARCH_ARM64_ASM_BUG_H
+
+#include <asm/debug-monitors.h>
+
+#ifdef CONFIG_GENERIC_BUG
+#define HAVE_ARCH_BUG
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define _BUGVERBOSE_LOCATION(file, line) __BUGVERBOSE_LOCATION(file, line)
+#define __BUGVERBOSE_LOCATION(file, line) \
+ ".pushsection .rodata.str,\"aMS\",@progbits,1\n" \
+ "2: .string \"" file "\"\n\t" \
+ ".popsection\n\t" \
+ \
+ ".long 2b - 0b\n\t" \
+ ".short " #line "\n\t"
+#else
+#define _BUGVERBOSE_LOCATION(file, line)
+#endif
+
+#define _BUG_FLAGS(flags) __BUG_FLAGS(flags)
+
+#define __BUG_FLAGS(flags) asm volatile ( \
+ ".pushsection __bug_table,\"a\"\n\t" \
+ ".align 2\n\t" \
+ "0: .long 1f - 0b\n\t" \
+_BUGVERBOSE_LOCATION(__FILE__, __LINE__) \
+ ".short " #flags "\n\t" \
+ ".popsection\n" \
+ \
+ "1: brk %[imm]" \
+ :: [imm] "i" (BUG_BRK_IMM) \
+)
+
+#define BUG() do { \
+ _BUG_FLAGS(0); \
+ unreachable(); \
+} while (0)
+
+#define __WARN_TAINT(taint) _BUG_FLAGS(BUGFLAG_TAINT(taint))
+
+#endif /* ! CONFIG_GENERIC_BUG */
+
+#include <asm-generic/bug.h>
+
+#endif /* ! _ARCH_ARM64_ASM_BUG_H */
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index d8c25b7b18fb..899e9f1d19e4 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -21,7 +21,9 @@
#include <linux/bug.h>
#include <linux/mmdebug.h>
+#include <asm/atomic.h>
#include <asm/barrier.h>
+#include <asm/lse.h>
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
@@ -29,37 +31,73 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
switch (size) {
case 1:
- asm volatile("// __xchg1\n"
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+ " prfm pstl1strm, %2\n"
"1: ldxrb %w0, %2\n"
" stlxrb %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
+ " dmb ish",
+ /* LSE atomics */
+ " nop\n"
+ " nop\n"
+ " swpalb %w3, %w0, %2\n"
+ " nop\n"
+ " nop")
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
: "r" (x)
: "memory");
break;
case 2:
- asm volatile("// __xchg2\n"
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+ " prfm pstl1strm, %2\n"
"1: ldxrh %w0, %2\n"
" stlxrh %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
+ " dmb ish",
+ /* LSE atomics */
+ " nop\n"
+ " nop\n"
+ " swpalh %w3, %w0, %2\n"
+ " nop\n"
+ " nop")
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
: "r" (x)
: "memory");
break;
case 4:
- asm volatile("// __xchg4\n"
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+ " prfm pstl1strm, %2\n"
"1: ldxr %w0, %2\n"
" stlxr %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
+ " dmb ish",
+ /* LSE atomics */
+ " nop\n"
+ " nop\n"
+ " swpal %w3, %w0, %2\n"
+ " nop\n"
+ " nop")
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
: "r" (x)
: "memory");
break;
case 8:
- asm volatile("// __xchg8\n"
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+ " prfm pstl1strm, %2\n"
"1: ldxr %0, %2\n"
" stlxr %w1, %3, %2\n"
" cbnz %w1, 1b\n"
+ " dmb ish",
+ /* LSE atomics */
+ " nop\n"
+ " nop\n"
+ " swpal %3, %0, %2\n"
+ " nop\n"
+ " nop")
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
: "r" (x)
: "memory");
@@ -68,7 +106,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
BUILD_BUG();
}
- smp_mb();
return ret;
}
@@ -83,131 +120,39 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
- unsigned long oldval = 0, res;
-
switch (size) {
case 1:
- do {
- asm volatile("// __cmpxchg1\n"
- " ldxrb %w1, %2\n"
- " mov %w0, #0\n"
- " cmp %w1, %w3\n"
- " b.ne 1f\n"
- " stxrb %w0, %w4, %2\n"
- "1:\n"
- : "=&r" (res), "=&r" (oldval), "+Q" (*(u8 *)ptr)
- : "Ir" (old), "r" (new)
- : "cc");
- } while (res);
- break;
-
+ return __cmpxchg_case_1(ptr, (u8)old, new);
case 2:
- do {
- asm volatile("// __cmpxchg2\n"
- " ldxrh %w1, %2\n"
- " mov %w0, #0\n"
- " cmp %w1, %w3\n"
- " b.ne 1f\n"
- " stxrh %w0, %w4, %2\n"
- "1:\n"
- : "=&r" (res), "=&r" (oldval), "+Q" (*(u16 *)ptr)
- : "Ir" (old), "r" (new)
- : "cc");
- } while (res);
- break;
-
+ return __cmpxchg_case_2(ptr, (u16)old, new);
case 4:
- do {
- asm volatile("// __cmpxchg4\n"
- " ldxr %w1, %2\n"
- " mov %w0, #0\n"
- " cmp %w1, %w3\n"
- " b.ne 1f\n"
- " stxr %w0, %w4, %2\n"
- "1:\n"
- : "=&r" (res), "=&r" (oldval), "+Q" (*(u32 *)ptr)
- : "Ir" (old), "r" (new)
- : "cc");
- } while (res);
- break;
-
+ return __cmpxchg_case_4(ptr, old, new);
case 8:
- do {
- asm volatile("// __cmpxchg8\n"
- " ldxr %1, %2\n"
- " mov %w0, #0\n"
- " cmp %1, %3\n"
- " b.ne 1f\n"
- " stxr %w0, %4, %2\n"
- "1:\n"
- : "=&r" (res), "=&r" (oldval), "+Q" (*(u64 *)ptr)
- : "Ir" (old), "r" (new)
- : "cc");
- } while (res);
- break;
-
+ return __cmpxchg_case_8(ptr, old, new);
default:
BUILD_BUG();
}
- return oldval;
+ unreachable();
}
-#define system_has_cmpxchg_double() 1
-
-static inline int __cmpxchg_double(volatile void *ptr1, volatile void *ptr2,
- unsigned long old1, unsigned long old2,
- unsigned long new1, unsigned long new2, int size)
+static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
{
- unsigned long loop, lost;
-
switch (size) {
+ case 1:
+ return __cmpxchg_case_mb_1(ptr, (u8)old, new);
+ case 2:
+ return __cmpxchg_case_mb_2(ptr, (u16)old, new);
+ case 4:
+ return __cmpxchg_case_mb_4(ptr, old, new);
case 8:
- VM_BUG_ON((unsigned long *)ptr2 - (unsigned long *)ptr1 != 1);
- do {
- asm volatile("// __cmpxchg_double8\n"
- " ldxp %0, %1, %2\n"
- " eor %0, %0, %3\n"
- " eor %1, %1, %4\n"
- " orr %1, %0, %1\n"
- " mov %w0, #0\n"
- " cbnz %1, 1f\n"
- " stxp %w0, %5, %6, %2\n"
- "1:\n"
- : "=&r"(loop), "=&r"(lost), "+Q" (*(u64 *)ptr1)
- : "r" (old1), "r"(old2), "r"(new1), "r"(new2));
- } while (loop);
- break;
+ return __cmpxchg_case_mb_8(ptr, old, new);
default:
BUILD_BUG();
}
- return !lost;
-}
-
-static inline int __cmpxchg_double_mb(volatile void *ptr1, volatile void *ptr2,
- unsigned long old1, unsigned long old2,
- unsigned long new1, unsigned long new2, int size)
-{
- int ret;
-
- smp_mb();
- ret = __cmpxchg_double(ptr1, ptr2, old1, old2, new1, new2, size);
- smp_mb();
-
- return ret;
-}
-
-static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
- unsigned long new, int size)
-{
- unsigned long ret;
-
- smp_mb();
- ret = __cmpxchg(ptr, old, new, size);
- smp_mb();
-
- return ret;
+ unreachable();
}
#define cmpxchg(ptr, o, n) \
@@ -228,21 +173,32 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
__ret; \
})
+#define system_has_cmpxchg_double() 1
+
+#define __cmpxchg_double_check(ptr1, ptr2) \
+({ \
+ if (sizeof(*(ptr1)) != 8) \
+ BUILD_BUG(); \
+ VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1); \
+})
+
#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2) \
({\
int __ret;\
- __ret = __cmpxchg_double_mb((ptr1), (ptr2), (unsigned long)(o1), \
- (unsigned long)(o2), (unsigned long)(n1), \
- (unsigned long)(n2), sizeof(*(ptr1)));\
+ __cmpxchg_double_check(ptr1, ptr2); \
+ __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2), \
+ (unsigned long)(n1), (unsigned long)(n2), \
+ ptr1); \
__ret; \
})
#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2) \
({\
int __ret;\
- __ret = __cmpxchg_double((ptr1), (ptr2), (unsigned long)(o1), \
- (unsigned long)(o2), (unsigned long)(n1), \
- (unsigned long)(n2), sizeof(*(ptr1)));\
+ __cmpxchg_double_check(ptr1, ptr2); \
+ __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2), \
+ (unsigned long)(n1), (unsigned long)(n2), \
+ ptr1); \
__ret; \
})
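
cmpxchg_double() keeps its six-argument caller interface; only the plumbing changes, with the size and adjacency checks moving into __cmpxchg_double_check() and the operands now passed ahead of the pointer. A usage sketch with an illustrative structure:

/* The two words must be adjacent and suitably aligned for the paired
 * exclusives / CASP; 16-byte alignment is the safe choice. */
struct freelist_head {
        unsigned long ptr;
        unsigned long counter;
} __aligned(16);

static int pop_head(struct freelist_head *h,
                    unsigned long old_ptr, unsigned long old_cnt,
                    unsigned long new_ptr, unsigned long new_cnt)
{
        /* Returns non-zero when both words still held the old values and
         * were replaced atomically. */
        return cmpxchg_double(&h->ptr, &h->counter,
                              old_ptr, old_cnt, new_ptr, new_cnt);
}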
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index c1044218a63a..171570702bb8 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -25,15 +25,20 @@
#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
#define ARM64_WORKAROUND_845719 2
#define ARM64_HAS_SYSREG_GIC_CPUIF 3
+#define ARM64_HAS_PAN 4
+#define ARM64_HAS_LSE_ATOMICS 5
-#define ARM64_NCAPS 4
+#define ARM64_NCAPS 6
#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+
struct arm64_cpu_capabilities {
const char *desc;
u16 capability;
bool (*matches)(const struct arm64_cpu_capabilities *);
+ void (*enable)(void);
union {
struct { /* To be used for erratum handling only */
u32 midr_model;
@@ -41,8 +46,8 @@ struct arm64_cpu_capabilities {
};
struct { /* Feature register checking */
- u64 register_mask;
- u64 register_value;
+ int field_pos;
+ int min_field_value;
};
};
};
@@ -70,6 +75,13 @@ static inline void cpus_set_cap(unsigned int num)
__set_bit(num, cpu_hwcaps);
}
+static inline int __attribute_const__ cpuid_feature_extract_field(u64 features,
+ int field)
+{
+ return (s64)(features << (64 - 4 - field)) >> (64 - 4);
+}
+
+
void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
const char *info);
void check_local_cpu_errata(void);
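
Capability matching now works on signed 4-bit ID register fields (field_pos/min_field_value) rather than mask/value pairs. A sketch of reading one such field with the new helper, assuming the read_cpuid() accessor from cputype.h; the register and bit position are only illustrative:

static bool cpu_has_example_feature(void)
{
        u64 reg = read_cpuid(ID_AA64ISAR0_EL1);           /* any 64-bit ID register  */
        int field = cpuid_feature_extract_field(reg, 20); /* 4-bit field at [23:20]  */

        /* Fields are sign-extended: "not implemented" encodings such as 0xf
         * come back negative, so a ">= min_field_value" test rejects them. */
        return field >= 1;
}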
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index a84ec605bed8..ee6403df9fe4 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -81,9 +81,6 @@
#define ID_AA64MMFR0_BIGEND(mmfr0) \
(((mmfr0) & ID_AA64MMFR0_BIGEND_MASK) >> ID_AA64MMFR0_BIGEND_SHIFT)
-#define SCTLR_EL1_CP15BEN (0x1 << 5)
-#define SCTLR_EL1_SED (0x1 << 8)
-
#ifndef __ASSEMBLY__
/*
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 40ec68aa6870..279c85b5ec09 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -18,6 +18,12 @@
#ifdef __KERNEL__
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <asm/esr.h>
+#include <asm/insn.h>
+#include <asm/ptrace.h>
+
/* Low-level stepping controls. */
#define DBG_MDSCR_SS (1 << 0)
#define DBG_SPSR_SS (1 << 21)
@@ -38,12 +44,7 @@
/*
* Break point instruction encoding
*/
-#define BREAK_INSTR_SIZE 4
-
-/*
- * ESR values expected for dynamic and compile time BRK instruction
- */
-#define DBG_ESR_VAL_BRK(x) (0xf2000000 | ((x) & 0xfffff))
+#define BREAK_INSTR_SIZE AARCH64_INSN_SIZE
/*
* #imm16 values used for BRK instruction generation
@@ -51,10 +52,12 @@
* 0x100: for triggering a fault on purpose (reserved)
* 0x400: for dynamic BRK instruction
* 0x401: for compile time BRK instruction
+ * 0x800: kernel-mode BUG() and WARN() traps
*/
#define FAULT_BRK_IMM 0x100
#define KGDB_DYN_DBG_BRK_IMM 0x400
#define KGDB_COMPILED_DBG_BRK_IMM 0x401
+#define BUG_BRK_IMM 0x800
/*
* BRK instruction encoding
@@ -68,25 +71,10 @@
*/
#define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))
-/*
- * Extract byte from BRK instruction
- */
-#define KGDB_DYN_DBG_BRK_INS_BYTE(x) \
- ((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
-
-/*
- * Extract byte from BRK #imm16
- */
-#define KGBD_DYN_DBG_BRK_IMM_BYTE(x) \
- (((((KGDB_DYN_DBG_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
-
-#define KGDB_DYN_DBG_BRK_BYTE(x) \
- (KGDB_DYN_DBG_BRK_INS_BYTE(x) | KGBD_DYN_DBG_BRK_IMM_BYTE(x))
-
-#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DBG_BRK_BYTE(0)
-#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DBG_BRK_BYTE(1)
-#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DBG_BRK_BYTE(2)
-#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DBG_BRK_BYTE(3)
+#define AARCH64_BREAK_KGDB_DYN_DBG \
+ (AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))
+#define KGDB_DYN_BRK_INS_BYTE(x) \
+ ((AARCH64_BREAK_KGDB_DYN_DBG >> (8 * (x))) & 0xff)
#define CACHE_FLUSH_IS_SAFE 1
@@ -127,13 +115,13 @@ void unregister_break_hook(struct break_hook *hook);
u8 debug_monitors_arch(void);
-enum debug_el {
+enum dbg_active_el {
DBG_ACTIVE_EL0 = 0,
DBG_ACTIVE_EL1,
};
-void enable_debug_monitors(enum debug_el el);
-void disable_debug_monitors(enum debug_el el);
+void enable_debug_monitors(enum dbg_active_el el);
+void disable_debug_monitors(enum dbg_active_el el);
void user_rewind_single_step(struct task_struct *task);
void user_fastforward_single_step(struct task_struct *task);
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index f0d6d0bfe55c..cfdb34bedbcd 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -22,8 +22,6 @@
#include <linux/types.h>
#include <linux/vmalloc.h>
-#include <asm-generic/dma-coherent.h>
-
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
@@ -86,28 +84,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
return (phys_addr_t)dev_addr;
}
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dev_addr)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- debug_dma_mapping_error(dev, dev_addr);
- return ops->mapping_error(dev, dev_addr);
-}
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- return ops->dma_supported(dev, mask);
-}
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, mask))
- return -EIO;
- *dev->dma_mask = mask;
-
- return 0;
-}
-
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
if (!dev->dma_mask)
@@ -120,50 +96,5 @@ static inline void dma_mark_clean(void *addr, size_t size)
{
}
-#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
-#define dma_free_coherent(d, s, h, f) dma_free_attrs(d, s, h, f, NULL)
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
- void *vaddr;
-
- if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr))
- return vaddr;
-
- vaddr = ops->alloc(dev, size, dma_handle, flags, attrs);
- debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr);
- return vaddr;
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dev_addr,
- struct dma_attrs *attrs)
-{
- struct dma_map_ops *ops = get_dma_ops(dev);
-
- if (dma_release_from_coherent(dev, get_order(size), vaddr))
- return;
-
- debug_dma_free_coherent(dev, size, vaddr, dev_addr);
- ops->free(dev, size, vaddr, dev_addr, attrs);
-}
-
-/*
- * There is no dma_cache_sync() implementation, so just return NULL here.
- */
-static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t flags)
-{
- return NULL;
-}
-
-static inline void dma_free_noncoherent(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle)
-{
-}
-
#endif /* __KERNEL__ */
#endif /* __ASM_DMA_MAPPING_H */
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 70522450ca23..77eeb2cc648f 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -18,6 +18,8 @@
#ifndef __ASM_ESR_H
#define __ASM_ESR_H
+#include <asm/memory.h>
+
#define ESR_ELx_EC_UNKNOWN (0x00)
#define ESR_ELx_EC_WFx (0x01)
/* Unallocated EC: 0x02 */
@@ -99,6 +101,13 @@
#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
#define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1)
+/* ESR value templates for specific events */
+
+/* BRK instruction trap from AArch64 state */
+#define ESR_ELx_VAL_BRK64(imm) \
+ ((ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) | ESR_ELx_IL | \
+ ((imm) & 0xffff))
+
#ifndef __ASSEMBLY__
#include <asm/types.h>
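Plugging the new BUG immediate into the template above reproduces the constant that the removed DBG_ESR_VAL_BRK() macro used to produce (all values are existing esr.h / debug-monitors.h definitions):

    /* ESR_ELx_EC_BRK64 = 0x3c, ESR_ELx_EC_SHIFT = 26, ESR_ELx_IL = 1 << 25
     *
     * ESR_ELx_VAL_BRK64(BUG_BRK_IMM)
     *      = (0x3c << 26) | (1 << 25)   | 0x800
     *      = 0xf0000000   | 0x02000000  | 0x800
     *      = 0xf2000800
     */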
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 0303705fcad6..6cb7e1a6bc02 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -18,7 +18,13 @@
#ifndef __ASM_EXCEPTION_H
#define __ASM_EXCEPTION_H
+#include <linux/ftrace.h>
+
#define __exception __attribute__((section(".exception.text")))
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define __exception_irq_entry __irq_entry
+#else
#define __exception_irq_entry __exception
+#endif
#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
index c0739187a920..8b9884c726ad 100644
--- a/arch/arm64/include/asm/fixmap.h
+++ b/arch/arm64/include/asm/fixmap.h
@@ -8,7 +8,7 @@
* Copyright (C) 1998 Ingo Molnar
* Copyright (C) 2013 Mark Salter <msalter@redhat.com>
*
- * Adapted from arch/x86_64 version.
+ * Adapted from arch/x86 version.
*
*/
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 74069b3bd919..007a69fc4f40 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -20,10 +20,17 @@
#include <linux/futex.h>
#include <linux/uaccess.h>
+
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
#include <asm/errno.h>
+#include <asm/sysreg.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
asm volatile( \
+ ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
+ CONFIG_ARM64_PAN) \
+" prfm pstl1strm, %2\n" \
"1: ldxr %w1, %2\n" \
insn "\n" \
"2: stlxr %w3, %w0, %2\n" \
@@ -39,6 +46,8 @@
" .align 3\n" \
" .quad 1b, 4b, 2b, 4b\n" \
" .popsection\n" \
+ ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
+ CONFIG_ARM64_PAN) \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
: "r" (oparg), "Ir" (-EFAULT) \
: "memory")
@@ -112,6 +121,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
+" prfm pstl1strm, %2\n"
"1: ldxr %w1, %2\n"
" sub %w3, %w1, %w4\n"
" cbnz %w3, 3f\n"
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 6aae421f4d73..a57601f9d17c 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -24,9 +24,7 @@
typedef struct {
unsigned int __softirq_pending;
-#ifdef CONFIG_SMP
unsigned int ipi_irqs[NR_IPI];
-#endif
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
@@ -34,10 +32,8 @@ typedef struct {
#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)
-#ifdef CONFIG_SMP
u64 smp_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu smp_irq_stat_cpu
-#endif
#define __ARCH_IRQ_EXIT_IRQS_DISABLED 1
@@ -47,9 +43,4 @@ static inline void ack_bad_irq(unsigned int irq)
irq_err_count++;
}
-/*
- * No arch-specific IRQ flags.
- */
-#define set_irq_flags(irq, flags)
-
#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
index 2fd9b14ca295..bb4052e85dba 100644
--- a/arch/arm64/include/asm/hugetlb.h
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_HUGETLB_H
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 52b484b6aa1a..4c47cb2fbb52 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -16,6 +16,8 @@
#ifndef __ASM_HW_BREAKPOINT_H
#define __ASM_HW_BREAKPOINT_H
+#include <asm/cputype.h>
+
#ifdef __KERNEL__
struct arch_hw_breakpoint_ctrl {
@@ -132,5 +134,17 @@ static inline void ptrace_hw_copy_thread(struct task_struct *task)
extern struct pmu perf_ops_bp;
+/* Determine number of BRP registers available. */
+static inline int get_num_brps(void)
+{
+ return ((read_cpuid(ID_AA64DFR0_EL1) >> 12) & 0xf) + 1;
+}
+
+/* Determine number of WRP registers available. */
+static inline int get_num_wrps(void)
+{
+ return ((read_cpuid(ID_AA64DFR0_EL1) >> 20) & 0xf) + 1;
+}
+
#endif /* __KERNEL__ */
#endif /* __ASM_BREAKPOINT_H */
diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h
index b4f6b19a8a68..8e24ef3f7c82 100644
--- a/arch/arm64/include/asm/irq_work.h
+++ b/arch/arm64/include/asm/irq_work.h
@@ -1,8 +1,6 @@
#ifndef __ASM_IRQ_WORK_H
#define __ASM_IRQ_WORK_H
-#ifdef CONFIG_SMP
-
#include <asm/smp.h>
static inline bool arch_irq_work_has_interrupt(void)
@@ -10,13 +8,4 @@ static inline bool arch_irq_work_has_interrupt(void)
return !!__smp_cross_call;
}
-#else
-
-static inline bool arch_irq_work_has_interrupt(void)
-{
- return false;
-}
-
-#endif
-
#endif /* __ASM_IRQ_WORK_H */
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index c0e5165c2f76..1b5e0e843c3a 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -26,14 +26,28 @@
#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
-static __always_inline bool arch_static_branch(struct static_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
asm goto("1: nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 3\n\t"
".quad 1b, %l[l_yes], %c0\n\t"
".popsection\n\t"
- : : "i"(key) : : l_yes);
+ : : "i"(&((char *)key)[branch]) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+{
+ asm goto("1: b %l[l_yes]\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".align 3\n\t"
+ ".quad 1b, %l[l_yes], %c0\n\t"
+ ".popsection\n\t"
+ : : "i"(&((char *)key)[branch]) : : l_yes);
return false;
l_yes:
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index ac6fafb95fe7..9694f2654593 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -95,6 +95,7 @@
SCTLR_EL2_SA | SCTLR_EL2_I)
/* TCR_EL2 Registers bits */
+#define TCR_EL2_RES1 ((1 << 31) | (1 << 23))
#define TCR_EL2_TBI (1 << 20)
#define TCR_EL2_PS (7 << 16)
#define TCR_EL2_PS_40B (2 << 16)
@@ -106,9 +107,10 @@
#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
-#define TCR_EL2_FLAGS (TCR_EL2_PS_40B)
+#define TCR_EL2_FLAGS (TCR_EL2_RES1 | TCR_EL2_PS_40B)
/* VTCR_EL2 Registers bits */
+#define VTCR_EL2_RES1 (1 << 31)
#define VTCR_EL2_PS_MASK (7 << 16)
#define VTCR_EL2_TG0_MASK (1 << 14)
#define VTCR_EL2_TG0_4K (0 << 14)
@@ -147,7 +149,8 @@
*/
#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_64K | VTCR_EL2_SH0_INNER | \
VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
- VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
+ VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
+ VTCR_EL2_RES1)
#define VTTBR_X (38 - VTCR_EL2_T0SZ_40B)
#else
/*
@@ -158,7 +161,8 @@
*/
#define VTCR_EL2_FLAGS (VTCR_EL2_TG0_4K | VTCR_EL2_SH0_INNER | \
VTCR_EL2_ORGN0_WBWA | VTCR_EL2_IRGN0_WBWA | \
- VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B)
+ VTCR_EL2_SL0_LVL1 | VTCR_EL2_T0SZ_40B | \
+ VTCR_EL2_RES1)
#define VTTBR_X (37 - VTCR_EL2_T0SZ_40B)
#endif
@@ -168,13 +172,15 @@
#define VTTBR_VMID_MASK (UL(0xFF) << VTTBR_VMID_SHIFT)
/* Hyp System Trap Register */
-#define HSTR_EL2_TTEE (1 << 16)
#define HSTR_EL2_T(x) (1 << x)
+/* Hyp Coprocessor Trap Register Shifts */
+#define CPTR_EL2_TFP_SHIFT 10
+
/* Hyp Coprocessor Trap Register */
#define CPTR_EL2_TCPAC (1 << 31)
#define CPTR_EL2_TTA (1 << 20)
-#define CPTR_EL2_TFP (1 << 10)
+#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
/* Hyp Debug Configuration Register bits */
#define MDCR_EL2_TDRA (1 << 11)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 3c5fe685a2d6..5e377101f919 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -46,24 +46,14 @@
#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
#define PAR_EL1 21 /* Physical Address Register */
#define MDSCR_EL1 22 /* Monitor Debug System Control Register */
-#define DBGBCR0_EL1 23 /* Debug Breakpoint Control Registers (0-15) */
-#define DBGBCR15_EL1 38
-#define DBGBVR0_EL1 39 /* Debug Breakpoint Value Registers (0-15) */
-#define DBGBVR15_EL1 54
-#define DBGWCR0_EL1 55 /* Debug Watchpoint Control Registers (0-15) */
-#define DBGWCR15_EL1 70
-#define DBGWVR0_EL1 71 /* Debug Watchpoint Value Registers (0-15) */
-#define DBGWVR15_EL1 86
-#define MDCCINT_EL1 87 /* Monitor Debug Comms Channel Interrupt Enable Reg */
+#define MDCCINT_EL1 23 /* Monitor Debug Comms Channel Interrupt Enable Reg */
/* 32bit specific registers. Keep them at the end of the range */
-#define DACR32_EL2 88 /* Domain Access Control Register */
-#define IFSR32_EL2 89 /* Instruction Fault Status Register */
-#define FPEXC32_EL2 90 /* Floating-Point Exception Control Register */
-#define DBGVCR32_EL2 91 /* Debug Vector Catch Register */
-#define TEECR32_EL1 92 /* ThumbEE Configuration Register */
-#define TEEHBR32_EL1 93 /* ThumbEE Handler Base Register */
-#define NR_SYS_REGS 94
+#define DACR32_EL2 24 /* Domain Access Control Register */
+#define IFSR32_EL2 25 /* Instruction Fault Status Register */
+#define FPEXC32_EL2 26 /* Floating-Point Exception Control Register */
+#define DBGVCR32_EL2 27 /* Debug Vector Catch Register */
+#define NR_SYS_REGS 28
/* 32bit mapping */
#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
@@ -132,6 +122,8 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern u32 __kvm_get_mdcr_el2(void);
+
#endif
#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2709db2a7eac..ed039688c221 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -30,19 +30,16 @@
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
-#if defined(CONFIG_KVM_ARM_MAX_VCPUS)
-#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
-#else
-#define KVM_MAX_VCPUS 0
-#endif
-
#define KVM_USER_MEM_SLOTS 32
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_HALT_POLL_NS_DEFAULT 500000
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
+#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
+
#define KVM_VCPU_MAX_FEATURES 3
int __attribute_const__ kvm_target_cpu(void);
@@ -103,15 +100,34 @@ struct kvm_vcpu_arch {
/* HYP configuration */
u64 hcr_el2;
+ u32 mdcr_el2;
/* Exception Information */
struct kvm_vcpu_fault_info fault;
- /* Debug state */
+ /* Guest debug state */
u64 debug_flags;
+ /*
+ * We maintain more than a single set of debug registers to support
+ * debugging the guest from the host and to maintain separate host and
+ * guest state during world switches. vcpu_debug_state are the debug
+ * registers of the vcpu as the guest sees them. host_debug_state are
+ * the host registers which are saved and restored during
+ * world switches. external_debug_state contains the debug
+ * values we want to use to debug the guest. This is set via the
+ * KVM_SET_GUEST_DEBUG ioctl.
+ *
+ * debug_ptr points to the set of debug registers that should be loaded
+ * onto the hardware when running the guest.
+ */
+ struct kvm_guest_debug_arch *debug_ptr;
+ struct kvm_guest_debug_arch vcpu_debug_state;
+ struct kvm_guest_debug_arch external_debug_state;
+
/* Pointer to host CPU context */
kvm_cpu_context_t *host_cpu_context;
+ struct kvm_guest_debug_arch host_debug_state;
/* VGIC state */
struct vgic_cpu vgic_cpu;
@@ -122,6 +138,17 @@ struct kvm_vcpu_arch {
* here.
*/
+ /*
+ * Guest registers we preserve during guest debugging.
+ *
+ * These shadow registers are updated by the kvm_handle_sys_reg
+ * trap handler if the guest accesses or updates them while we
+ * are using guest debug.
+ */
+ struct {
+ u32 mdscr_el1;
+ } guest_debug_preserved;
+
/* Don't run the guest */
bool pause;
@@ -165,6 +192,7 @@ struct kvm_vm_stat {
struct kvm_vcpu_stat {
u32 halt_successful_poll;
+ u32 halt_attempted_poll;
u32 halt_wakeup;
};
@@ -216,15 +244,15 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
hyp_stack_ptr, vector_ptr);
}
-struct vgic_sr_vectors {
- void *save_vgic;
- void *restore_vgic;
-};
-
static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+void kvm_arm_init_debug(void);
+void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
+void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
+void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
new file mode 100644
index 000000000000..3de42d68611d
--- /dev/null
+++ b/arch/arm64/include/asm/lse.h
@@ -0,0 +1,53 @@
+#ifndef __ASM_LSE_H
+#define __ASM_LSE_H
+
+#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+
+#include <linux/stringify.h>
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
+
+#ifdef __ASSEMBLER__
+
+.arch_extension lse
+
+.macro alt_lse, llsc, lse
+ alternative_insn "\llsc", "\lse", ARM64_HAS_LSE_ATOMICS
+.endm
+
+#else /* __ASSEMBLER__ */
+
+__asm__(".arch_extension lse");
+
+/* Move the ll/sc atomics out-of-line */
+#define __LL_SC_INLINE
+#define __LL_SC_PREFIX(x) __ll_sc_##x
+#define __LL_SC_EXPORT(x) EXPORT_SYMBOL(__LL_SC_PREFIX(x))
+
+/* Macro for constructing calls to out-of-line ll/sc atomics */
+#define __LL_SC_CALL(op) "bl\t" __stringify(__LL_SC_PREFIX(op)) "\n"
+
+/* In-line patching at runtime */
+#define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
+ ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
+
+#endif /* __ASSEMBLER__ */
+#else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+
+#ifdef __ASSEMBLER__
+
+.macro alt_lse, llsc, lse
+ \llsc
+.endm
+
+#else /* __ASSEMBLER__ */
+
+#define __LL_SC_INLINE static inline
+#define __LL_SC_PREFIX(x) x
+#define __LL_SC_EXPORT(x)
+
+#define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc
+
+#endif /* __ASSEMBLER__ */
+#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+#endif /* __ASM_LSE_H */
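For orientation, the net effect of the macros above when both CONFIG_AS_LSE and CONFIG_ARM64_LSE_ATOMICS are enabled (an illustrative expansion with an example op name, not additional code in the patch):

    /*   __LL_SC_PREFIX(atomic_add)       ->  __ll_sc_atomic_add
     *   __LL_SC_CALL(atomic_add)         ->  "bl\t__ll_sc_atomic_add\n"
     *   ARM64_LSE_ATOMIC_INSN(llsc, lse) ->  ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
     *
     * The LL/SC implementations are built out of line and only branched to on
     * CPUs without LSE atomics; when ARM64_HAS_LSE_ATOMICS is detected, the
     * alternatives framework patches the LSE sequence in at boot. Without LSE
     * support, ARM64_LSE_ATOMIC_INSN(llsc, lse) simply expands to llsc and the
     * LL/SC code stays inline.
     */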
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 4112b3d7468e..67027c611dbd 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -81,12 +81,6 @@
#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET))
/*
- * Convert a physical address to a Page Frame Number and back
- */
-#define __phys_to_pfn(paddr) ((unsigned long)((paddr) >> PAGE_SHIFT))
-#define __pfn_to_phys(pfn) ((phys_addr_t)(pfn) << PAGE_SHIFT)
-
-/*
* Convert a page to/from a physical address
*/
#define page_to_phys(page) (__pfn_to_phys(page_to_pfn(page)))
@@ -115,6 +109,14 @@ extern phys_addr_t memstart_addr;
#define PHYS_OFFSET ({ memstart_addr; })
/*
+ * The maximum physical address that the linear direct mapping
+ * of system RAM can cover. (PAGE_OFFSET can be interpreted as
+ * a 2's complement signed quantity and negated to derive the
+ * maximum size of the linear mapping.)
+ */
+#define MAX_MEMBLOCK_ADDR ({ memstart_addr - PAGE_OFFSET - 1; })
+
+/*
* PFNs are used to describe any physical page; this means
* PFN 0 == physical address 0.
*
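A worked example of the new limit, assuming a 39-bit VA configuration and a DRAM base of 2 GiB (both values are illustrative):

    /* VA_BITS = 39:
     *      PAGE_OFFSET     = 0xffffffc000000000
     *      linear map size = 0 - PAGE_OFFSET = 0x4000000000 (256 GiB)
     *
     * With memstart_addr = 0x80000000:
     *      MAX_MEMBLOCK_ADDR = 0x80000000 + 0x4000000000 - 1 = 0x407fffffff
     *
     * RAM above that physical address could never be reached through the
     * linear mapping, so early memory setup clips it.
     */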
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 79fcfb048884..030208767185 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -28,7 +28,6 @@ typedef struct {
#define ASID(mm) ((mm)->context.id & 0xffff)
extern void paging_init(void);
-extern void setup_mm_for_reboot(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 4fde8c1df97f..0a456bef8c79 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,8 +16,6 @@
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
-#ifdef CONFIG_SMP
-
static inline void set_my_cpu_offset(unsigned long off)
{
asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -38,12 +36,6 @@ static inline unsigned long __my_cpu_offset(void)
}
#define __my_cpu_offset __my_cpu_offset()
-#else /* !CONFIG_SMP */
-
-#define set_my_cpu_offset(x) do { } while (0)
-
-#endif /* CONFIG_SMP */
-
#define PERCPU_OP(op, asm_op) \
static inline unsigned long __percpu_##op(void *ptr, \
unsigned long val, int size) \
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 6471773db6fd..7bd3cdb533ea 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -17,7 +17,7 @@
#ifndef __ASM_PERF_EVENT_H
#define __ASM_PERF_EVENT_H
-#ifdef CONFIG_HW_PERF_EVENTS
+#ifdef CONFIG_PERF_EVENTS
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 59bfae75dc98..24154b055835 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -104,6 +104,7 @@
#define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
#define PTE_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
#define PTE_NG (_AT(pteval_t, 1) << 11) /* nG */
+#define PTE_DBM (_AT(pteval_t, 1) << 51) /* Dirty Bit Management */
#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */
#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */
@@ -168,5 +169,7 @@
#define TCR_TG1_64K (UL(3) << 30)
#define TCR_ASID16 (UL(1) << 36)
#define TCR_TBI0 (UL(1) << 37)
+#define TCR_HA (UL(1) << 39)
+#define TCR_HD (UL(1) << 40)
#endif
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 0a105e3254a1..571ca0ed4f05 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -16,6 +16,7 @@
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H
+#include <asm/bug.h>
#include <asm/proc-fns.h>
#include <asm/memory.h>
@@ -25,9 +26,9 @@
* Software defined PTE bits definition.
*/
#define PTE_VALID (_AT(pteval_t, 1) << 0)
+#define PTE_WRITE (PTE_DBM) /* same as DBM (51) */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
-#define PTE_WRITE (_AT(pteval_t, 1) << 57)
#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
/*
@@ -48,18 +49,16 @@
#define FIRST_USER_ADDRESS 0UL
#ifndef __ASSEMBLY__
+
+#include <linux/mmdebug.h>
+
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);
-#ifdef CONFIG_SMP
#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-#else
-#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF)
-#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF)
-#endif
#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
@@ -82,7 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
-#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
#define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
@@ -139,12 +138,20 @@ extern struct page *empty_zero_page;
* The following only work if pte_present(). Undefined behaviour otherwise.
*/
#define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
-#define pte_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY))
#define pte_young(pte) (!!(pte_val(pte) & PTE_AF))
#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
+#ifdef CONFIG_ARM64_HW_AFDBM
+#define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
+#else
+#define pte_hw_dirty(pte) (0)
+#endif
+#define pte_sw_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY))
+#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
+
+#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
#define pte_valid_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
#define pte_valid_not_user(pte) \
@@ -211,20 +218,49 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
}
}
+struct mm_struct;
+struct vm_area_struct;
+
extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
+/*
+ * PTE bits configuration in the presence of hardware Dirty Bit Management
+ * (PTE_WRITE == PTE_DBM):
+ *
+ * Dirty Writable | PTE_RDONLY PTE_WRITE PTE_DIRTY (sw)
+ * 0 0 | 1 0 0
+ * 0 1 | 1 1 0
+ * 1 0 | 1 0 1
+ * 1 1 | 0 1 x
+ *
+ * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
+ * the page fault mechanism. Checking the dirty status of a pte becomes:
+ *
+ * PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
+ */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
if (pte_valid_user(pte)) {
if (!pte_special(pte) && pte_exec(pte))
__sync_icache_dcache(pte, addr);
- if (pte_dirty(pte) && pte_write(pte))
+ if (pte_sw_dirty(pte) && pte_write(pte))
pte_val(pte) &= ~PTE_RDONLY;
else
pte_val(pte) |= PTE_RDONLY;
}
+ /*
+ * If the existing pte is valid, check for potential race with
+ * hardware updates of the pte (ptep_set_access_flags safely changes
+ * valid ptes without going through an invalid entry).
+ */
+ if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
+ pte_valid(*ptep)) {
+ BUG_ON(!pte_young(pte));
+ BUG_ON(pte_write(*ptep) && !pte_dirty(pte));
+ }
+
set_pte(ptep, pte);
}
@@ -462,7 +498,10 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
- PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
+ PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
+ /* preserve the hardware dirty information */
+ if (pte_hw_dirty(pte))
+ pte = pte_mkdirty(pte);
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}
@@ -472,6 +511,101 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}
+#ifdef CONFIG_ARM64_HW_AFDBM
+/*
+ * Atomic pte/pmd modifications.
+ */
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
+{
+ pteval_t pteval;
+ unsigned int tmp, res;
+
+ asm volatile("// ptep_test_and_clear_young\n"
+ " prfm pstl1strm, %2\n"
+ "1: ldxr %0, %2\n"
+ " ubfx %w3, %w0, %5, #1 // extract PTE_AF (young)\n"
+ " and %0, %0, %4 // clear PTE_AF\n"
+ " stxr %w1, %0, %2\n"
+ " cbnz %w1, 1b\n"
+ : "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)), "=&r" (res)
+ : "L" (~PTE_AF), "I" (ilog2(PTE_AF)));
+
+ return res;
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pmd_t *pmdp)
+{
+ return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ pteval_t old_pteval;
+ unsigned int tmp;
+
+ asm volatile("// ptep_get_and_clear\n"
+ " prfm pstl1strm, %2\n"
+ "1: ldxr %0, %2\n"
+ " stxr %w1, xzr, %2\n"
+ " cbnz %w1, 1b\n"
+ : "=&r" (old_pteval), "=&r" (tmp), "+Q" (pte_val(*ptep)));
+
+ return __pte(old_pteval);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
+static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/*
+ * ptep_set_wrprotect - mark read-only while transferring potential hardware
+ * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit.
+ */
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
+{
+ pteval_t pteval;
+ unsigned long tmp;
+
+ asm volatile("// ptep_set_wrprotect\n"
+ " prfm pstl1strm, %2\n"
+ "1: ldxr %0, %2\n"
+ " tst %0, %4 // check for hw dirty (!PTE_RDONLY)\n"
+ " csel %1, %3, xzr, eq // set PTE_DIRTY|PTE_RDONLY if dirty\n"
+ " orr %0, %0, %1 // if !dirty, PTE_RDONLY is already set\n"
+ " and %0, %0, %5 // clear PTE_WRITE/PTE_DBM\n"
+ " stxr %w1, %0, %2\n"
+ " cbnz %w1, 1b\n"
+ : "=&r" (pteval), "=&r" (tmp), "+Q" (pte_val(*ptep))
+ : "r" (PTE_DIRTY|PTE_RDONLY), "L" (PTE_RDONLY), "L" (~PTE_WRITE)
+ : "cc");
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
+}
+#endif
+#endif /* CONFIG_ARM64_HW_AFDBM */
+
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
@@ -507,6 +641,21 @@ extern int kern_addr_valid(unsigned long addr);
#define pgtable_cache_init() do { } while (0)
+/*
+ * On AArch64, the cache coherency is handled via the set_pte_at() function.
+ */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ /*
+ * set_pte() does not have a DSB for user mappings, so make sure that
+ * the page table write is visible.
+ */
+ dsb(ishst);
+}
+
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
+
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_PGTABLE_H */
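A compact restatement of the dirty-tracking rules introduced above, written out as plain C (illustrative only; the real checks are the pte_dirty()/pte_hw_dirty() macros added earlier in this file):

    static inline bool example_pte_is_dirty(pteval_t val)
    {
            /* software dirty: set by the page-fault based mechanism */
            bool sw_dirty = val & PTE_DIRTY;
            /* hardware dirty: DBM cleared PTE_RDONLY on a writable pte */
            bool hw_dirty = (val & PTE_WRITE) && !(val & PTE_RDONLY);

            return sw_dirty || hw_dirty;
    }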
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index e4c893e54f01..98f32355dc97 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -186,4 +186,6 @@ static inline void spin_lock_prefetch(const void *x)
#endif
+void cpu_enable_pan(void);
+
#endif /* __ASM_PROCESSOR_H */
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
deleted file mode 100644
index 49d7e1aaebdc..000000000000
--- a/arch/arm64/include/asm/psci.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2013 ARM Limited
- */
-
-#ifndef __ASM_PSCI_H
-#define __ASM_PSCI_H
-
-int __init psci_dt_init(void);
-
-#ifdef CONFIG_ACPI
-int __init psci_acpi_init(void);
-bool __init acpi_psci_present(void);
-bool __init acpi_psci_use_hvc(void);
-#else
-static inline int psci_acpi_init(void) { return 0; }
-static inline bool acpi_psci_present(void) { return false; }
-#endif
-
-#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index d6dd9fdbc3be..536274ed292e 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -183,11 +183,7 @@ static inline int valid_user_regs(struct user_pt_regs *regs)
#define instruction_pointer(regs) ((unsigned long)(regs)->pc)
-#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
-#else
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index db02be81b90a..d9c3d6a6100a 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -20,10 +20,6 @@
#include <linux/cpumask.h>
#include <linux/thread_info.h>
-#ifndef CONFIG_SMP
-# error "<asm/smp.h> included in non-SMP build"
-#endif
-
#define raw_smp_processor_id() (current_thread_info()->cpu)
struct seq_file;
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index 7abf7570c00f..af58dcdefb21 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -56,6 +56,4 @@ static inline int get_logical_index(u64 mpidr)
return -EINVAL;
}
-void __init do_post_cpus_up_work(void);
-
#endif /* __ASM_SMP_PLAT_H */
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index cee128732435..c85e96d174a5 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -16,6 +16,7 @@
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
+#include <asm/lse.h>
#include <asm/spinlock_types.h>
#include <asm/processor.h>
@@ -38,11 +39,21 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
asm volatile(
/* Atomically increment the next ticket. */
+ ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
" prfm pstl1strm, %3\n"
"1: ldaxr %w0, %3\n"
" add %w1, %w0, %w5\n"
" stxr %w2, %w1, %3\n"
-" cbnz %w2, 1b\n"
+" cbnz %w2, 1b\n",
+ /* LSE atomics */
+" mov %w2, %w5\n"
+" ldadda %w2, %w0, %3\n"
+" nop\n"
+" nop\n"
+" nop\n"
+ )
+
/* Did we get the lock? */
" eor %w1, %w0, %w0, ror #16\n"
" cbz %w1, 3f\n"
@@ -67,15 +78,25 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
unsigned int tmp;
arch_spinlock_t lockval;
- asm volatile(
-" prfm pstl1strm, %2\n"
-"1: ldaxr %w0, %2\n"
-" eor %w1, %w0, %w0, ror #16\n"
-" cbnz %w1, 2f\n"
-" add %w0, %w0, %3\n"
-" stxr %w1, %w0, %2\n"
-" cbnz %w1, 1b\n"
-"2:"
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+ " prfm pstl1strm, %2\n"
+ "1: ldaxr %w0, %2\n"
+ " eor %w1, %w0, %w0, ror #16\n"
+ " cbnz %w1, 2f\n"
+ " add %w0, %w0, %3\n"
+ " stxr %w1, %w0, %2\n"
+ " cbnz %w1, 1b\n"
+ "2:",
+ /* LSE atomics */
+ " ldr %w0, %2\n"
+ " eor %w1, %w0, %w0, ror #16\n"
+ " cbnz %w1, 1f\n"
+ " add %w1, %w0, %3\n"
+ " casa %w0, %w1, %2\n"
+ " and %w1, %w1, #0xffff\n"
+ " eor %w1, %w1, %w0, lsr #16\n"
+ "1:")
: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
: "I" (1 << TICKET_SHIFT)
: "memory");
@@ -85,10 +106,19 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- asm volatile(
-" stlrh %w1, %0\n"
- : "=Q" (lock->owner)
- : "r" (lock->owner + 1)
+ unsigned long tmp;
+
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+ " ldrh %w1, %0\n"
+ " add %w1, %w1, #1\n"
+ " stlrh %w1, %0",
+ /* LSE atomics */
+ " mov %w1, #1\n"
+ " nop\n"
+ " staddlh %w1, %0")
+ : "=Q" (lock->owner), "=&r" (tmp)
+ :
: "memory");
}
@@ -123,13 +153,24 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
- asm volatile(
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
" sevl\n"
"1: wfe\n"
"2: ldaxr %w0, %1\n"
" cbnz %w0, 1b\n"
" stxr %w0, %w2, %1\n"
" cbnz %w0, 2b\n"
+ " nop",
+ /* LSE atomics */
+ "1: mov %w0, wzr\n"
+ "2: casa %w0, %w2, %1\n"
+ " cbz %w0, 3f\n"
+ " ldxr %w0, %1\n"
+ " cbz %w0, 2b\n"
+ " wfe\n"
+ " b 1b\n"
+ "3:")
: "=&r" (tmp), "+Q" (rw->lock)
: "r" (0x80000000)
: "memory");
@@ -139,11 +180,18 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
- asm volatile(
- " ldaxr %w0, %1\n"
- " cbnz %w0, 1f\n"
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+ "1: ldaxr %w0, %1\n"
+ " cbnz %w0, 2f\n"
" stxr %w0, %w2, %1\n"
- "1:\n"
+ " cbnz %w0, 1b\n"
+ "2:",
+ /* LSE atomics */
+ " mov %w0, wzr\n"
+ " casa %w0, %w2, %1\n"
+ " nop\n"
+ " nop")
: "=&r" (tmp), "+Q" (rw->lock)
: "r" (0x80000000)
: "memory");
@@ -153,9 +201,10 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
- asm volatile(
- " stlr %w1, %0\n"
- : "=Q" (rw->lock) : "r" (0) : "memory");
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ " stlr wzr, %0",
+ " swpl wzr, wzr, %0")
+ : "=Q" (rw->lock) :: "memory");
}
/* write_can_lock - would write_trylock() succeed? */
@@ -172,6 +221,10 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
*
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
+ *
+ * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
+ * and LSE implementations may exhibit different behaviour (although this
+ * will have no effect on lockdep).
*/
static inline void arch_read_lock(arch_rwlock_t *rw)
{
@@ -179,26 +232,43 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
asm volatile(
" sevl\n"
+ ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
"1: wfe\n"
"2: ldaxr %w0, %2\n"
" add %w0, %w0, #1\n"
" tbnz %w0, #31, 1b\n"
" stxr %w1, %w0, %2\n"
- " cbnz %w1, 2b\n"
+ " nop\n"
+ " cbnz %w1, 2b",
+ /* LSE atomics */
+ "1: wfe\n"
+ "2: ldxr %w0, %2\n"
+ " adds %w1, %w0, #1\n"
+ " tbnz %w1, #31, 1b\n"
+ " casa %w0, %w1, %2\n"
+ " sbc %w0, %w1, %w0\n"
+ " cbnz %w0, 2b")
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
:
- : "memory");
+ : "cc", "memory");
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int tmp, tmp2;
- asm volatile(
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
"1: ldxr %w0, %2\n"
" sub %w0, %w0, #1\n"
" stlxr %w1, %w0, %2\n"
- " cbnz %w1, 1b\n"
+ " cbnz %w1, 1b",
+ /* LSE atomics */
+ " movn %w0, #0\n"
+ " nop\n"
+ " nop\n"
+ " staddl %w0, %2")
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
:
: "memory");
@@ -206,17 +276,28 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- unsigned int tmp, tmp2 = 1;
+ unsigned int tmp, tmp2;
- asm volatile(
- " ldaxr %w0, %2\n"
+ asm volatile(ARM64_LSE_ATOMIC_INSN(
+ /* LL/SC */
+ " mov %w1, #1\n"
+ "1: ldaxr %w0, %2\n"
" add %w0, %w0, #1\n"
- " tbnz %w0, #31, 1f\n"
+ " tbnz %w0, #31, 2f\n"
" stxr %w1, %w0, %2\n"
- "1:\n"
- : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
+ " cbnz %w1, 1b\n"
+ "2:",
+ /* LSE atomics */
+ " ldr %w0, %2\n"
+ " adds %w1, %w0, #1\n"
+ " tbnz %w1, #31, 1f\n"
+ " casa %w0, %w1, %2\n"
+ " sbc %w1, %w1, %w0\n"
+ " nop\n"
+ "1:")
+ : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
:
- : "memory");
+ : "cc", "memory");
return !tmp2;
}
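For orientation, the ticket encoding that both the LL/SC and LSE sequences above manipulate (a summary of the existing layout, not new behaviour):

    /* arch_spinlock_t packs two 16-bit halves into one 32-bit word:
     *      bits [15:0]   owner - ticket currently being served
     *      bits [31:16]  next  - next ticket to hand out (TICKET_SHIFT == 16)
     *
     * Lock:   atomically add (1 << TICKET_SHIFT) to take a ticket, then spin
     *         until owner equals that ticket; "eor %wN, %wN, %wN, ror #16"
     *         is zero exactly when the two halves match.
     * Unlock: increment owner, handing the lock to the next waiter in FIFO
     *         order.
     */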
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index b8d383665f56..55be59a35e3f 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -20,6 +20,8 @@
# error "please don't include this file directly"
#endif
+#include <linux/types.h>
+
#define TICKET_SHIFT 16
typedef struct {
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 5c89df0acbcb..a7f3d4b2514d 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -20,8 +20,29 @@
#ifndef __ASM_SYSREG_H
#define __ASM_SYSREG_H
+#include <asm/opcodes.h>
+
+#define SCTLR_EL1_CP15BEN (0x1 << 5)
+#define SCTLR_EL1_SED (0x1 << 8)
+
+/*
+ * ARMv8 ARM reserves the following encoding for system registers:
+ * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
+ * C5.2, version: ARM DDI 0487A.f)
+ * [20-19] : Op0
+ * [18-16] : Op1
+ * [15-12] : CRn
+ * [11-8] : CRm
+ * [7-5] : Op2
+ */
#define sys_reg(op0, op1, crn, crm, op2) \
- ((((op0)-2)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+ ((((op0)&3)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+
+#define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
+#define SCTLR_EL1_SPAN (1 << 23)
+
+#define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM |\
+ (!!x)<<8 | 0x1f)
#ifdef __ASSEMBLY__
@@ -31,11 +52,11 @@
.equ __reg_num_xzr, 31
.macro mrs_s, rt, sreg
- .inst 0xd5300000|(\sreg)|(__reg_num_\rt)
+ .inst 0xd5200000|(\sreg)|(__reg_num_\rt)
.endm
.macro msr_s, sreg, rt
- .inst 0xd5100000|(\sreg)|(__reg_num_\rt)
+ .inst 0xd5000000|(\sreg)|(__reg_num_\rt)
.endm
#else
@@ -47,14 +68,23 @@ asm(
" .equ __reg_num_xzr, 31\n"
"\n"
" .macro mrs_s, rt, sreg\n"
-" .inst 0xd5300000|(\\sreg)|(__reg_num_\\rt)\n"
+" .inst 0xd5200000|(\\sreg)|(__reg_num_\\rt)\n"
" .endm\n"
"\n"
" .macro msr_s, sreg, rt\n"
-" .inst 0xd5100000|(\\sreg)|(__reg_num_\\rt)\n"
+" .inst 0xd5000000|(\\sreg)|(__reg_num_\\rt)\n"
" .endm\n"
);
+static inline void config_sctlr_el1(u32 clear, u32 set)
+{
+ u32 val;
+
+ asm volatile("mrs %0, sctlr_el1" : "=r" (val));
+ val &= ~clear;
+ val |= set;
+ asm volatile("msr sctlr_el1, %0" : : "r" (val));
+}
#endif
#endif /* __ASM_SYSREG_H */
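Working the PAN encoding above through by hand shows the opcodes that the alternatives end up patching in:

    /* REG_PSTATE_PAN_IMM = sys_reg(0, 0, 4, 0, 4)
     *      = ((0 & 3) << 19) | (0 << 16) | (4 << 12) | (0 << 8) | (4 << 5)
     *      = 0x4080
     *
     * SET_PSTATE_PAN(1) = 0xd5000000 | 0x4080 | (1 << 8) | 0x1f
     *      = 0xd500419f            (msr pan, #1)
     * SET_PSTATE_PAN(0) = 0xd5000000 | 0x4080 | (0 << 8) | 0x1f
     *      = 0xd500409f            (msr pan, #0)
     */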
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 3a0242c7eb8d..d6e6b6660380 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -41,7 +41,12 @@ static inline void tlb_flush(struct mmu_gather *tlb)
flush_tlb_mm(tlb->mm);
} else {
struct vm_area_struct vma = { .vm_mm = tlb->mm, };
- flush_tlb_range(&vma, tlb->start, tlb->end);
+ /*
+ * The intermediate page table levels are already handled by
+ * the __(pte|pmd|pud)_free_tlb() functions, so last level
+ * TLBI is sufficient here.
+ */
+ __flush_tlb_range(&vma, tlb->start, tlb->end, true);
}
}
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 934815d45eda..7bd2da021658 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -87,27 +87,56 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
((unsigned long)ASID(vma->vm_mm) << 48);
dsb(ishst);
- asm("tlbi vae1is, %0" : : "r" (addr));
+ asm("tlbi vale1is, %0" : : "r" (addr));
dsb(ish);
}
+/*
+ * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
+ * necessarily a performance improvement.
+ */
+#define MAX_TLB_RANGE (1024UL << PAGE_SHIFT)
+
static inline void __flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end,
+ bool last_level)
{
unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
unsigned long addr;
+
+ if ((end - start) > MAX_TLB_RANGE) {
+ flush_tlb_mm(vma->vm_mm);
+ return;
+ }
+
start = asid | (start >> 12);
end = asid | (end >> 12);
dsb(ishst);
- for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
- asm("tlbi vae1is, %0" : : "r"(addr));
+ for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
+ if (last_level)
+ asm("tlbi vale1is, %0" : : "r"(addr));
+ else
+ asm("tlbi vae1is, %0" : : "r"(addr));
+ }
dsb(ish);
}
-static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ __flush_tlb_range(vma, start, end, false);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long addr;
+
+ if ((end - start) > MAX_TLB_RANGE) {
+ flush_tlb_all();
+ return;
+ }
+
start >>= 12;
end >>= 12;
@@ -119,29 +148,6 @@ static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long e
}
/*
- * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
- * necessarily a performance improvement.
- */
-#define MAX_TLB_RANGE (1024UL << PAGE_SHIFT)
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
- if ((end - start) <= MAX_TLB_RANGE)
- __flush_tlb_range(vma, start, end);
- else
- flush_tlb_mm(vma->vm_mm);
-}
-
-static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
- if ((end - start) <= MAX_TLB_RANGE)
- __flush_tlb_kernel_range(start, end);
- else
- flush_tlb_all();
-}
-
-/*
* Used to invalidate the TLB (walk caches) corresponding to intermediate page
* table levels (pgd/pud/pmd).
*/
@@ -154,20 +160,6 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
asm("tlbi vae1is, %0" : : "r" (addr));
dsb(ish);
}
-/*
- * On AArch64, the cache coherency is handled via the set_pte_at() function.
- */
-static inline void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long addr, pte_t *ptep)
-{
- /*
- * set_pte() does not have a DSB for user mappings, so make sure that
- * the page table write is visible.
- */
- dsb(ishst);
-}
-
-#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
#endif
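To put a number on the fall-back threshold above (assuming 4 KiB pages; larger page sizes scale the limit accordingly):

    /* MAX_TLB_RANGE = 1024UL << PAGE_SHIFT = 1024 * 4096 = 4 MiB
     *
     * A range of more than 1024 pages skips the per-page "tlbi vale1is" loop
     * and flushes the whole mm (or the whole TLB for kernel ranges) instead,
     * trading precision for a bounded execution time.
     */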
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index 225ec3524fbf..a3e9d6fdbf21 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -1,8 +1,6 @@
#ifndef __ASM_TOPOLOGY_H
#define __ASM_TOPOLOGY_H
-#ifdef CONFIG_SMP
-
#include <linux/cpumask.h>
struct cpu_topology {
@@ -24,13 +22,6 @@ void init_cpu_topology(void);
void store_cpu_topology(unsigned int cpuid);
const struct cpumask *cpu_coregroup_mask(int cpu);
-#else
-
-static inline void init_cpu_topology(void) { }
-static inline void store_cpu_topology(unsigned int cpuid) { }
-
-#endif
-
#include <asm-generic/topology.h>
#endif /* _ASM_ARM_TOPOLOGY_H */
diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h
index 232e4ba5d314..0cc2f29bf9da 100644
--- a/arch/arm64/include/asm/traps.h
+++ b/arch/arm64/include/asm/traps.h
@@ -34,13 +34,32 @@ struct undef_hook {
void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+ extern char __irqentry_text_start[];
+ extern char __irqentry_text_end[];
+
+ return ptr >= (unsigned long)&__irqentry_text_start &&
+ ptr < (unsigned long)&__irqentry_text_end;
+}
+#else
+static inline int __in_irqentry_text(unsigned long ptr)
+{
+ return 0;
+}
+#endif
+
static inline int in_exception_text(unsigned long ptr)
{
extern char __exception_text_start[];
extern char __exception_text_end[];
+ int in;
+
+ in = ptr >= (unsigned long)&__exception_text_start &&
+ ptr < (unsigned long)&__exception_text_end;
- return ptr >= (unsigned long)&__exception_text_start &&
- ptr < (unsigned long)&__exception_text_end;
+ return in ? : __in_irqentry_text(ptr);
}
#endif
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 07e1ba449bf1..b2ede967fe7d 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -24,7 +24,10 @@
#include <linux/string.h>
#include <linux/thread_info.h>
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
#include <asm/ptrace.h>
+#include <asm/sysreg.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>
@@ -131,6 +134,8 @@ static inline void set_fs(mm_segment_t fs)
do { \
unsigned long __gu_val; \
__chk_user_ptr(ptr); \
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
+ CONFIG_ARM64_PAN)); \
switch (sizeof(*(ptr))) { \
case 1: \
__get_user_asm("ldrb", "%w", __gu_val, (ptr), (err)); \
@@ -148,6 +153,8 @@ do { \
BUILD_BUG(); \
} \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
+ CONFIG_ARM64_PAN)); \
} while (0)
#define __get_user(x, ptr) \
@@ -194,6 +201,8 @@ do { \
do { \
__typeof__(*(ptr)) __pu_val = (x); \
__chk_user_ptr(ptr); \
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
+ CONFIG_ARM64_PAN)); \
switch (sizeof(*(ptr))) { \
case 1: \
__put_user_asm("strb", "%w", __pu_val, (ptr), (err)); \
@@ -210,6 +219,8 @@ do { \
default: \
BUILD_BUG(); \
} \
+ asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
+ CONFIG_ARM64_PAN)); \
} while (0)
#define __put_user(x, ptr) \
diff --git a/arch/arm64/include/asm/xen/events.h b/arch/arm64/include/asm/xen/events.h
index 86553213c132..4318866d053c 100644
--- a/arch/arm64/include/asm/xen/events.h
+++ b/arch/arm64/include/asm/xen/events.h
@@ -18,4 +18,10 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
#define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
+/* Rebind event channel is supported by default */
+static inline bool xen_support_evtchn_rebind(void)
+{
+ return true;
+}
+
#endif /* _ASM_ARM64_XEN_EVENTS_H */
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 73cf0f54d57c..361c8a8ef55f 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -27,5 +27,6 @@
#define HWCAP_SHA1 (1 << 5)
#define HWCAP_SHA2 (1 << 6)
#define HWCAP_CRC32 (1 << 7)
+#define HWCAP_ATOMICS (1 << 8)
#endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index d26832022127..0cd7b5947dfc 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -53,14 +53,20 @@ struct kvm_regs {
struct user_fpsimd_state fp_regs;
};
-/* Supported Processor Types */
+/*
+ * Supported CPU Targets - Adding a new target type is not recommended,
+ * unless there are some special registers not supported by the
+ * genericv8 sysreg table.
+ */
#define KVM_ARM_TARGET_AEM_V8 0
#define KVM_ARM_TARGET_FOUNDATION_V8 1
#define KVM_ARM_TARGET_CORTEX_A57 2
#define KVM_ARM_TARGET_XGENE_POTENZA 3
#define KVM_ARM_TARGET_CORTEX_A53 4
+/* Generic ARM v8 target */
+#define KVM_ARM_TARGET_GENERIC_V8 5
-#define KVM_ARM_NUM_TARGETS 5
+#define KVM_ARM_NUM_TARGETS 6
/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
@@ -100,12 +106,39 @@ struct kvm_sregs {
struct kvm_fpu {
};
+/*
+ * See v8 ARM ARM D7.3: Debug Registers
+ *
+ * The architectural limit is 16 debug registers of each type although
+ * in practice there are usually fewer (see ID_AA64DFR0_EL1).
+ *
+ * Although the control registers are architecturally defined as 32
+ * bits wide we use a 64 bit structure here to keep parity with
+ * KVM_GET/SET_ONE_REG behaviour which treats all system registers as
+ * 64 bit values. It also allows for the possibility of the
+ * architecture expanding the control registers without having to
+ * change the userspace ABI.
+ */
+#define KVM_ARM_MAX_DBG_REGS 16
struct kvm_guest_debug_arch {
+ __u64 dbg_bcr[KVM_ARM_MAX_DBG_REGS];
+ __u64 dbg_bvr[KVM_ARM_MAX_DBG_REGS];
+ __u64 dbg_wcr[KVM_ARM_MAX_DBG_REGS];
+ __u64 dbg_wvr[KVM_ARM_MAX_DBG_REGS];
};
struct kvm_debug_exit_arch {
+ __u32 hsr;
+ __u64 far; /* used for watchpoints */
};
+/*
+ * Architecture specific defines for kvm_guest_debug->control
+ */
+
+#define KVM_GUESTDBG_USE_SW_BP (1 << 16)
+#define KVM_GUESTDBG_USE_HW (1 << 17)
+
struct kvm_sync_regs {
};
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 6913643bbe54..208db3df135a 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -44,6 +44,7 @@
#define PSR_I_BIT 0x00000080
#define PSR_A_BIT 0x00000100
#define PSR_D_BIT 0x00000200
+#define PSR_PAN_BIT 0x00400000
#define PSR_Q_BIT 0x08000000
#define PSR_V_BIT 0x10000000
#define PSR_C_BIT 0x20000000
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 426d0763c81b..22dc9bc781be 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -17,15 +17,15 @@ arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \
return_address.o cpuinfo.o cpu_errata.o \
- cpufeature.o alternative.o cacheinfo.o
+ cpufeature.o alternative.o cacheinfo.o \
+ smp.o smp_spin_table.o topology.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o entry32.o \
../../arm/kernel/opcodes.o
arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
-arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o topology.o
-arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
+arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
arm64-obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 221b98312f0c..ab9db0e9818c 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -85,7 +85,7 @@ static u32 get_alt_insn(struct alt_instr *alt, u32 *insnptr, u32 *altinsnptr)
return insn;
}
-static int __apply_alternatives(void *alt_region)
+static void __apply_alternatives(void *alt_region)
{
struct alt_instr *alt;
struct alt_region *region = alt_region;
@@ -114,19 +114,39 @@ static int __apply_alternatives(void *alt_region)
flush_icache_range((uintptr_t)origptr,
(uintptr_t)(origptr + nr_inst));
}
-
- return 0;
}
-void apply_alternatives_all(void)
+/*
+ * We might be patching the stop_machine state machine, so implement a
+ * really simple polling protocol here.
+ */
+static int __apply_alternatives_multi_stop(void *unused)
{
+ static int patched = 0;
struct alt_region region = {
.begin = __alt_instructions,
.end = __alt_instructions_end,
};
+ /* We always have a CPU 0 at this point (__init) */
+ if (smp_processor_id()) {
+ while (!READ_ONCE(patched))
+ cpu_relax();
+ isb();
+ } else {
+ BUG_ON(patched);
+ __apply_alternatives(&region);
+ /* Barriers provided by the cache flushing */
+ WRITE_ONCE(patched, 1);
+ }
+
+ return 0;
+}
+
+void __init apply_alternatives_all(void)
+{
/* better not try code patching on a live SMP system */
- stop_machine(__apply_alternatives, &region, NULL);
+ stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}
void apply_alternatives(void *start, size_t length)
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 7922c2e710ca..bcee7abac68e 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -14,8 +14,11 @@
#include <linux/slab.h>
#include <linux/sysctl.h>
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/opcodes.h>
+#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
@@ -279,6 +282,8 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
*/
#define __user_swpX_asm(data, addr, res, temp, B) \
__asm__ __volatile__( \
+ ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, \
+ CONFIG_ARM64_PAN) \
" mov %w2, %w1\n" \
"0: ldxr"B" %w1, [%3]\n" \
"1: stxr"B" %w0, %w2, [%3]\n" \
@@ -294,7 +299,9 @@ static void register_insn_emulation_sysctl(struct ctl_table *table)
" .align 3\n" \
" .quad 0b, 3b\n" \
" .quad 1b, 3b\n" \
- " .popsection" \
+ " .popsection\n" \
+ ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, \
+ CONFIG_ARM64_PAN) \
: "=&r" (res), "+r" (data), "=&r" (temp) \
: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
: "memory")
@@ -504,16 +511,6 @@ ret:
return 0;
}
-static inline void config_sctlr_el1(u32 clear, u32 set)
-{
- u32 val;
-
- asm volatile("mrs %0, sctlr_el1" : "=r" (val));
- val &= ~clear;
- val |= set;
- asm volatile("msr sctlr_el1, %0" : : "r" (val));
-}
-
static int cp15_barrier_set_hw_mode(bool enable)
{
if (enable)
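The SWP emulation hunk above brackets the user-memory access with ALTERNATIVE-patched PAN toggles: on CPUs that report Privileged Access Never, PAN is cleared before the exclusive load/store pair and set again afterwards; on other CPUs both slots stay as NOPs. A rough C-level sketch of that bracketing pattern, with hypothetical has_pan()/set_pan() helpers standing in for the alternatives framework and the PSTATE.PAN write:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-ins for ARM64_HAS_PAN / SET_PSTATE_PAN(). */
static bool has_pan(void)            { return true; }
static void set_pan(bool enabled)    { (void)enabled; /* msr pan, #enabled */ }

/* Emulate "swp w_data, w_data, [addr]" against user memory. */
static int emulate_swp(uint32_t *uaddr, uint32_t *data)
{
        uint32_t old;

        if (has_pan())
                set_pan(false);      /* allow privileged access to user memory */

        old = __atomic_exchange_n(uaddr, *data, __ATOMIC_SEQ_CST);

        if (has_pan())
                set_pan(true);       /* restore protection before returning */

        *data = old;
        return 0;
}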
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index c99701a34d7b..8d89cf8dae55 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -116,17 +116,22 @@ int main(void)
DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2));
DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
DEFINE(VCPU_DEBUG_FLAGS, offsetof(struct kvm_vcpu, arch.debug_flags));
+ DEFINE(VCPU_DEBUG_PTR, offsetof(struct kvm_vcpu, arch.debug_ptr));
+ DEFINE(DEBUG_BCR, offsetof(struct kvm_guest_debug_arch, dbg_bcr));
+ DEFINE(DEBUG_BVR, offsetof(struct kvm_guest_debug_arch, dbg_bvr));
+ DEFINE(DEBUG_WCR, offsetof(struct kvm_guest_debug_arch, dbg_wcr));
+ DEFINE(DEBUG_WVR, offsetof(struct kvm_guest_debug_arch, dbg_wvr));
DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
+ DEFINE(VCPU_MDCR_EL2, offsetof(struct kvm_vcpu, arch.mdcr_el2));
DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
+ DEFINE(VCPU_HOST_DEBUG_STATE, offsetof(struct kvm_vcpu, arch.host_debug_state));
DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
- DEFINE(VGIC_SAVE_FN, offsetof(struct vgic_sr_vectors, save_vgic));
- DEFINE(VGIC_RESTORE_FN, offsetof(struct vgic_sr_vectors, restore_vgic));
DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
index 5ea337dd2f15..b6bd7d447768 100644
--- a/arch/arm64/kernel/cpu_ops.c
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -30,9 +30,7 @@ extern const struct cpu_operations cpu_psci_ops;
const struct cpu_operations *cpu_ops[NR_CPUS];
static const struct cpu_operations *supported_cpu_ops[] __initconst = {
-#ifdef CONFIG_SMP
&smp_spin_table_ops,
-#endif
&cpu_psci_ops,
NULL,
};
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 5ad86ceac010..3c9aed32f70b 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -21,24 +21,57 @@
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
+#include <asm/processor.h>
static bool
-has_id_aa64pfr0_feature(const struct arm64_cpu_capabilities *entry)
+feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
{
- u64 val;
+ int val = cpuid_feature_extract_field(reg, entry->field_pos);
- val = read_cpuid(id_aa64pfr0_el1);
- return (val & entry->register_mask) == entry->register_value;
+ return val >= entry->min_field_value;
}
+#define __ID_FEAT_CHK(reg) \
+static bool __maybe_unused \
+has_##reg##_feature(const struct arm64_cpu_capabilities *entry) \
+{ \
+ u64 val; \
+ \
+ val = read_cpuid(reg##_el1); \
+ return feature_matches(val, entry); \
+}
+
+__ID_FEAT_CHK(id_aa64pfr0);
+__ID_FEAT_CHK(id_aa64mmfr1);
+__ID_FEAT_CHK(id_aa64isar0);
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
.matches = has_id_aa64pfr0_feature,
- .register_mask = (0xf << 24),
- .register_value = (1 << 24),
+ .field_pos = 24,
+ .min_field_value = 1,
+ },
+#ifdef CONFIG_ARM64_PAN
+ {
+ .desc = "Privileged Access Never",
+ .capability = ARM64_HAS_PAN,
+ .matches = has_id_aa64mmfr1_feature,
+ .field_pos = 20,
+ .min_field_value = 1,
+ .enable = cpu_enable_pan,
},
+#endif /* CONFIG_ARM64_PAN */
+#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+ {
+ .desc = "LSE atomic instructions",
+ .capability = ARM64_HAS_LSE_ATOMICS,
+ .matches = has_id_aa64isar0_feature,
+ .field_pos = 20,
+ .min_field_value = 2,
+ },
+#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
{},
};
@@ -55,9 +88,15 @@ void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
pr_info("%s %s\n", info, caps[i].desc);
cpus_set_cap(caps[i].capability);
}
+
+ /* second pass allows enable() to consider interacting capabilities */
+ for (i = 0; caps[i].desc; i++) {
+ if (cpus_have_cap(caps[i].capability) && caps[i].enable)
+ caps[i].enable();
+ }
}
void check_local_cpu_features(void)
{
- check_cpu_capabilities(arm64_features, "detected feature");
+ check_cpu_capabilities(arm64_features, "detected feature:");
}
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index b056369fd47d..253021ef2769 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -82,7 +82,7 @@ early_param("nodebugmon", early_debug_disable);
static DEFINE_PER_CPU(int, mde_ref_count);
static DEFINE_PER_CPU(int, kde_ref_count);
-void enable_debug_monitors(enum debug_el el)
+void enable_debug_monitors(enum dbg_active_el el)
{
u32 mdscr, enable = 0;
@@ -102,7 +102,7 @@ void enable_debug_monitors(enum debug_el el)
}
}
-void disable_debug_monitors(enum debug_el el)
+void disable_debug_monitors(enum dbg_active_el el)
{
u32 mdscr, disable = 0;
@@ -134,7 +134,7 @@ static int os_lock_notify(struct notifier_block *self,
unsigned long action, void *data)
{
int cpu = (unsigned long)data;
- if (action == CPU_ONLINE)
+ if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
smp_call_function_single(cpu, clear_os_lock, NULL, 1);
return NOTIFY_OK;
}
@@ -201,7 +201,7 @@ void unregister_step_hook(struct step_hook *hook)
}
/*
- * Call registered single step handers
+ * Call registered single step handlers
* There is no Syndrome info to check for determining the handler.
* So we call all the registered handlers, until the right handler is
* found which returns zero.
@@ -271,20 +271,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
* Use reader/writer locks instead of plain spinlock.
*/
static LIST_HEAD(break_hook);
-static DEFINE_RWLOCK(break_hook_lock);
+static DEFINE_SPINLOCK(break_hook_lock);
void register_break_hook(struct break_hook *hook)
{
- write_lock(&break_hook_lock);
- list_add(&hook->node, &break_hook);
- write_unlock(&break_hook_lock);
+ spin_lock(&break_hook_lock);
+ list_add_rcu(&hook->node, &break_hook);
+ spin_unlock(&break_hook_lock);
}
void unregister_break_hook(struct break_hook *hook)
{
- write_lock(&break_hook_lock);
- list_del(&hook->node);
- write_unlock(&break_hook_lock);
+ spin_lock(&break_hook_lock);
+ list_del_rcu(&hook->node);
+ spin_unlock(&break_hook_lock);
+ synchronize_rcu();
}
static int call_break_hook(struct pt_regs *regs, unsigned int esr)
@@ -292,11 +293,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
struct break_hook *hook;
int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
- read_lock(&break_hook_lock);
- list_for_each_entry(hook, &break_hook, node)
+ rcu_read_lock();
+ list_for_each_entry_rcu(hook, &break_hook, node)
if ((esr & hook->esr_mask) == hook->esr_val)
fn = hook->fn;
- read_unlock(&break_hook_lock);
+ rcu_read_unlock();
return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
}
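The break-hook conversion above keeps a spinlock for writers but moves readers to RCU, so call_break_hook() can run from the debug exception path without taking any lock. A minimal sketch of the same pattern against the generic list/RCU API (kernel context assumed; the my_* names are illustrative, not the debug-monitors symbols):

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct my_hook {
        struct list_head node;
        int (*fn)(unsigned int esr);
        unsigned int esr_mask, esr_val;
};

static LIST_HEAD(my_hooks);
static DEFINE_SPINLOCK(my_hooks_lock);          /* serialises writers only */

void my_register_hook(struct my_hook *hook)
{
        spin_lock(&my_hooks_lock);
        list_add_rcu(&hook->node, &my_hooks);
        spin_unlock(&my_hooks_lock);
}

void my_unregister_hook(struct my_hook *hook)
{
        spin_lock(&my_hooks_lock);
        list_del_rcu(&hook->node);
        spin_unlock(&my_hooks_lock);
        synchronize_rcu();      /* wait for readers before the caller frees it */
}

/* Reader: safe from exception context, no lock taken. */
int my_call_hooks(unsigned int esr)
{
        struct my_hook *hook;
        int (*fn)(unsigned int esr) = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(hook, &my_hooks, node)
                if ((esr & hook->esr_mask) == hook->esr_val)
                        fn = hook->fn;
        rcu_read_unlock();

        return fn ? fn(esr) : -1;
}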
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index f5374065ad53..816120ece6bc 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -13,7 +13,7 @@
#include <asm/efi.h>
#include <asm/sections.h>
-efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table,
+efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table_arg,
unsigned long *image_addr,
unsigned long *image_size,
unsigned long *reserve_addr,
@@ -23,21 +23,44 @@ efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table,
{
efi_status_t status;
unsigned long kernel_size, kernel_memsize = 0;
+ unsigned long nr_pages;
+ void *old_image_addr = (void *)*image_addr;
/* Relocate the image, if required. */
kernel_size = _edata - _text;
if (*image_addr != (dram_base + TEXT_OFFSET)) {
kernel_memsize = kernel_size + (_end - _edata);
- status = efi_low_alloc(sys_table, kernel_memsize + TEXT_OFFSET,
- SZ_2M, reserve_addr);
+
+ /*
+ * First, try a straight allocation at the preferred offset.
+ * This will work around the issue where, if dram_base == 0x0,
+ * efi_low_alloc() refuses to allocate at 0x0 (to prevent the
+ * address of the allocation from being mistaken for a FAIL return

+ * value or a NULL pointer). It will also ensure that, on
+ * platforms where the [dram_base, dram_base + TEXT_OFFSET)
+ * interval is partially occupied by the firmware (like on APM
+ * Mustang), we can still place the kernel at the address
+ * 'dram_base + TEXT_OFFSET'.
+ */
+ *image_addr = *reserve_addr = dram_base + TEXT_OFFSET;
+ nr_pages = round_up(kernel_memsize, EFI_ALLOC_ALIGN) /
+ EFI_PAGE_SIZE;
+ status = efi_call_early(allocate_pages, EFI_ALLOCATE_ADDRESS,
+ EFI_LOADER_DATA, nr_pages,
+ (efi_physical_addr_t *)reserve_addr);
if (status != EFI_SUCCESS) {
- pr_efi_err(sys_table, "Failed to relocate kernel\n");
- return status;
+ kernel_memsize += TEXT_OFFSET;
+ status = efi_low_alloc(sys_table_arg, kernel_memsize,
+ SZ_2M, reserve_addr);
+
+ if (status != EFI_SUCCESS) {
+ pr_efi_err(sys_table_arg, "Failed to relocate kernel\n");
+ return status;
+ }
+ *image_addr = *reserve_addr + TEXT_OFFSET;
}
- memcpy((void *)*reserve_addr + TEXT_OFFSET, (void *)*image_addr,
- kernel_size);
- *image_addr = *reserve_addr + TEXT_OFFSET;
- *reserve_size = kernel_memsize + TEXT_OFFSET;
+ memcpy((void *)*image_addr, old_image_addr, kernel_size);
+ *reserve_size = kernel_memsize;
}
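The relocation change above follows the two-step strategy spelled out in its comment: first attempt a fixed allocation at dram_base + TEXT_OFFSET, and only if the firmware rejects that fall back to the lowest available region and place the kernel TEXT_OFFSET bytes into it. A small sketch of that fallback logic, with hypothetical alloc_fixed()/alloc_lowest() helpers standing in for the EFI boot services calls:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical allocator hooks: alloc_fixed() models EFI_ALLOCATE_ADDRESS,
 * alloc_lowest() models efi_low_alloc().  Both return true on success.
 */
static bool alloc_fixed(uint64_t addr, uint64_t size)
{
        (void)addr; (void)size;
        return false;           /* pretend the firmware occupies that range */
}

static bool alloc_lowest(uint64_t size, uint64_t align, uint64_t *addr)
{
        (void)size; (void)align;
        *addr = 0x80000000ULL;  /* pretend this region was found */
        return true;
}

#define TEXT_OFFSET 0x80000ULL

static int place_kernel(uint64_t dram_base, uint64_t memsize,
                        uint64_t *image_addr, uint64_t *reserve_addr)
{
        /* Preferred: a straight allocation at dram_base + TEXT_OFFSET. */
        *image_addr = *reserve_addr = dram_base + TEXT_OFFSET;
        if (alloc_fixed(*reserve_addr, memsize))
                return 0;

        /* Fallback: lowest 2 MiB-aligned region, kernel at +TEXT_OFFSET. */
        if (!alloc_lowest(memsize + TEXT_OFFSET, 2 << 20, reserve_addr))
                return -1;
        *image_addr = *reserve_addr + TEXT_OFFSET;
        return 0;
}

int main(void)
{
        uint64_t image, reserve;

        if (!place_kernel(0, 16 << 20, &image, &reserve))
                printf("image at %#llx (reserved from %#llx)\n",
                       (unsigned long long)image, (unsigned long long)reserve);
        return 0;
}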
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index e8ca6eaedd02..13671a9cf016 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -258,7 +258,8 @@ static bool __init efi_virtmap_init(void)
*/
if (!is_normal_ram(md))
prot = __pgprot(PROT_DEVICE_nGnRE);
- else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+ else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
+ !PAGE_ALIGNED(md->phys_addr))
prot = PAGE_KERNEL_EXEC;
else
prot = PAGE_KERNEL;
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 08cafc518b9a..0f03a8fe2314 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -178,6 +178,24 @@ ENTRY(ftrace_stub)
ENDPROC(ftrace_stub)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* save return value regs */
+ .macro save_return_regs
+ sub sp, sp, #64
+ stp x0, x1, [sp]
+ stp x2, x3, [sp, #16]
+ stp x4, x5, [sp, #32]
+ stp x6, x7, [sp, #48]
+ .endm
+
+ /* restore return value regs */
+ .macro restore_return_regs
+ ldp x0, x1, [sp]
+ ldp x2, x3, [sp, #16]
+ ldp x4, x5, [sp, #32]
+ ldp x6, x7, [sp, #48]
+ add sp, sp, #64
+ .endm
+
/*
* void ftrace_graph_caller(void)
*
@@ -204,11 +222,11 @@ ENDPROC(ftrace_graph_caller)
* only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
*/
ENTRY(return_to_handler)
- str x0, [sp, #-16]!
+ save_return_regs
mov x0, x29 // parent's fp
 bl ftrace_return_to_handler // addr = ftrace_return_to_handler(fp);
mov x30, x0 // restore the original return address
- ldr x0, [sp], #16
+ restore_return_regs
ret
END(return_to_handler)
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index e16351819fed..4306c937b1ff 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -116,41 +116,34 @@
*/
.endm
- .macro kernel_exit, el, ret = 0
+ .macro kernel_exit, el
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
.if \el == 0
ct_user_enter
ldr x23, [sp, #S_SP] // load return stack pointer
msr sp_el0, x23
-
#ifdef CONFIG_ARM64_ERRATUM_845719
-
-#undef SEQUENCE_ORG
-#undef SEQUENCE_ALT
-
+alternative_if_not ARM64_WORKAROUND_845719
+ nop
+ nop
#ifdef CONFIG_PID_IN_CONTEXTIDR
-
-#define SEQUENCE_ORG "nop ; nop ; nop"
-#define SEQUENCE_ALT "tbz x22, #4, 1f ; mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:"
-
+ nop
+#endif
+alternative_else
+ tbz x22, #4, 1f
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+ mrs x29, contextidr_el1
+ msr contextidr_el1, x29
#else
-
-#define SEQUENCE_ORG "nop ; nop"
-#define SEQUENCE_ALT "tbz x22, #4, 1f ; msr contextidr_el1, xzr; 1:"
-
+ msr contextidr_el1, xzr
#endif
-
- alternative_insn SEQUENCE_ORG, SEQUENCE_ALT, ARM64_WORKAROUND_845719
-
+1:
+alternative_endif
#endif
.endif
msr elr_el1, x21 // set up the return data
msr spsr_el1, x22
- .if \ret
- ldr x1, [sp, #S_X1] // preserve x0 (syscall return)
- .else
ldp x0, x1, [sp, #16 * 0]
- .endif
ldp x2, x3, [sp, #16 * 1]
ldp x4, x5, [sp, #16 * 2]
ldp x6, x7, [sp, #16 * 3]
@@ -613,22 +606,21 @@ ENDPROC(cpu_switch_to)
*/
ret_fast_syscall:
disable_irq // disable interrupts
+ str x0, [sp, #S_X0] // returned x0
ldr x1, [tsk, #TI_FLAGS] // re-check for syscall tracing
and x2, x1, #_TIF_SYSCALL_WORK
cbnz x2, ret_fast_syscall_trace
and x2, x1, #_TIF_WORK_MASK
- cbnz x2, fast_work_pending
+ cbnz x2, work_pending
enable_step_tsk x1, x2
- kernel_exit 0, ret = 1
+ kernel_exit 0
ret_fast_syscall_trace:
enable_irq // enable interrupts
- b __sys_trace_return
+ b __sys_trace_return_skipped // we already saved x0
/*
* Ok, we need to do extra processing, enter the slow path.
*/
-fast_work_pending:
- str x0, [sp, #S_X0] // returned x0
work_pending:
tbnz x1, #TIF_NEED_RESCHED, work_resched
/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
@@ -652,7 +644,7 @@ ret_to_user:
cbnz x2, work_pending
enable_step_tsk x1, x2
no_work_pending:
- kernel_exit 0, ret = 0
+ kernel_exit 0
ENDPROC(ret_to_user)
/*
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 44d6f7545505..c56956a16d3f 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -158,6 +158,7 @@ void fpsimd_thread_switch(struct task_struct *next)
void fpsimd_flush_thread(void)
{
memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
+ fpsimd_flush_task_state(current);
set_thread_flag(TIF_FOREIGN_FPSTATE);
}
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index c0ff3ce4299e..90d09eddd5b2 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -62,13 +62,8 @@
/*
* Initial memory map attributes.
*/
-#ifndef CONFIG_SMP
-#define PTE_FLAGS PTE_TYPE_PAGE | PTE_AF
-#define PMD_FLAGS PMD_TYPE_SECT | PMD_SECT_AF
-#else
#define PTE_FLAGS PTE_TYPE_PAGE | PTE_AF | PTE_SHARED
#define PMD_FLAGS PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S
-#endif
#ifdef CONFIG_ARM64_64K_PAGES
#define MM_MMUFLAGS PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS
@@ -528,6 +523,11 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
msr hstr_el2, xzr // Disable CP15 traps to EL2
#endif
+ /* EL2 debug */
+ mrs x0, pmcr_el0 // Disable debug access traps
+ ubfx x0, x0, #11, #5 // to EL2 and allow access to
+ msr mdcr_el2, x0 // all PMU counters from EL1
+
/* Stage-2 translation */
msr vttbr_el2, xzr
@@ -574,7 +574,6 @@ ENTRY(__boot_cpu_mode)
.long BOOT_CPU_MODE_EL1
.popsection
-#ifdef CONFIG_SMP
/*
 * This provides a "holding pen" for platforms to hold all secondary
 * cores until we're ready for them to initialise.
@@ -622,7 +621,6 @@ ENTRY(__secondary_switched)
mov x29, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
-#endif /* CONFIG_SMP */
/*
* Enable the MMU.
@@ -641,5 +639,13 @@ __enable_mmu:
isb
msr sctlr_el1, x0
isb
+ /*
+ * Invalidate the local I-cache so that any instructions fetched
+ * speculatively from the PoC are discarded, since they may have
+ * been dynamically patched at the PoU.
+ */
+ ic iallu
+ dsb nsh
+ isb
br x27
ENDPROC(__enable_mmu)
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 7a1a5da6c8c1..bba85c8f8037 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -48,18 +48,6 @@ static DEFINE_PER_CPU(int, stepping_kernel_bp);
static int core_num_brps;
static int core_num_wrps;
-/* Determine number of BRP registers available. */
-static int get_num_brps(void)
-{
- return ((read_cpuid(ID_AA64DFR0_EL1) >> 12) & 0xf) + 1;
-}
-
-/* Determine number of WRP registers available. */
-static int get_num_wrps(void)
-{
- return ((read_cpuid(ID_AA64DFR0_EL1) >> 20) & 0xf) + 1;
-}
-
int hw_breakpoint_slots(int type)
{
/*
@@ -156,7 +144,7 @@ static void write_wb_reg(int reg, int n, u64 val)
* Convert a breakpoint privilege level to the corresponding exception
* level.
*/
-static enum debug_el debug_exception_level(int privilege)
+static enum dbg_active_el debug_exception_level(int privilege)
{
switch (privilege) {
case AARCH64_BREAKPOINT_EL0:
@@ -230,7 +218,7 @@ static int hw_breakpoint_control(struct perf_event *bp,
struct perf_event **slots;
struct debug_info *debug_info = &current->thread.debug;
int i, max_slots, ctrl_reg, val_reg, reg_enable;
- enum debug_el dbg_el = debug_exception_level(info->ctrl.privilege);
+ enum dbg_active_el dbg_el = debug_exception_level(info->ctrl.privilege);
u32 ctrl;
if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
@@ -537,7 +525,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* exception level at the register level.
* This is used when single-stepping after a breakpoint exception.
*/
-static void toggle_bp_registers(int reg, enum debug_el el, int enable)
+static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
{
int i, max_slots, privilege;
u32 ctrl;
@@ -884,7 +872,7 @@ static int hw_breakpoint_reset_notify(struct notifier_block *self,
void *hcpu)
{
int cpu = (long)hcpu;
- if (action == CPU_ONLINE)
+ if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
return NOTIFY_OK;
}
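Both this hunk and the earlier debug-monitors one mask off CPU_TASKS_FROZEN before comparing against CPU_ONLINE, so CPUs brought back during resume (CPU_ONLINE_FROZEN) also get their debug state reset. A tiny sketch of that check, with values mirroring the kernel's notifier constants (treat them as illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Values mirroring the kernel's CPU notifier constants (illustrative). */
#define CPU_ONLINE        0x0002
#define CPU_TASKS_FROZEN  0x0010
#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)

static bool wants_reset(unsigned long action)
{
        /* Strip the FROZEN bit so resume-time onlining is handled too. */
        return (action & ~CPU_TASKS_FROZEN) == CPU_ONLINE;
}

int main(void)
{
        printf("CPU_ONLINE        -> %d\n", wants_reset(CPU_ONLINE));
        printf("CPU_ONLINE_FROZEN -> %d\n", wants_reset(CPU_ONLINE_FROZEN));
        return 0;
}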
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index dd9671cd0bb2..c08b9ad6f429 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -85,7 +85,7 @@ bool aarch64_insn_is_branch_imm(u32 insn)
aarch64_insn_is_bcond(insn));
}
-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap)
{
@@ -101,9 +101,8 @@ static void __kprobes *patch_map(void *addr, int fixmap)
return addr;
BUG_ON(!page);
- set_fixmap(fixmap, page_to_phys(page));
-
- return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
+ return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
+ (uintaddr & ~PAGE_MASK));
}
static void __kprobes patch_unmap(int fixmap)
@@ -132,13 +131,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
unsigned long flags = 0;
int ret;
- spin_lock_irqsave(&patch_lock, flags);
+ raw_spin_lock_irqsave(&patch_lock, flags);
waddr = patch_map(addr, FIX_TEXT_POKE0);
ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
patch_unmap(FIX_TEXT_POKE0);
- spin_unlock_irqrestore(&patch_lock, flags);
+ raw_spin_unlock_irqrestore(&patch_lock, flags);
return ret;
}
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 463fa2e7e34c..11dc3fd47853 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -33,9 +33,7 @@ unsigned long irq_err_count;
int arch_show_interrupts(struct seq_file *p, int prec)
{
-#ifdef CONFIG_SMP
show_ipi_list(p, prec);
-#endif
seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
return 0;
}
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
index 4f1fec7a46db..c2dd1ad3e648 100644
--- a/arch/arm64/kernel/jump_label.c
+++ b/arch/arm64/kernel/jump_label.c
@@ -28,7 +28,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
void *addr = (void *)entry->code;
u32 insn;
- if (type == JUMP_LABEL_ENABLE) {
+ if (type == JUMP_LABEL_JMP) {
insn = aarch64_insn_gen_branch_imm(entry->code,
entry->target,
AARCH64_INSN_BRANCH_NOLINK);
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index a0d10c55f307..bcac81e600b9 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -235,13 +235,13 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
static struct break_hook kgdb_brkpt_hook = {
.esr_mask = 0xffffffff,
- .esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DBG_BRK_IMM),
+ .esr_val = (u32)ESR_ELx_VAL_BRK64(KGDB_DYN_DBG_BRK_IMM),
.fn = kgdb_brk_fn
};
static struct break_hook kgdb_compiled_brkpt_hook = {
.esr_mask = 0xffffffff,
- .esr_val = DBG_ESR_VAL_BRK(KGDB_COMPILED_DBG_BRK_IMM),
+ .esr_val = (u32)ESR_ELx_VAL_BRK64(KGDB_COMPILED_DBG_BRK_IMM),
.fn = kgdb_compiled_brk_fn
};
@@ -328,9 +328,9 @@ void kgdb_arch_exit(void)
*/
struct kgdb_arch arch_kgdb_ops = {
.gdb_bpt_instr = {
- KGDB_DYN_BRK_INS_BYTE0,
- KGDB_DYN_BRK_INS_BYTE1,
- KGDB_DYN_BRK_INS_BYTE2,
- KGDB_DYN_BRK_INS_BYTE3,
+ KGDB_DYN_BRK_INS_BYTE(0),
+ KGDB_DYN_BRK_INS_BYTE(1),
+ KGDB_DYN_BRK_INS_BYTE(2),
+ KGDB_DYN_BRK_INS_BYTE(3),
}
};
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 67bf4107f6ef..876eb8df50bf 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -332,12 +332,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
AARCH64_INSN_IMM_ADR);
break;
+#ifndef CONFIG_ARM64_ERRATUM_843419
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
AARCH64_INSN_IMM_ADR);
break;
+#endif
case R_AARCH64_ADD_ABS_LO12_NC:
case R_AARCH64_LDST8_ABS_LO12_NC:
overflow_check = false;
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 4095379dc069..b3d098bd34aa 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -38,6 +38,19 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
return res->start;
}
+/**
+ * pcibios_enable_device - Enable I/O and memory.
+ * @dev: PCI device to be enabled
+ * @mask: bitmask of BARs to enable
+ */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ if (pci_has_flag(PCI_PROBE_ONLY))
+ return 0;
+
+ return pci_enable_resources(dev, mask);
+}
+
/*
* Try to assign the IRQ number from DT when adding a new device
*/
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
new file mode 100644
index 000000000000..3aa74830cc69
--- /dev/null
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -0,0 +1,196 @@
+/*
+ * arm64 callchain support
+ *
+ * Copyright (C) 2015 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+#include <asm/stacktrace.h>
+
+struct frame_tail {
+ struct frame_tail __user *fp;
+ unsigned long lr;
+} __attribute__((packed));
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static struct frame_tail __user *
+user_backtrace(struct frame_tail __user *tail,
+ struct perf_callchain_entry *entry)
+{
+ struct frame_tail buftail;
+ unsigned long err;
+
+ /* Also check accessibility of one struct frame_tail beyond */
+ if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+ return NULL;
+
+ pagefault_disable();
+ err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+ pagefault_enable();
+
+ if (err)
+ return NULL;
+
+ perf_callchain_store(entry, buftail.lr);
+
+ /*
+ * Frame pointers should strictly progress back up the stack
+ * (towards higher addresses).
+ */
+ if (tail >= buftail.fp)
+ return NULL;
+
+ return buftail.fp;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * The registers we're interested in are at the end of the variable
+ * length saved register structure. The fp points at the end of this
+ * structure so the address of this struct is:
+ * (struct compat_frame_tail *)(xxx->fp)-1
+ *
+ * This code has been adapted from the ARM OProfile support.
+ */
+struct compat_frame_tail {
+ compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
+ u32 sp;
+ u32 lr;
+} __attribute__((packed));
+
+static struct compat_frame_tail __user *
+compat_user_backtrace(struct compat_frame_tail __user *tail,
+ struct perf_callchain_entry *entry)
+{
+ struct compat_frame_tail buftail;
+ unsigned long err;
+
+ /* Also check accessibility of one struct frame_tail beyond */
+ if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+ return NULL;
+
+ pagefault_disable();
+ err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+ pagefault_enable();
+
+ if (err)
+ return NULL;
+
+ perf_callchain_store(entry, buftail.lr);
+
+ /*
+ * Frame pointers should strictly progress back up the stack
+ * (towards higher addresses).
+ */
+ if (tail + 1 >= (struct compat_frame_tail __user *)
+ compat_ptr(buftail.fp))
+ return NULL;
+
+ return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
+}
+#endif /* CONFIG_COMPAT */
+
+void perf_callchain_user(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
+{
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
+
+ perf_callchain_store(entry, regs->pc);
+
+ if (!compat_user_mode(regs)) {
+ /* AARCH64 mode */
+ struct frame_tail __user *tail;
+
+ tail = (struct frame_tail __user *)regs->regs[29];
+
+ while (entry->nr < PERF_MAX_STACK_DEPTH &&
+ tail && !((unsigned long)tail & 0xf))
+ tail = user_backtrace(tail, entry);
+ } else {
+#ifdef CONFIG_COMPAT
+ /* AARCH32 compat mode */
+ struct compat_frame_tail __user *tail;
+
+ tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
+
+ while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+ tail && !((unsigned long)tail & 0x3))
+ tail = compat_user_backtrace(tail, entry);
+#endif
+ }
+}
+
+/*
+ * Gets called by walk_stackframe() for every stackframe. This will be called
+ * whilst unwinding the stackframe and is like a subroutine return so we use
+ * the PC.
+ */
+static int callchain_trace(struct stackframe *frame, void *data)
+{
+ struct perf_callchain_entry *entry = data;
+ perf_callchain_store(entry, frame->pc);
+ return 0;
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+ struct pt_regs *regs)
+{
+ struct stackframe frame;
+
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
+
+ frame.fp = regs->regs[29];
+ frame.sp = regs->sp;
+ frame.pc = regs->pc;
+
+ walk_stackframe(&frame, callchain_trace, entry);
+}
+
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
+ return perf_guest_cbs->get_guest_ip();
+
+ return instruction_pointer(regs);
+}
+
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+ int misc = 0;
+
+ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ if (perf_guest_cbs->is_user_mode())
+ misc |= PERF_RECORD_MISC_GUEST_USER;
+ else
+ misc |= PERF_RECORD_MISC_GUEST_KERNEL;
+ } else {
+ if (user_mode(regs))
+ misc |= PERF_RECORD_MISC_USER;
+ else
+ misc |= PERF_RECORD_MISC_KERNEL;
+ }
+
+ return misc;
+}
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index b31e9a4b6275..f9a74d4fff3b 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -25,7 +25,7 @@
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/export.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -36,7 +36,6 @@
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
-#include <asm/stacktrace.h>
/*
* ARMv8 supports a maximum of 32 events.
@@ -78,6 +77,16 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
#define CACHE_OP_UNSUPPORTED 0xFFFF
+#define PERF_MAP_ALL_UNSUPPORTED \
+ [0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
+
+#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
+[0 ... C(MAX) - 1] = { \
+ [0 ... C(OP_MAX) - 1] = { \
+ [0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \
+ }, \
+}
+
static int
armpmu_map_cache_event(const unsigned (*cache_map)
[PERF_COUNT_HW_CACHE_MAX]
@@ -435,10 +444,8 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
unsigned int i, irqs;
struct platform_device *pmu_device = armpmu->plat_device;
- if (!pmu_device) {
- pr_err("no PMU device registered\n");
+ if (!pmu_device)
return -ENODEV;
- }
irqs = min(pmu_device->num_resources, num_possible_cpus());
if (!irqs) {
@@ -703,118 +710,28 @@ enum armv8_pmuv3_perf_types {
/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
[PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
- [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
- [C(L1D)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
- [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(L1I)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(LL)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(DTLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(ITLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(BPU)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
- [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(NODE)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+
+ [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
};
/*
@@ -1337,7 +1254,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
}
for_each_possible_cpu(cpu)
- if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+ if (dn == of_cpu_device_node_get(cpu))
break;
if (cpu >= nr_cpu_ids) {
@@ -1415,180 +1332,3 @@ static int __init init_hw_perf_events(void)
}
early_initcall(init_hw_perf_events);
-/*
- * Callchain handling code.
- */
-struct frame_tail {
- struct frame_tail __user *fp;
- unsigned long lr;
-} __attribute__((packed));
-
-/*
- * Get the return address for a single stackframe and return a pointer to the
- * next frame tail.
- */
-static struct frame_tail __user *
-user_backtrace(struct frame_tail __user *tail,
- struct perf_callchain_entry *entry)
-{
- struct frame_tail buftail;
- unsigned long err;
-
- /* Also check accessibility of one struct frame_tail beyond */
- if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
- return NULL;
-
- pagefault_disable();
- err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
- pagefault_enable();
-
- if (err)
- return NULL;
-
- perf_callchain_store(entry, buftail.lr);
-
- /*
- * Frame pointers should strictly progress back up the stack
- * (towards higher addresses).
- */
- if (tail >= buftail.fp)
- return NULL;
-
- return buftail.fp;
-}
-
-#ifdef CONFIG_COMPAT
-/*
- * The registers we're interested in are at the end of the variable
- * length saved register structure. The fp points at the end of this
- * structure so the address of this struct is:
- * (struct compat_frame_tail *)(xxx->fp)-1
- *
- * This code has been adapted from the ARM OProfile support.
- */
-struct compat_frame_tail {
- compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
- u32 sp;
- u32 lr;
-} __attribute__((packed));
-
-static struct compat_frame_tail __user *
-compat_user_backtrace(struct compat_frame_tail __user *tail,
- struct perf_callchain_entry *entry)
-{
- struct compat_frame_tail buftail;
- unsigned long err;
-
- /* Also check accessibility of one struct frame_tail beyond */
- if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
- return NULL;
-
- pagefault_disable();
- err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
- pagefault_enable();
-
- if (err)
- return NULL;
-
- perf_callchain_store(entry, buftail.lr);
-
- /*
- * Frame pointers should strictly progress back up the stack
- * (towards higher addresses).
- */
- if (tail + 1 >= (struct compat_frame_tail __user *)
- compat_ptr(buftail.fp))
- return NULL;
-
- return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
-}
-#endif /* CONFIG_COMPAT */
-
-void perf_callchain_user(struct perf_callchain_entry *entry,
- struct pt_regs *regs)
-{
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- /* We don't support guest os callchain now */
- return;
- }
-
- perf_callchain_store(entry, regs->pc);
-
- if (!compat_user_mode(regs)) {
- /* AARCH64 mode */
- struct frame_tail __user *tail;
-
- tail = (struct frame_tail __user *)regs->regs[29];
-
- while (entry->nr < PERF_MAX_STACK_DEPTH &&
- tail && !((unsigned long)tail & 0xf))
- tail = user_backtrace(tail, entry);
- } else {
-#ifdef CONFIG_COMPAT
- /* AARCH32 compat mode */
- struct compat_frame_tail __user *tail;
-
- tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
-
- while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
- tail && !((unsigned long)tail & 0x3))
- tail = compat_user_backtrace(tail, entry);
-#endif
- }
-}
-
-/*
- * Gets called by walk_stackframe() for every stackframe. This will be called
- * whist unwinding the stackframe and is like a subroutine return so we use
- * the PC.
- */
-static int callchain_trace(struct stackframe *frame, void *data)
-{
- struct perf_callchain_entry *entry = data;
- perf_callchain_store(entry, frame->pc);
- return 0;
-}
-
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
- struct pt_regs *regs)
-{
- struct stackframe frame;
-
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- /* We don't support guest os callchain now */
- return;
- }
-
- frame.fp = regs->regs[29];
- frame.sp = regs->sp;
- frame.pc = regs->pc;
-
- walk_stackframe(&frame, callchain_trace, entry);
-}
-
-unsigned long perf_instruction_pointer(struct pt_regs *regs)
-{
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
- return perf_guest_cbs->get_guest_ip();
-
- return instruction_pointer(regs);
-}
-
-unsigned long perf_misc_flags(struct pt_regs *regs)
-{
- int misc = 0;
-
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- if (perf_guest_cbs->is_user_mode())
- misc |= PERF_RECORD_MISC_GUEST_USER;
- else
- misc |= PERF_RECORD_MISC_GUEST_KERNEL;
- } else {
- if (user_mode(regs))
- misc |= PERF_RECORD_MISC_USER;
- else
- misc |= PERF_RECORD_MISC_KERNEL;
- }
-
- return misc;
-}
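The PERF_MAP_ALL_UNSUPPORTED / PERF_CACHE_MAP_ALL_UNSUPPORTED macros introduced above rely on GNU C range designated initializers: every slot defaults to "unsupported" and only the implemented events are overridden afterwards, which is what lets the large cache map collapse to a few lines. A stand-alone sketch of the same idiom (event encodings here are made up):

#include <stdio.h>

#define HW_OP_UNSUPPORTED 0xffff
#define NR_EVENTS 8

/* Default every slot to "unsupported", then override the ones we have.
 * [a ... b] = x is a GNU C range designated initializer; later entries win. */
static const unsigned event_map[NR_EVENTS] = {
        [0 ... NR_EVENTS - 1] = HW_OP_UNSUPPORTED,
        [0] = 0x11,             /* cycles */
        [1] = 0x08,             /* instructions */
};

int main(void)
{
        for (int i = 0; i < NR_EVENTS; i++)
                printf("event %d -> %#x\n", i, event_map[i]);
        return 0;
}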
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 869f202748e8..aa94a88f6279 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -18,23 +18,17 @@
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
-#include <linux/reboot.h>
-#include <linux/pm.h>
#include <linux/delay.h>
+#include <linux/psci.h>
#include <linux/slab.h>
+
#include <uapi/linux/psci.h>
#include <asm/compiler.h>
-#include <asm/cputype.h>
#include <asm/cpu_ops.h>
#include <asm/errno.h>
-#include <asm/psci.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
-#include <asm/system_misc.h>
-
-#define PSCI_POWER_STATE_TYPE_STANDBY 0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
static bool psci_power_state_loses_context(u32 state)
{
@@ -50,122 +44,8 @@ static bool psci_power_state_is_valid(u32 state)
return !(state & ~valid_mask);
}
-/*
- * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
- * calls to its resident CPU, so we must avoid issuing those. We never migrate
- * a Trusted OS even if it claims to be capable of migration -- doing so will
- * require cooperation with a Trusted OS driver.
- */
-static int resident_cpu = -1;
-
-struct psci_operations {
- int (*cpu_suspend)(u32 state, unsigned long entry_point);
- int (*cpu_off)(u32 state);
- int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
- int (*migrate)(unsigned long cpuid);
- int (*affinity_info)(unsigned long target_affinity,
- unsigned long lowest_affinity_level);
- int (*migrate_info_type)(void);
-};
-
-static struct psci_operations psci_ops;
-
-typedef unsigned long (psci_fn)(unsigned long, unsigned long,
- unsigned long, unsigned long);
-asmlinkage psci_fn __invoke_psci_fn_hvc;
-asmlinkage psci_fn __invoke_psci_fn_smc;
-static psci_fn *invoke_psci_fn;
-
-enum psci_function {
- PSCI_FN_CPU_SUSPEND,
- PSCI_FN_CPU_ON,
- PSCI_FN_CPU_OFF,
- PSCI_FN_MIGRATE,
- PSCI_FN_MAX,
-};
-
static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
-static u32 psci_function_id[PSCI_FN_MAX];
-
-static int psci_to_linux_errno(int errno)
-{
- switch (errno) {
- case PSCI_RET_SUCCESS:
- return 0;
- case PSCI_RET_NOT_SUPPORTED:
- return -EOPNOTSUPP;
- case PSCI_RET_INVALID_PARAMS:
- return -EINVAL;
- case PSCI_RET_DENIED:
- return -EPERM;
- };
-
- return -EINVAL;
-}
-
-static u32 psci_get_version(void)
-{
- return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
-}
-
-static int psci_cpu_suspend(u32 state, unsigned long entry_point)
-{
- int err;
- u32 fn;
-
- fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
- err = invoke_psci_fn(fn, state, entry_point, 0);
- return psci_to_linux_errno(err);
-}
-
-static int psci_cpu_off(u32 state)
-{
- int err;
- u32 fn;
-
- fn = psci_function_id[PSCI_FN_CPU_OFF];
- err = invoke_psci_fn(fn, state, 0, 0);
- return psci_to_linux_errno(err);
-}
-
-static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
-{
- int err;
- u32 fn;
-
- fn = psci_function_id[PSCI_FN_CPU_ON];
- err = invoke_psci_fn(fn, cpuid, entry_point, 0);
- return psci_to_linux_errno(err);
-}
-
-static int psci_migrate(unsigned long cpuid)
-{
- int err;
- u32 fn;
-
- fn = psci_function_id[PSCI_FN_MIGRATE];
- err = invoke_psci_fn(fn, cpuid, 0, 0);
- return psci_to_linux_errno(err);
-}
-
-static int psci_affinity_info(unsigned long target_affinity,
- unsigned long lowest_affinity_level)
-{
- return invoke_psci_fn(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity,
- lowest_affinity_level, 0);
-}
-
-static int psci_migrate_info_type(void)
-{
- return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
-}
-
-static unsigned long psci_migrate_info_up_cpu(void)
-{
- return invoke_psci_fn(PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU, 0, 0, 0);
-}
-
static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
{
int i, ret, count = 0;
@@ -230,240 +110,6 @@ free_mem:
return ret;
}
-static int get_set_conduit_method(struct device_node *np)
-{
- const char *method;
-
- pr_info("probing for conduit method from DT.\n");
-
- if (of_property_read_string(np, "method", &method)) {
- pr_warn("missing \"method\" property\n");
- return -ENXIO;
- }
-
- if (!strcmp("hvc", method)) {
- invoke_psci_fn = __invoke_psci_fn_hvc;
- } else if (!strcmp("smc", method)) {
- invoke_psci_fn = __invoke_psci_fn_smc;
- } else {
- pr_warn("invalid \"method\" property: %s\n", method);
- return -EINVAL;
- }
- return 0;
-}
-
-static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
-{
- invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
-}
-
-static void psci_sys_poweroff(void)
-{
- invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
-}
-
-/*
- * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
- * return DENIED (which would be fatal).
- */
-static void __init psci_init_migrate(void)
-{
- unsigned long cpuid;
- int type, cpu;
-
- type = psci_ops.migrate_info_type();
-
- if (type == PSCI_0_2_TOS_MP) {
- pr_info("Trusted OS migration not required\n");
- return;
- }
-
- if (type == PSCI_RET_NOT_SUPPORTED) {
- pr_info("MIGRATE_INFO_TYPE not supported.\n");
- return;
- }
-
- if (type != PSCI_0_2_TOS_UP_MIGRATE &&
- type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
- pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
- return;
- }
-
- cpuid = psci_migrate_info_up_cpu();
- if (cpuid & ~MPIDR_HWID_BITMASK) {
- pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
- cpuid);
- return;
- }
-
- cpu = get_logical_index(cpuid);
- resident_cpu = cpu >= 0 ? cpu : -1;
-
- pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
-}
-
-static void __init psci_0_2_set_functions(void)
-{
- pr_info("Using standard PSCI v0.2 function IDs\n");
- psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
- psci_ops.cpu_suspend = psci_cpu_suspend;
-
- psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
- psci_ops.cpu_off = psci_cpu_off;
-
- psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON;
- psci_ops.cpu_on = psci_cpu_on;
-
- psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
- psci_ops.migrate = psci_migrate;
-
- psci_ops.affinity_info = psci_affinity_info;
-
- psci_ops.migrate_info_type = psci_migrate_info_type;
-
- arm_pm_restart = psci_sys_reset;
-
- pm_power_off = psci_sys_poweroff;
-}
-
-/*
- * Probe function for PSCI firmware versions >= 0.2
- */
-static int __init psci_probe(void)
-{
- u32 ver = psci_get_version();
-
- pr_info("PSCIv%d.%d detected in firmware.\n",
- PSCI_VERSION_MAJOR(ver),
- PSCI_VERSION_MINOR(ver));
-
- if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
- pr_err("Conflicting PSCI version detected.\n");
- return -EINVAL;
- }
-
- psci_0_2_set_functions();
-
- psci_init_migrate();
-
- return 0;
-}
-
-typedef int (*psci_initcall_t)(const struct device_node *);
-
-/*
- * PSCI init function for PSCI versions >=0.2
- *
- * Probe based on PSCI PSCI_VERSION function
- */
-static int __init psci_0_2_init(struct device_node *np)
-{
- int err;
-
- err = get_set_conduit_method(np);
-
- if (err)
- goto out_put_node;
- /*
- * Starting with v0.2, the PSCI specification introduced a call
- * (PSCI_VERSION) that allows probing the firmware version, so
- * that PSCI function IDs and version specific initialization
- * can be carried out according to the specific version reported
- * by firmware
- */
- err = psci_probe();
-
-out_put_node:
- of_node_put(np);
- return err;
-}
-
-/*
- * PSCI < v0.2 get PSCI Function IDs via DT.
- */
-static int __init psci_0_1_init(struct device_node *np)
-{
- u32 id;
- int err;
-
- err = get_set_conduit_method(np);
-
- if (err)
- goto out_put_node;
-
- pr_info("Using PSCI v0.1 Function IDs from DT\n");
-
- if (!of_property_read_u32(np, "cpu_suspend", &id)) {
- psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
- psci_ops.cpu_suspend = psci_cpu_suspend;
- }
-
- if (!of_property_read_u32(np, "cpu_off", &id)) {
- psci_function_id[PSCI_FN_CPU_OFF] = id;
- psci_ops.cpu_off = psci_cpu_off;
- }
-
- if (!of_property_read_u32(np, "cpu_on", &id)) {
- psci_function_id[PSCI_FN_CPU_ON] = id;
- psci_ops.cpu_on = psci_cpu_on;
- }
-
- if (!of_property_read_u32(np, "migrate", &id)) {
- psci_function_id[PSCI_FN_MIGRATE] = id;
- psci_ops.migrate = psci_migrate;
- }
-
-out_put_node:
- of_node_put(np);
- return err;
-}
-
-static const struct of_device_id psci_of_match[] __initconst = {
- { .compatible = "arm,psci", .data = psci_0_1_init},
- { .compatible = "arm,psci-0.2", .data = psci_0_2_init},
- {},
-};
-
-int __init psci_dt_init(void)
-{
- struct device_node *np;
- const struct of_device_id *matched_np;
- psci_initcall_t init_fn;
-
- np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
-
- if (!np)
- return -ENODEV;
-
- init_fn = (psci_initcall_t)matched_np->data;
- return init_fn(np);
-}
-
-#ifdef CONFIG_ACPI
-/*
- * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
- * explicitly clarified in SBBR
- */
-int __init psci_acpi_init(void)
-{
- if (!acpi_psci_present()) {
- pr_info("is not implemented in ACPI.\n");
- return -EOPNOTSUPP;
- }
-
- pr_info("probing for conduit method from ACPI.\n");
-
- if (acpi_psci_use_hvc())
- invoke_psci_fn = __invoke_psci_fn_hvc;
- else
- invoke_psci_fn = __invoke_psci_fn_smc;
-
- return psci_probe();
-}
-#endif
-
-#ifdef CONFIG_SMP
-
static int __init cpu_psci_cpu_init(unsigned int cpu)
{
return 0;
@@ -489,11 +135,6 @@ static int cpu_psci_cpu_boot(unsigned int cpu)
}
#ifdef CONFIG_HOTPLUG_CPU
-static bool psci_tos_resident_on(int cpu)
-{
- return cpu == resident_cpu;
-}
-
static int cpu_psci_cpu_disable(unsigned int cpu)
{
/* Fail early if we don't have CPU_OFF support */
@@ -550,7 +191,6 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
return -ETIMEDOUT;
}
#endif
-#endif
static int psci_suspend_finisher(unsigned long index)
{
@@ -585,7 +225,6 @@ const struct cpu_operations cpu_psci_ops = {
.cpu_init_idle = cpu_psci_cpu_init_idle,
.cpu_suspend = cpu_psci_cpu_suspend,
#endif
-#ifdef CONFIG_SMP
.cpu_init = cpu_psci_cpu_init,
.cpu_prepare = cpu_psci_cpu_prepare,
.cpu_boot = cpu_psci_cpu_boot,
@@ -594,6 +233,5 @@ const struct cpu_operations cpu_psci_ops = {
.cpu_die = cpu_psci_cpu_die,
.cpu_kill = cpu_psci_cpu_kill,
#endif
-#endif
};
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index d882b833dbdb..1971f491bb90 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -826,6 +826,30 @@ static int compat_vfp_set(struct task_struct *target,
return ret;
}
+static int compat_tls_get(struct task_struct *target,
+ const struct user_regset *regset, unsigned int pos,
+ unsigned int count, void *kbuf, void __user *ubuf)
+{
+ compat_ulong_t tls = (compat_ulong_t)target->thread.tp_value;
+ return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+}
+
+static int compat_tls_set(struct task_struct *target,
+ const struct user_regset *regset, unsigned int pos,
+ unsigned int count, const void *kbuf,
+ const void __user *ubuf)
+{
+ int ret;
+ compat_ulong_t tls;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
+ if (ret)
+ return ret;
+
+ target->thread.tp_value = tls;
+ return ret;
+}
+
static const struct user_regset aarch32_regsets[] = {
[REGSET_COMPAT_GPR] = {
.core_note_type = NT_PRSTATUS,
@@ -850,6 +874,64 @@ static const struct user_regset_view user_aarch32_view = {
.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};
+static const struct user_regset aarch32_ptrace_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = COMPAT_ELF_NGREG,
+ .size = sizeof(compat_elf_greg_t),
+ .align = sizeof(compat_elf_greg_t),
+ .get = compat_gpr_get,
+ .set = compat_gpr_set
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_ARM_VFP,
+ .n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
+ .size = sizeof(compat_ulong_t),
+ .align = sizeof(compat_ulong_t),
+ .get = compat_vfp_get,
+ .set = compat_vfp_set
+ },
+ [REGSET_TLS] = {
+ .core_note_type = NT_ARM_TLS,
+ .n = 1,
+ .size = sizeof(compat_ulong_t),
+ .align = sizeof(compat_ulong_t),
+ .get = compat_tls_get,
+ .set = compat_tls_set,
+ },
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ [REGSET_HW_BREAK] = {
+ .core_note_type = NT_ARM_HW_BREAK,
+ .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .get = hw_break_get,
+ .set = hw_break_set,
+ },
+ [REGSET_HW_WATCH] = {
+ .core_note_type = NT_ARM_HW_WATCH,
+ .n = sizeof(struct user_hwdebug_state) / sizeof(u32),
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .get = hw_break_get,
+ .set = hw_break_set,
+ },
+#endif
+ [REGSET_SYSTEM_CALL] = {
+ .core_note_type = NT_ARM_SYSTEM_CALL,
+ .n = 1,
+ .size = sizeof(int),
+ .align = sizeof(int),
+ .get = system_call_get,
+ .set = system_call_set,
+ },
+};
+
+static const struct user_regset_view user_aarch32_ptrace_view = {
+ .name = "aarch32", .e_machine = EM_ARM,
+ .regsets = aarch32_ptrace_regsets, .n = ARRAY_SIZE(aarch32_ptrace_regsets)
+};
+
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
compat_ulong_t __user *ret)
{
@@ -1109,8 +1191,16 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
- if (is_compat_thread(task_thread_info(task)))
+ /*
+ * Core dumping of 32-bit tasks or compat ptrace requests must use the
+ * user_aarch32_view compatible with arm32. Native ptrace requests on
+ * 32-bit children use an extended user_aarch32_ptrace_view to allow
+ * access to the TLS register.
+ */
+ if (is_compat_task())
return &user_aarch32_view;
+ else if (is_compat_thread(task_thread_info(task)))
+ return &user_aarch32_ptrace_view;
#endif
return &user_aarch64_view;
}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index f3067d4d4e35..232247945b1c 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -34,7 +34,6 @@
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
-#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
@@ -46,6 +45,7 @@
#include <linux/of_platform.h>
#include <linux/efi.h>
#include <linux/personality.h>
+#include <linux/psci.h>
#include <asm/acpi.h>
#include <asm/fixmap.h>
@@ -61,9 +61,7 @@
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/memblock.h>
-#include <asm/psci.h>
#include <asm/efi.h>
-#include <asm/virt.h>
#include <asm/xen/hypervisor.h>
unsigned long elf_hwcap __read_mostly;
@@ -131,7 +129,6 @@ bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
}
struct mpidr_hash mpidr_hash;
-#ifdef CONFIG_SMP
/**
* smp_build_mpidr_hash - Pre-compute shifts required at each affinity
* level in order to build a linear index from an
@@ -197,35 +194,11 @@ static void __init smp_build_mpidr_hash(void)
pr_warn("Large number of MPIDR hash buckets detected\n");
__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
-#endif
-
-static void __init hyp_mode_check(void)
-{
- if (is_hyp_mode_available())
- pr_info("CPU: All CPU(s) started at EL2\n");
- else if (is_hyp_mode_mismatched())
- WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
- "CPU: CPUs started in inconsistent modes");
- else
- pr_info("CPU: All CPU(s) started at EL1\n");
-}
-
-void __init do_post_cpus_up_work(void)
-{
- hyp_mode_check();
- apply_alternatives_all();
-}
-
-#ifdef CONFIG_UP_LATE_INIT
-void __init up_late_init(void)
-{
- do_post_cpus_up_work();
-}
-#endif /* CONFIG_UP_LATE_INIT */
static void __init setup_processor(void)
{
- u64 features, block;
+ u64 features;
+ s64 block;
u32 cwg;
int cls;
@@ -255,8 +228,8 @@ static void __init setup_processor(void)
* for non-negative values. Negative values are reserved.
*/
features = read_cpuid(ID_AA64ISAR0_EL1);
- block = (features >> 4) & 0xf;
- if (!(block & 0x8)) {
+ block = cpuid_feature_extract_field(features, 4);
+ if (block > 0) {
switch (block) {
default:
case 2:
@@ -268,26 +241,36 @@ static void __init setup_processor(void)
}
}
- block = (features >> 8) & 0xf;
- if (block && !(block & 0x8))
+ if (cpuid_feature_extract_field(features, 8) > 0)
elf_hwcap |= HWCAP_SHA1;
- block = (features >> 12) & 0xf;
- if (block && !(block & 0x8))
+ if (cpuid_feature_extract_field(features, 12) > 0)
elf_hwcap |= HWCAP_SHA2;
- block = (features >> 16) & 0xf;
- if (block && !(block & 0x8))
+ if (cpuid_feature_extract_field(features, 16) > 0)
elf_hwcap |= HWCAP_CRC32;
+ block = cpuid_feature_extract_field(features, 20);
+ if (block > 0) {
+ switch (block) {
+ default:
+ case 2:
+ elf_hwcap |= HWCAP_ATOMICS;
+ case 1:
+ /* RESERVED */
+ case 0:
+ break;
+ }
+ }
+
#ifdef CONFIG_COMPAT
/*
* ID_ISAR5_EL1 carries similar information as above, but pertaining to
- * the Aarch32 32-bit execution state.
+ * the AArch32 32-bit execution state.
*/
features = read_cpuid(ID_ISAR5_EL1);
- block = (features >> 4) & 0xf;
- if (!(block & 0x8)) {
+ block = cpuid_feature_extract_field(features, 4);
+ if (block > 0) {
switch (block) {
default:
case 2:
@@ -299,16 +282,13 @@ static void __init setup_processor(void)
}
}
- block = (features >> 8) & 0xf;
- if (block && !(block & 0x8))
+ if (cpuid_feature_extract_field(features, 8) > 0)
compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;
- block = (features >> 12) & 0xf;
- if (block && !(block & 0x8))
+ if (cpuid_feature_extract_field(features, 12) > 0)
compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;
- block = (features >> 16) & 0xf;
- if (block && !(block & 0x8))
+ if (cpuid_feature_extract_field(features, 16) > 0)
compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}
@@ -359,6 +339,69 @@ static void __init request_standard_resources(void)
}
}
+#ifdef CONFIG_BLK_DEV_INITRD
+/*
+ * Relocate initrd if it is not completely within the linear mapping.
+ * This would be the case if mem= cuts out all or part of it.
+ */
+static void __init relocate_initrd(void)
+{
+ phys_addr_t orig_start = __virt_to_phys(initrd_start);
+ phys_addr_t orig_end = __virt_to_phys(initrd_end);
+ phys_addr_t ram_end = memblock_end_of_DRAM();
+ phys_addr_t new_start;
+ unsigned long size, to_free = 0;
+ void *dest;
+
+ if (orig_end <= ram_end)
+ return;
+
+ /*
+ * Any of the original initrd which overlaps the linear map should
+ * be freed after relocating.
+ */
+ if (orig_start < ram_end)
+ to_free = ram_end - orig_start;
+
+ size = orig_end - orig_start;
+ if (!size)
+ return;
+
+ /* initrd needs to be relocated completely inside linear mapping */
+ new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn),
+ size, PAGE_SIZE);
+ if (!new_start)
+ panic("Cannot relocate initrd of size %ld\n", size);
+ memblock_reserve(new_start, size);
+
+ initrd_start = __phys_to_virt(new_start);
+ initrd_end = initrd_start + size;
+
+ pr_info("Moving initrd from [%llx-%llx] to [%llx-%llx]\n",
+ orig_start, orig_start + size - 1,
+ new_start, new_start + size - 1);
+
+ dest = (void *)initrd_start;
+
+ if (to_free) {
+ memcpy(dest, (void *)__phys_to_virt(orig_start), to_free);
+ dest += to_free;
+ }
+
+ copy_from_early_mem(dest, orig_start + to_free, size - to_free);
+
+ if (to_free) {
+ pr_info("Freeing original RAMDISK from [%llx-%llx]\n",
+ orig_start, orig_start + to_free - 1);
+ memblock_free(orig_start, to_free);
+ }
+}
+#else
+static inline void __init relocate_initrd(void)
+{
+}
+#endif
+
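A hypothetical worked example of the split copy above (numbers invented; only the arithmetic mirrors the function):

    orig_start = 0x80000000    orig_end = 0x88000000
    ram_end    = 0x84000000    (mem= limit)
    to_free    = ram_end  - orig_start = 0x04000000   /* still in linear map */
    size       = orig_end - orig_start = 0x08000000
    /* memcpy() the first to_free bytes straight from the linear map, then
     * copy_from_early_mem() pulls in the unmapped size - to_free tail. */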
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
void __init setup_arch(char **cmdline_p)
@@ -392,6 +435,7 @@ void __init setup_arch(char **cmdline_p)
acpi_boot_table_init();
paging_init();
+ relocate_initrd();
request_standard_resources();
early_ioremap_reset();
@@ -405,10 +449,8 @@ void __init setup_arch(char **cmdline_p)
xen_early_init();
cpu_read_bootcpu_ops();
-#ifdef CONFIG_SMP
smp_init_cpus();
smp_build_mpidr_hash();
-#endif
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
@@ -427,8 +469,13 @@ void __init setup_arch(char **cmdline_p)
static int __init arm64_device_init(void)
{
- of_iommu_init();
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+ if (of_have_populated_dt()) {
+ of_iommu_init();
+ of_platform_populate(NULL, of_default_bus_match_table,
+ NULL, NULL);
+ } else if (acpi_disabled) {
+ pr_crit("Device tree not populated\n");
+ }
return 0;
}
arch_initcall_sync(arm64_device_init);
@@ -456,6 +503,7 @@ static const char *hwcap_str[] = {
"sha1",
"sha2",
"crc32",
+ "atomics",
NULL
};
@@ -508,9 +556,7 @@ static int c_show(struct seq_file *m, void *v)
* online processors, looking for lines beginning with
* "processor". Give glibc what it expects.
*/
-#ifdef CONFIG_SMP
seq_printf(m, "processor\t: %d\n", i);
-#endif
/*
* Dump out the common processor features in a single line.
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 948f0ad2de23..71ef6dc89ae5 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -212,14 +212,32 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
/*
* VFP save/restore code.
+ *
+ * We have to be careful with endianness, since the fpsimd context-switch
+ * code operates on 128-bit (Q) register values whereas the compat ABI
+ * uses an array of 64-bit (D) registers. Consequently, we need to swap
+ * the two halves of each Q register when running on a big-endian CPU.
*/
+union __fpsimd_vreg {
+ __uint128_t raw;
+ struct {
+#ifdef __AARCH64EB__
+ u64 hi;
+ u64 lo;
+#else
+ u64 lo;
+ u64 hi;
+#endif
+ };
+};
+
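Expanding one iteration of the copy loop below makes the mapping concrete (illustration only): AArch32 D(2n) is bits [63:0] of AArch64 Q(n), and D(2n+1) is bits [127:64], on either endianness.

    /* Illustration: the i == 0 iteration of compat_preserve_vfp_context(). */
    union __fpsimd_vreg vreg = { .raw = fpsimd->vregs[0] };   /* Q0 */

    __put_user_error(vreg.lo, &frame->ufp.fpregs[0], err);    /* D0 = Q0[63:0]   */
    __put_user_error(vreg.hi, &frame->ufp.fpregs[1], err);    /* D1 = Q0[127:64] */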
static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
{
struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
compat_ulong_t magic = VFP_MAGIC;
compat_ulong_t size = VFP_STORAGE_SIZE;
compat_ulong_t fpscr, fpexc;
- int err = 0;
+ int i, err = 0;
/*
* Save the hardware registers to the fpsimd_state structure.
@@ -235,10 +253,15 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
/*
* Now copy the FP registers. Since the registers are packed,
* we can copy the prefix we want (V0-V15) as it is.
- * FIXME: Won't work if big endian.
*/
- err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs,
- sizeof(frame->ufp.fpregs));
+ for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
+ union __fpsimd_vreg vreg = {
+ .raw = fpsimd->vregs[i >> 1],
+ };
+
+ __put_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
+ __put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
+ }
/* Create an AArch32 fpscr from the fpsr and the fpcr. */
fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
@@ -263,7 +286,7 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
compat_ulong_t magic = VFP_MAGIC;
compat_ulong_t size = VFP_STORAGE_SIZE;
compat_ulong_t fpscr;
- int err = 0;
+ int i, err = 0;
__get_user_error(magic, &frame->magic, err);
__get_user_error(size, &frame->size, err);
@@ -273,12 +296,14 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
return -EINVAL;
- /*
- * Copy the FP registers into the start of the fpsimd_state.
- * FIXME: Won't work if big endian.
- */
- err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs,
- sizeof(frame->ufp.fpregs));
+ /* Copy the FP registers into the start of the fpsimd_state. */
+ for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) {
+ union __fpsimd_vreg vreg;
+
+ __get_user_error(vreg.lo, &frame->ufp.fpregs[i], err);
+ __get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err);
+ fpsimd.vregs[i >> 1] = vreg.raw;
+ }
/* Extract the fpsr and the fpcr from the fpscr */
__get_user_error(fpscr, &frame->ufp.fpscr, err);
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 803cfea41962..f586f7c875e2 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -82,7 +82,6 @@ ENTRY(__cpu_suspend_enter)
str x2, [x0, #CPU_CTX_SP]
ldr x1, =sleep_save_sp
ldr x1, [x1, #SLEEP_SAVE_SP_VIRT]
-#ifdef CONFIG_SMP
mrs x7, mpidr_el1
ldr x9, =mpidr_hash
ldr x10, [x9, #MPIDR_HASH_MASK]
@@ -94,7 +93,6 @@ ENTRY(__cpu_suspend_enter)
ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
add x1, x1, x8, lsl #3
-#endif
bl __cpu_suspend_save
/*
* Grab suspend finisher in x20 and its argument in x19
@@ -135,6 +133,14 @@ ENTRY(cpu_resume_mmu)
ldr x3, =cpu_resume_after_mmu
msr sctlr_el1, x0 // restore sctlr_el1
isb
+ /*
+ * Invalidate the local I-cache so that any instructions fetched
+ * speculatively from the PoC are discarded, since they may have
+ * been dynamically patched at the PoU.
+ */
+ ic iallu
+ dsb nsh
+ isb
br x3 // global jump to virtual address
ENDPROC(cpu_resume_mmu)
.popsection
@@ -151,7 +157,6 @@ ENDPROC(cpu_resume_after_mmu)
ENTRY(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly
-#ifdef CONFIG_SMP
mrs x1, mpidr_el1
adrp x8, mpidr_hash
add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
@@ -161,9 +166,6 @@ ENTRY(cpu_resume)
ldp w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
/* x7 contains hash index, let's use it to grab context pointer */
-#else
- mov x7, xzr
-#endif
ldr_l x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
ldr x0, [x0, x7, lsl #3]
/* load sp from context */
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 50fb4696654e..dbdaacddd9a5 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -52,6 +52,7 @@
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
+#include <asm/virt.h>
#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
@@ -310,10 +311,22 @@ void cpu_die(void)
}
#endif
+static void __init hyp_mode_check(void)
+{
+ if (is_hyp_mode_available())
+ pr_info("CPU: All CPU(s) started at EL2\n");
+ else if (is_hyp_mode_mismatched())
+ WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC,
+ "CPU: CPUs started in inconsistent modes");
+ else
+ pr_info("CPU: All CPU(s) started at EL1\n");
+}
+
void __init smp_cpus_done(unsigned int max_cpus)
{
pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
- do_post_cpus_up_work();
+ hyp_mode_check();
+ apply_alternatives_all();
}
void __init smp_prepare_boot_cpu(void)
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 42f9195cf2f8..149151fb42bb 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -42,7 +42,6 @@
#include <asm/thread_info.h>
#include <asm/stacktrace.h>
-#ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs)
{
struct stackframe frame;
@@ -62,7 +61,6 @@ unsigned long profile_pc(struct pt_regs *regs)
return frame.pc;
}
EXPORT_SYMBOL(profile_pc);
-#endif
void __init time_init(void)
{
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index fcb8f7b42271..694f6deedbab 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -300,6 +300,6 @@ void __init init_cpu_topology(void)
* Discard anything that was parsed if we hit an error so we
* don't use partial information.
*/
- if (parse_dt_topology())
+ if (of_have_populated_dt() && parse_dt_topology())
reset_cpu_topology();
}
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 566bc4c35040..f93aae5e4307 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -17,6 +17,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/bug.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
@@ -32,8 +33,10 @@
#include <linux/syscalls.h>
#include <asm/atomic.h>
+#include <asm/bug.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
+#include <asm/insn.h>
#include <asm/traps.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
@@ -52,11 +55,12 @@ int show_unhandled_signals = 1;
* Dump out the contents of some memory nicely...
*/
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
- unsigned long top)
+ unsigned long top, bool compat)
{
unsigned long first;
mm_segment_t fs;
int i;
+ unsigned int width = compat ? 4 : 8;
/*
* We need to switch to kernel mode so that we can use __get_user
@@ -75,13 +79,22 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
memset(str, ' ', sizeof(str));
str[sizeof(str) - 1] = '\0';
- for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
+ for (p = first, i = 0; i < (32 / width)
+ && p < top; i++, p += width) {
if (p >= bottom && p < top) {
- unsigned int val;
- if (__get_user(val, (unsigned int *)p) == 0)
- sprintf(str + i * 9, " %08x", val);
- else
- sprintf(str + i * 9, " ????????");
+ unsigned long val;
+
+ if (width == 8) {
+ if (__get_user(val, (unsigned long *)p) == 0)
+ sprintf(str + i * 17, " %016lx", val);
+ else
+ sprintf(str + i * 17, " ????????????????");
+ } else {
+ if (__get_user(val, (unsigned int *)p) == 0)
+ sprintf(str + i * 9, " %08lx", val);
+ else
+ sprintf(str + i * 9, " ????????");
+ }
}
}
printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
@@ -95,7 +108,7 @@ static void dump_backtrace_entry(unsigned long where, unsigned long stack)
print_ip_sym(where);
if (in_exception_text(where))
dump_mem("", "Exception stack", stack,
- stack + sizeof(struct pt_regs));
+ stack + sizeof(struct pt_regs), false);
}
static void dump_instr(const char *lvl, struct pt_regs *regs)
@@ -179,11 +192,7 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
#else
#define S_PREEMPT ""
#endif
-#ifdef CONFIG_SMP
#define S_SMP " SMP"
-#else
-#define S_SMP ""
-#endif
static int __die(const char *str, int err, struct thread_info *thread,
struct pt_regs *regs)
@@ -207,7 +216,8 @@ static int __die(const char *str, int err, struct thread_info *thread,
if (!user_mode(regs) || in_interrupt()) {
dump_mem(KERN_EMERG, "Stack: ", regs->sp,
- THREAD_SIZE + (unsigned long)task_stack_page(tsk));
+ THREAD_SIZE + (unsigned long)task_stack_page(tsk),
+ compat_user_mode(regs));
dump_backtrace(regs, tsk);
dump_instr(KERN_EMERG, regs);
}
@@ -459,7 +469,63 @@ void __pgd_error(const char *file, int line, unsigned long val)
pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val);
}
+/* GENERIC_BUG traps */
+
+int is_valid_bugaddr(unsigned long addr)
+{
+ /*
+ * bug_handler() is only called for BRK #BUG_BRK_IMM.

+ * So the answer is trivial -- any spurious instances with no
+ * bug table entry will be rejected by report_bug() and passed
+ * back to the debug-monitors code and handled as a fatal
+ * unexpected debug exception.
+ */
+ return 1;
+}
+
+static int bug_handler(struct pt_regs *regs, unsigned int esr)
+{
+ if (user_mode(regs))
+ return DBG_HOOK_ERROR;
+
+ switch (report_bug(regs->pc, regs)) {
+ case BUG_TRAP_TYPE_BUG:
+ die("Oops - BUG", regs, 0);
+ break;
+
+ case BUG_TRAP_TYPE_WARN:
+ /* Ideally, report_bug() should backtrace for us... but no. */
+ dump_backtrace(regs, NULL);
+ break;
+
+ default:
+ /* unknown/unrecognised bug trap type */
+ return DBG_HOOK_ERROR;
+ }
+
+ /* If thread survives, skip over the BUG instruction and continue: */
+ regs->pc += AARCH64_INSN_SIZE; /* skip BRK and resume */
+ return DBG_HOOK_HANDLED;
+}
+
+static struct break_hook bug_break_hook = {
+ .esr_val = 0xf2000000 | BUG_BRK_IMM,
+ .esr_mask = 0xffffffff,
+ .fn = bug_handler,
+};
+
+/*
+ * Initial handler for AArch64 BRK exceptions.
+ * This handler is only used until debug_traps_init().
+ */
+int __init early_brk64(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs)
+{
+ return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
+}
+
+/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
- return;
+ register_break_hook(&bug_break_hook);
}
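For context, the BRK-based BUG machinery this hooks into works roughly as follows; a sketch assuming asm/bug.h follows the usual GENERIC_BUG pattern, with a hypothetical macro name (the real BUG() also records a __bug_table entry, which this fragment omits):

    /* Sketch: BUG() emits a brk #BUG_BRK_IMM plus a __bug_table entry.
     * The resulting debug exception matches bug_break_hook.esr_val, so
     * bug_handler() feeds regs->pc to report_bug() and either dies (BUG)
     * or warns and steps the PC past the 4-byte BRK (WARN). */
    #define MY_BUG()                                               \
    do {                                                           \
    	asm volatile ("brk %0" : : "i" (BUG_BRK_IMM));         \
    	unreachable();                                         \
    } while (0)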
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index ec37ab3f524f..97bc68f4c689 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -199,16 +199,15 @@ up_fail:
*/
void update_vsyscall(struct timekeeper *tk)
{
- struct timespec xtime_coarse;
u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");
++vdso_data->tb_seq_count;
smp_wmb();
- xtime_coarse = __current_kernel_time();
vdso_data->use_syscall = use_syscall;
- vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
- vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+ vdso_data->xtime_coarse_sec = tk->xtime_sec;
+ vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
+ tk->tkr_mono.shift;
vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index bfffe8f4bd53..5c7e920e4861 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -41,15 +41,4 @@ config KVM_ARM_HOST
---help---
Provides host support for ARM processors.
-config KVM_ARM_MAX_VCPUS
- int "Number maximum supported virtual CPUs per VM"
- depends on KVM_ARM_HOST
- default 4
- help
- Static number of max supported virtual CPUs per VM.
-
- If you choose a high number, the vcpu structures will be quite
- large, so only choose a reasonable number that you expect to
- actually use.
-
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index f90f4aa7f88d..1949fe5f5424 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -17,7 +17,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
-kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
+kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
new file mode 100644
index 000000000000..47e5f0feaee8
--- /dev/null
+++ b/arch/arm64/kvm/debug.c
@@ -0,0 +1,217 @@
+/*
+ * Debug and Guest Debug support
+ *
+ * Copyright (C) 2015 - Linaro Ltd
+ * Author: Alex Bennée <alex.bennee@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/hw_breakpoint.h>
+
+#include <asm/debug-monitors.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_emulate.h>
+
+#include "trace.h"
+
+/* These are the bits of MDSCR_EL1 we may manipulate */
+#define MDSCR_EL1_DEBUG_MASK (DBG_MDSCR_SS | \
+ DBG_MDSCR_KDE | \
+ DBG_MDSCR_MDE)
+
+static DEFINE_PER_CPU(u32, mdcr_el2);
+
+/**
+ * save/restore_guest_debug_regs
+ *
+ * For some debug operations we need to tweak some guest registers. As
+ * a result we need to save the state of those registers before we
+ * make those modifications.
+ *
+ * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
+ * after we have restored the preserved value to the main context.
+ */
+static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.guest_debug_preserved.mdscr_el1 = vcpu_sys_reg(vcpu, MDSCR_EL1);
+
+ trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
+ vcpu->arch.guest_debug_preserved.mdscr_el1);
+}
+
+static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
+{
+ vcpu_sys_reg(vcpu, MDSCR_EL1) = vcpu->arch.guest_debug_preserved.mdscr_el1;
+
+ trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
+ vcpu_sys_reg(vcpu, MDSCR_EL1));
+}
+
+/**
+ * kvm_arm_init_debug - grab what we need for debug
+ *
+ * Currently the sole task of this function is to retrieve the initial
+ * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has
+ * presumably been set up by some knowledgeable bootcode.
+ *
+ * It is called once per-cpu during CPU hyp initialisation.
+ */
+
+void kvm_arm_init_debug(void)
+{
+ __this_cpu_write(mdcr_el2, kvm_call_hyp(__kvm_get_mdcr_el2));
+}
+
+/**
+ * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
+ */
+
+void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
+}
+
+/**
+ * kvm_arm_setup_debug - set up debug related stuff
+ *
+ * @vcpu: the vcpu pointer
+ *
+ * This is called before each entry into the hypervisor to setup any
+ * debug related registers. Currently this just ensures we will trap
+ * access to:
+ * - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
+ * - Debug ROM Address (MDCR_EL2_TDRA)
+ * - OS related registers (MDCR_EL2_TDOSA)
+ *
+ * Additionally, KVM only traps guest accesses to the debug registers if
+ * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
+ * flag on vcpu->arch.debug_flags). Since the guest must not interfere
+ * with the hardware state when debugging the guest, we must ensure that
+ * trapping is enabled whenever we are debugging the guest using the
+ * debug registers.
+ */
+
+void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
+{
+ bool trap_debug = !(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY);
+
+ trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
+
+ vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
+ vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
+ MDCR_EL2_TPMCR |
+ MDCR_EL2_TDRA |
+ MDCR_EL2_TDOSA);
+
+ /* Is Guest debugging in effect? */
+ if (vcpu->guest_debug) {
+ /* Route all software debug exceptions to EL2 */
+ vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
+
+ /* Save guest debug state */
+ save_guest_debug_regs(vcpu);
+
+ /*
+ * Single Step (ARM ARM D2.12.3 The software step state
+ * machine)
+ *
+ * If we are doing Single Step we need to manipulate
+ * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
+ * step has occurred the hypervisor will trap the
+ * debug exception and we return to userspace.
+ *
+ * If the guest attempts to single step its userspace
+ * we would have to deal with a trapped exception
+ * while in the guest kernel. Because this would be
+ * hard to unwind we suppress the guest's ability to
+ * do so by masking MDSCR_EL1.SS.
+ *
+ * This confuses guest debuggers which use
+ * single-step behind the scenes but everything
+ * returns to normal once the host is no longer
+ * debugging the system.
+ */
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+ *vcpu_cpsr(vcpu) |= DBG_SPSR_SS;
+ vcpu_sys_reg(vcpu, MDSCR_EL1) |= DBG_MDSCR_SS;
+ } else {
+ vcpu_sys_reg(vcpu, MDSCR_EL1) &= ~DBG_MDSCR_SS;
+ }
+
+ trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));
+
+ /*
+ * HW Breakpoints and watchpoints
+ *
+ * We simply switch the debug_ptr to point to our new
+ * external_debug_state which has been populated by the
+ * debug ioctl. The existing KVM_ARM64_DEBUG_DIRTY
+ * mechanism ensures the registers are updated on the
+ * world switch.
+ */
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
+ /* Enable breakpoints/watchpoints */
+ vcpu_sys_reg(vcpu, MDSCR_EL1) |= DBG_MDSCR_MDE;
+
+ vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
+ vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ trap_debug = true;
+
+ trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
+ &vcpu->arch.debug_ptr->dbg_bcr[0],
+ &vcpu->arch.debug_ptr->dbg_bvr[0]);
+
+ trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
+ &vcpu->arch.debug_ptr->dbg_wcr[0],
+ &vcpu->arch.debug_ptr->dbg_wvr[0]);
+ }
+ }
+
+ BUG_ON(!vcpu->guest_debug &&
+ vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
+
+ /* Trap debug register access */
+ if (trap_debug)
+ vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
+
+ trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
+ trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_sys_reg(vcpu, MDSCR_EL1));
+}
+
+void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
+{
+ trace_kvm_arm_clear_debug(vcpu->guest_debug);
+
+ if (vcpu->guest_debug) {
+ restore_guest_debug_regs(vcpu);
+
+ /*
+ * If we were using HW debug we need to restore the
+ * debug_ptr to the guest debug state.
+ */
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
+ kvm_arm_reset_debug_ptr(vcpu);
+
+ trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
+ &vcpu->arch.debug_ptr->dbg_bcr[0],
+ &vcpu->arch.debug_ptr->dbg_bvr[0]);
+
+ trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
+ &vcpu->arch.debug_ptr->dbg_wcr[0],
+ &vcpu->arch.debug_ptr->dbg_wvr[0]);
+ }
+ }
+}
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 9535bd555d1d..d250160d32bc 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -32,6 +32,8 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
+#include "trace.h"
+
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
@@ -293,7 +295,8 @@ int __attribute_const__ kvm_target_cpu(void)
break;
};
- return -EINVAL;
+ /* Return a default generic target */
+ return KVM_ARM_TARGET_GENERIC_V8;
}
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
@@ -331,3 +334,41 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
{
return -EINVAL;
}
+
+#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
+ KVM_GUESTDBG_USE_SW_BP | \
+ KVM_GUESTDBG_USE_HW | \
+ KVM_GUESTDBG_SINGLESTEP)
+
+/**
+ * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
+ * @vcpu: the vcpu pointer
+ * @dbg: the ioctl data buffer
+ *
+ * This sets up and enables the VM for guest debugging. Userspace
+ * passes in a control flag to enable different debug types and
+ * potentially other architecture specific information in the rest of
+ * the structure.
+ */
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug *dbg)
+{
+ trace_kvm_set_guest_debug(vcpu, dbg->control);
+
+ if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
+ return -EINVAL;
+
+ if (dbg->control & KVM_GUESTDBG_ENABLE) {
+ vcpu->guest_debug = dbg->control;
+
+ /* Hardware assisted Break and Watch points */
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
+ vcpu->arch.external_debug_state = dbg->arch;
+ }
+
+ } else {
+ /* If not enabled clear all flags */
+ vcpu->guest_debug = 0;
+ }
+ return 0;
+}
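From userspace, this is driven through the existing KVM_SET_GUEST_DEBUG ioctl; a minimal usage sketch (hypothetical caller, not part of the patch):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Ask KVM to single-step the guest vcpu; every guest instruction then
     * causes a KVM_EXIT_DEBUG exit back to this process. */
    static int enable_singlestep(int vcpu_fd)
    {
    	struct kvm_guest_debug dbg = {
    		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
    	};

    	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
    }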
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 524fa25671fc..68a0759b1375 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -82,6 +82,45 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 1;
}
+/**
+ * kvm_handle_guest_debug - handle a debug exception instruction
+ *
+ * @vcpu: the vcpu pointer
+ * @run: access to the kvm_run structure for results
+ *
+ * We route all debug exceptions through the same handler. If both the
+ * guest and host are using the same debug facilities it will be up to
+ * userspace to re-inject the correct exception for guest delivery.
+ *
+ * @return: 0 (while setting run->exit_reason), -1 for error
+ */
+static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ u32 hsr = kvm_vcpu_get_hsr(vcpu);
+ int ret = 0;
+
+ run->exit_reason = KVM_EXIT_DEBUG;
+ run->debug.arch.hsr = hsr;
+
+ switch (hsr >> ESR_ELx_EC_SHIFT) {
+ case ESR_ELx_EC_WATCHPT_LOW:
+ run->debug.arch.far = vcpu->arch.fault.far_el2;
+ /* fall through */
+ case ESR_ELx_EC_SOFTSTP_LOW:
+ case ESR_ELx_EC_BREAKPT_LOW:
+ case ESR_ELx_EC_BKPT32:
+ case ESR_ELx_EC_BRK64:
+ break;
+ default:
+ kvm_err("%s: un-handled case hsr: %#08x\n",
+ __func__, (unsigned int) hsr);
+ ret = -1;
+ break;
+ }
+
+ return ret;
+}
+
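On the other side of that exit, a userspace run loop would consume it roughly as follows (sketch; handle_guest_debug_exit() is a hypothetical helper, while the run->debug.arch fields are the ones filled in above):

    /* Inside the usual KVM_RUN loop, with 'run' mmap()'d from the vcpu fd. */
    switch (run->exit_reason) {
    case KVM_EXIT_DEBUG:
    	/* run->debug.arch.hsr carries the ESR; for watchpoints the
    	 * faulting address is in run->debug.arch.far. */
    	handle_guest_debug_exit(run->debug.arch.hsr, run->debug.arch.far);
    	break;
    default:
    	break;
    }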
static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_WFx] = kvm_handle_wfx,
[ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32,
@@ -96,6 +135,11 @@ static exit_handle_fn arm_exit_handlers[] = {
[ESR_ELx_EC_SYS64] = kvm_handle_sys_reg,
[ESR_ELx_EC_IABT_LOW] = kvm_handle_guest_abort,
[ESR_ELx_EC_DABT_LOW] = kvm_handle_guest_abort,
+ [ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
+ [ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
+ [ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
+ [ESR_ELx_EC_BKPT32] = kvm_handle_guest_debug,
+ [ESR_ELx_EC_BRK64] = kvm_handle_guest_debug,
};
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 17a8fb14f428..e5836138ec42 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -230,199 +230,52 @@
stp x24, x25, [x3, #160]
.endm
-.macro save_debug
- // x2: base address for cpu context
- // x3: tmp register
-
- mrs x26, id_aa64dfr0_el1
- ubfx x24, x26, #12, #4 // Extract BRPs
- ubfx x25, x26, #20, #4 // Extract WRPs
- mov w26, #15
- sub w24, w26, w24 // How many BPs to skip
- sub w25, w26, w25 // How many WPs to skip
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-1:
- mrs x20, dbgbcr15_el1
- mrs x19, dbgbcr14_el1
- mrs x18, dbgbcr13_el1
- mrs x17, dbgbcr12_el1
- mrs x16, dbgbcr11_el1
- mrs x15, dbgbcr10_el1
- mrs x14, dbgbcr9_el1
- mrs x13, dbgbcr8_el1
- mrs x12, dbgbcr7_el1
- mrs x11, dbgbcr6_el1
- mrs x10, dbgbcr5_el1
- mrs x9, dbgbcr4_el1
- mrs x8, dbgbcr3_el1
- mrs x7, dbgbcr2_el1
- mrs x6, dbgbcr1_el1
- mrs x5, dbgbcr0_el1
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-
-1:
- str x20, [x3, #(15 * 8)]
- str x19, [x3, #(14 * 8)]
- str x18, [x3, #(13 * 8)]
- str x17, [x3, #(12 * 8)]
- str x16, [x3, #(11 * 8)]
- str x15, [x3, #(10 * 8)]
- str x14, [x3, #(9 * 8)]
- str x13, [x3, #(8 * 8)]
- str x12, [x3, #(7 * 8)]
- str x11, [x3, #(6 * 8)]
- str x10, [x3, #(5 * 8)]
- str x9, [x3, #(4 * 8)]
- str x8, [x3, #(3 * 8)]
- str x7, [x3, #(2 * 8)]
- str x6, [x3, #(1 * 8)]
- str x5, [x3, #(0 * 8)]
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-1:
- mrs x20, dbgbvr15_el1
- mrs x19, dbgbvr14_el1
- mrs x18, dbgbvr13_el1
- mrs x17, dbgbvr12_el1
- mrs x16, dbgbvr11_el1
- mrs x15, dbgbvr10_el1
- mrs x14, dbgbvr9_el1
- mrs x13, dbgbvr8_el1
- mrs x12, dbgbvr7_el1
- mrs x11, dbgbvr6_el1
- mrs x10, dbgbvr5_el1
- mrs x9, dbgbvr4_el1
- mrs x8, dbgbvr3_el1
- mrs x7, dbgbvr2_el1
- mrs x6, dbgbvr1_el1
- mrs x5, dbgbvr0_el1
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-
+.macro save_debug type
+ // x4: pointer to register set
+ // x5: number of registers to skip
+ // x6..x22 trashed
+
+ adr x22, 1f
+ add x22, x22, x5, lsl #2
+ br x22
1:
- str x20, [x3, #(15 * 8)]
- str x19, [x3, #(14 * 8)]
- str x18, [x3, #(13 * 8)]
- str x17, [x3, #(12 * 8)]
- str x16, [x3, #(11 * 8)]
- str x15, [x3, #(10 * 8)]
- str x14, [x3, #(9 * 8)]
- str x13, [x3, #(8 * 8)]
- str x12, [x3, #(7 * 8)]
- str x11, [x3, #(6 * 8)]
- str x10, [x3, #(5 * 8)]
- str x9, [x3, #(4 * 8)]
- str x8, [x3, #(3 * 8)]
- str x7, [x3, #(2 * 8)]
- str x6, [x3, #(1 * 8)]
- str x5, [x3, #(0 * 8)]
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
+ mrs x21, \type\()15_el1
+ mrs x20, \type\()14_el1
+ mrs x19, \type\()13_el1
+ mrs x18, \type\()12_el1
+ mrs x17, \type\()11_el1
+ mrs x16, \type\()10_el1
+ mrs x15, \type\()9_el1
+ mrs x14, \type\()8_el1
+ mrs x13, \type\()7_el1
+ mrs x12, \type\()6_el1
+ mrs x11, \type\()5_el1
+ mrs x10, \type\()4_el1
+ mrs x9, \type\()3_el1
+ mrs x8, \type\()2_el1
+ mrs x7, \type\()1_el1
+ mrs x6, \type\()0_el1
+
+ adr x22, 1f
+ add x22, x22, x5, lsl #2
+ br x22
1:
- mrs x20, dbgwcr15_el1
- mrs x19, dbgwcr14_el1
- mrs x18, dbgwcr13_el1
- mrs x17, dbgwcr12_el1
- mrs x16, dbgwcr11_el1
- mrs x15, dbgwcr10_el1
- mrs x14, dbgwcr9_el1
- mrs x13, dbgwcr8_el1
- mrs x12, dbgwcr7_el1
- mrs x11, dbgwcr6_el1
- mrs x10, dbgwcr5_el1
- mrs x9, dbgwcr4_el1
- mrs x8, dbgwcr3_el1
- mrs x7, dbgwcr2_el1
- mrs x6, dbgwcr1_el1
- mrs x5, dbgwcr0_el1
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
-
-1:
- str x20, [x3, #(15 * 8)]
- str x19, [x3, #(14 * 8)]
- str x18, [x3, #(13 * 8)]
- str x17, [x3, #(12 * 8)]
- str x16, [x3, #(11 * 8)]
- str x15, [x3, #(10 * 8)]
- str x14, [x3, #(9 * 8)]
- str x13, [x3, #(8 * 8)]
- str x12, [x3, #(7 * 8)]
- str x11, [x3, #(6 * 8)]
- str x10, [x3, #(5 * 8)]
- str x9, [x3, #(4 * 8)]
- str x8, [x3, #(3 * 8)]
- str x7, [x3, #(2 * 8)]
- str x6, [x3, #(1 * 8)]
- str x5, [x3, #(0 * 8)]
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
-1:
- mrs x20, dbgwvr15_el1
- mrs x19, dbgwvr14_el1
- mrs x18, dbgwvr13_el1
- mrs x17, dbgwvr12_el1
- mrs x16, dbgwvr11_el1
- mrs x15, dbgwvr10_el1
- mrs x14, dbgwvr9_el1
- mrs x13, dbgwvr8_el1
- mrs x12, dbgwvr7_el1
- mrs x11, dbgwvr6_el1
- mrs x10, dbgwvr5_el1
- mrs x9, dbgwvr4_el1
- mrs x8, dbgwvr3_el1
- mrs x7, dbgwvr2_el1
- mrs x6, dbgwvr1_el1
- mrs x5, dbgwvr0_el1
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
-
-1:
- str x20, [x3, #(15 * 8)]
- str x19, [x3, #(14 * 8)]
- str x18, [x3, #(13 * 8)]
- str x17, [x3, #(12 * 8)]
- str x16, [x3, #(11 * 8)]
- str x15, [x3, #(10 * 8)]
- str x14, [x3, #(9 * 8)]
- str x13, [x3, #(8 * 8)]
- str x12, [x3, #(7 * 8)]
- str x11, [x3, #(6 * 8)]
- str x10, [x3, #(5 * 8)]
- str x9, [x3, #(4 * 8)]
- str x8, [x3, #(3 * 8)]
- str x7, [x3, #(2 * 8)]
- str x6, [x3, #(1 * 8)]
- str x5, [x3, #(0 * 8)]
-
- mrs x21, mdccint_el1
- str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
+ str x21, [x4, #(15 * 8)]
+ str x20, [x4, #(14 * 8)]
+ str x19, [x4, #(13 * 8)]
+ str x18, [x4, #(12 * 8)]
+ str x17, [x4, #(11 * 8)]
+ str x16, [x4, #(10 * 8)]
+ str x15, [x4, #(9 * 8)]
+ str x14, [x4, #(8 * 8)]
+ str x13, [x4, #(7 * 8)]
+ str x12, [x4, #(6 * 8)]
+ str x11, [x4, #(5 * 8)]
+ str x10, [x4, #(4 * 8)]
+ str x9, [x4, #(3 * 8)]
+ str x8, [x4, #(2 * 8)]
+ str x7, [x4, #(1 * 8)]
+ str x6, [x4, #(0 * 8)]
.endm
.macro restore_sysregs
@@ -467,195 +320,52 @@
msr mdscr_el1, x25
.endm
-.macro restore_debug
- // x2: base address for cpu context
- // x3: tmp register
+.macro restore_debug type
+ // x4: pointer to register set
+ // x5: number of registers to skip
+ // x6..x22 trashed
- mrs x26, id_aa64dfr0_el1
- ubfx x24, x26, #12, #4 // Extract BRPs
- ubfx x25, x26, #20, #4 // Extract WRPs
- mov w26, #15
- sub w24, w26, w24 // How many BPs to skip
- sub w25, w26, w25 // How many WPs to skip
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-1:
- ldr x20, [x3, #(15 * 8)]
- ldr x19, [x3, #(14 * 8)]
- ldr x18, [x3, #(13 * 8)]
- ldr x17, [x3, #(12 * 8)]
- ldr x16, [x3, #(11 * 8)]
- ldr x15, [x3, #(10 * 8)]
- ldr x14, [x3, #(9 * 8)]
- ldr x13, [x3, #(8 * 8)]
- ldr x12, [x3, #(7 * 8)]
- ldr x11, [x3, #(6 * 8)]
- ldr x10, [x3, #(5 * 8)]
- ldr x9, [x3, #(4 * 8)]
- ldr x8, [x3, #(3 * 8)]
- ldr x7, [x3, #(2 * 8)]
- ldr x6, [x3, #(1 * 8)]
- ldr x5, [x3, #(0 * 8)]
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-1:
- msr dbgbcr15_el1, x20
- msr dbgbcr14_el1, x19
- msr dbgbcr13_el1, x18
- msr dbgbcr12_el1, x17
- msr dbgbcr11_el1, x16
- msr dbgbcr10_el1, x15
- msr dbgbcr9_el1, x14
- msr dbgbcr8_el1, x13
- msr dbgbcr7_el1, x12
- msr dbgbcr6_el1, x11
- msr dbgbcr5_el1, x10
- msr dbgbcr4_el1, x9
- msr dbgbcr3_el1, x8
- msr dbgbcr2_el1, x7
- msr dbgbcr1_el1, x6
- msr dbgbcr0_el1, x5
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-1:
- ldr x20, [x3, #(15 * 8)]
- ldr x19, [x3, #(14 * 8)]
- ldr x18, [x3, #(13 * 8)]
- ldr x17, [x3, #(12 * 8)]
- ldr x16, [x3, #(11 * 8)]
- ldr x15, [x3, #(10 * 8)]
- ldr x14, [x3, #(9 * 8)]
- ldr x13, [x3, #(8 * 8)]
- ldr x12, [x3, #(7 * 8)]
- ldr x11, [x3, #(6 * 8)]
- ldr x10, [x3, #(5 * 8)]
- ldr x9, [x3, #(4 * 8)]
- ldr x8, [x3, #(3 * 8)]
- ldr x7, [x3, #(2 * 8)]
- ldr x6, [x3, #(1 * 8)]
- ldr x5, [x3, #(0 * 8)]
-
- adr x26, 1f
- add x26, x26, x24, lsl #2
- br x26
-1:
- msr dbgbvr15_el1, x20
- msr dbgbvr14_el1, x19
- msr dbgbvr13_el1, x18
- msr dbgbvr12_el1, x17
- msr dbgbvr11_el1, x16
- msr dbgbvr10_el1, x15
- msr dbgbvr9_el1, x14
- msr dbgbvr8_el1, x13
- msr dbgbvr7_el1, x12
- msr dbgbvr6_el1, x11
- msr dbgbvr5_el1, x10
- msr dbgbvr4_el1, x9
- msr dbgbvr3_el1, x8
- msr dbgbvr2_el1, x7
- msr dbgbvr1_el1, x6
- msr dbgbvr0_el1, x5
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
-1:
- ldr x20, [x3, #(15 * 8)]
- ldr x19, [x3, #(14 * 8)]
- ldr x18, [x3, #(13 * 8)]
- ldr x17, [x3, #(12 * 8)]
- ldr x16, [x3, #(11 * 8)]
- ldr x15, [x3, #(10 * 8)]
- ldr x14, [x3, #(9 * 8)]
- ldr x13, [x3, #(8 * 8)]
- ldr x12, [x3, #(7 * 8)]
- ldr x11, [x3, #(6 * 8)]
- ldr x10, [x3, #(5 * 8)]
- ldr x9, [x3, #(4 * 8)]
- ldr x8, [x3, #(3 * 8)]
- ldr x7, [x3, #(2 * 8)]
- ldr x6, [x3, #(1 * 8)]
- ldr x5, [x3, #(0 * 8)]
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
+ adr x22, 1f
+ add x22, x22, x5, lsl #2
+ br x22
1:
- msr dbgwcr15_el1, x20
- msr dbgwcr14_el1, x19
- msr dbgwcr13_el1, x18
- msr dbgwcr12_el1, x17
- msr dbgwcr11_el1, x16
- msr dbgwcr10_el1, x15
- msr dbgwcr9_el1, x14
- msr dbgwcr8_el1, x13
- msr dbgwcr7_el1, x12
- msr dbgwcr6_el1, x11
- msr dbgwcr5_el1, x10
- msr dbgwcr4_el1, x9
- msr dbgwcr3_el1, x8
- msr dbgwcr2_el1, x7
- msr dbgwcr1_el1, x6
- msr dbgwcr0_el1, x5
-
- add x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
+ ldr x21, [x4, #(15 * 8)]
+ ldr x20, [x4, #(14 * 8)]
+ ldr x19, [x4, #(13 * 8)]
+ ldr x18, [x4, #(12 * 8)]
+ ldr x17, [x4, #(11 * 8)]
+ ldr x16, [x4, #(10 * 8)]
+ ldr x15, [x4, #(9 * 8)]
+ ldr x14, [x4, #(8 * 8)]
+ ldr x13, [x4, #(7 * 8)]
+ ldr x12, [x4, #(6 * 8)]
+ ldr x11, [x4, #(5 * 8)]
+ ldr x10, [x4, #(4 * 8)]
+ ldr x9, [x4, #(3 * 8)]
+ ldr x8, [x4, #(2 * 8)]
+ ldr x7, [x4, #(1 * 8)]
+ ldr x6, [x4, #(0 * 8)]
+
+ adr x22, 1f
+ add x22, x22, x5, lsl #2
+ br x22
1:
- ldr x20, [x3, #(15 * 8)]
- ldr x19, [x3, #(14 * 8)]
- ldr x18, [x3, #(13 * 8)]
- ldr x17, [x3, #(12 * 8)]
- ldr x16, [x3, #(11 * 8)]
- ldr x15, [x3, #(10 * 8)]
- ldr x14, [x3, #(9 * 8)]
- ldr x13, [x3, #(8 * 8)]
- ldr x12, [x3, #(7 * 8)]
- ldr x11, [x3, #(6 * 8)]
- ldr x10, [x3, #(5 * 8)]
- ldr x9, [x3, #(4 * 8)]
- ldr x8, [x3, #(3 * 8)]
- ldr x7, [x3, #(2 * 8)]
- ldr x6, [x3, #(1 * 8)]
- ldr x5, [x3, #(0 * 8)]
-
- adr x26, 1f
- add x26, x26, x25, lsl #2
- br x26
-1:
- msr dbgwvr15_el1, x20
- msr dbgwvr14_el1, x19
- msr dbgwvr13_el1, x18
- msr dbgwvr12_el1, x17
- msr dbgwvr11_el1, x16
- msr dbgwvr10_el1, x15
- msr dbgwvr9_el1, x14
- msr dbgwvr8_el1, x13
- msr dbgwvr7_el1, x12
- msr dbgwvr6_el1, x11
- msr dbgwvr5_el1, x10
- msr dbgwvr4_el1, x9
- msr dbgwvr3_el1, x8
- msr dbgwvr2_el1, x7
- msr dbgwvr1_el1, x6
- msr dbgwvr0_el1, x5
-
- ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
- msr mdccint_el1, x21
+ msr \type\()15_el1, x21
+ msr \type\()14_el1, x20
+ msr \type\()13_el1, x19
+ msr \type\()12_el1, x18
+ msr \type\()11_el1, x17
+ msr \type\()10_el1, x16
+ msr \type\()9_el1, x15
+ msr \type\()8_el1, x14
+ msr \type\()7_el1, x13
+ msr \type\()6_el1, x12
+ msr \type\()5_el1, x11
+ msr \type\()4_el1, x10
+ msr \type\()3_el1, x9
+ msr \type\()2_el1, x8
+ msr \type\()1_el1, x7
+ msr \type\()0_el1, x6
.endm
.macro skip_32bit_state tmp, target
@@ -675,6 +385,14 @@
tbz \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
.endm
+/*
+ * Branch to target if CPTR_EL2.TFP bit is set (VFP/SIMD trapping enabled)
+ */
+.macro skip_fpsimd_state tmp, target
+ mrs \tmp, cptr_el2
+ tbnz \tmp, #CPTR_EL2_TFP_SHIFT, \target
+.endm
+
.macro compute_debug_state target
// Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
// is set, we do a full save/restore cycle and disable trapping.
@@ -713,20 +431,15 @@
add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
mrs x4, dacr32_el2
mrs x5, ifsr32_el2
- mrs x6, fpexc32_el2
stp x4, x5, [x3]
- str x6, [x3, #16]
- skip_debug_state x8, 2f
+ skip_fpsimd_state x8, 2f
+ mrs x6, fpexc32_el2
+ str x6, [x3, #16]
+2:
+ skip_debug_state x8, 1f
mrs x7, dbgvcr32_el2
str x7, [x3, #24]
-2:
- skip_tee_state x8, 1f
-
- add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
- mrs x4, teecr32_el1
- mrs x5, teehbr32_el1
- stp x4, x5, [x3]
1:
.endm
@@ -743,51 +456,46 @@
add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
ldp x4, x5, [x3]
- ldr x6, [x3, #16]
msr dacr32_el2, x4
msr ifsr32_el2, x5
- msr fpexc32_el2, x6
- skip_debug_state x8, 2f
+ skip_debug_state x8, 1f
ldr x7, [x3, #24]
msr dbgvcr32_el2, x7
-2:
- skip_tee_state x8, 1f
-
- add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
- ldp x4, x5, [x3]
- msr teecr32_el1, x4
- msr teehbr32_el1, x5
1:
.endm
.macro activate_traps
ldr x2, [x0, #VCPU_HCR_EL2]
+
+ /*
+ * We are about to set CPTR_EL2.TFP to trap all floating point
+ * register accesses to EL2, however, the ARM ARM clearly states that
+ * traps are only taken to EL2 if the operation would not otherwise
+ * trap to EL1. Therefore, always make sure that for 32-bit guests,
+ * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
+ */
+ tbnz x2, #HCR_RW_SHIFT, 99f // open code skip_32bit_state
+ mov x3, #(1 << 30)
+ msr fpexc32_el2, x3
+ isb
+99:
msr hcr_el2, x2
mov x2, #CPTR_EL2_TTA
+ orr x2, x2, #CPTR_EL2_TFP
msr cptr_el2, x2
mov x2, #(1 << 15) // Trap CP15 Cr=15
msr hstr_el2, x2
- mrs x2, mdcr_el2
- and x2, x2, #MDCR_EL2_HPMN_MASK
- orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
- orr x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)
-
- // Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
- // if not dirty.
- ldr x3, [x0, #VCPU_DEBUG_FLAGS]
- tbnz x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
- orr x2, x2, #MDCR_EL2_TDA
-1:
+ // Monitor Debug Config - see kvm_arm_setup_debug()
+ ldr x2, [x0, #VCPU_MDCR_EL2]
msr mdcr_el2, x2
.endm
.macro deactivate_traps
mov x2, #HCR_RW
msr hcr_el2, x2
- msr cptr_el2, xzr
msr hstr_el2, xzr
mrs x2, mdcr_el2
@@ -810,7 +518,11 @@
* Call into the vgic backend for state saving
*/
.macro save_vgic_state
- alternative_insn "bl __save_vgic_v2_state", "bl __save_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+ bl __save_vgic_v2_state
+alternative_else
+ bl __save_vgic_v3_state
+alternative_endif
mrs x24, hcr_el2
mov x25, #HCR_INT_OVERRIDE
neg x25, x25
@@ -827,7 +539,11 @@
orr x24, x24, #HCR_INT_OVERRIDE
orr x24, x24, x25
msr hcr_el2, x24
- alternative_insn "bl __restore_vgic_v2_state", "bl __restore_vgic_v3_state", ARM64_HAS_SYSREG_GIC_CPUIF
+alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
+ bl __restore_vgic_v2_state
+alternative_else
+ bl __restore_vgic_v3_state
+alternative_endif
.endm
.macro save_timer_state
@@ -840,8 +556,6 @@
mrs x3, cntv_ctl_el0
and x3, x3, #3
str w3, [x0, #VCPU_TIMER_CNTV_CTL]
- bic x3, x3, #1 // Clear Enable
- msr cntv_ctl_el0, x3
isb
@@ -849,6 +563,9 @@
str x3, [x0, #VCPU_TIMER_CNTV_CVAL]
1:
+ // Disable the virtual timer
+ msr cntv_ctl_el0, xzr
+
// Allow physical timer/counter access for the host
mrs x2, cnthctl_el2
orr x2, x2, #3
@@ -892,21 +609,101 @@ __restore_sysregs:
restore_sysregs
ret
+/* Save debug state */
__save_debug:
- save_debug
+ // x2: ptr to CPU context
+ // x3: ptr to debug reg struct
+ // x4/x5/x6-22/x24-26: trashed
+
+ mrs x26, id_aa64dfr0_el1
+ ubfx x24, x26, #12, #4 // Extract BRPs
+ ubfx x25, x26, #20, #4 // Extract WRPs
+ mov w26, #15
+ sub w24, w26, w24 // How many BPs to skip
+ sub w25, w26, w25 // How many WPs to skip
+
+ mov x5, x24
+ add x4, x3, #DEBUG_BCR
+ save_debug dbgbcr
+ add x4, x3, #DEBUG_BVR
+ save_debug dbgbvr
+
+ mov x5, x25
+ add x4, x3, #DEBUG_WCR
+ save_debug dbgwcr
+ add x4, x3, #DEBUG_WVR
+ save_debug dbgwvr
+
+ mrs x21, mdccint_el1
+ str x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
ret
+/* Restore debug state */
__restore_debug:
- restore_debug
+ // x2: ptr to CPU context
+ // x3: ptr to debug reg struct
+ // x4/x5/x6-22/x24-26: trashed
+
+ mrs x26, id_aa64dfr0_el1
+ ubfx x24, x26, #12, #4 // Extract BRPs
+ ubfx x25, x26, #20, #4 // Extract WRPs
+ mov w26, #15
+ sub w24, w26, w24 // How many BPs to skip
+ sub w25, w26, w25 // How many WPs to skip
+
+ mov x5, x24
+ add x4, x3, #DEBUG_BCR
+ restore_debug dbgbcr
+ add x4, x3, #DEBUG_BVR
+ restore_debug dbgbvr
+
+ mov x5, x25
+ add x4, x3, #DEBUG_WCR
+ restore_debug dbgwcr
+ add x4, x3, #DEBUG_WVR
+ restore_debug dbgwvr
+
+ ldr x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
+ msr mdccint_el1, x21
+
ret
__save_fpsimd:
+ skip_fpsimd_state x3, 1f
save_fpsimd
- ret
+1: ret
__restore_fpsimd:
+ skip_fpsimd_state x3, 1f
restore_fpsimd
- ret
+1: ret
+
+switch_to_guest_fpsimd:
+ push x4, lr
+
+ mrs x2, cptr_el2
+ bic x2, x2, #CPTR_EL2_TFP
+ msr cptr_el2, x2
+ isb
+
+ mrs x0, tpidr_el2
+
+ ldr x2, [x0, #VCPU_HOST_CONTEXT]
+ kern_hyp_va x2
+ bl __save_fpsimd
+
+ add x2, x0, #VCPU_CONTEXT
+ bl __restore_fpsimd
+
+ skip_32bit_state x3, 1f
+ ldr x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)]
+ msr fpexc32_el2, x4
+1:
+ pop x4, lr
+ pop x2, x3
+ pop x0, x1
+
+ eret
/*
* u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
@@ -928,10 +725,10 @@ ENTRY(__kvm_vcpu_run)
kern_hyp_va x2
save_host_regs
- bl __save_fpsimd
bl __save_sysregs
compute_debug_state 1f
+ add x3, x0, #VCPU_HOST_DEBUG_STATE
bl __save_debug
1:
activate_traps
@@ -943,13 +740,16 @@ ENTRY(__kvm_vcpu_run)
// Guest context
add x2, x0, #VCPU_CONTEXT
+ // We must restore the 32-bit state before the sysregs, thanks
+ // to Cortex-A57 erratum #852523.
+ restore_guest_32bit_state
bl __restore_sysregs
- bl __restore_fpsimd
skip_debug_state x3, 1f
+ ldr x3, [x0, #VCPU_DEBUG_PTR]
+ kern_hyp_va x3
bl __restore_debug
1:
- restore_guest_32bit_state
restore_guest_regs
// That's it, no more messing around.
@@ -967,6 +767,8 @@ __kvm_vcpu_return:
bl __save_sysregs
skip_debug_state x3, 1f
+ ldr x3, [x0, #VCPU_DEBUG_PTR]
+ kern_hyp_va x3
bl __save_debug
1:
save_guest_32bit_state
@@ -983,12 +785,15 @@ __kvm_vcpu_return:
bl __restore_sysregs
bl __restore_fpsimd
+ /* Clear FPSIMD and Trace trapping */
+ msr cptr_el2, xzr
skip_debug_state x3, 1f
// Clear the dirty flag for the next run, as all the state has
// already been saved. Note that we nuke the whole 64bit word.
// If we ever add more flags, we'll have to be more careful...
str xzr, [x0, #VCPU_DEBUG_FLAGS]
+ add x3, x0, #VCPU_HOST_DEBUG_STATE
bl __restore_debug
1:
restore_host_regs
@@ -1191,6 +996,11 @@ el1_trap:
* x1: ESR
* x2: ESR_EC
*/
+
+ /* Guest accessed VFP/SIMD registers, save host, restore Guest */
+ cmp x2, #ESR_ELx_EC_FP_ASIMD
+ b.eq switch_to_guest_fpsimd
+
cmp x2, #ESR_ELx_EC_DABT_LOW
mov x0, #ESR_ELx_EC_IABT_LOW
ccmp x2, x0, #4, ne
@@ -1285,4 +1095,10 @@ ENTRY(__kvm_hyp_vector)
ventry el1_error_invalid // Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
+
+ENTRY(__kvm_get_mdcr_el2)
+ mrs x0, mdcr_el2
+ ret
+ENDPROC(__kvm_get_mdcr_el2)
+
.popsection
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index f02530e726f6..85c57158dcd9 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -168,8 +168,8 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
if (!(vcpu->arch.hcr_el2 & HCR_RW))
inject_abt32(vcpu, false, addr);
-
- inject_abt64(vcpu, false, addr);
+ else
+ inject_abt64(vcpu, false, addr);
}
/**
@@ -184,8 +184,8 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
if (!(vcpu->arch.hcr_el2 & HCR_RW))
inject_abt32(vcpu, true, addr);
-
- inject_abt64(vcpu, true, addr);
+ else
+ inject_abt64(vcpu, true, addr);
}
/**
@@ -198,6 +198,6 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
if (!(vcpu->arch.hcr_el2 & HCR_RW))
inject_undef32(vcpu);
-
- inject_undef64(vcpu);
+ else
+ inject_undef64(vcpu);
}
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 0b4326578985..91cf5350b328 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -22,6 +22,7 @@
#include <linux/errno.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
+#include <linux/hw_breakpoint.h>
#include <kvm/arm_arch_timer.h>
@@ -56,6 +57,12 @@ static bool cpu_has_32bit_el1(void)
return !!(pfr0 & 0x20);
}
+/**
+ * kvm_arch_dev_ioctl_check_extension
+ *
+ * We currently assume that the number of HW registers is uniform
+ * across all CPUs (see cpuinfo_sanity_check).
+ */
int kvm_arch_dev_ioctl_check_extension(long ext)
{
int r;
@@ -64,6 +71,15 @@ int kvm_arch_dev_ioctl_check_extension(long ext)
case KVM_CAP_ARM_EL1_32BIT:
r = cpu_has_32bit_el1();
break;
+ case KVM_CAP_GUEST_DEBUG_HW_BPS:
+ r = get_num_brps();
+ break;
+ case KVM_CAP_GUEST_DEBUG_HW_WPS:
+ r = get_num_wrps();
+ break;
+ case KVM_CAP_SET_GUEST_DEBUG:
+ r = 1;
+ break;
default:
r = 0;
}
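Userspace can size its hardware-assisted debug support from these capabilities before requesting KVM_GUESTDBG_USE_HW; for example (sketch of standard KVM_CHECK_EXTENSION usage, with kvm_fd being the /dev/kvm file descriptor):

    /* Query how many hardware breakpoints/watchpoints the host exposes
     * to guests (0 on kernels without this support). */
    int nr_bps = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_GUEST_DEBUG_HW_BPS);
    int nr_wps = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_GUEST_DEBUG_HW_WPS);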
@@ -105,7 +121,5 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
kvm_reset_sys_regs(vcpu);
/* Reset timer */
- kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
-
- return 0;
+ return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c370b4014799..d03d3af17e7e 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -38,6 +38,8 @@
#include "sys_regs.h"
+#include "trace.h"
+
/*
* All of this file is extremly similar to the ARM coproc.c, but the
* types are different. My gut feeling is that it should be pretty
@@ -208,9 +210,217 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
*vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
}
+ trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt));
+
+ return true;
+}
+
+/*
+ * reg_to_dbg/dbg_to_reg
+ *
+ * A 32 bit write to a debug register leaves the top bits alone.
+ * A 32 bit read from a debug register only returns the bottom bits.
+ *
+ * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
+ * hyp.S code switches between host and guest values in the future.
+ */
+static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ u64 *dbg_reg)
+{
+ u64 val = *vcpu_reg(vcpu, p->Rt);
+
+ if (p->is_32bit) {
+ val &= 0xffffffffUL;
+ val |= ((*dbg_reg >> 32) << 32);
+ }
+
+ *dbg_reg = val;
+ vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+}
+
+static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ u64 *dbg_reg)
+{
+ u64 val = *dbg_reg;
+
+ if (p->is_32bit)
+ val &= 0xffffffffUL;
+
+ *vcpu_reg(vcpu, p->Rt) = val;
+}
+
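A quick worked example of the masking in reg_to_dbg() (values invented):

    u64 val;

    /* 32-bit guest writes 0x12345678 while the 64-bit shadow register
     * already holds 0xdeadbeef00000000: */
    val  = 0x12345678 & 0xffffffffUL;            /* 0x0000000012345678 */
    val |= (0xdeadbeef00000000UL >> 32) << 32;   /* 0xdeadbeef12345678 */
    /* Only the low word changes, exactly as an AArch32 access should. */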
+static inline bool trap_bvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+ trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
return true;
}
+static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static inline void reset_bvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
+}
+
+static inline bool trap_bcr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+ trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
+ return true;
+}
+
+static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static inline void reset_bcr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
+}
+
+static inline bool trap_wvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+ trace_trap_reg(__func__, rd->reg, p->is_write,
+ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
+
+ return true;
+}
+
+static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static inline void reset_wvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
+}
+
+static inline bool trap_wcr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+
+ if (p->is_write)
+ reg_to_dbg(vcpu, p, dbg_reg);
+ else
+ dbg_to_reg(vcpu, p, dbg_reg);
+
+ trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
+ return true;
+}
+
+static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+
+ if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr)
+{
+ __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+
+ if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+ return -EFAULT;
+ return 0;
+}
+
+static inline void reset_wcr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *rd)
+{
+ vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
+}
+
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
u64 amair;
@@ -240,16 +450,16 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
/* DBGBVRn_EL1 */ \
{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100), \
- trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 }, \
+ trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \
/* DBGBCRn_EL1 */ \
{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101), \
- trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 }, \
+ trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \
/* DBGWVRn_EL1 */ \
{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110), \
- trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 }, \
+ trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \
/* DBGWCRn_EL1 */ \
{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \
- trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }
+ trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
/*
* Architected system registers.
@@ -329,13 +539,6 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ Op0(0b10), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b110),
trap_dbgauthstatus_el1 },
- /* TEECR32_EL1 */
- { Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
- NULL, reset_val, TEECR32_EL1, 0 },
- /* TEEHBR32_EL1 */
- { Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
- NULL, reset_val, TEEHBR32_EL1, 0 },
-
/* MDCCSR_EL1 */
{ Op0(0b10), Op1(0b011), CRn(0b0000), CRm(0b0001), Op2(0b000),
trap_raz_wi },
@@ -516,28 +719,57 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
return true;
}
-#define DBG_BCR_BVR_WCR_WVR(n) \
- /* DBGBVRn */ \
- { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32, \
- NULL, (cp14_DBGBVR0 + (n) * 2) }, \
- /* DBGBCRn */ \
- { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32, \
- NULL, (cp14_DBGBCR0 + (n) * 2) }, \
- /* DBGWVRn */ \
- { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32, \
- NULL, (cp14_DBGWVR0 + (n) * 2) }, \
- /* DBGWCRn */ \
- { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32, \
- NULL, (cp14_DBGWCR0 + (n) * 2) }
-
-#define DBGBXVR(n) \
- { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32, \
- NULL, cp14_DBGBXVR0 + n * 2 }
+/* AArch32 debug register mappings
+ *
+ * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
+ * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
+ *
+ * All control registers and watchpoint value registers are mapped to
+ * the lower 32 bits of their AArch64 equivalents. We share the trap
+ * handlers with the above AArch64 code which checks what mode the
+ * system is in.
+ */
+
+static inline bool trap_xvr(struct kvm_vcpu *vcpu,
+ const struct sys_reg_params *p,
+ const struct sys_reg_desc *rd)
+{
+ u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+ if (p->is_write) {
+ u64 val = *dbg_reg;
+
+ val &= 0xffffffffUL;
+ val |= *vcpu_reg(vcpu, p->Rt) << 32;
+ *dbg_reg = val;
+
+ vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+ } else {
+ *vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32;
+ }
+
+ trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
+ return true;
+}
+
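And the complementary case for DBGBXVRn, which supplies the high half (values invented):

    u64 val;

    /* dbg_bvr[n] currently holds 0x0000000080001000; the 32-bit guest
     * writes 0xffffffc0 to DBGBXVRn: */
    val  = 0x0000000080001000UL & 0xffffffffUL;  /* keep the low word  */
    val |= (u64)0xffffffc0 << 32;                /* 0xffffffc080001000 */
    /* i.e. DBGBXVRn lands in DBGBVRn_EL1[63:32], per the mapping above. */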
+#define DBG_BCR_BVR_WCR_WVR(n) \
+ /* DBGBVRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
+ /* DBGBCRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n }, \
+ /* DBGWVRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n }, \
+ /* DBGWCRn */ \
+ { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
+
+#define DBGBXVR(n) \
+ { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
/*
* Trapped cp14 registers. We generally ignore most of the external
* debug, on the principle that they don't really make sense to a
- * guest. Revisit this one day, whould this principle change.
+ * guest. Revisit this one day, would this principle change.
*/
static const struct sys_reg_desc cp14_regs[] = {
/* DBGIDR */
@@ -999,6 +1231,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
struct sys_reg_params params;
unsigned long esr = kvm_vcpu_get_hsr(vcpu);
+ trace_kvm_handle_sys_reg(esr);
+
params.is_aarch32 = false;
params.is_32bit = false;
params.Op0 = (esr >> 20) & 3;
@@ -1303,6 +1537,9 @@ int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
if (!r)
return get_invariant_sys_reg(reg->id, uaddr);
+ if (r->get_user)
+ return (r->get_user)(vcpu, r, reg, uaddr);
+
return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}
@@ -1321,6 +1558,9 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
if (!r)
return set_invariant_sys_reg(reg->id, uaddr);
+ if (r->set_user)
+ return (r->set_user)(vcpu, r, reg, uaddr);
+
return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}
diff --git a/arch/arm64/kvm/sys_regs.h b/arch/arm64/kvm/sys_regs.h
index d411e251412c..eaa324e4db4d 100644
--- a/arch/arm64/kvm/sys_regs.h
+++ b/arch/arm64/kvm/sys_regs.h
@@ -55,6 +55,12 @@ struct sys_reg_desc {
/* Value (usually reset value) */
u64 val;
+
+ /* Custom get/set_user functions, fallback to generic if NULL */
+ int (*get_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr);
+ int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+ const struct kvm_one_reg *reg, void __user *uaddr);
};
static inline void print_sys_reg_instr(const struct sys_reg_params *p)
diff --git a/arch/arm64/kvm/sys_regs_generic_v8.c b/arch/arm64/kvm/sys_regs_generic_v8.c
index 475fd2929310..1e4576824165 100644
--- a/arch/arm64/kvm/sys_regs_generic_v8.c
+++ b/arch/arm64/kvm/sys_regs_generic_v8.c
@@ -94,6 +94,8 @@ static int __init sys_reg_genericv8_init(void)
&genericv8_target_table);
kvm_register_target_sys_reg_table(KVM_ARM_TARGET_XGENE_POTENZA,
&genericv8_target_table);
+ kvm_register_target_sys_reg_table(KVM_ARM_TARGET_GENERIC_V8,
+ &genericv8_target_table);
return 0;
}
diff --git a/arch/arm64/kvm/trace.h b/arch/arm64/kvm/trace.h
index 157416e963f2..7fb0008c4fa3 100644
--- a/arch/arm64/kvm/trace.h
+++ b/arch/arm64/kvm/trace.h
@@ -44,6 +44,129 @@ TRACE_EVENT(kvm_hvc_arm64,
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
+TRACE_EVENT(kvm_arm_setup_debug,
+ TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
+ TP_ARGS(vcpu, guest_debug),
+
+ TP_STRUCT__entry(
+ __field(struct kvm_vcpu *, vcpu)
+ __field(__u32, guest_debug)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu = vcpu;
+ __entry->guest_debug = guest_debug;
+ ),
+
+ TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
+);
+
+TRACE_EVENT(kvm_arm_clear_debug,
+ TP_PROTO(__u32 guest_debug),
+ TP_ARGS(guest_debug),
+
+ TP_STRUCT__entry(
+ __field(__u32, guest_debug)
+ ),
+
+ TP_fast_assign(
+ __entry->guest_debug = guest_debug;
+ ),
+
+ TP_printk("flags: 0x%08x", __entry->guest_debug)
+);
+
+TRACE_EVENT(kvm_arm_set_dreg32,
+ TP_PROTO(const char *name, __u32 value),
+ TP_ARGS(name, value),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(__u32, value)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->value = value;
+ ),
+
+ TP_printk("%s: 0x%08x", __entry->name, __entry->value)
+);
+
+TRACE_EVENT(kvm_arm_set_regset,
+ TP_PROTO(const char *type, int len, __u64 *control, __u64 *value),
+ TP_ARGS(type, len, control, value),
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(int, len)
+ __array(u64, ctrls, 16)
+ __array(u64, values, 16)
+ ),
+ TP_fast_assign(
+ __entry->name = type;
+ __entry->len = len;
+ memcpy(__entry->ctrls, control, len << 3);
+ memcpy(__entry->values, value, len << 3);
+ ),
+ TP_printk("%d %s CTRL:%s VALUE:%s", __entry->len, __entry->name,
+ __print_array(__entry->ctrls, __entry->len, sizeof(__u64)),
+ __print_array(__entry->values, __entry->len, sizeof(__u64)))
+);
+
+TRACE_EVENT(trap_reg,
+ TP_PROTO(const char *fn, int reg, bool is_write, u64 write_value),
+ TP_ARGS(fn, reg, is_write, write_value),
+
+ TP_STRUCT__entry(
+ __field(const char *, fn)
+ __field(int, reg)
+ __field(bool, is_write)
+ __field(u64, write_value)
+ ),
+
+ TP_fast_assign(
+ __entry->fn = fn;
+ __entry->reg = reg;
+ __entry->is_write = is_write;
+ __entry->write_value = write_value;
+ ),
+
+ TP_printk("%s %s reg %d (0x%08llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
+);
+
+TRACE_EVENT(kvm_handle_sys_reg,
+ TP_PROTO(unsigned long hsr),
+ TP_ARGS(hsr),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, hsr)
+ ),
+
+ TP_fast_assign(
+ __entry->hsr = hsr;
+ ),
+
+ TP_printk("HSR 0x%08lx", __entry->hsr)
+);
+
+TRACE_EVENT(kvm_set_guest_debug,
+ TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
+ TP_ARGS(vcpu, guest_debug),
+
+ TP_STRUCT__entry(
+ __field(struct kvm_vcpu *, vcpu)
+ __field(__u32, guest_debug)
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu = vcpu;
+ __entry->guest_debug = guest_debug;
+ ),
+
+ TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
+);
+
+
#endif /* _TRACE_ARM64_KVM_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index d98d3e39879e..1a811ecf71da 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -3,3 +3,16 @@ lib-y := bitops.o clear_user.o delay.o copy_from_user.o \
clear_page.o memchr.o memcpy.o memmove.o memset.o \
memcmp.o strcmp.o strncmp.o strlen.o strnlen.o \
strchr.o strrchr.o
+
+# Tell the compiler to treat all general purpose registers as
+# callee-saved, which allows for efficient runtime patching of the bl
+# instruction in the caller with an atomic instruction when supported by
+# the CPU. Result and argument registers are handled correctly, based on
+# the function prototype.
+lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o
+CFLAGS_atomic_ll_sc.o := -fcall-used-x0 -ffixed-x1 -ffixed-x2 \
+ -ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6 \
+ -ffixed-x7 -fcall-saved-x8 -fcall-saved-x9 \
+ -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \
+ -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \
+ -fcall-saved-x16 -fcall-saved-x17 -fcall-saved-x18
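
Editor's note: the Makefile comment above explains why atomic_ll_sc.o is built with a restricted register set: callers can branch-and-link to the out-of-line LL/SC routines without extra spills, and on LSE-capable CPUs the bl itself is later patched into a single atomic instruction. A loose user-space analogy only, selecting the implementation by function pointer rather than by instruction patching (all names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* "Out-of-line" fallback: an LL/SC-style retry loop built from CAS. */
static unsigned long fetch_or_llsc(_Atomic unsigned long *p, unsigned long mask)
{
	unsigned long old = atomic_load_explicit(p, memory_order_relaxed);

	while (!atomic_compare_exchange_weak_explicit(p, &old, old | mask,
						      memory_order_acq_rel,
						      memory_order_relaxed))
		;	/* retry until the store-exclusive analogue succeeds */
	return old;
}

/* "Native" path: let the compiler emit the single read-modify-write op. */
static unsigned long fetch_or_native(_Atomic unsigned long *p, unsigned long mask)
{
	return atomic_fetch_or_explicit(p, mask, memory_order_acq_rel);
}

static unsigned long (*fetch_or)(_Atomic unsigned long *, unsigned long);

int main(void)
{
	bool cpu_has_native_rmw = true;	/* stand-in for the LSE capability bit */
	_Atomic unsigned long word = 1;
	unsigned long old;

	fetch_or = cpu_has_native_rmw ? fetch_or_native : fetch_or_llsc;
	old = fetch_or(&word, 4);
	printf("old %lu, new %lu\n", old,
	       atomic_load_explicit(&word, memory_order_relaxed));
	return 0;
}
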
diff --git a/arch/arm64/lib/atomic_ll_sc.c b/arch/arm64/lib/atomic_ll_sc.c
new file mode 100644
index 000000000000..b0c538b0da28
--- /dev/null
+++ b/arch/arm64/lib/atomic_ll_sc.c
@@ -0,0 +1,3 @@
+#include <asm/atomic.h>
+#define __ARM64_IN_ATOMIC_IMPL
+#include <asm/atomic_ll_sc.h>
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
index 7dac371cc9a2..43ac736baa5b 100644
--- a/arch/arm64/lib/bitops.S
+++ b/arch/arm64/lib/bitops.S
@@ -18,52 +18,59 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/lse.h>
/*
* x0: bits 5:0 bit offset
* bits 31:6 word offset
* x1: address
*/
- .macro bitop, name, instr
+ .macro bitop, name, llsc, lse
ENTRY( \name )
and w3, w0, #63 // Get bit offset
eor w0, w0, w3 // Clear low bits
mov x2, #1
add x1, x1, x0, lsr #3 // Get word offset
+alt_lse " prfm pstl1strm, [x1]", "nop"
lsl x3, x2, x3 // Create mask
-1: ldxr x2, [x1]
- \instr x2, x2, x3
- stxr w0, x2, [x1]
- cbnz w0, 1b
+
+alt_lse "1: ldxr x2, [x1]", "\lse x3, [x1]"
+alt_lse " \llsc x2, x2, x3", "nop"
+alt_lse " stxr w0, x2, [x1]", "nop"
+alt_lse " cbnz w0, 1b", "nop"
+
ret
ENDPROC(\name )
.endm
- .macro testop, name, instr
+ .macro testop, name, llsc, lse
ENTRY( \name )
and w3, w0, #63 // Get bit offset
eor w0, w0, w3 // Clear low bits
mov x2, #1
add x1, x1, x0, lsr #3 // Get word offset
+alt_lse " prfm pstl1strm, [x1]", "nop"
lsl x4, x2, x3 // Create mask
-1: ldxr x2, [x1]
- lsr x0, x2, x3 // Save old value of bit
- \instr x2, x2, x4 // toggle bit
- stlxr w5, x2, [x1]
- cbnz w5, 1b
- dmb ish
+
+alt_lse "1: ldxr x2, [x1]", "\lse x4, x2, [x1]"
+ lsr x0, x2, x3
+alt_lse " \llsc x2, x2, x4", "nop"
+alt_lse " stlxr w5, x2, [x1]", "nop"
+alt_lse " cbnz w5, 1b", "nop"
+alt_lse " dmb ish", "nop"
+
and x0, x0, #1
-3: ret
+ ret
ENDPROC(\name )
.endm
/*
* Atomic bit operations.
*/
- bitop change_bit, eor
- bitop clear_bit, bic
- bitop set_bit, orr
+ bitop change_bit, eor, steor
+ bitop clear_bit, bic, stclr
+ bitop set_bit, orr, stset
- testop test_and_change_bit, eor
- testop test_and_clear_bit, bic
- testop test_and_set_bit, orr
+ testop test_and_change_bit, eor, ldeoral
+ testop test_and_clear_bit, bic, ldclral
+ testop test_and_set_bit, orr, ldsetal
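
Editor's note: both macro variants above implement the same operation: split the bit number into a word offset and an in-word bit, build a mask, and apply it atomically, with the test_and_* forms also returning the old bit. A C rendering of that arithmetic, using the compiler's atomic builtin in place of either instruction sequence (illustration only, not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirror of the assembly's offset math: bits 5:0 select the bit within a
 * 64-bit word, the remaining bits select which word of the bitmap. */
static bool test_and_set_bit64(unsigned int nr, uint64_t *bitmap)
{
	uint64_t *word = bitmap + (nr >> 6);	/* word offset ("lsr #3" on byte addresses) */
	uint64_t mask = 1ULL << (nr & 63);	/* "and w3, w0, #63" + "lsl" mask */
	uint64_t old = __atomic_fetch_or(word, mask, __ATOMIC_ACQ_REL);

	return (old & mask) != 0;		/* "lsr x0, x2, x3" + "and x0, x0, #1" */
}

int main(void)
{
	uint64_t bitmap[2] = { 0 };

	printf("first set: %d\n", test_and_set_bit64(70, bitmap));	/* 0: bit was clear */
	printf("second set: %d\n", test_and_set_bit64(70, bitmap));	/* 1: bit already set */
	return 0;
}
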
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index c17967fdf5f6..a9723c71c52b 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -16,7 +16,11 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
+
+#include <asm/alternative.h>
#include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
.text
@@ -29,6 +33,8 @@
* Alignment fixed up by hardware.
*/
ENTRY(__clear_user)
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ CONFIG_ARM64_PAN)
mov x2, x1 // save the size for fixup return
subs x1, x1, #8
b.mi 2f
@@ -48,6 +54,8 @@ USER(9f, strh wzr, [x0], #2 )
b.mi 5f
USER(9f, strb wzr, [x0] )
5: mov x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ CONFIG_ARM64_PAN)
ret
ENDPROC(__clear_user)
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 5e27add9d362..1be9ef27be97 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -15,7 +15,11 @@
*/
#include <linux/linkage.h>
+
+#include <asm/alternative.h>
#include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
/*
* Copy from user space to a kernel buffer (alignment handled by the hardware)
@@ -28,14 +32,21 @@
* x0 - bytes not copied
*/
ENTRY(__copy_from_user)
- add x4, x1, x2 // upper user buffer boundary
- subs x2, x2, #8
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ CONFIG_ARM64_PAN)
+ add x5, x1, x2 // upper user buffer boundary
+ subs x2, x2, #16
+ b.mi 1f
+0:
+USER(9f, ldp x3, x4, [x1], #16)
+ subs x2, x2, #16
+ stp x3, x4, [x0], #16
+ b.pl 0b
+1: adds x2, x2, #8
b.mi 2f
-1:
USER(9f, ldr x3, [x1], #8 )
- subs x2, x2, #8
+ sub x2, x2, #8
str x3, [x0], #8
- b.pl 1b
2: adds x2, x2, #4
b.mi 3f
USER(9f, ldr w3, [x1], #4 )
@@ -51,12 +62,14 @@ USER(9f, ldrh w3, [x1], #2 )
USER(9f, ldrb w3, [x1] )
strb w3, [x0]
5: mov x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ CONFIG_ARM64_PAN)
ret
ENDPROC(__copy_from_user)
.section .fixup,"ax"
.align 2
-9: sub x2, x4, x1
+9: sub x2, x5, x1
mov x3, x2
10: strb wzr, [x0], #1 // zero remaining buffer space
subs x3, x3, #1
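
Editor's note: the rewritten copy loop above moves data 16 bytes at a time with ldp/stp and then falls back to 8-, 4-, 2- and 1-byte tails; the fixup computes the bytes left to copy from the saved upper boundary, now kept in x5 instead of x4. The staged copy, in plain C for illustration only (the fault handling itself is not modelled):

#include <stdio.h>
#include <string.h>

/* Copy in 16-byte chunks, then progressively smaller tails, mirroring the
 * ldp/stp main loop and the 8/4/2/1-byte epilogue of the assembly. */
static void copy_staged(void *dst, const void *src, size_t n)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	while (n >= 16) { memcpy(d, s, 16); d += 16; s += 16; n -= 16; }
	if (n >= 8) { memcpy(d, s, 8); d += 8; s += 8; n -= 8; }
	if (n >= 4) { memcpy(d, s, 4); d += 4; s += 4; n -= 4; }
	if (n >= 2) { memcpy(d, s, 2); d += 2; s += 2; n -= 2; }
	if (n)      { *d = *s; }
}

int main(void)
{
	char src[37], dst[37];
	size_t i;

	for (i = 0; i < sizeof(src); i++)
		src[i] = (char)i;
	copy_staged(dst, src, sizeof(src));
	printf("equal: %d\n", memcmp(dst, src, sizeof(dst)) == 0);
	return 0;
}
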
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 84b6c9bb9b93..1b94661e22b3 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -17,7 +17,11 @@
*/
#include <linux/linkage.h>
+
+#include <asm/alternative.h>
#include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
/*
* Copy from user space to user space (alignment handled by the hardware)
@@ -30,14 +34,21 @@
* x0 - bytes not copied
*/
ENTRY(__copy_in_user)
- add x4, x0, x2 // upper user buffer boundary
- subs x2, x2, #8
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ CONFIG_ARM64_PAN)
+ add x5, x0, x2 // upper user buffer boundary
+ subs x2, x2, #16
+ b.mi 1f
+0:
+USER(9f, ldp x3, x4, [x1], #16)
+ subs x2, x2, #16
+USER(9f, stp x3, x4, [x0], #16)
+ b.pl 0b
+1: adds x2, x2, #8
b.mi 2f
-1:
USER(9f, ldr x3, [x1], #8 )
- subs x2, x2, #8
+ sub x2, x2, #8
USER(9f, str x3, [x0], #8 )
- b.pl 1b
2: adds x2, x2, #4
b.mi 3f
USER(9f, ldr w3, [x1], #4 )
@@ -53,11 +64,13 @@ USER(9f, strh w3, [x0], #2 )
USER(9f, ldrb w3, [x1] )
USER(9f, strb w3, [x0] )
5: mov x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ CONFIG_ARM64_PAN)
ret
ENDPROC(__copy_in_user)
.section .fixup,"ax"
.align 2
-9: sub x0, x4, x0 // bytes not copied
+9: sub x0, x5, x0 // bytes not copied
ret
.previous
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index a0aeeb9b7a28..a257b47e2dc4 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -15,7 +15,11 @@
*/
#include <linux/linkage.h>
+
+#include <asm/alternative.h>
#include <asm/assembler.h>
+#include <asm/cpufeature.h>
+#include <asm/sysreg.h>
/*
* Copy to user space from a kernel buffer (alignment handled by the hardware)
@@ -28,14 +32,21 @@
* x0 - bytes not copied
*/
ENTRY(__copy_to_user)
- add x4, x0, x2 // upper user buffer boundary
- subs x2, x2, #8
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_HAS_PAN, \
+ CONFIG_ARM64_PAN)
+ add x5, x0, x2 // upper user buffer boundary
+ subs x2, x2, #16
+ b.mi 1f
+0:
+ ldp x3, x4, [x1], #16
+ subs x2, x2, #16
+USER(9f, stp x3, x4, [x0], #16)
+ b.pl 0b
+1: adds x2, x2, #8
b.mi 2f
-1:
ldr x3, [x1], #8
- subs x2, x2, #8
+ sub x2, x2, #8
USER(9f, str x3, [x0], #8 )
- b.pl 1b
2: adds x2, x2, #4
b.mi 3f
ldr w3, [x1], #4
@@ -51,11 +62,13 @@ USER(9f, strh w3, [x0], #2 )
ldrb w3, [x1]
USER(9f, strb w3, [x0] )
5: mov x0, #0
+ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_HAS_PAN, \
+ CONFIG_ARM64_PAN)
ret
ENDPROC(__copy_to_user)
.section .fixup,"ax"
.align 2
-9: sub x0, x4, x0 // bytes not copied
+9: sub x0, x5, x0 // bytes not copied
ret
.previous
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index bdeb5d38c2dd..eb48d5df4a0f 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -143,7 +143,12 @@ __dma_clean_range:
dcache_line_size x2, x3
sub x3, x2, #1
bic x0, x0, x3
-1: alternative_insn "dc cvac, x0", "dc civac, x0", ARM64_WORKAROUND_CLEAN_CACHE
+1:
+alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
+ dc cvac, x0
+alternative_else
+ dc civac, x0
+alternative_endif
add x0, x0, x2
cmp x0, x1
b.lo 1b
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 76c1e6cd36fc..d70ff14dbdbd 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -53,8 +53,6 @@ static void flush_context(void)
__flush_icache_all();
}
-#ifdef CONFIG_SMP
-
static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
unsigned long flags;
@@ -110,23 +108,12 @@ static void reset_context(void *info)
cpu_switch_mm(mm->pgd, mm);
}
-#else
-
-static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
-{
- mm->context.id = asid;
- cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
-}
-
-#endif
-
void __new_context(struct mm_struct *mm)
{
unsigned int asid;
unsigned int bits = asid_bits();
raw_spin_lock(&cpu_asid_lock);
-#ifdef CONFIG_SMP
/*
* Check the ASID again, in case the change was broadcast from another
* CPU before we acquired the lock.
@@ -136,7 +123,6 @@ void __new_context(struct mm_struct *mm)
raw_spin_unlock(&cpu_asid_lock);
return;
}
-#endif
/*
* At this point, it is guaranteed that the current mm (with an old
* ASID) isn't active on any other CPU since the ASIDs are changed
@@ -155,10 +141,8 @@ void __new_context(struct mm_struct *mm)
cpu_last_asid = ASID_FIRST_VERSION;
asid = cpu_last_asid + smp_processor_id();
flush_context();
-#ifdef CONFIG_SMP
smp_wmb();
smp_call_function(reset_context, NULL, 1);
-#endif
cpu_last_asid += NR_CPUS - 1;
}
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d16a1cead23f..99224dcebdc5 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -100,7 +100,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
if (IS_ENABLED(CONFIG_ZONE_DMA) &&
dev->coherent_dma_mask <= DMA_BIT_MASK(32))
flags |= GFP_DMA;
- if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
+ if (dev_get_cma_area(dev) && (flags & __GFP_WAIT)) {
struct page *page;
void *addr;
@@ -144,6 +144,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
struct page *page;
void *ptr, *coherent_ptr;
bool coherent = is_device_dma_coherent(dev);
+ pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
size = PAGE_ALIGN(size);
@@ -171,9 +172,7 @@ static void *__dma_alloc(struct device *dev, size_t size,
/* create a coherent mapping */
page = virt_to_page(ptr);
coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
- __get_dma_pgprot(attrs,
- __pgprot(PROT_NORMAL_NC), false),
- NULL);
+ prot, NULL);
if (!coherent_ptr)
goto no_map;
@@ -303,9 +302,10 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
sg->length, dir);
}
-/* vma->vm_page_prot must be set appropriately before calling this function */
-static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size)
+static int __swiotlb_mmap(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
{
int ret = -ENXIO;
unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
@@ -314,6 +314,9 @@ static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
unsigned long off = vma->vm_pgoff;
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+ is_device_dma_coherent(dev));
+
if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
return ret;
@@ -327,20 +330,24 @@ static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
return ret;
}
-static int __swiotlb_mmap(struct device *dev,
- struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ struct dma_attrs *attrs)
{
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
- is_device_dma_coherent(dev));
- return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+ int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+
+ if (!ret)
+ sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
+ PAGE_ALIGN(size), 0);
+
+ return ret;
}
static struct dma_map_ops swiotlb_dma_ops = {
.alloc = __dma_alloc,
.free = __dma_free,
.mmap = __swiotlb_mmap,
+ .get_sgtable = __swiotlb_get_sgtable,
.map_page = __swiotlb_map_page,
.unmap_page = __swiotlb_unmap_page,
.map_sg = __swiotlb_map_sg_attrs,
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 94d98cd1aad8..9fadf6d7039b 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -30,9 +30,11 @@
#include <linux/highmem.h>
#include <linux/perf_event.h>
+#include <asm/cpufeature.h>
#include <asm/exception.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
+#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
@@ -224,6 +226,13 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
}
/*
+ * PAN bit set implies the fault happened in kernel space, but not
+ * in the arch's user access functions.
+ */
+ if (IS_ENABLED(CONFIG_ARM64_PAN) && (regs->pstate & PSR_PAN_BIT))
+ goto no_context;
+
+ /*
* As per x86, we may deadlock here. However, since the kernel only
* validly references user space from well defined areas of the code,
* we can bug out early if this is from code which shouldn't.
@@ -278,6 +287,7 @@ retry:
* starvation.
*/
mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ mm_flags |= FAULT_FLAG_TRIED;
goto retry;
}
}
@@ -492,14 +502,22 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
arm64_notify_die("Oops - SP/PC alignment exception", regs, &info, esr);
}
-static struct fault_info debug_fault_info[] = {
+int __init early_brk64(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs);
+
+/*
+ * __refdata because early_brk64 is __init, but the reference to it is
+ * clobbered at arch_initcall time.
+ * See traps.c and debug-monitors.c:debug_traps_init().
+ */
+static struct fault_info __refdata debug_fault_info[] = {
{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware breakpoint" },
{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware single-step" },
{ do_bad, SIGTRAP, TRAP_HWBKPT, "hardware watchpoint" },
{ do_bad, SIGBUS, 0, "unknown 3" },
{ do_bad, SIGTRAP, TRAP_BRKPT, "aarch32 BKPT" },
{ do_bad, SIGTRAP, 0, "aarch32 vector catch" },
- { do_bad, SIGTRAP, TRAP_BRKPT, "aarch64 BRK" },
+ { early_brk64, SIGTRAP, TRAP_BRKPT, "aarch64 BRK" },
{ do_bad, SIGBUS, 0, "unknown 7" },
};
@@ -536,3 +554,10 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
return 0;
}
+
+#ifdef CONFIG_ARM64_PAN
+void cpu_enable_pan(void)
+{
+ config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+}
+#endif /* CONFIG_ARM64_PAN */
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 4dfa3975ce5b..c26b804015e8 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -60,14 +60,10 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long uaddr, void *dst, const void *src,
unsigned long len)
{
-#ifdef CONFIG_SMP
preempt_disable();
-#endif
memcpy(dst, src, len);
flush_ptrace_access(vma, page, uaddr, dst, len);
-#ifdef CONFIG_SMP
preempt_enable();
-#endif
}
void __sync_icache_dcache(pte_t pte, unsigned long addr)
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 831ec534d449..383b03ff38f8 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -13,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index ad87ce826cce..f5c0680d17d9 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -358,9 +358,9 @@ void free_initmem(void)
#ifdef CONFIG_BLK_DEV_INITRD
-static int keep_initrd;
+static int keep_initrd __initdata;
-void free_initrd_mem(unsigned long start, unsigned long end)
+void __init free_initrd_mem(unsigned long start, unsigned long end)
{
if (!keep_initrd)
free_reserved_area((void *)start, (void *)end, 0, "initrd");
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index a4ede4e2ddd1..9211b8527f25 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -267,7 +267,7 @@ static void *late_alloc(unsigned long size)
return ptr;
}
-static void __ref create_mapping(phys_addr_t phys, unsigned long virt,
+static void __init create_mapping(phys_addr_t phys, unsigned long virt,
phys_addr_t size, pgprot_t prot)
{
if (virt < VMALLOC_START) {
@@ -461,17 +461,6 @@ void __init paging_init(void)
}
/*
- * Enable the identity mapping to allow the MMU disabling.
- */
-void setup_mm_for_reboot(void)
-{
- cpu_set_reserved_ttbr0();
- flush_tlb_all();
- cpu_set_idmap_tcr_t0sz();
- cpu_switch_mm(idmap_pg_dir, &init_mm);
-}
-
-/*
* Check whether a kernel address is valid (derived from arch/x86/).
*/
int kern_addr_valid(unsigned long addr)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 160a1b5ab9c6..7783ff05f74c 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -34,11 +34,7 @@
#define TCR_TG_FLAGS TCR_TG0_4K | TCR_TG1_4K
#endif
-#ifdef CONFIG_SMP
#define TCR_SMP_FLAGS TCR_SHARED
-#else
-#define TCR_SMP_FLAGS 0
-#endif
/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
@@ -150,13 +146,13 @@ ENDPROC(cpu_do_switch_mm)
* value of the SCTLR_EL1 register.
*/
ENTRY(__cpu_setup)
- ic iallu // I+BTB cache invalidate
tlbi vmalle1is // invalidate I + D TLBs
dsb ish
mov x0, #3 << 20
msr cpacr_el1, x0 // Enable FP/ASIMD
- msr mdscr_el1, xzr // Reset mdscr_el1
+ mov x0, #1 << 12 // Reset mdscr_el1 and disable
+ msr mdscr_el1, x0 // access to the DCC from EL0
/*
* Memory region attributes for LPAE:
*
@@ -198,6 +194,19 @@ ENTRY(__cpu_setup)
*/
mrs x9, ID_AA64MMFR0_EL1
bfi x10, x9, #32, #3
+#ifdef CONFIG_ARM64_HW_AFDBM
+ /*
+ * Hardware update of the Access and Dirty bits.
+ */
+ mrs x9, ID_AA64MMFR1_EL1
+ and x9, x9, #0xf
+ cbz x9, 2f
+ cmp x9, #2
+ b.lt 1f
+ orr x10, x10, #TCR_HD // hardware Dirty flag update
+1: orr x10, x10, #TCR_HA // hardware Access flag update
+2:
+#endif /* CONFIG_ARM64_HW_AFDBM */
msr tcr_el1, x10
ret // return to head.S
ENDPROC(__cpu_setup)
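
Editor's note: the new CONFIG_ARM64_HW_AFDBM block reads the hardware Access/Dirty support level from ID_AA64MMFR1_EL1[3:0] and enables TCR_EL1.HA for level 1 and both HA and HD for level 2 or higher. The decision logic as a small C sketch; the register value is illustrative and the TCR bit positions follow the arm64 pgtable-hwdef definitions (HA = bit 39, HD = bit 40):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TCR_HA	(UINT64_C(1) << 39)	/* hardware Access flag update */
#define TCR_HD	(UINT64_C(1) << 40)	/* hardware Dirty flag update */

/* hafdbs is ID_AA64MMFR1_EL1[3:0]: 0 = none, 1 = Access flag, >= 2 = Access + Dirty. */
static uint64_t tcr_afdbm_bits(unsigned int hafdbs)
{
	uint64_t tcr = 0;

	if (hafdbs == 0)
		return 0;		/* "cbz x9, 2f": no hardware support */
	if (hafdbs >= 2)
		tcr |= TCR_HD;		/* "b.lt 1f" not taken: enable Dirty flag update */
	tcr |= TCR_HA;			/* "1:" reached for any non-zero level */
	return tcr;
}

int main(void)
{
	unsigned int level;

	for (level = 0; level <= 2; level++)
		printf("HAFDBS=%u -> TCR bits 0x%" PRIx64 "\n",
		       level, tcr_afdbm_bits(level));
	return 0;
}
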