author     Will Deacon <will.deacon@arm.com>          2012-03-05 12:49:31 +0100
committer  Catalin Marinas <catalin.marinas@arm.com>  2012-09-17 14:42:09 +0200
commit     9031fefde6f2ac1d1e5e0f8f4b22db4a091229bb (patch)
tree       a0663ffbc50293991604badf82f263dd50c009c7 /arch/arm64/kernel/vdso
parent     arm64: System calls handling (diff)
download   linux-9031fefde6f2ac1d1e5e0f8f4b22db4a091229bb.tar.xz
           linux-9031fefde6f2ac1d1e5e0f8f4b22db4a091229bb.zip
arm64: VDSO support
This patch adds VDSO support for 64-bit applications. The VDSO code is
currently used for sys_rt_sigreturn() and optimised gettimeofday()
(using the user-accessible generic counter).

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
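For context, a hedged illustration of what this buys userspace: once the vDSO is mapped, the C library can route gettimeofday() and clock_gettime() through the __kernel_* entry points added below, so ordinary calls like the ones in this sketch can complete without an svc trap on the fast path (whether they actually do depends on the libc in use).

    /* Plain userspace timing code; nothing vDSO-specific appears in the source. */
    #include <stdio.h>
    #include <time.h>
    #include <sys/time.h>

    int main(void)
    {
        struct timeval tv;
        struct timespec ts;

        gettimeofday(&tv, NULL);             /* may be served by __kernel_gettimeofday */
        clock_gettime(CLOCK_MONOTONIC, &ts); /* may be served by __kernel_clock_gettime */

        printf("tv: %ld.%06ld  ts: %ld.%09ld\n",
               (long)tv.tv_sec, (long)tv.tv_usec,
               (long)ts.tv_sec, ts.tv_nsec);
        return 0;
    }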
Diffstat (limited to 'arch/arm64/kernel/vdso')
-rw-r--r--  arch/arm64/kernel/vdso/.gitignore             2
-rw-r--r--  arch/arm64/kernel/vdso/Makefile              63
-rwxr-xr-x  arch/arm64/kernel/vdso/gen_vdso_offsets.sh   15
-rw-r--r--  arch/arm64/kernel/vdso/gettimeofday.S       242
-rw-r--r--  arch/arm64/kernel/vdso/note.S                28
-rw-r--r--  arch/arm64/kernel/vdso/sigreturn.S           37
-rw-r--r--  arch/arm64/kernel/vdso/vdso.S                33
-rw-r--r--  arch/arm64/kernel/vdso/vdso.lds.S           100
8 files changed, 520 insertions, 0 deletions
diff --git a/arch/arm64/kernel/vdso/.gitignore b/arch/arm64/kernel/vdso/.gitignore
new file mode 100644
index 000000000000..b8cc94e9698b
--- /dev/null
+++ b/arch/arm64/kernel/vdso/.gitignore
@@ -0,0 +1,2 @@
+vdso.lds
+vdso-offsets.h
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
new file mode 100644
index 000000000000..d8064af42e62
--- /dev/null
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -0,0 +1,63 @@
+#
+# Building a vDSO image for AArch64.
+#
+# Author: Will Deacon <will.deacon@arm.com>
+# Heavily based on the vDSO Makefiles for other archs.
+#
+
+obj-vdso := gettimeofday.o note.o sigreturn.o
+
+# Build rules
+targets := $(obj-vdso) vdso.so vdso.so.dbg
+obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+
+ccflags-y := -shared -fno-common -fno-builtin
+ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
+ $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+
+obj-y += vdso.o
+extra-y += vdso.lds vdso-offsets.h
+CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+
+# Force dependency (incbin is bad)
+$(obj)/vdso.o : $(obj)/vdso.so
+
+# Link rule for the .so file, .lds has to be first
+$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso)
+ $(call if_changed,vdsold)
+
+# Strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+# Generate VDSO offsets using helper script
+gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+quiet_cmd_vdsosym = VDSOSYM $@
+define cmd_vdsosym
+ $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ && \
+ cp $@ include/generated/
+endef
+
+$(obj)/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
+ $(call if_changed,vdsosym)
+
+# Assembly rules for the .S files
+$(obj-vdso): %.o: %.S
+ $(call if_changed_dep,vdsoas)
+
+# Actual build commands
+quiet_cmd_vdsold = VDSOL $@
+ cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
+quiet_cmd_vdsoas = VDSOA $@
+ cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
+
+# Install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+
+vdso.so: $(obj)/vdso.so.dbg
+ @mkdir -p $(MODLIB)/vdso
+ $(call cmd,vdso_install)
+
+vdso_install: vdso.so
diff --git a/arch/arm64/kernel/vdso/gen_vdso_offsets.sh b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh
new file mode 100755
index 000000000000..01924ff071ad
--- /dev/null
+++ b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+#
+# Match symbols in the DSO that look like VDSO_*; produce a header file
+# of constant offsets into the shared object.
+#
+# Doing this inside the Makefile will break the $(filter-out) function,
+# causing Kbuild to rebuild the vdso-offsets header file every time.
+#
+# Author: Will Deacon <will.deacon@arm.com>
+#
+
+LC_ALL=C
+sed -n -e 's/^00*/0/' -e \
+'s/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso_offset_\2\t0x\1/p'
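To make the transformation concrete (the address below is made up for illustration): given an nm(1) line from vdso.so.dbg along the lines of

    00000000000003c0 T VDSO_sigtramp

the pipeline above emits

    #define vdso_offset_sigtramp 0x03c0

and the resulting vdso-offsets.h is what the kernel side includes to locate entry points inside the vDSO pages.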
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
new file mode 100644
index 000000000000..dcb8c203a3b2
--- /dev/null
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -0,0 +1,242 @@
+/*
+ * Userspace implementations of gettimeofday() and friends.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+
+#define NSEC_PER_SEC_LO16 0xca00
+#define NSEC_PER_SEC_HI16 0x3b9a
+
+vdso_data .req x6
+use_syscall .req w7
+seqcnt .req w8
+
+ .macro seqcnt_acquire
+9999: ldr seqcnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
+ tbnz seqcnt, #0, 9999b
+ dmb ishld
+ ldr use_syscall, [vdso_data, #VDSO_USE_SYSCALL]
+ .endm
+
+ .macro seqcnt_read, cnt
+ dmb ishld
+ ldr \cnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
+ .endm
+
+ .macro seqcnt_check, cnt, fail
+ cmp \cnt, seqcnt
+ b.ne \fail
+ .endm
+
+ .text
+
+/* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); */
+ENTRY(__kernel_gettimeofday)
+ .cfi_startproc
+ mov x2, x30
+ .cfi_register x30, x2
+
+ /* Acquire the sequence counter and get the timespec. */
+ adr vdso_data, _vdso_data
+1: seqcnt_acquire
+ cbnz use_syscall, 4f
+
+ /* If tv is NULL, skip to the timezone code. */
+ cbz x0, 2f
+ bl __do_get_tspec
+ seqcnt_check w13, 1b
+
+ /* Convert ns to us. */
+ mov x11, #1000
+ udiv x10, x10, x11
+ stp x9, x10, [x0, #TVAL_TV_SEC]
+2:
+ /* If tz is NULL, return 0. */
+ cbz x1, 3f
+ ldp w4, w5, [vdso_data, #VDSO_TZ_MINWEST]
+ seqcnt_read w13
+ seqcnt_check w13, 1b
+ stp w4, w5, [x1, #TZ_MINWEST]
+3:
+ mov x0, xzr
+ ret x2
+4:
+ /* Syscall fallback. */
+ mov x8, #__NR_gettimeofday
+ svc #0
+ ret x2
+ .cfi_endproc
+ENDPROC(__kernel_gettimeofday)
+
+/* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */
+ENTRY(__kernel_clock_gettime)
+ .cfi_startproc
+ cmp w0, #CLOCK_REALTIME
+ ccmp w0, #CLOCK_MONOTONIC, #0x4, ne
+ b.ne 2f
+
+ mov x2, x30
+ .cfi_register x30, x2
+
+ /* Get kernel timespec. */
+ adr vdso_data, _vdso_data
+1: seqcnt_acquire
+ cbnz use_syscall, 7f
+
+ bl __do_get_tspec
+ seqcnt_check w13, 1b
+
+ cmp w0, #CLOCK_MONOTONIC
+ b.ne 6f
+
+ /* Get wtm timespec. */
+ ldp x14, x15, [vdso_data, #VDSO_WTM_CLK_SEC]
+
+ /* Check the sequence counter. */
+ seqcnt_read w13
+ seqcnt_check w13, 1b
+ b 4f
+2:
+ cmp w0, #CLOCK_REALTIME_COARSE
+ ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
+ b.ne 8f
+
+ /* Get coarse timespec. */
+ adr vdso_data, _vdso_data
+3: seqcnt_acquire
+ ldp x9, x10, [vdso_data, #VDSO_XTIME_CRS_SEC]
+
+ cmp w0, #CLOCK_MONOTONIC_COARSE
+ b.ne 6f
+
+ /* Get wtm timespec. */
+ ldp x14, x15, [vdso_data, #VDSO_WTM_CLK_SEC]
+
+ /* Check the sequence counter. */
+ seqcnt_read w13
+ seqcnt_check w13, 3b
+4:
+ /* Add on wtm timespec. */
+ add x9, x9, x14
+ add x10, x10, x15
+
+ /* Normalise the new timespec. */
+ mov x14, #NSEC_PER_SEC_LO16
+ movk x14, #NSEC_PER_SEC_HI16, lsl #16
+ cmp x10, x14
+ b.lt 5f
+ sub x10, x10, x14
+ add x9, x9, #1
+5:
+ cmp x10, #0
+ b.ge 6f
+ add x10, x10, x14
+ sub x9, x9, #1
+
+6: /* Store to the user timespec. */
+ stp x9, x10, [x1, #TSPEC_TV_SEC]
+ mov x0, xzr
+ ret x2
+7:
+ mov x30, x2
+8: /* Syscall fallback. */
+ mov x8, #__NR_clock_gettime
+ svc #0
+ ret
+ .cfi_endproc
+ENDPROC(__kernel_clock_gettime)
+
+/* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */
+ENTRY(__kernel_clock_getres)
+ .cfi_startproc
+ cbz w1, 3f
+
+ cmp w0, #CLOCK_REALTIME
+ ccmp w0, #CLOCK_MONOTONIC, #0x4, ne
+ b.ne 1f
+
+ ldr x2, 5f
+ b 2f
+1:
+ cmp w0, #CLOCK_REALTIME_COARSE
+ ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
+ b.ne 4f
+ ldr x2, 6f
+2:
+ stp xzr, x2, [x1]
+
+3: /* res == NULL. */
+ mov w0, wzr
+ ret
+
+4: /* Syscall fallback. */
+ mov x8, #__NR_clock_getres
+ svc #0
+ ret
+5:
+ .quad CLOCK_REALTIME_RES
+6:
+ .quad CLOCK_COARSE_RES
+ .cfi_endproc
+ENDPROC(__kernel_clock_getres)
+
+/*
+ * Read the current time from the architected counter.
+ * Expects vdso_data to be initialised.
+ * Clobbers the temporary registers (x9 - x15).
+ * Returns:
+ * - (x9, x10) = (ts->tv_sec, ts->tv_nsec)
+ * - (x11, x12) = (xtime->tv_sec, xtime->tv_nsec)
+ * - w13 = vDSO sequence counter
+ */
+ENTRY(__do_get_tspec)
+ .cfi_startproc
+
+ /* Read from the vDSO data page. */
+ ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
+ ldp x11, x12, [vdso_data, #VDSO_XTIME_CLK_SEC]
+ ldp w14, w15, [vdso_data, #VDSO_CS_MULT]
+ seqcnt_read w13
+
+ /* Read the physical counter. */
+ isb
+ mrs x9, cntpct_el0
+
+ /* Calculate cycle delta and convert to ns. */
+ sub x10, x9, x10
+ /* We can only guarantee 56 bits of precision. */
+ movn x9, #0xff0, lsl #48
+ and x10, x9, x10
+ mul x10, x10, x14
+ lsr x10, x10, x15
+
+ /* Use the kernel time to calculate the new timespec. */
+ add x10, x12, x10
+ mov x14, #NSEC_PER_SEC_LO16
+ movk x14, #NSEC_PER_SEC_HI16, lsl #16
+ udiv x15, x10, x14
+ add x9, x15, x11
+ mul x14, x14, x15
+ sub x10, x10, x14
+
+ ret
+ .cfi_endproc
+ENDPROC(__do_get_tspec)
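__do_get_tspec is essentially the usual clocksource arithmetic written in assembly. A rough C sketch of the same computation follows; the structure and field names (cycle_last, xtime_sec, xtime_nsec, mult, shift) are illustrative stand-ins for the vdso_data fields addressed via the VDSO_* offsets, not the kernel's exact definitions, and a plain 56-bit mask stands in for the movn/and pair.

    #include <stdint.h>

    #define NSEC_PER_SEC 1000000000ULL

    struct vdso_snapshot {
        uint64_t cycle_last;   /* counter value at the last kernel update */
        uint64_t xtime_sec;    /* wall-clock seconds at the last update */
        uint64_t xtime_nsec;   /* wall-clock nanoseconds at the last update */
        uint32_t mult;         /* cycles -> ns multiplier */
        uint32_t shift;        /* cycles -> ns shift */
    };

    /* The seqcnt_acquire/seqcnt_check retry loop done by the callers is omitted. */
    static void do_get_tspec(const struct vdso_snapshot *d, uint64_t now_cycles,
                             uint64_t *sec, uint64_t *nsec)
    {
        uint64_t delta = (now_cycles - d->cycle_last) & ((1ULL << 56) - 1);
        uint64_t ns = d->xtime_nsec + ((delta * d->mult) >> d->shift);

        *sec  = d->xtime_sec + ns / NSEC_PER_SEC;
        *nsec = ns % NSEC_PER_SEC;
    }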
diff --git a/arch/arm64/kernel/vdso/note.S b/arch/arm64/kernel/vdso/note.S
new file mode 100644
index 000000000000..b82c85e5d972
--- /dev/null
+++ b/arch/arm64/kernel/vdso/note.S
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ *
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+#include <linux/elfnote.h>
+
+ELFNOTE_START(Linux, 0, "a")
+ .long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/arm64/kernel/vdso/sigreturn.S b/arch/arm64/kernel/vdso/sigreturn.S
new file mode 100644
index 000000000000..20d98effa7dd
--- /dev/null
+++ b/arch/arm64/kernel/vdso/sigreturn.S
@@ -0,0 +1,37 @@
+/*
+ * Sigreturn trampoline for returning from a signal when the SA_RESTORER
+ * flag is not set.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+ .text
+
+ nop
+ENTRY(__kernel_rt_sigreturn)
+ .cfi_startproc
+ .cfi_signal_frame
+ .cfi_def_cfa x29, 0
+ .cfi_offset x29, 0 * 8
+ .cfi_offset x30, 1 * 8
+ mov x8, #__NR_rt_sigreturn
+ svc #0
+ .cfi_endproc
+ENDPROC(__kernel_rt_sigreturn)
diff --git a/arch/arm64/kernel/vdso/vdso.S b/arch/arm64/kernel/vdso/vdso.S
new file mode 100644
index 000000000000..60c1db54b41a
--- /dev/null
+++ b/arch/arm64/kernel/vdso/vdso.S
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/const.h>
+#include <asm/page.h>
+
+ __PAGE_ALIGNED_DATA
+
+ .globl vdso_start, vdso_end
+ .balign PAGE_SIZE
+vdso_start:
+ .incbin "arch/arm64/kernel/vdso/vdso.so"
+ .balign PAGE_SIZE
+vdso_end:
+
+ .previous
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
new file mode 100644
index 000000000000..8154b8d1c826
--- /dev/null
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -0,0 +1,100 @@
+/*
+ * GNU linker script for the VDSO library.
+*
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ * Heavily based on the vDSO linker scripts for other archs.
+ */
+
+#include <linux/const.h>
+#include <asm/page.h>
+#include <asm/vdso.h>
+
+OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", "elf64-littleaarch64")
+OUTPUT_ARCH(aarch64)
+
+SECTIONS
+{
+ . = VDSO_LBASE + SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .note : { *(.note.*) } :text :note
+
+ . = ALIGN(16);
+
+ .text : { *(.text*) } :text =0xd503201f
+ PROVIDE (__etext = .);
+ PROVIDE (_etext = .);
+ PROVIDE (etext = .);
+
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .rodata : { *(.rodata*) } :text
+
+ _end = .;
+ PROVIDE(end = .);
+
+ . = ALIGN(PAGE_SIZE);
+ PROVIDE(_vdso_data = .);
+
+ /DISCARD/ : {
+ *(.note.GNU-stack)
+ *(.data .data.* .gnu.linkonce.d.* .sdata*)
+ *(.bss .sbss .dynbss .dynsbss)
+ }
+}
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+ text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr PT_GNU_EH_FRAME;
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+ LINUX_2.6.39 {
+ global:
+ __kernel_rt_sigreturn;
+ __kernel_gettimeofday;
+ __kernel_clock_gettime;
+ __kernel_clock_getres;
+ local: *;
+ };
+}
+
+/*
+ * Make the sigreturn code visible to the kernel.
+ */
+VDSO_sigtramp = __kernel_rt_sigreturn;
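Because the version script above exports the __kernel_* symbols at version LINUX_2.6.39, a program can also bind to them explicitly rather than relying on the C library. A hedged sketch, assuming a glibc that exposes the already-mapped vDSO to dlopen() under its soname linux-vdso.so.1 (otherwise the lookup has to go through getauxval(AT_SYSINFO_EHDR) and manual ELF parsing):

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        /* RTLD_NOLOAD: only succeeds if the vDSO is already in the link map. */
        void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_NOLOAD);
        if (!vdso) {
            fprintf(stderr, "vDSO not visible to the dynamic loader here\n");
            return 1;
        }

        int (*vdso_clock_gettime)(clockid_t, struct timespec *) =
            (int (*)(clockid_t, struct timespec *))
            dlvsym(vdso, "__kernel_clock_gettime", "LINUX_2.6.39");
        if (!vdso_clock_gettime)
            return 1;

        struct timespec ts;
        vdso_clock_gettime(CLOCK_MONOTONIC, &ts);
        printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
        return 0;
    }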