author		Jan Beulich <JBeulich@suse.com>		2014-11-04 09:50:18 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2014-11-04 20:43:14 +0100
commit		6d24c5f72dfb26e5fa7f02fa9266dfdbae41adba (patch)
tree		02a65c8228a6a6bfc0e0c89c6c749e491b2bda8b /arch/x86
parent		x86: Convert a few more per-CPU items to read-mostly ones (diff)
x86-64: Handle PC-relative relocations on per-CPU data
This is in preparation of using RIP-relative addressing in many of the
per-CPU accesses.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Link: http://lkml.kernel.org/r/5458A15A0200007800044A9A@mail.emea.novell.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
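The fixup direction follows directly from the displacement arithmetic: a per-CPU symbol keeps its small link-time address in .data..percpu while the instruction that references it moves with the kernel image. A minimal sketch of that adjustment, assuming a signed 32-bit displacement and the same 'delta' the decompressor computes (illustrative only, not part of this commit):

#include <stdint.h>

/* Sketch: fix up a RIP-relative displacement that references a per-CPU
 * symbol after the kernel image has been moved by 'delta'.
 *
 *   link time:  disp  = percpu_sym - next_insn_rip
 *   relocated:  disp' = percpu_sym - (next_insn_rip + delta)
 *                     = disp - delta
 *
 * i.e. the opposite sign of the "+= delta" applied for absolute relocations.
 */
static inline void fixup_percpu_disp32(int32_t *disp, long delta)
{
	*disp -= (int32_t)delta;
}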
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/boot/compressed/misc.c	14
-rw-r--r--	arch/x86/tools/relocs.c		36
2 files changed, 40 insertions(+), 10 deletions(-)
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 57ab74df7eea..644abd767c12 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -260,7 +260,7 @@ static void handle_relocations(void *output, unsigned long output_len)
/*
* Process relocations: 32 bit relocations first then 64 bit after.
- * Two sets of binary relocations are added to the end of the kernel
+ * Three sets of binary relocations are added to the end of the kernel
* before compression. Each relocation table entry is the kernel
* address of the location which needs to be updated stored as a
* 32-bit value which is sign extended to 64 bits.
@@ -270,6 +270,8 @@ static void handle_relocations(void *output, unsigned long output_len)
* kernel bits...
* 0 - zero terminator for 64 bit relocations
* 64 bit relocation repeated
+ * 0 - zero terminator for inverse 32 bit relocations
+ * 32 bit inverse relocation repeated
* 0 - zero terminator for 32 bit relocations
* 32 bit relocation repeated
*
@@ -286,6 +288,16 @@ static void handle_relocations(void *output, unsigned long output_len)
*(uint32_t *)ptr += delta;
}
#ifdef CONFIG_X86_64
+ while (*--reloc) {
+ long extended = *reloc;
+ extended += map;
+
+ ptr = (unsigned long)extended;
+ if (ptr < min_addr || ptr > max_addr)
+ error("inverse 32-bit relocation outside of kernel!\n");
+
+ *(int32_t *)ptr -= delta;
+ }
for (reloc--; *reloc; reloc--) {
long extended = *reloc;
extended += map;
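The three appended tables described in the comment above are consumed in one backward walk over 32-bit entries, each set ending at a zero terminator. A rough standalone sketch of that walk, assuming a 'reloc' pointer at the last entry and the 'map'/'delta' values the decompressor computes (bounds checks omitted; the authoritative logic is the handle_relocations() code in the hunk above):

#include <stdint.h>

static void walk_reloc_tables(int32_t *reloc, long map, long delta)
{
	unsigned long ptr;

	/* 32-bit relocations sit nearest the end of the image. */
	for (; *reloc; reloc--) {
		ptr = (unsigned long)(*reloc + map);
		*(uint32_t *)ptr += delta;
	}

	/* Inverse 32-bit relocations: PC-relative references to per-CPU
	 * symbols, adjusted in the opposite direction. */
	while (*--reloc) {
		ptr = (unsigned long)(*reloc + map);
		*(int32_t *)ptr -= delta;
	}

	/* 64-bit relocations are emitted first, so they are reached last
	 * when walking backwards. */
	for (reloc--; *reloc; reloc--) {
		ptr = (unsigned long)(*reloc + map);
		*(uint64_t *)ptr += delta;
	}
}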
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index a5efb21d5228..0c2fae8d929d 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -20,7 +20,10 @@ struct relocs {
static struct relocs relocs16;
static struct relocs relocs32;
+#if ELF_BITS == 64
+static struct relocs relocs32neg;
static struct relocs relocs64;
+#endif
struct section {
Elf_Shdr shdr;
@@ -762,11 +765,16 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
switch (r_type) {
case R_X86_64_NONE:
+ /* NONE can be ignored. */
+ break;
+
case R_X86_64_PC32:
/*
- * NONE can be ignored and PC relative relocations don't
- * need to be adjusted.
+ * PC relative relocations don't need to be adjusted unless
+ * referencing a percpu symbol.
*/
+ if (is_percpu_sym(sym, symname))
+ add_reloc(&relocs32neg, offset);
break;
case R_X86_64_32:
@@ -986,7 +994,10 @@ static void emit_relocs(int as_text, int use_real_mode)
/* Order the relocations for more efficient processing */
sort_relocs(&relocs16);
sort_relocs(&relocs32);
+#if ELF_BITS == 64
+ sort_relocs(&relocs32neg);
sort_relocs(&relocs64);
+#endif
/* Print the relocations */
if (as_text) {
@@ -1007,14 +1018,21 @@ static void emit_relocs(int as_text, int use_real_mode)
for (i = 0; i < relocs32.count; i++)
write_reloc(relocs32.offset[i], stdout);
} else {
- if (ELF_BITS == 64) {
- /* Print a stop */
- write_reloc(0, stdout);
+#if ELF_BITS == 64
+ /* Print a stop */
+ write_reloc(0, stdout);
- /* Now print each relocation */
- for (i = 0; i < relocs64.count; i++)
- write_reloc(relocs64.offset[i], stdout);
- }
+ /* Now print each relocation */
+ for (i = 0; i < relocs64.count; i++)
+ write_reloc(relocs64.offset[i], stdout);
+
+ /* Print a stop */
+ write_reloc(0, stdout);
+
+ /* Now print each inverse 32-bit relocation */
+ for (i = 0; i < relocs32neg.count; i++)
+ write_reloc(relocs32neg.offset[i], stdout);
+#endif
/* Print a stop */
write_reloc(0, stdout);
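For reference, the is_percpu_sym() check used in the do_reloc64() hunk above is a pre-existing helper in relocs.c. A simplified, hypothetical version of what such a test amounts to (the real helper is more involved and also filters on the symbol name; this sketch only captures the section-based part):

#include <string.h>

/* Hypothetical simplification: treat a symbol as per-CPU when it is defined
 * in the .data..percpu section.  Not the actual relocs.c implementation. */
static int is_percpu_sym_sketch(const char *sec_name, const char *symname)
{
	(void)symname;	/* the real helper also looks at the symbol name */
	return strcmp(sec_name, ".data..percpu") == 0;
}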