author    Christoph Hellwig <hch@lst.de>    2019-06-13 09:09:02 +0200
committer Greg Ungerer <gerg@kernel.org>    2019-06-24 01:16:47 +0200
commit    a2357223c50a784ae144c8398683551252bcd61d (patch)
tree      d5ea178771a9f356b6660235a3c2682a0d7eba42 /fs/binfmt_flat.c
parent    binfmt_flat: move the MAX_SHARED_LIBS definition to binfmt_flat.c (diff)
binfmt_flat: don't offset the data start
Ever since the initial commit of the binfmt_flat shared library support back in the BitKeeper days we've offset the actual in-memory .data start by one field per possible shared library, or by a single field when shared library support isn't enabled. I can't find anything in the loader that actually makes use of this offset, nor was it present before shared library support was added.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Greg Ungerer <gerg@linux-m68k.org>
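In essence, the removed offset reserved a table of MAX_SHARED_LIBS pointer-sized slots in front of .data that the loader never appears to read. As a condensed before/after sketch of the hunks below (not a verbatim excerpt of load_flat_file(), and eliding error handling):

	/* Before: both the allocation size and the .data start leave room
	 * for MAX_SHARED_LIBS unused slots ahead of the data segment.
	 */
	len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
	datapos = ALIGN(realdatastart + MAX_SHARED_LIBS * sizeof(unsigned long),
			FLAT_DATA_ALIGN);

	/* After: .data starts at the aligned allocation itself, and the
	 * memmove can be skipped when datapos == realdatastart.
	 */
	len = data_len + extra;
	datapos = ALIGN(realdatastart, FLAT_DATA_ALIGN);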
Diffstat (limited to 'fs/binfmt_flat.c')
-rw-r--r--  fs/binfmt_flat.c  20
1 file changed, 8 insertions, 12 deletions
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index ccd9843e979e..80d902fb46e3 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -573,7 +573,7 @@ static int load_flat_file(struct linux_binprm *bprm,
 			goto err;
 		}
 
-		len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
+		len = data_len + extra;
 		len = PAGE_ALIGN(len);
 		realdatastart = vm_mmap(NULL, 0, len,
 			PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0);
@@ -587,9 +587,7 @@ static int load_flat_file(struct linux_binprm *bprm,
 			vm_munmap(textpos, text_len);
 			goto err;
 		}
-		datapos = ALIGN(realdatastart +
-				MAX_SHARED_LIBS * sizeof(unsigned long),
-				FLAT_DATA_ALIGN);
+		datapos = ALIGN(realdatastart, FLAT_DATA_ALIGN);
 
 		pr_debug("Allocated data+bss+stack (%u bytes): %lx\n",
			 data_len + bss_len + stack_len, datapos);
@@ -619,7 +617,7 @@ static int load_flat_file(struct linux_binprm *bprm,
 		memp_size = len;
 	} else {
 
-		len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(u32);
+		len = text_len + data_len + extra;
 		len = PAGE_ALIGN(len);
 		textpos = vm_mmap(NULL, 0, len,
 			PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);
@@ -634,9 +632,7 @@ static int load_flat_file(struct linux_binprm *bprm,
 		}
 
 		realdatastart = textpos + ntohl(hdr->data_start);
-		datapos = ALIGN(realdatastart +
-				MAX_SHARED_LIBS * sizeof(u32),
-				FLAT_DATA_ALIGN);
+		datapos = ALIGN(realdatastart, FLAT_DATA_ALIGN);
 
 		reloc = (__be32 __user *)
 			(datapos + (ntohl(hdr->reloc_start) - text_len));
@@ -653,8 +649,9 @@ static int load_flat_file(struct linux_binprm *bprm,
 				(text_len + full_data
 					- sizeof(struct flat_hdr)),
 				0);
-			memmove((void *) datapos, (void *) realdatastart,
-				full_data);
+			if (datapos != realdatastart)
+				memmove((void *)datapos, (void *)realdatastart,
+					full_data);
 #else
 			/*
 			 * This is used on MMU systems mainly for testing.
@@ -710,8 +707,7 @@ static int load_flat_file(struct linux_binprm *bprm,
 		if (IS_ERR_VALUE(result)) {
 			ret = result;
 			pr_err("Unable to read code+data+bss, errno %d\n", ret);
-			vm_munmap(textpos, text_len + data_len + extra +
-				MAX_SHARED_LIBS * sizeof(u32));
+			vm_munmap(textpos, text_len + data_len + extra);
 			goto err;
 		}
 	}