| author | Andy Lutomirski <luto@amacapital.net> | 2014-05-20 00:58:33 +0200 |
|---|---|---|
| committer | H. Peter Anvin <hpa@linux.intel.com> | 2014-05-20 20:38:42 +0200 |
| commit | a62c34bd2a8a3f159945becd57401e478818d51c (patch) | |
| tree | 8721aca251b468606e52376fc811dd0c8beeaeb8 /arch/x86/vdso | |
| parent | mm, fs: Add vm_ops->name as an alternative to arch_vma_name (diff) | |
x86, mm: Improve _install_special_mapping and fix x86 vdso naming
Using arch_vma_name to give special mappings a name is awkward. x86
currently implements it by comparing the start address of the vma to
the expected address of the vdso. This requires tracking the start
address of special mappings and is probably buggy if a special vma
is split or moved.
Improve _install_special_mapping to just name the vma directly. Use
it to give the x86 vvar area a name, which should make CRIU's life
easier.
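For reference, a minimal sketch of the pattern this enables, lifted from the vvar portion of the diff below; the map_vvar() wrapper and its parameters are hypothetical and only illustrate how a caller hands the name to _install_special_mapping() instead of implementing arch_vma_name():

```c
#include <linux/mm.h>
#include <linux/err.h>

/* The mapping carries its own name, so no arch_vma_name() address check. */
static struct page *no_pages[] = {NULL};

static struct vm_special_mapping vvar_mapping = {
	.name  = "[vvar]",	/* what /proc/<pid>/maps will report */
	.pages = no_pages,
};

/* Hypothetical helper; addr/len come from the caller, mmap_sem held for write. */
static int map_vvar(struct mm_struct *mm, unsigned long addr, unsigned long len)
{
	struct vm_area_struct *vma;

	vma = _install_special_mapping(mm, addr, len, VM_READ, &vvar_mapping);
	if (IS_ERR(vma))
		return PTR_ERR(vma);
	return 0;
}
```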
As a side effect, the vvar area will show up in core dumps. This
could be considered weird and is fixable.
[hpa: I say we accept this as-is but be prepared to deal with knocking
out the vvars from core dumps if this becomes a problem.]
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: Pavel Emelyanov <xemul@parallels.com>
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Link: http://lkml.kernel.org/r/276b39b6b645fb11e345457b503f17b83c2c6fd0.1400538962.git.luto@amacapital.net
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/vdso')
| -rw-r--r-- | arch/x86/vdso/vdso2c.h | 5 |
| -rw-r--r-- | arch/x86/vdso/vdso32-setup.c | 7 |
| -rw-r--r-- | arch/x86/vdso/vma.c | 25 |
3 files changed, 20 insertions, 17 deletions
diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
index ed2e894e89ab..3dcc61e796e9 100644
--- a/arch/x86/vdso/vdso2c.h
+++ b/arch/x86/vdso/vdso2c.h
@@ -136,7 +136,10 @@ static int GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 	fprintf(outfile, "const struct vdso_image %s = {\n", name);
 	fprintf(outfile, "\t.data = raw_data,\n");
 	fprintf(outfile, "\t.size = %lu,\n", data_size);
-	fprintf(outfile, "\t.pages = pages,\n");
+	fprintf(outfile, "\t.text_mapping = {\n");
+	fprintf(outfile, "\t\t.name = \"[vdso]\",\n");
+	fprintf(outfile, "\t\t.pages = pages,\n");
+	fprintf(outfile, "\t},\n");
 	if (alt_sec) {
 		fprintf(outfile, "\t.alt = %lu,\n",
 			(unsigned long)alt_sec->sh_offset);
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index c3ed708e50f4..e4f7781ee162 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -119,13 +119,6 @@ __initcall(ia32_binfmt_init);
 
 #else /* CONFIG_X86_32 */
 
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
-	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
-		return "[vdso]";
-	return NULL;
-}
-
 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 	return NULL;
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 8ad0081df7a8..e1513c47872a 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -30,7 +30,8 @@ void __init init_vdso_image(const struct vdso_image *image)
 	BUG_ON(image->size % PAGE_SIZE != 0);
 
 	for (i = 0; i < npages; i++)
-		image->pages[i] = virt_to_page(image->data + i*PAGE_SIZE);
+		image->text_mapping.pages[i] =
+			virt_to_page(image->data + i*PAGE_SIZE);
 
 	apply_alternatives((struct alt_instr *)(image->data + image->alt),
 			   (struct alt_instr *)(image->data + image->alt +
@@ -91,6 +92,10 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
 	unsigned long addr;
 	int ret = 0;
 	static struct page *no_pages[] = {NULL};
+	static struct vm_special_mapping vvar_mapping = {
+		.name = "[vvar]",
+		.pages = no_pages,
+	};
 
 	if (calculate_addr) {
 		addr = vdso_addr(current->mm->start_stack,
@@ -112,21 +117,23 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
 	/*
 	 * MAYWRITE to allow gdb to COW and set breakpoints
 	 */
-	ret = install_special_mapping(mm,
-				      addr,
-				      image->size,
-				      VM_READ|VM_EXEC|
-				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-				      image->pages);
+	vma = _install_special_mapping(mm,
+				       addr,
+				       image->size,
+				       VM_READ|VM_EXEC|
+				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+				       &image->text_mapping);
 
-	if (ret)
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
 		goto up_fail;
+	}
 
 	vma = _install_special_mapping(mm,
 				       addr + image->size,
 				       image->sym_end_mapping - image->size,
 				       VM_READ,
-				       no_pages);
+				       &vvar_mapping);
 
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);