-rw-r--r--  arch/x86/include/asm/livepatch.h   |  1
-rw-r--r--  arch/x86/include/asm/setup.h       |  7
-rw-r--r--  arch/x86/kernel/machine_kexec_64.c |  3
-rw-r--r--  arch/x86/kernel/setup.c            |  2
-rw-r--r--  include/linux/livepatch.h          |  8
-rw-r--r--  kernel/livepatch/core.c            | 78
6 files changed, 66 insertions, 33 deletions
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index 2d29197bd2fb..19c099afa861 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -21,6 +21,7 @@
 #ifndef _ASM_X86_LIVEPATCH_H
 #define _ASM_X86_LIVEPATCH_H

+#include <asm/setup.h>
 #include <linux/module.h>
 #include <linux/ftrace.h>

diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index f69e06b283fb..11af24e09c8a 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -60,17 +60,24 @@ static inline void x86_ce4100_early_setup(void) { }
 #ifndef _SETUP

 #include <asm/espfix.h>
+#include <linux/kernel.h>

 /*
  * This is set up by the setup-routine at boot-time
  */
 extern struct boot_params boot_params;
+extern char _text[];

 static inline bool kaslr_enabled(void)
 {
         return !!(boot_params.hdr.loadflags & KASLR_FLAG);
 }

+static inline unsigned long kaslr_offset(void)
+{
+        return (unsigned long)&_text - __START_KERNEL;
+}
+
 /*
  * Do NOT EVER look at the BIOS memory size location.
  * It does not work on many machines.
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 415480d3ea84..e1029633f664 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -25,6 +25,7 @@
 #include <asm/io_apic.h>
 #include <asm/debugreg.h>
 #include <asm/kexec-bzimage64.h>
+#include <asm/setup.h>

 #ifdef CONFIG_KEXEC_FILE
 static struct kexec_file_ops *kexec_file_loaders[] = {
@@ -334,7 +335,7 @@ void arch_crash_save_vmcoreinfo(void)
         VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
 #endif
         vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
-                              (unsigned long)&_text - __START_KERNEL);
+                              kaslr_offset());
 }

 /* arch-dependent functionality related to kexec file-based syscall */
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d74ac33290ae..5056d3cfe266 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -834,7 +834,7 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
 {
         if (kaslr_enabled()) {
                 pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
-                         (unsigned long)&_text - __START_KERNEL,
+                         kaslr_offset(),
                          __START_KERNEL,
                          __START_KERNEL_map, MODULES_VADDR-1);
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index ee6dbb39a809..31db7a05dd36 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -99,7 +99,7 @@ struct klp_object {
         struct klp_func *funcs;

         /* internal */
-        struct kobject *kobj;
+        struct kobject kobj;
         struct module *mod;
         enum klp_state state;
 };
@@ -123,6 +123,12 @@ struct klp_patch {
         enum klp_state state;
 };

+#define klp_for_each_object(patch, obj) \
+        for (obj = patch->objs; obj->funcs; obj++)
+
+#define klp_for_each_func(obj, func) \
+        for (func = obj->funcs; func->old_name; func++)
+
 int klp_register_patch(struct klp_patch *);
 int klp_unregister_patch(struct klp_patch *);
 int klp_enable_patch(struct klp_patch *);
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 9ec555732f1a..c40ebcca0495 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -128,7 +128,7 @@ static bool klp_is_patch_registered(struct klp_patch *patch)

 static bool klp_initialized(void)
 {
-        return klp_root_kobj;
+        return !!klp_root_kobj;
 }

 struct klp_find_arg {
@@ -242,8 +242,9 @@ static int klp_find_verify_func_addr(struct klp_object *obj,
         int ret;

 #if defined(CONFIG_RANDOMIZE_BASE)
-        /* KASLR is enabled, disregard old_addr from user */
-        func->old_addr = 0;
+        /* If KASLR has been enabled, adjust old_addr accordingly */
+        if (kaslr_enabled() && func->old_addr)
+                func->old_addr += kaslr_offset();
 #endif

         if (!func->old_addr || klp_is_module(obj))
@@ -430,7 +431,7 @@ static void klp_disable_object(struct klp_object *obj)
 {
         struct klp_func *func;

-        for (func = obj->funcs; func->old_name; func++)
+        klp_for_each_func(obj, func)
                 if (func->state == KLP_ENABLED)
                         klp_disable_func(func);

@@ -448,7 +449,7 @@ static int klp_enable_object(struct klp_object *obj)
         if (WARN_ON(!klp_is_object_loaded(obj)))
                 return -EINVAL;

-        for (func = obj->funcs; func->old_name; func++) {
+        klp_for_each_func(obj, func) {
                 ret = klp_enable_func(func);
                 if (ret) {
                         klp_disable_object(obj);
@@ -471,7 +472,7 @@ static int __klp_disable_patch(struct klp_patch *patch)

         pr_notice("disabling patch '%s'\n", patch->mod->name);

-        for (obj = patch->objs; obj->funcs; obj++) {
+        klp_for_each_object(patch, obj) {
                 if (obj->state == KLP_ENABLED)
                         klp_disable_object(obj);
         }
@@ -531,7 +532,7 @@ static int __klp_enable_patch(struct klp_patch *patch)

         pr_notice("enabling patch '%s'\n", patch->mod->name);

-        for (obj = patch->objs; obj->funcs; obj++) {
+        klp_for_each_object(patch, obj) {
                 if (!klp_is_object_loaded(obj))
                         continue;

@@ -659,6 +660,15 @@ static struct kobj_type klp_ktype_patch = {
         .default_attrs = klp_patch_attrs,
 };

+static void klp_kobj_release_object(struct kobject *kobj)
+{
+}
+
+static struct kobj_type klp_ktype_object = {
+        .release = klp_kobj_release_object,
+        .sysfs_ops = &kobj_sysfs_ops,
+};
+
 static void klp_kobj_release_func(struct kobject *kobj)
 {
 }
@@ -688,7 +698,7 @@ static void klp_free_object_loaded(struct klp_object *obj)

         obj->mod = NULL;

-        for (func = obj->funcs; func->old_name; func++)
+        klp_for_each_func(obj, func)
                 func->old_addr = 0;
 }

@@ -703,7 +713,7 @@ static void klp_free_objects_limited(struct klp_patch *patch,

         for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
                 klp_free_funcs_limited(obj, NULL);
-                kobject_put(obj->kobj);
+                kobject_put(&obj->kobj);
         }
 }

@@ -721,7 +731,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
         func->state = KLP_DISABLED;

         return kobject_init_and_add(&func->kobj, &klp_ktype_func,
-                                    obj->kobj, "%s", func->old_name);
+                                    &obj->kobj, "%s", func->old_name);
 }

 /* parts of the initialization that is done only when the object is loaded */
@@ -737,7 +747,7 @@ static int klp_init_object_loaded(struct klp_patch *patch,
                         return ret;
         }

-        for (func = obj->funcs; func->old_name; func++) {
+        klp_for_each_func(obj, func) {
                 ret = klp_find_verify_func_addr(obj, func);
                 if (ret)
                         return ret;
@@ -761,11 +771,12 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
         klp_find_object_module(obj);

         name = klp_is_module(obj) ? obj->name : "vmlinux";
-        obj->kobj = kobject_create_and_add(name, &patch->kobj);
-        if (!obj->kobj)
-                return -ENOMEM;
+        ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
+                                   &patch->kobj, "%s", name);
+        if (ret)
+                return ret;

-        for (func = obj->funcs; func->old_name; func++) {
+        klp_for_each_func(obj, func) {
                 ret = klp_init_func(obj, func);
                 if (ret)
                         goto free;
@@ -781,7 +792,7 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)

 free:
         klp_free_funcs_limited(obj, func);
-        kobject_put(obj->kobj);
+        kobject_put(&obj->kobj);
         return ret;
 }

@@ -802,7 +813,7 @@ static int klp_init_patch(struct klp_patch *patch)
         if (ret)
                 goto unlock;

-        for (obj = patch->objs; obj->funcs; obj++) {
+        klp_for_each_object(patch, obj) {
                 ret = klp_init_object(patch, obj);
                 if (ret)
                         goto free;
@@ -891,7 +902,7 @@ int klp_register_patch(struct klp_patch *patch)
 }
 EXPORT_SYMBOL_GPL(klp_register_patch);

-static void klp_module_notify_coming(struct klp_patch *patch,
+static int klp_module_notify_coming(struct klp_patch *patch,
                                      struct klp_object *obj)
 {
         struct module *pmod = patch->mod;
@@ -899,22 +910,23 @@ static void klp_module_notify_coming(struct klp_patch *patch,
         int ret;

         ret = klp_init_object_loaded(patch, obj);
-        if (ret)
-                goto err;
+        if (ret) {
+                pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
+                        pmod->name, mod->name, ret);
+                return ret;
+        }

         if (patch->state == KLP_DISABLED)
-                return;
+                return 0;

         pr_notice("applying patch '%s' to loading module '%s'\n",
                   pmod->name, mod->name);

         ret = klp_enable_object(obj);
-        if (!ret)
-                return;
-
-err:
-        pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
-                pmod->name, mod->name, ret);
+        if (ret)
+                pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
+                        pmod->name, mod->name, ret);
+        return ret;
 }

 static void klp_module_notify_going(struct klp_patch *patch,
@@ -938,6 +950,7 @@ disabled:
 static int klp_module_notify(struct notifier_block *nb, unsigned long action,
                              void *data)
 {
+        int ret;
         struct module *mod = data;
         struct klp_patch *patch;
         struct klp_object *obj;
@@ -957,13 +970,18 @@ static int klp_module_notify(struct notifier_block *nb, unsigned long action,
         mod->klp_alive = false;

         list_for_each_entry(patch, &klp_patches, list) {
-                for (obj = patch->objs; obj->funcs; obj++) {
+                klp_for_each_object(patch, obj) {
                         if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
                                 continue;

                         if (action == MODULE_STATE_COMING) {
                                 obj->mod = mod;
-                                klp_module_notify_coming(patch, obj);
+                                ret = klp_module_notify_coming(patch, obj);
+                                if (ret) {
+                                        obj->mod = NULL;
+                                        pr_warn("patch '%s' is in an inconsistent state!\n",
+                                                patch->mod->name);
+                                }
                         } else /* MODULE_STATE_GOING */
                                 klp_module_notify_going(patch, obj);

@@ -981,7 +999,7 @@ static struct notifier_block klp_module_nb = {
         .priority = INT_MIN+1, /* called late but before ftrace notifier */
 };

-static int klp_init(void)
+static int __init klp_init(void)
 {
         int ret;