author    Linus Torvalds <torvalds@linux-foundation.org>  2008-05-23 17:13:19 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-05-23 17:13:19 +0200
commit    cb618965bc2073267b7f9345066f502515fcfdf5 (patch)
tree      66632a2328272ce75e07f185cdfa8374c6ecfd2a
parent    fbdev: fix integer as NULL pointer warning (diff)
parent    stop_machine: make stop_machine_run more virtualization friendly (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus
* git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-for-linus:
  stop_machine: make stop_machine_run more virtualization friendly
  doc: add a chapter about trylock functions
  [Bug 9011] modules: proper cleanup of kobject without CONFIG_SYSFS
  module loading ELF handling: use SELFMAG instead of numeric constant
-rw-r--r--  Documentation/DocBook/kernel-locking.tmpl   25
-rw-r--r--  kernel/module.c                              18
-rw-r--r--  kernel/stop_machine.c                         7
3 files changed, 44 insertions(+), 6 deletions(-)
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 77c42f40be5d..2510763295d0 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -703,6 +703,31 @@
</sect1>
</chapter>
+<chapter id="trylock-functions">
+ <title>The trylock Functions</title>
+ <para>
+ There are functions that try to acquire a lock only once and immediately
+ return a value indicating whether they succeeded in acquiring it.
+ They can be used when you do not need access to the data protected by the
+ lock while some other thread is holding it; if you later do need that
+ data, you must acquire the lock at that point.
+ </para>
+
+ <para>
+ <function>spin_trylock()</function> does not spin but returns non-zero if
+ it acquires the spinlock on the first try or 0 if not. It can be used in
+ all the same contexts as <function>spin_lock()</function>: you must have
+ disabled the contexts that might interrupt you and take the spinlock.
+ </para>
+
+ <para>
+ <function>mutex_trylock()</function> does not suspend your task
+ but returns non-zero if it could lock the mutex on the first try
+ or 0 if not. This function cannot be safely used in hardware or software
+ interrupt contexts despite not sleeping.
+ </para>
+</chapter>
+
<chapter id="Examples">
<title>Common Examples</title>
<para>
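
For readers skimming the new chapter, here is a minimal C sketch (not part of the commit) of how the two trylock primitives it documents are typically used. my_lock, my_mutex, shared_counter and the two helper functions are invented names for illustration; the primitives themselves (spin_trylock, spin_unlock, mutex_trylock, mutex_unlock, DEFINE_SPINLOCK, DEFINE_MUTEX) are the standard kernel APIs the chapter describes.

    #include <linux/spinlock.h>
    #include <linux/mutex.h>

    static DEFINE_SPINLOCK(my_lock);
    static DEFINE_MUTEX(my_mutex);
    static int shared_counter;

    /* Fast path: update the counter only if the spinlock is free right now. */
    static void try_update_spin(void)
    {
            if (spin_trylock(&my_lock)) {   /* non-zero on success, 0 if held */
                    shared_counter++;
                    spin_unlock(&my_lock);
            }
            /* else: someone else holds the lock; skip the update this time */
    }

    /* Same idea with a mutex: mutex_trylock() never sleeps, but it still
     * must not be called from hardware or software interrupt context. */
    static void try_update_mutex(void)
    {
            if (mutex_trylock(&my_mutex)) {
                    shared_counter++;
                    mutex_unlock(&my_mutex);
            }
    }

The return-value convention shown in the comments (non-zero on success, 0 on failure) is exactly what the new paragraphs state for both functions.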
diff --git a/kernel/module.c b/kernel/module.c
index f5e9491ef7ac..5f80478b746d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1337,7 +1337,19 @@ out_unreg:
kobject_put(&mod->mkobj.kobj);
return err;
}
-#endif
+
+static void mod_sysfs_fini(struct module *mod)
+{
+ kobject_put(&mod->mkobj.kobj);
+}
+
+#else /* CONFIG_SYSFS */
+
+static void mod_sysfs_fini(struct module *mod)
+{
+}
+
+#endif /* CONFIG_SYSFS */
static void mod_kobject_remove(struct module *mod)
{
@@ -1345,7 +1357,7 @@ static void mod_kobject_remove(struct module *mod)
module_param_sysfs_remove(mod);
kobject_put(mod->mkobj.drivers_dir);
kobject_put(mod->holders_dir);
- kobject_put(&mod->mkobj.kobj);
+ mod_sysfs_fini(mod);
}
/*
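
The two hunks above rely on a common kernel idiom: define the real teardown helper when the config option is enabled and an empty stub otherwise, so callers such as mod_kobject_remove() never need an #ifdef of their own. A hedged, generic sketch of that idiom (CONFIG_FOO, struct foo and foo_sysfs_fini are invented names, not the code being patched) might look like this:

    #include <linux/kobject.h>

    struct foo {
            struct kobject kobj;
    };

    #ifdef CONFIG_FOO
    static void foo_sysfs_fini(struct foo *f)
    {
            kobject_put(&f->kobj);          /* drop the sysfs reference */
    }
    #else /* !CONFIG_FOO */
    static void foo_sysfs_fini(struct foo *f)
    {
            /* nothing to tear down when sysfs support is compiled out */
    }
    #endif /* CONFIG_FOO */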
@@ -1780,7 +1792,7 @@ static struct module *load_module(void __user *umod,
/* Sanity checks against insmoding binaries or wrong arch,
weird elf version */
- if (memcmp(hdr->e_ident, ELFMAG, 4) != 0
+ if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
|| hdr->e_type != ET_REL
|| !elf_check_arch(hdr)
|| hdr->e_shentsize != sizeof(*sechdrs)) {
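
The last hunk replaces the bare constant 4 with SELFMAG, the length of the ELF magic string ELFMAG ("\177ELF") defined in <elf.h>. A small userspace sketch of the same check follows; looks_like_elf is an invented name, and unlike the kernel loader it reads a file rather than an already-copied module image.

    #include <elf.h>
    #include <stdio.h>
    #include <string.h>

    /* Return 1 if the file at 'path' starts with a valid ELF magic. */
    static int looks_like_elf(const char *path)
    {
            Elf64_Ehdr hdr;
            FILE *f = fopen(path, "rb");
            int ok = 0;

            if (f && fread(&hdr, sizeof(hdr), 1, f) == 1)
                    ok = memcmp(hdr.e_ident, ELFMAG, SELFMAG) == 0;
            if (f)
                    fclose(f);
            return ok;
    }

    int main(int argc, char **argv)
    {
            if (argc > 1)
                    printf("%s: %s\n", argv[1],
                           looks_like_elf(argv[1]) ? "ELF" : "not ELF");
            return 0;
    }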
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 0101aeef7ed7..b7350bbfb076 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -62,8 +62,7 @@ static int stopmachine(void *cpu)
* help our sisters onto their CPUs. */
if (!prepared && !irqs_disabled)
yield();
- else
- cpu_relax();
+ cpu_relax();
}
/* Ack: we are exiting. */
@@ -106,8 +105,10 @@ static int stop_machine(void)
}
/* Wait for them all to come to life. */
- while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
+ while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) {
yield();
+ cpu_relax();
+ }
/* If some failed, kill them all. */
if (ret < 0) {
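
Both stop_machine.c hunks converge on the same waiting pattern: poll a shared counter, yield() so the per-CPU stopmachine threads can actually run, and call cpu_relax() so a hypervisor or SMT sibling can tell that the CPU is merely spinning, which is what makes stop_machine_run more virtualization friendly. A hedged, simplified sketch of that pattern is below; wait_for_acks and ack_count are invented names rather than the functions touched by the patch, and header locations may differ between kernel versions.

    #include <linux/atomic.h>
    #include <linux/sched.h>

    static atomic_t ack_count = ATOMIC_INIT(0);

    /* Spin until 'expected' threads have acknowledged, staying scheduler-
     * and hypervisor-friendly while doing so. */
    static void wait_for_acks(int expected)
    {
            while (atomic_read(&ack_count) != expected) {
                    yield();        /* let the other threads make progress */
                    cpu_relax();    /* tell the CPU/hypervisor we are spinning */
            }
    }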