author    Paul Mackerras <paulus@samba.org>  2008-01-31 01:25:51 +0100
committer Paul Mackerras <paulus@samba.org>  2008-01-31 01:25:51 +0100
commit    bd45ac0c5daae35e7c71138172e63df5cf644cf6 (patch)
tree      5eb5a599bf6a9d7a8a34e802db932aa9e9555de4 /lib
parent    Merge branch 'for-2.6.25' of git://git.secretlab.ca/git/linux-2.6-mpc52xx (diff)
parent    Merge git://git.kernel.org/pub/scm/linux/kernel/git/wim/linux-2.6-watchdog (diff)
Merge branch 'linux-2.6'
Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug     98
-rw-r--r--  lib/Makefile           3
-rw-r--r--  lib/find_next_bit.c   43
-rw-r--r--  lib/kernel_lock.c    123
-rw-r--r--  lib/kobject.c        734
-rw-r--r--  lib/kobject_uevent.c  38
-rw-r--r--  lib/kref.c            15
-rw-r--r--  lib/pcounter.c        58
-rw-r--r--  lib/rwsem.c            8
-rw-r--r--  lib/scatterlist.c    294
10 files changed, 995 insertions, 419 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a60109307d32..89f4035b526c 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -79,6 +79,38 @@ config HEADERS_CHECK
exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
your build tree), to make sure they're suitable.
+config DEBUG_SECTION_MISMATCH
+ bool "Enable full Section mismatch analysis"
+ default n
+ help
+ The section mismatch analysis checks if there are illegal
+ references from one section to another section.
+ During link time or during runtime, Linux will drop some
+ sections, and any use of code/data previously in these
+ sections will most likely result in an oops.
+ In the code, functions and variables are annotated with
+ __init, __devinit, etc. (see the full list in
+ include/linux/init.h), which results in the code/data being
+ placed in specific sections.
+ The section mismatch analysis is always done after a full
+ kernel build, but enabling this option will in addition
+ do the following:
+ - Add the option -fno-inline-functions-called-once to gcc
+ When inlining a function annotated __init into a non-init
+ function, we would lose the section information and thus
+ the analysis would not catch the illegal reference.
+ This option tells gcc to inline less, but will also
+ result in a larger kernel.
+ - Run the section mismatch analysis for each module/built-in.o
+ When we run the section mismatch analysis on vmlinux.o, we
+ lose valuable information about where the mismatch was
+ introduced.
+ Running the analysis for each module/built-in.o file
+ will pinpoint the mismatch much closer to the
+ source. The drawback is that we will report the same
+ mismatch at least twice.
+ - Enable verbose reporting from modpost to help solve
+ the reported section mismatches.
+
config DEBUG_KERNEL
bool "Kernel debugging"
help
@@ -462,6 +494,30 @@ config RCU_TORTURE_TEST
Say M if you want the RCU torture tests to build as a module.
Say N if you are unsure.
+config KPROBES_SANITY_TEST
+ bool "Kprobes sanity tests"
+ depends on DEBUG_KERNEL
+ depends on KPROBES
+ default n
+ help
+ This option provides for testing basic kprobes functionality on
+ boot. A sample kprobe, jprobe and kretprobe are inserted and
+ verified for functionality.
+
+ Say N if you are unsure.
+
+config BACKTRACE_SELF_TEST
+ tristate "Self test for the backtrace code"
+ depends on DEBUG_KERNEL
+ default n
+ help
+ This option provides a kernel module that can be used to test
+ the kernel stack backtrace code. This option is not useful
+ for distributions or general kernels, but only for kernel
+ developers working on architecture code.
+
+ Say N if you are unsure.
+
config LKDTM
tristate "Linux Kernel Dump Test Tool Module"
depends on DEBUG_KERNEL
@@ -517,4 +573,46 @@ config FAULT_INJECTION_STACKTRACE_FILTER
help
Provide stacktrace filter for fault-injection capabilities
+config LATENCYTOP
+ bool "Latency measuring infrastructure"
+ select FRAME_POINTER if !MIPS
+ select KALLSYMS
+ select KALLSYMS_ALL
+ select STACKTRACE
+ select SCHEDSTATS
+ select SCHED_DEBUG
+ depends on X86 || X86_64
+ help
+ Enable this option if you want to use the LatencyTOP tool
+ to find out which userspace processes are blocking on what
+ kernel operations.
+
+config PROVIDE_OHCI1394_DMA_INIT
+ bool "Provide code for enabling DMA over FireWire early on boot"
+ depends on PCI && X86
+ help
+ If you want to debug problems which hang or crash the kernel early
+ on boot and the crashing machine has a FireWire port, you can use
+ this feature to remotely access the memory of the crashed machine
+ over FireWire. This employs remote DMA as part of the OHCI1394
+ specification which is now the standard for FireWire controllers.
+
+ With remote DMA, you can monitor the printk buffer remotely using
+ firescope and access all memory below 4GB using fireproxy from gdb.
+ Even controlling a kernel debugger is possible using remote DMA.
+
+ Usage:
+
+ If ohci1394_dma=early is used as boot parameter, it will initialize
+ all OHCI1394 controllers which are found in the PCI config space.
+
+ As all changes to the FireWire bus such as enabling and disabling
+ devices cause a bus reset and thereby disable remote DMA for all
+ devices, be sure to have the cable plugged and FireWire enabled on
+ the debugging host before booting the debug target for debugging.
+
+ This code (~1k) is freed after boot. By then, the firewire stack
+ in charge of the OHCI-1394 controllers should be used instead.
+
+ See Documentation/debugging-via-ohci1394.txt for more information.
+
source "samples/Kconfig"
diff --git a/lib/Makefile b/lib/Makefile
index b6793ed28d84..543f2502b60a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -6,7 +6,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o \
idr.o int_sqrt.o extable.o prio_tree.o \
sha1.o irq_regs.o reciprocal_div.o argv_split.o \
- proportions.o prio_heap.o
+ proportions.o prio_heap.o scatterlist.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
@@ -61,6 +61,7 @@ obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o
obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o
obj-$(CONFIG_TEXTSEARCH_FSM) += ts_fsm.o
obj-$(CONFIG_SMP) += percpu_counter.o
+obj-$(CONFIG_SMP) += pcounter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c
index bda0d71a2514..78ccd73a8841 100644
--- a/lib/find_next_bit.c
+++ b/lib/find_next_bit.c
@@ -178,4 +178,47 @@ found_middle_swap:
EXPORT_SYMBOL(generic_find_next_zero_le_bit);
+unsigned long generic_find_next_le_bit(const unsigned long *addr, unsigned
+ long size, unsigned long offset)
+{
+ const unsigned long *p = addr + BITOP_WORD(offset);
+ unsigned long result = offset & ~(BITS_PER_LONG - 1);
+ unsigned long tmp;
+
+ if (offset >= size)
+ return size;
+ size -= result;
+ offset &= (BITS_PER_LONG - 1UL);
+ if (offset) {
+ tmp = ext2_swabp(p++);
+ tmp &= (~0UL << offset);
+ if (size < BITS_PER_LONG)
+ goto found_first;
+ if (tmp)
+ goto found_middle;
+ size -= BITS_PER_LONG;
+ result += BITS_PER_LONG;
+ }
+
+ while (size & ~(BITS_PER_LONG - 1)) {
+ tmp = *(p++);
+ if (tmp)
+ goto found_middle_swap;
+ result += BITS_PER_LONG;
+ size -= BITS_PER_LONG;
+ }
+ if (!size)
+ return result;
+ tmp = ext2_swabp(p);
+found_first:
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
+ if (tmp == 0UL) /* Are any bits set? */
+ return result + size; /* Nope. */
+found_middle:
+ return result + __ffs(tmp);
+
+found_middle_swap:
+ return result + __ffs(ext2_swab(tmp));
+}
+EXPORT_SYMBOL(generic_find_next_le_bit);
#endif /* __BIG_ENDIAN */
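
For context, a hedged usage sketch of the generic_find_next_le_bit() helper added above, walking all set bits of a little-endian on-disk bitmap on a big-endian host (the caller and debug text are made up):

#include <linux/bitops.h>
#include <linux/kernel.h>

static void walk_le_bitmap(const unsigned long *bitmap, unsigned long size)
{
	unsigned long bit;

	/* the helper returns 'size' when no further bit is set */
	for (bit = generic_find_next_le_bit(bitmap, size, 0);
	     bit < size;
	     bit = generic_find_next_le_bit(bitmap, size, bit + 1))
		pr_debug("bit %lu is set\n", bit);
}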
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index f73e2f8c308f..812dbf00844b 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -9,7 +9,6 @@
#include <linux/module.h>
#include <linux/kallsyms.h>
-#ifdef CONFIG_PREEMPT_BKL
/*
* The 'big kernel semaphore'
*
@@ -86,128 +85,6 @@ void __lockfunc unlock_kernel(void)
up(&kernel_sem);
}
-#else
-
-/*
- * The 'big kernel lock'
- *
- * This spinlock is taken and released recursively by lock_kernel()
- * and unlock_kernel(). It is transparently dropped and reacquired
- * over schedule(). It is used to protect legacy code that hasn't
- * been migrated to a proper locking design yet.
- *
- * Don't use in new code.
- */
-static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
-
-
-/*
- * Acquire/release the underlying lock from the scheduler.
- *
- * This is called with preemption disabled, and should
- * return an error value if it cannot get the lock and
- * TIF_NEED_RESCHED gets set.
- *
- * If it successfully gets the lock, it should increment
- * the preemption count like any spinlock does.
- *
- * (This works on UP too - _raw_spin_trylock will never
- * return false in that case)
- */
-int __lockfunc __reacquire_kernel_lock(void)
-{
- while (!_raw_spin_trylock(&kernel_flag)) {
- if (test_thread_flag(TIF_NEED_RESCHED))
- return -EAGAIN;
- cpu_relax();
- }
- preempt_disable();
- return 0;
-}
-
-void __lockfunc __release_kernel_lock(void)
-{
- _raw_spin_unlock(&kernel_flag);
- preempt_enable_no_resched();
-}
-
-/*
- * These are the BKL spinlocks - we try to be polite about preemption.
- * If SMP is not on (ie UP preemption), this all goes away because the
- * _raw_spin_trylock() will always succeed.
- */
-#ifdef CONFIG_PREEMPT
-static inline void __lock_kernel(void)
-{
- preempt_disable();
- if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
- /*
- * If preemption was disabled even before this
- * was called, there's nothing we can be polite
- * about - just spin.
- */
- if (preempt_count() > 1) {
- _raw_spin_lock(&kernel_flag);
- return;
- }
-
- /*
- * Otherwise, let's wait for the kernel lock
- * with preemption enabled..
- */
- do {
- preempt_enable();
- while (spin_is_locked(&kernel_flag))
- cpu_relax();
- preempt_disable();
- } while (!_raw_spin_trylock(&kernel_flag));
- }
-}
-
-#else
-
-/*
- * Non-preemption case - just get the spinlock
- */
-static inline void __lock_kernel(void)
-{
- _raw_spin_lock(&kernel_flag);
-}
-#endif
-
-static inline void __unlock_kernel(void)
-{
- /*
- * the BKL is not covered by lockdep, so we open-code the
- * unlocking sequence (and thus avoid the dep-chain ops):
- */
- _raw_spin_unlock(&kernel_flag);
- preempt_enable();
-}
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously, so we only need to
- * worry about other CPU's.
- */
-void __lockfunc lock_kernel(void)
-{
- int depth = current->lock_depth+1;
- if (likely(!depth))
- __lock_kernel();
- current->lock_depth = depth;
-}
-
-void __lockfunc unlock_kernel(void)
-{
- BUG_ON(current->lock_depth < 0);
- if (likely(--current->lock_depth < 0))
- __unlock_kernel();
-}
-
-#endif
-
EXPORT_SYMBOL(lock_kernel);
EXPORT_SYMBOL(unlock_kernel);
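
The surviving semaphore-based BKL still serves the classic (deprecated) call pattern; a minimal sketch, with a hypothetical ioctl body:

#include <linux/smp_lock.h>

static int legacy_ioctl(void)
{
	int ret;

	lock_kernel();	/* recursive; transparently dropped over schedule() */
	ret = 0;	/* ... touch legacy global state ... */
	unlock_kernel();
	return ret;
}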
diff --git a/lib/kobject.c b/lib/kobject.c
index 3590f022a609..1d63ead1815e 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -18,58 +18,57 @@
#include <linux/stat.h>
#include <linux/slab.h>
-/**
- * populate_dir - populate directory with attributes.
- * @kobj: object we're working on.
- *
- * Most subsystems have a set of default attributes that
- * are associated with an object that registers with them.
- * This is a helper called during object registration that
- * loops through the default attributes of the subsystem
- * and creates attributes files for them in sysfs.
+/*
+ * populate_dir - populate directory with attributes.
+ * @kobj: object we're working on.
*
+ * Most subsystems have a set of default attributes that are associated
+ * with an object that registers with them. This is a helper called during
+ * object registration that loops through the default attributes of the
+ * subsystem and creates attributes files for them in sysfs.
*/
-
-static int populate_dir(struct kobject * kobj)
+static int populate_dir(struct kobject *kobj)
{
- struct kobj_type * t = get_ktype(kobj);
- struct attribute * attr;
+ struct kobj_type *t = get_ktype(kobj);
+ struct attribute *attr;
int error = 0;
int i;
-
+
if (t && t->default_attrs) {
for (i = 0; (attr = t->default_attrs[i]) != NULL; i++) {
- if ((error = sysfs_create_file(kobj,attr)))
+ error = sysfs_create_file(kobj, attr);
+ if (error)
break;
}
}
return error;
}
-static int create_dir(struct kobject * kobj)
+static int create_dir(struct kobject *kobj)
{
int error = 0;
if (kobject_name(kobj)) {
error = sysfs_create_dir(kobj);
if (!error) {
- if ((error = populate_dir(kobj)))
+ error = populate_dir(kobj);
+ if (error)
sysfs_remove_dir(kobj);
}
}
return error;
}
-static inline struct kobject * to_kobj(struct list_head * entry)
+static inline struct kobject *to_kobj(struct list_head *entry)
{
- return container_of(entry,struct kobject,entry);
+ return container_of(entry, struct kobject, entry);
}
static int get_kobj_path_length(struct kobject *kobj)
{
int length = 1;
- struct kobject * parent = kobj;
+ struct kobject *parent = kobj;
- /* walk up the ancestors until we hit the one pointing to the
+ /* walk up the ancestors until we hit the one pointing to the
* root.
* Add 1 to strlen for leading '/' of each level.
*/
@@ -84,18 +83,19 @@ static int get_kobj_path_length(struct kobject *kobj)
static void fill_kobj_path(struct kobject *kobj, char *path, int length)
{
- struct kobject * parent;
+ struct kobject *parent;
--length;
for (parent = kobj; parent; parent = parent->parent) {
int cur = strlen(kobject_name(parent));
/* back up enough to print this name with '/' */
length -= cur;
- strncpy (path + length, kobject_name(parent), cur);
+ strncpy(path + length, kobject_name(parent), cur);
*(path + --length) = '/';
}
- pr_debug("%s: path = '%s'\n",__FUNCTION__,path);
+ pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj),
+ kobj, __FUNCTION__, path);
}
/**
@@ -123,179 +123,286 @@ char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
}
EXPORT_SYMBOL_GPL(kobject_get_path);
-/**
- * kobject_init - initialize object.
- * @kobj: object in question.
- */
-void kobject_init(struct kobject * kobj)
+/* add the kobject to its kset's list */
+static void kobj_kset_join(struct kobject *kobj)
{
- if (!kobj)
+ if (!kobj->kset)
return;
- kref_init(&kobj->kref);
- INIT_LIST_HEAD(&kobj->entry);
- kobj->kset = kset_get(kobj->kset);
+
+ kset_get(kobj->kset);
+ spin_lock(&kobj->kset->list_lock);
+ list_add_tail(&kobj->entry, &kobj->kset->list);
+ spin_unlock(&kobj->kset->list_lock);
}
+/* remove the kobject from its kset's list */
+static void kobj_kset_leave(struct kobject *kobj)
+{
+ if (!kobj->kset)
+ return;
-/**
- * unlink - remove kobject from kset list.
- * @kobj: kobject.
- *
- * Remove the kobject from the kset list and decrement
- * its parent's refcount.
- * This is separated out, so we can use it in both
- * kobject_del() and kobject_add() on error.
- */
+ spin_lock(&kobj->kset->list_lock);
+ list_del_init(&kobj->entry);
+ spin_unlock(&kobj->kset->list_lock);
+ kset_put(kobj->kset);
+}
-static void unlink(struct kobject * kobj)
+static void kobject_init_internal(struct kobject *kobj)
{
- if (kobj->kset) {
- spin_lock(&kobj->kset->list_lock);
- list_del_init(&kobj->entry);
- spin_unlock(&kobj->kset->list_lock);
- }
- kobject_put(kobj);
+ if (!kobj)
+ return;
+ kref_init(&kobj->kref);
+ INIT_LIST_HEAD(&kobj->entry);
}
-/**
- * kobject_add - add an object to the hierarchy.
- * @kobj: object.
- */
-int kobject_add(struct kobject * kobj)
+static int kobject_add_internal(struct kobject *kobj)
{
int error = 0;
- struct kobject * parent;
+ struct kobject *parent;
- if (!(kobj = kobject_get(kobj)))
+ if (!kobj)
return -ENOENT;
- if (!kobj->k_name)
- kobject_set_name(kobj, "NO_NAME");
- if (!*kobj->k_name) {
- pr_debug("kobject attempted to be registered with no name!\n");
+
+ if (!kobj->name || !kobj->name[0]) {
+ pr_debug("kobject: (%p): attempted to be registered with empty "
+ "name!\n", kobj);
WARN_ON(1);
- kobject_put(kobj);
return -EINVAL;
}
- parent = kobject_get(kobj->parent);
- pr_debug("kobject %s: registering. parent: %s, set: %s\n",
- kobject_name(kobj), parent ? kobject_name(parent) : "<NULL>",
- kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>" );
+ parent = kobject_get(kobj->parent);
+ /* join kset if set, use it as parent if we do not already have one */
if (kobj->kset) {
- spin_lock(&kobj->kset->list_lock);
-
if (!parent)
parent = kobject_get(&kobj->kset->kobj);
-
- list_add_tail(&kobj->entry,&kobj->kset->list);
- spin_unlock(&kobj->kset->list_lock);
+ kobj_kset_join(kobj);
kobj->parent = parent;
}
+ pr_debug("kobject: '%s' (%p): %s: parent: '%s', set: '%s'\n",
+ kobject_name(kobj), kobj, __FUNCTION__,
+ parent ? kobject_name(parent) : "<NULL>",
+ kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>");
+
error = create_dir(kobj);
if (error) {
- /* unlink does the kobject_put() for us */
- unlink(kobj);
+ kobj_kset_leave(kobj);
kobject_put(parent);
+ kobj->parent = NULL;
/* be noisy on error issues */
if (error == -EEXIST)
- printk(KERN_ERR "kobject_add failed for %s with "
+ printk(KERN_ERR "%s failed for %s with "
"-EEXIST, don't try to register things with "
"the same name in the same directory.\n",
- kobject_name(kobj));
+ __FUNCTION__, kobject_name(kobj));
else
- printk(KERN_ERR "kobject_add failed for %s (%d)\n",
- kobject_name(kobj), error);
+ printk(KERN_ERR "%s failed for %s (%d)\n",
+ __FUNCTION__, kobject_name(kobj), error);
dump_stack();
- }
+ } else
+ kobj->state_in_sysfs = 1;
return error;
}
/**
- * kobject_register - initialize and add an object.
- * @kobj: object in question.
+ * kobject_set_name_vargs - Set the name of a kobject
+ * @kobj: struct kobject to set the name of
+ * @fmt: format string used to build the name
+ * @vargs: vargs to format the string.
*/
-
-int kobject_register(struct kobject * kobj)
+static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
+ va_list vargs)
{
- int error = -EINVAL;
- if (kobj) {
- kobject_init(kobj);
- error = kobject_add(kobj);
- if (!error)
- kobject_uevent(kobj, KOBJ_ADD);
- }
- return error;
-}
+ va_list aq;
+ char *name;
+
+ va_copy(aq, vargs);
+ name = kvasprintf(GFP_KERNEL, fmt, vargs);
+ va_end(aq);
+ if (!name)
+ return -ENOMEM;
+
+ /* Free the old name, if necessary. */
+ kfree(kobj->name);
+
+ /* Now, set the new name */
+ kobj->name = name;
+
+ return 0;
+}
/**
* kobject_set_name - Set the name of a kobject
- * @kobj: kobject to name
+ * @kobj: struct kobject to set the name of
* @fmt: format string used to build the name
*
* This sets the name of the kobject. If you have already added the
* kobject to the system, you must call kobject_rename() in order to
* change the name of the kobject.
*/
-int kobject_set_name(struct kobject * kobj, const char * fmt, ...)
+int kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
- int error = 0;
- int limit;
- int need;
va_list args;
- char *name;
+ int retval;
- /* find out how big a buffer we need */
- name = kmalloc(1024, GFP_KERNEL);
- if (!name) {
- error = -ENOMEM;
- goto done;
- }
va_start(args, fmt);
- need = vsnprintf(name, 1024, fmt, args);
+ retval = kobject_set_name_vargs(kobj, fmt, args);
va_end(args);
- kfree(name);
- /* Allocate the new space and copy the string in */
- limit = need + 1;
- name = kmalloc(limit, GFP_KERNEL);
- if (!name) {
- error = -ENOMEM;
- goto done;
+ return retval;
+}
+EXPORT_SYMBOL(kobject_set_name);
+
+/**
+ * kobject_init - initialize a kobject structure
+ * @kobj: pointer to the kobject to initialize
+ * @ktype: pointer to the ktype for this kobject.
+ *
+ * This function will properly initialize a kobject such that it can then
+ * be passed to the kobject_add() call.
+ *
+ * After this function is called, the kobject MUST be cleaned up by a call
+ * to kobject_put(), not by a call to kfree directly to ensure that all of
+ * the memory is cleaned up properly.
+ */
+void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
+{
+ char *err_str;
+
+ if (!kobj) {
+ err_str = "invalid kobject pointer!";
+ goto error;
+ }
+ if (!ktype) {
+ err_str = "must have a ktype to be initialized properly!\n";
+ goto error;
+ }
+ if (kobj->state_initialized) {
+ /* do not error out as sometimes we can recover */
+ printk(KERN_ERR "kobject (%p): tried to init an initialized "
+ "object, something is seriously wrong.\n", kobj);
+ dump_stack();
}
- va_start(args, fmt);
- need = vsnprintf(name, limit, fmt, args);
- va_end(args);
- /* something wrong with the string we copied? */
- if (need >= limit) {
- kfree(name);
- error = -EFAULT;
- goto done;
+ kref_init(&kobj->kref);
+ INIT_LIST_HEAD(&kobj->entry);
+ kobj->ktype = ktype;
+ kobj->state_in_sysfs = 0;
+ kobj->state_add_uevent_sent = 0;
+ kobj->state_remove_uevent_sent = 0;
+ kobj->state_initialized = 1;
+ return;
+
+error:
+ printk(KERN_ERR "kobject (%p): %s\n", kobj, err_str);
+ dump_stack();
+}
+EXPORT_SYMBOL(kobject_init);
+
+static int kobject_add_varg(struct kobject *kobj, struct kobject *parent,
+ const char *fmt, va_list vargs)
+{
+ va_list aq;
+ int retval;
+
+ va_copy(aq, vargs);
+ retval = kobject_set_name_vargs(kobj, fmt, aq);
+ va_end(aq);
+ if (retval) {
+ printk(KERN_ERR "kobject: can not set name properly!\n");
+ return retval;
}
+ kobj->parent = parent;
+ return kobject_add_internal(kobj);
+}
- /* Free the old name, if necessary. */
- kfree(kobj->k_name);
+/**
+ * kobject_add - the main kobject add function
+ * @kobj: the kobject to add
+ * @parent: pointer to the parent of the kobject.
+ * @fmt: format to name the kobject with.
+ *
+ * The kobject name is set and added to the kobject hierarchy in this
+ * function.
+ *
+ * If @parent is set, then the parent of the @kobj will be set to it.
+ * If @parent is NULL, then the parent of the @kobj will be set to the
+ * kobject associated with the kset assigned to this kobject. If no kset
+ * is assigned to the kobject, then the kobject will be located in the
+ * root of the sysfs tree.
+ *
+ * If this function returns an error, kobject_put() must be called to
+ * properly clean up the memory associated with the object.
+ * Under no instance should the kobject that is passed to this function
+ * be directly freed with a call to kfree(), that can leak memory.
+ *
+ * Note, no "add" uevent will be created with this call, the caller should set
+ * up all of the necessary sysfs files for the object and then call
+ * kobject_uevent() with the KOBJ_ADD action to ensure that
+ * userspace is properly notified of this kobject's creation.
+ */
+int kobject_add(struct kobject *kobj, struct kobject *parent,
+ const char *fmt, ...)
+{
+ va_list args;
+ int retval;
- /* Now, set the new name */
- kobj->k_name = name;
-done:
- return error;
+ if (!kobj)
+ return -EINVAL;
+
+ if (!kobj->state_initialized) {
+ printk(KERN_ERR "kobject '%s' (%p): tried to add an "
+ "uninitialized object, something is seriously wrong.\n",
+ kobject_name(kobj), kobj);
+ dump_stack();
+ return -EINVAL;
+ }
+ va_start(args, fmt);
+ retval = kobject_add_varg(kobj, parent, fmt, args);
+ va_end(args);
+
+ return retval;
}
-EXPORT_SYMBOL(kobject_set_name);
+EXPORT_SYMBOL(kobject_add);
/**
- * kobject_rename - change the name of an object
- * @kobj: object in question.
- * @new_name: object's new name
+ * kobject_init_and_add - initialize a kobject structure and add it to the kobject hierarchy
+ * @kobj: pointer to the kobject to initialize
+ * @ktype: pointer to the ktype for this kobject.
+ * @parent: pointer to the parent of this kobject.
+ * @fmt: the name of the kobject.
+ *
+ * This function combines the call to kobject_init() and
+ * kobject_add(). The error handling after a call to kobject_add()
+ * and the kobject lifetime rules are the same here.
*/
+int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
+ struct kobject *parent, const char *fmt, ...)
+{
+ va_list args;
+ int retval;
+
+ kobject_init(kobj, ktype);
+
+ va_start(args, fmt);
+ retval = kobject_add_varg(kobj, parent, fmt, args);
+ va_end(args);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(kobject_init_and_add);
-int kobject_rename(struct kobject * kobj, const char *new_name)
+/**
+ * kobject_rename - change the name of an object
+ * @kobj: object in question.
+ * @new_name: object's new name
+ */
+int kobject_rename(struct kobject *kobj, const char *new_name)
{
int error = 0;
const char *devpath = NULL;
@@ -334,8 +441,6 @@ int kobject_rename(struct kobject * kobj, const char *new_name)
sprintf(devpath_string, "DEVPATH_OLD=%s", devpath);
envp[0] = devpath_string;
envp[1] = NULL;
- /* Note : if we want to send the new name alone, not the full path,
- * we could probably use kobject_name(kobj); */
error = sysfs_rename_dir(kobj, new_name);
@@ -354,11 +459,10 @@ out:
}
/**
- * kobject_move - move object to another parent
- * @kobj: object in question.
- * @new_parent: object's new parent (can be NULL)
+ * kobject_move - move object to another parent
+ * @kobj: object in question.
+ * @new_parent: object's new parent (can be NULL)
*/
-
int kobject_move(struct kobject *kobj, struct kobject *new_parent)
{
int error;
@@ -406,68 +510,74 @@ out:
}
/**
- * kobject_del - unlink kobject from hierarchy.
- * @kobj: object.
+ * kobject_del - unlink kobject from hierarchy.
+ * @kobj: object.
*/
-
-void kobject_del(struct kobject * kobj)
+void kobject_del(struct kobject *kobj)
{
if (!kobj)
return;
- sysfs_remove_dir(kobj);
- unlink(kobj);
-}
-/**
- * kobject_unregister - remove object from hierarchy and decrement refcount.
- * @kobj: object going away.
- */
-
-void kobject_unregister(struct kobject * kobj)
-{
- if (!kobj)
- return;
- pr_debug("kobject %s: unregistering\n",kobject_name(kobj));
- kobject_uevent(kobj, KOBJ_REMOVE);
- kobject_del(kobj);
- kobject_put(kobj);
+ sysfs_remove_dir(kobj);
+ kobj->state_in_sysfs = 0;
+ kobj_kset_leave(kobj);
+ kobject_put(kobj->parent);
+ kobj->parent = NULL;
}
/**
- * kobject_get - increment refcount for object.
- * @kobj: object.
+ * kobject_get - increment refcount for object.
+ * @kobj: object.
*/
-
-struct kobject * kobject_get(struct kobject * kobj)
+struct kobject *kobject_get(struct kobject *kobj)
{
if (kobj)
kref_get(&kobj->kref);
return kobj;
}
-/**
- * kobject_cleanup - free kobject resources.
- * @kobj: object.
+/*
+ * kobject_cleanup - free kobject resources.
+ * @kobj: object to cleanup
*/
-
-void kobject_cleanup(struct kobject * kobj)
+static void kobject_cleanup(struct kobject *kobj)
{
- struct kobj_type * t = get_ktype(kobj);
- struct kset * s = kobj->kset;
- struct kobject * parent = kobj->parent;
- const char *name = kobj->k_name;
+ struct kobj_type *t = get_ktype(kobj);
+ const char *name = kobj->name;
+
+ pr_debug("kobject: '%s' (%p): %s\n",
+ kobject_name(kobj), kobj, __FUNCTION__);
+
+ if (t && !t->release)
+ pr_debug("kobject: '%s' (%p): does not have a release() "
+ "function, it is broken and must be fixed.\n",
+ kobject_name(kobj), kobj);
+
+ /* send "remove" if the caller did not do it but sent "add" */
+ if (kobj->state_add_uevent_sent && !kobj->state_remove_uevent_sent) {
+ pr_debug("kobject: '%s' (%p): auto cleanup 'remove' event\n",
+ kobject_name(kobj), kobj);
+ kobject_uevent(kobj, KOBJ_REMOVE);
+ }
+
+ /* remove from sysfs if the caller did not do it */
+ if (kobj->state_in_sysfs) {
+ pr_debug("kobject: '%s' (%p): auto cleanup kobject_del\n",
+ kobject_name(kobj), kobj);
+ kobject_del(kobj);
+ }
- pr_debug("kobject %s: cleaning up\n",kobject_name(kobj));
if (t && t->release) {
+ pr_debug("kobject: '%s' (%p): calling ktype release\n",
+ kobject_name(kobj), kobj);
t->release(kobj);
- /* If we have a release function, we can guess that this was
- * not a statically allocated kobject, so we should be safe to
- * free the name */
+ }
+
+ /* free name if we allocated it */
+ if (name) {
+ pr_debug("kobject: '%s': free name\n", name);
kfree(name);
}
- if (s)
- kset_put(s);
- kobject_put(parent);
}
static void kobject_release(struct kref *kref)
@@ -476,107 +586,130 @@ static void kobject_release(struct kref *kref)
}
/**
- * kobject_put - decrement refcount for object.
- * @kobj: object.
+ * kobject_put - decrement refcount for object.
+ * @kobj: object.
*
- * Decrement the refcount, and if 0, call kobject_cleanup().
+ * Decrement the refcount, and if 0, call kobject_cleanup().
*/
-void kobject_put(struct kobject * kobj)
+void kobject_put(struct kobject *kobj)
{
if (kobj)
kref_put(&kobj->kref, kobject_release);
}
-
-static void dir_release(struct kobject *kobj)
+static void dynamic_kobj_release(struct kobject *kobj)
{
+ pr_debug("kobject: (%p): %s\n", kobj, __FUNCTION__);
kfree(kobj);
}
-static struct kobj_type dir_ktype = {
- .release = dir_release,
- .sysfs_ops = NULL,
- .default_attrs = NULL,
+static struct kobj_type dynamic_kobj_ktype = {
+ .release = dynamic_kobj_release,
+ .sysfs_ops = &kobj_sysfs_ops,
};
/**
- * kobject_kset_add_dir - add sub directory of object.
- * @kset: kset the directory is belongs to.
- * @parent: object in which a directory is created.
- * @name: directory name.
+ * kobject_create - create a struct kobject dynamically
*
- * Add a plain directory object as child of given object.
+ * This function creates a kobject structure dynamically and sets it up
+ * to be a "dynamic" kobject with a default release function set up.
+ *
+ * If the kobject was not able to be created, NULL will be returned.
+ * The kobject structure returned from here must be cleaned up with a
+ * call to kobject_put() and not kfree(), as kobject_init() has
+ * already been called on this structure.
*/
-struct kobject *kobject_kset_add_dir(struct kset *kset,
- struct kobject *parent, const char *name)
+struct kobject *kobject_create(void)
{
- struct kobject *k;
- int ret;
-
- if (!parent)
- return NULL;
-
- k = kzalloc(sizeof(*k), GFP_KERNEL);
- if (!k)
- return NULL;
+ struct kobject *kobj;
- k->kset = kset;
- k->parent = parent;
- k->ktype = &dir_ktype;
- kobject_set_name(k, name);
- ret = kobject_register(k);
- if (ret < 0) {
- printk(KERN_WARNING "%s: kobject_register error: %d\n",
- __func__, ret);
- kobject_del(k);
+ kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
+ if (!kobj)
return NULL;
- }
- return k;
+ kobject_init(kobj, &dynamic_kobj_ktype);
+ return kobj;
}
/**
- * kobject_add_dir - add sub directory of object.
- * @parent: object in which a directory is created.
- * @name: directory name.
+ * kobject_create_and_add - create a struct kobject dynamically and register it with sysfs
+ *
+ * @name: the name for the kobject
+ * @parent: the parent kobject of this kobject, if any.
+ *
+ * This function creates a kobject structure dynamically and registers it
+ * with sysfs. When you are finished with this structure, call
+ * kobject_put() and the structure will be dynamically freed when
+ * it is no longer being used.
*
- * Add a plain directory object as child of given object.
+ * If the kobject was not able to be created, NULL will be returned.
*/
-struct kobject *kobject_add_dir(struct kobject *parent, const char *name)
+struct kobject *kobject_create_and_add(const char *name, struct kobject *parent)
{
- return kobject_kset_add_dir(NULL, parent, name);
+ struct kobject *kobj;
+ int retval;
+
+ kobj = kobject_create();
+ if (!kobj)
+ return NULL;
+
+ retval = kobject_add(kobj, parent, "%s", name);
+ if (retval) {
+ printk(KERN_WARNING "%s: kobject_add error: %d\n",
+ __FUNCTION__, retval);
+ kobject_put(kobj);
+ kobj = NULL;
+ }
+ return kobj;
}
+EXPORT_SYMBOL_GPL(kobject_create_and_add);
/**
- * kset_init - initialize a kset for use
- * @k: kset
+ * kset_init - initialize a kset for use
+ * @k: kset
*/
-
-void kset_init(struct kset * k)
+void kset_init(struct kset *k)
{
- kobject_init(&k->kobj);
+ kobject_init_internal(&k->kobj);
INIT_LIST_HEAD(&k->list);
spin_lock_init(&k->list_lock);
}
+/* default kobject attribute operations */
+static ssize_t kobj_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
-/**
- * kset_add - add a kset object to the hierarchy.
- * @k: kset.
- */
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->show)
+ ret = kattr->show(kobj, kattr, buf);
+ return ret;
+}
-int kset_add(struct kset * k)
+static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
{
- return kobject_add(&k->kobj);
+ struct kobj_attribute *kattr;
+ ssize_t ret = -EIO;
+
+ kattr = container_of(attr, struct kobj_attribute, attr);
+ if (kattr->store)
+ ret = kattr->store(kobj, kattr, buf, count);
+ return ret;
}
+struct sysfs_ops kobj_sysfs_ops = {
+ .show = kobj_attr_show,
+ .store = kobj_attr_store,
+};
/**
- * kset_register - initialize and add a kset.
- * @k: kset.
+ * kset_register - initialize and add a kset.
+ * @k: kset.
*/
-
-int kset_register(struct kset * k)
+int kset_register(struct kset *k)
{
int err;
@@ -584,46 +717,42 @@ int kset_register(struct kset * k)
return -EINVAL;
kset_init(k);
- err = kset_add(k);
+ err = kobject_add_internal(&k->kobj);
if (err)
return err;
kobject_uevent(&k->kobj, KOBJ_ADD);
return 0;
}
-
/**
- * kset_unregister - remove a kset.
- * @k: kset.
+ * kset_unregister - remove a kset.
+ * @k: kset.
*/
-
-void kset_unregister(struct kset * k)
+void kset_unregister(struct kset *k)
{
if (!k)
return;
- kobject_unregister(&k->kobj);
+ kobject_put(&k->kobj);
}
-
/**
- * kset_find_obj - search for object in kset.
- * @kset: kset we're looking in.
- * @name: object's name.
+ * kset_find_obj - search for object in kset.
+ * @kset: kset we're looking in.
+ * @name: object's name.
*
- * Lock kset via @kset->subsys, and iterate over @kset->list,
- * looking for a matching kobject. If matching object is found
- * take a reference and return the object.
+ * Lock kset via @kset->subsys, and iterate over @kset->list,
+ * looking for a matching kobject. If matching object is found
+ * take a reference and return the object.
*/
-
-struct kobject * kset_find_obj(struct kset * kset, const char * name)
+struct kobject *kset_find_obj(struct kset *kset, const char *name)
{
- struct list_head * entry;
- struct kobject * ret = NULL;
+ struct list_head *entry;
+ struct kobject *ret = NULL;
spin_lock(&kset->list_lock);
- list_for_each(entry,&kset->list) {
- struct kobject * k = to_kobj(entry);
- if (kobject_name(k) && !strcmp(kobject_name(k),name)) {
+ list_for_each(entry, &kset->list) {
+ struct kobject *k = to_kobj(entry);
+ if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
ret = kobject_get(k);
break;
}
@@ -632,47 +761,94 @@ struct kobject * kset_find_obj(struct kset * kset, const char * name)
return ret;
}
-int subsystem_register(struct kset *s)
+static void kset_release(struct kobject *kobj)
{
- return kset_register(s);
+ struct kset *kset = container_of(kobj, struct kset, kobj);
+ pr_debug("kobject: '%s' (%p): %s\n",
+ kobject_name(kobj), kobj, __FUNCTION__);
+ kfree(kset);
}
-void subsystem_unregister(struct kset *s)
+static struct kobj_type kset_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = kset_release,
+};
+
+/**
+ * kset_create - create a struct kset dynamically
+ *
+ * @name: the name for the kset
+ * @uevent_ops: a struct kset_uevent_ops for the kset
+ * @parent_kobj: the parent kobject of this kset, if any.
+ *
+ * This function creates a kset structure dynamically. This structure can
+ * then be registered with the system and show up in sysfs with a call to
+ * kset_register(). When you are finished with this structure, if
+ * kset_register() has been called, call kset_unregister() and the
+ * structure will be dynamically freed when it is no longer being used.
+ *
+ * If the kset was not able to be created, NULL will be returned.
+ */
+static struct kset *kset_create(const char *name,
+ struct kset_uevent_ops *uevent_ops,
+ struct kobject *parent_kobj)
{
- kset_unregister(s);
+ struct kset *kset;
+
+ kset = kzalloc(sizeof(*kset), GFP_KERNEL);
+ if (!kset)
+ return NULL;
+ kobject_set_name(&kset->kobj, name);
+ kset->uevent_ops = uevent_ops;
+ kset->kobj.parent = parent_kobj;
+
+ /*
+ * The kobject of this kset will have a type of kset_ktype and belong to
+ * no kset itself. That way we can properly free it when it is
+ * finished being used.
+ */
+ kset->kobj.ktype = &kset_ktype;
+ kset->kobj.kset = NULL;
+
+ return kset;
}
/**
- * subsystem_create_file - export sysfs attribute file.
- * @s: subsystem.
- * @a: subsystem attribute descriptor.
+ * kset_create_and_add - create a struct kset dynamically and add it to sysfs
+ *
+ * @name: the name for the kset
+ * @uevent_ops: a struct kset_uevent_ops for the kset
+ * @parent_kobj: the parent kobject of this kset, if any.
+ *
+ * This function creates a kset structure dynamically and registers it
+ * with sysfs. When you are finished with this structure, call
+ * kset_unregister() and the structure will be dynamically freed when it
+ * is no longer being used.
+ *
+ * If the kset was not able to be created, NULL will be returned.
*/
-
-int subsys_create_file(struct kset *s, struct subsys_attribute *a)
+struct kset *kset_create_and_add(const char *name,
+ struct kset_uevent_ops *uevent_ops,
+ struct kobject *parent_kobj)
{
- int error = 0;
-
- if (!s || !a)
- return -EINVAL;
+ struct kset *kset;
+ int error;
- if (kset_get(s)) {
- error = sysfs_create_file(&s->kobj, &a->attr);
- kset_put(s);
+ kset = kset_create(name, uevent_ops, parent_kobj);
+ if (!kset)
+ return NULL;
+ error = kset_register(kset);
+ if (error) {
+ kfree(kset);
+ return NULL;
}
- return error;
+ return kset;
}
+EXPORT_SYMBOL_GPL(kset_create_and_add);
-EXPORT_SYMBOL(kobject_init);
-EXPORT_SYMBOL(kobject_register);
-EXPORT_SYMBOL(kobject_unregister);
EXPORT_SYMBOL(kobject_get);
EXPORT_SYMBOL(kobject_put);
-EXPORT_SYMBOL(kobject_add);
EXPORT_SYMBOL(kobject_del);
EXPORT_SYMBOL(kset_register);
EXPORT_SYMBOL(kset_unregister);
-
-EXPORT_SYMBOL(subsystem_register);
-EXPORT_SYMBOL(subsystem_unregister);
-EXPORT_SYMBOL(subsys_create_file);
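
The rework above replaces kobject_register()/kobject_unregister() with explicit init/add/put steps. A hedged sketch of the new lifecycle (the struct, ktype, and names are hypothetical):

#include <linux/kobject.h>
#include <linux/slab.h>

struct my_obj {
	struct kobject kobj;
};

static void my_release(struct kobject *kobj)
{
	kfree(container_of(kobj, struct my_obj, kobj));
}

static struct kobj_type my_ktype = {
	.release = my_release,
	.sysfs_ops = &kobj_sysfs_ops,	/* default ops added above */
};

static struct my_obj *my_obj_create(struct kobject *parent, int id)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	kobject_init(&obj->kobj, &my_ktype);
	if (kobject_add(&obj->kobj, parent, "my_obj%d", id)) {
		/* never kfree() after init; put() invokes my_release() */
		kobject_put(&obj->kobj);
		return NULL;
	}
	kobject_uevent(&obj->kobj, KOBJ_ADD);
	return obj;
}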
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 5886147252d0..5a402e2982af 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -98,7 +98,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
int i = 0;
int retval = 0;
- pr_debug("%s\n", __FUNCTION__);
+ pr_debug("kobject: '%s' (%p): %s\n",
+ kobject_name(kobj), kobj, __FUNCTION__);
/* search the kset we belong to */
top_kobj = kobj;
@@ -106,7 +107,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
top_kobj = top_kobj->parent;
if (!top_kobj->kset) {
- pr_debug("kobject attempted to send uevent without kset!\n");
+ pr_debug("kobject: '%s' (%p): %s: attempted to send uevent "
+ "without kset!\n", kobject_name(kobj), kobj,
+ __FUNCTION__);
return -EINVAL;
}
@@ -116,7 +119,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
/* skip the event, if the filter returns zero. */
if (uevent_ops && uevent_ops->filter)
if (!uevent_ops->filter(kset, kobj)) {
- pr_debug("kobject filter function caused the event to drop!\n");
+ pr_debug("kobject: '%s' (%p): %s: filter function "
+ "caused the event to drop!\n",
+ kobject_name(kobj), kobj, __FUNCTION__);
return 0;
}
@@ -126,7 +131,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
else
subsystem = kobject_name(&kset->kobj);
if (!subsystem) {
- pr_debug("unset subsystem caused the event to drop!\n");
+ pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the "
+ "event to drop!\n", kobject_name(kobj), kobj,
+ __FUNCTION__);
return 0;
}
@@ -166,12 +173,24 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
if (uevent_ops && uevent_ops->uevent) {
retval = uevent_ops->uevent(kset, kobj, env);
if (retval) {
- pr_debug ("%s - uevent() returned %d\n",
- __FUNCTION__, retval);
+ pr_debug("kobject: '%s' (%p): %s: uevent() returned "
+ "%d\n", kobject_name(kobj), kobj,
+ __FUNCTION__, retval);
goto exit;
}
}
+ /*
+ * Mark "add" and "remove" events in the object to ensure proper
+ * events to userspace during automatic cleanup. If the object did
+ * send an "add" event, "remove" will be automatically generated by
+ * the core, if not already done by the caller.
+ */
+ if (action == KOBJ_ADD)
+ kobj->state_add_uevent_sent = 1;
+ else if (action == KOBJ_REMOVE)
+ kobj->state_remove_uevent_sent = 1;
+
/* we will send an event, so request a new sequence number */
spin_lock(&sequence_lock);
seq = ++uevent_seqnum;
@@ -219,11 +238,12 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
retval = add_uevent_var(env, "HOME=/");
if (retval)
goto exit;
- retval = add_uevent_var(env, "PATH=/sbin:/bin:/usr/sbin:/usr/bin");
+ retval = add_uevent_var(env,
+ "PATH=/sbin:/bin:/usr/sbin:/usr/bin");
if (retval)
goto exit;
- call_usermodehelper (argv[0], argv, env->envp, UMH_WAIT_EXEC);
+ call_usermodehelper(argv[0], argv, env->envp, UMH_WAIT_EXEC);
}
exit:
@@ -231,7 +251,6 @@ exit:
kfree(env);
return retval;
}
-
EXPORT_SYMBOL_GPL(kobject_uevent_env);
/**
@@ -247,7 +266,6 @@ int kobject_uevent(struct kobject *kobj, enum kobject_action action)
{
return kobject_uevent_env(kobj, action, NULL);
}
-
EXPORT_SYMBOL_GPL(kobject_uevent);
/**
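
A hedged sketch of sending an "add" event with extra environment data via kobject_uevent_env(); the variable shown is hypothetical:

#include <linux/kobject.h>

static int announce(struct kobject *kobj)
{
	char *envp[] = { "DRIVER_STATE=ready", NULL };

	/* also sets kobj->state_add_uevent_sent, per the hunk above */
	return kobject_uevent_env(kobj, KOBJ_ADD, envp);
}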
diff --git a/lib/kref.c b/lib/kref.c
index a6dc3ec328e0..9ecd6e865610 100644
--- a/lib/kref.c
+++ b/lib/kref.c
@@ -15,13 +15,23 @@
#include <linux/module.h>
/**
+ * kref_set - initialize object and set refcount to requested number.
+ * @kref: object in question.
+ * @num: initial reference counter
+ */
+void kref_set(struct kref *kref, int num)
+{
+ atomic_set(&kref->refcount, num);
+ smp_mb();
+}
+
+/**
* kref_init - initialize object.
* @kref: object in question.
*/
void kref_init(struct kref *kref)
{
- atomic_set(&kref->refcount,1);
- smp_mb();
+ kref_set(kref, 1);
}
/**
@@ -61,6 +71,7 @@ int kref_put(struct kref *kref, void (*release)(struct kref *kref))
return 0;
}
+EXPORT_SYMBOL(kref_set);
EXPORT_SYMBOL(kref_init);
EXPORT_SYMBOL(kref_get);
EXPORT_SYMBOL(kref_put);
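
The kref API above supports the usual pattern of a refcounted object with a release callback; a sketch with a hypothetical struct:

#include <linux/kref.h>
#include <linux/slab.h>

struct session {
	struct kref ref;
};

static void session_release(struct kref *ref)
{
	kfree(container_of(ref, struct session, ref));
}

static struct session *session_new(void)
{
	struct session *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (s)
		kref_init(&s->ref);	/* refcount starts at 1 */
	return s;
}

/* users: kref_get(&s->ref); ... kref_put(&s->ref, session_release); */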
diff --git a/lib/pcounter.c b/lib/pcounter.c
new file mode 100644
index 000000000000..9b56807da93b
--- /dev/null
+++ b/lib/pcounter.c
@@ -0,0 +1,58 @@
+/*
+ * Define default pcounter functions
+ * Note that frequently-used pcounters use dedicated functions for a
+ * speed increase (see DEFINE_PCOUNTER/REF_PCOUNTER_MEMBER).
+ */
+
+#include <linux/module.h>
+#include <linux/pcounter.h>
+#include <linux/smp.h>
+#include <linux/cpumask.h>
+
+static void pcounter_dyn_add(struct pcounter *self, int inc)
+{
+ per_cpu_ptr(self->per_cpu_values, smp_processor_id())[0] += inc;
+}
+
+static int pcounter_dyn_getval(const struct pcounter *self, int cpu)
+{
+ return per_cpu_ptr(self->per_cpu_values, cpu)[0];
+}
+
+int pcounter_getval(const struct pcounter *self)
+{
+ int res = 0, cpu;
+
+ for_each_possible_cpu(cpu)
+ res += self->getval(self, cpu);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(pcounter_getval);
+
+int pcounter_alloc(struct pcounter *self)
+{
+ int rc = 0;
+ if (self->add == NULL) {
+ self->per_cpu_values = alloc_percpu(int);
+ if (self->per_cpu_values != NULL) {
+ self->add = pcounter_dyn_add;
+ self->getval = pcounter_dyn_getval;
+ } else
+ rc = 1;
+ }
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pcounter_alloc);
+
+void pcounter_free(struct pcounter *self)
+{
+ if (self->per_cpu_values != NULL) {
+ free_percpu(self->per_cpu_values);
+ self->per_cpu_values = NULL;
+ self->getval = NULL;
+ self->add = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(pcounter_free);
+
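
A hedged sketch of the dynamic pcounter path implemented above, on an SMP build. The counter name is made up, and preemption must be off around ->add() since pcounter_dyn_add() uses smp_processor_id(); the header presumably wraps this in a pcounter_add() helper:

#include <linux/kernel.h>
#include <linux/pcounter.h>
#include <linux/preempt.h>

static struct pcounter rx_packets;

static int rx_counter_demo(void)
{
	if (pcounter_alloc(&rx_packets))	/* non-0 means failure */
		return -ENOMEM;

	preempt_disable();
	rx_packets.add(&rx_packets, 1);		/* per-CPU increment */
	preempt_enable();

	/* sums the per-CPU values over all possible CPUs */
	pr_info("total: %d\n", pcounter_getval(&rx_packets));

	pcounter_free(&rx_packets);
	return 0;
}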
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 7d02700a4b0e..3e3365e5665e 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -187,7 +187,7 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
/*
* wait for the read lock to be granted
*/
-struct rw_semaphore fastcall __sched *
+asmregparm struct rw_semaphore __sched *
rwsem_down_read_failed(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
@@ -201,7 +201,7 @@ rwsem_down_read_failed(struct rw_semaphore *sem)
/*
* wait for the write lock to be granted
*/
-struct rw_semaphore fastcall __sched *
+asmregparm struct rw_semaphore __sched *
rwsem_down_write_failed(struct rw_semaphore *sem)
{
struct rwsem_waiter waiter;
@@ -216,7 +216,7 @@ rwsem_down_write_failed(struct rw_semaphore *sem)
* handle waking up a waiter on the semaphore
* - up_read/up_write has decremented the active part of count if we come here
*/
-struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
+asmregparm struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
unsigned long flags;
@@ -236,7 +236,7 @@ struct rw_semaphore fastcall *rwsem_wake(struct rw_semaphore *sem)
* - caller incremented waiting part of count and discovered it still negative
* - just wake up any readers at the front of the queue
*/
-struct rw_semaphore fastcall *rwsem_downgrade_wake(struct rw_semaphore *sem)
+asmregparm struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
unsigned long flags;
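
The rwsem slow paths above back the usual reader/writer usage; a minimal sketch with a hypothetical shared value:

#include <linux/rwsem.h>

static DECLARE_RWSEM(cfg_sem);
static int cfg_value;

static int cfg_read(void)
{
	int v;

	down_read(&cfg_sem);	/* contended case enters rwsem_down_read_failed() */
	v = cfg_value;
	up_read(&cfg_sem);
	return v;
}

static void cfg_write(int v)
{
	down_write(&cfg_sem);
	cfg_value = v;
	up_write(&cfg_sem);	/* wakes waiters via rwsem_wake() */
}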
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
new file mode 100644
index 000000000000..acca4901046c
--- /dev/null
+++ b/lib/scatterlist.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
+ *
+ * Scatterlist handling helpers.
+ *
+ * This source code is licensed under the GNU General Public License,
+ * Version 2. See the file COPYING for more details.
+ */
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+
+/**
+ * sg_next - return the next scatterlist entry in a list
+ * @sg: The current sg entry
+ *
+ * Description:
+ * Usually the next entry will be @sg@ + 1, but if this sg element is part
+ * of a chained scatterlist, it could jump to the start of a new
+ * scatterlist array.
+ *
+ **/
+struct scatterlist *sg_next(struct scatterlist *sg)
+{
+#ifdef CONFIG_DEBUG_SG
+ BUG_ON(sg->sg_magic != SG_MAGIC);
+#endif
+ if (sg_is_last(sg))
+ return NULL;
+
+ sg++;
+ if (unlikely(sg_is_chain(sg)))
+ sg = sg_chain_ptr(sg);
+
+ return sg;
+}
+EXPORT_SYMBOL(sg_next);
+
+/**
+ * sg_last - return the last scatterlist entry in a list
+ * @sgl: First entry in the scatterlist
+ * @nents: Number of entries in the scatterlist
+ *
+ * Description:
+ * Should only be used casually, it (currently) scans the entire list
+ * to get the last entry.
+ *
+ * Note that the @sgl@ pointer passed in need not be the first one,
+ * the important bit is that @nents@ denotes the number of entries that
+ * exist from @sgl@.
+ *
+ **/
+struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
+{
+#ifndef ARCH_HAS_SG_CHAIN
+ struct scatterlist *ret = &sgl[nents - 1];
+#else
+ struct scatterlist *sg, *ret = NULL;
+ unsigned int i;
+
+ for_each_sg(sgl, sg, nents, i)
+ ret = sg;
+
+#endif
+#ifdef CONFIG_DEBUG_SG
+ BUG_ON(sgl[0].sg_magic != SG_MAGIC);
+ BUG_ON(!sg_is_last(ret));
+#endif
+ return ret;
+}
+EXPORT_SYMBOL(sg_last);
+
+/**
+ * sg_init_table - Initialize SG table
+ * @sgl: The SG table
+ * @nents: Number of entries in table
+ *
+ * Notes:
+ * If this is part of a chained sg table, sg_mark_end() should be
+ * used only on the last table part.
+ *
+ **/
+void sg_init_table(struct scatterlist *sgl, unsigned int nents)
+{
+ memset(sgl, 0, sizeof(*sgl) * nents);
+#ifdef CONFIG_DEBUG_SG
+ {
+ unsigned int i;
+ for (i = 0; i < nents; i++)
+ sgl[i].sg_magic = SG_MAGIC;
+ }
+#endif
+ sg_mark_end(&sgl[nents - 1]);
+}
+EXPORT_SYMBOL(sg_init_table);
+
+/**
+ * sg_init_one - Initialize a single entry sg list
+ * @sg: SG entry
+ * @buf: Virtual address for IO
+ * @buflen: IO length
+ *
+ **/
+void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
+{
+ sg_init_table(sg, 1);
+ sg_set_buf(sg, buf, buflen);
+}
+EXPORT_SYMBOL(sg_init_one);
+
+/*
+ * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
+ * helpers.
+ */
+static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
+{
+ if (nents == SG_MAX_SINGLE_ALLOC)
+ return (struct scatterlist *) __get_free_page(gfp_mask);
+ else
+ return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
+}
+
+static void sg_kfree(struct scatterlist *sg, unsigned int nents)
+{
+ if (nents == SG_MAX_SINGLE_ALLOC)
+ free_page((unsigned long) sg);
+ else
+ kfree(sg);
+}
+
+/**
+ * __sg_free_table - Free a previously mapped sg table
+ * @table: The sg table header to use
+ * @max_ents: The maximum number of entries per single scatterlist
+ * @free_fn: Free function
+ *
+ * Description:
+ * Free an sg table previously allocated and setup with
+ * __sg_alloc_table(). The @max_ents value must be identical to
+ * that previously used with __sg_alloc_table().
+ *
+ **/
+void __sg_free_table(struct sg_table *table, unsigned int max_ents,
+ sg_free_fn *free_fn)
+{
+ struct scatterlist *sgl, *next;
+
+ if (unlikely(!table->sgl))
+ return;
+
+ sgl = table->sgl;
+ while (table->orig_nents) {
+ unsigned int alloc_size = table->orig_nents;
+ unsigned int sg_size;
+
+ /*
+ * If we have more than max_ents segments left,
+ * then assign 'next' to the sg table after the current one.
+ * sg_size is then one less than alloc size, since the last
+ * element is the chain pointer.
+ */
+ if (alloc_size > max_ents) {
+ next = sg_chain_ptr(&sgl[max_ents - 1]);
+ alloc_size = max_ents;
+ sg_size = alloc_size - 1;
+ } else {
+ sg_size = alloc_size;
+ next = NULL;
+ }
+
+ table->orig_nents -= sg_size;
+ free_fn(sgl, alloc_size);
+ sgl = next;
+ }
+
+ table->sgl = NULL;
+}
+EXPORT_SYMBOL(__sg_free_table);
+
+/**
+ * sg_free_table - Free a previously allocated sg table
+ * @table: The mapped sg table header
+ *
+ **/
+void sg_free_table(struct sg_table *table)
+{
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
+}
+EXPORT_SYMBOL(sg_free_table);
+
+/**
+ * __sg_alloc_table - Allocate and initialize an sg table with given allocator
+ * @table: The sg table header to use
+ * @nents: Number of entries in sg list
+ * @max_ents: The maximum number of entries the allocator returns per call
+ * @gfp_mask: GFP allocation mask
+ * @alloc_fn: Allocator to use
+ *
+ * Description:
+ * This function returns a @table @nents long. The allocator is
+ * defined to return scatterlist chunks of maximum size @max_ents.
+ * Thus if @nents is bigger than @max_ents, the scatterlists will be
+ * chained in units of @max_ents.
+ *
+ * Notes:
+ * If this function returns non-0 (i.e., failure), the caller must call
+ * __sg_free_table() to cleanup any leftover allocations.
+ *
+ **/
+int __sg_alloc_table(struct sg_table *table, unsigned int nents,
+ unsigned int max_ents, gfp_t gfp_mask,
+ sg_alloc_fn *alloc_fn)
+{
+ struct scatterlist *sg, *prv;
+ unsigned int left;
+
+#ifndef ARCH_HAS_SG_CHAIN
+ BUG_ON(nents > max_ents);
+#endif
+
+ memset(table, 0, sizeof(*table));
+
+ left = nents;
+ prv = NULL;
+ do {
+ unsigned int sg_size, alloc_size = left;
+
+ if (alloc_size > max_ents) {
+ alloc_size = max_ents;
+ sg_size = alloc_size - 1;
+ } else
+ sg_size = alloc_size;
+
+ left -= sg_size;
+
+ sg = alloc_fn(alloc_size, gfp_mask);
+ if (unlikely(!sg))
+ return -ENOMEM;
+
+ sg_init_table(sg, alloc_size);
+ table->nents = table->orig_nents += sg_size;
+
+ /*
+ * If this is the first mapping, assign the sg table header.
+ * If this is not the first mapping, chain previous part.
+ */
+ if (prv)
+ sg_chain(prv, max_ents, sg);
+ else
+ table->sgl = sg;
+
+ /*
+ * If no more entries after this one, mark the end
+ */
+ if (!left)
+ sg_mark_end(&sg[sg_size - 1]);
+
+ /*
+ * only really needed for mempool backed sg allocations (like
+ * SCSI), a possible improvement here would be to pass the
+ * table pointer into the allocator and let that clear these
+ * flags
+ */
+ gfp_mask &= ~__GFP_WAIT;
+ gfp_mask |= __GFP_HIGH;
+ prv = sg;
+ } while (left);
+
+ return 0;
+}
+EXPORT_SYMBOL(__sg_alloc_table);
+
+/**
+ * sg_alloc_table - Allocate and initialize an sg table
+ * @table: The sg table header to use
+ * @nents: Number of entries in sg list
+ * @gfp_mask: GFP allocation mask
+ *
+ * Description:
+ * Allocate and initialize an sg table. If @nents@ is larger than
+ * SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
+ *
+ **/
+int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
+{
+ int ret;
+
+ ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
+ gfp_mask, sg_kmalloc);
+ if (unlikely(ret))
+ __sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
+
+ return ret;
+}
+EXPORT_SYMBOL(sg_alloc_table);
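
Tying the new scatterlist helpers together, a hedged sketch that allocates a (possibly chained) table, fills it, and frees it; the page array and the mapping step are hypothetical:

#include <linux/mm.h>
#include <linux/scatterlist.h>

static int map_pages(struct page **pages, unsigned int npages)
{
	struct sg_table table;
	struct scatterlist *sg;
	unsigned int i;

	/* chains automatically when npages > SG_MAX_SINGLE_ALLOC */
	if (sg_alloc_table(&table, npages, GFP_KERNEL))
		return -ENOMEM;

	for_each_sg(table.sgl, sg, table.nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* ... hand table.sgl / table.nents to dma_map_sg() here ... */

	sg_free_table(&table);
	return 0;
}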