Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug       31
-rw-r--r--   lib/Kconfig.kgdb        16
-rw-r--r--   lib/Makefile             1
-rw-r--r--   lib/debugobjects.c     890
-rw-r--r--   lib/devres.c             4
-rw-r--r--   lib/div64.c             35
-rw-r--r--   lib/idr.c                2
-rw-r--r--   lib/klist.c            235
-rw-r--r--   lib/kobject.c           44
-rw-r--r--   lib/kobject_uevent.c    10
-rw-r--r--   lib/percpu_counter.c     1
-rw-r--r--   lib/proportions.c       38
-rw-r--r--   lib/string.c            27
13 files changed, 1158 insertions, 176 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 754cc0027f2a..d2099f41aa1e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -194,6 +194,37 @@ config TIMER_STATS
(it defaults to deactivated on bootup and will only be activated
if some application like powertop activates it explicitly).
+config DEBUG_OBJECTS
+ bool "Debug object operations"
+ depends on DEBUG_KERNEL
+ help
+ If you say Y here, additional code will be inserted into the
+ kernel to track the life time of various objects and validate
+ the operations on those objects.
+
+config DEBUG_OBJECTS_SELFTEST
+ bool "Debug objects selftest"
+ depends on DEBUG_OBJECTS
+ help
+ This enables the selftest of the object debug code.
+
+config DEBUG_OBJECTS_FREE
+ bool "Debug objects in freed memory"
+ depends on DEBUG_OBJECTS
+ help
+ This enables checks on whether a k/v free operation frees an area
+ which contains an object that has not been deactivated
+ properly. This can make kmalloc/kfree-intensive workloads
+ much slower.
+
+config DEBUG_OBJECTS_TIMERS
+ bool "Debug timer objects"
+ depends on DEBUG_OBJECTS
+ help
+ If you say Y here, additional code will be inserted into the
+ timer routines to track the life time of timer objects and
+ validate the timer operations.
+
config DEBUG_SLAB
bool "Debug slab memory allocations"
depends on DEBUG_KERNEL && SLAB
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index f2e01ac5ab09..a5d4b1dac2a5 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -1,4 +1,10 @@
+config HAVE_ARCH_KGDB_SHADOW_INFO
+ bool
+
+config HAVE_ARCH_KGDB
+ bool
+
menuconfig KGDB
bool "KGDB: kernel debugging with remote gdb"
select FRAME_POINTER
@@ -10,15 +16,10 @@ menuconfig KGDB
at http://kgdb.sourceforge.net as well as in DocBook form
in Documentation/DocBook/. If unsure, say N.
-config HAVE_ARCH_KGDB_SHADOW_INFO
- bool
-
-config HAVE_ARCH_KGDB
- bool
+if KGDB
config KGDB_SERIAL_CONSOLE
tristate "KGDB: use kgdb over the serial console"
- depends on KGDB
select CONSOLE_POLL
select MAGIC_SYSRQ
default y
@@ -28,7 +29,6 @@ config KGDB_SERIAL_CONSOLE
config KGDB_TESTS
bool "KGDB: internal test suite"
- depends on KGDB
default n
help
This is a kgdb I/O module specifically designed to test
@@ -56,3 +56,5 @@ config KGDB_TESTS_BOOT_STRING
boot. See the drivers/misc/kgdbts.c for detailed
information about other strings you could use beyond the
default of V1F100.
+
+endif # KGDB
diff --git a/lib/Makefile b/lib/Makefile
index 0ae4eb047aac..74b0cfb1fcc3 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
obj-$(CONFIG_PLIST) += plist.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
+obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
lib-y += dec_and_lock.o
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
new file mode 100644
index 000000000000..a76a5e122ae1
--- /dev/null
+++ b/lib/debugobjects.c
@@ -0,0 +1,890 @@
+/*
+ * Generic infrastructure for lifetime debugging of objects.
+ *
+ * Started by Thomas Gleixner
+ *
+ * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+#include <linux/debugobjects.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/hash.h>
+
+#define ODEBUG_HASH_BITS 14
+#define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)
+
+#define ODEBUG_POOL_SIZE 512
+#define ODEBUG_POOL_MIN_LEVEL 256
+
+#define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
+#define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
+#define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))
+
+struct debug_bucket {
+ struct hlist_head list;
+ spinlock_t lock;
+};
+
+static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];
+
+static struct debug_obj obj_static_pool[ODEBUG_POOL_SIZE];
+
+static DEFINE_SPINLOCK(pool_lock);
+
+static HLIST_HEAD(obj_pool);
+
+static int obj_pool_min_free = ODEBUG_POOL_SIZE;
+static int obj_pool_free = ODEBUG_POOL_SIZE;
+static int obj_pool_used;
+static int obj_pool_max_used;
+static struct kmem_cache *obj_cache;
+
+static int debug_objects_maxchain __read_mostly;
+static int debug_objects_fixups __read_mostly;
+static int debug_objects_warnings __read_mostly;
+static int debug_objects_enabled __read_mostly;
+static struct debug_obj_descr *descr_test __read_mostly;
+
+static int __init enable_object_debug(char *str)
+{
+ debug_objects_enabled = 1;
+ return 0;
+}
+early_param("debug_objects", enable_object_debug);
+
+static const char *obj_states[ODEBUG_STATE_MAX] = {
+ [ODEBUG_STATE_NONE] = "none",
+ [ODEBUG_STATE_INIT] = "initialized",
+ [ODEBUG_STATE_INACTIVE] = "inactive",
+ [ODEBUG_STATE_ACTIVE] = "active",
+ [ODEBUG_STATE_DESTROYED] = "destroyed",
+ [ODEBUG_STATE_NOTAVAILABLE] = "not available",
+};
+
+static int fill_pool(void)
+{
+ gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
+ struct debug_obj *new;
+
+ if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
+ return obj_pool_free;
+
+ if (unlikely(!obj_cache))
+ return obj_pool_free;
+
+ while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
+
+ new = kmem_cache_zalloc(obj_cache, gfp);
+ if (!new)
+ return obj_pool_free;
+
+ spin_lock(&pool_lock);
+ hlist_add_head(&new->node, &obj_pool);
+ obj_pool_free++;
+ spin_unlock(&pool_lock);
+ }
+ return obj_pool_free;
+}
+
+/*
+ * Lookup an object in the hash bucket.
+ */
+static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
+{
+ struct hlist_node *node;
+ struct debug_obj *obj;
+ int cnt = 0;
+
+ hlist_for_each_entry(obj, node, &b->list, node) {
+ cnt++;
+ if (obj->object == addr)
+ return obj;
+ }
+ if (cnt > debug_objects_maxchain)
+ debug_objects_maxchain = cnt;
+
+ return NULL;
+}
+
+/*
+ * Allocate a new object. If the pool is empty and no refill possible,
+ * switch off the debugger.
+ */
+static struct debug_obj *
+alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
+{
+ struct debug_obj *obj = NULL;
+ int retry = 0;
+
+repeat:
+ spin_lock(&pool_lock);
+ if (obj_pool.first) {
+ obj = hlist_entry(obj_pool.first, typeof(*obj), node);
+
+ obj->object = addr;
+ obj->descr = descr;
+ obj->state = ODEBUG_STATE_NONE;
+ hlist_del(&obj->node);
+
+ hlist_add_head(&obj->node, &b->list);
+
+ obj_pool_used++;
+ if (obj_pool_used > obj_pool_max_used)
+ obj_pool_max_used = obj_pool_used;
+
+ obj_pool_free--;
+ if (obj_pool_free < obj_pool_min_free)
+ obj_pool_min_free = obj_pool_free;
+ }
+ spin_unlock(&pool_lock);
+
+ if (fill_pool() && !obj && !retry++)
+ goto repeat;
+
+ return obj;
+}
+
+/*
+ * Put the object back into the pool or give it back to kmem_cache:
+ */
+static void free_object(struct debug_obj *obj)
+{
+ unsigned long idx = (unsigned long)(obj - obj_static_pool);
+
+ if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
+ spin_lock(&pool_lock);
+ hlist_add_head(&obj->node, &obj_pool);
+ obj_pool_free++;
+ obj_pool_used--;
+ spin_unlock(&pool_lock);
+ } else {
+ spin_lock(&pool_lock);
+ obj_pool_used--;
+ spin_unlock(&pool_lock);
+ kmem_cache_free(obj_cache, obj);
+ }
+}
+
+/*
+ * We run out of memory. That means we probably have tons of objects
+ * allocated.
+ */
+static void debug_objects_oom(void)
+{
+ struct debug_bucket *db = obj_hash;
+ struct hlist_node *node, *tmp;
+ struct debug_obj *obj;
+ unsigned long flags;
+ int i;
+
+ printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
+
+ for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
+ spin_lock_irqsave(&db->lock, flags);
+ hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+ hlist_del(&obj->node);
+ free_object(obj);
+ }
+ spin_unlock_irqrestore(&db->lock, flags);
+ }
+}
+
+/*
+ * We use the pfn of the address for the hash. That way we can check
+ * for freed objects simply by checking the affected bucket.
+ */
+static struct debug_bucket *get_bucket(unsigned long addr)
+{
+ unsigned long hash;
+
+ hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
+ return &obj_hash[hash];
+}
+
+static void debug_print_object(struct debug_obj *obj, char *msg)
+{
+ static int limit;
+
+ if (limit < 5 && obj->descr != descr_test) {
+ limit++;
+ printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
+ obj_states[obj->state], obj->descr->name);
+ WARN_ON(1);
+ }
+ debug_objects_warnings++;
+}
+
+/*
+ * Try to repair the damage, so we have a better chance to get useful
+ * debug output.
+ */
+static void
+debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
+ void * addr, enum debug_obj_state state)
+{
+ if (fixup)
+ debug_objects_fixups += fixup(addr, state);
+}
+
+static void debug_object_is_on_stack(void *addr, int onstack)
+{
+ void *stack = current->stack;
+ int is_on_stack;
+ static int limit;
+
+ if (limit > 4)
+ return;
+
+ is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE));
+
+ if (is_on_stack == onstack)
+ return;
+
+ limit++;
+ if (is_on_stack)
+ printk(KERN_WARNING
+ "ODEBUG: object is on stack, but not annotated\n");
+ else
+ printk(KERN_WARNING
+ "ODEBUG: object is not on stack, but annotated\n");
+ WARN_ON(1);
+}
+
+static void
+__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
+{
+ enum debug_obj_state state;
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ db = get_bucket((unsigned long) addr);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (!obj) {
+ obj = alloc_object(addr, db, descr);
+ if (!obj) {
+ debug_objects_enabled = 0;
+ spin_unlock_irqrestore(&db->lock, flags);
+ debug_objects_oom();
+ return;
+ }
+ debug_object_is_on_stack(addr, onstack);
+ }
+
+ switch (obj->state) {
+ case ODEBUG_STATE_NONE:
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ obj->state = ODEBUG_STATE_INIT;
+ break;
+
+ case ODEBUG_STATE_ACTIVE:
+ debug_print_object(obj, "init");
+ state = obj->state;
+ spin_unlock_irqrestore(&db->lock, flags);
+ debug_object_fixup(descr->fixup_init, addr, state);
+ return;
+
+ case ODEBUG_STATE_DESTROYED:
+ debug_print_object(obj, "init");
+ break;
+ default:
+ break;
+ }
+
+ spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
+ * debug_object_init - debug checks when an object is initialized
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_init(void *addr, struct debug_obj_descr *descr)
+{
+ if (!debug_objects_enabled)
+ return;
+
+ __debug_object_init(addr, descr, 0);
+}
+
+/**
+ * debug_object_init_on_stack - debug checks when an object on stack is
+ * initialized
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
+{
+ if (!debug_objects_enabled)
+ return;
+
+ __debug_object_init(addr, descr, 1);
+}
+
+/**
+ * debug_object_activate - debug checks when an object is activated
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_activate(void *addr, struct debug_obj_descr *descr)
+{
+ enum debug_obj_state state;
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ if (!debug_objects_enabled)
+ return;
+
+ db = get_bucket((unsigned long) addr);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (obj) {
+ switch (obj->state) {
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ obj->state = ODEBUG_STATE_ACTIVE;
+ break;
+
+ case ODEBUG_STATE_ACTIVE:
+ debug_print_object(obj, "activate");
+ state = obj->state;
+ spin_unlock_irqrestore(&db->lock, flags);
+ debug_object_fixup(descr->fixup_activate, addr, state);
+ return;
+
+ case ODEBUG_STATE_DESTROYED:
+ debug_print_object(obj, "activate");
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&db->lock, flags);
+ /*
+ * This happens when a static object is activated. We
+ * let the type specific code decide whether this is
+ * true or not.
+ */
+ debug_object_fixup(descr->fixup_activate, addr,
+ ODEBUG_STATE_NOTAVAILABLE);
+}
+
+/**
+ * debug_object_deactivate - debug checks when an object is deactivated
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
+{
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ if (!debug_objects_enabled)
+ return;
+
+ db = get_bucket((unsigned long) addr);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (obj) {
+ switch (obj->state) {
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ case ODEBUG_STATE_ACTIVE:
+ obj->state = ODEBUG_STATE_INACTIVE;
+ break;
+
+ case ODEBUG_STATE_DESTROYED:
+ debug_print_object(obj, "deactivate");
+ break;
+ default:
+ break;
+ }
+ } else {
+ struct debug_obj o = { .object = addr,
+ .state = ODEBUG_STATE_NOTAVAILABLE,
+ .descr = descr };
+
+ debug_print_object(&o, "deactivate");
+ }
+
+ spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
+ * debug_object_destroy - debug checks when an object is destroyed
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
+{
+ enum debug_obj_state state;
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ if (!debug_objects_enabled)
+ return;
+
+ db = get_bucket((unsigned long) addr);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (!obj)
+ goto out_unlock;
+
+ switch (obj->state) {
+ case ODEBUG_STATE_NONE:
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ obj->state = ODEBUG_STATE_DESTROYED;
+ break;
+ case ODEBUG_STATE_ACTIVE:
+ debug_print_object(obj, "destroy");
+ state = obj->state;
+ spin_unlock_irqrestore(&db->lock, flags);
+ debug_object_fixup(descr->fixup_destroy, addr, state);
+ return;
+
+ case ODEBUG_STATE_DESTROYED:
+ debug_print_object(obj, "destroy");
+ break;
+ default:
+ break;
+ }
+out_unlock:
+ spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
+ * debug_object_free - debug checks when an object is freed
+ * @addr: address of the object
+ * @descr: pointer to an object specific debug description structure
+ */
+void debug_object_free(void *addr, struct debug_obj_descr *descr)
+{
+ enum debug_obj_state state;
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+
+ if (!debug_objects_enabled)
+ return;
+
+ db = get_bucket((unsigned long) addr);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (!obj)
+ goto out_unlock;
+
+ switch (obj->state) {
+ case ODEBUG_STATE_ACTIVE:
+ debug_print_object(obj, "free");
+ state = obj->state;
+ spin_unlock_irqrestore(&db->lock, flags);
+ debug_object_fixup(descr->fixup_free, addr, state);
+ return;
+ default:
+ hlist_del(&obj->node);
+ free_object(obj);
+ break;
+ }
+out_unlock:
+ spin_unlock_irqrestore(&db->lock, flags);
+}
+
+#ifdef CONFIG_DEBUG_OBJECTS_FREE
+static void __debug_check_no_obj_freed(const void *address, unsigned long size)
+{
+ unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
+ struct hlist_node *node, *tmp;
+ struct debug_obj_descr *descr;
+ enum debug_obj_state state;
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ int cnt;
+
+ saddr = (unsigned long) address;
+ eaddr = saddr + size;
+ paddr = saddr & ODEBUG_CHUNK_MASK;
+ chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
+ chunks >>= ODEBUG_CHUNK_SHIFT;
+
+ for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
+ db = get_bucket(paddr);
+
+repeat:
+ cnt = 0;
+ spin_lock_irqsave(&db->lock, flags);
+ hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+ cnt++;
+ oaddr = (unsigned long) obj->object;
+ if (oaddr < saddr || oaddr >= eaddr)
+ continue;
+
+ switch (obj->state) {
+ case ODEBUG_STATE_ACTIVE:
+ debug_print_object(obj, "free");
+ descr = obj->descr;
+ state = obj->state;
+ spin_unlock_irqrestore(&db->lock, flags);
+ debug_object_fixup(descr->fixup_free,
+ (void *) oaddr, state);
+ goto repeat;
+ default:
+ hlist_del(&obj->node);
+ free_object(obj);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&db->lock, flags);
+ if (cnt > debug_objects_maxchain)
+ debug_objects_maxchain = cnt;
+ }
+}
+
+void debug_check_no_obj_freed(const void *address, unsigned long size)
+{
+ if (debug_objects_enabled)
+ __debug_check_no_obj_freed(address, size);
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+static int debug_stats_show(struct seq_file *m, void *v)
+{
+ seq_printf(m, "max_chain :%d\n", debug_objects_maxchain);
+ seq_printf(m, "warnings :%d\n", debug_objects_warnings);
+ seq_printf(m, "fixups :%d\n", debug_objects_fixups);
+ seq_printf(m, "pool_free :%d\n", obj_pool_free);
+ seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
+ seq_printf(m, "pool_used :%d\n", obj_pool_used);
+ seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+ return 0;
+}
+
+static int debug_stats_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, debug_stats_show, NULL);
+}
+
+static const struct file_operations debug_stats_fops = {
+ .open = debug_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init debug_objects_init_debugfs(void)
+{
+ struct dentry *dbgdir, *dbgstats;
+
+ if (!debug_objects_enabled)
+ return 0;
+
+ dbgdir = debugfs_create_dir("debug_objects", NULL);
+ if (!dbgdir)
+ return -ENOMEM;
+
+ dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
+ &debug_stats_fops);
+ if (!dbgstats)
+ goto err;
+
+ return 0;
+
+err:
+ debugfs_remove(dbgdir);
+
+ return -ENOMEM;
+}
+__initcall(debug_objects_init_debugfs);
+
+#else
+static inline void debug_objects_init_debugfs(void) { }
+#endif
+
+#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
+
+/* Random data structure for the self test */
+struct self_test {
+ unsigned long dummy1[6];
+ int static_init;
+ unsigned long dummy2[3];
+};
+
+static __initdata struct debug_obj_descr descr_type_test;
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */
+static int __init fixup_init(void *addr, enum debug_obj_state state)
+{
+ struct self_test *obj = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ debug_object_deactivate(obj, &descr_type_test);
+ debug_object_init(obj, &descr_type_test);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int __init fixup_activate(void *addr, enum debug_obj_state state)
+{
+ struct self_test *obj = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_NOTAVAILABLE:
+ if (obj->static_init == 1) {
+ debug_object_init(obj, &descr_type_test);
+ debug_object_activate(obj, &descr_type_test);
+ /*
+ * Real code should return 0 here! This is
+ * not a fixup of some bad behaviour. We
+ * merely call the debug_init function to keep
+ * track of the object.
+ */
+ return 1;
+ } else {
+ /* Real code needs to emit a warning here */
+ }
+ return 0;
+
+ case ODEBUG_STATE_ACTIVE:
+ debug_object_deactivate(obj, &descr_type_test);
+ debug_object_activate(obj, &descr_type_test);
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
+/*
+ * fixup_destroy is called when:
+ * - an active object is destroyed
+ */
+static int __init fixup_destroy(void *addr, enum debug_obj_state state)
+{
+ struct self_test *obj = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ debug_object_deactivate(obj, &descr_type_test);
+ debug_object_destroy(obj, &descr_type_test);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int __init fixup_free(void *addr, enum debug_obj_state state)
+{
+ struct self_test *obj = addr;
+
+ switch (state) {
+ case ODEBUG_STATE_ACTIVE:
+ debug_object_deactivate(obj, &descr_type_test);
+ debug_object_free(obj, &descr_type_test);
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int
+check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
+{
+ struct debug_bucket *db;
+ struct debug_obj *obj;
+ unsigned long flags;
+ int res = -EINVAL;
+
+ db = get_bucket((unsigned long) addr);
+
+ spin_lock_irqsave(&db->lock, flags);
+
+ obj = lookup_object(addr, db);
+ if (!obj && state != ODEBUG_STATE_NONE) {
+ printk(KERN_ERR "ODEBUG: selftest object not found\n");
+ WARN_ON(1);
+ goto out;
+ }
+ if (obj && obj->state != state) {
+ printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
+ obj->state, state);
+ WARN_ON(1);
+ goto out;
+ }
+ if (fixups != debug_objects_fixups) {
+ printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
+ fixups, debug_objects_fixups);
+ WARN_ON(1);
+ goto out;
+ }
+ if (warnings != debug_objects_warnings) {
+ printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
+ warnings, debug_objects_warnings);
+ WARN_ON(1);
+ goto out;
+ }
+ res = 0;
+out:
+ spin_unlock_irqrestore(&db->lock, flags);
+ if (res)
+ debug_objects_enabled = 0;
+ return res;
+}
+
+static __initdata struct debug_obj_descr descr_type_test = {
+ .name = "selftest",
+ .fixup_init = fixup_init,
+ .fixup_activate = fixup_activate,
+ .fixup_destroy = fixup_destroy,
+ .fixup_free = fixup_free,
+};
+
+static __initdata struct self_test obj = { .static_init = 0 };
+
+static void __init debug_objects_selftest(void)
+{
+ int fixups, oldfixups, warnings, oldwarnings;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ fixups = oldfixups = debug_objects_fixups;
+ warnings = oldwarnings = debug_objects_warnings;
+ descr_test = &descr_type_test;
+
+ debug_object_init(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
+ goto out;
+ debug_object_activate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
+ goto out;
+ debug_object_activate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
+ goto out;
+ debug_object_deactivate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
+ goto out;
+ debug_object_destroy(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
+ goto out;
+ debug_object_init(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
+ goto out;
+ debug_object_activate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
+ goto out;
+ debug_object_deactivate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
+ goto out;
+ debug_object_free(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
+ goto out;
+
+ obj.static_init = 1;
+ debug_object_activate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
+ goto out;
+ debug_object_init(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
+ goto out;
+ debug_object_free(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
+ goto out;
+
+#ifdef CONFIG_DEBUG_OBJECTS_FREE
+ debug_object_init(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
+ goto out;
+ debug_object_activate(&obj, &descr_type_test);
+ if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
+ goto out;
+ __debug_check_no_obj_freed(&obj, sizeof(obj));
+ if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
+ goto out;
+#endif
+ printk(KERN_INFO "ODEBUG: selftest passed\n");
+
+out:
+ debug_objects_fixups = oldfixups;
+ debug_objects_warnings = oldwarnings;
+ descr_test = NULL;
+
+ local_irq_restore(flags);
+}
+#else
+static inline void debug_objects_selftest(void) { }
+#endif
+
+/*
+ * Called during early boot to initialize the hash buckets and link
+ * the static object pool objects into the pool list. After this call
+ * the object tracker is fully operational.
+ */
+void __init debug_objects_early_init(void)
+{
+ int i;
+
+ for (i = 0; i < ODEBUG_HASH_SIZE; i++)
+ spin_lock_init(&obj_hash[i].lock);
+
+ for (i = 0; i < ODEBUG_POOL_SIZE; i++)
+ hlist_add_head(&obj_static_pool[i].node, &obj_pool);
+}
+
+/*
+ * Called after the kmem_caches are functional to setup a dedicated
+ * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
+ * prevents the debug code from being called on kmem_cache_free() for the
+ * debug tracker objects, to avoid recursive calls.
+ */
+void __init debug_objects_mem_init(void)
+{
+ if (!debug_objects_enabled)
+ return;
+
+ obj_cache = kmem_cache_create("debug_objects_cache",
+ sizeof (struct debug_obj), 0,
+ SLAB_DEBUG_OBJECTS, NULL);
+
+ if (!obj_cache)
+ debug_objects_enabled = 0;
+ else
+ debug_objects_selftest();
+}
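To put the new API in context, the sketch below shows roughly how a subsystem would annotate its objects with it. It is a hypothetical example (the my_obj type, its helpers and the fixup policy are invented here); only the debug_object_* calls and the debug_obj_descr layout come from the code above.

	#include <linux/debugobjects.h>

	struct my_obj {
		int active;
	};

	static struct debug_obj_descr my_obj_debug_descr;

	/*
	 * Called when an active my_obj is about to be freed, either via
	 * debug_object_free() or, with CONFIG_DEBUG_OBJECTS_FREE, via
	 * debug_check_no_obj_freed(). Deactivate it and return 1 to report
	 * that a fixup was performed.
	 */
	static int my_obj_fixup_free(void *addr, enum debug_obj_state state)
	{
		struct my_obj *obj = addr;

		if (state == ODEBUG_STATE_ACTIVE) {
			obj->active = 0;
			debug_object_deactivate(obj, &my_obj_debug_descr);
			return 1;
		}
		return 0;
	}

	static struct debug_obj_descr my_obj_debug_descr = {
		.name		= "my_obj",
		.fixup_free	= my_obj_fixup_free,
	};

	static void my_obj_setup(struct my_obj *obj)
	{
		obj->active = 0;
		debug_object_init(obj, &my_obj_debug_descr);
	}

	static void my_obj_start(struct my_obj *obj)
	{
		obj->active = 1;
		debug_object_activate(obj, &my_obj_debug_descr);
	}

	static void my_obj_stop(struct my_obj *obj)
	{
		obj->active = 0;
		debug_object_deactivate(obj, &my_obj_debug_descr);
	}

The DEBUG_OBJECTS_TIMERS option added in Kconfig.debug above applies this same pattern to the kernel's timer objects.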
diff --git a/lib/devres.c b/lib/devres.c
index edc27a5d1b73..26c87c49d776 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -20,7 +20,7 @@ static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
*
* Managed ioremap(). Map is automatically unmapped on driver detach.
*/
-void __iomem *devm_ioremap(struct device *dev, unsigned long offset,
+void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
unsigned long size)
{
void __iomem **ptr, *addr;
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(devm_ioremap);
* Managed ioremap_nocache(). Map is automatically unmapped on driver
* detach.
*/
-void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset,
+void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
unsigned long size)
{
void __iomem **ptr, *addr;
diff --git a/lib/div64.c b/lib/div64.c
index b71cf93c529a..bb5bd0c0f030 100644
--- a/lib/div64.c
+++ b/lib/div64.c
@@ -16,9 +16,8 @@
* assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S.
*/
-#include <linux/types.h>
#include <linux/module.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
/* Not needed on 64bit architectures */
#if BITS_PER_LONG == 32
@@ -58,10 +57,31 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
EXPORT_SYMBOL(__div64_32);
+#ifndef div_s64_rem
+s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+{
+ u64 quotient;
+
+ if (dividend < 0) {
+ quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
+ *remainder = -*remainder;
+ if (divisor > 0)
+ quotient = -quotient;
+ } else {
+ quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
+ if (divisor < 0)
+ quotient = -quotient;
+ }
+ return quotient;
+}
+EXPORT_SYMBOL(div_s64_rem);
+#endif
+
/* 64bit divisor, dividend and result. dynamic precision */
-uint64_t div64_64(uint64_t dividend, uint64_t divisor)
+#ifndef div64_u64
+u64 div64_u64(u64 dividend, u64 divisor)
{
- uint32_t high, d;
+ u32 high, d;
high = divisor >> 32;
if (high) {
@@ -72,10 +92,9 @@ uint64_t div64_64(uint64_t dividend, uint64_t divisor)
} else
d = divisor;
- do_div(dividend, d);
-
- return dividend;
+ return div_u64(dividend, d);
}
-EXPORT_SYMBOL(div64_64);
+EXPORT_SYMBOL(div64_u64);
+#endif
#endif /* BITS_PER_LONG == 32 */
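For clarity, a couple of worked calls against the new helpers. This is a hypothetical snippet, not part of the patch, and assumes the declarations live in the linux/math64.h header that the hunk above starts including:

	#include <linux/math64.h>

	static void div64_example(void)
	{
		s32 rem;
		s64 q;

		/* Truncating division; the remainder carries the dividend's sign. */
		q = div_s64_rem(-7, 2, &rem);	/* q == -3, rem == -1: -3 * 2 + -1 == -7 */
		q = div_s64_rem(7, -2, &rem);	/* q == -3, rem ==  1: -3 * -2 + 1 ==  7 */

		/* div64_u64() replaces the old div64_64(): full 64-bit divisor. */
		q = div64_u64(10000000000ULL, 3ULL);	/* q == 3333333333 */
		(void)q;
	}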
diff --git a/lib/idr.c b/lib/idr.c
index 8368c81fcb7d..7a02e173f027 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -385,8 +385,8 @@ void idr_remove(struct idr *idp, int id)
while (idp->id_free_cnt >= IDR_FREE_MAX) {
p = alloc_layer(idp);
kmem_cache_free(idr_layer_cache, p);
- return;
}
+ return;
}
EXPORT_SYMBOL(idr_remove);
diff --git a/lib/klist.c b/lib/klist.c
index 120bd175aa78..cca37f96faa2 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -1,38 +1,37 @@
/*
- * klist.c - Routines for manipulating klists.
+ * klist.c - Routines for manipulating klists.
*
+ * Copyright (C) 2005 Patrick Mochel
*
- * This klist interface provides a couple of structures that wrap around
- * struct list_head to provide explicit list "head" (struct klist) and
- * list "node" (struct klist_node) objects. For struct klist, a spinlock
- * is included that protects access to the actual list itself. struct
- * klist_node provides a pointer to the klist that owns it and a kref
- * reference count that indicates the number of current users of that node
- * in the list.
+ * This file is released under the GPL v2.
*
- * The entire point is to provide an interface for iterating over a list
- * that is safe and allows for modification of the list during the
- * iteration (e.g. insertion and removal), including modification of the
- * current node on the list.
+ * This klist interface provides a couple of structures that wrap around
+ * struct list_head to provide explicit list "head" (struct klist) and list
+ * "node" (struct klist_node) objects. For struct klist, a spinlock is
+ * included that protects access to the actual list itself. struct
+ * klist_node provides a pointer to the klist that owns it and a kref
+ * reference count that indicates the number of current users of that node
+ * in the list.
*
- * It works using a 3rd object type - struct klist_iter - that is declared
- * and initialized before an iteration. klist_next() is used to acquire the
- * next element in the list. It returns NULL if there are no more items.
- * Internally, that routine takes the klist's lock, decrements the reference
- * count of the previous klist_node and increments the count of the next
- * klist_node. It then drops the lock and returns.
+ * The entire point is to provide an interface for iterating over a list
+ * that is safe and allows for modification of the list during the
+ * iteration (e.g. insertion and removal), including modification of the
+ * current node on the list.
*
- * There are primitives for adding and removing nodes to/from a klist.
- * When deleting, klist_del() will simply decrement the reference count.
- * Only when the count goes to 0 is the node removed from the list.
- * klist_remove() will try to delete the node from the list and block
- * until it is actually removed. This is useful for objects (like devices)
- * that have been removed from the system and must be freed (but must wait
- * until all accessors have finished).
+ * It works using a 3rd object type - struct klist_iter - that is declared
+ * and initialized before an iteration. klist_next() is used to acquire the
+ * next element in the list. It returns NULL if there are no more items.
+ * Internally, that routine takes the klist's lock, decrements the
+ * reference count of the previous klist_node and increments the count of
+ * the next klist_node. It then drops the lock and returns.
*
- * Copyright (C) 2005 Patrick Mochel
- *
- * This file is released under the GPL v2.
+ * There are primitives for adding and removing nodes to/from a klist.
+ * When deleting, klist_del() will simply decrement the reference count.
+ * Only when the count goes to 0 is the node removed from the list.
+ * klist_remove() will try to delete the node from the list and block until
+ * it is actually removed. This is useful for objects (like devices) that
+ * have been removed from the system and must be freed (but must wait until
+ * all accessors have finished).
*/
#include <linux/klist.h>
@@ -40,10 +39,10 @@
/**
- * klist_init - Initialize a klist structure.
- * @k: The klist we're initializing.
- * @get: The get function for the embedding object (NULL if none)
- * @put: The put function for the embedding object (NULL if none)
+ * klist_init - Initialize a klist structure.
+ * @k: The klist we're initializing.
+ * @get: The get function for the embedding object (NULL if none)
+ * @put: The put function for the embedding object (NULL if none)
*
* Initialises the klist structure. If the klist_node structures are
* going to be embedded in refcounted objects (necessary for safe
@@ -51,8 +50,7 @@
* functions that take and release references on the embedding
* objects.
*/
-
-void klist_init(struct klist * k, void (*get)(struct klist_node *),
+void klist_init(struct klist *k, void (*get)(struct klist_node *),
void (*put)(struct klist_node *))
{
INIT_LIST_HEAD(&k->k_list);
@@ -60,26 +58,23 @@ void klist_init(struct klist * k, void (*get)(struct klist_node *),
k->get = get;
k->put = put;
}
-
EXPORT_SYMBOL_GPL(klist_init);
-
-static void add_head(struct klist * k, struct klist_node * n)
+static void add_head(struct klist *k, struct klist_node *n)
{
spin_lock(&k->k_lock);
list_add(&n->n_node, &k->k_list);
spin_unlock(&k->k_lock);
}
-static void add_tail(struct klist * k, struct klist_node * n)
+static void add_tail(struct klist *k, struct klist_node *n)
{
spin_lock(&k->k_lock);
list_add_tail(&n->n_node, &k->k_list);
spin_unlock(&k->k_lock);
}
-
-static void klist_node_init(struct klist * k, struct klist_node * n)
+static void klist_node_init(struct klist *k, struct klist_node *n)
{
INIT_LIST_HEAD(&n->n_node);
init_completion(&n->n_removed);
@@ -89,60 +84,83 @@ static void klist_node_init(struct klist * k, struct klist_node * n)
k->get(n);
}
-
/**
- * klist_add_head - Initialize a klist_node and add it to front.
- * @n: node we're adding.
- * @k: klist it's going on.
+ * klist_add_head - Initialize a klist_node and add it to front.
+ * @n: node we're adding.
+ * @k: klist it's going on.
*/
-
-void klist_add_head(struct klist_node * n, struct klist * k)
+void klist_add_head(struct klist_node *n, struct klist *k)
{
klist_node_init(k, n);
add_head(k, n);
}
-
EXPORT_SYMBOL_GPL(klist_add_head);
-
/**
- * klist_add_tail - Initialize a klist_node and add it to back.
- * @n: node we're adding.
- * @k: klist it's going on.
+ * klist_add_tail - Initialize a klist_node and add it to back.
+ * @n: node we're adding.
+ * @k: klist it's going on.
*/
-
-void klist_add_tail(struct klist_node * n, struct klist * k)
+void klist_add_tail(struct klist_node *n, struct klist *k)
{
klist_node_init(k, n);
add_tail(k, n);
}
-
EXPORT_SYMBOL_GPL(klist_add_tail);
+/**
+ * klist_add_after - Init a klist_node and add it after an existing node
+ * @n: node we're adding.
+ * @pos: node to put @n after
+ */
+void klist_add_after(struct klist_node *n, struct klist_node *pos)
+{
+ struct klist *k = pos->n_klist;
+
+ klist_node_init(k, n);
+ spin_lock(&k->k_lock);
+ list_add(&n->n_node, &pos->n_node);
+ spin_unlock(&k->k_lock);
+}
+EXPORT_SYMBOL_GPL(klist_add_after);
+
+/**
+ * klist_add_before - Init a klist_node and add it before an existing node
+ * @n: node we're adding.
+ * @pos: node to put @n before
+ */
+void klist_add_before(struct klist_node *n, struct klist_node *pos)
+{
+ struct klist *k = pos->n_klist;
+
+ klist_node_init(k, n);
+ spin_lock(&k->k_lock);
+ list_add_tail(&n->n_node, &pos->n_node);
+ spin_unlock(&k->k_lock);
+}
+EXPORT_SYMBOL_GPL(klist_add_before);
-static void klist_release(struct kref * kref)
+static void klist_release(struct kref *kref)
{
- struct klist_node * n = container_of(kref, struct klist_node, n_ref);
+ struct klist_node *n = container_of(kref, struct klist_node, n_ref);
list_del(&n->n_node);
complete(&n->n_removed);
n->n_klist = NULL;
}
-static int klist_dec_and_del(struct klist_node * n)
+static int klist_dec_and_del(struct klist_node *n)
{
return kref_put(&n->n_ref, klist_release);
}
-
/**
- * klist_del - Decrement the reference count of node and try to remove.
- * @n: node we're deleting.
+ * klist_del - Decrement the reference count of node and try to remove.
+ * @n: node we're deleting.
*/
-
-void klist_del(struct klist_node * n)
+void klist_del(struct klist_node *n)
{
- struct klist * k = n->n_klist;
+ struct klist *k = n->n_klist;
void (*put)(struct klist_node *) = k->put;
spin_lock(&k->k_lock);
@@ -152,48 +170,40 @@ void klist_del(struct klist_node * n)
if (put)
put(n);
}
-
EXPORT_SYMBOL_GPL(klist_del);
-
/**
- * klist_remove - Decrement the refcount of node and wait for it to go away.
- * @n: node we're removing.
+ * klist_remove - Decrement the refcount of node and wait for it to go away.
+ * @n: node we're removing.
*/
-
-void klist_remove(struct klist_node * n)
+void klist_remove(struct klist_node *n)
{
klist_del(n);
wait_for_completion(&n->n_removed);
}
-
EXPORT_SYMBOL_GPL(klist_remove);
-
/**
- * klist_node_attached - Say whether a node is bound to a list or not.
- * @n: Node that we're testing.
+ * klist_node_attached - Say whether a node is bound to a list or not.
+ * @n: Node that we're testing.
*/
-
-int klist_node_attached(struct klist_node * n)
+int klist_node_attached(struct klist_node *n)
{
return (n->n_klist != NULL);
}
-
EXPORT_SYMBOL_GPL(klist_node_attached);
-
/**
- * klist_iter_init_node - Initialize a klist_iter structure.
- * @k: klist we're iterating.
- * @i: klist_iter we're filling.
- * @n: node to start with.
+ * klist_iter_init_node - Initialize a klist_iter structure.
+ * @k: klist we're iterating.
+ * @i: klist_iter we're filling.
+ * @n: node to start with.
*
- * Similar to klist_iter_init(), but starts the action off with @n,
- * instead of with the list head.
+ * Similar to klist_iter_init(), but starts the action off with @n,
+ * instead of with the list head.
*/
-
-void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_node * n)
+void klist_iter_init_node(struct klist *k, struct klist_iter *i,
+ struct klist_node *n)
{
i->i_klist = k;
i->i_head = &k->k_list;
@@ -201,66 +211,56 @@ void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_
if (n)
kref_get(&n->n_ref);
}
-
EXPORT_SYMBOL_GPL(klist_iter_init_node);
-
/**
- * klist_iter_init - Iniitalize a klist_iter structure.
- * @k: klist we're iterating.
- * @i: klist_iter structure we're filling.
+ * klist_iter_init - Initialize a klist_iter structure.
+ * @k: klist we're iterating.
+ * @i: klist_iter structure we're filling.
*
- * Similar to klist_iter_init_node(), but start with the list head.
+ * Similar to klist_iter_init_node(), but start with the list head.
*/
-
-void klist_iter_init(struct klist * k, struct klist_iter * i)
+void klist_iter_init(struct klist *k, struct klist_iter *i)
{
klist_iter_init_node(k, i, NULL);
}
-
EXPORT_SYMBOL_GPL(klist_iter_init);
-
/**
- * klist_iter_exit - Finish a list iteration.
- * @i: Iterator structure.
+ * klist_iter_exit - Finish a list iteration.
+ * @i: Iterator structure.
*
- * Must be called when done iterating over list, as it decrements the
- * refcount of the current node. Necessary in case iteration exited before
- * the end of the list was reached, and always good form.
+ * Must be called when done iterating over list, as it decrements the
+ * refcount of the current node. Necessary in case iteration exited before
+ * the end of the list was reached, and always good form.
*/
-
-void klist_iter_exit(struct klist_iter * i)
+void klist_iter_exit(struct klist_iter *i)
{
if (i->i_cur) {
klist_del(i->i_cur);
i->i_cur = NULL;
}
}
-
EXPORT_SYMBOL_GPL(klist_iter_exit);
-
-static struct klist_node * to_klist_node(struct list_head * n)
+static struct klist_node *to_klist_node(struct list_head *n)
{
return container_of(n, struct klist_node, n_node);
}
-
/**
- * klist_next - Ante up next node in list.
- * @i: Iterator structure.
+ * klist_next - Ante up next node in list.
+ * @i: Iterator structure.
*
- * First grab list lock. Decrement the reference count of the previous
- * node, if there was one. Grab the next node, increment its reference
- * count, drop the lock, and return that next node.
+ * First grab list lock. Decrement the reference count of the previous
+ * node, if there was one. Grab the next node, increment its reference
+ * count, drop the lock, and return that next node.
*/
-
-struct klist_node * klist_next(struct klist_iter * i)
+struct klist_node *klist_next(struct klist_iter *i)
{
- struct list_head * next;
- struct klist_node * lnode = i->i_cur;
- struct klist_node * knode = NULL;
+ struct list_head *next;
+ struct klist_node *lnode = i->i_cur;
+ struct klist_node *knode = NULL;
void (*put)(struct klist_node *) = i->i_klist->put;
spin_lock(&i->i_klist->k_lock);
@@ -281,7 +281,4 @@ struct klist_node * klist_next(struct klist_iter * i)
put(lnode);
return knode;
}
-
EXPORT_SYMBOL_GPL(klist_next);
-
-
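The reflowed header comment above describes the iteration model; a small hypothetical user of the API, including the new klist_add_after()/klist_add_before() helpers, might look like the following. The my_dev wrapper and its functions are invented for the example:

	#include <linux/kernel.h>
	#include <linux/klist.h>

	struct my_dev {
		struct klist_node knode;
		int id;
	};

	static struct klist my_devs;

	static void my_devs_init(void)
	{
		/* No get()/put(): the nodes are not embedded in refcounted objects. */
		klist_init(&my_devs, NULL, NULL);
	}

	static void my_dev_add(struct my_dev *d, struct my_dev *after)
	{
		if (after)
			klist_add_after(&d->knode, &after->knode);
		else
			klist_add_tail(&d->knode, &my_devs);
	}

	static void my_dev_walk(void)
	{
		struct klist_iter iter;
		struct klist_node *n;

		klist_iter_init(&my_devs, &iter);
		/* Nodes may be added or klist_del()'d while we iterate. */
		while ((n = klist_next(&iter)))
			printk(KERN_INFO "my_dev %d\n",
			       container_of(n, struct my_dev, knode)->id);
		klist_iter_exit(&iter);
	}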
diff --git a/lib/kobject.c b/lib/kobject.c
index 2c6490370922..718e5101c263 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -90,7 +90,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
}
pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj),
- kobj, __FUNCTION__, path);
+ kobj, __func__, path);
}
/**
@@ -181,7 +181,7 @@ static int kobject_add_internal(struct kobject *kobj)
}
pr_debug("kobject: '%s' (%p): %s: parent: '%s', set: '%s'\n",
- kobject_name(kobj), kobj, __FUNCTION__,
+ kobject_name(kobj), kobj, __func__,
parent ? kobject_name(parent) : "<NULL>",
kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>");
@@ -196,10 +196,10 @@ static int kobject_add_internal(struct kobject *kobj)
printk(KERN_ERR "%s failed for %s with "
"-EEXIST, don't try to register things with "
"the same name in the same directory.\n",
- __FUNCTION__, kobject_name(kobj));
+ __func__, kobject_name(kobj));
else
printk(KERN_ERR "%s failed for %s (%d)\n",
- __FUNCTION__, kobject_name(kobj), error);
+ __func__, kobject_name(kobj), error);
dump_stack();
} else
kobj->state_in_sysfs = 1;
@@ -216,21 +216,12 @@ static int kobject_add_internal(struct kobject *kobj)
static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
va_list vargs)
{
- va_list aq;
- char *name;
-
- va_copy(aq, vargs);
- name = kvasprintf(GFP_KERNEL, fmt, vargs);
- va_end(aq);
-
- if (!name)
- return -ENOMEM;
-
/* Free the old name, if necessary. */
kfree(kobj->name);
- /* Now, set the new name */
- kobj->name = name;
+ kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
+ if (!kobj->name)
+ return -ENOMEM;
return 0;
}
@@ -246,12 +237,12 @@ static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
*/
int kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
- va_list args;
+ va_list vargs;
int retval;
- va_start(args, fmt);
- retval = kobject_set_name_vargs(kobj, fmt, args);
- va_end(args);
+ va_start(vargs, fmt);
+ retval = kobject_set_name_vargs(kobj, fmt, vargs);
+ va_end(vargs);
return retval;
}
@@ -301,12 +292,9 @@ EXPORT_SYMBOL(kobject_init);
static int kobject_add_varg(struct kobject *kobj, struct kobject *parent,
const char *fmt, va_list vargs)
{
- va_list aq;
int retval;
- va_copy(aq, vargs);
- retval = kobject_set_name_vargs(kobj, fmt, aq);
- va_end(aq);
+ retval = kobject_set_name_vargs(kobj, fmt, vargs);
if (retval) {
printk(KERN_ERR "kobject: can not set name properly!\n");
return retval;
@@ -540,7 +528,7 @@ static void kobject_cleanup(struct kobject *kobj)
const char *name = kobj->name;
pr_debug("kobject: '%s' (%p): %s\n",
- kobject_name(kobj), kobj, __FUNCTION__);
+ kobject_name(kobj), kobj, __func__);
if (t && !t->release)
pr_debug("kobject: '%s' (%p): does not have a release() "
@@ -600,7 +588,7 @@ void kobject_put(struct kobject *kobj)
static void dynamic_kobj_release(struct kobject *kobj)
{
- pr_debug("kobject: (%p): %s\n", kobj, __FUNCTION__);
+ pr_debug("kobject: (%p): %s\n", kobj, __func__);
kfree(kobj);
}
@@ -657,7 +645,7 @@ struct kobject *kobject_create_and_add(const char *name, struct kobject *parent)
retval = kobject_add(kobj, parent, "%s", name);
if (retval) {
printk(KERN_WARNING "%s: kobject_add error: %d\n",
- __FUNCTION__, retval);
+ __func__, retval);
kobject_put(kobj);
kobj = NULL;
}
@@ -765,7 +753,7 @@ static void kset_release(struct kobject *kobj)
{
struct kset *kset = container_of(kobj, struct kset, kobj);
pr_debug("kobject: '%s' (%p): %s\n",
- kobject_name(kobj), kobj, __FUNCTION__);
+ kobject_name(kobj), kobj, __func__);
kfree(kset);
}
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 9fb6b86cf6b1..2fa545a63160 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -101,7 +101,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
int retval = 0;
pr_debug("kobject: '%s' (%p): %s\n",
- kobject_name(kobj), kobj, __FUNCTION__);
+ kobject_name(kobj), kobj, __func__);
/* search the kset we belong to */
top_kobj = kobj;
@@ -111,7 +111,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
if (!top_kobj->kset) {
pr_debug("kobject: '%s' (%p): %s: attempted to send uevent "
"without kset!\n", kobject_name(kobj), kobj,
- __FUNCTION__);
+ __func__);
return -EINVAL;
}
@@ -123,7 +123,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
if (!uevent_ops->filter(kset, kobj)) {
pr_debug("kobject: '%s' (%p): %s: filter function "
"caused the event to drop!\n",
- kobject_name(kobj), kobj, __FUNCTION__);
+ kobject_name(kobj), kobj, __func__);
return 0;
}
@@ -135,7 +135,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
if (!subsystem) {
pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the "
"event to drop!\n", kobject_name(kobj), kobj,
- __FUNCTION__);
+ __func__);
return 0;
}
@@ -177,7 +177,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
if (retval) {
pr_debug("kobject: '%s' (%p): %s: uevent() returned "
"%d\n", kobject_name(kobj), kobj,
- __FUNCTION__, retval);
+ __func__, retval);
goto exit;
}
}
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 393a0e915c23..119174494cb5 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -102,6 +102,7 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
return;
free_percpu(fbc->counters);
+ fbc->counters = NULL;
#ifdef CONFIG_HOTPLUG_CPU
mutex_lock(&percpu_counters_lock);
list_del(&fbc->list);
diff --git a/lib/proportions.c b/lib/proportions.c
index 9508d9a7af3e..4f387a643d72 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -73,12 +73,6 @@
#include <linux/proportions.h>
#include <linux/rcupdate.h>
-/*
- * Limit the time part in order to ensure there are some bits left for the
- * cycle counter.
- */
-#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
-
int prop_descriptor_init(struct prop_descriptor *pd, int shift)
{
int err;
@@ -268,6 +262,38 @@ void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
}
/*
+ * identical to __prop_inc_percpu, except that it limits this pl's fraction to
+ * @frac/PROP_FRAC_BASE by ignoring events when this limit has been exceeded.
+ */
+void __prop_inc_percpu_max(struct prop_descriptor *pd,
+ struct prop_local_percpu *pl, long frac)
+{
+ struct prop_global *pg = prop_get_global(pd);
+
+ prop_norm_percpu(pg, pl);
+
+ if (unlikely(frac != PROP_FRAC_BASE)) {
+ unsigned long period_2 = 1UL << (pg->shift - 1);
+ unsigned long counter_mask = period_2 - 1;
+ unsigned long global_count;
+ long numerator, denominator;
+
+ numerator = percpu_counter_read_positive(&pl->events);
+ global_count = percpu_counter_read(&pg->events);
+ denominator = period_2 + (global_count & counter_mask);
+
+ if (numerator > ((denominator * frac) >> PROP_FRAC_SHIFT))
+ goto out_put;
+ }
+
+ percpu_counter_add(&pl->events, 1);
+ percpu_counter_add(&pg->events, 1);
+
+out_put:
+ prop_put_global(pd, pg);
+}
+
+/*
* Obtain a fraction of this proportion
*
* p_{j} = x_{j} / (period/2 + t % period/2)
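To make the new limit check in __prop_inc_percpu_max() concrete, here is a worked example with made-up numbers; it assumes PROP_FRAC_BASE is 1UL << PROP_FRAC_SHIFT, since neither constant is defined in this hunk:

	#include <linux/proportions.h>

	static void prop_limit_example(void)
	{
		/* Assume pg->shift == 10, so period_2 == 512 and counter_mask == 511. */
		unsigned long global_count = 800;	/* pretend percpu_counter_read(&pg->events) */
		long frac = PROP_FRAC_BASE / 4;		/* cap this pl at roughly 25% */
		long denominator = 512 + (global_count & 511);			/* 800 */
		long threshold = (denominator * frac) >> PROP_FRAC_SHIFT;	/* 200 */

		/*
		 * __prop_inc_percpu_max() accounts the event only while this pl's
		 * own event count is <= threshold; beyond that the event is
		 * dropped, freezing this pl's share at about frac/PROP_FRAC_BASE.
		 */
		(void)threshold;
	}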
diff --git a/lib/string.c b/lib/string.c
index 5efafed3d6b6..b19b87af65a3 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -493,6 +493,33 @@ char *strsep(char **s, const char *ct)
EXPORT_SYMBOL(strsep);
#endif
+/**
+ * sysfs_streq - return true if strings are equal, modulo trailing newline
+ * @s1: one string
+ * @s2: another string
+ *
+ * This routine returns true iff two strings are equal, treating both
+ * NUL and newline-then-NUL as equivalent string terminations. It's
+ * geared for use with sysfs input strings, which generally terminate
+ * with newlines but are compared against values without newlines.
+ */
+bool sysfs_streq(const char *s1, const char *s2)
+{
+ while (*s1 && *s1 == *s2) {
+ s1++;
+ s2++;
+ }
+
+ if (*s1 == *s2)
+ return true;
+ if (!*s1 && *s2 == '\n' && !s2[1])
+ return true;
+ if (*s1 == '\n' && !s1[1] && !*s2)
+ return true;
+ return false;
+}
+EXPORT_SYMBOL(sysfs_streq);
+
#ifndef __HAVE_ARCH_MEMSET
/**
* memset - Fill a region of memory with the given value