Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig.debug        |  29
-rw-r--r--  lib/Kconfig.kgdb         |   3
-rw-r--r--  lib/cpumask.c            |   9
-rw-r--r--  lib/kobject.c            |  10
-rw-r--r--  lib/kobject_uevent.c     |   3
-rw-r--r--  lib/scatterlist.c        | 176
-rw-r--r--  lib/smp_processor_id.c   |   6
-rw-r--r--  lib/textsearch.c         |  16
-rw-r--r--  lib/ts_bm.c              |  26
-rw-r--r--  lib/ts_fsm.c             |   6
-rw-r--r--  lib/ts_kmp.c             |  29
11 files changed, 235 insertions, 78 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index df27132a56f4..882c51048993 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -74,6 +74,9 @@ config DEBUG_FS
debugging files into. Enable this option to be able to read and
write to these files.
+ For detailed documentation on the debugfs API, see
+ Documentation/DocBook/filesystems.
+
If unsure, say N.
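As a hedged illustration of the debugfs API the help text points at (all names below are made up for the example; assumes CONFIG_DEBUG_FS=y):

#include <linux/debugfs.h>
#include <linux/module.h>

static u32 my_counter;		/* value exposed read/write via debugfs */
static struct dentry *my_dir;

static int __init my_debugfs_example_init(void)
{
	/* shows up as /sys/kernel/debug/my_driver/counter once debugfs is mounted */
	my_dir = debugfs_create_dir("my_driver", NULL);
	debugfs_create_u32("counter", 0644, my_dir, &my_counter);
	return 0;
}

static void __exit my_debugfs_example_exit(void)
{
	debugfs_remove_recursive(my_dir);
}

module_init(my_debugfs_example_init);
module_exit(my_debugfs_example_exit);
MODULE_LICENSE("GPL");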
config HEADERS_CHECK
@@ -147,7 +150,7 @@ config DETECT_SOFTLOCKUP
help
Say Y here to enable the kernel to detect "soft lockups",
which are bugs that cause the kernel to loop in kernel
- mode for more than 10 seconds, without giving other tasks a
+ mode for more than 60 seconds, without giving other tasks a
chance to run.
When a soft-lockup is detected, the kernel will print the
@@ -159,6 +162,30 @@ config DETECT_SOFTLOCKUP
can be detected via the NMI-watchdog, on platforms that
support it.)
+config BOOTPARAM_SOFTLOCKUP_PANIC
+ bool "Panic (Reboot) On Soft Lockups"
+ depends on DETECT_SOFTLOCKUP
+ help
+ Say Y here to enable the kernel to panic on "soft lockups",
+ which are bugs that cause the kernel to loop in kernel
+ mode for more than 60 seconds, without giving other tasks a
+ chance to run.
+
+ The panic can be used in combination with panic_timeout,
+ to cause the system to reboot automatically after a
+ lockup has been detected. This feature is useful for
+ high-availability systems that have uptime guarantees and
+ where a lockup must be resolved ASAP.
+
+ Say N if unsure.
+
+config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
+ int
+ depends on DETECT_SOFTLOCKUP
+ range 0 1
+ default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
+ default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
+
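A usage note (assumed, but in keeping with the help text above): the panic behaviour can also be requested at boot time and combined with a panic timeout, so that a locked-up machine reboots on its own, e.g. on the kernel command line:

	softlockup_panic=1 panic=30

(both values are illustrative).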
config SCHED_DEBUG
bool "Collect scheduler debugging info"
depends on DEBUG_KERNEL && PROC_FS
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index a5d4b1dac2a5..2cfd2721f7ed 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -1,7 +1,4 @@
-config HAVE_ARCH_KGDB_SHADOW_INFO
- bool
-
config HAVE_ARCH_KGDB
bool
diff --git a/lib/cpumask.c b/lib/cpumask.c
index bb4f76d3c3e7..5f97dc25ef9c 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -15,6 +15,15 @@ int __next_cpu(int n, const cpumask_t *srcp)
}
EXPORT_SYMBOL(__next_cpu);
+#if NR_CPUS > 64
+int __next_cpu_nr(int n, const cpumask_t *srcp)
+{
+ return min_t(int, nr_cpu_ids,
+ find_next_bit(srcp->bits, nr_cpu_ids, n+1));
+}
+EXPORT_SYMBOL(__next_cpu_nr);
+#endif
+
int __any_online_cpu(const cpumask_t *mask)
{
int cpu;
diff --git a/lib/kobject.c b/lib/kobject.c
index dcade0543bd2..744401571ed7 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -216,13 +216,19 @@ static int kobject_add_internal(struct kobject *kobj)
static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
va_list vargs)
{
- /* Free the old name, if necessary. */
- kfree(kobj->name);
+ const char *old_name = kobj->name;
+ char *s;
kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
if (!kobj->name)
return -ENOMEM;
+ /* ewww... some of these buggers have '/' in the name ... */
+ s = strchr(kobj->name, '/');
+ if (s)
+ s[0] = '!';
+
+ kfree(old_name);
return 0;
}
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index 2fa545a63160..9f8d599459d1 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -245,7 +245,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
if (retval)
goto exit;
- call_usermodehelper(argv[0], argv, env->envp, UMH_WAIT_EXEC);
+ retval = call_usermodehelper(argv[0], argv,
+ env->envp, UMH_WAIT_EXEC);
}
exit:
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index b80c21100d78..876ba6d5b670 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -295,6 +295,117 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
EXPORT_SYMBOL(sg_alloc_table);
/**
+ * sg_miter_start - start mapping iteration over a sg list
+ * @miter: sg mapping iter to be started
+ * @sgl: sg list to iterate over
+ * @nents: number of sg entries
+ * @flags: sg iteration flags, e.g. SG_MITER_ATOMIC
+ *
+ * Description:
+ * Starts mapping iterator @miter.
+ *
+ * Context:
+ * Don't care.
+ */
+void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
+ unsigned int nents, unsigned int flags)
+{
+ memset(miter, 0, sizeof(struct sg_mapping_iter));
+
+ miter->__sg = sgl;
+ miter->__nents = nents;
+ miter->__offset = 0;
+ miter->__flags = flags;
+}
+EXPORT_SYMBOL(sg_miter_start);
+
+/**
+ * sg_miter_next - proceed mapping iterator to the next mapping
+ * @miter: sg mapping iter to proceed
+ *
+ * Description:
+ * Proceeds @miter to the next mapping. @miter should have been
+ * started using sg_miter_start(). On successful return,
+ * @miter->page, @miter->addr and @miter->length point to the
+ * current mapping.
+ *
+ * Context:
+ * IRQs disabled if SG_MITER_ATOMIC. IRQs must stay disabled until
+ * @miter is stopped. May sleep if !SG_MITER_ATOMIC.
+ *
+ * Returns:
+ * true if @miter contains the next mapping. false if end of sg
+ * list is reached.
+ */
+bool sg_miter_next(struct sg_mapping_iter *miter)
+{
+ unsigned int off, len;
+
+ /* check for end and drop resources from the last iteration */
+ if (!miter->__nents)
+ return false;
+
+ sg_miter_stop(miter);
+
+ /* get to the next sg if necessary. __offset is adjusted by stop */
+ if (miter->__offset == miter->__sg->length && --miter->__nents) {
+ miter->__sg = sg_next(miter->__sg);
+ miter->__offset = 0;
+ }
+
+ /* map the next page */
+ off = miter->__sg->offset + miter->__offset;
+ len = miter->__sg->length - miter->__offset;
+
+ miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
+ off &= ~PAGE_MASK;
+ miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
+ miter->consumed = miter->length;
+
+ if (miter->__flags & SG_MITER_ATOMIC)
+ miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
+ else
+ miter->addr = kmap(miter->page) + off;
+
+ return true;
+}
+EXPORT_SYMBOL(sg_miter_next);
+
+/**
+ * sg_miter_stop - stop mapping iteration
+ * @miter: sg mapping iter to be stopped
+ *
+ * Description:
+ * Stops mapping iterator @miter. @miter should have been started
+ * using sg_miter_start(). A stopped iteration can be
+ * resumed by calling sg_miter_next() on it. This is useful when
+ * resources (kmap) need to be released during iteration.
+ *
+ * Context:
+ * IRQs disabled if SG_MITER_ATOMIC is set. Don't care otherwise.
+ */
+void sg_miter_stop(struct sg_mapping_iter *miter)
+{
+ WARN_ON(miter->consumed > miter->length);
+
+ /* drop resources from the last iteration */
+ if (miter->addr) {
+ miter->__offset += miter->consumed;
+
+ if (miter->__flags & SG_MITER_ATOMIC) {
+ WARN_ON(!irqs_disabled());
+ kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
+ } else
+ kunmap(miter->addr);
+
+ miter->page = NULL;
+ miter->addr = NULL;
+ miter->length = 0;
+ miter->consumed = 0;
+ }
+}
+EXPORT_SYMBOL(sg_miter_stop);
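For reference, a minimal sketch of how a caller might drive this iterator (an assumed example, not part of the patch; it mirrors the way sg_copy_buffer() below uses the API):

#include <linux/interrupt.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Zero every byte described by an sg list using the mapping iterator.
 * SG_MITER_ATOMIC maps pages with kmap_atomic(), so IRQs are kept
 * disabled from the first sg_miter_next() until sg_miter_stop(). */
static void sg_zero_all(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;
	unsigned long flags;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);

	local_irq_save(flags);
	while (sg_miter_next(&miter))
		memset(miter.addr, 0, miter.length);	/* current mapping */
	sg_miter_stop(&miter);
	local_irq_restore(flags);
}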
+
+/**
* sg_copy_buffer - Copy data between a linear buffer and an SG list
* @sgl: The SG list
* @nents: Number of SG entries
@@ -309,56 +420,29 @@ EXPORT_SYMBOL(sg_alloc_table);
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen, int to_buffer)
{
- struct scatterlist *sg;
- size_t buf_off = 0;
- int i;
-
- WARN_ON(!irqs_disabled());
-
- for_each_sg(sgl, sg, nents, i) {
- struct page *page;
- int n = 0;
- unsigned int sg_off = sg->offset;
- unsigned int sg_copy = sg->length;
-
- if (sg_copy > buflen)
- sg_copy = buflen;
- buflen -= sg_copy;
-
- while (sg_copy > 0) {
- unsigned int page_copy;
- void *p;
-
- page_copy = PAGE_SIZE - sg_off;
- if (page_copy > sg_copy)
- page_copy = sg_copy;
-
- page = nth_page(sg_page(sg), n);
- p = kmap_atomic(page, KM_BIO_SRC_IRQ);
-
- if (to_buffer)
- memcpy(buf + buf_off, p + sg_off, page_copy);
- else {
- memcpy(p + sg_off, buf + buf_off, page_copy);
- flush_kernel_dcache_page(page);
- }
-
- kunmap_atomic(p, KM_BIO_SRC_IRQ);
-
- buf_off += page_copy;
- sg_off += page_copy;
- if (sg_off == PAGE_SIZE) {
- sg_off = 0;
- n++;
- }
- sg_copy -= page_copy;
+ unsigned int offset = 0;
+ struct sg_mapping_iter miter;
+
+ sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC);
+
+ while (sg_miter_next(&miter) && offset < buflen) {
+ unsigned int len;
+
+ len = min(miter.length, buflen - offset);
+
+ if (to_buffer)
+ memcpy(buf + offset, miter.addr, len);
+ else {
+ memcpy(miter.addr, buf + offset, len);
+ flush_kernel_dcache_page(miter.page);
}
- if (!buflen)
- break;
+ offset += len;
}
- return buf_off;
+ sg_miter_stop(&miter);
+
+ return offset;
}
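This static helper is normally reached through the sg_copy_from_buffer()/sg_copy_to_buffer() wrappers defined further down in this file; a hedged usage sketch (the fill_table() helper and its arguments are hypothetical):

#include <linux/scatterlist.h>

/* Illustrative only: copy a linear buffer into an sg table built
 * elsewhere; sg_copy_from_buffer() is the exported wrapper around
 * sg_copy_buffer() above. */
static size_t fill_table(struct sg_table *table, void *buf, size_t buflen)
{
	return sg_copy_from_buffer(table->sgl, table->nents, buf, buflen);
}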
/**
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 3b4dc098181e..c4381d9516f6 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -11,7 +11,7 @@ notrace unsigned int debug_smp_processor_id(void)
{
unsigned long preempt_count = preempt_count();
int this_cpu = raw_smp_processor_id();
- cpumask_t this_mask;
+ cpumask_of_cpu_ptr_declare(this_mask);
if (likely(preempt_count))
goto out;
@@ -23,9 +23,9 @@ notrace unsigned int debug_smp_processor_id(void)
* Kernel threads bound to a single CPU can safely use
* smp_processor_id():
*/
- this_mask = cpumask_of_cpu(this_cpu);
+ cpumask_of_cpu_ptr_next(this_mask, this_cpu);
- if (cpus_equal(current->cpus_allowed, this_mask))
+ if (cpus_equal(current->cpus_allowed, *this_mask))
goto out;
/*
diff --git a/lib/textsearch.c b/lib/textsearch.c
index a3e500ad51d7..9fbcb44c554f 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -54,10 +54,13 @@
* USAGE
*
* Before a search can be performed, a configuration must be created
- * by calling textsearch_prepare() specyfing the searching algorithm and
- * the pattern to look for. The returned configuration may then be used
- * for an arbitary amount of times and even in parallel as long as a
- * separate struct ts_state variable is provided to every instance.
+ * by calling textsearch_prepare(), specifying the search algorithm,
+ * the pattern to look for and flags. The TS_IGNORECASE flag enables
+ * case-insensitive matching; it may slow the algorithm down, so use
+ * it at your own risk.
+ * The returned configuration may then be reused an arbitrary
+ * number of times and even in parallel as long as a separate struct
+ * ts_state variable is provided to every instance.
*
* The actual search is performed by either calling textsearch_find_-
* continuous() for linear data or by providing an own get_next_block()
@@ -89,7 +92,6 @@
* panic("Oh my god, dancing chickens at %d\n", pos);
*
* textsearch_destroy(conf);
- *
* ==========================================================================
*/
@@ -265,7 +267,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
return ERR_PTR(-EINVAL);
ops = lookup_ts_algo(algo);
-#ifdef CONFIG_KMOD
+#ifdef CONFIG_MODULES
/*
* Why not always autoload you may ask. Some users are
* in a situation where requesting a module may deadlock,
@@ -280,7 +282,7 @@ struct ts_config *textsearch_prepare(const char *algo, const void *pattern,
if (ops == NULL)
goto errout;
- conf = ops->init(pattern, len, gfp_mask);
+ conf = ops->init(pattern, len, gfp_mask, flags);
if (IS_ERR(conf)) {
err = PTR_ERR(conf);
goto errout;
diff --git a/lib/ts_bm.c b/lib/ts_bm.c
index 4a7fce72898e..9e66ee4020e9 100644
--- a/lib/ts_bm.c
+++ b/lib/ts_bm.c
@@ -39,6 +39,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/ctype.h>
#include <linux/textsearch.h>
/* Alphabet size, use ASCII */
@@ -64,6 +65,7 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
unsigned int i, text_len, consumed = state->offset;
const u8 *text;
int shift = bm->patlen - 1, bs;
+ const u8 icase = conf->flags & TS_IGNORECASE;
for (;;) {
text_len = conf->get_next_block(consumed, &text, conf, state);
@@ -75,7 +77,9 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
DEBUGP("Searching in position %d (%c)\n",
shift, text[shift]);
for (i = 0; i < bm->patlen; i++)
- if (text[shift-i] != bm->pattern[bm->patlen-1-i])
+ if ((icase ? toupper(text[shift-i])
+ : text[shift-i])
+ != bm->pattern[bm->patlen-1-i])
goto next;
/* London calling... */
@@ -111,14 +115,18 @@ static int subpattern(u8 *pattern, int i, int j, int g)
return ret;
}
-static void compute_prefix_tbl(struct ts_bm *bm)
+static void compute_prefix_tbl(struct ts_bm *bm, int flags)
{
int i, j, g;
for (i = 0; i < ASIZE; i++)
bm->bad_shift[i] = bm->patlen;
- for (i = 0; i < bm->patlen - 1; i++)
+ for (i = 0; i < bm->patlen - 1; i++) {
bm->bad_shift[bm->pattern[i]] = bm->patlen - 1 - i;
+ if (flags & TS_IGNORECASE)
+ bm->bad_shift[tolower(bm->pattern[i])]
+ = bm->patlen - 1 - i;
+ }
/* Compute the good shift array, used to match reocurrences
* of a subpattern */
@@ -135,10 +143,11 @@ static void compute_prefix_tbl(struct ts_bm *bm)
}
static struct ts_config *bm_init(const void *pattern, unsigned int len,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, int flags)
{
struct ts_config *conf;
struct ts_bm *bm;
+ int i;
unsigned int prefix_tbl_len = len * sizeof(unsigned int);
size_t priv_size = sizeof(*bm) + len + prefix_tbl_len;
@@ -146,11 +155,16 @@ static struct ts_config *bm_init(const void *pattern, unsigned int len,
if (IS_ERR(conf))
return conf;
+ conf->flags = flags;
bm = ts_config_priv(conf);
bm->patlen = len;
bm->pattern = (u8 *) bm->good_shift + prefix_tbl_len;
- memcpy(bm->pattern, pattern, len);
- compute_prefix_tbl(bm);
+ if (flags & TS_IGNORECASE)
+ for (i = 0; i < len; i++)
+ bm->pattern[i] = toupper(((u8 *)pattern)[i]);
+ else
+ memcpy(bm->pattern, pattern, len);
+ compute_prefix_tbl(bm, flags);
return conf;
}
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c
index af575b61526b..5696a35184e4 100644
--- a/lib/ts_fsm.c
+++ b/lib/ts_fsm.c
@@ -257,7 +257,7 @@ found_match:
}
static struct ts_config *fsm_init(const void *pattern, unsigned int len,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, int flags)
{
int i, err = -EINVAL;
struct ts_config *conf;
@@ -269,6 +269,9 @@ static struct ts_config *fsm_init(const void *pattern, unsigned int len,
if (len % sizeof(struct ts_fsm_token) || ntokens < 1)
goto errout;
+ if (flags & TS_IGNORECASE)
+ goto errout;
+
for (i = 0; i < ntokens; i++) {
struct ts_fsm_token *t = &tokens[i];
@@ -284,6 +287,7 @@ static struct ts_config *fsm_init(const void *pattern, unsigned int len,
if (IS_ERR(conf))
return conf;
+ conf->flags = flags;
fsm = ts_config_priv(conf);
fsm->ntokens = ntokens;
memcpy(fsm->tokens, pattern, len);
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c
index 3ced628cab4b..632f783e65f1 100644
--- a/lib/ts_kmp.c
+++ b/lib/ts_kmp.c
@@ -33,6 +33,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/ctype.h>
#include <linux/textsearch.h>
struct ts_kmp
@@ -47,6 +48,7 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
struct ts_kmp *kmp = ts_config_priv(conf);
unsigned int i, q = 0, text_len, consumed = state->offset;
const u8 *text;
+ const int icase = conf->flags & TS_IGNORECASE;
for (;;) {
text_len = conf->get_next_block(consumed, &text, conf, state);
@@ -55,9 +57,11 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
break;
for (i = 0; i < text_len; i++) {
- while (q > 0 && kmp->pattern[q] != text[i])
+ while (q > 0 && kmp->pattern[q]
+ != (icase ? toupper(text[i]) : text[i]))
q = kmp->prefix_tbl[q - 1];
- if (kmp->pattern[q] == text[i])
+ if (kmp->pattern[q]
+ == (icase ? toupper(text[i]) : text[i]))
q++;
if (unlikely(q == kmp->pattern_len)) {
state->offset = consumed + i + 1;
@@ -72,24 +76,28 @@ static unsigned int kmp_find(struct ts_config *conf, struct ts_state *state)
}
static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len,
- unsigned int *prefix_tbl)
+ unsigned int *prefix_tbl, int flags)
{
unsigned int k, q;
+ const u8 icase = flags & TS_IGNORECASE;
for (k = 0, q = 1; q < len; q++) {
- while (k > 0 && pattern[k] != pattern[q])
+ while (k > 0 && (icase ? toupper(pattern[k]) : pattern[k])
+ != (icase ? toupper(pattern[q]) : pattern[q]))
k = prefix_tbl[k-1];
- if (pattern[k] == pattern[q])
+ if ((icase ? toupper(pattern[k]) : pattern[k])
+ == (icase ? toupper(pattern[q]) : pattern[q]))
k++;
prefix_tbl[q] = k;
}
}
static struct ts_config *kmp_init(const void *pattern, unsigned int len,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, int flags)
{
struct ts_config *conf;
struct ts_kmp *kmp;
+ int i;
unsigned int prefix_tbl_len = len * sizeof(unsigned int);
size_t priv_size = sizeof(*kmp) + len + prefix_tbl_len;
@@ -97,11 +105,16 @@ static struct ts_config *kmp_init(const void *pattern, unsigned int len,
if (IS_ERR(conf))
return conf;
+ conf->flags = flags;
kmp = ts_config_priv(conf);
kmp->pattern_len = len;
- compute_prefix_tbl(pattern, len, kmp->prefix_tbl);
+ compute_prefix_tbl(pattern, len, kmp->prefix_tbl, flags);
kmp->pattern = (u8 *) kmp->prefix_tbl + prefix_tbl_len;
- memcpy(kmp->pattern, pattern, len);
+ if (flags & TS_IGNORECASE)
+ for (i = 0; i < len; i++)
+ kmp->pattern[i] = toupper(((u8 *)pattern)[i]);
+ else
+ memcpy(kmp->pattern, pattern, len);
return conf;
}