Diffstat (limited to 'lib')
-rw-r--r--   lib/bug.c            | 19
-rw-r--r--   lib/dynamic_printk.c |  6
-rw-r--r--   lib/idr.c            | 22
-rw-r--r--   lib/percpu_counter.c |  7
-rw-r--r--   lib/scatterlist.c    |  2
-rw-r--r--   lib/swiotlb.c        | 10
6 files changed, 49 insertions, 17 deletions
diff --git a/lib/bug.c b/lib/bug.c
index bfeafd60ee9f..300e41afbf97 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -5,6 +5,8 @@
 
   CONFIG_BUG - emit BUG traps.  Nothing happens without this.
   CONFIG_GENERIC_BUG - enable this code.
+  CONFIG_GENERIC_BUG_RELATIVE_POINTERS - use 32-bit pointers relative to
+	the containing struct bug_entry for bug_addr and file.
   CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG
 
   CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
@@ -43,6 +45,15 @@
 
 extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
 
+static inline unsigned long bug_addr(const struct bug_entry *bug)
+{
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+	return bug->bug_addr;
+#else
+	return (unsigned long)bug + bug->bug_addr_disp;
+#endif
+}
+
 #ifdef CONFIG_MODULES
 static LIST_HEAD(module_bug_list);
 
@@ -55,7 +66,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
 		unsigned i;
 
 		for (i = 0; i < mod->num_bugs; ++i, ++bug)
-			if (bugaddr == bug->bug_addr)
+			if (bugaddr == bug_addr(bug))
 				return bug;
 	}
 	return NULL;
@@ -108,7 +119,7 @@ const struct bug_entry *find_bug(unsigned long bugaddr)
 	const struct bug_entry *bug;
 
 	for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
-		if (bugaddr == bug->bug_addr)
+		if (bugaddr == bug_addr(bug))
 			return bug;
 
 	return module_find_bug(bugaddr);
@@ -133,7 +144,11 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
 
 	if (bug) {
 #ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
 		file = bug->file;
+#else
+		file = (const char *)bug + bug->file_disp;
+#endif
 		line = bug->line;
 #endif
 		warning = (bug->flags & BUGFLAG_WARNING) != 0;
diff --git a/lib/dynamic_printk.c b/lib/dynamic_printk.c
index d83660fd6fdd..8e30295e8566 100644
--- a/lib/dynamic_printk.c
+++ b/lib/dynamic_printk.c
@@ -135,7 +135,7 @@ int unregister_dynamic_debug_module(char *mod_name)
 	nr_entries--;
 out:
 	up(&debug_list_mutex);
-	return 0;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(unregister_dynamic_debug_module);
 
@@ -289,7 +289,7 @@ static ssize_t pr_debug_write(struct file *file, const char __user *buf,
 					dynamic_enabled = DYNAMIC_ENABLED_SOME;
 				err = 0;
 				printk(KERN_DEBUG
-					"debugging enabled for module %s",
+					"debugging enabled for module %s\n",
 					elem->name);
 			} else if (!value && (elem->enable == 1)) {
 				elem->enable = 0;
@@ -309,7 +309,7 @@ static ssize_t pr_debug_write(struct file *file, const char __user *buf,
 				err = 0;
 				printk(KERN_DEBUG
 					"debugging disabled for module "
-					"%s", elem->name);
+					"%s\n", elem->name);
 			}
 		}
 	}
diff --git a/lib/idr.c b/lib/idr.c
index e728c7fccc4d..1c4f9281f412 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -185,6 +185,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
 			new = get_from_free_list(idp);
 			if (!new)
 				return -1;
+			new->layer = l-1;
 			rcu_assign_pointer(p->ary[m], new);
 			p->count++;
 		}
@@ -210,6 +211,7 @@ build_up:
 	if (unlikely(!p)) {
 		if (!(p = get_from_free_list(idp)))
 			return -1;
+		p->layer = 0;
 		layers = 1;
 	}
 	/*
@@ -218,8 +220,14 @@ build_up:
 	 */
 	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
 		layers++;
-		if (!p->count)
+		if (!p->count) {
+			/* special case: if the tree is currently empty,
+			 * then we grow the tree by moving the top node
+			 * upwards.
+			 */
+			p->layer++;
 			continue;
+		}
 		if (!(new = get_from_free_list(idp))) {
 			/*
 			 * The allocation failed.  If we built part of
@@ -237,6 +245,7 @@ build_up:
 		}
 		new->ary[0] = p;
 		new->count = 1;
+		new->layer = layers-1;
 		if (p->bitmap == IDR_FULL)
 			__set_bit(0, &new->bitmap);
 		p = new;
@@ -493,17 +502,21 @@ void *idr_find(struct idr *idp, int id)
 	int n;
 	struct idr_layer *p;
 
-	n = idp->layers * IDR_BITS;
 	p = rcu_dereference(idp->top);
+	if (!p)
+		return NULL;
+	n = (p->layer+1) * IDR_BITS;
 
 	/* Mask off upper bits we don't use for the search. */
 	id &= MAX_ID_MASK;
 
 	if (id >= (1 << n))
 		return NULL;
+	BUG_ON(n == 0);
 
 	while (n > 0 && p) {
 		n -= IDR_BITS;
+		BUG_ON(n != p->layer*IDR_BITS);
 		p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
 	}
 	return((void *)p);
@@ -582,8 +595,11 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 	int n;
 	struct idr_layer *p, *old_p;
 
-	n = idp->layers * IDR_BITS;
 	p = idp->top;
+	if (!p)
+		return ERR_PTR(-EINVAL);
+
+	n = (p->layer+1) * IDR_BITS;
 
 	id &= MAX_ID_MASK;
 
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index a8663890a88c..b255b939bc1b 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -62,10 +62,7 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 	for_each_online_cpu(cpu) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
-		*pcount = 0;
 	}
-	fbc->count = ret;
-
 	spin_unlock(&fbc->lock);
 	return ret;
 }
@@ -104,13 +101,13 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
 	if (!fbc->counters)
 		return;
 
-	free_percpu(fbc->counters);
-	fbc->counters = NULL;
 #ifdef CONFIG_HOTPLUG_CPU
 	mutex_lock(&percpu_counters_lock);
 	list_del(&fbc->list);
 	mutex_unlock(&percpu_counters_lock);
 #endif
+	free_percpu(fbc->counters);
+	fbc->counters = NULL;
 }
 EXPORT_SYMBOL(percpu_counter_destroy);
 
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 8d2688ff1352..b7b449dafbe5 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -395,7 +395,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
 			WARN_ON(!irqs_disabled());
 			kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
 		} else
-			kunmap(miter->addr);
+			kunmap(miter->page);
 
 		miter->page = NULL;
 		miter->addr = NULL;
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 78330c37a61b..5f6c629a924d 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -467,9 +467,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dma_addr_t dev_addr;
 	void *ret;
 	int order = get_order(size);
+	u64 dma_mask = DMA_32BIT_MASK;
+
+	if (hwdev && hwdev->coherent_dma_mask)
+		dma_mask = hwdev->coherent_dma_mask;
 
 	ret = (void *)__get_free_pages(flags, order);
-	if (ret && address_needs_mapping(hwdev, virt_to_bus(ret), size)) {
+	if (ret && !is_buffer_dma_capable(dma_mask, virt_to_bus(ret), size)) {
 		/*
 		 * The allocated memory isn't reachable by the device.
 		 * Fall back on swiotlb_map_single().
@@ -493,9 +497,9 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	dev_addr = virt_to_bus(ret);
 
 	/* Confirm address can be DMA'd by device */
-	if (address_needs_mapping(hwdev, dev_addr, size)) {
+	if (!is_buffer_dma_capable(dma_mask, dev_addr, size)) {
 		printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
-		       (unsigned long long)*hwdev->dma_mask,
+		       (unsigned long long)dma_mask,
 		       (unsigned long long)dev_addr);
 
 		/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
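
The lib/bug.c hunks add support for CONFIG_GENERIC_BUG_RELATIVE_POINTERS: a bug table entry records its trap address (and, with CONFIG_DEBUG_BUGVERBOSE, its source file) as a 32-bit displacement from the entry itself rather than as a full pointer, which shrinks the bug table on 64-bit kernels. The new bug_addr() helper and the file_disp branch in report_bug() decode those displacements. A minimal sketch of the entry layout that this decoding assumes follows; the authoritative definition lives in include/asm-generic/bug.h and may differ in detail:

/* Sketch only: the layout assumed by the bug_addr()/file_disp decoding above. */
struct bug_entry {
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
	unsigned long	bug_addr;	/* absolute address of the trapping instruction */
#else
	signed int	bug_addr_disp;	/* offset from &bug_entry to the trapping instruction */
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
	const char	*file;
#else
	signed int	file_disp;	/* offset from &bug_entry to the file name string */
#endif
	unsigned short	line;
#endif
	unsigned short	flags;
};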
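
The lib/percpu_counter.c hunks make __percpu_counter_sum() a pure reader: it no longer zeroes the per-cpu deltas or writes the folded total back into fbc->count, so a concurrent percpu_counter_add() on another CPU cannot have its delta silently dropped; percpu_counter_destroy() now also unlinks the counter from the CPU-hotplug list before freeing the per-cpu storage. A hypothetical usage sketch of the resulting semantics (the counter name and values are illustrative, not from the patch):

#include <linux/percpu_counter.h>

static struct percpu_counter nr_widgets;	/* hypothetical counter */

static void widget_counter_demo(void)
{
	s64 approx, exact;

	if (percpu_counter_init(&nr_widgets, 0))
		return;

	percpu_counter_add(&nr_widgets, 1);		/* cheap per-cpu delta */

	approx = percpu_counter_read(&nr_widgets);	/* may lag behind per-cpu deltas */
	exact = percpu_counter_sum(&nr_widgets);	/* folds deltas under fbc->lock; after
							 * this change it leaves them in place */
	(void)approx;
	(void)exact;

	percpu_counter_destroy(&nr_widgets);
}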
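
The lib/swiotlb.c hunks check coherent allocations against the device's coherent_dma_mask (defaulting to DMA_32BIT_MASK when none is set) rather than the streaming DMA mask consulted by address_needs_mapping(). The is_buffer_dma_capable() helper they switch to is, at the time of this change, essentially the bounds check sketched below; the real definition lives in include/linux/dma-mapping.h:

/* Sketch of the check behind is_buffer_dma_capable(): the buffer is usable
 * only if its last byte still falls within the device's DMA mask. */
static inline int is_buffer_dma_capable(u64 mask, dma_addr_t addr, size_t size)
{
	return addr + size <= mask;
}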