author     Tejun Heo <tj@kernel.org>  2012-04-01 21:30:01 +0200
committer  Tejun Heo <tj@kernel.org>  2012-04-01 21:55:00 +0200
commit     959d851caa48829eb85cb85aa949fd6b4c5d5bc6 (patch)
tree       3ba9c94ec346275fb44c4f0d1cd2537cdff8d811 /block
parent     blkcg: change a spin_lock() to spin_lock_irq() (diff)
parent     cgroup: make css->refcnt clearing on cgroup removal optional (diff)
Merge branch 'for-3.5' of ../cgroup into block/for-3.5/core-merged
cgroup/for-3.5 contains the following changes, which blk-cgroup needs
in order to proceed with the ongoing cleanup:
* Dynamic addition and removal of cftypes, making config/stat file
handling modular for policies (see the sketch after this list).
* A cgroup removal update that no longer waits for css references to
drain, fixing a blkcg removal hang caused by cfq caching cfqgs.
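To make the first item concrete, here is a minimal sketch of the new
cftype interface for a toy controller. Every name prefixed with
example_ is hypothetical, invented for illustration only; the
NULL-terminated cftype array and the ->base_cftypes hookup are the
parts that mirror what the blkio conversion in the diff below does.

	/* Hypothetical controller using the new cftype interface. */
	static struct cftype example_files[] = {
		{
			.name = "stat",			/* shows up as example.stat */
			.read_u64 = example_read_u64,	/* hypothetical handler */
		},
		{ }	/* terminating entry; the core walks the array up to here */
	};

	struct cgroup_subsys example_subsys = {
		.name = "example",
		.subsys_id = example_subsys_id,
		/* Files are registered from this array at subsys init time,
		 * replacing the old per-cgroup ->populate() callback. */
		.base_cftypes = example_files,
	};

The point is that file registration becomes data rather than code: a
policy can add or remove its own cftype block without touching a
central ->populate() path.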
Pull cgroup/for-3.5 into block/for-3.5/core. This causes the
following conflicts in block/blk-cgroup.c:
* 761b3ef50e "cgroup: remove cgroup_subsys argument from callbacks"
conflicts with blkiocg_pre_destroy() addition and blkiocg_attach()
removal. Resolved by removing @subsys from all subsys methods.
* 676f7c8f84 "cgroup: relocate cftype and cgroup_subsys definitions in
controllers" conflicts with ->pre_destroy() and ->attach() updates
and removal of modular config. Resolved by dropping forward
declarations of the methods and applying updates to the relocated
blkio_subsys.
* 4baf6e3325 "cgroup: convert all non-memcg controllers to the new
cftype interface" builds upon the previous item. Resolved by adding
->base_cftypes to the relocated blkio_subsys; the resulting
definition is sketched below.
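For reference, the resolved end state in block/blk-cgroup.c, as
reconstructed from the merge diff that follows: blkio_subsys now sits
below the method definitions (which is why the forward declarations
could be dropped), the methods have lost their cgroup_subsys argument,
->populate is gone, and ->base_cftypes points at blkio_files.

	struct cgroup_subsys blkio_subsys = {
		.name = "blkio",
		.create = blkiocg_create,	/* methods now take only the cgroup */
		.can_attach = blkiocg_can_attach,
		.pre_destroy = blkiocg_pre_destroy,
		.destroy = blkiocg_destroy,
		.subsys_id = blkio_subsys_id,
		.base_cftypes = blkio_files,	/* replaces ->populate() */
		.module = THIS_MODULE,
	};
	EXPORT_SYMBOL_GPL(blkio_subsys);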
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c      | 51
-rw-r--r--  block/blk-ioc.c         |  6
-rw-r--r--  block/blk-softirq.c     | 16
-rw-r--r--  block/blk.h             | 16
-rw-r--r--  block/partitions/ldm.c  | 11
5 files changed, 35 insertions(+), 65 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index aa54c4110f54..4fdeb46b4436 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -43,32 +43,12 @@ EXPORT_SYMBOL_GPL(blkio_root_cgroup);
 
 static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];
 
-static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
-						  struct cgroup *);
-static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
-			      struct cgroup_taskset *);
-static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
-static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
-static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
-
 /* for encoding cft->private value on file */
 #define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
 /* What policy owns the file, proportional or throttle */
 #define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
 #define BLKIOFILE_ATTR(val)		((val) & 0xffff)
 
-struct cgroup_subsys blkio_subsys = {
-	.name = "blkio",
-	.create = blkiocg_create,
-	.can_attach = blkiocg_can_attach,
-	.pre_destroy = blkiocg_pre_destroy,
-	.destroy = blkiocg_destroy,
-	.populate = blkiocg_populate,
-	.subsys_id = blkio_subsys_id,
-	.module = THIS_MODULE,
-};
-EXPORT_SYMBOL_GPL(blkio_subsys);
-
 struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
 {
 	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
@@ -1563,17 +1543,11 @@ struct cftype blkio_files[] = {
 		.read_map = blkiocg_file_read_map,
 	},
 #endif
+	{ }	/* terminate */
 };
 
-static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
-{
-	return cgroup_add_files(cgroup, subsys, blkio_files,
-				ARRAY_SIZE(blkio_files));
-}
-
 /**
  * blkiocg_pre_destroy - cgroup pre_destroy callback
- * @subsys: cgroup subsys
  * @cgroup: cgroup of interest
  *
  * This function is called when @cgroup is about to go away and responsible
@@ -1583,8 +1557,7 @@ static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
  *
  * This is the blkcg counterpart of ioc_release_fn().
  */
-static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
-			       struct cgroup *cgroup)
+static int blkiocg_pre_destroy(struct cgroup *cgroup)
 {
 	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
 
@@ -1609,7 +1582,7 @@ static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
 	return 0;
 }
 
-static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
+static void blkiocg_destroy(struct cgroup *cgroup)
 {
 	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
 
@@ -1617,8 +1590,7 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 	kfree(blkcg);
 }
 
-static struct cgroup_subsys_state *
-blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
+static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
 {
 	static atomic64_t id_seq = ATOMIC64_INIT(0);
 	struct blkio_cgroup *blkcg;
@@ -1706,8 +1678,7 @@ void blkcg_exit_queue(struct request_queue *q)
  * of the main cic data structures. For now we allow a task to change
  * its cgroup only if it's the only owner of its ioc.
  */
-static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-			      struct cgroup_taskset *tset)
+static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
 	struct task_struct *task;
 	struct io_context *ioc;
@@ -1750,6 +1721,18 @@ static void blkcg_bypass_end(void)
 	mutex_unlock(&all_q_mutex);
 }
 
+struct cgroup_subsys blkio_subsys = {
+	.name = "blkio",
+	.create = blkiocg_create,
+	.can_attach = blkiocg_can_attach,
+	.pre_destroy = blkiocg_pre_destroy,
+	.destroy = blkiocg_destroy,
+	.subsys_id = blkio_subsys_id,
+	.base_cftypes = blkio_files,
+	.module = THIS_MODULE,
+};
+EXPORT_SYMBOL_GPL(blkio_subsys);
+
 void blkio_policy_register(struct blkio_policy_type *blkiop)
 {
 	struct request_queue *q;
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 3f3dd51a1280..1e2d53b04858 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -130,6 +130,7 @@ static void ioc_release_fn(struct work_struct *work)
 void put_io_context(struct io_context *ioc)
 {
 	unsigned long flags;
+	bool free_ioc = false;
 
 	if (ioc == NULL)
 		return;
@@ -144,8 +145,13 @@ void put_io_context(struct io_context *ioc)
 		spin_lock_irqsave(&ioc->lock, flags);
 		if (!hlist_empty(&ioc->icq_list))
 			schedule_work(&ioc->release_work);
+		else
+			free_ioc = true;
 		spin_unlock_irqrestore(&ioc->lock, flags);
 	}
+
+	if (free_ioc)
+		kmem_cache_free(iocontext_cachep, ioc);
 }
 EXPORT_SYMBOL(put_io_context);
 
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 1366a89d8e66..467c8de88642 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -8,6 +8,7 @@
 #include <linux/blkdev.h>
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
+#include <linux/sched.h>
 
 #include "blk.h"
 
@@ -103,9 +104,10 @@ static struct notifier_block __cpuinitdata blk_cpu_notifier = {
 
 void __blk_complete_request(struct request *req)
 {
-	int ccpu, cpu, group_cpu = NR_CPUS;
+	int ccpu, cpu;
 	struct request_queue *q = req->q;
 	unsigned long flags;
+	bool shared = false;
 
 	BUG_ON(!q->softirq_done_fn);
 
@@ -117,22 +119,20 @@ void __blk_complete_request(struct request *req)
 	 */
 	if (req->cpu != -1) {
 		ccpu = req->cpu;
-		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) {
-			ccpu = blk_cpu_to_group(ccpu);
-			group_cpu = blk_cpu_to_group(cpu);
-		}
+		if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
+			shared = cpus_share_cache(cpu, ccpu);
 	} else
 		ccpu = cpu;
 
 	/*
-	 * If current CPU and requested CPU are in the same group, running
-	 * softirq in current CPU. One might concern this is just like
+	 * If current CPU and requested CPU share a cache, run the softirq on
+	 * the current CPU. One might concern this is just like
 	 * QUEUE_FLAG_SAME_FORCE, but actually not. blk_complete_request() is
 	 * running in interrupt handler, and currently I/O controller doesn't
 	 * support multiple interrupts, so current CPU is unique actually. This
 	 * avoids IPI sending from current CPU to the first CPU of a group.
 	 */
-	if (ccpu == cpu || ccpu == group_cpu) {
+	if (ccpu == cpu || shared) {
 		struct list_head *list;
 do_local:
 		list = &__get_cpu_var(blk_cpu_done);
diff --git a/block/blk.h b/block/blk.h
index aa81afde8220..85f6ae42f7d3 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -164,22 +164,6 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
 	return q->nr_congestion_off;
 }
 
-static inline int blk_cpu_to_group(int cpu)
-{
-	int group = NR_CPUS;
-#ifdef CONFIG_SCHED_MC
-	const struct cpumask *mask = cpu_coregroup_mask(cpu);
-	group = cpumask_first(mask);
-#elif defined(CONFIG_SCHED_SMT)
-	group = cpumask_first(topology_thread_cpumask(cpu));
-#else
-	return cpu;
-#endif
-	if (likely(group < NR_CPUS))
-		return group;
-	return cpu;
-}
-
 /*
  * Contribute to IO statistics IFF:
  *
diff --git a/block/partitions/ldm.c b/block/partitions/ldm.c
index bd8ae788f689..e507cfbd044e 100644
--- a/block/partitions/ldm.c
+++ b/block/partitions/ldm.c
@@ -2,7 +2,7 @@
  * ldm - Support for Windows Logical Disk Manager (Dynamic Disks)
  *
  * Copyright (C) 2001,2002 Richard Russon <ldm@flatcap.org>
- * Copyright (c) 2001-2007 Anton Altaparmakov
+ * Copyright (c) 2001-2012 Anton Altaparmakov
  * Copyright (C) 2001,2002 Jakob Kemi <jakob.kemi@telia.com>
 *
 * Documentation is available at http://www.linux-ntfs.org/doku.php?id=downloads
@@ -1341,20 +1341,17 @@ found:
 		ldm_error("REC value (%d) exceeds NUM value (%d)", rec, f->num);
 		return false;
 	}
-
 	if (f->map & (1 << rec)) {
 		ldm_error ("Duplicate VBLK, part %d.", rec);
 		f->map &= 0x7F;		/* Mark the group as broken */
 		return false;
 	}
-
 	f->map |= (1 << rec);
-
+	if (!rec)
+		memcpy(f->data, data, VBLK_SIZE_HEAD);
 	data += VBLK_SIZE_HEAD;
 	size -= VBLK_SIZE_HEAD;
-
-	memcpy (f->data+rec*(size-VBLK_SIZE_HEAD)+VBLK_SIZE_HEAD, data, size);
-
+	memcpy(f->data + VBLK_SIZE_HEAD + rec * size, data, size);
 	return true;
 }