Diffstat (limited to 'block')
-rw-r--r--  block/as-iosched.c          3
-rw-r--r--  block/blktrace.c           14
-rw-r--r--  block/cfq-iosched.c         5
-rw-r--r--  block/deadline-iosched.c    1
-rw-r--r--  block/elevator.c            4
-rw-r--r--  block/genhd.c              10
-rw-r--r--  block/ioctl.c               4
-rw-r--r--  block/ll_rw_blk.c         147
8 files changed, 135 insertions, 53 deletions
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 1ec5df466708..5da56d48fbd3 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -10,7 +10,6 @@
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -892,7 +891,7 @@ static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
}
/*
- * as_can_anticipate indicates weather we should either run arq
+ * as_can_anticipate indicates whether we should either run arq
* or keep anticipating a better request.
*/
static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
diff --git a/block/blktrace.c b/block/blktrace.c
index 36f3a172275f..8ff33441d8a2 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -15,7 +15,6 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
@@ -70,7 +69,7 @@ static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK
/*
* Bio action bits of interest
*/
-static u32 bio_act[3] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_ACT(BLK_TC_SYNC) };
+static u32 bio_act[5] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_ACT(BLK_TC_SYNC), 0, BLK_TC_ACT(BLK_TC_AHEAD) };
/*
* More could be added as needed, taking care to increment the decrementer
@@ -80,6 +79,8 @@ static u32 bio_act[3] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_AC
(((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0))
#define trace_sync_bit(rw) \
(((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
+#define trace_ahead_bit(rw) \
+ (((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
/*
* The worker for the various blk_add_trace*() types. Fills out a
@@ -101,6 +102,7 @@ void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
what |= ddir_act[rw & WRITE];
what |= bio_act[trace_barrier_bit(rw)];
what |= bio_act[trace_sync_bit(rw)];
+ what |= bio_act[trace_ahead_bit(rw)];
pid = tsk->pid;
if (unlikely(act_log_check(bt, what, sector, pid)))
@@ -215,7 +217,7 @@ static int blk_trace_remove(request_queue_t *q)
static int blk_dropped_open(struct inode *inode, struct file *filp)
{
- filp->private_data = inode->u.generic_ip;
+ filp->private_data = inode->i_private;
return 0;
}
@@ -448,8 +450,10 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
**/
void blk_trace_shutdown(request_queue_t *q)
{
- blk_trace_startstop(q, 0);
- blk_trace_remove(q);
+ if (q->blk_trace) {
+ blk_trace_startstop(q, 0);
+ blk_trace_remove(q);
+ }
}
/*
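
The widened bio_act[] (3 to 5 slots) plus trace_ahead_bit() let blktrace tag read-ahead bios: the macro shifts the BIO_RW_AHEAD flag into array index 4, where BLK_TC_ACT(BLK_TC_AHEAD) sits. A minimal userspace sketch of that index arithmetic, assuming the era's BIO_RW_AHEAD == 1 (the bit value is an assumption, not taken from this patch):

/* Sketch only: mirrors the trace_ahead_bit() arithmetic with an
 * assumed BIO_RW_AHEAD value; not the kernel header itself. */
#include <stdio.h>

#define BIO_RW_AHEAD	1	/* assumed, per <linux/bio.h> of that era */
#define trace_ahead_bit(rw) \
	(((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))

int main(void)
{
	unsigned int rw = 1 << BIO_RW_AHEAD;	/* a read-ahead bio */
	/* (rw & 2) << 1 == 4: selects bio_act[4], the AHEAD slot */
	printf("index = %u\n", trace_ahead_bit(rw));
	return 0;
}
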
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e25223e147a2..3a3aee08ec5f 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -6,7 +6,6 @@
*
* Copyright (C) 2003 Jens Axboe <axboe@suse.de>
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
@@ -937,7 +936,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
* seeks. so allow a little bit of time for him to submit a new rq
*/
if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
- sl = 2;
+ sl = min(sl, msecs_to_jiffies(2));
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
return 1;
@@ -1562,7 +1561,7 @@ restart:
/* ->key must be copied to avoid race with cfq_exit_queue() */
k = __cic->key;
if (unlikely(!k)) {
- cfq_drop_dead_cic(ioc, cic);
+ cfq_drop_dead_cic(ioc, __cic);
goto restart;
}
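
The cfq_arm_slice_timer() fix replaces a literal "sl = 2" with min(sl, msecs_to_jiffies(2)). A raw jiffy count scales with HZ, so the brief idle window granted to a seeky process was 2ms on HZ=1000 kernels but 20ms on HZ=100 ones; clamping through msecs_to_jiffies() keeps it at roughly 2ms everywhere. A standalone sketch of the difference, approximating the kernel helper in userspace:

/* Sketch: userspace approximation of msecs_to_jiffies() to show why
 * a literal jiffy count is not HZ-independent. */
#include <stdio.h>

static unsigned long msecs_to_jiffies(unsigned int ms, unsigned int hz)
{
	return ((unsigned long)ms * hz + 999) / 1000;	/* round up */
}

int main(void)
{
	unsigned int hz[] = { 100, 250, 1000 };
	for (int i = 0; i < 3; i++)
		printf("HZ=%4u: literal 2 jiffies = %ums, "
		       "msecs_to_jiffies(2) = %lu jiffies\n",
		       hz[i], 2 * 1000 / hz[i], msecs_to_jiffies(2, hz[i]));
	return 0;
}
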
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 4469dd84623c..c7ca9f0b6498 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -8,7 +8,6 @@
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
diff --git a/block/elevator.c b/block/elevator.c
index d00b283f31d2..9b72dc7c8a5c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -27,7 +27,6 @@
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
@@ -766,7 +765,8 @@ void elv_unregister(struct elevator_type *e)
read_lock(&tasklist_lock);
do_each_thread(g, p) {
task_lock(p);
- e->ops.trim(p->io_context);
+ if (p->io_context)
+ e->ops.trim(p->io_context);
task_unlock(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
diff --git a/block/genhd.c b/block/genhd.c
index 8d7339511e5e..653919d50cd4 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -2,7 +2,6 @@
* gendisk handling
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/genhd.h>
@@ -296,10 +295,15 @@ static struct kobject *base_probe(dev_t dev, int *part, void *data)
static int __init genhd_device_init(void)
{
+ int err;
+
bdev_map = kobj_map_init(base_probe, &block_subsys_lock);
blk_dev_init();
- subsystem_register(&block_subsys);
- return 0;
+ err = subsystem_register(&block_subsys);
+ if (err < 0)
+ printk(KERN_WARNING "%s: subsystem_register error: %d\n",
+ __FUNCTION__, err);
+ return err;
}
subsys_initcall(genhd_device_init);
diff --git a/block/ioctl.c b/block/ioctl.c
index 9cfa2e1ecb24..309760b7e37f 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -72,7 +72,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
bdevp = bdget_disk(disk, part);
if (!bdevp)
return -ENOMEM;
- mutex_lock(&bdevp->bd_mutex);
+ mutex_lock_nested(&bdevp->bd_mutex, BD_MUTEX_PARTITION);
if (bdevp->bd_openers) {
mutex_unlock(&bdevp->bd_mutex);
bdput(bdevp);
@@ -82,7 +82,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
fsync_bdev(bdevp);
invalidate_bdev(bdevp, 0);
- mutex_lock(&bdev->bd_mutex);
+ mutex_lock_nested(&bdev->bd_mutex, BD_MUTEX_WHOLE);
delete_partition(disk, part);
mutex_unlock(&bdev->bd_mutex);
mutex_unlock(&bdevp->bd_mutex);
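
The two ioctl.c hunks are lockdep annotations rather than behavior changes: a partition's bd_mutex and the whole disk's bd_mutex share one lock class, so holding both looks like recursive locking unless each acquisition declares a subclass. A kernel-style sketch of the pattern; the BD_MUTEX_* subclass values here are assumptions modeled on the constants this series adds:

/* Sketch of the lockdep-nesting pattern, not the actual ioctl path. */
#include <linux/mutex.h>

enum bd_mutex_lock_class {	/* assumed to mirror the new <linux/fs.h> enum */
	BD_MUTEX_NORMAL,
	BD_MUTEX_WHOLE,
	BD_MUTEX_PARTITION,
};

static void demo(struct mutex *part_mutex, struct mutex *whole_mutex)
{
	/* Same lock class, different subclasses: lockdep now validates
	 * the partition-then-whole order instead of reporting recursion. */
	mutex_lock_nested(part_mutex, BD_MUTEX_PARTITION);
	mutex_lock_nested(whole_mutex, BD_MUTEX_WHOLE);
	mutex_unlock(whole_mutex);
	mutex_unlock(part_mutex);
}

mutex_lock_nested() only affects lockdep bookkeeping; with lockdep disabled it compiles down to a plain mutex_lock().
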
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 0603ab2f3692..51dc0edf76e0 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -10,7 +10,6 @@
/*
* This handles all read/write requests to block devices
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
@@ -849,21 +848,18 @@ struct request *blk_queue_find_tag(request_queue_t *q, int tag)
EXPORT_SYMBOL(blk_queue_find_tag);
/**
- * __blk_queue_free_tags - release tag maintenance info
- * @q: the request queue for the device
+ * __blk_free_tags - release a given set of tag maintenance info
+ * @bqt: the tag map to free
*
- * Notes:
- * blk_cleanup_queue() will take care of calling this function, if tagging
- * has been used. So there's no need to call this directly.
- **/
-static void __blk_queue_free_tags(request_queue_t *q)
+ * Tries to free the specified @bqt@. Returns true if it was
+ * actually freed and false if there are still references using it
+ */
+static int __blk_free_tags(struct blk_queue_tag *bqt)
{
- struct blk_queue_tag *bqt = q->queue_tags;
+ int retval;
- if (!bqt)
- return;
-
- if (atomic_dec_and_test(&bqt->refcnt)) {
+ retval = atomic_dec_and_test(&bqt->refcnt);
+ if (retval) {
BUG_ON(bqt->busy);
BUG_ON(!list_empty(&bqt->busy_list));
@@ -874,12 +870,49 @@ static void __blk_queue_free_tags(request_queue_t *q)
bqt->tag_map = NULL;
kfree(bqt);
+
}
+ return retval;
+}
+
+/**
+ * __blk_queue_free_tags - release tag maintenance info
+ * @q: the request queue for the device
+ *
+ * Notes:
+ * blk_cleanup_queue() will take care of calling this function, if tagging
+ * has been used. So there's no need to call this directly.
+ **/
+static void __blk_queue_free_tags(request_queue_t *q)
+{
+ struct blk_queue_tag *bqt = q->queue_tags;
+
+ if (!bqt)
+ return;
+
+ __blk_free_tags(bqt);
+
q->queue_tags = NULL;
q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
}
+
+/**
+ * blk_free_tags - release a given set of tag maintenance info
+ * @bqt: the tag map to free
+ *
+ * For an externally managed @bqt@, frees the map. Callers of this
+ * function must guarantee to have released all the queues that
+ * might have been using this tag map.
+ */
+void blk_free_tags(struct blk_queue_tag *bqt)
+{
+ if (unlikely(!__blk_free_tags(bqt)))
+ BUG();
+}
+EXPORT_SYMBOL(blk_free_tags);
+
/**
* blk_queue_free_tags - release tag maintenance info
* @q: the request queue for the device
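
The tag-map refactor splits the reference drop out of __blk_queue_free_tags() into __blk_free_tags(), which reports whether the map actually went away so blk_free_tags() can BUG() on a leaked reference. The core is the usual atomic_dec_and_test() last-one-frees pattern; a portable C11 analogue with illustrative names:

/* Sketch: the last-reference-frees pattern behind __blk_free_tags(),
 * in portable C11; struct and function names are illustrative. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct tag_map {
	atomic_int refcnt;
	unsigned long *tag_map_bits;
};

static bool tag_map_put(struct tag_map *bqt)
{
	/* fetch_sub returning 1 means we took the count to zero,
	 * i.e. the kernel's atomic_dec_and_test() firing: exactly
	 * one caller frees, everyone else sees false. */
	if (atomic_fetch_sub(&bqt->refcnt, 1) == 1) {
		free(bqt->tag_map_bits);
		free(bqt);
		return true;		/* actually freed */
	}
	return false;			/* other references remain */
}
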
@@ -902,7 +935,7 @@ init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
unsigned long *tag_map;
int nr_ulongs;
- if (depth > q->nr_requests * 2) {
+ if (q && depth > q->nr_requests * 2) {
depth = q->nr_requests * 2;
printk(KERN_ERR "%s: adjusted depth to %d\n",
__FUNCTION__, depth);
@@ -928,6 +961,38 @@ fail:
return -ENOMEM;
}
+static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
+ int depth)
+{
+ struct blk_queue_tag *tags;
+
+ tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
+ if (!tags)
+ goto fail;
+
+ if (init_tag_map(q, tags, depth))
+ goto fail;
+
+ INIT_LIST_HEAD(&tags->busy_list);
+ tags->busy = 0;
+ atomic_set(&tags->refcnt, 1);
+ return tags;
+fail:
+ kfree(tags);
+ return NULL;
+}
+
+/**
+ * blk_init_tags - initialize the tag info for an external tag map
+ * @depth: the maximum queue depth supported
+ **/
+struct blk_queue_tag *blk_init_tags(int depth)
+{
+ return __blk_queue_init_tags(NULL, depth);
+}
+EXPORT_SYMBOL(blk_init_tags);
+
/**
* blk_queue_init_tags - initialize the queue tag info
* @q: the request queue for the device
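
blk_init_tags() calls __blk_queue_init_tags() with q == NULL, which is why init_tag_map() above gains the "q &&" guard before the nr_requests clamp. A hypothetical driver-side usage of the new external-map API (function name, queues, and depth are illustrative):

/* Hypothetical usage sketch; error handling abbreviated. */
static int my_driver_setup_shared_tags(request_queue_t *q1,
				       request_queue_t *q2)
{
	struct blk_queue_tag *shared;

	shared = blk_init_tags(64);		/* q == NULL: depth not clamped */
	if (!shared)
		return -ENOMEM;

	blk_queue_init_tags(q1, 64, shared);	/* each queue takes a   */
	blk_queue_init_tags(q2, 64, shared);	/* reference on the map */
	return 0;
}

/* ...and only after every queue using the map has been torn down:
 *	blk_free_tags(shared);		-- BUGs if references remain  */
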
@@ -942,16 +1007,10 @@ int blk_queue_init_tags(request_queue_t *q, int depth,
BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
if (!tags && !q->queue_tags) {
- tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
- if (!tags)
- goto fail;
+ tags = __blk_queue_init_tags(q, depth);
- if (init_tag_map(q, tags, depth))
+ if (!tags)
goto fail;
-
- INIT_LIST_HEAD(&tags->busy_list);
- tags->busy = 0;
- atomic_set(&tags->refcnt, 1);
} else if (q->queue_tags) {
if ((rc = blk_queue_resize_tags(q, depth)))
return rc;
@@ -1003,6 +1062,13 @@ int blk_queue_resize_tags(request_queue_t *q, int new_depth)
}
/*
+ * Currently cannot replace a shared tag map with a new
+ * one, so error out if this is the case
+ */
+ if (atomic_read(&bqt->refcnt) != 1)
+ return -EBUSY;
+
+ /*
* save the old state info, so we can copy it back
*/
tag_index = bqt->tag_index;
@@ -1781,8 +1847,7 @@ static void blk_release_queue(struct kobject *kobj)
if (q->queue_tags)
__blk_queue_free_tags(q);
- if (q->blk_trace)
- blk_trace_shutdown(q);
+ blk_trace_shutdown(q);
kmem_cache_free(requestq_cachep, q);
}
@@ -2517,7 +2582,7 @@ EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
struct request *rq, int at_head)
{
- DECLARE_COMPLETION(wait);
+ DECLARE_COMPLETION_ONSTACK(wait);
char sense[SCSI_SENSE_BUFFERSIZE];
int err = 0;
@@ -2735,6 +2800,18 @@ long blk_congestion_wait(int rw, long timeout)
EXPORT_SYMBOL(blk_congestion_wait);
+/**
+ * blk_congestion_end - wake up sleepers on a congestion queue
+ * @rw: READ or WRITE
+ */
+void blk_congestion_end(int rw)
+{
+ wait_queue_head_t *wqh = &congestion_wqh[rw];
+
+ if (waitqueue_active(wqh))
+ wake_up(wqh);
+}
+
/*
* Has to be called with the request spinlock acquired
*/
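
blk_congestion_end() lets writeback paths wake blk_congestion_wait() sleepers the moment a queue drains instead of letting the timeout expire, and the waitqueue_active() test keeps the uncongested fast path from taking the wait-queue lock. A hypothetical caller, mirroring how the internal clear_queue_congested() helper is structured:

/* Hypothetical caller sketch; the real code path goes through
 * clear_queue_congested(), this open-codes the idea for illustration. */
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

static void my_driver_queue_drained(request_queue_t *q, int rw)
{
	int bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;

	clear_bit(bit, &q->backing_dev_info.state);
	smp_mb__after_clear_bit();	/* bit visible before the wakeup */
	blk_congestion_end(rw);		/* wake blk_congestion_wait()ers */
}
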
@@ -2745,7 +2822,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
return 0;
/*
- * not contigious
+ * not contiguous
*/
if (req->sector + req->nr_sectors != next->sector)
return 0;
@@ -3117,9 +3194,9 @@ void submit_bio(int rw, struct bio *bio)
BIO_BUG_ON(!bio->bi_io_vec);
bio->bi_rw |= rw;
if (rw & WRITE)
- mod_page_state(pgpgout, count);
+ count_vm_events(PGPGOUT, count);
else
- mod_page_state(pgpgin, count);
+ count_vm_events(PGPGIN, count);
if (unlikely(block_dump)) {
char b[BDEVNAME_SIZE];
@@ -3403,7 +3480,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
}
-static struct notifier_block blk_cpu_notifier = {
+static struct notifier_block __devinitdata blk_cpu_notifier = {
.notifier_call = blk_cpu_notify,
};
@@ -3415,7 +3492,7 @@ static struct notifier_block blk_cpu_notifier = {
*
* Description:
* Ends all I/O on a request. It does not handle partial completions,
- * unless the driver actually implements this in its completionc callback
+ * unless the driver actually implements this in its completion callback
* through requeueing. The actual completion happens out-of-order,
* through a softirq handler. The user must have registered a completion
* callback through blk_queue_softirq_done().
@@ -3492,8 +3569,8 @@ EXPORT_SYMBOL(end_request);
void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
{
- /* first three bits are identical in rq->flags and bio->bi_rw */
- rq->flags |= (bio->bi_rw & 7);
+ /* first two bits are identical in rq->flags and bio->bi_rw */
+ rq->flags |= (bio->bi_rw & 3);
rq->nr_phys_segments = bio_phys_segments(q, bio);
rq->nr_hw_segments = bio_hw_segments(q, bio);
@@ -3541,9 +3618,7 @@ int __init blk_dev_init(void)
INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
-#ifdef CONFIG_HOTPLUG_CPU
- register_cpu_notifier(&blk_cpu_notifier);
-#endif
+ register_hotcpu_notifier(&blk_cpu_notifier);
blk_max_low_pfn = max_low_pfn;
blk_max_pfn = max_pfn;
@@ -3631,6 +3706,8 @@ struct io_context *current_io_context(gfp_t gfp_flags)
ret->nr_batch_requests = 0; /* because this is 0 */
ret->aic = NULL;
ret->cic_root.rb_node = NULL;
+ /* make sure set_task_ioprio() sees the settings above */
+ smp_wmb();
tsk->io_context = ret;
}
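
The closing hunk fixes a publication race in current_io_context(): without the smp_wmb(), a CPU running set_task_ioprio() could observe tsk->io_context non-NULL before the initializing stores above it land. A userspace C11 analogue of the same publish/observe pattern (names illustrative; the kernel pairs the smp_wmb() with a read-side barrier rather than C11 atomics):

/* Sketch: the pointer-publication ordering the smp_wmb() enforces,
 * expressed with C11 atomics; not kernel code. */
#include <stdatomic.h>
#include <stddef.h>

struct io_ctx { int nr_batch_requests; void *aic; };

static _Atomic(struct io_ctx *) task_io_context;

void publish(struct io_ctx *ret)
{
	ret->nr_batch_requests = 0;
	ret->aic = NULL;
	/* release store: the initialization above is ordered before
	 * the pointer becomes visible -- the smp_wmb()'s job here */
	atomic_store_explicit(&task_io_context, ret, memory_order_release);
}

struct io_ctx *observe(void)
{
	/* acquire pairs with the release store above, so a non-NULL
	 * result is always a fully initialized object */
	return atomic_load_explicit(&task_io_context, memory_order_acquire);
}
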