author		David Howells <dhowells@redhat.com>	2006-12-05 15:37:56 +0100
committer	David Howells <dhowells@warthog.cambridge.redhat.com>	2006-12-05 15:37:56 +0100
commit	4c1ac1b49122b805adfa4efc620592f68dccf5db
tree	87557f4bc2fd4fe65b7570489c2f610c45c0adcd /block
parent	WorkStruct: make allyesconfig
parent	Remove long-unmaintained ftape driver subsystem.
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:

	drivers/infiniband/core/iwcm.c
	drivers/net/chelsio/cxgb2.c
	drivers/net/wireless/bcm43xx/bcm43xx_main.c
	drivers/net/wireless/prism54/islpci_eth.c
	drivers/usb/core/hub.h
	drivers/usb/input/hid-core.c
	net/core/netpoll.c

Fix up merge failures with Linus's head and fix new compilation failures.

Signed-Off-By: David Howells <dhowells@redhat.com>
Diffstat (limited to 'block')
-rw-r--r--  block/as-iosched.c        |    2
-rw-r--r--  block/blktrace.c          |   57
-rw-r--r--  block/cfq-iosched.c       |    9
-rw-r--r--  block/deadline-iosched.c  |    2
-rw-r--r--  block/elevator.c          |    4
-rw-r--r--  block/ll_rw_blk.c         |  166
-rw-r--r--  block/noop-iosched.c      |    2
-rw-r--r--  block/scsi_ioctl.c        |   53
8 files changed, 206 insertions(+), 89 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index f371c9359999..5934c4bfd52a 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1318,7 +1318,7 @@ static void as_exit_queue(elevator_t *e)
/*
* initialize elevator private data (as_data).
*/
-static void *as_init_queue(request_queue_t *q, elevator_t *e)
+static void *as_init_queue(request_queue_t *q)
{
struct as_data *ad;
diff --git a/block/blktrace.c b/block/blktrace.c
index 135593c8e45b..562ca7cbf858 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -22,30 +22,61 @@
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
+#include <linux/time.h>
#include <asm/uaccess.h>
static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
static unsigned int blktrace_seq __read_mostly = 1;
/*
+ * Send out a notify message.
+ */
+static inline unsigned int trace_note(struct blk_trace *bt,
+ pid_t pid, int action,
+ const void *data, size_t len)
+{
+ struct blk_io_trace *t;
+ int cpu = smp_processor_id();
+
+ t = relay_reserve(bt->rchan, sizeof(*t) + len);
+ if (t == NULL)
+ return 0;
+
+ t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+ t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
+ t->device = bt->dev;
+ t->action = action;
+ t->pid = pid;
+ t->cpu = cpu;
+ t->pdu_len = len;
+ memcpy((void *) t + sizeof(*t), data, len);
+ return blktrace_seq;
+}
+
+/*
* Send out a notify for this process, if we haven't done so since a trace
* started
*/
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
- struct blk_io_trace *t;
+ tsk->btrace_seq = trace_note(bt, tsk->pid,
+ BLK_TN_PROCESS,
+ tsk->comm, sizeof(tsk->comm));
+}
- t = relay_reserve(bt->rchan, sizeof(*t) + sizeof(tsk->comm));
- if (t) {
- t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
- t->device = bt->dev;
- t->action = BLK_TC_ACT(BLK_TC_NOTIFY);
- t->pid = tsk->pid;
- t->cpu = smp_processor_id();
- t->pdu_len = sizeof(tsk->comm);
- memcpy((void *) t + sizeof(*t), tsk->comm, t->pdu_len);
- tsk->btrace_seq = blktrace_seq;
- }
+static void trace_note_time(struct blk_trace *bt)
+{
+ struct timespec now;
+ unsigned long flags;
+ u32 words[2];
+
+ getnstimeofday(&now);
+ words[0] = now.tv_sec;
+ words[1] = now.tv_nsec;
+
+ local_irq_save(flags);
+ trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
+ local_irq_restore(flags);
}
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
@@ -394,6 +425,8 @@ static int blk_trace_startstop(request_queue_t *q, int start)
blktrace_seq++;
smp_mb();
bt->trace_state = Blktrace_running;
+
+ trace_note_time(bt);
ret = 0;
}
} else {
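
The BLK_TN_TIMESTAMP note emitted by trace_note_time() above gives userspace a wall-clock anchor for the otherwise relative sched_clock()-based timestamps in the trace stream. A minimal sketch of how a reader could decode that note follows — this is not blkparse itself; the record layout and constants mirror include/linux/blktrace_api.h of this series (the header change sits outside this block/ diff), and the buffer handling is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* On-relay record header, mirroring struct blk_io_trace (48 bytes). */
struct blk_io_trace_hdr {
	uint32_t magic;		/* BLK_IO_TRACE_MAGIC | version */
	uint32_t sequence;
	uint64_t time;		/* sched_clock()-based timestamp */
	uint64_t sector;
	uint32_t bytes;
	uint32_t action;
	uint32_t pid;
	uint32_t device;
	uint32_t cpu;
	uint16_t error;
	uint16_t pdu_len;	/* payload bytes following the header */
};

#define BLK_IO_TRACE_MAGIC	0x65617400
#define BLK_TC_NOTIFY		(1 << 10)
#define BLK_TC_SHIFT		16
#define BLK_TC_ACT(act)		((act) << BLK_TC_SHIFT)
#define BLK_TN_TIMESTAMP	(1 | BLK_TC_ACT(BLK_TC_NOTIFY))

/* Decode one record; returns bytes consumed, 0 if buf is short/corrupt. */
static size_t decode_record(const unsigned char *buf, size_t len)
{
	struct blk_io_trace_hdr t;
	uint32_t words[2];	/* tv_sec, tv_nsec, as packed by trace_note_time() */

	if (len < sizeof(t))
		return 0;
	memcpy(&t, buf, sizeof(t));
	if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC)
		return 0;
	if (len < sizeof(t) + t.pdu_len)
		return 0;

	if (t.action == BLK_TN_TIMESTAMP && t.pdu_len >= sizeof(words)) {
		memcpy(words, buf + sizeof(t), sizeof(words));
		printf("cpu%u: trace started at %u.%09u (sched_clock=%llu)\n",
		       t.cpu, words[0], words[1],
		       (unsigned long long)t.time);
	}
	return sizeof(t) + t.pdu_len;
}
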
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 6cec3a1dccb8..84e9be073180 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1464,8 +1464,7 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
}
static void
-cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
- struct request *rq)
+cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
{
sector_t sdist;
u64 total;
@@ -1617,7 +1616,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}
cfq_update_io_thinktime(cfqd, cic);
- cfq_update_io_seektime(cfqd, cic, rq);
+ cfq_update_io_seektime(cic, rq);
cfq_update_idle_window(cfqd, cfqq, cic);
cic->last_queue = jiffies;
@@ -1770,7 +1769,7 @@ static int cfq_may_queue(request_queue_t *q, int rw)
/*
* queue lock held here
*/
-static void cfq_put_request(request_queue_t *q, struct request *rq)
+static void cfq_put_request(struct request *rq)
{
struct cfq_queue *cfqq = RQ_CFQQ(rq);
@@ -1953,7 +1952,7 @@ static void cfq_exit_queue(elevator_t *e)
kfree(cfqd);
}
-static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q)
{
struct cfq_data *cfqd;
int i;
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index b7c5b34cb7b4..6d673e938d3e 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -356,7 +356,7 @@ static void deadline_exit_queue(elevator_t *e)
/*
* initialize elevator private data (deadline_data).
*/
-static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
+static void *deadline_init_queue(request_queue_t *q)
{
struct deadline_data *dd;
diff --git a/block/elevator.c b/block/elevator.c
index 8ccd163254b8..c0063f345c5d 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -129,7 +129,7 @@ static struct elevator_type *elevator_get(const char *name)
static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
{
- return eq->ops->elevator_init_fn(q, eq);
+ return eq->ops->elevator_init_fn(q);
}
static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
@@ -810,7 +810,7 @@ void elv_put_request(request_queue_t *q, struct request *rq)
elevator_t *e = q->elevator;
if (e->ops->elevator_put_req_fn)
- e->ops->elevator_put_req_fn(q, rq);
+ e->ops->elevator_put_req_fn(rq);
}
int elv_may_queue(request_queue_t *q, int rw)
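
Across the schedulers in this merge, elevator_init_fn drops its unused elevator_t argument and elevator_put_req_fn drops its unused request_queue_t argument; the elevator.c hunks above are where the core stops passing them. A scheduler written against the new prototypes looks roughly like the sketch below — modeled on noop-iosched.c, with elevator_type field names as in this era's include/linux/elevator.h; the "example" names are placeholders:

struct example_data {
	struct list_head queue;
};

/* New prototype: the elevator_t argument is gone. */
static void *example_init_queue(request_queue_t *q)
{
	struct example_data *ed;

	ed = kmalloc_node(sizeof(*ed), GFP_KERNEL, q->node);
	if (!ed)
		return NULL;
	INIT_LIST_HEAD(&ed->queue);
	return ed;	/* the core stores this as eq->elevator_data */
}

/* New prototype: the request_queue_t argument is gone. */
static void example_put_request(struct request *rq)
{
	/* free any per-request state hung off the request here */
}

static struct elevator_type elevator_example = {
	.ops = {
		.elevator_init_fn	= example_init_queue,
		.elevator_put_req_fn	= example_put_request,
	},
	.elevator_name	= "example",
	.elevator_owner	= THIS_MODULE,
};
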
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index eb4cf6df7374..cc6e95f8e5d9 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2322,6 +2322,84 @@ void blk_insert_request(request_queue_t *q, struct request *rq,
EXPORT_SYMBOL(blk_insert_request);
+static int __blk_rq_unmap_user(struct bio *bio)
+{
+ int ret = 0;
+
+ if (bio) {
+ if (bio_flagged(bio, BIO_USER_MAPPED))
+ bio_unmap_user(bio);
+ else
+ ret = bio_uncopy_user(bio);
+ }
+
+ return ret;
+}
+
+static int __blk_rq_map_user(request_queue_t *q, struct request *rq,
+ void __user *ubuf, unsigned int len)
+{
+ unsigned long uaddr;
+ struct bio *bio, *orig_bio;
+ int reading, ret;
+
+ reading = rq_data_dir(rq) == READ;
+
+ /*
+ * if alignment requirement is satisfied, map in user pages for
+ * direct dma. else, set up kernel bounce buffers
+ */
+ uaddr = (unsigned long) ubuf;
+ if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+ bio = bio_map_user(q, NULL, uaddr, len, reading);
+ else
+ bio = bio_copy_user(q, uaddr, len, reading);
+
+ if (IS_ERR(bio)) {
+ return PTR_ERR(bio);
+ }
+
+ orig_bio = bio;
+ blk_queue_bounce(q, &bio);
+ /*
+ * We link the bounce buffer in and could have to traverse it
+ * later so we have to get a ref to prevent it from being freed
+ */
+ bio_get(bio);
+
+ /*
+ * For most (all? we don't know of any) queues we could skip
+ * grabbing the queue lock here; only drivers with a funky
+ * private ->back_merge_fn() could be problematic.
+ */
+ spin_lock_irq(q->queue_lock);
+ if (!rq->bio)
+ blk_rq_bio_prep(q, rq, bio);
+ else if (!q->back_merge_fn(q, rq, bio)) {
+ ret = -EINVAL;
+ spin_unlock_irq(q->queue_lock);
+ goto unmap_bio;
+ } else {
+ rq->biotail->bi_next = bio;
+ rq->biotail = bio;
+
+ rq->nr_sectors += bio_sectors(bio);
+ rq->hard_nr_sectors = rq->nr_sectors;
+ rq->data_len += bio->bi_size;
+ }
+ spin_unlock_irq(q->queue_lock);
+
+ return bio->bi_size;
+
+unmap_bio:
+ /* if it was bounced we must call the end io function */
+ bio_endio(bio, bio->bi_size, 0);
+ __blk_rq_unmap_user(orig_bio);
+ bio_put(bio);
+ return ret;
+}
+
/**
* blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
* @q: request queue where request should be inserted
@@ -2343,42 +2421,44 @@ EXPORT_SYMBOL(blk_insert_request);
* unmapping.
*/
int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
- unsigned int len)
+ unsigned long len)
{
- unsigned long uaddr;
- struct bio *bio;
- int reading;
+ unsigned long bytes_read = 0;
+ int ret;
if (len > (q->max_hw_sectors << 9))
return -EINVAL;
if (!len || !ubuf)
return -EINVAL;
- reading = rq_data_dir(rq) == READ;
+ while (bytes_read != len) {
+ unsigned long map_len, end, start;
- /*
- * if alignment requirement is satisfied, map in user pages for
- * direct dma. else, set up kernel bounce buffers
- */
- uaddr = (unsigned long) ubuf;
- if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
- bio = bio_map_user(q, NULL, uaddr, len, reading);
- else
- bio = bio_copy_user(q, uaddr, len, reading);
+ map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
+ end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
+ >> PAGE_SHIFT;
+ start = (unsigned long)ubuf >> PAGE_SHIFT;
- if (!IS_ERR(bio)) {
- rq->bio = rq->biotail = bio;
- blk_rq_bio_prep(q, rq, bio);
+ /*
+ * A bad offset could cause us to require BIO_MAX_PAGES + 1
+ * pages. If this happens we just lower the requested
+ * mapping len by a page so that we can fit
+ */
+ if (end - start > BIO_MAX_PAGES)
+ map_len -= PAGE_SIZE;
- rq->buffer = rq->data = NULL;
- rq->data_len = len;
- return 0;
+ ret = __blk_rq_map_user(q, rq, ubuf, map_len);
+ if (ret < 0)
+ goto unmap_rq;
+ bytes_read += ret;
+ ubuf += ret;
}
- /*
- * bio is the err-ptr
- */
- return PTR_ERR(bio);
+ rq->buffer = rq->data = NULL;
+ return 0;
+unmap_rq:
+ blk_rq_unmap_user(rq);
+ return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
@@ -2404,7 +2484,7 @@ EXPORT_SYMBOL(blk_rq_map_user);
* unmapping.
*/
int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
- struct sg_iovec *iov, int iov_count)
+ struct sg_iovec *iov, int iov_count, unsigned int len)
{
struct bio *bio;
@@ -2418,10 +2498,15 @@ int blk_rq_map_user_iov(request_queue_t *q, struct request *rq,
if (IS_ERR(bio))
return PTR_ERR(bio);
- rq->bio = rq->biotail = bio;
+ if (bio->bi_size != len) {
+ bio_endio(bio, bio->bi_size, 0);
+ bio_unmap_user(bio);
+ return -EINVAL;
+ }
+
+ bio_get(bio);
blk_rq_bio_prep(q, rq, bio);
rq->buffer = rq->data = NULL;
- rq->data_len = bio->bi_size;
return 0;
}
@@ -2429,23 +2514,26 @@ EXPORT_SYMBOL(blk_rq_map_user_iov);
/**
* blk_rq_unmap_user - unmap a request with user data
- * @bio: bio to be unmapped
- * @ulen: length of user buffer
+ * @rq: rq to be unmapped
*
* Description:
- * Unmap a bio previously mapped by blk_rq_map_user().
+ * Unmap a rq previously mapped by blk_rq_map_user().
+ * rq->bio must be set to the original head of the request.
*/
-int blk_rq_unmap_user(struct bio *bio, unsigned int ulen)
+int blk_rq_unmap_user(struct request *rq)
{
- int ret = 0;
+ struct bio *bio, *mapped_bio;
- if (bio) {
- if (bio_flagged(bio, BIO_USER_MAPPED))
- bio_unmap_user(bio);
+ while ((bio = rq->bio)) {
+ if (bio_flagged(bio, BIO_BOUNCED))
+ mapped_bio = bio->bi_private;
else
- ret = bio_uncopy_user(bio);
- }
+ mapped_bio = bio;
+ __blk_rq_unmap_user(mapped_bio);
+ rq->bio = bio->bi_next;
+ bio_put(bio);
+ }
return 0;
}
@@ -2476,11 +2564,8 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
if (rq_data_dir(rq) == WRITE)
bio->bi_rw |= (1 << BIO_RW);
- rq->bio = rq->biotail = bio;
blk_rq_bio_prep(q, rq, bio);
-
rq->buffer = rq->data = NULL;
- rq->data_len = len;
return 0;
}
@@ -3495,6 +3580,7 @@ void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
rq->hard_cur_sectors = rq->current_nr_sectors;
rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
rq->buffer = bio_data(bio);
+ rq->data_len = bio->bi_size;
rq->bio = rq->biotail = bio;
}
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index 79af43179421..1c3de2b9a6b5 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -65,7 +65,7 @@ noop_latter_request(request_queue_t *q, struct request *rq)
return list_entry(rq->queuelist.next, struct request, queuelist);
}
-static void *noop_init_queue(request_queue_t *q, elevator_t *e)
+static void *noop_init_queue(request_queue_t *q)
{
struct noop_data *nd;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index e55a75621437..5493c2fbbab1 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -226,7 +226,6 @@ static int sg_io(struct file *file, request_queue_t *q,
unsigned long start_time;
int writing = 0, ret = 0;
struct request *rq;
- struct bio *bio;
char sense[SCSI_SENSE_BUFFERSIZE];
unsigned char cmd[BLK_MAX_CDB];
@@ -258,30 +257,6 @@ static int sg_io(struct file *file, request_queue_t *q,
if (!rq)
return -ENOMEM;
- if (hdr->iovec_count) {
- const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
- struct sg_iovec *iov;
-
- iov = kmalloc(size, GFP_KERNEL);
- if (!iov) {
- ret = -ENOMEM;
- goto out;
- }
-
- if (copy_from_user(iov, hdr->dxferp, size)) {
- kfree(iov);
- ret = -EFAULT;
- goto out;
- }
-
- ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count);
- kfree(iov);
- } else if (hdr->dxfer_len)
- ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
-
- if (ret)
- goto out;
-
/*
* fill in request structure
*/
@@ -294,7 +269,6 @@ static int sg_io(struct file *file, request_queue_t *q,
rq->sense_len = 0;
rq->cmd_type = REQ_TYPE_BLOCK_PC;
- bio = rq->bio;
/*
* bounce this after holding a reference to the original bio, it's
@@ -309,6 +283,31 @@ static int sg_io(struct file *file, request_queue_t *q,
if (!rq->timeout)
rq->timeout = BLK_DEFAULT_TIMEOUT;
+ if (hdr->iovec_count) {
+ const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
+ struct sg_iovec *iov;
+
+ iov = kmalloc(size, GFP_KERNEL);
+ if (!iov) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(iov, hdr->dxferp, size)) {
+ kfree(iov);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = blk_rq_map_user_iov(q, rq, iov, hdr->iovec_count,
+ hdr->dxfer_len);
+ kfree(iov);
+ } else if (hdr->dxfer_len)
+ ret = blk_rq_map_user(q, rq, hdr->dxferp, hdr->dxfer_len);
+
+ if (ret)
+ goto out;
+
rq->retries = 0;
start_time = jiffies;
@@ -339,7 +338,7 @@ static int sg_io(struct file *file, request_queue_t *q,
hdr->sb_len_wr = len;
}
- if (blk_rq_unmap_user(bio, hdr->dxfer_len))
+ if (blk_rq_unmap_user(rq))
ret = -EFAULT;
/* may not have succeeded, but output values written to control