commit a9de18eb761f7c1c860964b2e5addc1a35c7e861
Author:    Ingo Molnar <mingo@elte.hu>  2008-12-31 08:31:57 +0100
Committer: Ingo Molnar <mingo@elte.hu>  2008-12-31 08:31:57 +0100
Tree:      886e75fdfd09690cd262ca69cb7f5d1d42b48602 (drivers/md)
Parent:    Merge branch 'linus' into stackprotector
Parent:    Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs
Merge branch 'linus' into stackprotector
Conflicts:
arch/x86/include/asm/pda.h
kernel/fork.c
Diffstat (limited to 'drivers/md')
31 files changed, 1256 insertions(+), 1111 deletions(-)
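The bulk of this merge, as the diff below shows, extracts dm-raid1's private region hash into a new shared drivers/md/dm-region-hash.c that exports dm_region_hash_create() and related helpers, so mirror-style targets supply callbacks instead of open-coding dirty-region state tracking. As orientation before the diff, here is a minimal sketch of how a client is expected to wire up the new API, modelled on the dm-raid1 conversion in this patch; the my_target context structure, the callback bodies, and my_target_init() are illustrative placeholders, not code from the commit.

#include <linux/dm-dirty-log.h>
#include <linux/dm-region-hash.h>

#include "dm-bio-list.h"	/* struct bio_list is still a dm-private header at this point */

/* Hypothetical client context; dm-raid1 keeps this state in struct mirror_set. */
struct my_target {
	struct dm_region_hash *rh;
	struct dm_dirty_log *log;	/* created beforehand, e.g. via dm_dirty_log_create() */
};

/* The region hash calls back here to hand delayed bios over for writing. */
static void my_dispatch_bios(void *context, struct bio_list *bios)
{
	/* struct my_target *t = context; pop each bio and queue it as a WRITE */
}

/* Called whenever the client's worker thread has new region work to do. */
static void my_wakeup_workers(void *context)
{
	/* e.g. queue_work(wq, &work), as wakeup_mirrord() does in dm-raid1 */
}

/* Called once all in-flight recovery has drained. */
static void my_wakeup_all_recovery_waiters(void *context)
{
	/* e.g. wake_up_all(&recovery_stopped_waitqueue) */
}

static int my_target_init(struct my_target *t, struct dm_target *ti,
			  uint32_t region_size, region_t nr_regions)
{
	t->rh = dm_region_hash_create(t, my_dispatch_bios, my_wakeup_workers,
				      my_wakeup_all_recovery_waiters,
				      ti->begin, 1 /* max parallel recoveries */,
				      t->log, region_size, nr_regions);
	if (IS_ERR(t->rh))
		return PTR_ERR(t->rh);

	/* Bios must not straddle regions, so split IO at region boundaries. */
	ti->split_io = dm_rh_get_region_size(t->rh);
	return 0;
}
/* The teardown path would call dm_region_hash_destroy(t->rh). */

Note the ownership choice visible in the diff: the region hash takes over the dirty log (dm_region_hash_destroy() calls dm_dirty_log_destroy() on it), and recovery concurrency is a creation-time parameter rather than a compile-time constant of the target (dm-raid1 passes MAX_RECOVERY, defined as 1).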
diff --git a/drivers/md/Makefile b/drivers/md/Makefile index f1ef33dfd8cf..1c615804ea76 100644 --- a/drivers/md/Makefile +++ b/drivers/md/Makefile @@ -34,7 +34,7 @@ obj-$(CONFIG_DM_CRYPT) += dm-crypt.o obj-$(CONFIG_DM_DELAY) += dm-delay.o obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o -obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o +obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o obj-$(CONFIG_DM_ZERO) += dm-zero.o quiet_cmd_unroll = UNROLL $@ diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index ac89a5deaca2..ab7c8e4a61f9 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -208,16 +208,19 @@ static void bitmap_checkfree(struct bitmap *bitmap, unsigned long page) */ /* IO operations when bitmap is stored near all superblocks */ -static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long index) +static struct page *read_sb_page(mddev_t *mddev, long offset, + struct page *page, + unsigned long index, int size) { /* choose a good rdev and read the page from there */ mdk_rdev_t *rdev; struct list_head *tmp; - struct page *page = alloc_page(GFP_KERNEL); sector_t target; if (!page) + page = alloc_page(GFP_KERNEL); + if (!page) return ERR_PTR(-ENOMEM); rdev_for_each(rdev, tmp, mddev) { @@ -227,7 +230,9 @@ static struct page *read_sb_page(mddev_t *mddev, long offset, unsigned long inde target = rdev->sb_start + offset + index * (PAGE_SIZE/512); - if (sync_page_io(rdev->bdev, target, PAGE_SIZE, page, READ)) { + if (sync_page_io(rdev->bdev, target, + roundup(size, bdev_hardsect_size(rdev->bdev)), + page, READ)) { page->index = index; attach_page_buffers(page, NULL); /* so that free_buffer will * quietly no-op */ @@ -544,7 +549,9 @@ static int bitmap_read_sb(struct bitmap *bitmap) bitmap->sb_page = read_page(bitmap->file, 0, bitmap, bytes); } else { - bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, 0); + bitmap->sb_page = read_sb_page(bitmap->mddev, bitmap->offset, + NULL, + 0, sizeof(bitmap_super_t)); } if (IS_ERR(bitmap->sb_page)) { err = PTR_ERR(bitmap->sb_page); @@ -957,11 +964,16 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start) */ page = bitmap->sb_page; offset = sizeof(bitmap_super_t); + read_sb_page(bitmap->mddev, bitmap->offset, + page, + index, count); } else if (file) { page = read_page(file, index, bitmap, count); offset = 0; } else { - page = read_sb_page(bitmap->mddev, bitmap->offset, index); + page = read_sb_page(bitmap->mddev, bitmap->offset, + NULL, + index, count); offset = 0; } if (IS_ERR(page)) { /* read error */ diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 682ef9e6acd3..3326750ec02c 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -23,7 +23,7 @@ #include <asm/page.h> #include <asm/unaligned.h> -#include "dm.h" +#include <linux/device-mapper.h> #define DM_MSG_PREFIX "crypt" #define MESG_STR(x) x, sizeof(x) @@ -56,6 +56,7 @@ struct dm_crypt_io { atomic_t pending; int error; sector_t sector; + struct dm_crypt_io *base_io; }; struct dm_crypt_request { @@ -93,7 +94,6 @@ struct crypt_config { struct workqueue_struct *io_queue; struct workqueue_struct *crypt_queue; - wait_queue_head_t writeq; /* * crypto related data @@ -534,6 +534,7 @@ static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti, io->base_bio = bio; io->sector = sector; io->error = 0; + io->base_io = NULL; atomic_set(&io->pending, 0); return io; @@ -547,6 +548,7 @@ static void crypt_inc_pending(struct dm_crypt_io *io) /* * One of the 
bios was finished. Check for completion of * the whole request and correctly clean up the buffer. + * If base_io is set, wait for the last fragment to complete. */ static void crypt_dec_pending(struct dm_crypt_io *io) { @@ -555,7 +557,14 @@ static void crypt_dec_pending(struct dm_crypt_io *io) if (!atomic_dec_and_test(&io->pending)) return; - bio_endio(io->base_bio, io->error); + if (likely(!io->base_io)) + bio_endio(io->base_bio, io->error); + else { + if (io->error && !io->base_io->error) + io->base_io->error = io->error; + crypt_dec_pending(io->base_io); + } + mempool_free(io, cc->io_pool); } @@ -646,10 +655,7 @@ static void kcryptd_io_read(struct dm_crypt_io *io) static void kcryptd_io_write(struct dm_crypt_io *io) { struct bio *clone = io->ctx.bio_out; - struct crypt_config *cc = io->target->private; - generic_make_request(clone); - wake_up(&cc->writeq); } static void kcryptd_io(struct work_struct *work) @@ -688,7 +694,6 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, BUG_ON(io->ctx.idx_out < clone->bi_vcnt); clone->bi_sector = cc->start + io->sector; - io->sector += bio_sectors(clone); if (async) kcryptd_queue_io(io); @@ -700,16 +705,18 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) { struct crypt_config *cc = io->target->private; struct bio *clone; + struct dm_crypt_io *new_io; int crypt_finished; unsigned out_of_pages = 0; unsigned remaining = io->base_bio->bi_size; + sector_t sector = io->sector; int r; /* * Prevent io from disappearing until this function completes. */ crypt_inc_pending(io); - crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, io->sector); + crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector); /* * The allocated buffers can be smaller than the whole bio, @@ -726,6 +733,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) io->ctx.idx_out = 0; remaining -= clone->bi_size; + sector += bio_sectors(clone); crypt_inc_pending(io); r = crypt_convert(cc, &io->ctx); @@ -741,6 +749,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) */ if (unlikely(r < 0)) break; + + io->sector = sector; } /* @@ -750,8 +760,33 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) if (unlikely(out_of_pages)) congestion_wait(WRITE, HZ/100); - if (unlikely(remaining)) - wait_event(cc->writeq, !atomic_read(&io->ctx.pending)); + /* + * With async crypto it is unsafe to share the crypto context + * between fragments, so switch to a new dm_crypt_io structure. + */ + if (unlikely(!crypt_finished && remaining)) { + new_io = crypt_io_alloc(io->target, io->base_bio, + sector); + crypt_inc_pending(new_io); + crypt_convert_init(cc, &new_io->ctx, NULL, + io->base_bio, sector); + new_io->ctx.idx_in = io->ctx.idx_in; + new_io->ctx.offset_in = io->ctx.offset_in; + + /* + * Fragments after the first use the base_io + * pending count. 
+ */ + if (!io->base_io) + new_io->base_io = io; + else { + new_io->base_io = io->base_io; + crypt_inc_pending(io->base_io); + crypt_dec_pending(io); + } + + io = new_io; + } } crypt_dec_pending(io); @@ -1025,7 +1060,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_page_pool; } - cc->bs = bioset_create(MIN_IOS, MIN_IOS); + cc->bs = bioset_create(MIN_IOS, 0); if (!cc->bs) { ti->error = "Cannot allocate crypt bioset"; goto bad_bs; @@ -1078,7 +1113,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad_crypt_queue; } - init_waitqueue_head(&cc->writeq); ti->private = cc; return 0; diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index bdd37f881c42..848b381f1173 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c @@ -13,7 +13,8 @@ #include <linux/bio.h> #include <linux/slab.h> -#include "dm.h" +#include <linux/device-mapper.h> + #include "dm-bio-list.h" #define DM_MSG_PREFIX "delay" diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c index 769ab677f8e0..01590f3e0009 100644 --- a/drivers/md/dm-exception-store.c +++ b/drivers/md/dm-exception-store.c @@ -7,7 +7,6 @@ * This file is released under the GPL. */ -#include "dm.h" #include "dm-snap.h" #include <linux/mm.h> @@ -105,6 +104,11 @@ struct pstore { void *area; /* + * An area of zeros used to clear the next area. + */ + void *zero_area; + + /* * Used to keep track of which metadata area the data in * 'chunk' refers to. */ @@ -149,6 +153,13 @@ static int alloc_area(struct pstore *ps) if (!ps->area) return r; + ps->zero_area = vmalloc(len); + if (!ps->zero_area) { + vfree(ps->area); + return r; + } + memset(ps->zero_area, 0, len); + return 0; } @@ -156,6 +167,8 @@ static void free_area(struct pstore *ps) { vfree(ps->area); ps->area = NULL; + vfree(ps->zero_area); + ps->zero_area = NULL; } struct mdata_req { @@ -220,25 +233,41 @@ static chunk_t area_location(struct pstore *ps, chunk_t area) * Read or write a metadata area. Remembering to skip the first * chunk which holds the header. */ -static int area_io(struct pstore *ps, chunk_t area, int rw) +static int area_io(struct pstore *ps, int rw) { int r; chunk_t chunk; - chunk = area_location(ps, area); + chunk = area_location(ps, ps->current_area); r = chunk_io(ps, chunk, rw, 0); if (r) return r; - ps->current_area = area; return 0; } -static int zero_area(struct pstore *ps, chunk_t area) +static void zero_memory_area(struct pstore *ps) { memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT); - return area_io(ps, area, WRITE); +} + +static int zero_disk_area(struct pstore *ps, chunk_t area) +{ + struct dm_io_region where = { + .bdev = ps->snap->cow->bdev, + .sector = ps->snap->chunk_size * area_location(ps, area), + .count = ps->snap->chunk_size, + }; + struct dm_io_request io_req = { + .bi_rw = WRITE, + .mem.type = DM_IO_VMA, + .mem.ptr.vma = ps->zero_area, + .client = ps->io_client, + .notify.fn = NULL, + }; + + return dm_io(&io_req, 1, &where, NULL); } static int read_header(struct pstore *ps, int *new_snapshot) @@ -411,15 +440,14 @@ static int insert_exceptions(struct pstore *ps, int *full) static int read_exceptions(struct pstore *ps) { - chunk_t area; int r, full = 1; /* * Keeping reading chunks and inserting exceptions until * we find a partially full area. 
*/ - for (area = 0; full; area++) { - r = area_io(ps, area, READ); + for (ps->current_area = 0; full; ps->current_area++) { + r = area_io(ps, READ); if (r) return r; @@ -428,6 +456,8 @@ static int read_exceptions(struct pstore *ps) return r; } + ps->current_area--; + return 0; } @@ -486,12 +516,13 @@ static int persistent_read_metadata(struct exception_store *store) return r; } - r = zero_area(ps, 0); + ps->current_area = 0; + zero_memory_area(ps); + r = zero_disk_area(ps, 0); if (r) { - DMWARN("zero_area(0) failed"); + DMWARN("zero_disk_area(0) failed"); return r; } - } else { /* * Sanity checks. @@ -551,7 +582,6 @@ static void persistent_commit(struct exception_store *store, void (*callback) (void *, int success), void *callback_context) { - int r; unsigned int i; struct pstore *ps = get_info(store); struct disk_exception de; @@ -572,33 +602,41 @@ static void persistent_commit(struct exception_store *store, cb->context = callback_context; /* - * If there are no more exceptions in flight, or we have - * filled this metadata area we commit the exceptions to - * disk. + * If there are exceptions in flight and we have not yet + * filled this metadata area there's nothing more to do. */ - if (atomic_dec_and_test(&ps->pending_count) || - (ps->current_committed == ps->exceptions_per_area)) { - r = area_io(ps, ps->current_area, WRITE); - if (r) - ps->valid = 0; + if (!atomic_dec_and_test(&ps->pending_count) && + (ps->current_committed != ps->exceptions_per_area)) + return; - /* - * Have we completely filled the current area ? - */ - if (ps->current_committed == ps->exceptions_per_area) { - ps->current_committed = 0; - r = zero_area(ps, ps->current_area + 1); - if (r) - ps->valid = 0; - } + /* + * If we completely filled the current area, then wipe the next one. + */ + if ((ps->current_committed == ps->exceptions_per_area) && + zero_disk_area(ps, ps->current_area + 1)) + ps->valid = 0; - for (i = 0; i < ps->callback_count; i++) { - cb = ps->callbacks + i; - cb->callback(cb->context, r == 0 ? 1 : 0); - } + /* + * Commit exceptions to disk. + */ + if (ps->valid && area_io(ps, WRITE)) + ps->valid = 0; - ps->callback_count = 0; + /* + * Advance to the next area if this one is full. + */ + if (ps->current_committed == ps->exceptions_per_area) { + ps->current_committed = 0; + ps->current_area++; + zero_memory_area(ps); } + + for (i = 0; i < ps->callback_count; i++) { + cb = ps->callbacks + i; + cb->callback(cb->context, ps->valid); + } + + ps->callback_count = 0; } static void persistent_drop(struct exception_store *store) diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 4789c42d9a3a..a34338567a2a 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -5,7 +5,7 @@ * This file is released under the GPL. 
*/ -#include "dm.h" +#include <linux/device-mapper.h> #include <linux/bio.h> #include <linux/mempool.h> @@ -56,7 +56,7 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages) if (!client->pool) goto bad; - client->bios = bioset_create(16, 16); + client->bios = bioset_create(16, 0); if (!client->bios) goto bad; diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index dca401dc70a0..777c948180f9 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -988,9 +988,9 @@ static int dev_wait(struct dm_ioctl *param, size_t param_size) return r; } -static inline int get_mode(struct dm_ioctl *param) +static inline fmode_t get_mode(struct dm_ioctl *param) { - int mode = FMODE_READ | FMODE_WRITE; + fmode_t mode = FMODE_READ | FMODE_WRITE; if (param->flags & DM_READONLY_FLAG) mode = FMODE_READ; diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index 996802b8a452..3073618269ea 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -22,6 +22,7 @@ #include <linux/vmalloc.h> #include <linux/workqueue.h> #include <linux/mutex.h> +#include <linux/device-mapper.h> #include <linux/dm-kcopyd.h> #include "dm.h" @@ -268,6 +269,17 @@ static void push(struct list_head *jobs, struct kcopyd_job *job) spin_unlock_irqrestore(&kc->job_lock, flags); } + +static void push_head(struct list_head *jobs, struct kcopyd_job *job) +{ + unsigned long flags; + struct dm_kcopyd_client *kc = job->kc; + + spin_lock_irqsave(&kc->job_lock, flags); + list_add(&job->list, jobs); + spin_unlock_irqrestore(&kc->job_lock, flags); +} + /* * These three functions process 1 item from the corresponding * job list. @@ -398,7 +410,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc, * We couldn't service this job ATM, so * push this job back onto the list. 
*/ - push(jobs, job); + push_head(jobs, job); break; } diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 6449bcdf84ca..44042becad8a 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -5,12 +5,12 @@ */ #include "dm.h" - #include <linux/module.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/bio.h> #include <linux/slab.h> +#include <linux/device-mapper.h> #define DM_MSG_PREFIX "linear" @@ -110,20 +110,11 @@ static int linear_status(struct dm_target *ti, status_type_t type, return 0; } -static int linear_ioctl(struct dm_target *ti, struct inode *inode, - struct file *filp, unsigned int cmd, +static int linear_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg) { struct linear_c *lc = (struct linear_c *) ti->private; - struct block_device *bdev = lc->dev->bdev; - struct file fake_file = {}; - struct dentry fake_dentry = {}; - - fake_file.f_mode = lc->dev->mode; - fake_file.f_path.dentry = &fake_dentry; - fake_dentry.d_inode = bdev->bd_inode; - - return blkdev_driver_ioctl(bdev->bd_inode, &fake_file, bdev->bd_disk, cmd, arg); + return __blkdev_driver_ioctl(lc->dev->bdev, lc->dev->mode, cmd, arg); } static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm, diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 5b48478c79f5..a8c0fc79ca78 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -12,7 +12,7 @@ #include <linux/dm-io.h> #include <linux/dm-dirty-log.h> -#include "dm.h" +#include <linux/device-mapper.h> #define DM_MSG_PREFIX "dirty region log" diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 103304c1e3b0..3d7f4923cd13 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -5,7 +5,8 @@ * This file is released under the GPL. */ -#include "dm.h" +#include <linux/device-mapper.h> + #include "dm-path-selector.h" #include "dm-bio-list.h" #include "dm-bio-record.h" @@ -440,13 +441,13 @@ static void process_queued_ios(struct work_struct *work) __choose_pgpath(m); pgpath = m->current_pgpath; - m->pgpath_to_activate = m->current_pgpath; if ((pgpath && !m->queue_io) || (!pgpath && !m->queue_if_no_path)) must_queue = 0; - if (m->pg_init_required && !m->pg_init_in_progress) { + if (m->pg_init_required && !m->pg_init_in_progress && pgpath) { + m->pgpath_to_activate = pgpath; m->pg_init_count++; m->pg_init_required = 0; m->pg_init_in_progress = 1; @@ -707,6 +708,10 @@ static int parse_hw_handler(struct arg_set *as, struct multipath *m) m->hw_handler_name = NULL; return -EINVAL; } + + if (hw_argc > 1) + DMWARN("Ignoring user-specified arguments for " + "hardware handler \"%s\"", m->hw_handler_name); consume(as, hw_argc - 1); return 0; @@ -849,7 +854,7 @@ static int multipath_map(struct dm_target *ti, struct bio *bio, dm_bio_record(&mpio->details, bio); map_context->ptr = mpio; - bio->bi_rw |= (1 << BIO_RW_FAILFAST); + bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT); r = map_io(m, bio, mpio, 0); if (r < 0 || r == DM_MAPIO_REQUEUE) mempool_free(mpio, m->mpio_pool); @@ -1395,19 +1400,15 @@ error: return -EINVAL; } -static int multipath_ioctl(struct dm_target *ti, struct inode *inode, - struct file *filp, unsigned int cmd, +static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg) { struct multipath *m = (struct multipath *) ti->private; struct block_device *bdev = NULL; + fmode_t mode = 0; unsigned long flags; - struct file fake_file = {}; - struct dentry fake_dentry = {}; int r = 0; - fake_file.f_path.dentry = &fake_dentry; - spin_lock_irqsave(&m->lock, 
flags); if (!m->current_pgpath) @@ -1415,8 +1416,7 @@ static int multipath_ioctl(struct dm_target *ti, struct inode *inode, if (m->current_pgpath) { bdev = m->current_pgpath->path.dev->bdev; - fake_dentry.d_inode = bdev->bd_inode; - fake_file.f_mode = m->current_pgpath->path.dev->mode; + mode = m->current_pgpath->path.dev->mode; } if (m->queue_io) @@ -1426,8 +1426,7 @@ static int multipath_ioctl(struct dm_target *ti, struct inode *inode, spin_unlock_irqrestore(&m->lock, flags); - return r ? : blkdev_driver_ioctl(bdev->bd_inode, &fake_file, - bdev->bd_disk, cmd, arg); + return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg); } /*----------------------------------------------------------------- diff --git a/drivers/md/dm-path-selector.c b/drivers/md/dm-path-selector.c index ca1bb636a3e4..96ea226155b1 100644 --- a/drivers/md/dm-path-selector.c +++ b/drivers/md/dm-path-selector.c @@ -9,7 +9,8 @@ * Path selector registration. */ -#include "dm.h" +#include <linux/device-mapper.h> + #include "dm-path-selector.h" #include <linux/slab.h> diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 29913e42c4ab..ec43f9fa4b2a 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -1,30 +1,30 @@ /* * Copyright (C) 2003 Sistina Software Limited. + * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved. * * This file is released under the GPL. */ -#include "dm.h" #include "dm-bio-list.h" #include "dm-bio-record.h" -#include <linux/ctype.h> #include <linux/init.h> #include <linux/mempool.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/slab.h> -#include <linux/time.h> -#include <linux/vmalloc.h> #include <linux/workqueue.h> -#include <linux/log2.h> -#include <linux/hardirq.h> +#include <linux/device-mapper.h> #include <linux/dm-io.h> #include <linux/dm-dirty-log.h> #include <linux/dm-kcopyd.h> +#include <linux/dm-region-hash.h> #define DM_MSG_PREFIX "raid1" + +#define MAX_RECOVERY 1 /* Maximum number of regions recovered in parallel. */ #define DM_IO_PAGES 64 +#define DM_KCOPYD_PAGES 64 #define DM_RAID1_HANDLE_ERRORS 0x01 #define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS) @@ -32,87 +32,6 @@ static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped); /*----------------------------------------------------------------- - * Region hash - * - * The mirror splits itself up into discrete regions. Each - * region can be in one of three states: clean, dirty, - * nosync. There is no need to put clean regions in the hash. - * - * In addition to being present in the hash table a region _may_ - * be present on one of three lists. - * - * clean_regions: Regions on this list have no io pending to - * them, they are in sync, we are no longer interested in them, - * they are dull. rh_update_states() will remove them from the - * hash table. - * - * quiesced_regions: These regions have been spun down, ready - * for recovery. rh_recovery_start() will remove regions from - * this list and hand them to kmirrord, which will schedule the - * recovery io with kcopyd. - * - * recovered_regions: Regions that kcopyd has successfully - * recovered. rh_update_states() will now schedule any delayed - * io, up the recovery_count, and remove the region from the - * hash. - * - * There are 2 locks: - * A rw spin lock 'hash_lock' protects just the hash table, - * this is never held in write mode from interrupt context, - * which I believe means that we only have to disable irqs when - * doing a write lock. 
- * - * An ordinary spin lock 'region_lock' that protects the three - * lists in the region_hash, with the 'state', 'list' and - * 'bhs_delayed' fields of the regions. This is used from irq - * context, so all other uses will have to suspend local irqs. - *---------------------------------------------------------------*/ -struct mirror_set; -struct region_hash { - struct mirror_set *ms; - uint32_t region_size; - unsigned region_shift; - - /* holds persistent region state */ - struct dm_dirty_log *log; - - /* hash table */ - rwlock_t hash_lock; - mempool_t *region_pool; - unsigned int mask; - unsigned int nr_buckets; - struct list_head *buckets; - - spinlock_t region_lock; - atomic_t recovery_in_flight; - struct semaphore recovery_count; - struct list_head clean_regions; - struct list_head quiesced_regions; - struct list_head recovered_regions; - struct list_head failed_recovered_regions; -}; - -enum { - RH_CLEAN, - RH_DIRTY, - RH_NOSYNC, - RH_RECOVERING -}; - -struct region { - struct region_hash *rh; /* FIXME: can we get rid of this ? */ - region_t key; - int state; - - struct list_head hash_list; - struct list_head list; - - atomic_t pending; - struct bio_list delayed_bios; -}; - - -/*----------------------------------------------------------------- * Mirror set structures. *---------------------------------------------------------------*/ enum dm_raid1_error { @@ -132,8 +51,7 @@ struct mirror { struct mirror_set { struct dm_target *ti; struct list_head list; - struct region_hash rh; - struct dm_kcopyd_client *kcopyd_client; + uint64_t features; spinlock_t lock; /* protects the lists */ @@ -141,6 +59,8 @@ struct mirror_set { struct bio_list writes; struct bio_list failures; + struct dm_region_hash *rh; + struct dm_kcopyd_client *kcopyd_client; struct dm_io_client *io_client; mempool_t *read_record_pool; @@ -159,25 +79,14 @@ struct mirror_set { struct work_struct trigger_event; - unsigned int nr_mirrors; + unsigned nr_mirrors; struct mirror mirror[0]; }; -/* - * Conversion fns - */ -static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio) -{ - return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift; -} - -static inline sector_t region_to_sector(struct region_hash *rh, region_t region) +static void wakeup_mirrord(void *context) { - return region << rh->region_shift; -} + struct mirror_set *ms = context; -static void wake(struct mirror_set *ms) -{ queue_work(ms->kmirrord_wq, &ms->kmirrord_work); } @@ -186,7 +95,7 @@ static void delayed_wake_fn(unsigned long data) struct mirror_set *ms = (struct mirror_set *) data; clear_bit(0, &ms->timer_pending); - wake(ms); + wakeup_mirrord(ms); } static void delayed_wake(struct mirror_set *ms) @@ -200,473 +109,34 @@ static void delayed_wake(struct mirror_set *ms) add_timer(&ms->timer); } -/* FIXME move this */ -static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); - -#define MIN_REGIONS 64 -#define MAX_RECOVERY 1 -static int rh_init(struct region_hash *rh, struct mirror_set *ms, - struct dm_dirty_log *log, uint32_t region_size, - region_t nr_regions) -{ - unsigned int nr_buckets, max_buckets; - size_t i; - - /* - * Calculate a suitable number of buckets for our hash - * table. 
- */ - max_buckets = nr_regions >> 6; - for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1) - ; - nr_buckets >>= 1; - - rh->ms = ms; - rh->log = log; - rh->region_size = region_size; - rh->region_shift = ffs(region_size) - 1; - rwlock_init(&rh->hash_lock); - rh->mask = nr_buckets - 1; - rh->nr_buckets = nr_buckets; - - rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets)); - if (!rh->buckets) { - DMERR("unable to allocate region hash memory"); - return -ENOMEM; - } - - for (i = 0; i < nr_buckets; i++) - INIT_LIST_HEAD(rh->buckets + i); - - spin_lock_init(&rh->region_lock); - sema_init(&rh->recovery_count, 0); - atomic_set(&rh->recovery_in_flight, 0); - INIT_LIST_HEAD(&rh->clean_regions); - INIT_LIST_HEAD(&rh->quiesced_regions); - INIT_LIST_HEAD(&rh->recovered_regions); - INIT_LIST_HEAD(&rh->failed_recovered_regions); - - rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS, - sizeof(struct region)); - if (!rh->region_pool) { - vfree(rh->buckets); - rh->buckets = NULL; - return -ENOMEM; - } - - return 0; -} - -static void rh_exit(struct region_hash *rh) -{ - unsigned int h; - struct region *reg, *nreg; - - BUG_ON(!list_empty(&rh->quiesced_regions)); - for (h = 0; h < rh->nr_buckets; h++) { - list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) { - BUG_ON(atomic_read(®->pending)); - mempool_free(reg, rh->region_pool); - } - } - - if (rh->log) - dm_dirty_log_destroy(rh->log); - if (rh->region_pool) - mempool_destroy(rh->region_pool); - vfree(rh->buckets); -} - -#define RH_HASH_MULT 2654435387U - -static inline unsigned int rh_hash(struct region_hash *rh, region_t region) -{ - return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask; -} - -static struct region *__rh_lookup(struct region_hash *rh, region_t region) -{ - struct region *reg; - - list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list) - if (reg->key == region) - return reg; - - return NULL; -} - -static void __rh_insert(struct region_hash *rh, struct region *reg) -{ - unsigned int h = rh_hash(rh, reg->key); - list_add(®->hash_list, rh->buckets + h); -} - -static struct region *__rh_alloc(struct region_hash *rh, region_t region) -{ - struct region *reg, *nreg; - - read_unlock(&rh->hash_lock); - nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC); - if (unlikely(!nreg)) - nreg = kmalloc(sizeof(struct region), GFP_NOIO); - nreg->state = rh->log->type->in_sync(rh->log, region, 1) ? 
- RH_CLEAN : RH_NOSYNC; - nreg->rh = rh; - nreg->key = region; - - INIT_LIST_HEAD(&nreg->list); - - atomic_set(&nreg->pending, 0); - bio_list_init(&nreg->delayed_bios); - write_lock_irq(&rh->hash_lock); - - reg = __rh_lookup(rh, region); - if (reg) - /* we lost the race */ - mempool_free(nreg, rh->region_pool); - - else { - __rh_insert(rh, nreg); - if (nreg->state == RH_CLEAN) { - spin_lock(&rh->region_lock); - list_add(&nreg->list, &rh->clean_regions); - spin_unlock(&rh->region_lock); - } - reg = nreg; - } - write_unlock_irq(&rh->hash_lock); - read_lock(&rh->hash_lock); - - return reg; -} - -static inline struct region *__rh_find(struct region_hash *rh, region_t region) -{ - struct region *reg; - - reg = __rh_lookup(rh, region); - if (!reg) - reg = __rh_alloc(rh, region); - - return reg; -} - -static int rh_state(struct region_hash *rh, region_t region, int may_block) -{ - int r; - struct region *reg; - - read_lock(&rh->hash_lock); - reg = __rh_lookup(rh, region); - read_unlock(&rh->hash_lock); - - if (reg) - return reg->state; - - /* - * The region wasn't in the hash, so we fall back to the - * dirty log. - */ - r = rh->log->type->in_sync(rh->log, region, may_block); - - /* - * Any error from the dirty log (eg. -EWOULDBLOCK) gets - * taken as a RH_NOSYNC - */ - return r == 1 ? RH_CLEAN : RH_NOSYNC; -} - -static inline int rh_in_sync(struct region_hash *rh, - region_t region, int may_block) +static void wakeup_all_recovery_waiters(void *context) { - int state = rh_state(rh, region, may_block); - return state == RH_CLEAN || state == RH_DIRTY; + wake_up_all(&_kmirrord_recovery_stopped); } -static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list) -{ - struct bio *bio; - - while ((bio = bio_list_pop(bio_list))) { - queue_bio(ms, bio, WRITE); - } -} - -static void complete_resync_work(struct region *reg, int success) -{ - struct region_hash *rh = reg->rh; - - rh->log->type->set_region_sync(rh->log, reg->key, success); - - /* - * Dispatch the bios before we call 'wake_up_all'. - * This is important because if we are suspending, - * we want to know that recovery is complete and - * the work queue is flushed. If we wake_up_all - * before we dispatch_bios (queue bios and call wake()), - * then we risk suspending before the work queue - * has been properly flushed. - */ - dispatch_bios(rh->ms, ®->delayed_bios); - if (atomic_dec_and_test(&rh->recovery_in_flight)) - wake_up_all(&_kmirrord_recovery_stopped); - up(&rh->recovery_count); -} - -static void rh_update_states(struct region_hash *rh) -{ - struct region *reg, *next; - - LIST_HEAD(clean); - LIST_HEAD(recovered); - LIST_HEAD(failed_recovered); - - /* - * Quickly grab the lists. - */ - write_lock_irq(&rh->hash_lock); - spin_lock(&rh->region_lock); - if (!list_empty(&rh->clean_regions)) { - list_splice_init(&rh->clean_regions, &clean); - - list_for_each_entry(reg, &clean, list) - list_del(®->hash_list); - } - - if (!list_empty(&rh->recovered_regions)) { - list_splice_init(&rh->recovered_regions, &recovered); - - list_for_each_entry (reg, &recovered, list) - list_del(®->hash_list); - } - - if (!list_empty(&rh->failed_recovered_regions)) { - list_splice_init(&rh->failed_recovered_regions, - &failed_recovered); - - list_for_each_entry(reg, &failed_recovered, list) - list_del(®->hash_list); - } - - spin_unlock(&rh->region_lock); - write_unlock_irq(&rh->hash_lock); - - /* - * All the regions on the recovered and clean lists have - * now been pulled out of the system, so no need to do - * any more locking. 
- */ - list_for_each_entry_safe (reg, next, &recovered, list) { - rh->log->type->clear_region(rh->log, reg->key); - complete_resync_work(reg, 1); - mempool_free(reg, rh->region_pool); - } - - list_for_each_entry_safe(reg, next, &failed_recovered, list) { - complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1); - mempool_free(reg, rh->region_pool); - } - - list_for_each_entry_safe(reg, next, &clean, list) { - rh->log->type->clear_region(rh->log, reg->key); - mempool_free(reg, rh->region_pool); - } - - rh->log->type->flush(rh->log); -} - -static void rh_inc(struct region_hash *rh, region_t region) -{ - struct region *reg; - - read_lock(&rh->hash_lock); - reg = __rh_find(rh, region); - - spin_lock_irq(&rh->region_lock); - atomic_inc(®->pending); - - if (reg->state == RH_CLEAN) { - reg->state = RH_DIRTY; - list_del_init(®->list); /* take off the clean list */ - spin_unlock_irq(&rh->region_lock); - - rh->log->type->mark_region(rh->log, reg->key); - } else - spin_unlock_irq(&rh->region_lock); - - - read_unlock(&rh->hash_lock); -} - -static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios) -{ - struct bio *bio; - - for (bio = bios->head; bio; bio = bio->bi_next) - rh_inc(rh, bio_to_region(rh, bio)); -} - -static void rh_dec(struct region_hash *rh, region_t region) +static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw) { unsigned long flags; - struct region *reg; int should_wake = 0; + struct bio_list *bl; - read_lock(&rh->hash_lock); - reg = __rh_lookup(rh, region); - read_unlock(&rh->hash_lock); - - spin_lock_irqsave(&rh->region_lock, flags); - if (atomic_dec_and_test(®->pending)) { - /* - * There is no pending I/O for this region. - * We can move the region to corresponding list for next action. - * At this point, the region is not yet connected to any list. - * - * If the state is RH_NOSYNC, the region should be kept off - * from clean list. - * The hash entry for RH_NOSYNC will remain in memory - * until the region is recovered or the map is reloaded. - */ - - /* do nothing for RH_NOSYNC */ - if (reg->state == RH_RECOVERING) { - list_add_tail(®->list, &rh->quiesced_regions); - } else if (reg->state == RH_DIRTY) { - reg->state = RH_CLEAN; - list_add(®->list, &rh->clean_regions); - } - should_wake = 1; - } - spin_unlock_irqrestore(&rh->region_lock, flags); + bl = (rw == WRITE) ? &ms->writes : &ms->reads; + spin_lock_irqsave(&ms->lock, flags); + should_wake = !(bl->head); + bio_list_add(bl, bio); + spin_unlock_irqrestore(&ms->lock, flags); if (should_wake) - wake(rh->ms); + wakeup_mirrord(ms); } -/* - * Starts quiescing a region in preparation for recovery. - */ -static int __rh_recovery_prepare(struct region_hash *rh) +static void dispatch_bios(void *context, struct bio_list *bio_list) { - int r; - struct region *reg; - region_t region; - - /* - * Ask the dirty log what's next. - */ - r = rh->log->type->get_resync_work(rh->log, ®ion); - if (r <= 0) - return r; - - /* - * Get this region, and start it quiescing by setting the - * recovering flag. - */ - read_lock(&rh->hash_lock); - reg = __rh_find(rh, region); - read_unlock(&rh->hash_lock); - - spin_lock_irq(&rh->region_lock); - reg->state = RH_RECOVERING; - - /* Already quiesced ? 
*/ - if (atomic_read(®->pending)) - list_del_init(®->list); - else - list_move(®->list, &rh->quiesced_regions); - - spin_unlock_irq(&rh->region_lock); - - return 1; -} - -static void rh_recovery_prepare(struct region_hash *rh) -{ - /* Extra reference to avoid race with rh_stop_recovery */ - atomic_inc(&rh->recovery_in_flight); - - while (!down_trylock(&rh->recovery_count)) { - atomic_inc(&rh->recovery_in_flight); - if (__rh_recovery_prepare(rh) <= 0) { - atomic_dec(&rh->recovery_in_flight); - up(&rh->recovery_count); - break; - } - } - - /* Drop the extra reference */ - if (atomic_dec_and_test(&rh->recovery_in_flight)) - wake_up_all(&_kmirrord_recovery_stopped); -} - -/* - * Returns any quiesced regions. - */ -static struct region *rh_recovery_start(struct region_hash *rh) -{ - struct region *reg = NULL; - - spin_lock_irq(&rh->region_lock); - if (!list_empty(&rh->quiesced_regions)) { - reg = list_entry(rh->quiesced_regions.next, - struct region, list); - list_del_init(®->list); /* remove from the quiesced list */ - } - spin_unlock_irq(&rh->region_lock); - - return reg; -} - -static void rh_recovery_end(struct region *reg, int success) -{ - struct region_hash *rh = reg->rh; - - spin_lock_irq(&rh->region_lock); - if (success) - list_add(®->list, ®->rh->recovered_regions); - else { - reg->state = RH_NOSYNC; - list_add(®->list, ®->rh->failed_recovered_regions); - } - spin_unlock_irq(&rh->region_lock); - - wake(rh->ms); -} - -static int rh_flush(struct region_hash *rh) -{ - return rh->log->type->flush(rh->log); -} - -static void rh_delay(struct region_hash *rh, struct bio *bio) -{ - struct region *reg; - - read_lock(&rh->hash_lock); - reg = __rh_find(rh, bio_to_region(rh, bio)); - bio_list_add(®->delayed_bios, bio); - read_unlock(&rh->hash_lock); -} - -static void rh_stop_recovery(struct region_hash *rh) -{ - int i; - - /* wait for any recovering regions */ - for (i = 0; i < MAX_RECOVERY; i++) - down(&rh->recovery_count); -} - -static void rh_start_recovery(struct region_hash *rh) -{ - int i; - - for (i = 0; i < MAX_RECOVERY; i++) - up(&rh->recovery_count); + struct mirror_set *ms = context; + struct bio *bio; - wake(rh->ms); + while ((bio = bio_list_pop(bio_list))) + queue_bio(ms, bio, WRITE); } #define MIN_READ_RECORDS 20 @@ -776,8 +246,8 @@ out: static void recovery_complete(int read_err, unsigned long write_err, void *context) { - struct region *reg = (struct region *)context; - struct mirror_set *ms = reg->rh->ms; + struct dm_region *reg = context; + struct mirror_set *ms = dm_rh_region_context(reg); int m, bit = 0; if (read_err) { @@ -803,31 +273,33 @@ static void recovery_complete(int read_err, unsigned long write_err, } } - rh_recovery_end(reg, !(read_err || write_err)); + dm_rh_recovery_end(reg, !(read_err || write_err)); } -static int recover(struct mirror_set *ms, struct region *reg) +static int recover(struct mirror_set *ms, struct dm_region *reg) { int r; - unsigned int i; + unsigned i; struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest; struct mirror *m; unsigned long flags = 0; + region_t key = dm_rh_get_region_key(reg); + sector_t region_size = dm_rh_get_region_size(ms->rh); /* fill in the source */ m = get_default_mirror(ms); from.bdev = m->dev->bdev; - from.sector = m->offset + region_to_sector(reg->rh, reg->key); - if (reg->key == (ms->nr_regions - 1)) { + from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key); + if (key == (ms->nr_regions - 1)) { /* * The final region may be smaller than * region_size. 
*/ - from.count = ms->ti->len & (reg->rh->region_size - 1); + from.count = ms->ti->len & (region_size - 1); if (!from.count) - from.count = reg->rh->region_size; + from.count = region_size; } else - from.count = reg->rh->region_size; + from.count = region_size; /* fill in the destinations */ for (i = 0, dest = to; i < ms->nr_mirrors; i++) { @@ -836,7 +308,7 @@ static int recover(struct mirror_set *ms, struct region *reg) m = ms->mirror + i; dest->bdev = m->dev->bdev; - dest->sector = m->offset + region_to_sector(reg->rh, reg->key); + dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key); dest->count = from.count; dest++; } @@ -853,22 +325,22 @@ static int recover(struct mirror_set *ms, struct region *reg) static void do_recovery(struct mirror_set *ms) { + struct dm_region *reg; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); int r; - struct region *reg; - struct dm_dirty_log *log = ms->rh.log; /* * Start quiescing some regions. */ - rh_recovery_prepare(&ms->rh); + dm_rh_recovery_prepare(ms->rh); /* * Copy any already quiesced regions. */ - while ((reg = rh_recovery_start(&ms->rh))) { + while ((reg = dm_rh_recovery_start(ms->rh))) { r = recover(ms, reg); if (r) - rh_recovery_end(reg, 0); + dm_rh_recovery_end(reg, 0); } /* @@ -909,9 +381,10 @@ static int default_ok(struct mirror *m) static int mirror_available(struct mirror_set *ms, struct bio *bio) { - region_t region = bio_to_region(&ms->rh, bio); + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); + region_t region = dm_rh_bio_to_region(ms->rh, bio); - if (ms->rh.log->type->in_sync(ms->rh.log, region, 0)) + if (log->type->in_sync(log, region, 0)) return choose_mirror(ms, bio->bi_sector) ? 1 : 0; return 0; @@ -985,7 +458,14 @@ static void read_async_bio(struct mirror *m, struct bio *bio) map_region(&io, m, bio); bio_set_m(bio, m); - (void) dm_io(&io_req, 1, &io, NULL); + BUG_ON(dm_io(&io_req, 1, &io, NULL)); +} + +static inline int region_in_sync(struct mirror_set *ms, region_t region, + int may_block) +{ + int state = dm_rh_get_state(ms->rh, region, may_block); + return state == DM_RH_CLEAN || state == DM_RH_DIRTY; } static void do_reads(struct mirror_set *ms, struct bio_list *reads) @@ -995,13 +475,13 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) struct mirror *m; while ((bio = bio_list_pop(reads))) { - region = bio_to_region(&ms->rh, bio); + region = dm_rh_bio_to_region(ms->rh, bio); m = get_default_mirror(ms); /* * We can only read balance if the region is in sync. */ - if (likely(rh_in_sync(&ms->rh, region, 1))) + if (likely(region_in_sync(ms, region, 1))) m = choose_mirror(ms, bio->bi_sector); else if (m && atomic_read(&m->error_count)) m = NULL; @@ -1024,57 +504,6 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) * NOSYNC: increment pending, just write to the default mirror *---------------------------------------------------------------*/ -/* __bio_mark_nosync - * @ms - * @bio - * @done - * @error - * - * The bio was written on some mirror(s) but failed on other mirror(s). - * We can successfully endio the bio but should avoid the region being - * marked clean by setting the state RH_NOSYNC. - * - * This function is _not_ safe in interrupt context! 
- */ -static void __bio_mark_nosync(struct mirror_set *ms, - struct bio *bio, unsigned done, int error) -{ - unsigned long flags; - struct region_hash *rh = &ms->rh; - struct dm_dirty_log *log = ms->rh.log; - struct region *reg; - region_t region = bio_to_region(rh, bio); - int recovering = 0; - - /* We must inform the log that the sync count has changed. */ - log->type->set_region_sync(log, region, 0); - ms->in_sync = 0; - - read_lock(&rh->hash_lock); - reg = __rh_find(rh, region); - read_unlock(&rh->hash_lock); - - /* region hash entry should exist because write was in-flight */ - BUG_ON(!reg); - BUG_ON(!list_empty(®->list)); - - spin_lock_irqsave(&rh->region_lock, flags); - /* - * Possible cases: - * 1) RH_DIRTY - * 2) RH_NOSYNC: was dirty, other preceeding writes failed - * 3) RH_RECOVERING: flushing pending writes - * Either case, the region should have not been connected to list. - */ - recovering = (reg->state == RH_RECOVERING); - reg->state = RH_NOSYNC; - BUG_ON(!list_empty(®->list)); - spin_unlock_irqrestore(&rh->region_lock, flags); - - bio_endio(bio, error); - if (recovering) - complete_resync_work(reg, 0); -} static void write_callback(unsigned long error, void *context) { @@ -1119,7 +548,7 @@ static void write_callback(unsigned long error, void *context) bio_list_add(&ms->failures, bio); spin_unlock_irqrestore(&ms->lock, flags); if (should_wake) - wake(ms); + wakeup_mirrord(ms); return; } out: @@ -1149,7 +578,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio) */ bio_set_m(bio, get_default_mirror(ms)); - (void) dm_io(&io_req, ms->nr_mirrors, io, NULL); + BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL)); } static void do_writes(struct mirror_set *ms, struct bio_list *writes) @@ -1169,18 +598,19 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) bio_list_init(&recover); while ((bio = bio_list_pop(writes))) { - state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1); + state = dm_rh_get_state(ms->rh, + dm_rh_bio_to_region(ms->rh, bio), 1); switch (state) { - case RH_CLEAN: - case RH_DIRTY: + case DM_RH_CLEAN: + case DM_RH_DIRTY: this_list = &sync; break; - case RH_NOSYNC: + case DM_RH_NOSYNC: this_list = &nosync; break; - case RH_RECOVERING: + case DM_RH_RECOVERING: this_list = &recover; break; } @@ -1193,9 +623,9 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) * be written to (writes to recover regions are going to * be delayed). */ - rh_inc_pending(&ms->rh, &sync); - rh_inc_pending(&ms->rh, &nosync); - ms->log_failure = rh_flush(&ms->rh) ? 1 : 0; + dm_rh_inc_pending(ms->rh, &sync); + dm_rh_inc_pending(ms->rh, &nosync); + ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0; /* * Dispatch io. 
@@ -1204,13 +634,13 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes) spin_lock_irq(&ms->lock); bio_list_merge(&ms->failures, &sync); spin_unlock_irq(&ms->lock); - wake(ms); + wakeup_mirrord(ms); } else while ((bio = bio_list_pop(&sync))) do_write(ms, bio); while ((bio = bio_list_pop(&recover))) - rh_delay(&ms->rh, bio); + dm_rh_delay(ms->rh, bio); while ((bio = bio_list_pop(&nosync))) { map_bio(get_default_mirror(ms), bio); @@ -1226,8 +656,10 @@ static void do_failures(struct mirror_set *ms, struct bio_list *failures) return; if (!ms->log_failure) { - while ((bio = bio_list_pop(failures))) - __bio_mark_nosync(ms, bio, bio->bi_size, 0); + while ((bio = bio_list_pop(failures))) { + ms->in_sync = 0; + dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0); + } return; } @@ -1280,8 +712,8 @@ static void trigger_event(struct work_struct *work) *---------------------------------------------------------------*/ static void do_mirror(struct work_struct *work) { - struct mirror_set *ms =container_of(work, struct mirror_set, - kmirrord_work); + struct mirror_set *ms = container_of(work, struct mirror_set, + kmirrord_work); struct bio_list reads, writes, failures; unsigned long flags; @@ -1294,7 +726,7 @@ static void do_mirror(struct work_struct *work) bio_list_init(&ms->failures); spin_unlock_irqrestore(&ms->lock, flags); - rh_update_states(&ms->rh); + dm_rh_update_states(ms->rh, errors_handled(ms)); do_recovery(ms); do_reads(ms, &reads); do_writes(ms, &writes); @@ -1303,7 +735,6 @@ static void do_mirror(struct work_struct *work) dm_table_unplug_all(ms->ti->table); } - /*----------------------------------------------------------------- * Target functions *---------------------------------------------------------------*/ @@ -1315,9 +746,6 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors, size_t len; struct mirror_set *ms = NULL; - if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors)) - return NULL; - len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors); ms = kzalloc(len, GFP_KERNEL); @@ -1353,7 +781,11 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors, return NULL; } - if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) { + ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord, + wakeup_all_recovery_waiters, + ms->ti->begin, MAX_RECOVERY, + dl, region_size, ms->nr_regions); + if (IS_ERR(ms->rh)) { ti->error = "Error creating dirty region hash"; dm_io_client_destroy(ms->io_client); mempool_destroy(ms->read_record_pool); @@ -1371,7 +803,7 @@ static void free_context(struct mirror_set *ms, struct dm_target *ti, dm_put_device(ti, ms->mirror[m].dev); dm_io_client_destroy(ms->io_client); - rh_exit(&ms->rh); + dm_region_hash_destroy(ms->rh); mempool_destroy(ms->read_record_pool); kfree(ms); } @@ -1411,10 +843,10 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti, * Create dirty log: log_type #log_params <log_params> */ static struct dm_dirty_log *create_dirty_log(struct dm_target *ti, - unsigned int argc, char **argv, - unsigned int *args_used) + unsigned argc, char **argv, + unsigned *args_used) { - unsigned int param_count; + unsigned param_count; struct dm_dirty_log *dl; if (argc < 2) { @@ -1545,7 +977,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) } ti->private = ms; - ti->split_io = ms->rh.region_size; + ti->split_io = dm_rh_get_region_size(ms->rh); ms->kmirrord_wq = create_singlethread_workqueue("kmirrord"); if (!ms->kmirrord_wq) { @@ -1580,11 +1012,11 @@ static 
int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto err_destroy_wq; } - r = dm_kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client); + r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client); if (r) goto err_destroy_wq; - wake(ms); + wakeup_mirrord(ms); return 0; err_destroy_wq: @@ -1600,27 +1032,12 @@ static void mirror_dtr(struct dm_target *ti) del_timer_sync(&ms->timer); flush_workqueue(ms->kmirrord_wq); + flush_scheduled_work(); dm_kcopyd_client_destroy(ms->kcopyd_client); destroy_workqueue(ms->kmirrord_wq); free_context(ms, ti, ms->nr_mirrors); } -static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw) -{ - unsigned long flags; - int should_wake = 0; - struct bio_list *bl; - - bl = (rw == WRITE) ? &ms->writes : &ms->reads; - spin_lock_irqsave(&ms->lock, flags); - should_wake = !(bl->head); - bio_list_add(bl, bio); - spin_unlock_irqrestore(&ms->lock, flags); - - if (should_wake) - wake(ms); -} - /* * Mirror mapping function */ @@ -1631,16 +1048,16 @@ static int mirror_map(struct dm_target *ti, struct bio *bio, struct mirror *m; struct mirror_set *ms = ti->private; struct dm_raid1_read_record *read_record = NULL; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); if (rw == WRITE) { /* Save region for mirror_end_io() handler */ - map_context->ll = bio_to_region(&ms->rh, bio); + map_context->ll = dm_rh_bio_to_region(ms->rh, bio); queue_bio(ms, bio, rw); return DM_MAPIO_SUBMITTED; } - r = ms->rh.log->type->in_sync(ms->rh.log, - bio_to_region(&ms->rh, bio), 0); + r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0); if (r < 0 && r != -EWOULDBLOCK) return r; @@ -1688,7 +1105,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, * We need to dec pending if this was a write. */ if (rw == WRITE) { - rh_dec(&ms->rh, map_context->ll); + dm_rh_dec(ms->rh, map_context->ll); return error; } @@ -1744,7 +1161,7 @@ out: static void mirror_presuspend(struct dm_target *ti) { struct mirror_set *ms = (struct mirror_set *) ti->private; - struct dm_dirty_log *log = ms->rh.log; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); atomic_set(&ms->suspend, 1); @@ -1752,10 +1169,10 @@ static void mirror_presuspend(struct dm_target *ti) * We must finish up all the work that we've * generated (i.e. recovery work). 
*/ - rh_stop_recovery(&ms->rh); + dm_rh_stop_recovery(ms->rh); wait_event(_kmirrord_recovery_stopped, - !atomic_read(&ms->rh.recovery_in_flight)); + !dm_rh_recovery_in_flight(ms->rh)); if (log->type->presuspend && log->type->presuspend(log)) /* FIXME: need better error handling */ @@ -1773,7 +1190,7 @@ static void mirror_presuspend(struct dm_target *ti) static void mirror_postsuspend(struct dm_target *ti) { struct mirror_set *ms = ti->private; - struct dm_dirty_log *log = ms->rh.log; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); if (log->type->postsuspend && log->type->postsuspend(log)) /* FIXME: need better error handling */ @@ -1783,13 +1200,13 @@ static void mirror_postsuspend(struct dm_target *ti) static void mirror_resume(struct dm_target *ti) { struct mirror_set *ms = ti->private; - struct dm_dirty_log *log = ms->rh.log; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); atomic_set(&ms->suspend, 0); if (log->type->resume && log->type->resume(log)) /* FIXME: need better error handling */ DMWARN("log resume failed"); - rh_start_recovery(&ms->rh); + dm_rh_start_recovery(ms->rh); } /* @@ -1821,7 +1238,7 @@ static int mirror_status(struct dm_target *ti, status_type_t type, { unsigned int m, sz = 0; struct mirror_set *ms = (struct mirror_set *) ti->private; - struct dm_dirty_log *log = ms->rh.log; + struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh); char buffer[ms->nr_mirrors + 1]; switch (type) { @@ -1834,15 +1251,15 @@ static int mirror_status(struct dm_target *ti, status_type_t type, buffer[m] = '\0'; DMEMIT("%llu/%llu 1 %s ", - (unsigned long long)log->type->get_sync_count(ms->rh.log), + (unsigned long long)log->type->get_sync_count(log), (unsigned long long)ms->nr_regions, buffer); - sz += log->type->status(ms->rh.log, type, result+sz, maxlen-sz); + sz += log->type->status(log, type, result+sz, maxlen-sz); break; case STATUSTYPE_TABLE: - sz = log->type->status(ms->rh.log, type, result, maxlen); + sz = log->type->status(log, type, result, maxlen); DMEMIT("%d", ms->nr_mirrors); for (m = 0; m < ms->nr_mirrors; m++) diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c new file mode 100644 index 000000000000..59f8d9df9e1a --- /dev/null +++ b/drivers/md/dm-region-hash.c @@ -0,0 +1,704 @@ +/* + * Copyright (C) 2003 Sistina Software Limited. + * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. + * + * This file is released under the GPL. + */ + +#include <linux/dm-dirty-log.h> +#include <linux/dm-region-hash.h> + +#include <linux/ctype.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/vmalloc.h> + +#include "dm.h" +#include "dm-bio-list.h" + +#define DM_MSG_PREFIX "region hash" + +/*----------------------------------------------------------------- + * Region hash + * + * The mirror splits itself up into discrete regions. Each + * region can be in one of three states: clean, dirty, + * nosync. There is no need to put clean regions in the hash. + * + * In addition to being present in the hash table a region _may_ + * be present on one of three lists. + * + * clean_regions: Regions on this list have no io pending to + * them, they are in sync, we are no longer interested in them, + * they are dull. dm_rh_update_states() will remove them from the + * hash table. + * + * quiesced_regions: These regions have been spun down, ready + * for recovery. rh_recovery_start() will remove regions from + * this list and hand them to kmirrord, which will schedule the + * recovery io with kcopyd. 
+ * + * recovered_regions: Regions that kcopyd has successfully + * recovered. dm_rh_update_states() will now schedule any delayed + * io, up the recovery_count, and remove the region from the + * hash. + * + * There are 2 locks: + * A rw spin lock 'hash_lock' protects just the hash table, + * this is never held in write mode from interrupt context, + * which I believe means that we only have to disable irqs when + * doing a write lock. + * + * An ordinary spin lock 'region_lock' that protects the three + * lists in the region_hash, with the 'state', 'list' and + * 'delayed_bios' fields of the regions. This is used from irq + * context, so all other uses will have to suspend local irqs. + *---------------------------------------------------------------*/ +struct dm_region_hash { + uint32_t region_size; + unsigned region_shift; + + /* holds persistent region state */ + struct dm_dirty_log *log; + + /* hash table */ + rwlock_t hash_lock; + mempool_t *region_pool; + unsigned mask; + unsigned nr_buckets; + unsigned prime; + unsigned shift; + struct list_head *buckets; + + unsigned max_recovery; /* Max # of regions to recover in parallel */ + + spinlock_t region_lock; + atomic_t recovery_in_flight; + struct semaphore recovery_count; + struct list_head clean_regions; + struct list_head quiesced_regions; + struct list_head recovered_regions; + struct list_head failed_recovered_regions; + + void *context; + sector_t target_begin; + + /* Callback function to schedule bios writes */ + void (*dispatch_bios)(void *context, struct bio_list *bios); + + /* Callback function to wakeup callers worker thread. */ + void (*wakeup_workers)(void *context); + + /* Callback function to wakeup callers recovery waiters. */ + void (*wakeup_all_recovery_waiters)(void *context); +}; + +struct dm_region { + struct dm_region_hash *rh; /* FIXME: can we get rid of this ? */ + region_t key; + int state; + + struct list_head hash_list; + struct list_head list; + + atomic_t pending; + struct bio_list delayed_bios; +}; + +/* + * Conversion fns + */ +static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector) +{ + return sector >> rh->region_shift; +} + +sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region) +{ + return region << rh->region_shift; +} +EXPORT_SYMBOL_GPL(dm_rh_region_to_sector); + +region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) +{ + return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin); +} +EXPORT_SYMBOL_GPL(dm_rh_bio_to_region); + +void *dm_rh_region_context(struct dm_region *reg) +{ + return reg->rh->context; +} +EXPORT_SYMBOL_GPL(dm_rh_region_context); + +region_t dm_rh_get_region_key(struct dm_region *reg) +{ + return reg->key; +} +EXPORT_SYMBOL_GPL(dm_rh_get_region_key); + +sector_t dm_rh_get_region_size(struct dm_region_hash *rh) +{ + return rh->region_size; +} +EXPORT_SYMBOL_GPL(dm_rh_get_region_size); + +/* + * FIXME: shall we pass in a structure instead of all these args to + * dm_region_hash_create()???? 
+ */ +#define RH_HASH_MULT 2654435387U +#define RH_HASH_SHIFT 12 + +#define MIN_REGIONS 64 +struct dm_region_hash *dm_region_hash_create( + void *context, void (*dispatch_bios)(void *context, + struct bio_list *bios), + void (*wakeup_workers)(void *context), + void (*wakeup_all_recovery_waiters)(void *context), + sector_t target_begin, unsigned max_recovery, + struct dm_dirty_log *log, uint32_t region_size, + region_t nr_regions) +{ + struct dm_region_hash *rh; + unsigned nr_buckets, max_buckets; + size_t i; + + /* + * Calculate a suitable number of buckets for our hash + * table. + */ + max_buckets = nr_regions >> 6; + for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1) + ; + nr_buckets >>= 1; + + rh = kmalloc(sizeof(*rh), GFP_KERNEL); + if (!rh) { + DMERR("unable to allocate region hash memory"); + return ERR_PTR(-ENOMEM); + } + + rh->context = context; + rh->dispatch_bios = dispatch_bios; + rh->wakeup_workers = wakeup_workers; + rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters; + rh->target_begin = target_begin; + rh->max_recovery = max_recovery; + rh->log = log; + rh->region_size = region_size; + rh->region_shift = ffs(region_size) - 1; + rwlock_init(&rh->hash_lock); + rh->mask = nr_buckets - 1; + rh->nr_buckets = nr_buckets; + + rh->shift = RH_HASH_SHIFT; + rh->prime = RH_HASH_MULT; + + rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets)); + if (!rh->buckets) { + DMERR("unable to allocate region hash bucket memory"); + kfree(rh); + return ERR_PTR(-ENOMEM); + } + + for (i = 0; i < nr_buckets; i++) + INIT_LIST_HEAD(rh->buckets + i); + + spin_lock_init(&rh->region_lock); + sema_init(&rh->recovery_count, 0); + atomic_set(&rh->recovery_in_flight, 0); + INIT_LIST_HEAD(&rh->clean_regions); + INIT_LIST_HEAD(&rh->quiesced_regions); + INIT_LIST_HEAD(&rh->recovered_regions); + INIT_LIST_HEAD(&rh->failed_recovered_regions); + + rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS, + sizeof(struct dm_region)); + if (!rh->region_pool) { + vfree(rh->buckets); + kfree(rh); + rh = ERR_PTR(-ENOMEM); + } + + return rh; +} +EXPORT_SYMBOL_GPL(dm_region_hash_create); + +void dm_region_hash_destroy(struct dm_region_hash *rh) +{ + unsigned h; + struct dm_region *reg, *nreg; + + BUG_ON(!list_empty(&rh->quiesced_regions)); + for (h = 0; h < rh->nr_buckets; h++) { + list_for_each_entry_safe(reg, nreg, rh->buckets + h, + hash_list) { + BUG_ON(atomic_read(®->pending)); + mempool_free(reg, rh->region_pool); + } + } + + if (rh->log) + dm_dirty_log_destroy(rh->log); + + if (rh->region_pool) + mempool_destroy(rh->region_pool); + + vfree(rh->buckets); + kfree(rh); +} +EXPORT_SYMBOL_GPL(dm_region_hash_destroy); + +struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh) +{ + return rh->log; +} +EXPORT_SYMBOL_GPL(dm_rh_dirty_log); + +static unsigned rh_hash(struct dm_region_hash *rh, region_t region) +{ + return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask; +} + +static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region) +{ + struct dm_region *reg; + struct list_head *bucket = rh->buckets + rh_hash(rh, region); + + list_for_each_entry(reg, bucket, hash_list) + if (reg->key == region) + return reg; + + return NULL; +} + +static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg) +{ + list_add(®->hash_list, rh->buckets + rh_hash(rh, reg->key)); +} + +static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region) +{ + struct dm_region *reg, *nreg; + + nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC); + if 
+static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
+{
+	struct dm_region *reg;
+	struct list_head *bucket = rh->buckets + rh_hash(rh, region);
+
+	list_for_each_entry(reg, bucket, hash_list)
+		if (reg->key == region)
+			return reg;
+
+	return NULL;
+}
+
+static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
+{
+	list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
+}
+
+static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
+{
+	struct dm_region *reg, *nreg;
+
+	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
+	if (unlikely(!nreg))
+		nreg = kmalloc(sizeof(*nreg), GFP_NOIO);
+
+	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
+		      DM_RH_CLEAN : DM_RH_NOSYNC;
+	nreg->rh = rh;
+	nreg->key = region;
+	INIT_LIST_HEAD(&nreg->list);
+	atomic_set(&nreg->pending, 0);
+	bio_list_init(&nreg->delayed_bios);
+
+	write_lock_irq(&rh->hash_lock);
+	reg = __rh_lookup(rh, region);
+	if (reg)
+		/* We lost the race. */
+		mempool_free(nreg, rh->region_pool);
+	else {
+		__rh_insert(rh, nreg);
+		if (nreg->state == DM_RH_CLEAN) {
+			spin_lock(&rh->region_lock);
+			list_add(&nreg->list, &rh->clean_regions);
+			spin_unlock(&rh->region_lock);
+		}
+
+		reg = nreg;
+	}
+	write_unlock_irq(&rh->hash_lock);
+
+	return reg;
+}
+
+static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
+{
+	struct dm_region *reg;
+
+	reg = __rh_lookup(rh, region);
+	if (!reg) {
+		read_unlock(&rh->hash_lock);
+		reg = __rh_alloc(rh, region);
+		read_lock(&rh->hash_lock);
+	}
+
+	return reg;
+}
+
+int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
+{
+	int r;
+	struct dm_region *reg;
+
+	read_lock(&rh->hash_lock);
+	reg = __rh_lookup(rh, region);
+	read_unlock(&rh->hash_lock);
+
+	if (reg)
+		return reg->state;
+
+	/*
+	 * The region wasn't in the hash, so we fall back to the
+	 * dirty log.
+	 */
+	r = rh->log->type->in_sync(rh->log, region, may_block);
+
+	/*
+	 * Any error from the dirty log (e.g. -EWOULDBLOCK) gets
+	 * taken as a DM_RH_NOSYNC
+	 */
+	return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
+}
+EXPORT_SYMBOL_GPL(dm_rh_get_state);
+
+static void complete_resync_work(struct dm_region *reg, int success)
+{
+	struct dm_region_hash *rh = reg->rh;
+
+	rh->log->type->set_region_sync(rh->log, reg->key, success);
+
+	/*
+	 * Dispatch the bios before we call 'wake_up_all'.
+	 * This is important because if we are suspending,
+	 * we want to know that recovery is complete and
+	 * the work queue is flushed. If we wake_up_all
+	 * before we dispatch_bios (queue bios and call wake()),
+	 * then we risk suspending before the work queue
+	 * has been properly flushed.
+	 */
+	rh->dispatch_bios(rh->context, &reg->delayed_bios);
+	if (atomic_dec_and_test(&rh->recovery_in_flight))
+		rh->wakeup_all_recovery_waiters(rh->context);
+	up(&rh->recovery_count);
+}
+
+/* dm_rh_mark_nosync
+ * @ms
+ * @bio
+ * @done
+ * @error
+ *
+ * The bio was written on some mirror(s) but failed on other mirror(s).
+ * We can successfully endio the bio but should avoid the region being
+ * marked clean by setting the state DM_RH_NOSYNC.
+ *
+ * This function is _not_ safe in interrupt context!
+ */
+void dm_rh_mark_nosync(struct dm_region_hash *rh,
+		       struct bio *bio, unsigned done, int error)
+{
+	unsigned long flags;
+	struct dm_dirty_log *log = rh->log;
+	struct dm_region *reg;
+	region_t region = dm_rh_bio_to_region(rh, bio);
+	int recovering = 0;
+
+	/* We must inform the log that the sync count has changed. */
+	log->type->set_region_sync(log, region, 0);
+
+	read_lock(&rh->hash_lock);
+	reg = __rh_find(rh, region);
+	read_unlock(&rh->hash_lock);
+
+	/* region hash entry should exist because write was in-flight */
+	BUG_ON(!reg);
+	BUG_ON(!list_empty(&reg->list));
+
+	spin_lock_irqsave(&rh->region_lock, flags);
+	/*
+	 * Possible cases:
+	 *   1) DM_RH_DIRTY
+	 *   2) DM_RH_NOSYNC: was dirty, other preceding writes failed
+	 *   3) DM_RH_RECOVERING: flushing pending writes
+	 * In either case, the region should not have been connected to any list.
+	 */
+	recovering = (reg->state == DM_RH_RECOVERING);
+	reg->state = DM_RH_NOSYNC;
+	BUG_ON(!list_empty(&reg->list));
+	spin_unlock_irqrestore(&rh->region_lock, flags);
+
+	bio_endio(bio, error);
+	if (recovering)
+		complete_resync_work(reg, 0);
+}
+EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);
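__rh_find()/__rh_alloc() use the classic optimistic pattern: drop the read lock, allocate and initialise a candidate entry with no lock held, then retake the write lock and re-check, freeing the candidate if another CPU inserted the region first. A userspace sketch of that shape with a pthread rwlock and a one-bucket toy table (all names illustrative):

#include <pthread.h>
#include <stdlib.h>

struct region {
	long key;
	struct region *next;
};

static pthread_rwlock_t hash_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct region *table;		/* a single bucket, for brevity */

static struct region *lookup(long key)	/* caller holds hash_lock */
{
	struct region *r;

	for (r = table; r; r = r->next)
		if (r->key == key)
			return r;
	return NULL;
}

/* The __rh_alloc() shape: allocate outside the lock, then re-check
 * under the write lock in case we lost the race. */
static struct region *find_or_alloc(long key)
{
	struct region *r, *nr = malloc(sizeof(*nr));

	if (!nr)
		return NULL;
	nr->key = key;

	pthread_rwlock_wrlock(&hash_lock);
	r = lookup(key);
	if (r)
		free(nr);		/* we lost the race */
	else {
		nr->next = table;
		table = nr;
		r = nr;
	}
	pthread_rwlock_unlock(&hash_lock);
	return r;
}

int main(void)
{
	return find_or_alloc(42) ? 0 : 1;
}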
+void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
+{
+	struct dm_region *reg, *next;
+
+	LIST_HEAD(clean);
+	LIST_HEAD(recovered);
+	LIST_HEAD(failed_recovered);
+
+	/*
+	 * Quickly grab the lists.
+	 */
+	write_lock_irq(&rh->hash_lock);
+	spin_lock(&rh->region_lock);
+	if (!list_empty(&rh->clean_regions)) {
+		list_splice_init(&rh->clean_regions, &clean);
+
+		list_for_each_entry(reg, &clean, list)
+			list_del(&reg->hash_list);
+	}
+
+	if (!list_empty(&rh->recovered_regions)) {
+		list_splice_init(&rh->recovered_regions, &recovered);
+
+		list_for_each_entry(reg, &recovered, list)
+			list_del(&reg->hash_list);
+	}
+
+	if (!list_empty(&rh->failed_recovered_regions)) {
+		list_splice_init(&rh->failed_recovered_regions,
+				 &failed_recovered);
+
+		list_for_each_entry(reg, &failed_recovered, list)
+			list_del(&reg->hash_list);
+	}
+
+	spin_unlock(&rh->region_lock);
+	write_unlock_irq(&rh->hash_lock);
+
+	/*
+	 * All the regions on the recovered and clean lists have
+	 * now been pulled out of the system, so no need to do
+	 * any more locking.
+	 */
+	list_for_each_entry_safe(reg, next, &recovered, list) {
+		rh->log->type->clear_region(rh->log, reg->key);
+		complete_resync_work(reg, 1);
+		mempool_free(reg, rh->region_pool);
+	}
+
+	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
+		complete_resync_work(reg, errors_handled ? 0 : 1);
+		mempool_free(reg, rh->region_pool);
+	}
+
+	list_for_each_entry_safe(reg, next, &clean, list) {
+		rh->log->type->clear_region(rh->log, reg->key);
+		mempool_free(reg, rh->region_pool);
+	}
+
+	rh->log->type->flush(rh->log);
+}
+EXPORT_SYMBOL_GPL(dm_rh_update_states);
+
+static void rh_inc(struct dm_region_hash *rh, region_t region)
+{
+	struct dm_region *reg;
+
+	read_lock(&rh->hash_lock);
+	reg = __rh_find(rh, region);
+
+	spin_lock_irq(&rh->region_lock);
+	atomic_inc(&reg->pending);
+
+	if (reg->state == DM_RH_CLEAN) {
+		reg->state = DM_RH_DIRTY;
+		list_del_init(&reg->list);	/* take off the clean list */
+		spin_unlock_irq(&rh->region_lock);
+
+		rh->log->type->mark_region(rh->log, reg->key);
+	} else
+		spin_unlock_irq(&rh->region_lock);
+
+	read_unlock(&rh->hash_lock);
+}
+
+void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
+{
+	struct bio *bio;
+
+	for (bio = bios->head; bio; bio = bio->bi_next)
+		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
+}
+EXPORT_SYMBOL_GPL(dm_rh_inc_pending);
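dm_rh_update_states() follows a standard idiom: hold the locks only long enough to splice each shared list onto a private list head, then walk the private copies with no locks held at all. A self-contained sketch of the idiom, with a pthread mutex standing in for the spinlocks and a toy singly-linked list (all names illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int key;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *shared_list;	/* protected by 'lock' */

/* The dm_rh_update_states() idiom in miniature: splice the shared
 * list onto a private head under the lock, process it unlocked. */
static void update_states(void)
{
	struct node *priv, *n;

	pthread_mutex_lock(&lock);
	priv = shared_list;
	shared_list = NULL;		/* list_splice_init() equivalent */
	pthread_mutex_unlock(&lock);

	while ((n = priv) != NULL) {	/* no lock needed from here on */
		priv = n->next;
		printf("retiring region %d\n", n->key);
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->key = i;
		n->next = shared_list;
		shared_list = n;
	}
	update_states();
	return 0;
}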
+void dm_rh_dec(struct dm_region_hash *rh, region_t region)
+{
+	unsigned long flags;
+	struct dm_region *reg;
+	int should_wake = 0;
+
+	read_lock(&rh->hash_lock);
+	reg = __rh_lookup(rh, region);
+	read_unlock(&rh->hash_lock);
+
+	spin_lock_irqsave(&rh->region_lock, flags);
+	if (atomic_dec_and_test(&reg->pending)) {
+		/*
+		 * There is no pending I/O for this region.
+		 * We can move the region to the corresponding list for
+		 * the next action. At this point, the region is not yet
+		 * connected to any list.
+		 *
+		 * If the state is DM_RH_NOSYNC, the region should be
+		 * kept off the clean list.
+		 * The hash entry for DM_RH_NOSYNC will remain in memory
+		 * until the region is recovered or the map is reloaded.
+		 */
+
+		/* do nothing for DM_RH_NOSYNC */
+		if (reg->state == DM_RH_RECOVERING) {
+			list_add_tail(&reg->list, &rh->quiesced_regions);
+		} else if (reg->state == DM_RH_DIRTY) {
+			reg->state = DM_RH_CLEAN;
+			list_add(&reg->list, &rh->clean_regions);
+		}
+		should_wake = 1;
+	}
+	spin_unlock_irqrestore(&rh->region_lock, flags);
+
+	if (should_wake)
+		rh->wakeup_workers(rh->context);
+}
+EXPORT_SYMBOL_GPL(dm_rh_dec);
+
+/*
+ * Starts quiescing a region in preparation for recovery.
+ */
+static int __rh_recovery_prepare(struct dm_region_hash *rh)
+{
+	int r;
+	region_t region;
+	struct dm_region *reg;
+
+	/*
+	 * Ask the dirty log what's next.
+	 */
+	r = rh->log->type->get_resync_work(rh->log, &region);
+	if (r <= 0)
+		return r;
+
+	/*
+	 * Get this region, and start it quiescing by setting the
+	 * recovering flag.
+	 */
+	read_lock(&rh->hash_lock);
+	reg = __rh_find(rh, region);
+	read_unlock(&rh->hash_lock);
+
+	spin_lock_irq(&rh->region_lock);
+	reg->state = DM_RH_RECOVERING;
+
+	/* Already quiesced? */
+	if (atomic_read(&reg->pending))
+		list_del_init(&reg->list);
+	else
+		list_move(&reg->list, &rh->quiesced_regions);
+
+	spin_unlock_irq(&rh->region_lock);
+
+	return 1;
+}
+
+void dm_rh_recovery_prepare(struct dm_region_hash *rh)
+{
+	/* Extra reference to avoid race with dm_rh_stop_recovery */
+	atomic_inc(&rh->recovery_in_flight);
+
+	while (!down_trylock(&rh->recovery_count)) {
+		atomic_inc(&rh->recovery_in_flight);
+		if (__rh_recovery_prepare(rh) <= 0) {
+			atomic_dec(&rh->recovery_in_flight);
+			up(&rh->recovery_count);
+			break;
+		}
+	}
+
+	/* Drop the extra reference */
+	if (atomic_dec_and_test(&rh->recovery_in_flight))
+		rh->wakeup_all_recovery_waiters(rh->context);
+}
+EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);
+
+/*
+ * Returns any quiesced regions.
+ */
+struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
+{
+	struct dm_region *reg = NULL;
+
+	spin_lock_irq(&rh->region_lock);
+	if (!list_empty(&rh->quiesced_regions)) {
+		reg = list_entry(rh->quiesced_regions.next,
+				 struct dm_region, list);
+		list_del_init(&reg->list);	/* remove from the quiesced list */
+	}
+	spin_unlock_irq(&rh->region_lock);
+
+	return reg;
+}
+EXPORT_SYMBOL_GPL(dm_rh_recovery_start);
+
+void dm_rh_recovery_end(struct dm_region *reg, int success)
+{
+	struct dm_region_hash *rh = reg->rh;
+
+	spin_lock_irq(&rh->region_lock);
+	if (success)
+		list_add(&reg->list, &reg->rh->recovered_regions);
+	else {
+		reg->state = DM_RH_NOSYNC;
+		list_add(&reg->list, &reg->rh->failed_recovered_regions);
+	}
+	spin_unlock_irq(&rh->region_lock);
+
+	rh->wakeup_workers(rh->context);
+}
+EXPORT_SYMBOL_GPL(dm_rh_recovery_end);
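dm_rh_recovery_prepare() holds one extra recovery_in_flight reference across the whole loop so the count cannot hit zero, and wake the waiters, while regions are still being queued; recovery_count is a counting semaphore capping concurrent recoveries at max_recovery. A single-threaded model of that control flow, with plain integers standing in for the kernel atomics and semaphore (all values invented):

#include <stdbool.h>
#include <stdio.h>

static int recovery_count = 2;		/* models sema_init(.., max_recovery) */
static int recovery_in_flight;		/* models the atomic_t */
static int resync_work = 3;		/* pretend the log has 3 dirty regions */

static bool try_down(void)		/* models !down_trylock() */
{
	if (recovery_count > 0) {
		recovery_count--;
		return true;
	}
	return false;
}

static int recovery_prepare_one(void)	/* models __rh_recovery_prepare() */
{
	return resync_work-- > 0 ? 1 : 0;
}

static void recovery_prepare(void)
{
	recovery_in_flight++;		/* the extra reference */

	while (try_down()) {
		recovery_in_flight++;
		if (recovery_prepare_one() <= 0) {
			recovery_in_flight--;
			recovery_count++;	/* models up() */
			break;
		}
	}

	if (--recovery_in_flight == 0)	/* drop the extra reference */
		printf("wake all recovery waiters\n");
	else
		printf("%d recoveries now in flight\n", recovery_in_flight);
}

int main(void)
{
	recovery_prepare();		/* prints: 2 recoveries now in flight */
	return 0;
}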
+/* Return recovery in flight count. */
+int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
+{
+	return atomic_read(&rh->recovery_in_flight);
+}
+EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);
+
+int dm_rh_flush(struct dm_region_hash *rh)
+{
+	return rh->log->type->flush(rh->log);
+}
+EXPORT_SYMBOL_GPL(dm_rh_flush);
+
+void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
+{
+	struct dm_region *reg;
+
+	read_lock(&rh->hash_lock);
+	reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
+	bio_list_add(&reg->delayed_bios, bio);
+	read_unlock(&rh->hash_lock);
+}
+EXPORT_SYMBOL_GPL(dm_rh_delay);
+
+void dm_rh_stop_recovery(struct dm_region_hash *rh)
+{
+	int i;
+
+	/* wait for any recovering regions */
+	for (i = 0; i < rh->max_recovery; i++)
+		down(&rh->recovery_count);
+}
+EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);
+
+void dm_rh_start_recovery(struct dm_region_hash *rh)
+{
+	int i;
+
+	for (i = 0; i < rh->max_recovery; i++)
+		up(&rh->recovery_count);
+
+	rh->wakeup_workers(rh->context);
+}
+EXPORT_SYMBOL_GPL(dm_rh_start_recovery);
+
+MODULE_DESCRIPTION(DM_NAME " region hash");
+MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index 391dfa2ad434..cdfbf65b28cb 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -9,7 +9,8 @@
  * Round-robin path selector.
  */

-#include "dm.h"
+#include <linux/device-mapper.h>
+
 #include "dm-path-selector.h"

 #include <linux/slab.h>
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 6e5528aecc98..6c96db26b87c 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -229,19 +229,21 @@ static void __insert_origin(struct origin *o)
  */
 static int register_snapshot(struct dm_snapshot *snap)
 {
-	struct origin *o;
+	struct origin *o, *new_o;
 	struct block_device *bdev = snap->origin->bdev;

+	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
+	if (!new_o)
+		return -ENOMEM;
+
 	down_write(&_origins_lock);
 	o = __lookup_origin(bdev);

-	if (!o) {
+	if (o)
+		kfree(new_o);
+	else {
 		/* New origin */
-		o = kmalloc(sizeof(*o), GFP_KERNEL);
-		if (!o) {
-			up_write(&_origins_lock);
-			return -ENOMEM;
-		}
+		o = new_o;

 		/* Initialise the struct */
 		INIT_LIST_HEAD(&o->snapshots);
@@ -368,6 +370,7 @@ static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snaps
 	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
 							     GFP_NOIO);

+	atomic_inc(&s->pending_exceptions_count);
 	pe->snap = s;

 	return pe;
@@ -375,7 +378,11 @@ static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snaps

 static void free_pending_exception(struct dm_snap_pending_exception *pe)
 {
-	mempool_free(pe, pe->snap->pending_pool);
+	struct dm_snapshot *s = pe->snap;
+
+	mempool_free(pe, s->pending_pool);
+	smp_mb__before_atomic_dec();
+	atomic_dec(&s->pending_exceptions_count);
 }

 static void insert_completed_exception(struct dm_snapshot *s,
@@ -600,7 +607,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)

 	s->valid = 1;
 	s->active = 0;
-	s->last_percent = 0;
+	atomic_set(&s->pending_exceptions_count, 0);
 	init_rwsem(&s->lock);
 	spin_lock_init(&s->pe_lock);
 	s->ti = ti;
@@ -727,6 +734,14 @@ static void snapshot_dtr(struct dm_target *ti)
 	/* After this returns there can be no new kcopyd jobs. */
 	unregister_snapshot(s);

+	while (atomic_read(&s->pending_exceptions_count))
+		yield();
+	/*
+	 * Ensure instructions in mempool_destroy aren't reordered
+	 * before atomic_read.
+	 */
+	smp_mb();
+
 #ifdef CONFIG_DM_DEBUG
 	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
 		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
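The ordering contract in free_pending_exception()/snapshot_dtr() above: the pool element must be returned before pending_exceptions_count drops, and the destructor must see all of those frees completed before it calls mempool_destroy(). A C11 rendering of the same publish/retire ordering with release/acquire atomics (the kernel uses smp_mb__before_atomic_dec(), smp_mb() and yield(); names here are illustrative):

#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_int pending_exceptions_count;

struct pending_exception { int chunk; };

static struct pending_exception *alloc_pending(void)
{
	struct pending_exception *pe = malloc(sizeof(*pe));

	if (pe)
		atomic_fetch_add(&pending_exceptions_count, 1);
	return pe;
}

static void free_pending(struct pending_exception *pe)
{
	free(pe);
	/* the free must be visible before the count drops, mirroring
	 * smp_mb__before_atomic_dec() in the patch */
	atomic_fetch_sub_explicit(&pending_exceptions_count, 1,
				  memory_order_release);
}

static void destroy_pool(void)
{
	/* mirrors the yield() loop plus smp_mb() in snapshot_dtr():
	 * once the count reads zero, all the frees are visible */
	while (atomic_load_explicit(&pending_exceptions_count,
				    memory_order_acquire))
		sched_yield();
	printf("pool can be destroyed safely\n");
}

int main(void)
{
	struct pending_exception *pe = alloc_pending();

	if (pe)
		free_pending(pe);
	destroy_pool();
	return 0;
}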
@@ -824,8 +839,10 @@ static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
 	 * the bios for the original write to the origin.
 	 */
 	if (primary_pe &&
-	    atomic_dec_and_test(&primary_pe->ref_count))
+	    atomic_dec_and_test(&primary_pe->ref_count)) {
 		origin_bios = bio_list_get(&primary_pe->origin_bios);
+		free_pending_exception(primary_pe);
+	}

 	/*
 	 * Free the pe if it's not linked to an origin write or if
@@ -834,12 +851,6 @@ static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
 	if (!primary_pe || primary_pe != pe)
 		free_pending_exception(pe);

-	/*
-	 * Free the primary pe if nothing references it.
-	 */
-	if (primary_pe && !atomic_read(&primary_pe->ref_count))
-		free_pending_exception(primary_pe);
-
 	return origin_bios;
 }
diff --git a/drivers/md/dm-snap.h b/drivers/md/dm-snap.h
index 292c15609ae3..99c0106ede2d 100644
--- a/drivers/md/dm-snap.h
+++ b/drivers/md/dm-snap.h
@@ -9,7 +9,7 @@
 #ifndef DM_SNAPSHOT_H
 #define DM_SNAPSHOT_H

-#include "dm.h"
+#include <linux/device-mapper.h>
 #include "dm-bio-list.h"
 #include <linux/blkdev.h>
 #include <linux/workqueue.h>
@@ -158,11 +158,10 @@ struct dm_snapshot {
 	/* Used for display of table */
 	char type;

-	/* The last percentage we notified */
-	int last_percent;
-
 	mempool_t *pending_pool;

+	atomic_t pending_exceptions_count;
+
 	struct exception_table pending;
 	struct exception_table complete;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index b745d8ac625b..9e4ef88d421e 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -4,7 +4,7 @@
  * This file is released under the GPL.
  */

-#include "dm.h"
+#include <linux/device-mapper.h>

 #include <linux/module.h>
 #include <linux/init.h>
@@ -60,8 +60,8 @@ static inline struct stripe_c *alloc_context(unsigned int stripes)
 {
 	size_t len;

-	if (array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
-			  stripes))
+	if (dm_array_too_big(sizeof(struct stripe_c), sizeof(struct stripe),
+			     stripes))
 		return NULL;

 	len = sizeof(struct stripe_c) + (sizeof(struct stripe) * stripes);
@@ -320,8 +320,10 @@ int __init dm_stripe_init(void)
 	int r;

 	r = dm_register_target(&stripe_target);
-	if (r < 0)
+	if (r < 0) {
 		DMWARN("target registration failed");
+		return r;
+	}

 	kstriped = create_singlethread_workqueue("kstriped");
 	if (!kstriped) {
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index a740a6950f59..04e5fd742c2c 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -43,7 +43,7 @@ struct dm_table {
 	 * device. This should be a combination of FMODE_READ
 	 * and FMODE_WRITE.
 	 */
-	int mode;
+	fmode_t mode;

 	/* a list of devices used by this table */
 	struct list_head devices;
@@ -217,7 +217,7 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
 	return 0;
 }

-int dm_table_create(struct dm_table **result, int mode,
+int dm_table_create(struct dm_table **result, fmode_t mode,
 		    unsigned num_targets, struct mapped_device *md)
 {
 	struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
@@ -313,19 +313,6 @@ static inline int check_space(struct dm_table *t)
 }

 /*
- * Convert a device path to a dev_t.
- */
-static int lookup_device(const char *path, dev_t *dev)
-{
-	struct block_device *bdev = lookup_bdev(path);
-	if (IS_ERR(bdev))
-		return PTR_ERR(bdev);
-	*dev = bdev->bd_dev;
-	bdput(bdev);
-	return 0;
-}
-
-/*
  * See if we've already got a device in the list.
*/ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev) @@ -357,7 +344,7 @@ static int open_dev(struct dm_dev_internal *d, dev_t dev, return PTR_ERR(bdev); r = bd_claim_by_disk(bdev, _claim_ptr, dm_disk(md)); if (r) - blkdev_put(bdev); + blkdev_put(bdev, d->dm_dev.mode); else d->dm_dev.bdev = bdev; return r; @@ -372,7 +359,7 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md) return; bd_release_from_disk(d->dm_dev.bdev, dm_disk(md)); - blkdev_put(d->dm_dev.bdev); + blkdev_put(d->dm_dev.bdev, d->dm_dev.mode); d->dm_dev.bdev = NULL; } @@ -395,7 +382,7 @@ static int check_device_area(struct dm_dev_internal *dd, sector_t start, * careful to leave things as they were if we fail to reopen the * device. */ -static int upgrade_mode(struct dm_dev_internal *dd, int new_mode, +static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode, struct mapped_device *md) { int r; @@ -421,7 +408,7 @@ static int upgrade_mode(struct dm_dev_internal *dd, int new_mode, */ static int __table_get_device(struct dm_table *t, struct dm_target *ti, const char *path, sector_t start, sector_t len, - int mode, struct dm_dev **result) + fmode_t mode, struct dm_dev **result) { int r; dev_t uninitialized_var(dev); @@ -437,8 +424,12 @@ static int __table_get_device(struct dm_table *t, struct dm_target *ti, return -EOVERFLOW; } else { /* convert the path to a device */ - if ((r = lookup_device(path, &dev))) - return r; + struct block_device *bdev = lookup_bdev(path); + + if (IS_ERR(bdev)) + return PTR_ERR(bdev); + dev = bdev->bd_dev; + bdput(bdev); } dd = find_device(&t->devices, dev); @@ -537,7 +528,7 @@ void dm_set_device_limits(struct dm_target *ti, struct block_device *bdev) EXPORT_SYMBOL_GPL(dm_set_device_limits); int dm_get_device(struct dm_target *ti, const char *path, sector_t start, - sector_t len, int mode, struct dm_dev **result) + sector_t len, fmode_t mode, struct dm_dev **result) { int r = __table_get_device(ti->table, ti, path, start, len, mode, result); @@ -677,7 +668,7 @@ static void check_for_valid_limits(struct io_restrictions *rs) if (!rs->max_segment_size) rs->max_segment_size = MAX_SEGMENT_SIZE; if (!rs->seg_boundary_mask) - rs->seg_boundary_mask = -1; + rs->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; if (!rs->bounce_pfn) rs->bounce_pfn = -1; } @@ -887,7 +878,7 @@ struct list_head *dm_table_get_devices(struct dm_table *t) return &t->devices; } -int dm_table_get_mode(struct dm_table *t) +fmode_t dm_table_get_mode(struct dm_table *t) { return t->mode; } diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c index bdec206c404b..cdbf126ec106 100644 --- a/drivers/md/dm-zero.c +++ b/drivers/md/dm-zero.c @@ -4,7 +4,7 @@ * This file is released under the GPL. 
*/ -#include "dm.h" +#include <linux/device-mapper.h> #include <linux/module.h> #include <linux/init.h> diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 327de03a5bdf..421c9f02d8ca 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -21,7 +21,7 @@ #include <linux/idr.h> #include <linux/hdreg.h> #include <linux/blktrace_api.h> -#include <linux/smp_lock.h> +#include <trace/block.h> #define DM_MSG_PREFIX "core" @@ -52,6 +52,8 @@ struct dm_target_io { union map_info info; }; +DEFINE_TRACE(block_bio_complete); + union map_info *dm_get_mapinfo(struct bio *bio) { if (bio && bio->bi_private) @@ -76,7 +78,6 @@ union map_info *dm_get_mapinfo(struct bio *bio) */ struct dm_wq_req { enum { - DM_WQ_FLUSH_ALL, DM_WQ_FLUSH_DEFERRED, } type; struct work_struct work; @@ -151,40 +152,40 @@ static struct kmem_cache *_tio_cache; static int __init local_init(void) { - int r; + int r = -ENOMEM; /* allocate a slab for the dm_ios */ _io_cache = KMEM_CACHE(dm_io, 0); if (!_io_cache) - return -ENOMEM; + return r; /* allocate a slab for the target ios */ _tio_cache = KMEM_CACHE(dm_target_io, 0); - if (!_tio_cache) { - kmem_cache_destroy(_io_cache); - return -ENOMEM; - } + if (!_tio_cache) + goto out_free_io_cache; r = dm_uevent_init(); - if (r) { - kmem_cache_destroy(_tio_cache); - kmem_cache_destroy(_io_cache); - return r; - } + if (r) + goto out_free_tio_cache; _major = major; r = register_blkdev(_major, _name); - if (r < 0) { - kmem_cache_destroy(_tio_cache); - kmem_cache_destroy(_io_cache); - dm_uevent_exit(); - return r; - } + if (r < 0) + goto out_uevent_exit; if (!_major) _major = r; return 0; + +out_uevent_exit: + dm_uevent_exit(); +out_free_tio_cache: + kmem_cache_destroy(_tio_cache); +out_free_io_cache: + kmem_cache_destroy(_io_cache); + + return r; } static void local_exit(void) @@ -249,13 +250,13 @@ static void __exit dm_exit(void) /* * Block device functions */ -static int dm_blk_open(struct inode *inode, struct file *file) +static int dm_blk_open(struct block_device *bdev, fmode_t mode) { struct mapped_device *md; spin_lock(&_minor_lock); - md = inode->i_bdev->bd_disk->private_data; + md = bdev->bd_disk->private_data; if (!md) goto out; @@ -274,11 +275,9 @@ out: return md ? 0 : -ENXIO; } -static int dm_blk_close(struct inode *inode, struct file *file) +static int dm_blk_close(struct gendisk *disk, fmode_t mode) { - struct mapped_device *md; - - md = inode->i_bdev->bd_disk->private_data; + struct mapped_device *md = disk->private_data; atomic_dec(&md->open_count); dm_put(md); return 0; @@ -315,21 +314,14 @@ static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) return dm_get_geometry(md, geo); } -static int dm_blk_ioctl(struct inode *inode, struct file *file, +static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { - struct mapped_device *md; - struct dm_table *map; + struct mapped_device *md = bdev->bd_disk->private_data; + struct dm_table *map = dm_get_table(md); struct dm_target *tgt; int r = -ENOTTY; - /* We don't really need this lock, but we do need 'inode'. 
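The rewritten local_init() above replaces the repeated kmem_cache_destroy() calls in every error branch with the usual kernel unwind ladder: one label per acquired resource, and each failure jumps to the label that releases everything obtained so far, in reverse order. A self-contained sketch of the same shape, with malloc standing in for the caches (names are illustrative; a real init would keep the resources rather than free them on the success path):

#include <stdio.h>
#include <stdlib.h>

static int init_three(void)
{
	int r = -1;			/* models -ENOMEM */
	char *io_cache, *tio_cache, *uevent;

	io_cache = malloc(16);
	if (!io_cache)
		return r;

	tio_cache = malloc(16);
	if (!tio_cache)
		goto out_free_io_cache;

	uevent = malloc(16);
	if (!uevent)
		goto out_free_tio_cache;

	printf("all three resources acquired\n");
	free(uevent);
	free(tio_cache);
	free(io_cache);
	return 0;

out_free_tio_cache:			/* unwind in reverse order */
	free(tio_cache);
out_free_io_cache:
	free(io_cache);
	return r;
}

int main(void)
{
	return init_three() ? 1 : 0;
}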
*/ - unlock_kernel(); - - md = inode->i_bdev->bd_disk->private_data; - - map = dm_get_table(md); - if (!map || !dm_table_get_size(map)) goto out; @@ -345,12 +337,11 @@ static int dm_blk_ioctl(struct inode *inode, struct file *file, } if (tgt->type->ioctl) - r = tgt->type->ioctl(tgt, inode, file, cmd, arg); + r = tgt->type->ioctl(tgt, cmd, arg); out: dm_table_put(map); - lock_kernel(); return r; } @@ -387,7 +378,7 @@ static void start_io_acct(struct dm_io *io) dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending); } -static int end_io_acct(struct dm_io *io) +static void end_io_acct(struct dm_io *io) { struct mapped_device *md = io->md; struct bio *bio = io->bio; @@ -403,7 +394,9 @@ static int end_io_acct(struct dm_io *io) dm_disk(md)->part0.in_flight = pending = atomic_dec_return(&md->pending); - return !pending; + /* nudge anyone waiting on suspend queue */ + if (!pending) + wake_up(&md->wait); } /* @@ -511,13 +504,10 @@ static void dec_pending(struct dm_io *io, int error) spin_unlock_irqrestore(&io->md->pushback_lock, flags); } - if (end_io_acct(io)) - /* nudge anyone waiting on suspend queue */ - wake_up(&io->md->wait); + end_io_acct(io); if (io->error != DM_ENDIO_REQUEUE) { - blk_add_trace_bio(io->md->queue, io->bio, - BLK_TA_COMPLETE); + trace_block_bio_complete(io->md->queue, io->bio); bio_endio(io->bio, io->error); } @@ -610,7 +600,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone, if (r == DM_MAPIO_REMAPPED) { /* the bio has been remapped so dispatch it */ - blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone, + trace_block_remap(bdev_get_queue(clone->bi_bdev), clone, tio->io->bio->bi_bdev->bd_dev, clone->bi_sector, sector); @@ -669,6 +659,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector, clone->bi_size = to_bytes(len); clone->bi_io_vec->bv_offset = offset; clone->bi_io_vec->bv_len = clone->bi_size; + clone->bi_flags |= 1 << BIO_CLONED; return clone; } @@ -948,16 +939,24 @@ static void dm_unplug_all(struct request_queue *q) static int dm_any_congested(void *congested_data, int bdi_bits) { - int r; - struct mapped_device *md = (struct mapped_device *) congested_data; - struct dm_table *map = dm_get_table(md); + int r = bdi_bits; + struct mapped_device *md = congested_data; + struct dm_table *map; - if (!map || test_bit(DMF_BLOCK_IO, &md->flags)) - r = bdi_bits; - else - r = dm_table_any_congested(map, bdi_bits); + atomic_inc(&md->pending); + + if (!test_bit(DMF_BLOCK_IO, &md->flags)) { + map = dm_get_table(md); + if (map) { + r = dm_table_any_congested(map, bdi_bits); + dm_table_put(map); + } + } + + if (!atomic_dec_return(&md->pending)) + /* nudge anyone waiting on suspend queue */ + wake_up(&md->wait); - dm_table_put(map); return r; } @@ -1094,7 +1093,7 @@ static struct mapped_device *alloc_dev(int minor) if (!md->tio_pool) goto bad_tio_pool; - md->bs = bioset_create(16, 16); + md->bs = bioset_create(16, 0); if (!md->bs) goto bad_no_bioset; @@ -1394,9 +1393,6 @@ static void dm_wq_work(struct work_struct *work) down_write(&md->io_lock); switch (req->type) { - case DM_WQ_FLUSH_ALL: - __merge_pushback_list(md); - /* pass through */ case DM_WQ_FLUSH_DEFERRED: __flush_deferred_io(md); break; @@ -1526,7 +1522,7 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) if (!md->suspended_bdev) { DMWARN("bdget failed in dm_suspend"); r = -ENOMEM; - goto flush_and_out; + goto out; } /* @@ -1577,14 +1573,6 @@ int dm_suspend(struct mapped_device *md, unsigned suspend_flags) set_bit(DMF_SUSPENDED, &md->flags); -flush_and_out: - if (r 
&& noflush)
-		/*
-		 * Because there may be already I/Os in the pushback list,
-		 * flush them before return.
-		 */
-		dm_queue_flush(md, DM_WQ_FLUSH_ALL, NULL);
-
 out:
 	if (r && md->suspended_bdev) {
 		bdput(md->suspended_bdev);
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index cd189da2b2fa..0ade60cdef42 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -62,15 +62,6 @@ void dm_put_target_type(struct target_type *t);
 int dm_target_iterate(void (*iter_func)(struct target_type *tt,
 					void *param), void *param);

-/*-----------------------------------------------------------------
- * Useful inlines.
- *---------------------------------------------------------------*/
-static inline int array_too_big(unsigned long fixed, unsigned long obj,
-				unsigned long num)
-{
-	return (num > (ULONG_MAX - fixed) / obj);
-}
-
 int dm_split_args(int *argc, char ***argvp, char *input);

 /*
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 268547dbfbd3..f26c1f9a475b 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -287,6 +287,8 @@ static int run(mddev_t *mddev)
 	int i;

 	conf_t *conf = kmalloc(sizeof(*conf), GFP_KERNEL);
+	if (!conf)
+		return -ENOMEM;

 	for (i=0; i<Modes; i++) {
 		atomic_set(&conf->counters[i], 0);
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index b9cbee688fae..3b90c5c924ec 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -16,16 +16,8 @@
    Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

-#include <linux/module.h>
-
-#include <linux/raid/md.h>
-#include <linux/slab.h>
 #include <linux/raid/linear.h>

-#define MAJOR_NR MD_MAJOR
-#define MD_DRIVER
-#define MD_PERSONALITY
-
 /*
  * find which device holds a particular offset
  */
@@ -33,16 +25,15 @@ static inline dev_info_t *which_dev(mddev_t *mddev, sector_t sector)
 {
 	dev_info_t *hash;
 	linear_conf_t *conf = mddev_to_conf(mddev);
-	sector_t block = sector >> 1;

 	/*
 	 * sector_div(a,b) returns the remainder and sets a to a/b
 	 */
-	block >>= conf->preshift;
-	(void)sector_div(block, conf->hash_spacing);
-	hash = conf->hash_table[block];
+	sector >>= conf->sector_shift;
+	(void)sector_div(sector, conf->spacing);
+	hash = conf->hash_table[sector];

-	while ((sector>>1) >= (hash->size + hash->offset))
+	while (sector >= hash->num_sectors + hash->start_sector)
 		hash++;
 	return hash;
 }
@@ -65,7 +56,7 @@ static int linear_mergeable_bvec(struct request_queue *q,
 	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);

 	dev0 = which_dev(mddev, sector);
-	maxsectors = (dev0->size << 1) - (sector - (dev0->offset<<1));
+	maxsectors = dev0->num_sectors - (sector - dev0->start_sector);

 	if (maxsectors < bio_sectors)
 		maxsectors = 0;
@@ -112,8 +103,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 	dev_info_t **table;
 	mdk_rdev_t *rdev;
 	int i, nb_zone, cnt;
-	sector_t min_spacing;
-	sector_t curr_offset;
+	sector_t min_sectors;
+	sector_t curr_sector;
 	struct list_head *tmp;

 	conf = kzalloc (sizeof (*conf) + raid_disks*sizeof(dev_info_t),
@@ -145,7 +136,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
 			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

-		disk->size = rdev->size;
+		disk->num_sectors = rdev->size * 2;
 		conf->array_sectors += rdev->size * 2;

 		cnt++;
@@ -155,34 +146,36 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
 		goto out;
 	}

-	min_spacing = conf->array_sectors / 2;
-	sector_div(min_spacing, PAGE_SIZE/sizeof(struct dev_info *));
+	min_sectors = conf->array_sectors;
+	sector_div(min_sectors,
PAGE_SIZE/sizeof(struct dev_info *)); + if (min_sectors == 0) + min_sectors = 1; - /* min_spacing is the minimum spacing that will fit the hash + /* min_sectors is the minimum spacing that will fit the hash * table in one PAGE. This may be much smaller than needed. * We find the smallest non-terminal set of consecutive devices - * that is larger than min_spacing as use the size of that as + * that is larger than min_sectors and use the size of that as * the actual spacing */ - conf->hash_spacing = conf->array_sectors / 2; + conf->spacing = conf->array_sectors; for (i=0; i < cnt-1 ; i++) { - sector_t sz = 0; + sector_t tmp = 0; int j; - for (j = i; j < cnt - 1 && sz < min_spacing; j++) - sz += conf->disks[j].size; - if (sz >= min_spacing && sz < conf->hash_spacing) - conf->hash_spacing = sz; + for (j = i; j < cnt - 1 && tmp < min_sectors; j++) + tmp += conf->disks[j].num_sectors; + if (tmp >= min_sectors && tmp < conf->spacing) + conf->spacing = tmp; } - /* hash_spacing may be too large for sector_div to work with, + /* spacing may be too large for sector_div to work with, * so we might need to pre-shift */ - conf->preshift = 0; + conf->sector_shift = 0; if (sizeof(sector_t) > sizeof(u32)) { - sector_t space = conf->hash_spacing; + sector_t space = conf->spacing; while (space > (sector_t)(~(u32)0)) { space >>= 1; - conf->preshift++; + conf->sector_shift++; } } /* @@ -194,9 +187,9 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) unsigned round; unsigned long base; - sz = conf->array_sectors >> (conf->preshift + 1); + sz = conf->array_sectors >> conf->sector_shift; sz += 1; /* force round-up */ - base = conf->hash_spacing >> conf->preshift; + base = conf->spacing >> conf->sector_shift; round = sector_div(sz, base); nb_zone = sz + (round ? 1 : 0); } @@ -211,32 +204,31 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) * Here we generate the linear hash table * First calculate the device offsets. */ - conf->disks[0].offset = 0; + conf->disks[0].start_sector = 0; for (i = 1; i < raid_disks; i++) - conf->disks[i].offset = - conf->disks[i-1].offset + - conf->disks[i-1].size; + conf->disks[i].start_sector = + conf->disks[i-1].start_sector + + conf->disks[i-1].num_sectors; table = conf->hash_table; - curr_offset = 0; i = 0; - for (curr_offset = 0; - curr_offset < conf->array_sectors / 2; - curr_offset += conf->hash_spacing) { + for (curr_sector = 0; + curr_sector < conf->array_sectors; + curr_sector += conf->spacing) { while (i < raid_disks-1 && - curr_offset >= conf->disks[i+1].offset) + curr_sector >= conf->disks[i+1].start_sector) i++; *table ++ = conf->disks + i; } - if (conf->preshift) { - conf->hash_spacing >>= conf->preshift; - /* round hash_spacing up so that when we divide by it, + if (conf->sector_shift) { + conf->spacing >>= conf->sector_shift; + /* round spacing up so that when we divide by it, * we err on the side of "too-low", which is safest. 
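linear_conf() above lays the devices out with a running prefix sum (start_sector) and then fills one hash-table slot per 'spacing' sectors, so which_dev() needs only one divide and a short forward scan. A userspace model of the fill and the lookup (device sizes and spacing invented):

#include <stdio.h>

#define NDEV 3

int main(void)
{
	unsigned long long num_sectors[NDEV] = { 100, 40, 60 };
	unsigned long long start_sector[NDEV];
	unsigned long long total = 0, spacing = 50, s;
	int table[16], nslots = 0, i = 0;

	/* running prefix sum, as in linear_conf() */
	for (int d = 0; d < NDEV; d++) {
		start_sector[d] = total;
		total += num_sectors[d];
	}

	/* one table slot per 'spacing' sectors */
	for (s = 0; s < total; s += spacing) {
		while (i < NDEV - 1 && s >= start_sector[i + 1])
			i++;
		table[nslots++] = i;
	}

	/* lookup, as in which_dev(): one divide, then a short scan */
	s = 130;
	i = table[s / spacing];
	while (s >= start_sector[i] + num_sectors[i])
		i++;
	printf("sector %llu -> device %d\n", s, i);	/* device 1 */
	return 0;
}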
*/ - conf->hash_spacing++; + conf->spacing++; } BUG_ON(table - conf->hash_table > nb_zone); @@ -317,7 +309,6 @@ static int linear_make_request (struct request_queue *q, struct bio *bio) const int rw = bio_data_dir(bio); mddev_t *mddev = q->queuedata; dev_info_t *tmp_dev; - sector_t block; int cpu; if (unlikely(bio_barrier(bio))) { @@ -332,29 +323,33 @@ static int linear_make_request (struct request_queue *q, struct bio *bio) part_stat_unlock(); tmp_dev = which_dev(mddev, bio->bi_sector); - block = bio->bi_sector >> 1; - if (unlikely(block >= (tmp_dev->size + tmp_dev->offset) - || block < tmp_dev->offset)) { + if (unlikely(bio->bi_sector >= (tmp_dev->num_sectors + + tmp_dev->start_sector) + || (bio->bi_sector < + tmp_dev->start_sector))) { char b[BDEVNAME_SIZE]; - printk("linear_make_request: Block %llu out of bounds on " - "dev %s size %llu offset %llu\n", - (unsigned long long)block, + printk("linear_make_request: Sector %llu out of bounds on " + "dev %s: %llu sectors, offset %llu\n", + (unsigned long long)bio->bi_sector, bdevname(tmp_dev->rdev->bdev, b), - (unsigned long long)tmp_dev->size, - (unsigned long long)tmp_dev->offset); + (unsigned long long)tmp_dev->num_sectors, + (unsigned long long)tmp_dev->start_sector); bio_io_error(bio); return 0; } if (unlikely(bio->bi_sector + (bio->bi_size >> 9) > - (tmp_dev->offset + tmp_dev->size)<<1)) { + tmp_dev->start_sector + tmp_dev->num_sectors)) { /* This bio crosses a device boundary, so we have to * split it. */ struct bio_pair *bp; + bp = bio_split(bio, - ((tmp_dev->offset + tmp_dev->size)<<1) - bio->bi_sector); + tmp_dev->start_sector + tmp_dev->num_sectors + - bio->bi_sector); + if (linear_make_request(q, &bp->bio1)) generic_make_request(&bp->bio1); if (linear_make_request(q, &bp->bio2)) @@ -364,7 +359,8 @@ static int linear_make_request (struct request_queue *q, struct bio *bio) } bio->bi_bdev = tmp_dev->rdev->bdev; - bio->bi_sector = bio->bi_sector - (tmp_dev->offset << 1) + tmp_dev->rdev->data_offset; + bio->bi_sector = bio->bi_sector - tmp_dev->start_sector + + tmp_dev->rdev->data_offset; return 1; } @@ -372,29 +368,6 @@ static int linear_make_request (struct request_queue *q, struct bio *bio) static void linear_status (struct seq_file *seq, mddev_t *mddev) { -#undef MD_DEBUG -#ifdef MD_DEBUG - int j; - linear_conf_t *conf = mddev_to_conf(mddev); - sector_t s = 0; - - seq_printf(seq, " "); - for (j = 0; j < mddev->raid_disks; j++) - { - char b[BDEVNAME_SIZE]; - s += conf->smallest_size; - seq_printf(seq, "[%s", - bdevname(conf->hash_table[j][0].rdev->bdev,b)); - - while (s > conf->hash_table[j][0].offset + - conf->hash_table[j][0].size) - seq_printf(seq, "/%s] ", - bdevname(conf->hash_table[j][1].rdev->bdev,b)); - else - seq_printf(seq, "] "); - } - seq_printf(seq, "\n"); -#endif seq_printf(seq, " %dk rounding", mddev->chunk_size/1024); } diff --git a/drivers/md/md.c b/drivers/md/md.c index 0a3a4bdcd4af..1b1d32694f6f 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -32,31 +32,21 @@ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ -#include <linux/module.h> -#include <linux/kernel.h> #include <linux/kthread.h> -#include <linux/linkage.h> #include <linux/raid/md.h> #include <linux/raid/bitmap.h> #include <linux/sysctl.h> #include <linux/buffer_head.h> /* for invalidate_bdev */ #include <linux/poll.h> -#include <linux/mutex.h> #include <linux/ctype.h> -#include <linux/freezer.h> - -#include <linux/init.h> - +#include <linux/hdreg.h> +#include <linux/proc_fs.h> +#include <linux/random.h> +#include <linux/reboot.h> #include <linux/file.h> - -#ifdef CONFIG_KMOD -#include <linux/kmod.h> -#endif - -#include <asm/unaligned.h> +#include <linux/delay.h> #define MAJOR_NR MD_MAJOR -#define MD_DRIVER /* 63 partitions with the alternate major number (mdp) */ #define MdpMinorShift 6 @@ -66,7 +56,7 @@ #ifndef MODULE -static void autostart_arrays (int part); +static void autostart_arrays(int part); #endif static LIST_HEAD(pers_list); @@ -212,7 +202,7 @@ static DEFINE_SPINLOCK(all_mddevs_lock); ) -static int md_fail_request (struct request_queue *q, struct bio *bio) +static int md_fail_request(struct request_queue *q, struct bio *bio) { bio_io_error(bio); return 0; @@ -232,6 +222,9 @@ static void mddev_put(mddev_t *mddev) list_del(&mddev->all_mddevs); spin_unlock(&all_mddevs_lock); blk_cleanup_queue(mddev->queue); + if (mddev->sysfs_state) + sysfs_put(mddev->sysfs_state); + mddev->sysfs_state = NULL; kobject_put(&mddev->kobj); } else spin_unlock(&all_mddevs_lock); @@ -1469,6 +1462,8 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) kobject_del(&rdev->kobj); goto fail; } + rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state"); + list_add_rcu(&rdev->same_set, &mddev->disks); bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk); return 0; @@ -1498,7 +1493,8 @@ static void unbind_rdev_from_array(mdk_rdev_t * rdev) printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); rdev->mddev = NULL; sysfs_remove_link(&rdev->kobj, "block"); - + sysfs_put(rdev->sysfs_state); + rdev->sysfs_state = NULL; /* We need to delay this, otherwise we can deadlock when * writing to 'remove' to "dev/state". We also need * to delay it due to rcu usage. @@ -1530,7 +1526,7 @@ static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared) if (err) { printk(KERN_ERR "md: could not bd_claim %s.\n", bdevname(bdev, b)); - blkdev_put(bdev); + blkdev_put(bdev, FMODE_READ|FMODE_WRITE); return err; } if (!shared) @@ -1546,7 +1542,7 @@ static void unlock_rdev(mdk_rdev_t *rdev) if (!bdev) MD_BUG(); bd_release(bdev); - blkdev_put(bdev); + blkdev_put(bdev, FMODE_READ|FMODE_WRITE); } void md_autodetect_dev(dev_t dev); @@ -1933,8 +1929,8 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len) err = 0; } - if (!err) - sysfs_notify(&rdev->kobj, NULL, "state"); + if (!err && rdev->sysfs_state) + sysfs_notify_dirent(rdev->sysfs_state); return err ? 
err : len;
 }
 static struct rdev_sysfs_entry rdev_state =
@@ -2029,7 +2025,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 			rdev->raid_disk = -1;
 			return err;
 		} else
-			sysfs_notify(&rdev->kobj, NULL, "state");
+			sysfs_notify_dirent(rdev->sysfs_state);
 		sprintf(nm, "rd%d", rdev->raid_disk);
 		if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm))
 			printk(KERN_WARNING
@@ -2046,7 +2042,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 		clear_bit(Faulty, &rdev->flags);
 		clear_bit(WriteMostly, &rdev->flags);
 		set_bit(In_sync, &rdev->flags);
-		sysfs_notify(&rdev->kobj, NULL, "state");
+		sysfs_notify_dirent(rdev->sysfs_state);
 	}
 	return len;
 }
@@ -2106,8 +2102,6 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)

 	if (strict_strtoull(buf, 10, &size) < 0)
 		return -EINVAL;
-	if (size < my_mddev->size)
-		return -EINVAL;
 	if (my_mddev->pers && rdev->raid_disk >= 0) {
 		if (my_mddev->persistent) {
 			size = super_types[my_mddev->major_version].
@@ -2118,9 +2112,9 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
 			size = (rdev->bdev->bd_inode->i_size >> 10);
 			size -= rdev->data_offset/2;
 		}
-		if (size < my_mddev->size)
-			return -EINVAL; /* component must fit device */
 	}
+	if (size < my_mddev->size)
+		return -EINVAL; /* component must fit device */

 	rdev->size = size;
 	if (size > oldsize && my_mddev->external) {
@@ -2406,12 +2400,11 @@ safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
 	int i;
 	unsigned long msec;
 	char buf[30];
-	char *e;
+
 	/* remove a period, and count digits after it */
 	if (len >= sizeof(buf))
 		return -EINVAL;
-	strlcpy(buf, cbuf, len);
-	buf[len] = 0;
+	strlcpy(buf, cbuf, sizeof(buf));
 	for (i=0; i<len; i++) {
 		if (dot) {
 			if (isdigit(buf[i])) {
@@ -2424,8 +2417,7 @@ safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
 			buf[i] = 0;
 		}
 	}
-	msec = simple_strtoul(buf, &e, 10);
-	if (e == buf || (*e && *e != '\n'))
+	if (strict_strtoul(buf, 10, &msec) < 0)
 		return -EINVAL;
 	msec = (msec * 1000) / scale;
 	if (msec == 0)
@@ -2727,9 +2719,9 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
 		break;
 	case read_auto:
 		if (mddev->pers) {
-			if (mddev->ro != 1)
+			if (mddev->ro == 0)
 				err = do_md_stop(mddev, 1, 0);
-			else
+			else if (mddev->ro == 1)
 				err = restart_array(mddev);
 			if (err == 0) {
 				mddev->ro = 2;
@@ -2784,7 +2776,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
 	if (err)
 		return err;
 	else {
-		sysfs_notify(&mddev->kobj, NULL, "array_state");
+		sysfs_notify_dirent(mddev->sysfs_state);
 		return len;
 	}
 }
@@ -2945,7 +2937,13 @@ metadata_store(mddev_t *mddev, const char *buf, size_t len)
 {
 	int major, minor;
 	char *e;
-	if (!list_empty(&mddev->disks))
+	/* Changing the details of 'external' metadata is
+	 * always permitted. Otherwise there must be
+	 * no devices attached to the array.
+	 */
+	if (mddev->external && strncmp(buf, "external:", 9) == 0)
+		;
+	else if (!list_empty(&mddev->disks))
 		return -EBUSY;

 	if (cmd_match(buf, "none")) {
@@ -3465,6 +3463,11 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data)
 	disk->fops = &md_fops;
 	disk->private_data = mddev;
 	disk->queue = mddev->queue;
+	/* Allow extended partitions. This makes the
+	 * 'mdp' device redundant, but we can't really
+	 * remove it now.
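A pattern repeated throughout this part of the patch: instead of calling sysfs_notify() with an attribute name, and paying a name lookup on every event, the sysfs dirent is resolved once with sysfs_get_dirent() when the object is set up, cached in rdev->sysfs_state / mddev->sysfs_state, poked cheaply with sysfs_notify_dirent(), and released with sysfs_put() at teardown. Condensed here into one place for reference; this is a kernel-context sketch, not a standalone program, though every call below appears in the hunks of this patch:

/* at setup time, e.g. md_probe() / bind_rdev_to_array(): resolve the
 * attribute's dirent once and cache it */
mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state");

/* on every state change: no name lookup, just a cheap poke */
if (mddev->sysfs_state)
	sysfs_notify_dirent(mddev->sysfs_state);

/* at teardown, e.g. mddev_put(): drop the cached reference */
if (mddev->sysfs_state)
	sysfs_put(mddev->sysfs_state);
mddev->sysfs_state = NULL;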
+ */ + disk->flags |= GENHD_FL_EXT_DEVT; add_disk(disk); mddev->gendisk = disk; error = kobject_init_and_add(&mddev->kobj, &md_ktype, @@ -3473,8 +3476,10 @@ static struct kobject *md_probe(dev_t dev, int *part, void *data) if (error) printk(KERN_WARNING "md: cannot register %s/md - name in use\n", disk->disk_name); - else + else { kobject_uevent(&mddev->kobj, KOBJ_ADD); + mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state"); + } return NULL; } @@ -3485,7 +3490,7 @@ static void md_safemode_timeout(unsigned long data) if (!atomic_read(&mddev->writes_pending)) { mddev->safemode = 1; if (mddev->external) - set_bit(MD_NOTIFY_ARRAY_STATE, &mddev->flags); + sysfs_notify_dirent(mddev->sysfs_state); } md_wakeup_thread(mddev->thread); } @@ -3527,17 +3532,12 @@ static int do_md_run(mddev_t * mddev) return -EINVAL; } /* - * chunk-size has to be a power of 2 and multiples of PAGE_SIZE + * chunk-size has to be a power of 2 */ if ( (1 << ffz(~chunk_size)) != chunk_size) { printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size); return -EINVAL; } - if (chunk_size < PAGE_SIZE) { - printk(KERN_ERR "too small chunk_size: %d < %ld\n", - chunk_size, PAGE_SIZE); - return -EINVAL; - } /* devices must have minimum size of one chunk */ rdev_for_each(rdev, tmp, mddev) { @@ -3555,12 +3555,10 @@ static int do_md_run(mddev_t * mddev) } } -#ifdef CONFIG_KMOD if (mddev->level != LEVEL_NONE) request_module("md-level-%d", mddev->level); else if (mddev->clevel[0]) request_module("md-%s", mddev->clevel); -#endif /* * Drop all container device buffers, from now on @@ -3593,7 +3591,7 @@ static int do_md_run(mddev_t * mddev) return -EINVAL; } } - sysfs_notify(&rdev->kobj, NULL, "state"); + sysfs_notify_dirent(rdev->sysfs_state); } md_probe(mddev->unit, NULL, NULL); @@ -3755,7 +3753,7 @@ static int do_md_run(mddev_t * mddev) mddev->changed = 1; md_new_event(mddev); - sysfs_notify(&mddev->kobj, NULL, "array_state"); + sysfs_notify_dirent(mddev->sysfs_state); sysfs_notify(&mddev->kobj, NULL, "sync_action"); sysfs_notify(&mddev->kobj, NULL, "degraded"); kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); @@ -3782,7 +3780,7 @@ static int restart_array(mddev_t *mddev) set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); md_wakeup_thread(mddev->sync_thread); - sysfs_notify(&mddev->kobj, NULL, "array_state"); + sysfs_notify_dirent(mddev->sysfs_state); return 0; } @@ -3862,7 +3860,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) module_put(mddev->pers->owner); mddev->pers = NULL; /* tell userspace to handle 'inactive' */ - sysfs_notify(&mddev->kobj, NULL, "array_state"); + sysfs_notify_dirent(mddev->sysfs_state); set_capacity(disk, 0); mddev->changed = 1; @@ -3942,13 +3940,14 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) mddev->degraded = 0; mddev->barriers_work = 0; mddev->safemode = 0; + kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); } else if (mddev->pers) printk(KERN_INFO "md: %s switched to read-only mode.\n", mdname(mddev)); err = 0; md_new_event(mddev); - sysfs_notify(&mddev->kobj, NULL, "array_state"); + sysfs_notify_dirent(mddev->sysfs_state); out: return err; } @@ -3971,10 +3970,10 @@ static void autorun_array(mddev_t *mddev) } printk("\n"); - err = do_md_run (mddev); + err = do_md_run(mddev); if (err) { printk(KERN_WARNING "md: do_md_run() returned %d\n", err); - do_md_stop (mddev, 0, 0); + do_md_stop(mddev, 0, 0); } } @@ -4312,7 +4311,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) if (err) 
export_rdev(rdev); else - sysfs_notify(&rdev->kobj, NULL, "state"); + sysfs_notify_dirent(rdev->sysfs_state); md_update_sb(mddev, 1); if (mddev->degraded) @@ -4333,7 +4332,7 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) if (!(info->state & (1<<MD_DISK_FAULTY))) { int err; - rdev = md_import_device (dev, -1, 0); + rdev = md_import_device(dev, -1, 0); if (IS_ERR(rdev)) { printk(KERN_WARNING "md: error, md_import_device() returned %ld\n", @@ -4415,7 +4414,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev) return -EINVAL; } - rdev = md_import_device (dev, -1, 0); + rdev = md_import_device(dev, -1, 0); if (IS_ERR(rdev)) { printk(KERN_WARNING "md: error, md_import_device() returned %ld\n", @@ -4800,7 +4799,7 @@ static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) return 0; } -static int md_ioctl(struct inode *inode, struct file *file, +static int md_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { int err = 0; @@ -4838,7 +4837,7 @@ static int md_ioctl(struct inode *inode, struct file *file, * Commands creating/starting a new array: */ - mddev = inode->i_bdev->bd_disk->private_data; + mddev = bdev->bd_disk->private_data; if (!mddev) { BUG(); @@ -4934,11 +4933,11 @@ static int md_ioctl(struct inode *inode, struct file *file, goto done_unlock; case STOP_ARRAY: - err = do_md_stop (mddev, 0, 1); + err = do_md_stop(mddev, 0, 1); goto done_unlock; case STOP_ARRAY_RO: - err = do_md_stop (mddev, 1, 1); + err = do_md_stop(mddev, 1, 1); goto done_unlock; } @@ -4953,7 +4952,7 @@ static int md_ioctl(struct inode *inode, struct file *file, if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) { if (mddev->ro == 2) { mddev->ro = 0; - sysfs_notify(&mddev->kobj, NULL, "array_state"); + sysfs_notify_dirent(mddev->sysfs_state); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); md_wakeup_thread(mddev->thread); } else { @@ -4987,7 +4986,7 @@ static int md_ioctl(struct inode *inode, struct file *file, goto done_unlock; case RUN_ARRAY: - err = do_md_run (mddev); + err = do_md_run(mddev); goto done_unlock; case SET_BITMAP_FILE: @@ -5011,13 +5010,13 @@ abort: return err; } -static int md_open(struct inode *inode, struct file *file) +static int md_open(struct block_device *bdev, fmode_t mode) { /* * Succeed if we can lock the mddev, which confirms that * it isn't being stopped right now. 
*/ - mddev_t *mddev = inode->i_bdev->bd_disk->private_data; + mddev_t *mddev = bdev->bd_disk->private_data; int err; if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1))) @@ -5028,14 +5027,14 @@ static int md_open(struct inode *inode, struct file *file) atomic_inc(&mddev->openers); mddev_unlock(mddev); - check_disk_change(inode->i_bdev); + check_disk_change(bdev); out: return err; } -static int md_release(struct inode *inode, struct file * file) +static int md_release(struct gendisk *disk, fmode_t mode) { - mddev_t *mddev = inode->i_bdev->bd_disk->private_data; + mddev_t *mddev = disk->private_data; BUG_ON(!mddev); atomic_dec(&mddev->openers); @@ -5063,7 +5062,7 @@ static struct block_device_operations md_fops = .owner = THIS_MODULE, .open = md_open, .release = md_release, - .ioctl = md_ioctl, + .locked_ioctl = md_ioctl, .getgeo = md_getgeo, .media_changed = md_media_changed, .revalidate_disk= md_revalidate, @@ -5425,11 +5424,11 @@ static int md_seq_show(struct seq_file *seq, void *v) seq_printf(seq, " super non-persistent"); if (mddev->pers) { - mddev->pers->status (seq, mddev); + mddev->pers->status(seq, mddev); seq_printf(seq, "\n "); if (mddev->pers->sync_request) { if (mddev->curr_resync > 2) { - status_resync (seq, mddev); + status_resync(seq, mddev); seq_printf(seq, "\n "); } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) seq_printf(seq, "\tresync=DELAYED\n "); @@ -5627,7 +5626,7 @@ void md_write_start(mddev_t *mddev, struct bio *bi) spin_unlock_irq(&mddev->write_lock); } if (did_change) - sysfs_notify(&mddev->kobj, NULL, "array_state"); + sysfs_notify_dirent(mddev->sysfs_state); wait_event(mddev->sb_wait, !test_bit(MD_CHANGE_CLEAN, &mddev->flags) && !test_bit(MD_CHANGE_PENDING, &mddev->flags)); @@ -5670,7 +5669,7 @@ int md_allow_write(mddev_t *mddev) mddev->safemode = 1; spin_unlock_irq(&mddev->write_lock); md_update_sb(mddev, 0); - sysfs_notify(&mddev->kobj, NULL, "array_state"); + sysfs_notify_dirent(mddev->sysfs_state); } else spin_unlock_irq(&mddev->write_lock); @@ -6063,9 +6062,6 @@ void md_check_recovery(mddev_t *mddev) if (mddev->bitmap) bitmap_daemon_work(mddev->bitmap); - if (test_and_clear_bit(MD_NOTIFY_ARRAY_STATE, &mddev->flags)) - sysfs_notify(&mddev->kobj, NULL, "array_state"); - if (mddev->ro) return; @@ -6118,7 +6114,7 @@ void md_check_recovery(mddev_t *mddev) mddev->safemode = 0; spin_unlock_irq(&mddev->write_lock); if (did_change) - sysfs_notify(&mddev->kobj, NULL, "array_state"); + sysfs_notify_dirent(mddev->sysfs_state); } if (mddev->flags) @@ -6126,7 +6122,7 @@ void md_check_recovery(mddev_t *mddev) rdev_for_each(rdev, rtmp, mddev) if (test_and_clear_bit(StateChanged, &rdev->flags)) - sysfs_notify(&rdev->kobj, NULL, "state"); + sysfs_notify_dirent(rdev->sysfs_state); if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && @@ -6236,7 +6232,7 @@ void md_check_recovery(mddev_t *mddev) void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev) { - sysfs_notify(&rdev->kobj, NULL, "state"); + sysfs_notify_dirent(rdev->sysfs_state); wait_event_timeout(rdev->blocked_wait, !test_bit(Blocked, &rdev->flags), msecs_to_jiffies(5000)); @@ -6260,7 +6256,7 @@ static int md_notify_reboot(struct notifier_block *this, * appears to still be in use. Hence * the '100'. 
*/ - do_md_stop (mddev, 1, 100); + do_md_stop(mddev, 1, 100); mddev_unlock(mddev); } /* @@ -6304,7 +6300,7 @@ static int __init md_init(void) raid_table_header = register_sysctl_table(raid_root_table); md_geninit(); - return (0); + return 0; } diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 8bb8794129b3..d4ac47d11279 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -19,16 +19,7 @@ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/spinlock.h> #include <linux/raid/multipath.h> -#include <linux/buffer_head.h> -#include <asm/atomic.h> - -#define MAJOR_NR MD_MAJOR -#define MD_DRIVER -#define MD_PERSONALITY #define MAX_WORK_PER_DISK 128 @@ -176,7 +167,7 @@ static int multipath_make_request (struct request_queue *q, struct bio * bio) mp_bh->bio = *bio; mp_bh->bio.bi_sector += multipath->rdev->data_offset; mp_bh->bio.bi_bdev = multipath->rdev->bdev; - mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST); + mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT); mp_bh->bio.bi_end_io = multipath_end_request; mp_bh->bio.bi_private = mp_bh; generic_make_request(&mp_bh->bio); @@ -402,7 +393,7 @@ static void multipathd (mddev_t *mddev) *bio = *(mp_bh->master_bio); bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset; bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; - bio->bi_rw |= (1 << BIO_RW_FAILFAST); + bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT); bio->bi_end_io = multipath_end_request; bio->bi_private = mp_bh; generic_make_request(bio); diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 53508a8a981d..8ac6488ad0dc 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -18,13 +18,8 @@ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ -#include <linux/module.h> #include <linux/raid/raid0.h> -#define MAJOR_NR MD_MAJOR -#define MD_DRIVER -#define MD_PERSONALITY - static void raid0_unplug(struct request_queue *q) { mddev_t *mddev = q->queuedata; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index b9764429d856..9c788e2489b1 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -32,6 +32,7 @@ */ #include "dm-bio-list.h" +#include <linux/delay.h> #include <linux/raid/raid1.h> #include <linux/raid/bitmap.h> diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 8bdc9bfc2887..970a96ef9b18 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -19,6 +19,7 @@ */ #include "dm-bio-list.h" +#include <linux/delay.h> #include <linux/raid/raid10.h> #include <linux/raid/bitmap.h> @@ -1136,7 +1137,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) if (!enough(conf)) return -EINVAL; - if (rdev->raid_disk) + if (rdev->raid_disk >= 0) first = last = rdev->raid_disk; if (rdev->saved_raid_disk >= 0 && @@ -2028,8 +2029,9 @@ static int run(mddev_t *mddev) int nc, fc, fo; sector_t stride, size; - if (mddev->chunk_size == 0) { - printk(KERN_ERR "md/raid10: non-zero chunk size required.\n"); + if (mddev->chunk_size < PAGE_SIZE) { + printk(KERN_ERR "md/raid10: chunk size must be " + "at least PAGE_SIZE(%ld).\n", PAGE_SIZE); return -EINVAL; } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index ae16794bef20..a36a7435edf5 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -43,12 +43,7 @@ * miss any bits. 
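raid10's run() above now insists on chunk_size >= PAGE_SIZE (raid5 gains the same check below), on top of the power-of-two test that do_md_run() still applies via (1 << ffz(~chunk_size)) != chunk_size. A userspace rendering of both tests; ffz() is a kernel helper, so it is written out here, and PAGE_SIZE is assumed to be 4096:

#include <stdio.h>

/* ffz(~x) is the index of the lowest set bit of x; written out
 * because ffz() is a kernel helper */
static int ffz_demo(unsigned x)
{
	int i = 0;

	while (x & 1) {
		x >>= 1;
		i++;
	}
	return i;
}

static int chunk_ok(int chunk_size, long page_size)
{
	if ((1 << ffz_demo(~chunk_size)) != chunk_size)
		return 0;		/* not a power of two */
	if (chunk_size < page_size)
		return 0;		/* raid5/raid10 minimum */
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       chunk_ok(65536, 4096),	/* 1: 64k chunks are fine   */
	       chunk_ok(3000, 4096),	/* 0: not a power of two    */
	       chunk_ok(2048, 4096));	/* 0: smaller than a page   */
	return 0;
}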
*/ -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/highmem.h> -#include <linux/bitops.h> #include <linux/kthread.h> -#include <asm/atomic.h> #include "raid6.h" #include <linux/raid/bitmap.h> @@ -275,7 +270,7 @@ static int grow_buffers(struct stripe_head *sh, int num) return 0; } -static void raid5_build_block (struct stripe_head *sh, int i); +static void raid5_build_block(struct stripe_head *sh, int i); static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks) { @@ -1151,7 +1146,7 @@ static void raid5_end_read_request(struct bio * bi, int error) release_stripe(sh); } -static void raid5_end_write_request (struct bio *bi, int error) +static void raid5_end_write_request(struct bio *bi, int error) { struct stripe_head *sh = bi->bi_private; raid5_conf_t *conf = sh->raid_conf; @@ -1183,7 +1178,7 @@ static void raid5_end_write_request (struct bio *bi, int error) static sector_t compute_blocknr(struct stripe_head *sh, int i); -static void raid5_build_block (struct stripe_head *sh, int i) +static void raid5_build_block(struct stripe_head *sh, int i) { struct r5dev *dev = &sh->dev[i]; @@ -1221,10 +1216,10 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev) set_bit(MD_RECOVERY_INTR, &mddev->recovery); } set_bit(Faulty, &rdev->flags); - printk (KERN_ALERT - "raid5: Disk failure on %s, disabling device.\n" - "raid5: Operation continuing on %d devices.\n", - bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); + printk(KERN_ALERT + "raid5: Disk failure on %s, disabling device.\n" + "raid5: Operation continuing on %d devices.\n", + bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); } } @@ -1320,8 +1315,8 @@ static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks, *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks; break; default: - printk (KERN_CRIT "raid6: unsupported algorithm %d\n", - conf->algorithm); + printk(KERN_CRIT "raid6: unsupported algorithm %d\n", + conf->algorithm); } break; } @@ -1396,8 +1391,8 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i) } break; default: - printk (KERN_CRIT "raid6: unsupported algorithm %d\n", - conf->algorithm); + printk(KERN_CRIT "raid6: unsupported algorithm %d\n", + conf->algorithm); } break; } @@ -1405,7 +1400,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i) chunk_number = stripe * data_disks + i; r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; - check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf); + check = raid5_compute_sector(r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf); if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) { printk(KERN_ERR "compute_blocknr: map not correct\n"); return 0; @@ -4012,6 +4007,13 @@ static int run(mddev_t *mddev) return -EIO; } + if (mddev->chunk_size < PAGE_SIZE) { + printk(KERN_ERR "md/raid5: chunk_size must be at least " + "PAGE_SIZE but %d < %ld\n", + mddev->chunk_size, PAGE_SIZE); + return -EINVAL; + } + if (mddev->reshape_position != MaxSector) { /* Check that we can continue the reshape. 
* Currently only disks can change, it must @@ -4289,7 +4291,7 @@ static int stop(mddev_t *mddev) } #ifdef DEBUG -static void print_sh (struct seq_file *seq, struct stripe_head *sh) +static void print_sh(struct seq_file *seq, struct stripe_head *sh) { int i; @@ -4305,7 +4307,7 @@ static void print_sh (struct seq_file *seq, struct stripe_head *sh) seq_printf(seq, "\n"); } -static void printall (struct seq_file *seq, raid5_conf_t *conf) +static void printall(struct seq_file *seq, raid5_conf_t *conf) { struct stripe_head *sh; struct hlist_node *hn; @@ -4323,7 +4325,7 @@ static void printall (struct seq_file *seq, raid5_conf_t *conf) } #endif -static void status (struct seq_file *seq, mddev_t *mddev) +static void status(struct seq_file *seq, mddev_t *mddev) { raid5_conf_t *conf = (raid5_conf_t *) mddev->private; int i; diff --git a/drivers/md/raid6.h b/drivers/md/raid6.h index 31cbee71365f..98dcde88470e 100644 --- a/drivers/md/raid6.h +++ b/drivers/md/raid6.h @@ -18,15 +18,6 @@ /* Set to 1 to use kernel-wide empty_zero_page */ #define RAID6_USE_EMPTY_ZERO_PAGE 0 -#include <linux/module.h> -#include <linux/stddef.h> -#include <linux/compiler.h> -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/mempool.h> -#include <linux/list.h> -#include <linux/vmalloc.h> #include <linux/raid/md.h> #include <linux/raid/raid5.h> |
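Taken together, the dm_rh_* interface introduced at the top of this patch expects a client in the dm-raid1 mould to supply three callbacks and drive the write and recovery paths roughly as sketched below. This is a kernel-context illustration only: struct mirror_set and the sequential flow are invented stand-ins (a real client runs these steps from its worker thread and kcopyd completion, not back to back), and only the dm_rh_* calls come from the exported interface above.

struct mirror_set {
	struct dm_region_hash *rh;
	/* ... */
};

/* write path: mark each bio's region dirty before submission; when a
 * write completes, dm_rh_dec() moves its region back towards the
 * clean list and wakes the worker */
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	struct bio *bio;

	dm_rh_inc_pending(ms->rh, writes);
	for (bio = writes->head; bio; bio = bio->bi_next)
		/* ... map and submit; the endio path then calls
		 * dm_rh_dec(ms->rh, dm_rh_bio_to_region(ms->rh, bio)); */;
}

/* recovery path: quiesce up to max_recovery regions, copy each one,
 * then let dm_rh_update_states() retire the results and schedule any
 * bios that were delayed with dm_rh_delay() */
static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;

	dm_rh_recovery_prepare(ms->rh);
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		int err = 0;	/* ... hand the region to kcopyd ... */

		dm_rh_recovery_end(reg, !err);
	}
	dm_rh_update_states(ms->rh, 1 /* errors_handled */);
}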