Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/dm-crypt.c        | 78
-rw-r--r--  drivers/md/dm-ioctl.c        |  9
-rw-r--r--  drivers/md/dm-mpath.c        | 20
-rw-r--r--  drivers/md/dm-raid1.c        | 26
-rw-r--r--  drivers/md/dm-round-robin.c  |  2
-rw-r--r--  drivers/md/dm-snap.c         | 15
-rw-r--r--  drivers/md/dm.c              |  8
-rw-r--r--  drivers/md/kcopyd.c          |  6
-rw-r--r--  drivers/md/md.c              |  7
-rw-r--r--  drivers/md/raid5.c           |  6
10 files changed, 130 insertions(+), 47 deletions(-)
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 08a40f4e4f60..a1086ee8cccd 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -20,6 +20,7 @@
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
+#include <asm/unaligned.h>
#include "dm.h"
@@ -85,7 +86,10 @@ struct crypt_config {
*/
struct crypt_iv_operations *iv_gen_ops;
char *iv_mode;
- struct crypto_cipher *iv_gen_private;
+ union {
+ struct crypto_cipher *essiv_tfm;
+ int benbi_shift;
+ } iv_gen_private;
sector_t iv_offset;
unsigned int iv_size;
@@ -101,7 +105,7 @@ struct crypt_config {
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES 8
-static kmem_cache_t *_crypt_io_pool;
+static struct kmem_cache *_crypt_io_pool;
/*
* Different IV generation algorithms:
@@ -113,6 +117,9 @@ static kmem_cache_t *_crypt_io_pool;
* encrypted with the bulk cipher using a salt as key. The salt
* should be derived from the bulk cipher's key via hashing.
*
+ * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
+ * (needed for LRW-32-AES and possible other narrow block modes)
+ *
* plumb: unimplemented, see:
* http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
*/
@@ -191,21 +198,61 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
}
kfree(salt);
- cc->iv_gen_private = essiv_tfm;
+ cc->iv_gen_private.essiv_tfm = essiv_tfm;
return 0;
}
static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
- crypto_free_cipher(cc->iv_gen_private);
- cc->iv_gen_private = NULL;
+ crypto_free_cipher(cc->iv_gen_private.essiv_tfm);
+ cc->iv_gen_private.essiv_tfm = NULL;
}
static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
memset(iv, 0, cc->iv_size);
*(u64 *)iv = cpu_to_le64(sector);
- crypto_cipher_encrypt_one(cc->iv_gen_private, iv, iv);
+ crypto_cipher_encrypt_one(cc->iv_gen_private.essiv_tfm, iv, iv);
+ return 0;
+}
+
+static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
+ const char *opts)
+{
+ unsigned int bs = crypto_blkcipher_blocksize(cc->tfm);
+ int log = long_log2(bs);
+
+ /* we need to calculate how far we must shift the sector count
+ * to get the cipher block count, we use this shift in _gen */
+
+ if (1 << log != bs) {
+ ti->error = "cypher blocksize is not a power of 2";
+ return -EINVAL;
+ }
+
+ if (log > 9) {
+ ti->error = "cypher blocksize is > 512";
+ return -EINVAL;
+ }
+
+ cc->iv_gen_private.benbi_shift = 9 - log;
+
+ return 0;
+}
+
+static void crypt_iv_benbi_dtr(struct crypt_config *cc)
+{
+}
+
+static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
+{
+ __be64 val;
+
+ memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */
+
+ val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi_shift) + 1);
+ put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));
+
return 0;
}
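
The benbi generator turns a 512-byte sector number into a count of narrow cipher blocks, numbered from 1, and stores it big-endian in the last eight bytes of the IV. A minimal userspace sketch of that computation (IV_SIZE, the shift value, and put_be64 are illustrative stand-ins, not kernel names):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IV_SIZE 16                       /* e.g. AES block size */

/* Byte-wise big-endian store: works at any alignment, like the
 * put_unaligned() call above. */
static void put_be64(uint8_t *p, uint64_t v)
{
        int i;

        for (i = 7; i >= 0; i--) {
                p[i] = (uint8_t)(v & 0xff);
                v >>= 8;
        }
}

static void benbi_gen(uint8_t *iv, uint64_t sector, int benbi_shift)
{
        memset(iv, 0, IV_SIZE - sizeof(uint64_t));
        /* 64-bit big-endian narrow-block count, starting at 1 */
        put_be64(iv + IV_SIZE - sizeof(uint64_t),
                 (sector << benbi_shift) + 1);
}

int main(void)
{
        uint8_t iv[IV_SIZE];
        int log = 4;                     /* log2(16-byte narrow block) */
        int shift = 9 - log;             /* as computed in the ctr above */
        int i;

        benbi_gen(iv, 3, shift);         /* sector 3 -> block (3<<5)+1 = 97 */
        for (i = 0; i < IV_SIZE; i++)
                printf("%02x", iv[i]);
        printf("\n");                    /* ends ...0000000000000061 */
        return 0;
}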
@@ -219,13 +266,18 @@ static struct crypt_iv_operations crypt_iv_essiv_ops = {
.generator = crypt_iv_essiv_gen
};
+static struct crypt_iv_operations crypt_iv_benbi_ops = {
+ .ctr = crypt_iv_benbi_ctr,
+ .dtr = crypt_iv_benbi_dtr,
+ .generator = crypt_iv_benbi_gen
+};
static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
struct scatterlist *in, unsigned int length,
int write, sector_t sector)
{
- u8 iv[cc->iv_size];
+ u8 iv[cc->iv_size] __attribute__ ((aligned(__alignof__(u64))));
struct blkcipher_desc desc = {
.tfm = cc->tfm,
.info = iv,
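
The IV above is a variable-length stack array; the plain and essiv generators store through a u64 *, so the buffer needs u64 alignment, which a bare u8 array does not guarantee. A sketch of the same trick under GCC/Clang (fill_iv is a hypothetical name; the kernel also builds with -fno-strict-aliasing, which the cast relies on):

#include <stdint.h>
#include <stdio.h>

static void fill_iv(unsigned iv_size, uint64_t sector)
{
        /* A u8 VLA only has alignment 1 by default; the attribute makes
         * the u64 store below safe. */
        uint8_t iv[iv_size] __attribute__((aligned(__alignof__(uint64_t))));

        *(uint64_t *)iv = sector;        /* what the plain/essiv path does */
        printf("iv at %p holds %llu\n",
               (void *)iv, (unsigned long long)*(uint64_t *)iv);
}

int main(void)
{
        fill_iv(16, 42);
        return 0;
}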
@@ -458,11 +510,11 @@ static void dec_pending(struct crypt_io *io, int error)
* interrupt context.
*/
static struct workqueue_struct *_kcryptd_workqueue;
-static void kcryptd_do_work(void *data);
+static void kcryptd_do_work(struct work_struct *work);
static void kcryptd_queue_io(struct crypt_io *io)
{
- INIT_WORK(&io->work, kcryptd_do_work, io);
+ INIT_WORK(&io->work, kcryptd_do_work);
queue_work(_kcryptd_workqueue, &io->work);
}
@@ -618,9 +670,9 @@ static void process_read_endio(struct crypt_io *io)
dec_pending(io, crypt_convert(cc, &ctx));
}
-static void kcryptd_do_work(void *data)
+static void kcryptd_do_work(struct work_struct *work)
{
- struct crypt_io *io = data;
+ struct crypt_io *io = container_of(work, struct crypt_io, work);
if (io->post_process)
process_read_endio(io);
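
The kcryptd conversion follows the 2.6.20 workqueue API change: work handlers no longer receive a void *data argument, so each handler recovers its containing object from the embedded work_struct via container_of. A self-contained userspace analogue (struct crypt_io_like and its fields are illustrative; container_of is defined locally the way the kernel defines it):

#include <stddef.h>
#include <stdio.h>

/* Simplified form of the kernel macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };     /* stand-in for the kernel type */

struct crypt_io_like {
        int post_process;
        struct work_struct work;         /* embedded, as in struct crypt_io */
};

static void do_work(struct work_struct *work)
{
        struct crypt_io_like *io =
                container_of(work, struct crypt_io_like, work);

        printf("post_process = %d\n", io->post_process);
}

int main(void)
{
        struct crypt_io_like io = { .post_process = 1 };

        do_work(&io.work);               /* the queue passes only &io->work */
        return 0;
}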
@@ -768,7 +820,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
cc->tfm = tfm;
/*
- * Choose ivmode. Valid modes: "plain", "essiv:<esshash>".
+ * Choose ivmode. Valid modes: "plain", "essiv:<esshash>", "benbi".
* See comments at iv code
*/
@@ -778,6 +830,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
cc->iv_gen_ops = &crypt_iv_plain_ops;
else if (strcmp(ivmode, "essiv") == 0)
cc->iv_gen_ops = &crypt_iv_essiv_ops;
+ else if (strcmp(ivmode, "benbi") == 0)
+ cc->iv_gen_ops = &crypt_iv_benbi_ops;
else {
ti->error = "Invalid IV mode";
goto bad2;
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index d13bb15a8a02..4510ad8f971c 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -606,9 +606,14 @@ static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
return __get_name_cell(param->name);
md = dm_get_md(huge_decode_dev(param->dev));
- if (md)
- mdptr = dm_get_mdptr(md);
+ if (!md)
+ goto out;
+ mdptr = dm_get_mdptr(md);
+ if (!mdptr)
+ dm_put(md);
+
+out:
return mdptr;
}
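
The dm-ioctl change makes the reference counting symmetric: dm_get_md() takes a reference, and if the subsequent dm_get_mdptr() lookup yields nothing, that reference must be dropped before returning, or the device can never go away. A hedged sketch of the pattern with stand-in names (obj_get/obj_put/find_private are illustrative, not kernel APIs):

#include <stdio.h>

struct obj {
        int refcount;
        void *private;                   /* NULL while being torn down */
};

static struct obj *obj_get(struct obj *o)
{
        if (o)
                o->refcount++;
        return o;
}

static void obj_put(struct obj *o)
{
        o->refcount--;
}

static void *find_private(struct obj *o)
{
        void *priv = NULL;

        if (!obj_get(o))
                return NULL;

        priv = o->private;
        if (!priv)
                obj_put(o);              /* would otherwise leak a reference */

        return priv;                     /* on success the caller holds it */
}

int main(void)
{
        struct obj dying = { .refcount = 1, .private = NULL };

        if (!find_private(&dying))
                printf("lookup failed, refcount back to %d\n",
                       dying.refcount);
        return 0;
}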
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index d754e0bc6e90..cf8bf052138e 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -101,11 +101,11 @@ typedef int (*action_fn) (struct pgpath *pgpath);
#define MIN_IOS 256 /* Mempool size */
-static kmem_cache_t *_mpio_cache;
+static struct kmem_cache *_mpio_cache;
struct workqueue_struct *kmultipathd;
-static void process_queued_ios(void *data);
-static void trigger_event(void *data);
+static void process_queued_ios(struct work_struct *work);
+static void trigger_event(struct work_struct *work);
/*-----------------------------------------------
@@ -173,8 +173,8 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
INIT_LIST_HEAD(&m->priority_groups);
spin_lock_init(&m->lock);
m->queue_io = 1;
- INIT_WORK(&m->process_queued_ios, process_queued_ios, m);
- INIT_WORK(&m->trigger_event, trigger_event, m);
+ INIT_WORK(&m->process_queued_ios, process_queued_ios);
+ INIT_WORK(&m->trigger_event, trigger_event);
m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
if (!m->mpio_pool) {
kfree(m);
@@ -379,9 +379,10 @@ static void dispatch_queued_ios(struct multipath *m)
}
}
-static void process_queued_ios(void *data)
+static void process_queued_ios(struct work_struct *work)
{
- struct multipath *m = (struct multipath *) data;
+ struct multipath *m =
+ container_of(work, struct multipath, process_queued_ios);
struct hw_handler *hwh = &m->hw_handler;
struct pgpath *pgpath = NULL;
unsigned init_required = 0, must_queue = 1;
@@ -421,9 +422,10 @@ out:
* An event is triggered whenever a path is taken out of use.
* Includes path failure and PG bypass.
*/
-static void trigger_event(void *data)
+static void trigger_event(struct work_struct *work)
{
- struct multipath *m = (struct multipath *) data;
+ struct multipath *m =
+ container_of(work, struct multipath, trigger_event);
dm_table_event(m->ti->table);
}
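
struct multipath embeds two work items, and container_of resolves each handler against its own member, so no shared data pointer is needed to tell them apart. A userspace sketch (struct multipath_like and the handler bodies are illustrative):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };

struct multipath_like {
        const char *name;
        struct work_struct process_queued_ios;   /* two embedded work items */
        struct work_struct trigger_event;
};

static void process_queued_ios(struct work_struct *work)
{
        struct multipath_like *m =
                container_of(work, struct multipath_like, process_queued_ios);

        printf("%s: processing queued ios\n", m->name);
}

static void trigger_event(struct work_struct *work)
{
        struct multipath_like *m =
                container_of(work, struct multipath_like, trigger_event);

        printf("%s: event\n", m->name);
}

int main(void)
{
        struct multipath_like m = { .name = "mpath0" };

        /* Each handler maps its work pointer back via the right member. */
        process_queued_ios(&m.process_queued_ios);
        trigger_event(&m.trigger_event);
        return 0;
}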
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 659224cb7c53..fc8cbb168e3e 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -24,6 +24,7 @@
static struct workqueue_struct *_kmirrord_wq;
static struct work_struct _kmirrord_work;
+static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);
static inline void wake(void)
{
@@ -83,6 +84,7 @@ struct region_hash {
struct list_head *buckets;
spinlock_t region_lock;
+ atomic_t recovery_in_flight;
struct semaphore recovery_count;
struct list_head clean_regions;
struct list_head quiesced_regions;
@@ -191,6 +193,7 @@ static int rh_init(struct region_hash *rh, struct mirror_set *ms,
spin_lock_init(&rh->region_lock);
sema_init(&rh->recovery_count, 0);
+ atomic_set(&rh->recovery_in_flight, 0);
INIT_LIST_HEAD(&rh->clean_regions);
INIT_LIST_HEAD(&rh->quiesced_regions);
INIT_LIST_HEAD(&rh->recovered_regions);
@@ -382,6 +385,8 @@ static void rh_update_states(struct region_hash *rh)
rh->log->type->clear_region(rh->log, reg->key);
rh->log->type->complete_resync_work(rh->log, reg->key, 1);
dispatch_bios(rh->ms, &reg->delayed_bios);
+ if (atomic_dec_and_test(&rh->recovery_in_flight))
+ wake_up_all(&_kmirrord_recovery_stopped);
up(&rh->recovery_count);
mempool_free(reg, rh->region_pool);
}
@@ -502,11 +507,21 @@ static int __rh_recovery_prepare(struct region_hash *rh)
static void rh_recovery_prepare(struct region_hash *rh)
{
- while (!down_trylock(&rh->recovery_count))
+ /* Extra reference to avoid race with rh_stop_recovery */
+ atomic_inc(&rh->recovery_in_flight);
+
+ while (!down_trylock(&rh->recovery_count)) {
+ atomic_inc(&rh->recovery_in_flight);
if (__rh_recovery_prepare(rh) <= 0) {
+ atomic_dec(&rh->recovery_in_flight);
up(&rh->recovery_count);
break;
}
+ }
+
+ /* Drop the extra reference */
+ if (atomic_dec_and_test(&rh->recovery_in_flight))
+ wake_up_all(&_kmirrord_recovery_stopped);
}
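
The extra reference taken before the loop is the usual bias trick: it keeps recovery_in_flight from momentarily reaching zero between individual recoveries, so a waiter in mirror_postsuspend() cannot wake early. A userspace analogue using a mutex and condition variable in place of the atomic counter and wait queue (all names here are stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t stopped = PTHREAD_COND_INITIALIZER;
static int in_flight;

static void begin_work(void)
{
        pthread_mutex_lock(&lock);
        in_flight++;
        pthread_mutex_unlock(&lock);
}

static void end_work(void)
{
        pthread_mutex_lock(&lock);
        if (--in_flight == 0)
                pthread_cond_broadcast(&stopped);   /* wake_up_all() */
        pthread_mutex_unlock(&lock);
}

static void prepare_batch(int n)
{
        int i;

        begin_work();            /* extra reference: holds the count > 0 */
        for (i = 0; i < n; i++)
                begin_work();    /* one reference per recovery started */
        end_work();              /* drop the extra reference */
}

static void wait_for_quiesce(void)
{
        pthread_mutex_lock(&lock);
        while (in_flight)        /* wait_event(..., !atomic_read(...)) */
                pthread_cond_wait(&stopped, &lock);
        pthread_mutex_unlock(&lock);
        printf("all recovery complete\n");
}

int main(void)
{
        int i;

        prepare_batch(3);
        for (i = 0; i < 3; i++)
                end_work();      /* each recovery completing */
        wait_for_quiesce();
        return 0;
}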
/*
@@ -868,7 +883,7 @@ static void do_mirror(struct mirror_set *ms)
do_writes(ms, &writes);
}
-static void do_work(void *ignored)
+static void do_work(struct work_struct *ignored)
{
struct mirror_set *ms;
@@ -1177,6 +1192,11 @@ static void mirror_postsuspend(struct dm_target *ti)
struct dirty_log *log = ms->rh.log;
rh_stop_recovery(&ms->rh);
+
+ /* Wait for all I/O we generated to complete */
+ wait_event(_kmirrord_recovery_stopped,
+ !atomic_read(&ms->rh.recovery_in_flight));
+
if (log->type->suspend && log->type->suspend(log))
/* FIXME: need better error handling */
DMWARN("log suspend failed");
@@ -1249,7 +1269,7 @@ static int __init dm_mirror_init(void)
dm_dirty_log_exit();
return r;
}
- INIT_WORK(&_kmirrord_work, do_work, NULL);
+ INIT_WORK(&_kmirrord_work, do_work);
r = dm_register_target(&mirror_target);
if (r < 0) {
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-round-robin.c
index c5a16c550122..6f9fcd4db9b5 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-round-robin.c
@@ -136,7 +136,7 @@ static int rr_add_path(struct path_selector *ps, struct path *path,
path->pscontext = pi;
- list_add(&pi->list, &s->valid_paths);
+ list_add_tail(&pi->list, &s->valid_paths);
return 0;
}
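
list_add() inserts at the head of the list, so round-robin previously walked paths in the reverse of the order they were added; list_add_tail() preserves registration order. A self-contained sketch with simplified versions of the kernel's list helpers:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void __list_add(struct list_head *n, struct list_head *prev,
                       struct list_head *next)
{
        next->prev = n;
        n->next = next;
        n->prev = prev;
        prev->next = n;
}

/* Insert at the front: iteration sees the *last* path added first. */
static void list_add(struct list_head *n, struct list_head *h)
{
        __list_add(n, h, h->next);
}

/* Insert at the back: iteration order matches registration order. */
static void list_add_tail(struct list_head *n, struct list_head *h)
{
        __list_add(n, h->prev, h);
}

struct path_info { int id; struct list_head list; };

int main(void)
{
        struct path_info p[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };
        struct list_head head, *l;
        int i;

        list_init(&head);
        for (i = 0; i < 3; i++)
                list_add_tail(&p[i].list, &head);    /* the fixed call;
                                                        list_add() would
                                                        reverse the output */

        for (l = head.next; l != &head; l = l->next) {
                struct path_info *pi = (struct path_info *)
                        ((char *)l - offsetof(struct path_info, list));
                printf("path %d\n", pi->id);         /* 0, 1, 2 */
        }
        return 0;
}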
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 5281e0094072..b0ce2ce82278 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -40,7 +40,7 @@
#define SNAPSHOT_PAGES 256
struct workqueue_struct *ksnapd;
-static void flush_queued_bios(void *data);
+static void flush_queued_bios(struct work_struct *work);
struct pending_exception {
struct exception e;
@@ -88,8 +88,8 @@ struct pending_exception {
* Hash table mapping origin volumes to lists of snapshots and
* a lock to protect it
*/
-static kmem_cache_t *exception_cache;
-static kmem_cache_t *pending_cache;
+static struct kmem_cache *exception_cache;
+static struct kmem_cache *pending_cache;
static mempool_t *pending_pool;
/*
@@ -228,7 +228,7 @@ static int init_exception_table(struct exception_table *et, uint32_t size)
return 0;
}
-static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem)
+static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
struct list_head *slot;
struct exception *ex, *next;
@@ -528,7 +528,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
bio_list_init(&s->queued_bios);
- INIT_WORK(&s->queued_bios_work, flush_queued_bios, s);
+ INIT_WORK(&s->queued_bios_work, flush_queued_bios);
/* Add snapshot to the list of snapshots for this origin */
/* Exceptions aren't triggered till snapshot_resume() is called */
@@ -603,9 +603,10 @@ static void flush_bios(struct bio *bio)
}
}
-static void flush_queued_bios(void *data)
+static void flush_queued_bios(struct work_struct *work)
{
- struct dm_snapshot *s = (struct dm_snapshot *) data;
+ struct dm_snapshot *s =
+ container_of(work, struct dm_snapshot, queued_bios_work);
struct bio *queued_bios;
unsigned long flags;
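
flush_queued_bios() uses the standard drain pattern: detach the whole queued list while holding the lock, then submit the bios with the lock dropped. A userspace sketch of that pattern (struct bio here is a stand-in, a mutex replaces the spinlock, and a LIFO stack keeps the sketch short where the kernel's bio_list is FIFO):

#include <pthread.h>
#include <stdio.h>

struct bio { int n; struct bio *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct bio *queued;

static void queue_bio(struct bio *b)
{
        pthread_mutex_lock(&lock);
        b->next = queued;
        queued = b;
        pthread_mutex_unlock(&lock);
}

static void flush_queued(void)
{
        struct bio *list, *b;

        pthread_mutex_lock(&lock);
        list = queued;                  /* detach the whole list ... */
        queued = NULL;
        pthread_mutex_unlock(&lock);    /* ... then work with no lock held */

        for (b = list; b; b = b->next)
                printf("submitting bio %d\n", b->n);
}

int main(void)
{
        struct bio a = { .n = 1 }, b = { .n = 2 };

        queue_bio(&a);
        queue_bio(&b);
        flush_queued();
        return 0;
}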
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b5764a86c8b5..7ec1b112a6d5 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -121,8 +121,8 @@ struct mapped_device {
};
#define MIN_IOS 256
-static kmem_cache_t *_io_cache;
-static kmem_cache_t *_tio_cache;
+static struct kmem_cache *_io_cache;
+static struct kmem_cache *_tio_cache;
static int __init local_init(void)
{
@@ -1285,7 +1285,7 @@ int dm_suspend(struct mapped_device *md, int do_lockfs)
down(&md->suspend_lock);
if (dm_suspended(md))
- goto out;
+ goto out_unlock;
map = dm_get_table(md);
@@ -1361,6 +1361,8 @@ out:
}
dm_table_put(map);
+
+out_unlock:
up(&md->suspend_lock);
return r;
}
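
Before this fix, the already-suspended early exit jumped to the out label, which also ran dm_table_put() on a table pointer that had not yet been fetched; the new out_unlock label releases only the semaphore. A sketch of the layered-exit idiom, where each label undoes exactly what was acquired before the jump (all names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t suspend_lock = PTHREAD_MUTEX_INITIALIZER;

struct table { int refs; };
static struct table the_table = { .refs = 1 };

static struct table *get_table(void) { the_table.refs++; return &the_table; }
static void put_table(struct table *t) { t->refs--; }

/* The already-suspended path never took a table reference, so it must
 * skip put_table() and only drop the lock. */
static int do_suspend(int already_suspended)
{
        struct table *map;
        int r = -1;                      /* stand-in for -EINVAL */

        pthread_mutex_lock(&suspend_lock);
        if (already_suspended)
                goto out_unlock;

        map = get_table();
        /* ... the actual suspend work would happen here ... */
        r = 0;

        put_table(map);                  /* normal path falls through */
out_unlock:
        pthread_mutex_unlock(&suspend_lock);
        return r;
}

int main(void)
{
        printf("normal: %d\n", do_suspend(0));
        printf("already suspended: %d\n", do_suspend(1));
        return 0;
}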
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c
index f1db6eff4857..b46f6c575f7e 100644
--- a/drivers/md/kcopyd.c
+++ b/drivers/md/kcopyd.c
@@ -203,7 +203,7 @@ struct kcopyd_job {
/* FIXME: this should scale with the number of pages */
#define MIN_JOBS 512
-static kmem_cache_t *_job_cache;
+static struct kmem_cache *_job_cache;
static mempool_t *_job_pool;
/*
@@ -417,7 +417,7 @@ static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
/*
* kcopyd does this every time it's woken up.
*/
-static void do_work(void *ignored)
+static void do_work(struct work_struct *ignored)
{
/*
* The order that these are called is *very* important.
@@ -628,7 +628,7 @@ static int kcopyd_init(void)
}
kcopyd_clients++;
- INIT_WORK(&_kcopyd_work, do_work, NULL);
+ INIT_WORK(&_kcopyd_work, do_work);
mutex_unlock(&kcopyd_init_lock);
return 0;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d11135604403..6c4345bde07e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -39,10 +39,10 @@
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
-#include <linux/suspend.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/ctype.h>
+#include <linux/freezer.h>
#include <linux/init.h>
@@ -3200,7 +3200,7 @@ static int do_md_run(mddev_t * mddev)
mddev->changed = 1;
md_new_event(mddev);
- kobject_uevent(&mddev->gendisk->kobj, KOBJ_ONLINE);
+ kobject_uevent(&mddev->gendisk->kobj, KOBJ_CHANGE);
return 0;
}
@@ -3314,7 +3314,6 @@ static int do_md_stop(mddev_t * mddev, int mode)
module_put(mddev->pers->owner);
mddev->pers = NULL;
- kobject_uevent(&mddev->gendisk->kobj, KOBJ_OFFLINE);
if (mddev->ro)
mddev->ro = 0;
}
@@ -4487,6 +4486,7 @@ static int md_thread(void * arg)
* many dirty RAID5 blocks.
*/
+ current->flags |= PF_NOFREEZE;
allow_signal(SIGKILL);
while (!kthread_should_stop()) {
@@ -4503,7 +4503,6 @@ static int md_thread(void * arg)
test_bit(THREAD_WAKEUP, &thread->flags)
|| kthread_should_stop(),
thread->timeout);
- try_to_freeze();
clear_bit(THREAD_WAKEUP, &thread->flags);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e14f45780720..52914d5cec76 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -348,7 +348,7 @@ static int grow_one_stripe(raid5_conf_t *conf)
static int grow_stripes(raid5_conf_t *conf, int num)
{
- kmem_cache_t *sc;
+ struct kmem_cache *sc;
int devs = conf->raid_disks;
sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev));
@@ -397,7 +397,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
LIST_HEAD(newstripes);
struct disk_info *ndisks;
int err = 0;
- kmem_cache_t *sc;
+ struct kmem_cache *sc;
int i;
if (newsize <= conf->pool_size)
@@ -3659,7 +3659,7 @@ static void end_reshape(raid5_conf_t *conf)
bdev = bdget_disk(conf->mddev->gendisk, 0);
if (bdev) {
mutex_lock(&bdev->bd_inode->i_mutex);
- i_size_write(bdev->bd_inode, conf->mddev->array_size << 10);
+ i_size_write(bdev->bd_inode, (loff_t)conf->mddev->array_size << 10);
mutex_unlock(&bdev->bd_inode->i_mutex);
bdput(bdev);
}
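
mddev->array_size is in KiB, and on 32-bit machines the shift by 10 was performed in 32-bit arithmetic before being widened to the 64-bit loff_t that i_size_write() takes, truncating the size of any array of 4 GiB or more; casting first makes the whole shift 64-bit. A small demonstration (assuming a 32-bit unsigned type, as on i386):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t array_size_kb = 8u * 1024 * 1024;  /* 8 GiB array, in KiB */

        /* Shift in 32-bit arithmetic, then widen: high bits already lost. */
        int64_t wrong = (int64_t)(array_size_kb << 10);

        /* Widen first, then shift: the fix in end_reshape(). */
        int64_t right = (int64_t)array_size_kb << 10;

        printf("wrong: %" PRId64 " bytes\n", wrong);    /* 0 */
        printf("right: %" PRId64 " bytes\n", right);    /* 8589934592 */
        return 0;
}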