path: root/drivers/md/dm.c
author      Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2016-10-30 06:12:50 +0100
committer   Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2016-10-30 06:12:50 +0100
commit      fe0f59c41255d339f4f059be62c350c3c48a3f95 (patch)
tree        fde2381bf94a14ea1553f88f9e9e37b45ed280dc /drivers/md/dm.c
parent      cpufreq: intel_pstate: Always set max P-state in performance mode (diff)
parent      Documentation: intel_pstate: PID tuning is not always available (diff)
download    linux-fe0f59c41255d339f4f059be62c350c3c48a3f95.tar.xz
            linux-fe0f59c41255d339f4f059be62c350c3c48a3f95.zip
Merge back earlier cpufreq material for v4.10.
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--  drivers/md/dm.c  41
1 file changed, 24 insertions(+), 17 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index fa9b1cb4438a..147af9536d0c 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1648,6 +1648,8 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
struct request_queue *q = md->queue;
sector_t size;
+ lockdep_assert_held(&md->suspend_lock);
+
size = dm_table_get_size(t);
/*
@@ -1873,6 +1875,7 @@ EXPORT_SYMBOL_GPL(dm_device_name);
static void __dm_destroy(struct mapped_device *md, bool wait)
{
+ struct request_queue *q = dm_get_md_queue(md);
struct dm_table *map;
int srcu_idx;
@@ -1883,8 +1886,12 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
+ spin_lock_irq(q->queue_lock);
+ queue_flag_set(QUEUE_FLAG_DYING, q);
+ spin_unlock_irq(q->queue_lock);
+
if (dm_request_based(md) && md->kworker_task)
- flush_kthread_worker(&md->kworker);
+ kthread_flush_worker(&md->kworker);
/*
* Take suspend_lock so that presuspend and postsuspend methods
@@ -1934,30 +1941,25 @@ void dm_put(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_put);
-static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
+static int dm_wait_for_completion(struct mapped_device *md, long task_state)
{
int r = 0;
- DECLARE_WAITQUEUE(wait, current);
-
- add_wait_queue(&md->wait, &wait);
+ DEFINE_WAIT(wait);
while (1) {
- set_current_state(interruptible);
+ prepare_to_wait(&md->wait, &wait, task_state);
if (!md_in_flight(md))
break;
- if (interruptible == TASK_INTERRUPTIBLE &&
- signal_pending(current)) {
+ if (signal_pending_state(task_state, current)) {
r = -EINTR;
break;
}
io_schedule();
}
- set_current_state(TASK_RUNNING);
-
- remove_wait_queue(&md->wait, &wait);
+ finish_wait(&md->wait, &wait);
return r;
}
@@ -2075,6 +2077,10 @@ static void unlock_fs(struct mapped_device *md)
}
/*
+ * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
+ * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
+ * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
+ *
* If __dm_suspend returns 0, the device is completely quiescent
* now. There is no request-processing activity. All new requests
* are being added to md->deferred list.
@@ -2082,13 +2088,15 @@ static void unlock_fs(struct mapped_device *md)
* Caller must hold md->suspend_lock
*/
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
- unsigned suspend_flags, int interruptible,
+ unsigned suspend_flags, long task_state,
int dmf_suspended_flag)
{
bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
int r;
+ lockdep_assert_held(&md->suspend_lock);
+
/*
* DMF_NOFLUSH_SUSPENDING must be set before presuspend.
* This flag is cleared before dm_suspend returns.
@@ -2139,7 +2147,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
if (dm_request_based(md)) {
dm_stop_queue(md->queue);
if (md->kworker_task)
- flush_kthread_worker(&md->kworker);
+ kthread_flush_worker(&md->kworker);
}
flush_workqueue(md->wq);
@@ -2149,7 +2157,7 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
* We call dm_wait_for_completion to wait for all existing requests
* to finish.
*/
- r = dm_wait_for_completion(md, interruptible);
+ r = dm_wait_for_completion(md, task_state);
if (!r)
set_bit(dmf_suspended_flag, &md->flags);
@@ -2249,10 +2257,11 @@ static int __dm_resume(struct mapped_device *md, struct dm_table *map)
int dm_resume(struct mapped_device *md)
{
- int r = -EINVAL;
+ int r;
struct dm_table *map = NULL;
retry:
+ r = -EINVAL;
mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
if (!dm_suspended_md(md))
@@ -2276,8 +2285,6 @@ retry:
goto out;
clear_bit(DMF_SUSPENDED, &md->flags);
-
- r = 0;
out:
mutex_unlock(&md->suspend_lock);
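
For reference only (not part of the patch): a minimal sketch of the prepare_to_wait()/finish_wait() idiom that the rewritten dm_wait_for_completion() now follows. wait_for_drain(), drain_wq and drain_pending are hypothetical stand-ins for the DM function, md->wait and the in-flight I/O count behind md_in_flight(); the loop structure mirrors the hunk above.

	#include <linux/atomic.h>
	#include <linux/sched.h>
	#include <linux/wait.h>

	/* Hypothetical stand-ins for md->wait and md_in_flight(md). */
	static DECLARE_WAIT_QUEUE_HEAD(drain_wq);
	static atomic_t drain_pending = ATOMIC_INIT(0);

	static int wait_for_drain(long task_state)
	{
		int r = 0;
		DEFINE_WAIT(wait);

		while (1) {
			/* Re-arm the wait entry and set the task state each pass. */
			prepare_to_wait(&drain_wq, &wait, task_state);

			if (!atomic_read(&drain_pending))
				break;

			/*
			 * signal_pending_state() only reports a signal when
			 * task_state is interruptible (or killable), so the
			 * old explicit TASK_INTERRUPTIBLE check is unneeded.
			 */
			if (signal_pending_state(task_state, current)) {
				r = -EINTR;
				break;
			}

			io_schedule();
		}
		finish_wait(&drain_wq, &wait);

		return r;
	}

Because signal_pending_state() ignores signals for TASK_UNINTERRUPTIBLE callers, the same loop serves both the interruptible (dm_suspend) and uninterruptible (internal suspend) paths.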