author    Dan Williams <dan.j.williams@intel.com>  2011-06-21 00:11:22 +0200
committer Dan Williams <dan.j.williams@intel.com>  2011-07-03 13:04:50 +0200
commit    980d3aeb3828b0fdf2a0b2e893d238130b014575 (patch)
tree      76fcb476d6cb9ab4d8307ec7ff4dcfffbf1d296f /drivers/scsi
parent    isci: Handle timed-out request terminations correctly (diff)
isci: fix isci_terminate_pending() list management
Walk through the list of pending requests, being careful to consider that
multiple requests can be terminated while the lock is dropped (i.e.
invalidating the 'next' reference established by list_for_each_entry_safe).

Also noticed that all callers of isci_terminate_pending_requests() specify
'terminating', so just drop the parameter.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
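As an aside, the sketch below illustrates the splice-and-drain pattern the patch switches to, and why it stays safe across the lock drop where a cached 'next' pointer from list_for_each_entry_safe() does not. The names here (pending_entry, terminate_one(), terminate_all_pending()) are hypothetical simplifications, not part of the driver; only the list_splice_init()/list_first_entry()/list_move() usage mirrors the actual change.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical stand-ins for isci_request and reqs_in_process, used only
 * to illustrate the pattern. */
struct pending_entry {
	struct list_head node;
};

/* Assumed to unlink @e from whatever list it is on (as
 * isci_terminate_request_core does for ireq->dev_node) and to possibly
 * sleep, so it must run with the lock dropped. */
void terminate_one(struct pending_entry *e);

static void terminate_all_pending(struct list_head *pending, spinlock_t *lock)
{
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(lock, flags);
	/* Take the whole pending list private in one step; concurrent
	 * activity now only touches 'pending', not the local 'list'. */
	list_splice_init(pending, &list);

	while (!list_empty(&list)) {
		struct pending_entry *e =
			list_first_entry(&list, struct pending_entry, node);

		/* An entry already being handled elsewhere would be given
		 * back here with list_move(&e->node, pending). */

		spin_unlock_irqrestore(lock, flags);
		terminate_one(e);	/* unlinks e from 'list' */
		spin_lock_irqsave(lock, flags);
	}
	spin_unlock_irqrestore(lock, flags);
}

Each pass re-reads list.next under the lock, so no iteration depends on a 'next' pointer captured before the lock was dropped, which is exactly the reference that concurrent terminations could invalidate under the old list_for_each_entry_safe() walk.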
Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/isci/remote_device.c |   2
-rw-r--r--  drivers/scsi/isci/request.h       |   5
-rw-r--r--  drivers/scsi/isci/task.c          | 131
3 files changed, 67 insertions(+), 71 deletions(-)
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index 3b555dcbe569..45592ad33c3b 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -1272,7 +1272,7 @@ void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remot
"%s: idev = %p\n", __func__, idev);
/* Cleanup all requests pending for this device. */
- isci_terminate_pending_requests(ihost, idev, terminating);
+ isci_terminate_pending_requests(ihost, idev);
dev_dbg(&ihost->pdev->dev,
"%s: idev = %p, done\n", __func__, idev);
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
index 547c35cbe459..ac9368c5a6b5 100644
--- a/drivers/scsi/isci/request.h
+++ b/drivers/scsi/isci/request.h
@@ -776,9 +776,8 @@ isci_request_io_request_get_next_sge(struct isci_request *request,
}
void
-isci_terminate_pending_requests(struct isci_host *isci_host,
- struct isci_remote_device *isci_device,
- enum isci_request_status new_request_state);
+isci_terminate_pending_requests(struct isci_host *ihost,
+ struct isci_remote_device *idev);
enum sci_status
scic_task_request_construct(struct scic_sds_controller *scic,
struct scic_sds_remote_device *sci_dev,
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 573cf1c9e81d..01032dc2c116 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -796,81 +796,79 @@ static void isci_terminate_request_core(
* @isci_host: This parameter specifies SCU.
* @isci_device: This parameter specifies the target.
*
- *
*/
-void isci_terminate_pending_requests(
- struct isci_host *isci_host,
- struct isci_remote_device *isci_device,
- enum isci_request_status new_request_state)
+void isci_terminate_pending_requests(struct isci_host *ihost,
+ struct isci_remote_device *idev)
{
- struct isci_request *request;
- struct isci_request *next_request;
- unsigned long flags;
+ struct completion request_completion;
enum isci_request_status old_state;
- DECLARE_COMPLETION_ONSTACK(request_completion);
-
- dev_dbg(&isci_host->pdev->dev,
- "%s: isci_device = %p (new request state = %d)\n",
- __func__, isci_device, new_request_state);
+ unsigned long flags;
+ LIST_HEAD(list);
- spin_lock_irqsave(&isci_host->scic_lock, flags);
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ list_splice_init(&idev->reqs_in_process, &list);
- /* Iterate through the list. */
- list_for_each_entry_safe(request, next_request,
- &isci_device->reqs_in_process, dev_node) {
+ /* assumes that isci_terminate_request_core deletes from the list */
+ while (!list_empty(&list)) {
+ struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
- init_completion(&request_completion);
+ /* Change state to "terminating" if it is currently
+ * "started".
+ */
+ old_state = isci_request_change_started_to_newstate(ireq,
+ &request_completion,
+ terminating);
+ switch (old_state) {
+ case started:
+ case completed:
+ case aborting:
+ break;
+ default:
+ /* termination in progress, or otherwise dispositioned.
+ * We know the request was on 'list' so should be safe
+ * to move it back to reqs_in_process
+ */
+ list_move(&ireq->dev_node, &idev->reqs_in_process);
+ ireq = NULL;
+ break;
+ }
- /* Change state to "new_request_state" if it is currently
- * "started".
- */
- old_state = isci_request_change_started_to_newstate(
- request,
- &request_completion,
- new_request_state);
+ if (!ireq)
+ continue;
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
- spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+ init_completion(&request_completion);
- if ((old_state == started) ||
- (old_state == completed) ||
- (old_state == aborting)) {
-
- dev_warn(&isci_host->pdev->dev,
- "%s: isci_device=%p request=%p; task=%p "
- "old_state=%d\n",
- __func__,
- isci_device, request,
- ((request->ttype == io_task)
- ? isci_request_access_task(request)
- : NULL),
- old_state);
-
- /* If the old_state is started:
- * This request was not already being aborted. If it had been,
- * then the aborting I/O (ie. the TMF request) would not be in
- * the aborting state, and thus would be terminated here. Note
- * that since the TMF completion's call to the kernel function
- * "complete()" does not happen until the pending I/O request
- * terminate fully completes, we do not have to implement a
- * special wait here for already aborting requests - the
- * termination of the TMF request will force the request
- * to finish it's already started terminate.
- *
- * If old_state == completed:
- * This request completed from the SCU hardware perspective
- * and now just needs cleaning up in terms of freeing the
- * request and potentially calling up to libsas.
- *
- * If old_state == aborting:
- * This request has already gone through a TMF timeout, but may
- * not have been terminated; needs cleaning up at least.
- */
- isci_terminate_request_core(isci_host, isci_device,
- request);
- }
- spin_lock_irqsave(&isci_host->scic_lock, flags);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev=%p request=%p; task=%p old_state=%d\n",
+ __func__, idev, ireq,
+ ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL,
+ old_state);
+
+ /* If the old_state is started:
+ * This request was not already being aborted. If it had been,
+ * then the aborting I/O (ie. the TMF request) would not be in
+ * the aborting state, and thus would be terminated here. Note
+ * that since the TMF completion's call to the kernel function
+ * "complete()" does not happen until the pending I/O request
+ * terminate fully completes, we do not have to implement a
+ * special wait here for already aborting requests - the
+ * termination of the TMF request will force the request
+ * to finish it's already started terminate.
+ *
+ * If old_state == completed:
+ * This request completed from the SCU hardware perspective
+ * and now just needs cleaning up in terms of freeing the
+ * request and potentially calling up to libsas.
+ *
+ * If old_state == aborting:
+ * This request has already gone through a TMF timeout, but may
+ * not have been terminated; needs cleaning up at least.
+ */
+ isci_terminate_request_core(ihost, idev, ireq);
+ spin_lock_irqsave(&ihost->scic_lock, flags);
}
- spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
}
/**
@@ -965,8 +963,7 @@ int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
if (ret == TMF_RESP_FUNC_COMPLETE)
/* Terminate all I/O now. */
isci_terminate_pending_requests(isci_host,
- isci_device,
- terminating);
+ isci_device);
return ret;
}