author    Philip J Kelleher <pjk1939@linux.vnet.ibm.com>  2013-06-18 21:38:26 +0200
committer Jens Axboe <axboe@kernel.dk>  2013-06-19 13:52:09 +0200
commit    31a70bb4440c963e69ce210389d8119c70b5c39d (patch)
tree      70bb10af0da9ad240fef12bcd44968142d4bb202 /drivers/block/rsxx/dma.c
parent    rsxx: Restructured DMA cancel scheme. (diff)
download  linux-31a70bb4440c963e69ce210389d8119c70b5c39d.tar.xz
          linux-31a70bb4440c963e69ce210389d8119c70b5c39d.zip
rsxx: Fixes soft-lockup issues during DMAs.
The workqueue mechanism has been reworked to prevent soft-lockup issues from occurring by adding mutex synchronization.

Signed-off-by: Philip J Kelleher <pjk1939@linux.vnet.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
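The shape of the fix: the functions registered with INIT_WORK() no longer do the DMA processing themselves. Thin wrapper work functions recover the controller with container_of() and take a new per-controller mutex before calling the (now plain) issue/done helpers, so the two work items can never run concurrently. A minimal sketch of that pattern, using hypothetical demo_* names rather than the driver's actual types:

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

/* Hypothetical stand-in for struct rsxx_dma_ctrl. */
struct demo_ctrl {
	struct mutex work_lock;		/* serializes the two work items */
	struct work_struct issue_work;
	struct work_struct done_work;
};

/* The helpers now take the controller pointer directly instead of
 * deriving it from the work_struct themselves. */
static void demo_issue(struct demo_ctrl *ctrl) { /* ... issue DMAs ... */ }
static void demo_done(struct demo_ctrl *ctrl)  { /* ... reap completions ... */ }

/* Workqueue entry points: recover the controller, then run the helper
 * under the mutex. */
static void demo_schedule_issue(struct work_struct *work)
{
	struct demo_ctrl *ctrl = container_of(work, struct demo_ctrl,
					      issue_work);

	mutex_lock(&ctrl->work_lock);
	demo_issue(ctrl);
	mutex_unlock(&ctrl->work_lock);
}

static void demo_schedule_done(struct work_struct *work)
{
	struct demo_ctrl *ctrl = container_of(work, struct demo_ctrl,
					      done_work);

	mutex_lock(&ctrl->work_lock);
	demo_done(ctrl);
	mutex_unlock(&ctrl->work_lock);
}

/* Init-time wiring, mirroring rsxx_dma_ctrl_init() in the patch. */
static void demo_init(struct demo_ctrl *ctrl)
{
	mutex_init(&ctrl->work_lock);
	INIT_WORK(&ctrl->issue_work, demo_schedule_issue);
	INIT_WORK(&ctrl->done_work, demo_schedule_done);
}

A mutex (rather than a spinlock) fits here because work items run in process context, where sleeping while waiting for the lock is allowed.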
Diffstat (limited to 'drivers/block/rsxx/dma.c')
-rw-r--r--  drivers/block/rsxx/dma.c | 35 +++++++++++++++++++++++++++--------
1 file changed, 27 insertions(+), 8 deletions(-)
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 213e40e4bd92..b485a65b8de1 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -381,15 +381,13 @@ static void dma_engine_stalled(unsigned long data)
 	}
 }
 
-static void rsxx_issue_dmas(struct work_struct *work)
+static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
 {
-	struct rsxx_dma_ctrl *ctrl;
 	struct rsxx_dma *dma;
 	int tag;
 	int cmds_pending = 0;
 	struct hw_cmd *hw_cmd_buf;
 
-	ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
 	hw_cmd_buf = ctrl->cmd.buf;
 
 	if (unlikely(ctrl->card->halt) ||
@@ -469,9 +467,8 @@ static void rsxx_issue_dmas(struct work_struct *work)
 	}
 }
 
-static void rsxx_dma_done(struct work_struct *work)
+static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
 {
-	struct rsxx_dma_ctrl *ctrl;
 	struct rsxx_dma *dma;
 	unsigned long flags;
 	u16 count;
@@ -479,7 +476,6 @@ static void rsxx_dma_done(struct work_struct *work)
 	u8 tag;
 	struct hw_status *hw_st_buf;
 
-	ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
 	hw_st_buf = ctrl->status.buf;
 
 	if (unlikely(ctrl->card->halt) ||
@@ -555,6 +551,28 @@ static void rsxx_dma_done(struct work_struct *work)
 	spin_unlock_bh(&ctrl->queue_lock);
 }
 
+static void rsxx_schedule_issue(struct work_struct *work)
+{
+	struct rsxx_dma_ctrl *ctrl;
+
+	ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
+
+	mutex_lock(&ctrl->work_lock);
+	rsxx_issue_dmas(ctrl);
+	mutex_unlock(&ctrl->work_lock);
+}
+
+static void rsxx_schedule_done(struct work_struct *work)
+{
+	struct rsxx_dma_ctrl *ctrl;
+
+	ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
+
+	mutex_lock(&ctrl->work_lock);
+	rsxx_dma_done(ctrl);
+	mutex_unlock(&ctrl->work_lock);
+}
+
 static int rsxx_queue_discard(struct rsxx_cardinfo *card,
 			      struct list_head *q,
 			      unsigned int laddr,
@@ -789,6 +807,7 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
 	spin_lock_init(&ctrl->trackers->lock);
 
 	spin_lock_init(&ctrl->queue_lock);
+	mutex_init(&ctrl->work_lock);
 	INIT_LIST_HEAD(&ctrl->queue);
 
 	setup_timer(&ctrl->activity_timer, dma_engine_stalled,
@@ -802,8 +821,8 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
 	if (!ctrl->done_wq)
 		return -ENOMEM;
 
-	INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
-	INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
+	INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
+	INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);
 
 	st = rsxx_hw_buffers_init(dev, ctrl);
 	if (st)