path: root/block/badblocks.c
author		Shaohua Li <shli@fb.com>	2016-10-20 23:40:06 +0200
committer	Jens Axboe <axboe@fb.com>	2016-10-21 23:45:47 +0200
commit		b4a1278c78bc939b3e29c3ad21ceaa636b0ca8c8 (patch)
tree		d2cd820ba9f15c351d0fead32b08e3b698f7d4cc /block/badblocks.c
parent		softirq: Display IRQ_POLL for irq-poll statistics (diff)
download	linux-b4a1278c78bc939b3e29c3ad21ceaa636b0ca8c8.tar.xz
		linux-b4a1278c78bc939b3e29c3ad21ceaa636b0ca8c8.zip
badblocks: badblocks_set/clear update unacked_exist
When badblocks_set acknowledges a range, or badblocks_clear removes a range, it is possible that all remaining badblocks are acknowledged. We should update unacked_exist when this occurs.

Signed-off-by: Shaohua Li <shli@fb.com>
Reviewed-by: Tomasz Majchrzak <tomasz.majchrzak@intel.com>
Tested-by: Tomasz Majchrzak <tomasz.majchrzak@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
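The new badblocks_update_acked() helper (added in the diff below) can recompute the flag with a plain linear scan because each entry in bb->page packs a whole bad range into a single u64 whose acknowledged bit is read with BB_ACK(). The following is a minimal userspace sketch of that idea, assuming the usual encoding from include/linux/badblocks.h (ack flag in the top bit, start sector above a 9-bit length-minus-one field); struct bb_mirror, update_acked() and the values in main() are illustration-only names, not kernel code, and the real helper additionally runs under bb->lock.

/*
 * Minimal userspace sketch of the badblocks entry encoding and of the
 * scan performed by badblocks_update_acked(). Illustration only; the
 * bit layout below is an assumption mirroring include/linux/badblocks.h.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BB_ACK_MASK	0x8000000000000000ULL
#define BB_ACK(x)	(!!((x) & BB_ACK_MASK))
#define BB_MAKE(sector, len, ack) \
	(((uint64_t)(sector) << 9) | ((uint64_t)((len) - 1)) | \
	 ((uint64_t)(!!(ack)) << 63))

struct bb_mirror {			/* hypothetical stand-in for struct badblocks */
	uint64_t page[8];		/* table of packed bad-range entries */
	int count;			/* entries in use */
	int unacked_exist;		/* 1 if any entry is unacknowledged */
};

/* Re-derive unacked_exist by scanning the table, as the new helper does. */
static void update_acked(struct bb_mirror *bb)
{
	bool unacked = false;
	int i;

	if (!bb->unacked_exist)
		return;

	for (i = 0; i < bb->count; i++) {
		if (!BB_ACK(bb->page[i])) {
			unacked = true;
			break;
		}
	}

	if (!unacked)
		bb->unacked_exist = 0;
}

int main(void)
{
	struct bb_mirror bb = { .count = 2, .unacked_exist = 1 };

	bb.page[0] = BB_MAKE(1024, 8, 1);	/* acknowledged range */
	bb.page[1] = BB_MAKE(4096, 16, 0);	/* unacknowledged range */

	update_acked(&bb);
	printf("unacked_exist = %d\n", bb.unacked_exist);	/* still 1 */

	bb.page[1] |= BB_ACK_MASK;		/* acknowledge the last range */
	update_acked(&bb);
	printf("unacked_exist = %d\n", bb.unacked_exist);	/* now 0 */

	return 0;
}

Built with any C99 compiler, the sketch prints 1 and then 0, mirroring how acknowledging or clearing the last unacked range lets badblocks_set()/badblocks_clear() drop unacked_exist.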
Diffstat (limited to 'block/badblocks.c')
-rw-r--r--	block/badblocks.c	23
1 file changed, 23 insertions(+), 0 deletions(-)
diff --git a/block/badblocks.c b/block/badblocks.c
index 6610e282a03e..6ebcef282314 100644
--- a/block/badblocks.c
+++ b/block/badblocks.c
@@ -133,6 +133,26 @@ retry:
 }
 EXPORT_SYMBOL_GPL(badblocks_check);
 
+static void badblocks_update_acked(struct badblocks *bb)
+{
+	u64 *p = bb->page;
+	int i;
+	bool unacked = false;
+
+	if (!bb->unacked_exist)
+		return;
+
+	for (i = 0; i < bb->count; i++) {
+		if (!BB_ACK(p[i])) {
+			unacked = true;
+			break;
+		}
+	}
+
+	if (!unacked)
+		bb->unacked_exist = 0;
+}
+
 /**
  * badblocks_set() - Add a range of bad blocks to the table.
  * @bb:		the badblocks structure that holds all badblock information
@@ -294,6 +314,8 @@ int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
 	bb->changed = 1;
 	if (!acknowledged)
 		bb->unacked_exist = 1;
+	else
+		badblocks_update_acked(bb);
 	write_sequnlock_irqrestore(&bb->lock, flags);
 
 	return rv;
@@ -401,6 +423,7 @@ int badblocks_clear(struct badblocks *bb, sector_t s, int sectors)
 		}
 	}
 
+	badblocks_update_acked(bb);
 	bb->changed = 1;
 out:
 	write_sequnlock_irq(&bb->lock);