author      Kent Overstreet <kmo@daterainc.com>    2014-02-13 03:43:32 +0100
committer   Kent Overstreet <kmo@daterainc.com>    2014-03-18 20:22:35 +0100
commit      7159b1ad3dded9da040b5c608acf3d52d50f661e (patch)
tree        3ec196333d8ae22e359dc7d16fe8d48b8352fbea /drivers/md/bcache/alloc.c
parent      bcache: Kill dead cgroup code (diff)
bcache: Better alloc tracepoints
Change the invalidate tracepoint to indicate how much data we're invalidating, and change the alloc tracepoints to indicate what offset they're for.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
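The trace_bcache_invalidate(), trace_bcache_alloc() and trace_bcache_alloc_fail() calls added in this patch resolve to tracepoint definitions in include/trace/events/bcache.h, which is outside this diffstat. The sketch below only illustrates how tracepoints with these call signatures could be declared: the field choices, print formats, and the event-class name bcache_alloc_reserve are assumptions for illustration, not the commit's actual definitions, and it assumes bcache's private declarations (struct cache, GC_SECTORS_USED()) are visible where the trace header is expanded.

/* Sketch only -- field layout and formats are assumptions, not this commit's code. */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM bcache

#if !defined(_TRACE_BCACHE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BCACHE_H

#include <linux/tracepoint.h>

/* Per-bucket invalidate: record which bucket is being reused and how many
 * dirty sectors get thrown away (hence the GC_SECTORS_USED() check at the
 * call site). */
TRACE_EVENT(bcache_invalidate,
	TP_PROTO(struct cache *ca, size_t bucket),
	TP_ARGS(ca, bucket),

	TP_STRUCT__entry(
		__field(dev_t,		dev	)
		__field(__u64,		bucket	)
		__field(unsigned,	sectors	)
	),

	TP_fast_assign(
		__entry->dev	 = ca->bdev->bd_dev;
		__entry->bucket	 = bucket;
		__entry->sectors = GC_SECTORS_USED(&ca->buckets[bucket]);
	),

	TP_printk("invalidated %u sectors, bucket %llu on %d,%d",
		  __entry->sectors, (unsigned long long) __entry->bucket,
		  MAJOR(__entry->dev), MINOR(__entry->dev))
);

/* Both alloc events record which reserve the request was against, so a
 * failed allocation shows which pool ran dry rather than just that it
 * failed. */
DECLARE_EVENT_CLASS(bcache_alloc_reserve,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve),

	TP_STRUCT__entry(
		__field(dev_t,		dev	)
		__field(unsigned,	reserve	)
	),

	TP_fast_assign(
		__entry->dev	 = ca->bdev->bd_dev;
		__entry->reserve = reserve;
	),

	TP_printk("%d,%d reserve %u",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->reserve)
);

DEFINE_EVENT(bcache_alloc_reserve, bcache_alloc,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve));

DEFINE_EVENT(bcache_alloc_reserve, bcache_alloc_fail,
	TP_PROTO(struct cache *ca, unsigned reserve),
	TP_ARGS(ca, reserve));

#endif /* _TRACE_BCACHE_H */

#include <trace/define_trace.h>

With definitions along these lines the events appear under /sys/kernel/debug/tracing/events/bcache/, so for example perf record -e bcache:bcache_alloc_fail points at the reserve that is starving instead of a bare allocation count.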
Diffstat (limited to 'drivers/md/bcache/alloc.c')
-rw-r--r--   drivers/md/bcache/alloc.c   15
1 file changed, 11 insertions, 4 deletions
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index c0d37d082443..a3e1427945f2 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -162,10 +162,15 @@ static bool can_invalidate_bucket(struct cache *ca, struct bucket *b)
 
 static void invalidate_one_bucket(struct cache *ca, struct bucket *b)
 {
+	size_t bucket = b - ca->buckets;
+
+	if (GC_SECTORS_USED(b))
+		trace_bcache_invalidate(ca, bucket);
+
 	bch_inc_gen(ca, b);
 	b->prio = INITIAL_PRIO;
 	atomic_inc(&b->pin);
-	fifo_push(&ca->free_inc, b - ca->buckets);
+	fifo_push(&ca->free_inc, bucket);
 }
 
 /*
@@ -301,8 +306,6 @@ static void invalidate_buckets(struct cache *ca)
 		invalidate_buckets_random(ca);
 		break;
 	}
-
-	trace_bcache_alloc_invalidate(ca);
 }
 
 #define allocator_wait(ca, cond)					\
@@ -408,8 +411,10 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
 	    fifo_pop(&ca->free[reserve], r))
 		goto out;
 
-	if (!wait)
+	if (!wait) {
+		trace_bcache_alloc_fail(ca, reserve);
 		return -1;
+	}
 
 	do {
 		prepare_to_wait(&ca->set->bucket_wait, &w,
@@ -425,6 +430,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
 out:
 	wake_up_process(ca->alloc_thread);
 
+	trace_bcache_alloc(ca, reserve);
+
 	if (expensive_debug_checks(ca->set)) {
 		size_t iter;
 		long i;