| author | Chris Mason <chris.mason@oracle.com> | 2010-06-11 20:26:02 +0200 |
|---|---|---|
| committer | Andy Grover <andy.grover@oracle.com> | 2010-09-09 03:15:30 +0200 |
| commit | 7a0ff5dbdd0b4cb7ea8764da9d78f4bb2eebaf31 (patch) | |
| tree | a3e6e1b1af5d63b9f7cb969d831724eecaa98cd5 /net/rds | |
| parent | rds: more FMRs are faster (diff) | |
RDS: use delayed work for the FMR flushes
Using a delayed work queue helps us make sure a healthy number of FMRs
have queued up over the limit before the flush actually runs. It makes
for a large improvement in RDMA IOPS.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
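
The delayed_work pattern the patch switches to is worth seeing in isolation. Below is a minimal sketch of that pattern as an out-of-tree kernel module, not code from the patch: the `demo_*` names are illustrative stand-ins for the RDS structures, and `system_wq` stands in for RDS's private `rds_wq`.

```c
/*
 * Minimal sketch (not from the patch) of the delayed_work pattern this
 * commit adopts. The handler still receives a work_struct *, so
 * container_of() must name the embedded member, flush_worker.work, and
 * queueing takes an extra delay argument in jiffies.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

struct demo_pool {			/* stand-in for rds_ib_mr_pool */
	struct delayed_work	flush_worker;
};

static struct demo_pool pool;

static void demo_flush_worker(struct work_struct *work)
{
	/* delayed_work embeds a work_struct named .work */
	struct demo_pool *p = container_of(work, struct demo_pool,
					   flush_worker.work);

	pr_info("flushing pool %p\n", p);
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&pool.flush_worker, demo_flush_worker);

	/* Run ~10 ticks from now instead of immediately, as the patch
	 * does; system_wq stands in for RDS's private rds_wq. */
	queue_delayed_work(system_wq, &pool.flush_worker, 10);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Matches the pool-teardown change: the _sync variant waits for
	 * the pending timer as well as any running handler. */
	cancel_delayed_work_sync(&pool.flush_worker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```

Note the delay argument is in jiffies, so the 10 used by the patch is 10ms at HZ=1000 and 40ms at HZ=250: a short window in which more dirty FMRs can pile up, letting each flush batch more work.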
Diffstat (limited to 'net/rds')
| -rw-r--r-- | net/rds/ib_rdma.c | 12 |
|---|---|---|

1 file changed, 6 insertions(+), 6 deletions(-)
```diff
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 8c40391de5a2..3a275af9d52f 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -66,7 +66,7 @@ struct rds_ib_mr {
  */
 struct rds_ib_mr_pool {
 	struct mutex		flush_lock;		/* serialize fmr invalidate */
-	struct work_struct	flush_worker;		/* flush worker */
+	struct delayed_work	flush_worker;		/* flush worker */
 
 	atomic_t		item_count;		/* total # of MRs */
 	atomic_t		dirty_count;		/* # dirty of MRs */
@@ -226,7 +226,7 @@ struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
 	INIT_XLIST_HEAD(&pool->clean_list);
 	mutex_init(&pool->flush_lock);
 	init_waitqueue_head(&pool->flush_wait);
-	INIT_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
+	INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);
 
 	pool->fmr_attr.max_pages = fmr_message_size;
 	pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
@@ -254,7 +254,7 @@ void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_co
 
 void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
 {
-	cancel_work_sync(&pool->flush_worker);
+	cancel_delayed_work_sync(&pool->flush_worker);
 	rds_ib_flush_mr_pool(pool, 1, NULL);
 	WARN_ON(atomic_read(&pool->item_count));
 	WARN_ON(atomic_read(&pool->free_pinned));
@@ -695,7 +695,7 @@ out_nolock:
 
 static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
 {
-	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker);
+	struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);
 
 	rds_ib_flush_mr_pool(pool, 0, NULL);
 }
@@ -720,7 +720,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 	/* If we've pinned too many pages, request a flush */
 	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
 	    atomic_read(&pool->dirty_count) >= pool->max_items / 10)
-		queue_work(rds_wq, &pool->flush_worker);
+		queue_delayed_work(rds_wq, &pool->flush_worker, 10);
 
 	if (invalidate) {
 		if (likely(!in_interrupt())) {
@@ -728,7 +728,7 @@ void rds_ib_free_mr(void *trans_private, int invalidate)
 			rds_ib_flush_mr_pool(pool, 0, NULL);
 		} else {
 			/* We get here if the user created a MR marked
 			 * as use_once and invalidate at the same time. */
-			queue_work(rds_wq, &pool->flush_worker);
+			queue_delayed_work(rds_wq, &pool->flush_worker, 10);
 		}
 	}
```