author	Javier González <javier@cnexlabs.com>	2017-10-13 14:46:07 +0200
committer	Jens Axboe <axboe@kernel.dk>	2017-10-13 16:34:57 +0200
commit	b84ae4a8b883b96b95fff0e3979ff2c65bbf96b0 (patch)
tree	6f53ea3c0f6d9afd67e3e86354197467a9e495da /drivers/lightnvm/pblk.h
parent	lightnvm: pblk: fix min size for page mempool (diff)
lightnvm: pblk: simplify work_queue mempool
In pblk, we have a mempool to allocate a generic structure that we pass along to workqueues. This is heavily used in the GC path in order to have enough inflight reads and to fully utilize the GC bandwidth.

However, the current GC path copies data to host memory and puts it back into the write buffer. This requires a vmalloc allocation for the data and a memory copy. Thus, guaranteeing the allocation by using a mempool for the structure itself does not gain us much. Until we implement support for vector copy to avoid moving data through the host, just allocate the workqueue structure using kmalloc. This allows us to have a much smaller mempool.

Reported-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
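The header diff below only shows the new, much smaller PBLK_GEN_WS_POOL_SIZE pool and the gfp_t parameter added to pblk_gen_run_ws(). As a rough illustration of how the allocation side fits together, here is a minimal sketch of pblk_gen_run_ws() drawing a pblk_line_ws from the small gen_ws_pool with the caller-supplied gfp_mask. The body and the pblk_line_ws field names are assumptions for illustration, since pblk-core.c is not part of this diff.

```c
/* Sketch only: the real implementation lives in pblk-core.c, which this
 * diff does not show. The pblk_line_ws field names are assumed.
 */
#include <linux/mempool.h>
#include <linux/workqueue.h>

#include "pblk.h"

void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
		     void (*work)(struct work_struct *), gfp_t gfp_mask,
		     struct workqueue_struct *wq)
{
	struct pblk_line_ws *line_ws;

	/* The caller now chooses the allocation context via gfp_mask; the
	 * pool only needs PBLK_GEN_WS_POOL_SIZE (2) reserved elements to
	 * guarantee forward progress, instead of the old 128-element pool.
	 */
	line_ws = mempool_alloc(pblk->gen_ws_pool, gfp_mask);

	line_ws->pblk = pblk;
	line_ws->line = line;
	line_ws->priv = priv;

	INIT_WORK(&line_ws->ws, work);
	queue_work(wq, &line_ws->ws);
}
```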
Diffstat (limited to 'drivers/lightnvm/pblk.h')
-rw-r--r--	drivers/lightnvm/pblk.h | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
index 229f6020ad8a..efaa781abb06 100644
--- a/drivers/lightnvm/pblk.h
+++ b/drivers/lightnvm/pblk.h
@@ -40,7 +40,6 @@
#define PBLK_MAX_REQ_ADDRS (64)
#define PBLK_MAX_REQ_ADDRS_PW (6)
-#define PBLK_WS_POOL_SIZE (128)
#define PBLK_META_POOL_SIZE (128)
#define PBLK_READ_REQ_POOL_SIZE (1024)
@@ -61,6 +60,8 @@
#define ERASE 2 /* READ = 0, WRITE = 1 */
+#define PBLK_GEN_WS_POOL_SIZE (2)
+
enum {
/* IO Types */
PBLK_IOTYPE_USER = 1 << 0,
@@ -621,7 +622,7 @@ struct pblk {
struct list_head compl_list;
mempool_t *page_bio_pool;
- mempool_t *line_ws_pool;
+ mempool_t *gen_ws_pool;
mempool_t *rec_pool;
mempool_t *g_rq_pool;
mempool_t *w_rq_pool;
@@ -725,9 +726,9 @@ void pblk_line_close_meta_sync(struct pblk *pblk);
void pblk_line_close_ws(struct work_struct *work);
void pblk_pipeline_stop(struct pblk *pblk);
void pblk_line_mark_bb(struct work_struct *work);
-void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
- void (*work)(struct work_struct *),
- struct workqueue_struct *wq);
+void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
+ void (*work)(struct work_struct *), gfp_t gfp_mask,
+ struct workqueue_struct *wq);
u64 pblk_line_smeta_start(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_smeta(struct pblk *pblk, struct pblk_line *line);
int pblk_line_read_emeta(struct pblk *pblk, struct pblk_line *line,
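For context, a call site in a non-sleeping completion path might use the new interface as sketched below. pblk_line_close_ws() is declared in the hunk above; the close_wq workqueue pointer and the surrounding helper are illustrative assumptions, not taken from this diff.

```c
#include "pblk.h"

/* Hypothetical call site: queue line-close work from a context that must
 * not sleep. Everything except pblk_gen_run_ws() and pblk_line_close_ws()
 * is assumed for illustration.
 */
static void example_queue_line_close(struct pblk *pblk, struct pblk_line *line)
{
	/* GFP_ATOMIC because we cannot sleep here; the tiny gen_ws_pool
	 * acts as the fallback reserve if the atomic allocation fails.
	 */
	pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
			GFP_ATOMIC, pblk->close_wq);
}
```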