author     Maxime Ripard <maxime@cerno.tech>  2021-05-11 13:35:52 +0200
committer  Maxime Ripard <maxime@cerno.tech>  2021-05-11 13:35:52 +0200
commit     c55b44c9386f3ee1b08752638559f19deaf6040d (patch)
tree       c843a21f45180387fcd9eb2625cc9d1f166a3156 /drivers/target/target_core_user.c
parent     MAINTAINERS: Update my e-mail (diff)
parent     Linux 5.13-rc1 (diff)
Merge drm/drm-fixes into drm-misc-fixes
Start this new release drm-misc-fixes branch
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Diffstat (limited to 'drivers/target/target_core_user.c')
-rw-r--r--  drivers/target/target_core_user.c | 444
1 file changed, 275 insertions(+), 169 deletions(-)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index bf73cd5f4b04..198d25ae482a 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -8,13 +8,12 @@
 #include <linux/spinlock.h>
 #include <linux/module.h>
-#include <linux/idr.h>
 #include <linux/kernel.h>
 #include <linux/timer.h>
 #include <linux/parser.h>
 #include <linux/vmalloc.h>
 #include <linux/uio_driver.h>
-#include <linux/radix-tree.h>
+#include <linux/xarray.h>
 #include <linux/stringify.h>
 #include <linux/bitops.h>
 #include <linux/highmem.h>
@@ -61,25 +60,27 @@

 #define TCMU_TIME_OUT (30 * MSEC_PER_SEC)

-/* For cmd area, the size is fixed 8MB */
-#define CMDR_SIZE (8 * 1024 * 1024)
+/* For mailbox plus cmd ring, the size is fixed 8MB */
+#define MB_CMDR_SIZE (8 * 1024 * 1024)
+/* Offset of cmd ring is size of mailbox */
+#define CMDR_OFF sizeof(struct tcmu_mailbox)
+#define CMDR_SIZE (MB_CMDR_SIZE - CMDR_OFF)

 /*
- * For data area, the block size is PAGE_SIZE and
- * the total size is 256K * PAGE_SIZE.
+ * For data area, the default block size is PAGE_SIZE and
+ * the default total size is 256K * PAGE_SIZE.
  */
-#define DATA_BLOCK_SIZE PAGE_SIZE
-#define DATA_BLOCK_SHIFT PAGE_SHIFT
-#define DATA_BLOCK_BITS_DEF (256 * 1024)
+#define DATA_PAGES_PER_BLK_DEF 1
+#define DATA_AREA_PAGES_DEF (256 * 1024)

-#define TCMU_MBS_TO_BLOCKS(_mbs) (_mbs << (20 - DATA_BLOCK_SHIFT))
-#define TCMU_BLOCKS_TO_MBS(_blocks) (_blocks >> (20 - DATA_BLOCK_SHIFT))
+#define TCMU_MBS_TO_PAGES(_mbs) ((size_t)_mbs << (20 - PAGE_SHIFT))
+#define TCMU_PAGES_TO_MBS(_pages) (_pages >> (20 - PAGE_SHIFT))

 /*
  * Default number of global data blocks(512K * PAGE_SIZE)
  * when the unmap thread will be started.
  */
-#define TCMU_GLOBAL_MAX_BLOCKS_DEF (512 * 1024)
+#define TCMU_GLOBAL_MAX_PAGES_DEF (512 * 1024)

 static u8 tcmu_kern_cmd_reply_supported;
 static u8 tcmu_netlink_blocked;
@@ -111,6 +112,7 @@ struct tcmu_dev {
 	struct kref kref;

 	struct se_device se_dev;
+	struct se_dev_plug se_plug;

 	char *name;
 	struct se_hba *hba;
@@ -119,22 +121,25 @@ struct tcmu_dev {
 #define TCMU_DEV_BIT_BROKEN 1
 #define TCMU_DEV_BIT_BLOCKED 2
 #define TCMU_DEV_BIT_TMR_NOTIFY 3
+#define TCM_DEV_BIT_PLUGGED 4
 	unsigned long flags;

 	struct uio_info uio_info;

 	struct inode *inode;

-	struct tcmu_mailbox *mb_addr;
 	uint64_t dev_size;
+
+	struct tcmu_mailbox *mb_addr;
+	void *cmdr;
 	u32 cmdr_size;
 	u32 cmdr_last_cleaned;
 	/* Offset of data area from start of mb */
 	/* Must add data_off and mb_addr to get the address */
 	size_t data_off;
-	size_t data_size;
+	int data_area_mb;
 	uint32_t max_blocks;
-	size_t ring_size;
+	size_t mmap_pages;

 	struct mutex cmdr_lock;
 	struct list_head qfull_queue;
@@ -143,9 +148,11 @@ struct tcmu_dev {
 	uint32_t dbi_max;
 	uint32_t dbi_thresh;
 	unsigned long *data_bitmap;
-	struct radix_tree_root data_blocks;
+	struct xarray data_pages;
+	uint32_t data_pages_per_blk;
+	uint32_t data_blk_size;

-	struct idr commands;
+	struct xarray commands;

 	struct timer_list cmd_timer;
 	unsigned int cmd_time_out;
@@ -165,8 +172,6 @@ struct tcmu_dev {

 #define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)

-#define CMDR_OFF sizeof(struct tcmu_mailbox)
-
 struct tcmu_cmd {
 	struct se_cmd *se_cmd;
 	struct tcmu_dev *tcmu_dev;
@@ -215,9 +220,9 @@ static LIST_HEAD(timed_out_udevs);

 static struct kmem_cache *tcmu_cmd_cache;

-static atomic_t global_db_count = ATOMIC_INIT(0);
+static atomic_t global_page_count = ATOMIC_INIT(0);
 static struct delayed_work tcmu_unmap_work;
-static int tcmu_global_max_blocks = TCMU_GLOBAL_MAX_BLOCKS_DEF;
+static int tcmu_global_max_pages = TCMU_GLOBAL_MAX_PAGES_DEF;

 static int tcmu_set_global_max_data_area(const char *str,
					 const struct kernel_param *kp)
@@ -233,8 +238,8 @@ static int tcmu_set_global_max_data_area(const char *str,
 		return -EINVAL;
 	}

-	tcmu_global_max_blocks = TCMU_MBS_TO_BLOCKS(max_area_mb);
-	if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
+	tcmu_global_max_pages = TCMU_MBS_TO_PAGES(max_area_mb);
+	if (atomic_read(&global_page_count) > tcmu_global_max_pages)
 		schedule_delayed_work(&tcmu_unmap_work, 0);
 	else
 		cancel_delayed_work_sync(&tcmu_unmap_work);
@@ -245,7 +250,7 @@ static int tcmu_set_global_max_data_area(const char *str,
 static int tcmu_get_global_max_data_area(char *buffer,
					 const struct kernel_param *kp)
 {
-	return sprintf(buffer, "%d\n", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
+	return sprintf(buffer, "%d\n", TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
 }

 static const struct kernel_param_ops tcmu_global_max_data_area_op = {
@@ -497,32 +502,39 @@ static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)

 static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
				       struct tcmu_cmd *tcmu_cmd,
-				       int prev_dbi, int *iov_cnt)
+				       int prev_dbi, int length, int *iov_cnt)
 {
+	XA_STATE(xas, &udev->data_pages, 0);
 	struct page *page;
-	int ret, dbi;
+	int i, cnt, dbi, dpi;
+	int page_cnt = DIV_ROUND_UP(length, PAGE_SIZE);

 	dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
 	if (dbi == udev->dbi_thresh)
 		return -1;

-	page = radix_tree_lookup(&udev->data_blocks, dbi);
-	if (!page) {
-		if (atomic_add_return(1, &global_db_count) >
-				      tcmu_global_max_blocks)
-			schedule_delayed_work(&tcmu_unmap_work, 0);
+	dpi = dbi * udev->data_pages_per_blk;
+	/* Count the number of already allocated pages */
+	xas_set(&xas, dpi);
+	for (cnt = 0; xas_next(&xas) && cnt < page_cnt;)
+		cnt++;

+	for (i = cnt; i < page_cnt; i++) {
 		/* try to get new page from the mm */
 		page = alloc_page(GFP_NOIO);
 		if (!page)
-			goto err_alloc;
+			break;

-		ret = radix_tree_insert(&udev->data_blocks, dbi, page);
-		if (ret)
-			goto err_insert;
+		if (xa_store(&udev->data_pages, dpi + i, page, GFP_NOIO)) {
+			__free_page(page);
+			break;
+		}
 	}
+	if (atomic_add_return(i - cnt, &global_page_count) >
+			      tcmu_global_max_pages)
+		schedule_delayed_work(&tcmu_unmap_work, 0);

-	if (dbi > udev->dbi_max)
+	if (i && dbi > udev->dbi_max)
 		udev->dbi_max = dbi;

 	set_bit(dbi, udev->data_bitmap);
@@ -531,35 +543,27 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
 	if (dbi != prev_dbi + 1)
 		*iov_cnt += 1;

-	return dbi;
-err_insert:
-	__free_page(page);
-err_alloc:
-	atomic_dec(&global_db_count);
-	return -1;
+	return i == page_cnt ? dbi : -1;
 }

 static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
-				 struct tcmu_cmd *tcmu_cmd, int dbi_cnt)
+				 struct tcmu_cmd *tcmu_cmd, int length)
 {
 	/* start value of dbi + 1 must not be a valid dbi */
 	int dbi = -2;
-	int i, iov_cnt = 0;
+	int blk_data_len, iov_cnt = 0;
+	uint32_t blk_size = udev->data_blk_size;

-	for (i = 0; i < dbi_cnt; i++) {
-		dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, &iov_cnt);
+	for (; length > 0; length -= blk_size) {
+		blk_data_len = min_t(uint32_t, length, blk_size);
+		dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len,
+					   &iov_cnt);
 		if (dbi < 0)
 			return -1;
 	}
 	return iov_cnt;
 }

-static inline struct page *
-tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
-{
-	return radix_tree_lookup(&udev->data_blocks, dbi);
-}
-
 static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
 {
 	kfree(tcmu_cmd->dbi);
@@ -570,14 +574,15 @@ static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd)
 {
 	int i, len;
 	struct se_cmd *se_cmd = cmd->se_cmd;
+	uint32_t blk_size = cmd->tcmu_dev->data_blk_size;

-	cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
+	cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, blk_size);

 	if (se_cmd->se_cmd_flags & SCF_BIDI) {
 		BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
 		for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++)
 			len += se_cmd->t_bidi_data_sg[i].length;
-		cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, DATA_BLOCK_SIZE);
+		cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, blk_size);
 		cmd->dbi_cnt += cmd->dbi_bidi_cnt;
 		cmd->data_len_bidi = len;
 	}
@@ -589,9 +594,8 @@ static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
 	/* Get the next dbi */
 	int dbi = tcmu_cmd_get_dbi(cmd);

-	/* Do not add more than DATA_BLOCK_SIZE to iov */
-	if (len > DATA_BLOCK_SIZE)
-		len = DATA_BLOCK_SIZE;
+	/* Do not add more than udev->data_blk_size to iov */
+	len = min_t(int, len, udev->data_blk_size);

 	/*
 	 * The following code will gather and map the blocks to the same iovec
@@ -603,7 +607,7 @@ static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
 		(*iov)++;
 		/* write offset relative to mb_addr */
 		(*iov)->iov_base = (void __user *)
-				   (udev->data_off + dbi * DATA_BLOCK_SIZE);
+				   (udev->data_off + dbi * udev->data_blk_size);
 	}
 	(*iov)->iov_len += len;

@@ -617,7 +621,7 @@ static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
 	int dbi = -2;

 	/* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */
-	for (; data_length > 0; data_length -= DATA_BLOCK_SIZE)
+	for (; data_length > 0; data_length -= udev->data_blk_size)
 		dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length);
 }

@@ -695,9 +699,11 @@ static inline void tcmu_copy_data(struct tcmu_dev *udev,
			       struct scatterlist *sg, unsigned int sg_nents,
			       struct iovec **iov, size_t data_len)
 {
+	XA_STATE(xas, &udev->data_pages, 0);
 	/* start value of dbi + 1 must not be a valid dbi */
 	int dbi = -2;
-	size_t block_remaining, cp_len;
+	size_t page_remaining, cp_len;
+	int page_cnt, page_inx;
 	struct sg_mapping_iter sg_iter;
 	unsigned int sg_flags;
 	struct page *page;
@@ -715,37 +721,48 @@ static inline void tcmu_copy_data(struct tcmu_dev *udev,
					       data_len);
 		else
 			dbi = tcmu_cmd_get_dbi(tcmu_cmd);
-		page = tcmu_get_block_page(udev, dbi);
-		if (direction == TCMU_DATA_AREA_TO_SG)
-			flush_dcache_page(page);
-		data_page_start = kmap_atomic(page);
-		block_remaining = DATA_BLOCK_SIZE;
-
-		while (block_remaining && data_len) {
-			if (!sg_miter_next(&sg_iter)) {
-				/* set length to 0 to abort outer loop */
-				data_len = 0;
-				pr_debug("tcmu_move_data: aborting data copy due to exhausted sg_list\n");
-				break;
+
+		page_cnt = DIV_ROUND_UP(data_len, PAGE_SIZE);
+		if (page_cnt > udev->data_pages_per_blk)
+			page_cnt = udev->data_pages_per_blk;
+
+		xas_set(&xas, dbi * udev->data_pages_per_blk);
+		for (page_inx = 0; page_inx < page_cnt && data_len; page_inx++) {
+			page = xas_next(&xas);
+
+			if (direction == TCMU_DATA_AREA_TO_SG)
+				flush_dcache_page(page);
+			data_page_start = kmap_atomic(page);
+			page_remaining = PAGE_SIZE;
+
+			while (page_remaining && data_len) {
+				if (!sg_miter_next(&sg_iter)) {
+					/* set length to 0 to abort outer loop */
+					data_len = 0;
+					pr_debug("%s: aborting data copy due to exhausted sg_list\n",
						 __func__);
+					break;
+				}
+				cp_len = min3(sg_iter.length, page_remaining,
					      data_len);
+
+				data_addr = data_page_start +
					    PAGE_SIZE - page_remaining;
+				if (direction == TCMU_SG_TO_DATA_AREA)
+					memcpy(data_addr, sg_iter.addr, cp_len);
+				else
+					memcpy(sg_iter.addr, data_addr, cp_len);
+
+				data_len -= cp_len;
+				page_remaining -= cp_len;
+				sg_iter.consumed = cp_len;
 			}
-			cp_len = min3(sg_iter.length, block_remaining, data_len);
+			sg_miter_stop(&sg_iter);

-			data_addr = data_page_start +
-					DATA_BLOCK_SIZE - block_remaining;
+			kunmap_atomic(data_page_start);
 			if (direction == TCMU_SG_TO_DATA_AREA)
-				memcpy(data_addr, sg_iter.addr, cp_len);
-			else
-				memcpy(sg_iter.addr, data_addr, cp_len);
-
-			data_len -= cp_len;
-			block_remaining -= cp_len;
-			sg_iter.consumed = cp_len;
+				flush_dcache_page(page);
 		}
-		sg_miter_stop(&sg_iter);
-
-		kunmap_atomic(data_page_start);
-		if (direction == TCMU_SG_TO_DATA_AREA)
-			flush_dcache_page(page);
 	}
 }

@@ -844,9 +861,9 @@ static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
			(udev->max_blocks - udev->dbi_thresh) + space;

 	if (blocks_left < cmd->dbi_cnt) {
-		pr_debug("no data space: only %lu available, but ask for %lu\n",
-			 blocks_left * DATA_BLOCK_SIZE,
-			 cmd->dbi_cnt * DATA_BLOCK_SIZE);
+		pr_debug("no data space: only %lu available, but ask for %u\n",
+			 blocks_left * udev->data_blk_size,
+			 cmd->dbi_cnt * udev->data_blk_size);
 		return -1;
 	}

@@ -855,13 +872,12 @@ static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
			udev->dbi_thresh = udev->max_blocks;
 	}

-	iov_cnt = tcmu_get_empty_blocks(udev, cmd,
-					cmd->dbi_cnt - cmd->dbi_bidi_cnt);
+	iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length);
 	if (iov_cnt < 0)
 		return -1;

 	if (cmd->dbi_bidi_cnt) {
-		ret = tcmu_get_empty_blocks(udev, cmd, cmd->dbi_bidi_cnt);
+		ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi);
 		if (ret < 0)
 			return -1;
 	}
@@ -941,7 +957,7 @@ static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
 	if (head_to_end(cmd_head, udev->cmdr_size) < cmd_size) {
 		size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);

-		hdr = (void *) mb + CMDR_OFF + cmd_head;
+		hdr = udev->cmdr + cmd_head;
 		tcmu_hdr_set_op(&hdr->len_op, TCMU_OP_PAD);
 		tcmu_hdr_set_len(&hdr->len_op, pad_size);
 		hdr->cmd_id = 0; /* not used for PAD */
@@ -959,6 +975,25 @@ static uint32_t ring_insert_padding(struct tcmu_dev *udev, size_t cmd_size)
 	return cmd_head;
 }

+static void tcmu_unplug_device(struct se_dev_plug *se_plug)
+{
+	struct se_device *se_dev = se_plug->se_dev;
+	struct tcmu_dev *udev = TCMU_DEV(se_dev);
+
+	clear_bit(TCM_DEV_BIT_PLUGGED, &udev->flags);
+	uio_event_notify(&udev->uio_info);
+}
+
+static struct se_dev_plug *tcmu_plug_device(struct se_device *se_dev)
+{
+	struct tcmu_dev *udev = TCMU_DEV(se_dev);
+
+	if (!test_and_set_bit(TCM_DEV_BIT_PLUGGED, &udev->flags))
+		return &udev->se_plug;
+
+	return NULL;
+}
+
 /**
  * queue_cmd_ring - queue cmd to ring or internally
  * @tcmu_cmd: cmd to queue
@@ -977,11 +1012,12 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
 	struct tcmu_mailbox *mb = udev->mb_addr;
 	struct tcmu_cmd_entry *entry;
 	struct iovec *iov;
-	int iov_cnt, iov_bidi_cnt, cmd_id;
-	uint32_t cmd_head;
+	int iov_cnt, iov_bidi_cnt;
+	uint32_t cmd_id, cmd_head;
 	uint64_t cdb_off;
+	uint32_t blk_size = udev->data_blk_size;
 	/* size of data buffer needed */
-	size_t data_length = (size_t)tcmu_cmd->dbi_cnt * DATA_BLOCK_SIZE;
+	size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size;

 	*scsi_err = TCM_NO_SENSE;

@@ -998,9 +1034,9 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
 	if (!list_empty(&udev->qfull_queue))
 		goto queue;

-	if (data_length > udev->data_size) {
+	if (data_length > (size_t)udev->max_blocks * blk_size) {
 		pr_warn("TCMU: Request of size %zu is too big for %zu data area\n",
-			data_length, udev->data_size);
+			data_length, (size_t)udev->max_blocks * blk_size);
 		*scsi_err = TCM_INVALID_CDB_FIELD;
 		return -1;
 	}
@@ -1031,8 +1067,8 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
		 */
		goto free_and_queue;

-	cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
-	if (cmd_id < 0) {
+	if (xa_alloc(&udev->commands, &cmd_id, tcmu_cmd, XA_LIMIT(1, 0xffff),
+		     GFP_NOWAIT) < 0) {
 		pr_err("tcmu: Could not allocate cmd id.\n");

 		tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
@@ -1046,7 +1082,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)

 	cmd_head = ring_insert_padding(udev, command_size);

-	entry = (void *) mb + CMDR_OFF + cmd_head;
+	entry = udev->cmdr + cmd_head;
 	memset(entry, 0, command_size);
 	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
@@ -1086,8 +1122,8 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)

 	list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);

-	/* TODO: only if FLUSH and FUA? */
-	uio_event_notify(&udev->uio_info);
+	if (!test_bit(TCM_DEV_BIT_PLUGGED, &udev->flags))
+		uio_event_notify(&udev->uio_info);

 	return 0;

@@ -1138,7 +1174,7 @@ queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr)

 	cmd_head = ring_insert_padding(udev, cmd_size);

-	entry = (void *)mb + CMDR_OFF + cmd_head;
+	entry = udev->cmdr + cmd_head;
 	memset(entry, 0, cmd_size);
 	tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_TMR);
 	tcmu_hdr_set_len(&entry->hdr.len_op, cmd_size);
@@ -1253,7 +1289,7 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf,
 	pr_debug("TMR event %d on dev %s, aborted cmds %d, afflicted cmd_ids %d\n",
		 tcmu_tmr_type(tmf), udev->name, i, cmd_cnt);

-	tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_KERNEL);
+	tmr = kmalloc(sizeof(*tmr) + cmd_cnt * sizeof(*cmd_ids), GFP_NOIO);
 	if (!tmr)
		goto unlock;

@@ -1377,7 +1413,7 @@ static int tcmu_run_tmr_queue(struct tcmu_dev *udev)
 	return 1;
 }

-static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
+static bool tcmu_handle_completions(struct tcmu_dev *udev)
 {
 	struct tcmu_mailbox *mb;
 	struct tcmu_cmd *cmd;
@@ -1393,7 +1429,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)

 	while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {

-		struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
+		struct tcmu_cmd_entry *entry = udev->cmdr + udev->cmdr_last_cleaned;

		/*
		 * Flush max. up to end of cmd ring since current entry might
@@ -1415,12 +1451,12 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
		}
		WARN_ON(tcmu_hdr_get_op(entry->hdr.len_op) != TCMU_OP_CMD);

-		cmd = idr_remove(&udev->commands, entry->hdr.cmd_id);
+		cmd = xa_erase(&udev->commands, entry->hdr.cmd_id);
		if (!cmd) {
			pr_err("cmd_id %u not found, ring is broken\n",
			       entry->hdr.cmd_id);
			set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
-			break;
+			return false;
		}

		tcmu_handle_completion(cmd, entry);
@@ -1432,8 +1468,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
 	if (free_space)
		free_space = tcmu_run_tmr_queue(udev);

-	if (atomic_read(&global_db_count) > tcmu_global_max_blocks &&
-	    idr_is_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
+	if (atomic_read(&global_page_count) > tcmu_global_max_pages &&
+	    xa_empty(&udev->commands) && list_empty(&udev->qfull_queue)) {
		/*
		 * Allocated blocks exceeded global block limit, currently no
		 * more pending or waiting commands so try to reclaim blocks.
@@ -1548,7 +1584,10 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
 	udev->cmd_time_out = TCMU_TIME_OUT;
 	udev->qfull_time_out = -1;

-	udev->max_blocks = DATA_BLOCK_BITS_DEF;
+	udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF;
+	udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk;
+	udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF);
+
 	mutex_init(&udev->cmdr_lock);

 	INIT_LIST_HEAD(&udev->node);
@@ -1556,12 +1595,12 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
 	INIT_LIST_HEAD(&udev->qfull_queue);
 	INIT_LIST_HEAD(&udev->tmr_queue);
 	INIT_LIST_HEAD(&udev->inflight_queue);
-	idr_init(&udev->commands);
+	xa_init_flags(&udev->commands, XA_FLAGS_ALLOC1);

 	timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
 	timer_setup(&udev->cmd_timer, tcmu_cmd_timedout, 0);

-	INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
+	xa_init(&udev->data_pages);

 	return &udev->se_dev;
 }
@@ -1585,19 +1624,24 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
 	return -EINVAL;
 }

-static void tcmu_blocks_release(struct radix_tree_root *blocks,
-				int start, int end)
+static u32 tcmu_blocks_release(struct tcmu_dev *udev, unsigned long first,
+			       unsigned long last)
 {
-	int i;
+	XA_STATE(xas, &udev->data_pages, first * udev->data_pages_per_blk);
 	struct page *page;
+	u32 pages_freed = 0;

-	for (i = start; i < end; i++) {
-		page = radix_tree_delete(blocks, i);
-		if (page) {
-			__free_page(page);
-			atomic_dec(&global_db_count);
-		}
+	xas_lock(&xas);
+	xas_for_each(&xas, page, (last + 1) * udev->data_pages_per_blk - 1) {
+		xas_store(&xas, NULL);
+		__free_page(page);
+		pages_freed++;
 	}
+	xas_unlock(&xas);
+
+	atomic_sub(pages_freed, &global_page_count);
+
+	return pages_freed;
 }

 static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
@@ -1616,7 +1660,7 @@ static void tcmu_dev_kref_release(struct kref *kref)
 	struct se_device *dev = &udev->se_dev;
 	struct tcmu_cmd *cmd;
 	bool all_expired = true;
-	int i;
+	unsigned long i;

 	vfree(udev->mb_addr);
 	udev->mb_addr = NULL;
@@ -1628,7 +1672,7 @@ static void tcmu_dev_kref_release(struct kref *kref)

 	/* Upper layer should drain all requests before calling this */
 	mutex_lock(&udev->cmdr_lock);
-	idr_for_each_entry(&udev->commands, cmd, i) {
+	xa_for_each(&udev->commands, i, cmd) {
		if (tcmu_check_and_free_pending_cmd(cmd) != 0)
			all_expired = false;
 	}
@@ -1636,10 +1680,10 @@ static void tcmu_dev_kref_release(struct kref *kref)
 	tcmu_remove_all_queued_tmr(udev);
 	if (!list_empty(&udev->qfull_queue))
		all_expired = false;
-	idr_destroy(&udev->commands);
+	xa_destroy(&udev->commands);
 	WARN_ON(!all_expired);

-	tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
+	tcmu_blocks_release(udev, 0, udev->dbi_max);
 	bitmap_free(udev->data_bitmap);
 	mutex_unlock(&udev->cmdr_lock);
@@ -1737,12 +1781,12 @@ static int tcmu_find_mem_index(struct vm_area_struct *vma)
 	return -1;
 }

-static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
+static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi)
 {
 	struct page *page;

 	mutex_lock(&udev->cmdr_lock);
-	page = tcmu_get_block_page(udev, dbi);
+	page = xa_load(&udev->data_pages, dpi);
 	if (likely(page)) {
		mutex_unlock(&udev->cmdr_lock);
		return page;
@@ -1752,12 +1796,11 @@ static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
	 * Userspace messed up and passed in a address not in the
	 * data iov passed to it.
	 */
-	pr_err("Invalid addr to data block mapping (dbi %u) on device %s\n",
-	       dbi, udev->name);
-	page = NULL;
+	pr_err("Invalid addr to data page mapping (dpi %u) on device %s\n",
+	       dpi, udev->name);
 	mutex_unlock(&udev->cmdr_lock);

-	return page;
+	return NULL;
 }

 static void tcmu_vma_open(struct vm_area_struct *vma)
@@ -1802,11 +1845,11 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
		addr = (void *)(unsigned long)info->mem[mi].addr + offset;
		page = vmalloc_to_page(addr);
 	} else {
-		uint32_t dbi;
+		uint32_t dpi;

		/* For the dynamically growing data area pages */
-		dbi = (offset - udev->data_off) / DATA_BLOCK_SIZE;
-		page = tcmu_try_get_block_page(udev, dbi);
+		dpi = (offset - udev->data_off) / PAGE_SIZE;
+		page = tcmu_try_get_data_page(udev, dpi);
		if (!page)
			return VM_FAULT_SIGBUS;
 	}
@@ -1832,7 +1875,7 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
 	vma->vm_private_data = udev;

 	/* Ensure the mmap is exactly the right size */
-	if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT))
+	if (vma_pages(vma) != udev->mmap_pages)
		return -EINVAL;

 	tcmu_vma_open(vma);
@@ -2065,6 +2108,7 @@ static int tcmu_configure_device(struct se_device *dev)
 	struct tcmu_dev *udev = TCMU_DEV(dev);
 	struct uio_info *info;
 	struct tcmu_mailbox *mb;
+	size_t data_size;
 	int ret = 0;

 	ret = tcmu_update_uio_info(udev);
@@ -2081,20 +2125,23 @@ static int tcmu_configure_device(struct se_device *dev)
		goto err_bitmap_alloc;
 	}

-	udev->mb_addr = vzalloc(CMDR_SIZE);
-	if (!udev->mb_addr) {
+	mb = vzalloc(MB_CMDR_SIZE);
+	if (!mb) {
		ret = -ENOMEM;
		goto err_vzalloc;
 	}

 	/* mailbox fits in first part of CMDR space */
-	udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
-	udev->data_off = CMDR_SIZE;
-	udev->data_size = udev->max_blocks * DATA_BLOCK_SIZE;
+	udev->mb_addr = mb;
+	udev->cmdr = (void *)mb + CMDR_OFF;
+	udev->cmdr_size = CMDR_SIZE;
+	udev->data_off = MB_CMDR_SIZE;
+	data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT;
+	udev->mmap_pages = (data_size + MB_CMDR_SIZE) >> PAGE_SHIFT;
+	udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE;
 	udev->dbi_thresh = 0; /* Default in Idle state */

 	/* Initialise the mailbox of the ring buffer */
-	mb = udev->mb_addr;
 	mb->version = TCMU_MAILBOX_VERSION;
 	mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC |
		    TCMU_MAILBOX_FLAG_CAP_READ_LEN |
@@ -2103,14 +2150,13 @@ static int tcmu_configure_device(struct se_device *dev)
 	mb->cmdr_size = udev->cmdr_size;

 	WARN_ON(!PAGE_ALIGNED(udev->data_off));
-	WARN_ON(udev->data_size % PAGE_SIZE);
-	WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
+	WARN_ON(data_size % PAGE_SIZE);

 	info->version = __stringify(TCMU_MAILBOX_VERSION);

 	info->mem[0].name = "tcm-user command & data buffer";
 	info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
-	info->mem[0].size = udev->ring_size = udev->data_size + CMDR_SIZE;
+	info->mem[0].size = data_size + MB_CMDR_SIZE;
 	info->mem[0].memtype = UIO_MEM_NONE;

 	info->irqcontrol = tcmu_irqcontrol;
@@ -2226,16 +2272,16 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
 {
 	struct tcmu_mailbox *mb;
 	struct tcmu_cmd *cmd;
-	int i;
+	unsigned long i;

 	mutex_lock(&udev->cmdr_lock);

-	idr_for_each_entry(&udev->commands, cmd, i) {
+	xa_for_each(&udev->commands, i, cmd) {
		pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
			 cmd->cmd_id, udev->name,
			 test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags));

-		idr_remove(&udev->commands, i);
+		xa_erase(&udev->commands, i);
		if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
			WARN_ON(!cmd->se_cmd);
			list_del_init(&cmd->queue_entry);
@@ -2285,7 +2331,8 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)

 enum {
 	Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
-	Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_err,
+	Opt_nl_reply_supported, Opt_max_data_area_mb, Opt_data_pages_per_blk,
+	Opt_err,
 };

 static match_table_t tokens = {
@@ -2295,6 +2342,7 @@ static match_table_t tokens = {
 	{Opt_hw_max_sectors, "hw_max_sectors=%d"},
 	{Opt_nl_reply_supported, "nl_reply_supported=%d"},
 	{Opt_max_data_area_mb, "max_data_area_mb=%d"},
+	{Opt_data_pages_per_blk, "data_pages_per_blk=%d"},
 	{Opt_err, NULL}
 };

@@ -2321,6 +2369,7 @@ static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
 static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
 {
 	int val, ret;
+	uint32_t pages_per_blk = udev->data_pages_per_blk;

 	ret = match_int(arg, &val);
 	if (ret < 0) {
@@ -2328,11 +2377,20 @@ static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
		       ret);
		return ret;
 	}
-
 	if (val <= 0) {
		pr_err("Invalid max_data_area %d.\n", val);
		return -EINVAL;
 	}
+	if (val > TCMU_PAGES_TO_MBS(tcmu_global_max_pages)) {
+		pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
+		       val, TCMU_PAGES_TO_MBS(tcmu_global_max_pages));
+		val = TCMU_PAGES_TO_MBS(tcmu_global_max_pages);
+	}
+	if (TCMU_MBS_TO_PAGES(val) < pages_per_blk) {
+		pr_err("Invalid max_data_area %d (%zu pages): smaller than data_pages_per_blk (%u pages).\n",
+		       val, TCMU_MBS_TO_PAGES(val), pages_per_blk);
+		return -EINVAL;
+	}

 	mutex_lock(&udev->cmdr_lock);
 	if (udev->data_bitmap) {
@@ -2341,13 +2399,42 @@ static int tcmu_set_max_blocks_param(struct tcmu_dev *udev, substring_t *arg)
		goto unlock;
 	}

-	udev->max_blocks = TCMU_MBS_TO_BLOCKS(val);
-	if (udev->max_blocks > tcmu_global_max_blocks) {
-		pr_err("%d is too large. Adjusting max_data_area_mb to global limit of %u\n",
-		       val, TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
-		udev->max_blocks = tcmu_global_max_blocks;
+	udev->data_area_mb = val;
+	udev->max_blocks = TCMU_MBS_TO_PAGES(val) / pages_per_blk;
+
+unlock:
+	mutex_unlock(&udev->cmdr_lock);
+	return ret;
+}
+
+static int tcmu_set_data_pages_per_blk(struct tcmu_dev *udev, substring_t *arg)
+{
+	int val, ret;
+
+	ret = match_int(arg, &val);
+	if (ret < 0) {
+		pr_err("match_int() failed for data_pages_per_blk=. Error %d.\n",
+		       ret);
+		return ret;
+	}
+
+	if (val > TCMU_MBS_TO_PAGES(udev->data_area_mb)) {
+		pr_err("Invalid data_pages_per_blk %d: greater than max_data_area_mb %d -> %zd pages).\n",
+		       val, udev->data_area_mb,
+		       TCMU_MBS_TO_PAGES(udev->data_area_mb));
+		return -EINVAL;
+	}
+
+	mutex_lock(&udev->cmdr_lock);
+	if (udev->data_bitmap) {
+		pr_err("Cannot set data_pages_per_blk after it has been enabled.\n");
+		ret = -EINVAL;
+		goto unlock;
 	}
+	udev->data_pages_per_blk = val;
+	udev->max_blocks = TCMU_MBS_TO_PAGES(udev->data_area_mb) / val;

 unlock:
 	mutex_unlock(&udev->cmdr_lock);
 	return ret;
@@ -2404,6 +2491,9 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
		case Opt_max_data_area_mb:
			ret = tcmu_set_max_blocks_param(udev, &args[0]);
			break;
+		case Opt_data_pages_per_blk:
+			ret = tcmu_set_data_pages_per_blk(udev, &args[0]);
+			break;
		default:
			break;
		}
@@ -2424,8 +2514,8 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
 	bl = sprintf(b + bl, "Config: %s ",
		     udev->dev_config[0] ? udev->dev_config : "NULL");
 	bl += sprintf(b + bl, "Size: %llu ", udev->dev_size);
-	bl += sprintf(b + bl, "MaxDataAreaMB: %u\n",
-		      TCMU_BLOCKS_TO_MBS(udev->max_blocks));
+	bl += sprintf(b + bl, "MaxDataAreaMB: %u ", udev->data_area_mb);
+	bl += sprintf(b + bl, "DataPagesPerBlk: %u\n", udev->data_pages_per_blk);

 	return bl;
 }
@@ -2519,11 +2609,21 @@ static ssize_t tcmu_max_data_area_mb_show(struct config_item *item, char *page)
					  struct se_dev_attrib, da_group);
 	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);

-	return snprintf(page, PAGE_SIZE, "%u\n",
-			TCMU_BLOCKS_TO_MBS(udev->max_blocks));
+	return snprintf(page, PAGE_SIZE, "%u\n", udev->data_area_mb);
 }
 CONFIGFS_ATTR_RO(tcmu_, max_data_area_mb);

+static ssize_t tcmu_data_pages_per_blk_show(struct config_item *item,
+					    char *page)
+{
+	struct se_dev_attrib *da = container_of(to_config_group(item),
+						struct se_dev_attrib, da_group);
+	struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+	return snprintf(page, PAGE_SIZE, "%u\n", udev->data_pages_per_blk);
+}
+CONFIGFS_ATTR_RO(tcmu_, data_pages_per_blk);
+
 static ssize_t tcmu_dev_config_show(struct config_item *item, char *page)
 {
 	struct se_dev_attrib *da = container_of(to_config_group(item),
@@ -2835,6 +2935,7 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = {
 	&tcmu_attr_cmd_time_out,
 	&tcmu_attr_qfull_time_out,
 	&tcmu_attr_max_data_area_mb,
+	&tcmu_attr_data_pages_per_blk,
 	&tcmu_attr_dev_config,
 	&tcmu_attr_dev_size,
 	&tcmu_attr_emulate_write_cache,
@@ -2863,6 +2964,8 @@ static struct target_backend_ops tcmu_ops = {
 	.configure_device	= tcmu_configure_device,
 	.destroy_device		= tcmu_destroy_device,
 	.free_device		= tcmu_free_device,
+	.unplug_device		= tcmu_unplug_device,
+	.plug_device		= tcmu_plug_device,
 	.parse_cdb		= tcmu_parse_cdb,
 	.tmr_notify		= tcmu_tmr_notify,
 	.set_configfs_dev_params = tcmu_set_configfs_dev_params,
@@ -2876,9 +2979,10 @@ static void find_free_blocks(void)
 {
 	struct tcmu_dev *udev;
 	loff_t off;
-	u32 start, end, block, total_freed = 0;
+	u32 pages_freed, total_pages_freed = 0;
+	u32 start, end, block, total_blocks_freed = 0;

-	if (atomic_read(&global_db_count) <= tcmu_global_max_blocks)
+	if (atomic_read(&global_page_count) <= tcmu_global_max_pages)
		return;

 	mutex_lock(&root_udev_mutex);
@@ -2919,20 +3023,22 @@ static void find_free_blocks(void)
		}

		/* Here will truncate the data area from off */
-		off = udev->data_off + start * DATA_BLOCK_SIZE;
+		off = udev->data_off + (loff_t)start * udev->data_blk_size;
		unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);

		/* Release the block pages */
-		tcmu_blocks_release(&udev->data_blocks, start, end);
+		pages_freed = tcmu_blocks_release(udev, start, end - 1);
		mutex_unlock(&udev->cmdr_lock);

-		total_freed += end - start;
-		pr_debug("Freed %u blocks (total %u) from %s.\n", end - start,
-			 total_freed, udev->name);
+		total_pages_freed += pages_freed;
+		total_blocks_freed += end - start;
+		pr_debug("Freed %u pages (total %u) from %u blocks (total %u) from %s.\n",
+			 pages_freed, total_pages_freed, end - start,
+			 total_blocks_freed, udev->name);
 	}
 	mutex_unlock(&root_udev_mutex);

-	if (atomic_read(&global_db_count) > tcmu_global_max_blocks)
+	if (atomic_read(&global_page_count) > tcmu_global_max_pages)
		schedule_delayed_work(&tcmu_unmap_work, msecs_to_jiffies(5000));
 }
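The bulk of the churn in this diff is a data-structure migration: the per-device commands IDR and the data_blocks radix tree are both replaced by XArrays, and the data area is now tracked per page (data_pages, indexed by dpi) rather than per fixed-size block, so one block may span data_pages_per_blk pages. Below is a minimal sketch of the command-ID half of that pattern, assuming only the documented XArray API; it is not tcmu code, and all demo_* names are placeholders.

/* Sketch only: the xa_alloc() ID-allocation pattern adopted above. */
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/errno.h>

struct demo_cmd {
	u32 cmd_id;
	/* payload omitted */
};

/* XA_FLAGS_ALLOC1: allocated IDs start at 1, matching the old
 * idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT). */
static DEFINE_XARRAY_FLAGS(demo_cmds, XA_FLAGS_ALLOC1);

static int demo_queue_cmd(struct demo_cmd *cmd)
{
	/* Keep IDs within 16 bits so they fit a ring entry's cmd_id field. */
	return xa_alloc(&demo_cmds, &cmd->cmd_id, cmd,
			XA_LIMIT(1, 0xffff), GFP_NOWAIT);
}

static struct demo_cmd *demo_complete_cmd(u32 cmd_id)
{
	/* xa_erase() looks up and unlinks in one call (was idr_remove()). */
	return xa_erase(&demo_cmds, cmd_id);
}

The data-area side uses the same API: xa_load() resolves page faults in tcmu_try_get_data_page(), and xas_for_each() walks and frees a whole block's pages under a single lock in tcmu_blocks_release(), replacing the old per-index radix_tree_delete() loop.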