Diffstat (limited to 'drivers/infiniband/ulp/iser/iser_initiator.c')
-rw-r--r--  drivers/infiniband/ulp/iser/iser_initiator.c | 148
1 file changed, 123 insertions(+), 25 deletions(-)
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index b6d81a86c976..538822684d5b 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -49,6 +49,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
 
 {
 	struct iscsi_iser_task *iser_task = task->dd_data;
+	struct iser_device *device = iser_task->iser_conn->ib_conn->device;
 	struct iser_regd_buf *regd_buf;
 	int err;
 	struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -69,7 +70,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
 		return -EINVAL;
 	}
 
-	err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
+	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
 	if (err) {
 		iser_err("Failed to set up Data-IN RDMA\n");
 		return err;
@@ -98,6 +99,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
 		       unsigned int edtl)
 {
 	struct iscsi_iser_task *iser_task = task->dd_data;
+	struct iser_device *device = iser_task->iser_conn->ib_conn->device;
 	struct iser_regd_buf *regd_buf;
 	int err;
 	struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -119,7 +121,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
 		return -EINVAL;
 	}
 
-	err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
+	err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
 	if (err != 0) {
 		iser_err("Failed to register write cmd RDMA mem\n");
 		return err;
@@ -170,8 +172,78 @@ static void iser_create_send_desc(struct iser_conn *ib_conn,
 	}
 }
 
+static void iser_free_login_buf(struct iser_conn *ib_conn)
+{
+	if (!ib_conn->login_buf)
+		return;
+
+	if (ib_conn->login_req_dma)
+		ib_dma_unmap_single(ib_conn->device->ib_device,
+				    ib_conn->login_req_dma,
+				    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
+
+	if (ib_conn->login_resp_dma)
+		ib_dma_unmap_single(ib_conn->device->ib_device,
+				    ib_conn->login_resp_dma,
+				    ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+
+	kfree(ib_conn->login_buf);
+
+	/* make sure we never redo any unmapping */
+	ib_conn->login_req_dma = 0;
+	ib_conn->login_resp_dma = 0;
+	ib_conn->login_buf = NULL;
+}
+
+static int iser_alloc_login_buf(struct iser_conn *ib_conn)
+{
+	struct iser_device *device;
+	int req_err, resp_err;
+
+	BUG_ON(ib_conn->device == NULL);
+
+	device = ib_conn->device;
+
+	ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
+				     ISER_RX_LOGIN_SIZE, GFP_KERNEL);
+	if (!ib_conn->login_buf)
+		goto out_err;
+
+	ib_conn->login_req_buf = ib_conn->login_buf;
+	ib_conn->login_resp_buf = ib_conn->login_buf +
+				  ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+	ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device,
+				(void *)ib_conn->login_req_buf,
+				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
+
+	ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device,
+				(void *)ib_conn->login_resp_buf,
+				ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+
+	req_err = ib_dma_mapping_error(device->ib_device,
+				       ib_conn->login_req_dma);
+	resp_err = ib_dma_mapping_error(device->ib_device,
+					ib_conn->login_resp_dma);
+
+	if (req_err || resp_err) {
+		if (req_err)
+			ib_conn->login_req_dma = 0;
+		if (resp_err)
+			ib_conn->login_resp_dma = 0;
+		goto free_login_buf;
+	}
+	return 0;
+
+free_login_buf:
+	iser_free_login_buf(ib_conn);
-int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
+out_err:
+	iser_err("unable to alloc or map login buf\n");
+	return -ENOMEM;
+}
+
+int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session)
 {
 	int i, j;
 	u64 dma_addr;
 	struct iser_rx_desc *rx_desc;
@@ -179,14 +251,24 @@ int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
 	struct ib_sge *rx_sg;
 	struct iser_device *device = ib_conn->device;
 
-	ib_conn->rx_descs = kmalloc(ISER_QP_MAX_RECV_DTOS *
+	ib_conn->qp_max_recv_dtos = session->cmds_max;
+	ib_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
+	ib_conn->min_posted_rx = ib_conn->qp_max_recv_dtos >> 2;
+
+	if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
+		goto create_rdma_reg_res_failed;
+
+	if (iser_alloc_login_buf(ib_conn))
+		goto alloc_login_buf_fail;
+
+	ib_conn->rx_descs = kmalloc(session->cmds_max *
 				sizeof(struct iser_rx_desc), GFP_KERNEL);
 	if (!ib_conn->rx_descs)
 		goto rx_desc_alloc_fail;
 
 	rx_desc = ib_conn->rx_descs;
 
-	for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++) {
+	for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++) {
 		dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
 					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 		if (ib_dma_mapping_error(device->ib_device, dma_addr))
@@ -207,10 +289,14 @@ rx_desc_dma_map_failed:
 	rx_desc = ib_conn->rx_descs;
 	for (j = 0; j < i; j++, rx_desc++)
 		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
-			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 	kfree(ib_conn->rx_descs);
 	ib_conn->rx_descs = NULL;
 rx_desc_alloc_fail:
+	iser_free_login_buf(ib_conn);
+alloc_login_buf_fail:
+	device->iser_free_rdma_reg_res(ib_conn);
+create_rdma_reg_res_failed:
 	iser_err("failed allocating rx descriptors / data buffers\n");
 	return -ENOMEM;
 }
@@ -222,18 +308,27 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn)
 	struct iser_device *device = ib_conn->device;
 
 	if (!ib_conn->rx_descs)
-		return;
+		goto free_login_buf;
+
+	if (device->iser_free_rdma_reg_res)
+		device->iser_free_rdma_reg_res(ib_conn);
 
 	rx_desc = ib_conn->rx_descs;
-	for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)
+	for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++)
 		ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
-			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
 	kfree(ib_conn->rx_descs);
+	/* make sure we never redo any unmapping */
+	ib_conn->rx_descs = NULL;
+
+free_login_buf:
+	iser_free_login_buf(ib_conn);
 }
 
 static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
 {
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
+	struct iscsi_session *session = conn->session;
 
 	iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
 	/* check if this is the last login - going to full feature phase */
@@ -248,9 +343,16 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
 	WARN_ON(iser_conn->ib_conn->post_recv_buf_count != 1);
 	WARN_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
 
-	iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX);
+	if (session->discovery_sess) {
+		iser_info("Discovery session, re-using login RX buffer\n");
+		return 0;
+	} else
+		iser_info("Normal session, posting batch of RX %d buffers\n",
+			  iser_conn->ib_conn->min_posted_rx);
+
 	/* Initial post receive buffers */
-	if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX))
+	if (iser_post_recvm(iser_conn->ib_conn,
+			    iser_conn->ib_conn->min_posted_rx))
 		return -ENOMEM;
 
 	return 0;
@@ -425,6 +527,8 @@ int iser_send_control(struct iscsi_conn *conn,
 	}
 
 	if (task == conn->login_task) {
+		iser_dbg("op %x dsl %lx, posting login rx buffer\n",
+			 task->hdr->opcode, data_seg_len);
 		err = iser_post_recvl(iser_conn->ib_conn);
 		if (err)
 			goto send_control_error;
@@ -487,9 +591,9 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
 		return;
 
 	outstanding = ib_conn->post_recv_buf_count;
-	if (outstanding + ISER_MIN_POSTED_RX <= ISER_QP_MAX_RECV_DTOS) {
-		count = min(ISER_QP_MAX_RECV_DTOS - outstanding,
-						ISER_MIN_POSTED_RX);
+	if (outstanding + ib_conn->min_posted_rx <= ib_conn->qp_max_recv_dtos) {
+		count = min(ib_conn->qp_max_recv_dtos - outstanding,
+			    ib_conn->min_posted_rx);
 		err = iser_post_recvm(ib_conn, count);
 		if (err)
 			iser_err("posting %d rx bufs err %d\n", count, err);
@@ -538,8 +642,8 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
 
 void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
 {
+	struct iser_device *device = iser_task->iser_conn->ib_conn->device;
 	int is_rdma_aligned = 1;
-	struct iser_regd_buf *regd;
 
 	/* if we were reading, copy back to unaligned sglist,
 	 * anyway dma_unmap and free the copy
@@ -553,17 +657,11 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
 		iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
 	}
 
-	if (iser_task->dir[ISER_DIR_IN]) {
-		regd = &iser_task->rdma_regd[ISER_DIR_IN];
-		if (regd->reg.is_fmr)
-			iser_unreg_mem(&regd->reg);
-	}
+	if (iser_task->dir[ISER_DIR_IN])
+		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
 
-	if (iser_task->dir[ISER_DIR_OUT]) {
-		regd = &iser_task->rdma_regd[ISER_DIR_OUT];
-		if (regd->reg.is_fmr)
-			iser_unreg_mem(&regd->reg);
-	}
+	if (iser_task->dir[ISER_DIR_OUT])
+		device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
 
 	/* if the data was unaligned, it was already unmapped and then copied */
 	if (is_rdma_aligned)
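The thread running through most hunks above is indirection: direct calls such as iser_reg_rdma_mem(iser_task, ISER_DIR_IN) become device->iser_reg_rdma_mem(...), so each iser_device carries whichever RDMA registration scheme it supports. A minimal user-space sketch of that dispatch pattern follows; struct reg_ops, fmr_reg and fmr_unreg are invented names for illustration, not the driver's actual definitions.

/*
 * Sketch only: per-device function pointers replace a fixed global call,
 * mirroring the shape (not the definitions) of the patch above.
 */
#include <stdio.h>

struct task;					/* opaque I/O task */

struct reg_ops {
	int  (*reg_rdma_mem)(struct task *t, int dir);
	void (*unreg_rdma_mem)(struct task *t, int dir);
};

struct device {
	struct reg_ops ops;			/* chosen once, at device init */
};

static int fmr_reg(struct task *t, int dir)
{
	(void)t;
	printf("registering dir %d via FMR\n", dir);
	return 0;
}

static void fmr_unreg(struct task *t, int dir)
{
	(void)t;
	printf("unregistering dir %d via FMR\n", dir);
}

/* the caller no longer knows (or cares) which registration scheme runs */
static int prepare_cmd(struct device *dev, struct task *t, int dir)
{
	return dev->ops.reg_rdma_mem(t, dir);
}

int main(void)
{
	struct device dev = { .ops = { fmr_reg, fmr_unreg } };

	return prepare_cmd(&dev, NULL, 0);
}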
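The new iser_free_login_buf() is also written to be safe both on iser_alloc_login_buf()'s partial-failure path and again at connection teardown: each handle is tested before release and cleared afterwards ("make sure we never redo any unmapping"). A sketch of the same idiom under user-space assumptions, with malloc/free and a stub standing in for kmalloc() and ib_dma_unmap_single():

#include <stdlib.h>

struct conn {
	void		*login_buf;
	unsigned long	login_req_dma;		/* 0 means "not mapped" */
	unsigned long	login_resp_dma;
};

static void unmap_dma(unsigned long handle)
{
	(void)handle;	/* the kernel would ib_dma_unmap_single() here */
}

static void free_login_buf(struct conn *c)
{
	if (!c->login_buf)
		return;

	if (c->login_req_dma)
		unmap_dma(c->login_req_dma);
	if (c->login_resp_dma)
		unmap_dma(c->login_resp_dma);

	free(c->login_buf);

	/* make sure we never redo any unmapping */
	c->login_req_dma = 0;
	c->login_resp_dma = 0;
	c->login_buf = NULL;
}

int main(void)
{
	struct conn c = { .login_buf = malloc(64) };

	free_login_buf(&c);
	free_login_buf(&c);	/* second call is a harmless no-op */
	return 0;
}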
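Finally, the receive ring stops being the fixed ISER_QP_MAX_RECV_DTOS: it is sized from session->cmds_max, qp_max_recv_dtos_mask = cmds_max - 1 relies on cmds_max being a power of two (per the in-diff comment), and min_posted_rx, a quarter of the ring, is the batch that iser_rcv_completion() reposts. The sketch below reproduces that arithmetic and the repost test; iser_post_recvm() is stubbed and the numbers are only illustrative.

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

static int iser_post_recvm(unsigned int count)
{
	printf("posting %u rx buffers\n", count);
	return 0;
}

static void replenish_rx(unsigned int outstanding, unsigned int cmds_max)
{
	unsigned int qp_max_recv_dtos = cmds_max;		/* ring size */
	unsigned int qp_max_recv_dtos_mask = cmds_max - 1;	/* 2^N - 1 */
	unsigned int min_posted_rx = qp_max_recv_dtos >> 2;	/* 25% batch */

	/* the driver keeps the mask for cheap index wrap-around; unused here */
	(void)qp_max_recv_dtos_mask;

	/* same test as iser_rcv_completion(): repost only whole batches */
	if (outstanding + min_posted_rx <= qp_max_recv_dtos) {
		unsigned int count = MIN(qp_max_recv_dtos - outstanding,
					 min_posted_rx);
		if (iser_post_recvm(count))
			fprintf(stderr, "posting %u rx bufs failed\n", count);
	}
}

int main(void)
{
	replenish_rx(96, 128);	/* 96 + 32 <= 128: posts a batch of 32 */
	replenish_rx(100, 128);	/* 100 + 32 > 128: skips, avoids tiny posts */
	return 0;
}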