author     Andy Grover <andy.grover@oracle.com>    2010-01-12 23:13:15 +0100
committer  Andy Grover <andy.grover@oracle.com>    2010-09-09 03:11:38 +0200
commit     ff87e97a9d70c9ae133d3d3d7792b26ab85f4297 (patch)
tree       32de73cdf5e4353e89b3351eaae695f69faa868b /net/rds/ib_send.c
parent     RDS: fold rdma.h into rds.h (diff)
RDS: make m_rdma_op a member of rds_message
This eliminates a separate memory alloc, although
it is now necessary to add an "r_active" flag, since
it is no longer possible to use the m_rdma_op pointer
as an indicator of whether an rdma op is present.
rdma SGs are now allocated from the rm sg pool.
rds_rm_size also gets bigger. It's a little inefficient to
run through the CMSGs twice, but it makes later steps a lot
smoother (a sketch of this two-pass pattern follows the diff).
Signed-off-by: Andy Grover <andy.grover@oracle.com>
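
In outline, the change trades a NULL-pointer test for an explicit flag. A minimal sketch of that pattern, using simplified stand-in structs rather than the real rds_message/rds_rdma_op layouts from net/rds/rds.h:

```c
#include <stdbool.h>

/* Simplified stand-ins; the real structs carry many more fields. */
struct rds_rdma_op {
	bool         r_active;   /* new flag: marks that an rdma op is attached */
	unsigned int r_key;
};

/* Before: the op lived in its own allocation, and a non-NULL pointer
 * doubled as the "op present" test. */
struct rds_message_before {
	struct rds_rdma_op *m_rdma_op;   /* kmalloc'd separately */
};

/* After: the op is embedded in the message, so the separate alloc is
 * gone and r_active must carry the presence information instead. */
struct rds_message_after {
	struct rds_rdma_op m_rdma_op;
};

static inline bool has_rdma_op(const struct rds_message_after *rm)
{
	return rm->m_rdma_op.r_active;   /* was: rm->m_rdma_op != NULL */
}
```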
Diffstat (limited to 'net/rds/ib_send.c')
-rw-r--r--  net/rds/ib_send.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 575fce463c65..f0edfdb2866c 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -85,8 +85,8 @@ static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
 		     rm->data.m_sg, rm->data.m_nents,
 		     DMA_TO_DEVICE);
 
-	if (rm->rdma.m_rdma_op) {
-		rds_ib_send_unmap_rdma(ic, rm->rdma.m_rdma_op);
+	if (rm->rdma.m_rdma_op.r_active) {
+		rds_ib_send_unmap_rdma(ic, &rm->rdma.m_rdma_op);
 
 		/* If the user asked for a completion notification on this
 		 * message, we can implement three different semantics:
@@ -110,10 +110,10 @@ static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
 		 */
 		rds_ib_send_rdma_complete(rm, wc_status);
 
-		if (rm->rdma.m_rdma_op->r_write)
-			rds_stats_add(s_send_rdma_bytes, rm->rdma.m_rdma_op->r_bytes);
+		if (rm->rdma.m_rdma_op.r_write)
+			rds_stats_add(s_send_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
 		else
-			rds_stats_add(s_recv_rdma_bytes, rm->rdma.m_rdma_op->r_bytes);
+			rds_stats_add(s_recv_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
 	}
 
 	/* If anyone waited for this message to get flushed out, wake
@@ -243,8 +243,8 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
 
 			rm = rds_send_get_message(conn, send->s_op);
 			if (rm) {
-				if (rm->rdma.m_rdma_op)
-					rds_ib_send_unmap_rdma(ic, rm->rdma.m_rdma_op);
+				if (rm->rdma.m_rdma_op.r_active)
+					rds_ib_send_unmap_rdma(ic, &rm->rdma.m_rdma_op);
 				rds_ib_send_rdma_complete(rm, wc.status);
 				rds_message_put(rm);
 			}
@@ -560,10 +560,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 
 		/* If it has a RDMA op, tell the peer we did it. This is
 		 * used by the peer to release use-once RDMA MRs. */
-		if (rm->rdma.m_rdma_op) {
+		if (rm->rdma.m_rdma_op.r_active) {
 			struct rds_ext_header_rdma ext_hdr;
 
-			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.m_rdma_op->r_key);
+			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.m_rdma_op.r_key);
 			rds_message_add_extension(&rm->m_inc.i_hdr,
 					RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
 		}
@@ -601,7 +601,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
 	 * or when requested by the user. Right now, we let
 	 * the application choose.
 	 */
-	if (rm->rdma.m_rdma_op && rm->rdma.m_rdma_op->r_fence)
+	if (rm->rdma.m_rdma_op.r_active && rm->rdma.m_rdma_op.r_fence)
 		send_flags = IB_SEND_FENCE;
 
 	/*
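
The commit message's note about running through the CMSGs twice refers to a size-then-fill pattern: one pass to compute how much space the message needs, a second pass to populate a single allocation. A userspace-style sketch of that general technique, not the kernel's actual rds_rm_size() (which counts sg entries for the rm sg pool rather than copying payloads):

```c
#include <sys/socket.h>
#include <stdlib.h>
#include <string.h>

/* Two-pass cmsg handling: pass 1 only measures, so pass 2 can fill one
 * allocation with no mid-parse reallocations. Payload handling here is
 * a stand-in for illustration. */
static void *collect_cmsg_payloads(struct msghdr *msg, size_t *out_len)
{
	struct cmsghdr *c;
	size_t total = 0, off = 0;
	char *buf;

	/* Pass 1: total up the payload sizes. */
	for (c = CMSG_FIRSTHDR(msg); c; c = CMSG_NXTHDR(msg, c))
		total += c->cmsg_len - CMSG_LEN(0);

	buf = malloc(total);
	if (!buf)
		return NULL;

	/* Pass 2: the same walk again, copying into the one buffer. */
	for (c = CMSG_FIRSTHDR(msg); c; c = CMSG_NXTHDR(msg, c)) {
		size_t n = c->cmsg_len - CMSG_LEN(0);

		memcpy(buf + off, CMSG_DATA(c), n);
		off += n;
	}

	*out_len = total;
	return buf;
}
```

The payoff is the same one the commit relies on: once sizing is done up front, the rdma op's SGs can live in the message's own sg pool instead of a separate allocation.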