author | Chuck Lever <chuck.lever@oracle.com> | 2020-06-11 19:28:28 +0200 |
---|---|---|
committer | Chuck Lever <chuck.lever@oracle.com> | 2020-11-30 19:00:23 +0100 |
commit | b704be09dccf00b14e0b22a4e849e3ce7a10acd2 (patch) | |
tree | ca0ecdbb9fc9c56ea6062c631e68d1b61c619a0f /net/sunrpc/xprtrdma/svc_rdma_rw.c | |
parent | svcrdma: Remove chunk list pointers (diff) | |
svcrdma: Clean up chunk tracepoints
We already have trace_svcrdma_decode_rseg(), which records each
ingress Read segment. Instead of reporting those again when they
are about to be posted as RDMA Reads, let's fire one tracepoint
before posting each type of chunk.
So we'll get:
nfsd-1998 [002] 321.666615: svcrdma_decode_rseg: cq.id=4 cid=42 segno=0 position=0 192@0x013ca9ebfae14000:0xb0010b05
nfsd-1998 [002] 321.666615: svcrdma_decode_rseg: cq.id=4 cid=42 segno=1 position=0 7688@0x013ca9ebf914e000:0xb0010a05
nfsd-1998 [002] 321.666615: svcrdma_decode_rseg: cq.id=4 cid=42 segno=2 position=0 28@0x013ca9ebfae15000:0xb0010905
nfsd-1998 [002] 321.666622: svcrdma_decode_rqst: cq.id=4 cid=42 xid=0x013ca9eb vers=1 credits=128 proc=RDMA_NOMSG hdrlen=100
nfsd-1998 [002] 321.666642: svcrdma_post_read_chunk: cq.id=3 cid=112 sqecount=3
kworker/2:1H-221 [002] 321.673949: svcrdma_wc_read: cq.id=3 cid=112 status=SUCCESS (0/0x0)
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
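The diffstat below is limited to net/sunrpc/xprtrdma/svc_rdma_rw.c, so only the tracepoint call sites appear in this patch; the definitions of the three new svcrdma_post_*_chunk events live elsewhere (conventionally include/trace/events/rpcrdma.h). As a rough sketch of how they could be declared, assuming a single shared event class that records the completion ID and SQE count seen in the trace output above:

```c
/* Sketch only: these definitions are not part of the diff shown below.
 * One plausible shape, following the kernel's tracing conventions, is a
 * shared event class so all three post-chunk events print identically.
 */
DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
	TP_PROTO(
		const struct rpc_rdma_cid *cid,
		int sqecount
	),

	TP_ARGS(cid, sqecount),

	TP_STRUCT__entry(
		__field(u32, cq_id)
		__field(int, completion_id)
		__field(int, sqecount)
	),

	TP_fast_assign(
		/* The cid carries the CQ ID and per-CQ completion ID that
		 * also appear in the matching completion tracepoint.
		 */
		__entry->cq_id = cid->ci_queue_id;
		__entry->completion_id = cid->ci_completion_id;
		__entry->sqecount = sqecount;
	),

	TP_printk("cq.id=%u cid=%d sqecount=%d",
		__entry->cq_id, __entry->completion_id, __entry->sqecount
	)
);

#define DEFINE_POST_CHUNK_EVENT(name)					\
		DEFINE_EVENT(svcrdma_post_chunk_class,			\
				svcrdma_post_##name##_chunk,		\
				TP_PROTO(				\
					const struct rpc_rdma_cid *cid,	\
					int sqecount			\
				),					\
				TP_ARGS(cid, sqecount))

DEFINE_POST_CHUNK_EVENT(read);
DEFINE_POST_CHUNK_EVENT(write);
DEFINE_POST_CHUNK_EVENT(reply);
```

Sharing one event class keeps the output format of the three events uniform, which is why the "cq.id=... cid=... sqecount=..." line above looks the same for Read, Write, and Reply chunks.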
Diffstat (limited to 'net/sunrpc/xprtrdma/svc_rdma_rw.c')
 net/sunrpc/xprtrdma/svc_rdma_rw.c | 27 ++++++++++++---------------
 1 file changed, 12 insertions(+), 15 deletions(-)
```diff
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 4a0ece9aa385..b04c700862e9 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -358,7 +358,6 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
 	do {
 		if (atomic_sub_return(cc->cc_sqecount,
 				      &rdma->sc_sq_avail) > 0) {
-			trace_svcrdma_post_chunk(&cc->cc_cid, cc->cc_sqecount);
 			ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
 			if (ret)
 				break;
@@ -470,8 +469,6 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
 		if (ret < 0)
 			return -EIO;
 
-		trace_svcrdma_send_wseg(seg->rs_handle, write_len, offset);
-
 		list_add(&ctxt->rw_list, &cc->cc_rwctxts);
 		cc->cc_sqecount += ret;
 		if (write_len == seg->rs_length - info->wi_seg_off) {
@@ -590,21 +587,22 @@ int svc_rdma_send_write_chunk(struct svcxprt_rdma *rdma,
 			      const struct xdr_buf *xdr)
 {
 	struct svc_rdma_write_info *info;
+	struct svc_rdma_chunk_ctxt *cc;
 	int ret;
 
 	info = svc_rdma_write_info_alloc(rdma, chunk);
 	if (!info)
 		return -ENOMEM;
+	cc = &info->wi_cc;
 
 	ret = svc_rdma_xb_write(xdr, info);
 	if (ret != xdr->len)
 		goto out_err;
 
-	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
+	trace_svcrdma_post_write_chunk(&cc->cc_cid, cc->cc_sqecount);
+	ret = svc_rdma_post_chunk_ctxt(cc);
 	if (ret < 0)
 		goto out_err;
-
-	trace_svcrdma_send_write_chunk(xdr->page_len);
 	return xdr->len;
 
 out_err:
@@ -630,6 +628,7 @@ int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
 			      const struct xdr_buf *xdr)
 {
 	struct svc_rdma_write_info *info;
+	struct svc_rdma_chunk_ctxt *cc;
 	struct svc_rdma_chunk *chunk;
 	int ret;
 
@@ -640,17 +639,18 @@ int svc_rdma_send_reply_chunk(struct svcxprt_rdma *rdma,
 	info = svc_rdma_write_info_alloc(rdma, chunk);
 	if (!info)
 		return -ENOMEM;
+	cc = &info->wi_cc;
 
 	ret = pcl_process_nonpayloads(&rctxt->rc_write_pcl, xdr,
 				      svc_rdma_xb_write, info);
 	if (ret < 0)
 		goto out_err;
 
-	ret = svc_rdma_post_chunk_ctxt(&info->wi_cc);
+	trace_svcrdma_post_reply_chunk(&cc->cc_cid, cc->cc_sqecount);
+	ret = svc_rdma_post_chunk_ctxt(cc);
 	if (ret < 0)
 		goto out_err;
 
-	trace_svcrdma_send_reply_chunk(xdr->len);
 	return xdr->len;
 
 out_err:
@@ -737,10 +737,8 @@ static int svc_rdma_build_read_chunk(struct svc_rqst *rqstp,
 		if (ret < 0)
 			break;
 
-		trace_svcrdma_send_rseg(handle, length, offset);
 		info->ri_chunklen += length;
 	}
-
 	return ret;
 }
 
@@ -762,8 +760,6 @@ static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
 	if (ret < 0)
 		goto out;
 
-	trace_svcrdma_send_read_chunk(info->ri_chunklen, info->ri_position);
-
 	head->rc_hdr_count = 0;
 
 	/* Split the Receive buffer between the head and tail
@@ -818,8 +814,6 @@ static int svc_rdma_build_pz_read_chunk(struct svc_rqst *rqstp,
 	if (ret < 0)
 		goto out;
 
-	trace_svcrdma_send_pzr(info->ri_chunklen);
-
 	head->rc_arg.len += info->ri_chunklen;
 	head->rc_arg.buflen += info->ri_chunklen;
 
@@ -876,6 +870,7 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
 			     struct svc_rdma_recv_ctxt *head, __be32 *p)
 {
 	struct svc_rdma_read_info *info;
+	struct svc_rdma_chunk_ctxt *cc;
 	int ret;
 
 	/* The request (with page list) is constructed in
@@ -893,6 +888,7 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
 	info = svc_rdma_read_info_alloc(rdma);
 	if (!info)
 		return -ENOMEM;
+	cc = &info->ri_cc;
 	info->ri_readctxt = head;
 	info->ri_pageno = 0;
 	info->ri_pageoff = 0;
@@ -905,7 +901,8 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
 	if (ret < 0)
 		goto out_err;
 
-	ret = svc_rdma_post_chunk_ctxt(&info->ri_cc);
+	trace_svcrdma_post_read_chunk(&cc->cc_cid, cc->cc_sqecount);
+	ret = svc_rdma_post_chunk_ctxt(cc);
 	if (ret < 0)
 		goto out_err;
 	svc_rdma_save_io_pages(rqstp, 0, head->rc_page_count);
```
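Note that the cq.id/cid pair recorded at post time is the same completion ID the completion handler later reports (compare svcrdma_post_read_chunk cq.id=3 cid=112 with svcrdma_wc_read cq.id=3 cid=112 in the example above), so one tracepoint per posted chunk is enough to correlate each posting with its completion; per-segment detail remains available from trace_svcrdma_decode_rseg() at Receive time.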