mirror of
https://github.com/tbsdtv/linux_media.git
synced 2025-07-23 20:51:03 +02:00
svcrdma: Don't account for Receive queue "starvation"
From what I can tell, calling ->recvfrom when there is no work to do is a normal part of operation. This is the only way svc_recv can tell when there is no more data ready to receive on the transport. Neither the TCP nor the UDP transport implementations have a "starve" metric. The cost of receive starvation accounting is bumping an atomic, which results in extra (IMO unnecessary) bus traffic between CPU sockets, while holding a spin lock. Signed-off-by: Chuck Lever <chuck.lever@oracle.com> Signed-off-by: J. Bruce Fields <bfields@redhat.com>
This commit is contained in:
committed by
J. Bruce Fields
parent
ca5c76aba7
commit
2d6491a56c
@@ -844,9 +844,9 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
|
|||||||
struct svc_xprt *xprt = rqstp->rq_xprt;
|
struct svc_xprt *xprt = rqstp->rq_xprt;
|
||||||
struct svcxprt_rdma *rdma_xprt =
|
struct svcxprt_rdma *rdma_xprt =
|
||||||
container_of(xprt, struct svcxprt_rdma, sc_xprt);
|
container_of(xprt, struct svcxprt_rdma, sc_xprt);
|
||||||
struct svc_rdma_op_ctxt *ctxt = NULL;
|
struct svc_rdma_op_ctxt *ctxt;
|
||||||
struct rpcrdma_msg *rmsgp;
|
struct rpcrdma_msg *rmsgp;
|
||||||
int ret = 0;
|
int ret;
|
||||||
|
|
||||||
dprintk("svcrdma: rqstp=%p\n", rqstp);
|
dprintk("svcrdma: rqstp=%p\n", rqstp);
|
||||||
|
|
||||||
@@ -863,21 +863,13 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
|
|||||||
struct svc_rdma_op_ctxt, list);
|
struct svc_rdma_op_ctxt, list);
|
||||||
list_del(&ctxt->list);
|
list_del(&ctxt->list);
|
||||||
} else {
|
} else {
|
||||||
atomic_inc(&rdma_stat_rq_starve);
|
/* No new incoming requests, terminate the loop */
|
||||||
clear_bit(XPT_DATA, &xprt->xpt_flags);
|
clear_bit(XPT_DATA, &xprt->xpt_flags);
|
||||||
ctxt = NULL;
|
spin_unlock(&rdma_xprt->sc_rq_dto_lock);
|
||||||
|
return 0;
|
||||||
}
|
}
|
||||||
spin_unlock(&rdma_xprt->sc_rq_dto_lock);
|
spin_unlock(&rdma_xprt->sc_rq_dto_lock);
|
||||||
if (!ctxt) {
|
|
||||||
/* This is the EAGAIN path. The svc_recv routine will
|
|
||||||
* return -EAGAIN, the nfsd thread will go to call into
|
|
||||||
* svc_recv again and we shouldn't be on the active
|
|
||||||
* transport list
|
|
||||||
*/
|
|
||||||
if (test_bit(XPT_CLOSE, &xprt->xpt_flags))
|
|
||||||
goto defer;
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p\n",
|
dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p\n",
|
||||||
ctxt, rdma_xprt, rqstp);
|
ctxt, rdma_xprt, rqstp);
|
||||||
atomic_inc(&rdma_stat_recv);
|
atomic_inc(&rdma_stat_recv);
|
||||||
@@ -920,7 +912,6 @@ complete:
|
|||||||
+ rqstp->rq_arg.page_len
|
+ rqstp->rq_arg.page_len
|
||||||
+ rqstp->rq_arg.tail[0].iov_len;
|
+ rqstp->rq_arg.tail[0].iov_len;
|
||||||
svc_rdma_put_context(ctxt, 0);
|
svc_rdma_put_context(ctxt, 0);
|
||||||
out:
|
|
||||||
dprintk("svcrdma: ret=%d, rq_arg.len=%u, "
|
dprintk("svcrdma: ret=%d, rq_arg.len=%u, "
|
||||||
"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
|
"rq_arg.head[0].iov_base=%p, rq_arg.head[0].iov_len=%zd\n",
|
||||||
ret, rqstp->rq_arg.len,
|
ret, rqstp->rq_arg.len,
|
||||||
|
Reference in New Issue
Block a user