author    Tom Tucker <tom@opengridcomputing.com>  2007-12-30 21:07:25 -0600
committer J. Bruce Fields <bfields@citi.umich.edu>  2008-02-01 16:42:08 -0500
commit    5148bf4ebc1f59dc6a0ec43a220c55ff0771246e (patch)
tree      d7ba46947483308dc77736335b05d1124befa7c4 /net
parent    5d137990f5860451a6e0428e0903f62933d05287 (diff)
svc: Add transport specific xpo_release function
The svc_sock_release function releases pages allocated to a thread. For UDP
this frees the receive skb. For RDMA it will post a receive WR and bump the
client credit count.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
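For context, here is a minimal, stand-alone model of the dispatch pattern this patch introduces: per-request transport state is stashed in rq_xprt_ctxt, and generic code releases it through the transport's xpo_release_rqst op instead of calling the socket-specific helper directly. This is user-space C for illustration only, not the kernel code; the struct members mirror the patch, while udp_release_rqst and the malloc'd buffer stand in for svc_release_skb and the skb.

	/* Stand-alone model of the xpo_release_rqst dispatch; types simplified. */
	#include <stdio.h>
	#include <stdlib.h>

	struct svc_rqst;

	struct svc_xprt_ops {
		/* called by generic code to release per-request transport state */
		void (*xpo_release_rqst)(struct svc_rqst *rqstp);
	};

	struct svc_xprt {
		struct svc_xprt_ops *xpt_ops;
	};

	struct svc_rqst {
		struct svc_xprt *rq_xprt;
		void *rq_xprt_ctxt;	/* skb for UDP, receive context for RDMA */
	};

	/* UDP-style release: free whatever was stashed in rq_xprt_ctxt */
	static void udp_release_rqst(struct svc_rqst *rqstp)
	{
		free(rqstp->rq_xprt_ctxt);
		rqstp->rq_xprt_ctxt = NULL;
		printf("udp: released receive buffer\n");
	}

	static struct svc_xprt_ops udp_ops = {
		.xpo_release_rqst = udp_release_rqst,
	};

	int main(void)
	{
		struct svc_xprt xprt = { .xpt_ops = &udp_ops };
		struct svc_rqst rqstp = { .rq_xprt = &xprt,
					  .rq_xprt_ctxt = malloc(32) };

		/* generic code no longer calls the socket helper directly */
		rqstp.rq_xprt->xpt_ops->xpo_release_rqst(&rqstp);
		return 0;
	}

Each transport can attach whatever per-request state it needs to rq_xprt_ctxt and reclaim it in its own xpo_release_rqst; per the changelog, the RDMA implementation is expected to post a fresh receive WR and bump the client credit count at this point.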
Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/svcsock.c  17
1 file changed, 9 insertions, 8 deletions
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 7817c7eea75..d46abc89b99 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -185,14 +185,13 @@ svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
 /*
  * Release an skbuff after use
  */
-static inline void
-svc_release_skb(struct svc_rqst *rqstp)
+static void svc_release_skb(struct svc_rqst *rqstp)
 {
-	struct sk_buff *skb = rqstp->rq_skbuff;
+	struct sk_buff *skb = rqstp->rq_xprt_ctxt;
 	struct svc_deferred_req *dr = rqstp->rq_deferred;
 	if (skb) {
-		rqstp->rq_skbuff = NULL;
+		rqstp->rq_xprt_ctxt = NULL;
 		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
 		skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
@@ -395,7 +394,7 @@ svc_sock_release(struct svc_rqst *rqstp)
 {
 	struct svc_sock *svsk = rqstp->rq_sock;
-	svc_release_skb(rqstp);
+	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);
 	svc_free_res_pages(rqstp);
 	rqstp->rq_res.page_len = 0;
@@ -867,7 +866,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 			skb_free_datagram(svsk->sk_sk, skb);
 			return 0;
 		}
-		rqstp->rq_skbuff = skb;
+		rqstp->rq_xprt_ctxt = skb;
 	}
 	rqstp->rq_arg.page_base = 0;
@@ -903,6 +902,7 @@ svc_udp_sendto(struct svc_rqst *rqstp)
 static struct svc_xprt_ops svc_udp_ops = {
 	.xpo_recvfrom = svc_udp_recvfrom,
 	.xpo_sendto = svc_udp_sendto,
+	.xpo_release_rqst = svc_release_skb,
 };
 static struct svc_xprt_class svc_udp_class = {
@@ -1291,7 +1291,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
 	}
-	rqstp->rq_skbuff = NULL;
+	rqstp->rq_xprt_ctxt = NULL;
 	rqstp->rq_prot = IPPROTO_TCP;
 	/* Reset TCP read info */
@@ -1357,6 +1357,7 @@ svc_tcp_sendto(struct svc_rqst *rqstp)
 static struct svc_xprt_ops svc_tcp_ops = {
 	.xpo_recvfrom = svc_tcp_recvfrom,
 	.xpo_sendto = svc_tcp_sendto,
+	.xpo_release_rqst = svc_release_skb,
 };
 static struct svc_xprt_class svc_tcp_class = {
@@ -1578,7 +1579,7 @@ svc_send(struct svc_rqst *rqstp)
 	}
 	/* release the receive skb before sending the reply */
-	svc_release_skb(rqstp);
+	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);
 	/* calculate over-all length */
 	xb = & rqstp->rq_res;