Lines Matching refs:xprt
12 #include <linux/sunrpc/xprt.h>
22 unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt)
31 static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
33 return xprt->bc_alloc_count < xprt->bc_alloc_max;
75 static struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt)
85 req->rq_xprt = xprt;
124 int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
126 if (!xprt->ops->bc_setup)
128 return xprt->ops->bc_setup(xprt, min_reqs);
132 int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
154 req = xprt_alloc_bc_req(xprt);
168 spin_lock(&xprt->bc_pa_lock);
169 list_splice(&tmp_list, &xprt->bc_pa_list);
170 xprt->bc_alloc_count += min_reqs;
171 xprt->bc_alloc_max += min_reqs;
172 atomic_add(min_reqs, &xprt->bc_slot_count);
173 spin_unlock(&xprt->bc_pa_lock);
196 * @xprt: the transport holding the preallocated structures
203 void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
205 if (xprt->ops->bc_destroy)
206 xprt->ops->bc_destroy(xprt, max_reqs);
210 void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
219 spin_lock_bh(&xprt->bc_pa_lock);
220 xprt->bc_alloc_max -= min(max_reqs, xprt->bc_alloc_max);
221 list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
225 xprt->bc_alloc_count--;
226 atomic_dec(&xprt->bc_slot_count);
230 spin_unlock_bh(&xprt->bc_pa_lock);
234 list_empty(&xprt->bc_pa_list) ? "true" : "false");
237 static struct rpc_rqst *xprt_get_bc_request(struct rpc_xprt *xprt, __be32 xid,
243 if (list_empty(&xprt->bc_pa_list)) {
246 if (atomic_read(&xprt->bc_slot_count) >= BC_MAX_SLOTS)
248 list_add_tail(&new->rq_bc_pa_list, &xprt->bc_pa_list);
249 xprt->bc_alloc_count++;
250 atomic_inc(&xprt->bc_slot_count);
252 req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
258 req->rq_connect_cookie = xprt->connect_cookie;
270 struct rpc_xprt *xprt = req->rq_xprt;
272 xprt->ops->bc_free_rqst(req);
277 struct rpc_xprt *xprt = req->rq_xprt;
281 req->rq_connect_cookie = xprt->connect_cookie - 1;
290 spin_lock_bh(&xprt->bc_pa_lock);
291 if (xprt_need_to_requeue(xprt)) {
295 list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
296 xprt->bc_alloc_count++;
297 atomic_inc(&xprt->bc_slot_count);
300 spin_unlock_bh(&xprt->bc_pa_lock);
311 xprt_put(xprt);
325 struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
330 spin_lock(&xprt->bc_pa_lock);
331 list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
332 if (req->rq_connect_cookie != xprt->connect_cookie)
337 req = xprt_get_bc_request(xprt, xid, new);
339 spin_unlock(&xprt->bc_pa_lock);
346 new = xprt_alloc_bc_req(xprt);
359 struct rpc_xprt *xprt = req->rq_xprt;
360 struct svc_serv *bc_serv = xprt->bc_serv;
362 spin_lock(&xprt->bc_pa_lock);
364 xprt->bc_alloc_count--;
365 spin_unlock(&xprt->bc_pa_lock);
371 xprt_get(xprt);