Lines matching refs: xprt (net/sunrpc/backchannel_rqst.c)
26 #include <linux/sunrpc/xprt.h>
36 unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt)
45 static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
47 return xprt->bc_alloc_count < xprt->bc_alloc_max;
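The predicate at line 47 is the pool's only requeue gate. Reading the two counters the way the surrounding matches use them (a summary, not text from the file):

    /*
     * Under bc_pa_lock:
     *   bc_alloc_count - requests currently parked on bc_pa_list
     *   bc_alloc_max   - slots reserved via xprt_setup_backchannel()
     * A request being freed is requeued only while count < max; once
     * xprt_destroy_backchannel() has lowered bc_alloc_max, surplus
     * requests are really freed instead of being parked again.
     */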
90 struct rpc_rqst *xprt_alloc_bc_req(struct rpc_xprt *xprt, gfp_t gfp_flags)
99 req->rq_xprt = xprt;
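xprt_alloc_bc_req() builds one fully formed rpc_rqst tied back to the transport (line 99). The callers in this file use it along these lines; since GFP_KERNEL may sleep, it is always called outside bc_pa_lock:

    struct rpc_rqst *new;

    /* May sleep: never call this while holding bc_pa_lock. */
    new = xprt_alloc_bc_req(xprt, GFP_KERNEL);
    if (new == NULL)
            return NULL;    /* fall back to whatever the pool already holds */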
138 int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
140 if (!xprt->ops->bc_setup)
142 return xprt->ops->bc_setup(xprt, min_reqs);
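xprt_setup_backchannel() is only a dispatcher: it fails cleanly when the transport has no backchannel support (line 140) and otherwise jumps through ops->bc_setup. A socket transport wires the generic helpers from this file into its ops table roughly as follows (field list abridged; recalled from xprtsock.c, not part of this listing):

    static const struct rpc_xprt_ops xs_tcp_ops = {
            /* ... forechannel ops elided ... */
    #ifdef CONFIG_SUNRPC_BACKCHANNEL
            .bc_setup       = xprt_setup_bc,
            .bc_free_rqst   = xprt_free_bc_rqst,
            .bc_destroy     = xprt_destroy_bc,
    #endif
    };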
146 int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
168 req = xprt_alloc_bc_req(xprt, GFP_KERNEL);
182 spin_lock(&xprt->bc_pa_lock);
183 list_splice(&tmp_list, &xprt->bc_pa_list);
184 xprt->bc_alloc_count += min_reqs;
185 xprt->bc_alloc_max += min_reqs;
186 atomic_add(min_reqs, &xprt->bc_slot_count);
187 spin_unlock(&xprt->bc_pa_lock);
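Lines 146-187 show the shape of the generic setup: the requests are allocated onto a private list first, so bc_pa_lock is held only for the cheap splice and counter updates. A condensed reconstruction (unwinding of tmp_list on failure elided):

    int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
    {
            LIST_HEAD(tmp_list);
            unsigned int i;

            for (i = 0; i < min_reqs; i++) {
                    struct rpc_rqst *req = xprt_alloc_bc_req(xprt, GFP_KERNEL);

                    if (req == NULL)
                            goto out_free;
                    list_add(&req->rq_bc_pa_list, &tmp_list);
            }

            spin_lock(&xprt->bc_pa_lock);
            list_splice(&tmp_list, &xprt->bc_pa_list);
            xprt->bc_alloc_count += min_reqs;
            xprt->bc_alloc_max += min_reqs;
            atomic_add(min_reqs, &xprt->bc_slot_count);
            spin_unlock(&xprt->bc_pa_lock);
            return 0;
    out_free:
            /* Free every request already on tmp_list (omitted here). */
            return -ENOMEM;
    }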
210 * @xprt: the transport holding the preallocated structures
217 void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
219 if (xprt->ops->bc_destroy)
220 xprt->ops->bc_destroy(xprt, max_reqs);
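Setup and teardown are meant to be called in matched pairs with the same request count. A hedged consumer-side sketch (the NFSv4.1 callback client is the in-tree user; the count of 1 is illustrative):

    #include <linux/sunrpc/bc_xprt.h>

    int rc;

    rc = xprt_setup_backchannel(xprt, 1);   /* reserve one callback slot */
    if (rc != 0)
            return rc;
    /* ... session runs; callback requests arrive on the shared socket ... */
    xprt_destroy_backchannel(xprt, 1);      /* release the same number */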
224 void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
233 spin_lock_bh(&xprt->bc_pa_lock);
234 xprt->bc_alloc_max -= min(max_reqs, xprt->bc_alloc_max);
235 list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
239 xprt->bc_alloc_count--;
240 atomic_dec(&xprt->bc_slot_count);
244 spin_unlock_bh(&xprt->bc_pa_lock);
248 list_empty(&xprt->bc_pa_list) ? "true" : "false");
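Lines 224-248 shrink the pool: bc_alloc_max is lowered first, clamped so it never underflows (line 234), then up to max_reqs parked requests are unlinked and freed. The loop body, reconstructed (xprt_free_allocation() is this file's static free helper, not visible in the matches above):

    list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
            list_del(&req->rq_bc_pa_list);
            xprt->bc_alloc_count--;
            atomic_dec(&xprt->bc_slot_count);
            xprt_free_allocation(req);
            if (--max_reqs == 0)
                    break;
    }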
251 static struct rpc_rqst *xprt_get_bc_request(struct rpc_xprt *xprt, __be32 xid,
257 if (list_empty(&xprt->bc_pa_list)) {
260 if (atomic_read(&xprt->bc_slot_count) >= BC_MAX_SLOTS)
262 list_add_tail(&new->rq_bc_pa_list, &xprt->bc_pa_list);
263 xprt->bc_alloc_count++;
264 atomic_inc(&xprt->bc_slot_count);
266 req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
272 req->rq_connect_cookie = xprt->connect_cookie;
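Lines 251-272: with bc_pa_lock already held, the helper hands out the first parked request, first topping the list up with the caller-supplied spare when it is empty and the BC_MAX_SLOTS ceiling still allows growth. The decision flow, condensed:

    if (list_empty(&xprt->bc_pa_list)) {
            if (new == NULL)
                    return NULL;            /* nothing to hand out */
            if (atomic_read(&xprt->bc_slot_count) >= BC_MAX_SLOTS)
                    return NULL;            /* pool may not grow further */
            list_add_tail(&new->rq_bc_pa_list, &xprt->bc_pa_list);
            xprt->bc_alloc_count++;
            atomic_inc(&xprt->bc_slot_count);
    }
    req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
                           rq_bc_pa_list);
    req->rq_xid = xid;                      /* claim it for this call */
    req->rq_connect_cookie = xprt->connect_cookie;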
284 struct rpc_xprt *xprt = req->rq_xprt;
286 xprt->ops->bc_free_rqst(req);
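Only two matched lines (284, 286) come from the public free wrapper, but they are essentially the whole function: resolve the transport from the request and bounce through the ops table, mirroring xprt_setup_backchannel() above (the surrounding body is an assumption from context):

    void xprt_free_bc_request(struct rpc_rqst *req)
    {
            struct rpc_xprt *xprt = req->rq_xprt;

            xprt->ops->bc_free_rqst(req);   /* xprt_free_bc_rqst() for sockets */
    }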
291 struct rpc_xprt *xprt = req->rq_xprt;
295 req->rq_connect_cookie = xprt->connect_cookie - 1;
304 spin_lock_bh(&xprt->bc_pa_lock);
305 if (xprt_need_to_requeue(xprt)) {
309 list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
310 xprt->bc_alloc_count++;
311 atomic_inc(&xprt->bc_slot_count);
314 spin_unlock_bh(&xprt->bc_pa_lock);
325 xprt_put(xprt);
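Lines 291-325 are the interesting free path. Backdating rq_connect_cookie by one (line 295) guarantees the parked request can never satisfy an xid lookup on the current connection; the request is then either requeued or, if the pool is shrinking, freed for real, and the reference taken at completion time is dropped. Condensed:

    req->rq_connect_cookie = xprt->connect_cookie - 1;  /* stale on purpose */

    spin_lock_bh(&xprt->bc_pa_lock);
    if (xprt_need_to_requeue(xprt)) {
            list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
            xprt->bc_alloc_count++;
            atomic_inc(&xprt->bc_slot_count);
            req = NULL;                     /* ownership back to the pool */
    }
    spin_unlock_bh(&xprt->bc_pa_lock);

    if (req != NULL)
            xprt_free_allocation(req);      /* pool over its max: free it */
    xprt_put(xprt);                         /* pairs with xprt_get() at line 385 */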
339 struct rpc_rqst *xprt_lookup_bc_request(struct rpc_xprt *xprt, __be32 xid)
344 spin_lock(&xprt->bc_pa_lock);
345 list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
346 if (req->rq_connect_cookie != xprt->connect_cookie)
351 req = xprt_get_bc_request(xprt, xid, new);
353 spin_unlock(&xprt->bc_pa_lock);
360 new = xprt_alloc_bc_req(xprt, GFP_KERNEL);
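Lines 339-360: the receive path looks up a parked request by xid, skipping entries whose connect cookie predates the current connection (line 346). If nothing matches, it retries once after allocating a spare outside the lock. A condensed reconstruction:

    struct rpc_rqst *req, *new = NULL;

    do {
            spin_lock(&xprt->bc_pa_lock);
            list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
                    if (req->rq_connect_cookie != xprt->connect_cookie)
                            continue;       /* stale pre-reconnect entry */
                    if (req->rq_xid == xid)
                            goto found;
            }
            req = xprt_get_bc_request(xprt, xid, new);
    found:
            spin_unlock(&xprt->bc_pa_lock);
            if (new != NULL) {
                    if (req != new)
                            xprt_free_allocation(new);  /* spare went unused */
                    break;
            }
            if (req != NULL)
                    break;
            /* Pool was empty: allocate a spare and try again. */
            new = xprt_alloc_bc_req(xprt, GFP_KERNEL);
    } while (new != NULL);
    return req;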
373 struct rpc_xprt *xprt = req->rq_xprt;
374 struct svc_serv *bc_serv = xprt->bc_serv;
376 spin_lock(&xprt->bc_pa_lock);
378 xprt->bc_alloc_count--;
379 spin_unlock(&xprt->bc_pa_lock);
385 xprt_get(xprt);
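Lines 373-385 hand a fully received callback over to the svc side: the request leaves the preallocation list (so bc_alloc_count drops, line 378), a transport reference is taken that xprt_free_bc_rqst() later puts back, and the request is queued for a service thread. Condensed; the sv_cb_* names on bc_serv are recalled from struct svc_serv, not part of this listing:

    spin_lock(&xprt->bc_pa_lock);
    list_del(&req->rq_bc_pa_list);          /* no longer a spare */
    xprt->bc_alloc_count--;
    spin_unlock(&xprt->bc_pa_lock);

    xprt_get(xprt);                         /* dropped in xprt_free_bc_rqst() */
    spin_lock(&bc_serv->sv_cb_lock);
    list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
    wake_up(&bc_serv->sv_cb_waitq);
    spin_unlock(&bc_serv->sv_cb_lock);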