Lines matching refs: req (fs/fuse/dev.c)

43 static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
45 INIT_LIST_HEAD(&req->list);
46 INIT_LIST_HEAD(&req->intr_entry);
47 init_waitqueue_head(&req->waitq);
48 refcount_set(&req->count, 1);
49 __set_bit(FR_PENDING, &req->flags);
50 req->fm = fm;
55 struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
56 if (req)
57 fuse_request_init(fm, req);
59 return req;
62 static void fuse_request_free(struct fuse_req *req)
64 kmem_cache_free(fuse_req_cachep, req);
67 static void __fuse_get_request(struct fuse_req *req)
69 refcount_inc(&req->count);
73 static void __fuse_put_request(struct fuse_req *req)
75 refcount_dec(&req->count);
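
The pair above implements the request's reference count: one reference is taken at allocation, refcount_inc() covers each additional holder, and the final refcount_dec_and_test() in fuse_put_request() frees the request. A minimal userspace model of that lifecycle (C11 atomics standing in for the kernel's refcount_t; this is a sketch, not the kernel API):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct req { atomic_int count; };

    static struct req *req_alloc(void)
    {
        struct req *r = calloc(1, sizeof(*r));
        if (r)
            atomic_store(&r->count, 1);          /* refcount_set(&req->count, 1) */
        return r;
    }

    static void req_get(struct req *r)
    {
        atomic_fetch_add(&r->count, 1);          /* __fuse_get_request() */
    }

    static void req_put(struct req *r)
    {
        if (atomic_fetch_sub(&r->count, 1) == 1) /* refcount_dec_and_test() */
            free(r);                             /* fuse_request_free() */
    }
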
104 static void fuse_put_request(struct fuse_req *req);
109 struct fuse_req *req;
130 req = fuse_request_alloc(fm, GFP_KERNEL);
132 if (!req) {
138 req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
139 req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
140 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
142 __set_bit(FR_WAITING, &req->flags);
144 __set_bit(FR_BACKGROUND, &req->flags);
146 if (unlikely(req->in.h.uid == ((uid_t)-1) ||
147 req->in.h.gid == ((gid_t)-1))) {
148 fuse_put_request(req);
151 return req;
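
fuse_get_req() stamps the caller's credentials into the request header after translating them into the server's user namespace; an id with no mapping there comes back as -1, so the request is dropped (the kernel returns -EOVERFLOW in that branch). The recorded fields travel to the server in struct fuse_in_header from the UAPI; illustrative values only:

    #include <linux/fuse.h>   /* UAPI header: struct fuse_in_header */
    #include <stdio.h>

    int main(void)
    {
        /* the kernel fills these from current_fsuid()/current_fsgid()/
         * task_pid() as the fragments above show */
        struct fuse_in_header ih = { .uid = 1000, .gid = 1000, .pid = 4242 };

        printf("uid=%u gid=%u pid=%u\n", ih.uid, ih.gid, ih.pid);
        return 0;
    }
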
158 static void fuse_put_request(struct fuse_req *req)
160 struct fuse_conn *fc = req->fm->fc;
162 if (refcount_dec_and_test(&req->count)) {
163 if (test_bit(FR_BACKGROUND, &req->flags)) {
174 if (test_bit(FR_WAITING, &req->flags)) {
175 __clear_bit(FR_WAITING, &req->flags);
179 fuse_request_free(req);
226 struct fuse_req *req)
229 req->in.h.len = sizeof(struct fuse_in_header) +
230 fuse_len_args(req->args->in_numargs,
231 (struct fuse_arg *) req->args->in_args);
232 list_add_tail(&req->list, &fiq->pending);
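
queue_request_and_unlock() finalizes in.h.len as the fixed header plus the summed sizes of the in-args before linking the request onto fiq->pending. A userspace model of that length computation (struct arg here is a stand-in for the kernel's struct fuse_in_arg):

    #include <linux/fuse.h>
    #include <stddef.h>
    #include <stdio.h>

    struct arg { unsigned int size; const void *value; };  /* ~ fuse_in_arg */

    static size_t len_args(unsigned int num, const struct arg *args)
    {
        size_t len = 0;

        for (unsigned int i = 0; i < num; i++)
            len += args[i].size;          /* mirrors fuse_len_args() */
        return len;
    }

    int main(void)
    {
        struct arg in[] = { { .size = 16 }, { .size = 256 } };

        printf("in.h.len = %zu\n",
               sizeof(struct fuse_in_header) + len_args(2, in));
        return 0;
    }
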
261 struct fuse_req *req;
263 req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
264 list_del(&req->list);
267 req->in.h.unique = fuse_get_unique(fiq);
268 queue_request_and_unlock(fiq, req);
280 void fuse_request_end(struct fuse_req *req)
282 struct fuse_mount *fm = req->fm;
286 if (test_and_set_bit(FR_FINISHED, &req->flags))
294 if (test_bit(FR_INTERRUPTED, &req->flags)) {
296 list_del_init(&req->intr_entry);
299 WARN_ON(test_bit(FR_PENDING, &req->flags));
300 WARN_ON(test_bit(FR_SENT, &req->flags));
301 if (test_bit(FR_BACKGROUND, &req->flags)) {
303 clear_bit(FR_BACKGROUND, &req->flags);
324 wake_up(&req->waitq);
327 if (test_bit(FR_ASYNC, &req->flags))
328 req->args->end(fm, req->args, req->out.h.error);
330 fuse_put_request(req);
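
fuse_request_end() can be reached from a normal reply, an abort, or an error path, so the first thing it does is test_and_set_bit(FR_FINISHED): whoever sets the bit first runs the completion, everyone else returns immediately. A toy model of that idempotence guard:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag finished = ATOMIC_FLAG_INIT;   /* ~ FR_FINISHED */

    static void request_end(const char *who)
    {
        if (atomic_flag_test_and_set(&finished)) {    /* already done */
            printf("%s: lost the race, nothing to do\n", who);
            return;
        }
        printf("%s: runs the completion exactly once\n", who);
    }

    int main(void)
    {
        request_end("reply path");
        request_end("abort path");
        return 0;
    }
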
334 static int queue_interrupt(struct fuse_req *req)
336 struct fuse_iqueue *fiq = &req->fm->fc->iq;
339 /* Check whether we've already sent a request to interrupt this req */
340 if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
345 if (list_empty(&req->intr_entry)) {
346 list_add_tail(&req->intr_entry, &fiq->interrupts);
352 if (test_bit(FR_FINISHED, &req->flags)) {
353 list_del_init(&req->intr_entry);
364 static void request_wait_answer(struct fuse_req *req)
366 struct fuse_conn *fc = req->fm->fc;
372 err = wait_event_interruptible(req->waitq,
373 test_bit(FR_FINISHED, &req->flags));
377 set_bit(FR_INTERRUPTED, &req->flags);
380 if (test_bit(FR_SENT, &req->flags))
381 queue_interrupt(req);
384 if (!test_bit(FR_FORCE, &req->flags)) {
386 err = wait_event_killable(req->waitq,
387 test_bit(FR_FINISHED, &req->flags));
393 if (test_bit(FR_PENDING, &req->flags)) {
394 list_del(&req->list);
396 __fuse_put_request(req);
397 req->out.h.error = -EINTR;
407 wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
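
request_wait_answer() degrades through three waits, which the fragments above show in order. A comment sketch of the control flow (not the verbatim function):

    /*
     * 1. wait_event_interruptible(): any signal breaks out; FR_INTERRUPTED
     *    is set and, if the request is already FR_SENT, a FUSE_INTERRUPT
     *    is queued via queue_interrupt().
     * 2. wait_event_killable() (skipped for FR_FORCE requests): only a
     *    fatal signal gets past this wait.
     * 3. If the request is still FR_PENDING it was never read by the
     *    server, so it can be unlinked and failed locally with -EINTR.
     * 4. Otherwise the request is in flight and must be waited for
     *    uninterruptibly until FR_FINISHED.
     */
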
410 static void __fuse_request_send(struct fuse_req *req)
412 struct fuse_iqueue *fiq = &req->fm->fc->iq;
414 BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
418 req->out.h.error = -ENOTCONN;
420 req->in.h.unique = fuse_get_unique(fiq);
423 __fuse_get_request(req);
424 queue_request_and_unlock(fiq, req);
426 request_wait_answer(req);
465 static void fuse_force_creds(struct fuse_req *req)
467 struct fuse_conn *fc = req->fm->fc;
469 req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
470 req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
471 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
474 static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
476 req->in.h.opcode = args->opcode;
477 req->in.h.nodeid = args->nodeid;
478 req->args = args;
480 req->in.h.total_extlen = args->in_args[args->ext_idx].size / 8;
482 __set_bit(FR_ASYNC, &req->flags);
488 struct fuse_req *req;
493 req = fuse_request_alloc(fm, GFP_KERNEL | __GFP_NOFAIL);
496 fuse_force_creds(req);
498 __set_bit(FR_WAITING, &req->flags);
499 __set_bit(FR_FORCE, &req->flags);
502 req = fuse_get_req(fm, false);
503 if (IS_ERR(req))
504 return PTR_ERR(req);
509 fuse_args_to_req(req, args);
512 __set_bit(FR_ISREPLY, &req->flags);
513 __fuse_request_send(req);
514 ret = req->out.h.error;
519 fuse_put_request(req);
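
For reference, a hedged sketch of what a typical synchronous caller looks like (modelled on in-tree users; example_getattr is a made-up name, while FUSE_ARGS() and get_node_id() are helpers from fs/fuse/fuse_i.h, and the exact fields depend on the opcode):

    static int example_getattr(struct fuse_mount *fm, struct inode *inode)
    {
            FUSE_ARGS(args);
            struct fuse_getattr_in inarg = { 0 };
            struct fuse_attr_out outarg;

            args.opcode = FUSE_GETATTR;
            args.nodeid = get_node_id(inode);
            args.in_numargs = 1;
            args.in_args[0].size = sizeof(inarg);
            args.in_args[0].value = &inarg;
            args.out_numargs = 1;
            args.out_args[0].size = sizeof(outarg);
            args.out_args[0].value = &outarg;

            return fuse_simple_request(fm, &args);  /* 0, or req->out.h.error */
    }
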
524 static bool fuse_request_queue_background(struct fuse_req *req)
526 struct fuse_mount *fm = req->fm;
530 WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
531 if (!test_bit(FR_WAITING, &req->flags)) {
532 __set_bit(FR_WAITING, &req->flags);
535 __set_bit(FR_ISREPLY, &req->flags);
541 list_add_tail(&req->list, &fc->bg_queue);
553 struct fuse_req *req;
557 req = fuse_request_alloc(fm, gfp_flags);
558 if (!req)
560 __set_bit(FR_BACKGROUND, &req->flags);
563 req = fuse_get_req(fm, true);
564 if (IS_ERR(req))
565 return PTR_ERR(req);
568 fuse_args_to_req(req, args);
570 if (!fuse_request_queue_background(req)) {
571 fuse_put_request(req);
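
Background requests return as soon as they are queued; completion is delivered through args->end() instead (the FR_ASYNC branch in fuse_request_end() above). A hedged caller sketch, with my_end() and example_bg() standing in for hypothetical caller code:

    static void my_end(struct fuse_mount *fm, struct fuse_args *args, int error)
    {
            /* runs from fuse_request_end(); error is req->out.h.error */
    }

    static int example_bg(struct fuse_mount *fm, struct fuse_args *args)
    {
            /* args filled in as in the synchronous example, and it must
             * stay allocated until my_end() has run */
            args->end = my_end;     /* fuse_args_to_req() then sets FR_ASYNC */
            return fuse_simple_background(fm, args, GFP_KERNEL);
    }
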
582 struct fuse_req *req;
586 req = fuse_get_req(fm, false);
587 if (IS_ERR(req))
588 return PTR_ERR(req);
590 __clear_bit(FR_ISREPLY, &req->flags);
591 req->in.h.unique = unique;
593 fuse_args_to_req(req, args);
597 queue_request_and_unlock(fiq, req);
601 fuse_put_request(req);
612 static int lock_request(struct fuse_req *req)
615 if (req) {
616 spin_lock(&req->waitq.lock);
617 if (test_bit(FR_ABORTED, &req->flags))
620 set_bit(FR_LOCKED, &req->flags);
621 spin_unlock(&req->waitq.lock);
630 static int unlock_request(struct fuse_req *req)
633 if (req) {
634 spin_lock(&req->waitq.lock);
635 if (test_bit(FR_ABORTED, &req->flags))
638 clear_bit(FR_LOCKED, &req->flags);
639 spin_unlock(&req->waitq.lock);
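
lock_request()/unlock_request() implement the handshake between data copiers and the abort path: under req->waitq.lock, a copier either finds FR_ABORTED already set and backs off (both helpers return -ENOENT in that case, as in the kernel source), or toggles FR_LOCKED to tell fuse_abort_conn() that the request's buffers are in use and it must not be ended yet. A comment sketch of the two sides:

    /*
     * copier (dev read/write)            abort (fuse_abort_conn)
     * ------------------------           ------------------------
     * lock_request():                    set_bit(FR_ABORTED)
     *   if FR_ABORTED -> -ENOENT         if !FR_LOCKED:
     *   else set FR_LOCKED                 end the request now
     * ... copy pages ...                 else:
     * unlock_request():                    leave it to the copier,
     *   if FR_ABORTED -> -ENOENT           which sees FR_ABORTED on
     *   else clear FR_LOCKED               unlock and bails out
     */
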
646 struct fuse_req *req;
694 err = unlock_request(cs->req);
744 return lock_request(cs->req);
797 err = unlock_request(cs->req);
857 spin_lock(&cs->req->waitq.lock);
858 if (test_bit(FR_ABORTED, &cs->req->flags))
862 spin_unlock(&cs->req->waitq.lock);
887 err = lock_request(cs->req);
904 err = unlock_request(cs->req);
943 if (cs->req->args->user_pages) {
980 struct fuse_req *req = cs->req;
981 struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
1051 size_t nbytes, struct fuse_req *req)
1059 list_del_init(&req->intr_entry);
1064 ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
1065 arg.unique = req->in.h.unique;
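
An interrupt request reuses the unique id of the request it targets, with the low bit set (FUSE_INT_REQ_BIT is a local define in this file), while the fuse_interrupt_in payload carries the original unique. That is also why the write path below masks the bit off before request_find(). A small demo of the encoding:

    #include <stdint.h>
    #include <stdio.h>

    #define FUSE_INT_REQ_BIT (1ULL << 0)   /* mirrors the define in this file */

    int main(void)
    {
        uint64_t unique = 42;                           /* req->in.h.unique */
        uint64_t ih_unique = unique | FUSE_INT_REQ_BIT; /* interrupt's id   */

        printf("interrupt id: %llu\n", (unsigned long long)ih_unique);
        printf("target req:   %llu\n",
               (unsigned long long)(ih_unique & ~FUSE_INT_REQ_BIT));
        return 0;
    }
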
1211 struct fuse_req *req;
1255 req = list_entry(fiq->interrupts.next, struct fuse_req,
1257 return fuse_read_interrupt(fiq, cs, nbytes, req);
1268 req = list_entry(fiq->pending.next, struct fuse_req, list);
1269 clear_bit(FR_PENDING, &req->flags);
1270 list_del_init(&req->list);
1273 args = req->args;
1274 reqsize = req->in.h.len;
1278 req->out.h.error = -EIO;
1281 req->out.h.error = -E2BIG;
1282 fuse_request_end(req);
1291 req->out.h.error = err = -ECONNABORTED;
1295 list_add(&req->list, &fpq->io);
1297 cs->req = req;
1298 err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
1304 clear_bit(FR_LOCKED, &req->flags);
1310 req->out.h.error = -EIO;
1313 if (!test_bit(FR_ISREPLY, &req->flags)) {
1317 hash = fuse_req_hash(req->in.h.unique);
1318 list_move_tail(&req->list, &fpq->processing[hash]);
1319 __fuse_get_request(req);
1320 set_bit(FR_SENT, &req->flags);
1324 if (test_bit(FR_INTERRUPTED, &req->flags))
1325 queue_interrupt(req);
1326 fuse_put_request(req);
1331 if (!test_bit(FR_PRIVATE, &req->flags))
1332 list_del_init(&req->list);
1334 fuse_request_end(req);
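
On the server side, each read() on /dev/fuse returns exactly one request: struct fuse_in_header followed by the opcode-specific in-args. If the caller's buffer is smaller than in.h.len, the kernel fails the request (-EIO, or -E2BIG as shown above) and restarts the read. A minimal userspace sketch of the consuming side (devfd is an already-opened /dev/fuse fd; error handling elided):

    #include <linux/fuse.h>
    #include <stdio.h>
    #include <unistd.h>

    static void serve_one(int devfd)
    {
        char buf[1 << 20];   /* must cover max_write plus header overhead */
        ssize_t n = read(devfd, buf, sizeof(buf));

        if (n >= (ssize_t)sizeof(struct fuse_in_header)) {
            const struct fuse_in_header *ih = (const void *)buf;

            printf("opcode=%u unique=%llu len=%u uid=%u\n",
                   ih->opcode, (unsigned long long)ih->unique,
                   ih->len, ih->uid);
        }
    }
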
1813 struct fuse_req *req;
1815 list_for_each_entry(req, &fpq->processing[hash], list) {
1816 if (req->in.h.unique == unique)
1817 return req;
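
Replies are matched back to requests by hashing the unique id into the fpq->processing[] buckets and walking the short per-bucket list; fuse_req_hash() in this file hashes the unique with the interrupt bit masked off. A toy model of the bucket selection (the multiplier imitates the kernel's 64-bit multiplicative hash; treat the details as an assumption):

    #include <stdint.h>
    #include <stdio.h>

    #define FUSE_PQ_HASH_BITS 8                  /* as in fs/fuse/fuse_i.h */
    #define FUSE_PQ_HASH_SIZE (1 << FUSE_PQ_HASH_BITS)

    static unsigned int bucket(uint64_t unique)  /* ~ fuse_req_hash() */
    {
        return (unique * 0x61C8864680B583EBULL) >> (64 - FUSE_PQ_HASH_BITS);
    }

    int main(void)
    {
        printf("unique 42 -> bucket %u of %d\n", bucket(42), FUSE_PQ_HASH_SIZE);
        return 0;
    }
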
1856 struct fuse_req *req;
1885 req = NULL;
1887 req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
1890 if (!req) {
1897 __fuse_get_request(req);
1906 err = queue_interrupt(req);
1908 fuse_put_request(req);
1913 clear_bit(FR_SENT, &req->flags);
1914 list_move(&req->list, &fpq->io);
1915 req->out.h = oh;
1916 set_bit(FR_LOCKED, &req->flags);
1918 cs->req = req;
1919 if (!req->args->page_replace)
1925 err = copy_out_args(cs, req->args, nbytes);
1929 clear_bit(FR_LOCKED, &req->flags);
1933 req->out.h.error = -EIO;
1934 if (!test_bit(FR_PRIVATE, &req->flags))
1935 list_del_init(&req->list);
1938 fuse_request_end(req);
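
The reply direction is symmetric: the server writes struct fuse_out_header (unique copied from the request, error as a negated errno) followed by the out-args, and copy_out_args() above fills the waiting caller's buffers. A userspace sketch of an error-only reply (devfd again an open /dev/fuse fd):

    #include <linux/fuse.h>
    #include <stdint.h>
    #include <unistd.h>

    static int reply_err(int devfd, uint64_t unique, int err)
    {
        struct fuse_out_header oh = {
            .len    = sizeof(oh),
            .error  = -err,       /* e.g. reply_err(fd, unique, ENOENT) */
            .unique = unique,
        };

        return write(devfd, &oh, sizeof(oh)) == sizeof(oh) ? 0 : -1;
    }
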
2083 struct fuse_req *req;
2084 req = list_entry(head->next, struct fuse_req, list);
2085 req->out.h.error = -ECONNABORTED;
2086 clear_bit(FR_SENT, &req->flags);
2087 list_del_init(&req->list);
2088 fuse_request_end(req);
2132 struct fuse_req *req, *next;
2147 list_for_each_entry_safe(req, next, &fpq->io, list) {
2148 req->out.h.error = -ECONNABORTED;
2149 spin_lock(&req->waitq.lock);
2150 set_bit(FR_ABORTED, &req->flags);
2151 if (!test_bit(FR_LOCKED, &req->flags)) {
2152 set_bit(FR_PRIVATE, &req->flags);
2153 __fuse_get_request(req);
2154 list_move(&req->list, &to_end);
2156 spin_unlock(&req->waitq.lock);
2171 list_for_each_entry(req, &fiq->pending, list)
2172 clear_bit(FR_PENDING, &req->flags);
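
fuse_abort_conn() sweeps every queue: requests on fpq->io get FR_ABORTED, and only those not currently FR_LOCKED by a copier are moved to a private to_end list and completed with -ECONNABORTED; pending requests just lose FR_PENDING before being ended the same way. In short (comment sketch of the fragments above):

    /*
     * for each req on fpq->io:
     *     req->out.h.error = -ECONNABORTED;
     *     set FR_ABORTED;
     *     if (!FR_LOCKED)         // no copier inside the request
     *         move to to_end;     // ended via fuse_request_end()
     *     // else the copier sees FR_ABORTED in unlock_request()
     *     // and the request is ended once it backs out
     * for each req on fiq->pending:
     *     clear FR_PENDING;       // then ended with -ECONNABORTED too
     */
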