Lines Matching refs:req

43 static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
45 INIT_LIST_HEAD(&req->list);
46 INIT_LIST_HEAD(&req->intr_entry);
47 init_waitqueue_head(&req->waitq);
48 refcount_set(&req->count, 1);
49 __set_bit(FR_PENDING, &req->flags);
50 req->fm = fm;
55 struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
56 if (req)
57 fuse_request_init(fm, req);
59 return req;
62 static void fuse_request_free(struct fuse_req *req)
64 kmem_cache_free(fuse_req_cachep, req);
67 static void __fuse_get_request(struct fuse_req *req)
69 refcount_inc(&req->count);
73 static void __fuse_put_request(struct fuse_req *req)
75 refcount_dec(&req->count);
104 static void fuse_put_request(struct fuse_req *req);
109 struct fuse_req *req;
130 req = fuse_request_alloc(fm, GFP_KERNEL);
132 if (!req) {
138 req->in.h.uid = from_kuid(fc->user_ns, current_fsuid());
139 req->in.h.gid = from_kgid(fc->user_ns, current_fsgid());
140 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
142 __set_bit(FR_WAITING, &req->flags);
144 __set_bit(FR_BACKGROUND, &req->flags);
146 if (unlikely(req->in.h.uid == ((uid_t)-1) ||
147 req->in.h.gid == ((gid_t)-1))) {
148 fuse_put_request(req);
151 return req;
158 static void fuse_put_request(struct fuse_req *req)
160 struct fuse_conn *fc = req->fm->fc;
162 if (refcount_dec_and_test(&req->count)) {
163 if (test_bit(FR_BACKGROUND, &req->flags)) {
174 if (test_bit(FR_WAITING, &req->flags)) {
175 __clear_bit(FR_WAITING, &req->flags);
179 fuse_request_free(req);
226 struct fuse_req *req)
229 req->in.h.len = sizeof(struct fuse_in_header) +
230 fuse_len_args(req->args->in_numargs,
231 (struct fuse_arg *) req->args->in_args);
232 list_add_tail(&req->list, &fiq->pending);
261 struct fuse_req *req;
263 req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
264 list_del(&req->list);
267 req->in.h.unique = fuse_get_unique(fiq);
268 queue_request_and_unlock(fiq, req);
280 void fuse_request_end(struct fuse_req *req)
282 struct fuse_mount *fm = req->fm;
286 if (test_and_set_bit(FR_FINISHED, &req->flags))
294 if (test_bit(FR_INTERRUPTED, &req->flags)) {
296 list_del_init(&req->intr_entry);
299 WARN_ON(test_bit(FR_PENDING, &req->flags));
300 WARN_ON(test_bit(FR_SENT, &req->flags));
301 if (test_bit(FR_BACKGROUND, &req->flags)) {
303 clear_bit(FR_BACKGROUND, &req->flags);
328 wake_up(&req->waitq);
331 if (test_bit(FR_ASYNC, &req->flags))
332 req->args->end(fm, req->args, req->out.h.error);
334 fuse_put_request(req);
338 static int queue_interrupt(struct fuse_req *req)
340 struct fuse_iqueue *fiq = &req->fm->fc->iq;
343 /* Check if we've sent a request to interrupt this req */
344 if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
349 if (list_empty(&req->intr_entry)) {
350 list_add_tail(&req->intr_entry, &fiq->interrupts);
356 if (test_bit(FR_FINISHED, &req->flags)) {
357 list_del_init(&req->intr_entry);
368 static void request_wait_answer(struct fuse_req *req)
370 struct fuse_conn *fc = req->fm->fc;
376 err = wait_event_interruptible(req->waitq,
377 test_bit(FR_FINISHED, &req->flags));
381 set_bit(FR_INTERRUPTED, &req->flags);
384 if (test_bit(FR_SENT, &req->flags))
385 queue_interrupt(req);
388 if (!test_bit(FR_FORCE, &req->flags)) {
390 err = wait_event_killable(req->waitq,
391 test_bit(FR_FINISHED, &req->flags));
397 if (test_bit(FR_PENDING, &req->flags)) {
398 list_del(&req->list);
400 __fuse_put_request(req);
401 req->out.h.error = -EINTR;
411 wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
414 static void __fuse_request_send(struct fuse_req *req)
416 struct fuse_iqueue *fiq = &req->fm->fc->iq;
418 BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
422 req->out.h.error = -ENOTCONN;
424 req->in.h.unique = fuse_get_unique(fiq);
427 __fuse_get_request(req);
428 queue_request_and_unlock(fiq, req);
430 request_wait_answer(req);
469 static void fuse_force_creds(struct fuse_req *req)
471 struct fuse_conn *fc = req->fm->fc;
473 req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
474 req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
475 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
478 static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
480 req->in.h.opcode = args->opcode;
481 req->in.h.nodeid = args->nodeid;
482 req->args = args;
484 __set_bit(FR_ASYNC, &req->flags);
490 struct fuse_req *req;
495 req = fuse_request_alloc(fm, GFP_KERNEL | __GFP_NOFAIL);
498 fuse_force_creds(req);
500 __set_bit(FR_WAITING, &req->flags);
501 __set_bit(FR_FORCE, &req->flags);
504 req = fuse_get_req(fm, false);
505 if (IS_ERR(req))
506 return PTR_ERR(req);
511 fuse_args_to_req(req, args);
514 __set_bit(FR_ISREPLY, &req->flags);
515 __fuse_request_send(req);
516 ret = req->out.h.error;
521 fuse_put_request(req);
526 static bool fuse_request_queue_background(struct fuse_req *req)
528 struct fuse_mount *fm = req->fm;
532 WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
533 if (!test_bit(FR_WAITING, &req->flags)) {
534 __set_bit(FR_WAITING, &req->flags);
537 __set_bit(FR_ISREPLY, &req->flags);
547 list_add_tail(&req->list, &fc->bg_queue);
559 struct fuse_req *req;
563 req = fuse_request_alloc(fm, gfp_flags);
564 if (!req)
566 __set_bit(FR_BACKGROUND, &req->flags);
569 req = fuse_get_req(fm, true);
570 if (IS_ERR(req))
571 return PTR_ERR(req);
574 fuse_args_to_req(req, args);
576 if (!fuse_request_queue_background(req)) {
577 fuse_put_request(req);
588 struct fuse_req *req;
592 req = fuse_get_req(fm, false);
593 if (IS_ERR(req))
594 return PTR_ERR(req);
596 __clear_bit(FR_ISREPLY, &req->flags);
597 req->in.h.unique = unique;
599 fuse_args_to_req(req, args);
603 queue_request_and_unlock(fiq, req);
607 fuse_put_request(req);
618 static int lock_request(struct fuse_req *req)
621 if (req) {
622 spin_lock(&req->waitq.lock);
623 if (test_bit(FR_ABORTED, &req->flags))
626 set_bit(FR_LOCKED, &req->flags);
627 spin_unlock(&req->waitq.lock);
636 static int unlock_request(struct fuse_req *req)
639 if (req) {
640 spin_lock(&req->waitq.lock);
641 if (test_bit(FR_ABORTED, &req->flags))
644 clear_bit(FR_LOCKED, &req->flags);
645 spin_unlock(&req->waitq.lock);
652 struct fuse_req *req;
700 err = unlock_request(cs->req);
751 return lock_request(cs->req);
803 err = unlock_request(cs->req);
866 spin_lock(&cs->req->waitq.lock);
867 if (test_bit(FR_ABORTED, &cs->req->flags))
871 spin_unlock(&cs->req->waitq.lock);
896 err = lock_request(cs->req);
913 err = unlock_request(cs->req);
952 if (cs->req->args->user_pages) {
989 struct fuse_req *req = cs->req;
990 struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
1060 size_t nbytes, struct fuse_req *req)
1068 list_del_init(&req->intr_entry);
1073 ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
1074 arg.unique = req->in.h.unique;
1220 struct fuse_req *req;
1264 req = list_entry(fiq->interrupts.next, struct fuse_req,
1266 return fuse_read_interrupt(fiq, cs, nbytes, req);
1277 req = list_entry(fiq->pending.next, struct fuse_req, list);
1278 clear_bit(FR_PENDING, &req->flags);
1279 list_del_init(&req->list);
1282 args = req->args;
1283 reqsize = req->in.h.len;
1287 req->out.h.error = -EIO;
1290 req->out.h.error = -E2BIG;
1291 fuse_request_end(req);
1300 req->out.h.error = err = -ECONNABORTED;
1304 list_add(&req->list, &fpq->io);
1306 cs->req = req;
1307 err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
1313 clear_bit(FR_LOCKED, &req->flags);
1319 req->out.h.error = -EIO;
1322 if (!test_bit(FR_ISREPLY, &req->flags)) {
1326 hash = fuse_req_hash(req->in.h.unique);
1327 list_move_tail(&req->list, &fpq->processing[hash]);
1328 __fuse_get_request(req);
1329 set_bit(FR_SENT, &req->flags);
1333 if (test_bit(FR_INTERRUPTED, &req->flags))
1334 queue_interrupt(req);
1335 fuse_put_request(req);
1340 if (!test_bit(FR_PRIVATE, &req->flags))
1341 list_del_init(&req->list);
1343 fuse_request_end(req);
1824 struct fuse_req *req;
1826 list_for_each_entry(req, &fpq->processing[hash], list) {
1827 if (req->in.h.unique == unique)
1828 return req;
1867 struct fuse_req *req;
1896 req = NULL;
1898 req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT);
1901 if (!req) {
1908 __fuse_get_request(req);
1917 err = queue_interrupt(req);
1919 fuse_put_request(req);
1924 clear_bit(FR_SENT, &req->flags);
1925 list_move(&req->list, &fpq->io);
1926 req->out.h = oh;
1927 set_bit(FR_LOCKED, &req->flags);
1929 cs->req = req;
1930 if (!req->args->page_replace)
1936 err = copy_out_args(cs, req->args, nbytes);
1940 clear_bit(FR_LOCKED, &req->flags);
1944 req->out.h.error = -EIO;
1945 if (!test_bit(FR_PRIVATE, &req->flags))
1946 list_del_init(&req->list);
1949 fuse_request_end(req);
2094 struct fuse_req *req;
2095 req = list_entry(head->next, struct fuse_req, list);
2096 req->out.h.error = -ECONNABORTED;
2097 clear_bit(FR_SENT, &req->flags);
2098 list_del_init(&req->list);
2099 fuse_request_end(req);
2143 struct fuse_req *req, *next;
2158 list_for_each_entry_safe(req, next, &fpq->io, list) {
2159 req->out.h.error = -ECONNABORTED;
2160 spin_lock(&req->waitq.lock);
2161 set_bit(FR_ABORTED, &req->flags);
2162 if (!test_bit(FR_LOCKED, &req->flags)) {
2163 set_bit(FR_PRIVATE, &req->flags);
2164 __fuse_get_request(req);
2165 list_move(&req->list, &to_end);
2167 spin_unlock(&req->waitq.lock);
2182 list_for_each_entry(req, &fiq->pending, list)
2183 clear_bit(FR_PENDING, &req->flags);