Lines matching refs:req (struct ceph_mds_request handling, fs/ceph/mds_client.c)

804 static void put_request_session(struct ceph_mds_request *req)
806 if (req->r_session) {
807 ceph_put_mds_session(req->r_session);
808 req->r_session = NULL;
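
Only lines containing "req" are shown, so the helper's closing braces are elided. The full function, with the bracketing lines filled in as straightforward assumptions:

static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}

Clearing r_session after the put keeps the helper idempotent, which matters because both __do_request() and handle_forward() below call it on retry paths.
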
841 struct ceph_mds_request *req = container_of(kref,
844 ceph_mdsc_release_dir_caps_no_check(req);
845 destroy_reply_info(&req->r_reply_info);
846 if (req->r_request)
847 ceph_msg_put(req->r_request);
848 if (req->r_reply)
849 ceph_msg_put(req->r_reply);
850 if (req->r_inode) {
851 ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
853 ceph_async_iput(req->r_inode);
855 if (req->r_parent) {
856 ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
857 ceph_async_iput(req->r_parent);
859 ceph_async_iput(req->r_target_inode);
860 if (req->r_dentry)
861 dput(req->r_dentry);
862 if (req->r_old_dentry)
863 dput(req->r_old_dentry);
864 if (req->r_old_dentry_dir) {
871 ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
873 ceph_async_iput(req->r_old_dentry_dir);
875 kfree(req->r_path1);
876 kfree(req->r_path2);
877 if (req->r_pagelist)
878 ceph_pagelist_release(req->r_pagelist);
879 put_request_session(req);
880 ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
881 WARN_ON_ONCE(!list_empty(&req->r_wait));
882 kmem_cache_free(ceph_mds_request_cachep, req);
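
Lines 841-882 are ceph_mdsc_release_request(), the kref release callback armed by kref_init() in ceph_mdsc_create_request(). A reconstruction of the whole function; the unmatched lines (braces, the container_of() continuation, comments) are assumptions inferred from the fragments:

void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	ceph_mdsc_release_dir_caps_no_check(req);
	destroy_reply_info(&req->r_reply_info);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
		/* avoid calling iput_final() in mds dispatch threads */
		ceph_async_iput(req->r_inode);
	}
	if (req->r_parent) {
		ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
		ceph_async_iput(req->r_parent);
	}
	ceph_async_iput(req->r_target_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry)
		dput(req->r_old_dentry);
	if (req->r_old_dentry_dir) {
		/*
		 * r_old_dentry_dir is pinned separately: r_old_dentry's
		 * d_parent may have changed since the request was set up.
		 */
		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
		ceph_async_iput(req->r_old_dentry_dir);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	if (req->r_pagelist)
		ceph_pagelist_release(req->r_pagelist);
	put_request_session(req);
	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
	WARN_ON_ONCE(!list_empty(&req->r_wait));
	kmem_cache_free(ceph_mds_request_cachep, req);
}

Every CEPH_CAP_PIN taken at submit time is dropped here, and ceph_async_iput() defers the final iput() so it never runs in an MDS dispatch thread.
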
895 struct ceph_mds_request *req;
897 req = lookup_request(&mdsc->request_tree, tid);
898 if (req)
899 ceph_mdsc_get_request(req);
901 return req;
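
This is lookup_get_request(): an rb-tree lookup plus a reference grab, called with mdsc->mutex held. lookup_request(), like insert_request() and erase_request() below, comes from the DEFINE_RB_FUNCS(request, ...) helpers keyed on r_tid. Reconstructed, assuming the gap lines are blank:

static struct ceph_mds_request *
lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
{
	struct ceph_mds_request *req;

	req = lookup_request(&mdsc->request_tree, tid);
	if (req)
		ceph_mdsc_get_request(req);

	return req;
}
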
911 struct ceph_mds_request *req,
916 req->r_tid = ++mdsc->last_tid;
917 if (req->r_num_caps) {
918 ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
919 req->r_num_caps);
922 "failed to reserve caps: %d\n", req, ret);
923 /* set req->r_err to fail early from __do_request */
924 req->r_err = ret;
928 dout("__register_request %p tid %lld\n", req, req->r_tid);
929 ceph_mdsc_get_request(req);
930 insert_request(&mdsc->request_tree, req);
932 req->r_uid = current_fsuid();
933 req->r_gid = current_fsgid();
935 if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
936 mdsc->oldest_tid = req->r_tid;
942 req->r_unsafe_dir = dir;
944 list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
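
__register_request() assigns the tid, reserves caps, takes the tree's reference, and links the request onto its directory's unsafe-dirops list. A sketch of the full function; the error path and the dir block are reconstructed and may differ in detail from the exact source:

static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	int ret = 0;

	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps) {
		ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
					req->r_num_caps);
		if (ret < 0) {
			pr_err("__register_request %p "
			       "failed to reserve caps: %d\n", req, ret);
			/* set req->r_err to fail early from __do_request */
			req->r_err = ret;
			return;
		}
	}
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	insert_request(&mdsc->request_tree, req);

	req->r_uid = current_fsuid();
	req->r_gid = current_fsgid();

	if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
		mdsc->oldest_tid = req->r_tid;

	if (dir) {
		struct ceph_inode_info *ci = ceph_inode(dir);

		ihold(dir);
		req->r_unsafe_dir = dir;
		spin_lock(&ci->i_unsafe_lock);
		list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
		spin_unlock(&ci->i_unsafe_lock);
	}
}

The SETFILELOCK exemption keeps lock requests, which can block indefinitely on a conflicting lock, out of the oldest-tid accounting that sync/flush paths wait on (see wait_unsafe_requests() at the end of this listing).
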
950 struct ceph_mds_request *req)
952 dout("__unregister_request %p tid %lld\n", req, req->r_tid);
955 list_del_init(&req->r_unsafe_item);
957 if (req->r_tid == mdsc->oldest_tid) {
958 struct rb_node *p = rb_next(&req->r_node);
971 erase_request(&mdsc->request_tree, req);
973 if (req->r_unsafe_dir) {
974 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
976 list_del_init(&req->r_unsafe_dir_item);
979 if (req->r_target_inode &&
980 test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
981 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
983 list_del_init(&req->r_unsafe_target_item);
987 if (req->r_unsafe_dir) {
989 ceph_async_iput(req->r_unsafe_dir);
990 req->r_unsafe_dir = NULL;
993 complete_all(&req->r_safe_completion);
995 ceph_mdsc_put_request(req);
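
__unregister_request() is the reverse: advance oldest_tid past SETFILELOCK requests, drop the request from the tid tree and the per-inode unsafe lists, and complete r_safe_completion. Reconstruction; the i_unsafe_lock locking lines are assumptions:

static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);

	/* never leave an unregistered request on an unsafe list */
	list_del_init(&req->r_unsafe_item);

	if (req->r_tid == mdsc->oldest_tid) {
		struct rb_node *p = rb_next(&req->r_node);

		mdsc->oldest_tid = 0;
		while (p) {
			struct ceph_mds_request *next_req =
				rb_entry(p, struct ceph_mds_request, r_node);
			if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
				mdsc->oldest_tid = next_req->r_tid;
				break;
			}
			p = rb_next(p);
		}
	}

	erase_request(&mdsc->request_tree, req);

	if (req->r_unsafe_dir) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);

		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);
	}
	if (req->r_target_inode &&
	    test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);

		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_target_item);
		spin_unlock(&ci->i_unsafe_lock);
	}

	if (req->r_unsafe_dir) {
		/* avoid calling iput_final() in mds dispatch threads */
		ceph_async_iput(req->r_unsafe_dir);
		req->r_unsafe_dir = NULL;
	}

	complete_all(&req->r_safe_completion);

	ceph_mdsc_put_request(req);
}
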
1029 struct ceph_mds_request *req,
1035 int mode = req->r_direct_mode;
1037 u32 hash = req->r_direct_hash;
1038 bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
1047 if (req->r_resend_mds >= 0 &&
1048 (__have_session(mdsc, req->r_resend_mds) ||
1049 ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
1051 req->r_resend_mds);
1052 return req->r_resend_mds;
1059 if (req->r_inode) {
1060 if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
1061 inode = req->r_inode;
1064 /* req->r_dentry is non-null for LSSNAP request */
1066 inode = get_nonsnap_parent(req->r_dentry);
1070 } else if (req->r_dentry) {
1076 parent = READ_ONCE(req->r_dentry->d_parent);
1077 dir = req->r_parent ? : d_inode_rcu(parent);
1081 inode = d_inode(req->r_dentry);
1091 inode = d_inode(req->r_dentry);
1095 hash = ceph_dentry_hash(dir, req->r_dentry);
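
These fragments are from __choose_mds(), which picks the MDS a request is sent to. The function is too long to reconstruct line by line; a summary of its decision order, with the snapdir, frag, and cap-scanning details elided:

/*
 * __choose_mds() decision order (summarized; the elided logic is an
 * assumption based on the fragments above):
 *
 *  1. If req->r_resend_mds >= 0 and we still have a session to that
 *     mds, or the mdsmap reports it alive, use it.
 *  2. Otherwise derive an inode to route by:
 *       - req->r_inode itself, unless it is a snapdir, in which case
 *         the non-snap parent of req->r_dentry (the LSSNAP case);
 *       - else req->r_dentry: use its directory (req->r_parent,
 *         falling back to d_parent's inode) plus a ceph_dentry_hash()
 *         of the name, honoring r_direct_mode, r_direct_hash and
 *         CEPH_MDS_R_DIRECT_IS_HASH.
 *  3. Route to the mds holding (auth) caps for that inode; with no
 *     inode or no caps, fall back to a random up mds and report that
 *     through the *random out-parameter.
 */
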
1513 struct ceph_mds_request *req;
1519 req = list_first_entry(&session->s_unsafe,
1522 req->r_tid);
1523 if (req->r_target_inode)
1524 mapping_set_error(req->r_target_inode->i_mapping, -EIO);
1525 if (req->r_unsafe_dir)
1526 mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
1527 __unregister_request(mdsc, req);
1532 req = rb_entry(p, struct ceph_mds_request, r_node);
1534 if (req->r_session &&
1535 req->r_session->s_mds == session->s_mds)
1536 req->r_attempts = 0;
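
Lines 1513-1536 are from the session-teardown path (cleanup_session_requests()): unsafe requests that can no longer be committed are dropped with -EIO stamped on the affected mappings, and r_attempts is zeroed so kick_requests() will resend the remainder elsewhere. A hedged reconstruction:

static void cleanup_session_requests(struct ceph_mds_client *mdsc,
				     struct ceph_mds_session *session)
{
	struct ceph_mds_request *req;
	struct rb_node *p;

	dout("cleanup_session_requests mds%d\n", session->s_mds);
	mutex_lock(&mdsc->mutex);
	while (!list_empty(&session->s_unsafe)) {
		req = list_first_entry(&session->s_unsafe,
				       struct ceph_mds_request, r_unsafe_item);
		pr_warn_ratelimited(" dropping unsafe request %llu\n",
				    req->r_tid);
		if (req->r_target_inode)
			mapping_set_error(req->r_target_inode->i_mapping, -EIO);
		if (req->r_unsafe_dir)
			mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
		__unregister_request(mdsc, req);
	}
	/* zero r_attempts, so kick_requests() will re-send requests */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_session &&
		    req->r_session->s_mds == session->s_mds)
			req->r_attempts = 0;
	}
	mutex_unlock(&mdsc->mutex);
}
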
2290 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
2294 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
2295 struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
2322 req->r_num_caps = num_entries + 1;
2323 req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
2324 req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
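
ceph_alloc_readdir_reply_buffer() sizes the reply buffer from the directory's current entry count, clamped by the max_readdir and max_readdir_bytes mount options, retrying at smaller page orders under memory pressure. The fragments show only the final assignments; the rest is a reconstruction and may differ in detail:

int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
				    struct inode *dir)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
	size_t size = sizeof(struct ceph_mds_reply_dir_entry);
	unsigned int num_entries;
	int order;

	spin_lock(&ci->i_ceph_lock);
	num_entries = ci->i_files + ci->i_subdirs;
	spin_unlock(&ci->i_ceph_lock);
	num_entries = max(num_entries, 1U);
	num_entries = min(num_entries, opt->max_readdir);

	order = get_order(size * num_entries);
	while (order >= 0) {
		rinfo->dir_entries = (void *)__get_free_pages(GFP_KERNEL |
							      __GFP_NOWARN,
							      order);
		if (rinfo->dir_entries)
			break;
		order--;
	}
	if (!rinfo->dir_entries)
		return -ENOMEM;

	num_entries = (PAGE_SIZE << order) / size;
	num_entries = min(num_entries, opt->max_readdir);

	rinfo->dir_buf_size = PAGE_SIZE << order;
	req->r_num_caps = num_entries + 1;
	req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
	req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
	return 0;
}

r_num_caps is num_entries + 1 because one cap slot is needed for the directory itself on top of one per expected entry.
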
2334 struct ceph_mds_request *req;
2336 req = kmem_cache_zalloc(ceph_mds_request_cachep, GFP_NOFS);
2337 if (!req)
2340 mutex_init(&req->r_fill_mutex);
2341 req->r_mdsc = mdsc;
2342 req->r_started = jiffies;
2343 req->r_start_latency = ktime_get();
2344 req->r_resend_mds = -1;
2345 INIT_LIST_HEAD(&req->r_unsafe_dir_item);
2346 INIT_LIST_HEAD(&req->r_unsafe_target_item);
2347 req->r_fmode = -1;
2348 kref_init(&req->r_kref);
2349 RB_CLEAR_NODE(&req->r_node);
2350 INIT_LIST_HEAD(&req->r_wait);
2351 init_completion(&req->r_completion);
2352 init_completion(&req->r_safe_completion);
2353 INIT_LIST_HEAD(&req->r_unsafe_item);
2355 ktime_get_coarse_real_ts64(&req->r_stamp);
2357 req->r_op = op;
2358 req->r_direct_mode = mode;
2359 return req;
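
ceph_mdsc_create_request() is nearly complete in the fragments; only the allocation-failure return and blank lines are elided. Reconstructed:

struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
	struct ceph_mds_request *req;

	req = kmem_cache_zalloc(ceph_mds_request_cachep, GFP_NOFS);
	if (!req)
		return ERR_PTR(-ENOMEM);

	mutex_init(&req->r_fill_mutex);
	req->r_mdsc = mdsc;
	req->r_started = jiffies;
	req->r_start_latency = ktime_get();
	req->r_resend_mds = -1;
	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
	INIT_LIST_HEAD(&req->r_unsafe_target_item);
	req->r_fmode = -1;
	kref_init(&req->r_kref);
	RB_CLEAR_NODE(&req->r_node);
	INIT_LIST_HEAD(&req->r_wait);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	ktime_get_coarse_real_ts64(&req->r_stamp);

	req->r_op = op;
	req->r_direct_mode = mode;
	return req;
}

Callers fill in r_inode, r_dentry, r_args and friends afterwards, then submit through ceph_mdsc_do_request().
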
2550 struct ceph_mds_request *req,
2565 ret = set_request_path_attr(req->r_inode, req->r_dentry,
2566 req->r_parent, req->r_path1, req->r_ino1.ino,
2569 &req->r_req_flags));
2576 ret = set_request_path_attr(NULL, req->r_old_dentry,
2577 req->r_old_dentry_dir,
2578 req->r_path2, req->r_ino2.ino,
2591 (!!req->r_inode_drop + !!req->r_dentry_drop +
2592 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
2593 if (req->r_dentry_drop)
2595 if (req->r_old_dentry_drop)
2605 msg->hdr.tid = cpu_to_le64(req->r_tid);
2612 head->op = cpu_to_le32(req->r_op);
2613 head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
2614 head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
2615 head->ino = cpu_to_le64(req->r_deleg_ino);
2616 head->args = req->r_args;
2622 req->r_request_release_offset = p - msg->front.iov_base;
2626 if (req->r_inode_drop)
2628 req->r_inode ? req->r_inode : d_inode(req->r_dentry),
2629 mds, req->r_inode_drop, req->r_inode_unless,
2630 req->r_op == CEPH_MDS_OP_READDIR);
2631 if (req->r_dentry_drop)
2632 releases += ceph_encode_dentry_release(&p, req->r_dentry,
2633 req->r_parent, mds, req->r_dentry_drop,
2634 req->r_dentry_unless);
2635 if (req->r_old_dentry_drop)
2636 releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
2637 req->r_old_dentry_dir, mds,
2638 req->r_old_dentry_drop,
2639 req->r_old_dentry_unless);
2640 if (req->r_old_inode_drop)
2642 d_inode(req->r_old_dentry),
2643 mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
2647 p = msg->front.iov_base + req->r_request_release_offset;
2655 ceph_encode_timespec64(&ts, &req->r_stamp);
2668 if (req->r_pagelist) {
2669 struct ceph_pagelist *pagelist = req->r_pagelist;
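
create_request_message() is too long to reproduce here, but the fragments outline the wire format it builds. Summarized (exact encoding details elided and assumed):

/*
 * Layout of the request message assembled by create_request_message():
 *
 *   ceph_mds_request_head   tid, op, caller_uid/gid (from r_uid/r_gid),
 *                           ino = r_deleg_ino, args copied from r_args
 *   path1, path2            from set_request_path_attr() over
 *                           r_inode/r_dentry/r_path1/r_ino1 and
 *                           r_old_dentry/r_path2/r_ino2
 *   cap releases            up to !!r_inode_drop + !!r_dentry_drop +
 *                           !!r_old_inode_drop + !!r_old_dentry_drop
 *                           entries, encoded starting at
 *                           r_request_release_offset
 *   timestamp               r_stamp via ceph_encode_timespec64()
 *   [data payload]          optional r_pagelist (e.g. a setxattr value)
 */
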
2693 struct ceph_mds_request *req)
2695 req->r_end_latency = ktime_get();
2697 if (req->r_callback)
2698 req->r_callback(mdsc, req);
2699 complete_all(&req->r_completion);
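
complete_request() in full (only braces and a blank line were elided):

static void complete_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *req)
{
	req->r_end_latency = ktime_get();

	if (req->r_callback)
		req->r_callback(mdsc, req);
	complete_all(&req->r_completion);
}

r_callback is the asynchronous completion hook (the async create/unlink paths use it); synchronous callers simply block on r_completion in ceph_mdsc_wait_request().
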
2706 struct ceph_mds_request *req,
2713 req->r_attempts++;
2714 if (req->r_inode) {
2716 ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
2719 req->r_sent_on_mseq = cap->mseq;
2721 req->r_sent_on_mseq = -1;
2723 dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
2724 req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
2726 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2734 msg = req->r_request;
2741 if (req->r_target_inode)
2742 rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
2744 rhead->num_retry = req->r_attempts - 1;
2750 p = msg->front.iov_base + req->r_request_release_offset;
2753 ceph_encode_timespec64(&ts, &req->r_stamp);
2762 if (req->r_request) {
2763 ceph_msg_put(req->r_request);
2764 req->r_request = NULL;
2766 msg = create_request_message(mdsc, req, mds, drop_cap_releases);
2768 req->r_err = PTR_ERR(msg);
2771 req->r_request = msg;
2775 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2777 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags))
2779 if (req->r_parent)
2782 rhead->num_fwd = req->r_num_fwd;
2783 rhead->num_retry = req->r_attempts - 1;
2785 dout(" r_parent = %p\n", req->r_parent);
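
__prepare_send_request() distinguishes a replay (a request the MDS already acknowledged as unsafe) from a fresh send. Summarized from the fragments; the elided steps are assumptions:

/*
 * __prepare_send_request(), condensed:
 *
 *   - bump r_attempts; when r_inode is set, record r_sent_on_mseq from
 *     the mds cap's mseq (or -1 without a cap)
 *   - GOT_UNSAFE (replay): reuse r_request, set CEPH_MDS_FLAG_REPLAY,
 *     rewrite rhead->ino from r_target_inode, num_retry =
 *     r_attempts - 1, strip the cap releases and re-encode only the
 *     timestamp at r_request_release_offset
 *   - otherwise: drop any old r_request and build a new one with
 *     create_request_message(); on failure stash PTR_ERR in r_err
 *   - flags on the fresh message: REPLAY if GOT_UNSAFE, ASYNC if
 *     CEPH_MDS_R_ASYNC, WANT_DENTRY if r_parent is set; always set
 *     rhead->num_fwd = r_num_fwd and num_retry = r_attempts - 1
 */
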
2794 struct ceph_mds_request *req,
2799 err = __prepare_send_request(mdsc, req, session->s_mds,
2802 ceph_msg_get(req->r_request);
2803 ceph_con_send(&session->s_con, req->r_request);
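
__send_request() reconstructed (gap lines assumed):

static int __send_request(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session,
			  struct ceph_mds_request *req,
			  bool drop_cap_releases)
{
	int err;

	err = __prepare_send_request(mdsc, req, session->s_mds,
				     drop_cap_releases);
	if (!err) {
		ceph_msg_get(req->r_request);
		ceph_con_send(&session->s_con, req->r_request);
	}

	return err;
}

The extra ceph_msg_get() gives the connection its own reference on the in-flight message while req->r_request keeps one for a possible resend.
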
2813 struct ceph_mds_request *req)
2820 if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
2821 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
2822 __unregister_request(mdsc, req);
2826 if (req->r_timeout &&
2827 time_after_eq(jiffies, req->r_started + req->r_timeout)) {
2845 list_add(&req->r_wait, &mdsc->waiting_for_map);
2856 put_request_session(req);
2858 mds = __choose_mds(mdsc, req, &random);
2861 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
2866 list_add(&req->r_wait, &mdsc->waiting_for_map);
2879 req->r_session = ceph_get_mds_session(session);
2894 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
2905 req->r_resend_mds = mds;
2907 list_add(&req->r_wait, &session->s_waiting);
2912 req->r_resend_mds = -1; /* forget any previous mds hint */
2914 if (req->r_request_started == 0) /* note request start time */
2915 req->r_request_started = jiffies;
2917 err = __send_request(mdsc, session, req, false);
2924 req->r_err = err;
2925 complete_request(mdsc, req);
2926 __unregister_request(mdsc, req);
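
__do_request() is the state machine that routes a request; summarized (caller holds mdsc->mutex; the elided branches are assumptions):

/*
 * __do_request() flow, condensed:
 *
 *  1. return early if r_err or GOT_RESULT is set, unregistering the
 *     request if it was ABORTED.
 *  2. fail if r_timeout has expired; park on mdsc->waiting_for_map
 *     while there is no usable mdsmap.
 *  3. put_request_session(), then __choose_mds(). With no active mds:
 *     async requests fail fast (their caps and delegated inos are
 *     bound to a session, so they cannot be queued); others wait on
 *     waiting_for_map.
 *  4. look up or register the session and take a ref into r_session.
 *     If it is not OPEN/HUNG: a rejected session fails, async
 *     requests bail, otherwise open the session, remember a randomly
 *     chosen mds in r_resend_mds, and park on session->s_waiting.
 *  5. otherwise clear r_resend_mds (forget any previous hint), note
 *     r_request_started, and call __send_request(). Any early error
 *     sets r_err, then complete_request() and __unregister_request().
 */
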
2937 struct ceph_mds_request *req;
2943 req = list_entry(tmp_list.next,
2945 list_del_init(&req->r_wait);
2946 dout(" wake request %p tid %llu\n", req, req->r_tid);
2947 __do_request(mdsc, req);
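
__wake_requests() splices the waiters onto a private list and re-runs __do_request() on each. Reconstructed:

static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head)
{
	struct ceph_mds_request *req;
	LIST_HEAD(tmp_list);

	list_splice_init(head, &tmp_list);

	while (!list_empty(&tmp_list)) {
		req = list_entry(tmp_list.next,
				 struct ceph_mds_request, r_wait);
		list_del_init(&req->r_wait);
		dout(" wake request %p tid %llu\n", req, req->r_tid);
		__do_request(mdsc, req);
	}
}
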
2957 struct ceph_mds_request *req;
2962 req = rb_entry(p, struct ceph_mds_request, r_node);
2964 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2966 if (req->r_attempts > 0)
2968 if (req->r_session &&
2969 req->r_session->s_mds == mds) {
2970 dout(" kicking tid %llu\n", req->r_tid);
2971 list_del_init(&req->r_wait);
2972 __do_request(mdsc, req);
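
kick_requests() resends not-yet-acked requests that were bound to a given mds, e.g. after a failover. Reconstructed; gap lines are assumptions:

static void kick_requests(struct ceph_mds_client *mdsc, int mds)
{
	struct ceph_mds_request *req;
	struct rb_node *p = rb_first(&mdsc->request_tree);

	dout("kick_requests mds%d\n", mds);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
			continue;
		if (req->r_attempts > 0)
			continue; /* only new requests */
		if (req->r_session &&
		    req->r_session->s_mds == mds) {
			dout(" kicking tid %llu\n", req->r_tid);
			list_del_init(&req->r_wait);
			__do_request(mdsc, req);
		}
	}
}
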
2978 struct ceph_mds_request *req)
2983 if (req->r_inode)
2984 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
2985 if (req->r_parent) {
2986 struct ceph_inode_info *ci = ceph_inode(req->r_parent);
2987 int fmode = (req->r_op & CEPH_MDS_OP_WRITE) ?
2993 ihold(req->r_parent);
2995 if (req->r_old_dentry_dir)
2996 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
2999 if (req->r_inode) {
3000 err = ceph_wait_on_async_create(req->r_inode);
3008 if (!err && req->r_old_inode) {
3009 err = ceph_wait_on_async_create(req->r_old_inode);
3017 dout("submit_request on %p for inode %p\n", req, dir);
3019 __register_request(mdsc, req, dir);
3020 __do_request(mdsc, req);
3021 err = req->r_err;
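
ceph_mdsc_submit_request() pins the inodes the request touches (CEPH_CAP_PIN, dropped again in ceph_mdsc_release_request()), waits out any pending async creates, then registers and kicks the request under mdsc->mutex. A sketch; the r_parent block's locking and fmode-touch lines are assumptions based on this vintage of the code:

int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
			     struct ceph_mds_request *req)
{
	int err = 0;

	/* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
	if (req->r_inode)
		ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
	if (req->r_parent) {
		struct ceph_inode_info *ci = ceph_inode(req->r_parent);
		int fmode = (req->r_op & CEPH_MDS_OP_WRITE) ?
			    CEPH_FILE_MODE_WR : CEPH_FILE_MODE_RD;

		spin_lock(&ci->i_ceph_lock);
		ceph_take_cap_refs(ci, CEPH_CAP_PIN, false);
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);
		ihold(req->r_parent);
	}
	if (req->r_old_dentry_dir)
		ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);

	if (req->r_inode) {
		err = ceph_wait_on_async_create(req->r_inode);
		if (err)
			return err;
	}
	if (!err && req->r_old_inode) {
		err = ceph_wait_on_async_create(req->r_old_inode);
		if (err)
			return err;
	}

	dout("submit_request on %p for inode %p\n", req, dir);
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, dir);
	__do_request(mdsc, req);
	err = req->r_err;
	mutex_unlock(&mdsc->mutex);
	return err;
}
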
3027 struct ceph_mds_request *req)
3033 if (!req->r_timeout && req->r_wait_for_completion) {
3034 err = req->r_wait_for_completion(mdsc, req);
3037 &req->r_completion,
3038 ceph_timeout_jiffies(req->r_timeout));
3050 if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
3051 err = le32_to_cpu(req->r_reply_info.head->result);
3053 dout("aborted request %lld with %d\n", req->r_tid, err);
3060 mutex_lock(&req->r_fill_mutex);
3061 req->r_err = err;
3062 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
3063 mutex_unlock(&req->r_fill_mutex);
3065 if (req->r_parent &&
3066 (req->r_op & CEPH_MDS_OP_WRITE))
3067 ceph_invalidate_dir_request(req);
3069 err = req->r_err;
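
ceph_mdsc_wait_request() blocks until the reply arrives (or the wait is killed or times out), then resolves the race between abort and reply under mdsc->mutex. Reconstruction; the elided lines are assumptions:

static int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
				  struct ceph_mds_request *req)
{
	int err;

	/* wait */
	dout("do_request waiting\n");
	if (!req->r_timeout && req->r_wait_for_completion) {
		err = req->r_wait_for_completion(mdsc, req);
	} else {
		long timeleft = wait_for_completion_killable_timeout(
					&req->r_completion,
					ceph_timeout_jiffies(req->r_timeout));
		if (timeleft > 0)
			err = 0;
		else if (!timeleft)
			err = -ETIMEDOUT;	/* timed out */
		else
			err = timeleft;		/* killed */
	}
	dout("do_request waited, got %d\n", err);
	mutex_lock(&mdsc->mutex);

	/* only abort if we didn't race with a real reply */
	if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
		err = le32_to_cpu(req->r_reply_info.head->result);
	} else if (err < 0) {
		dout("aborted request %lld with %d\n", req->r_tid, err);

		/*
		 * r_fill_mutex excludes a concurrent ceph_fill_trace()
		 * or ceph_readdir_prepopulate() while we mark the abort.
		 */
		mutex_lock(&req->r_fill_mutex);
		req->r_err = err;
		set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
		mutex_unlock(&req->r_fill_mutex);

		if (req->r_parent &&
		    (req->r_op & CEPH_MDS_OP_WRITE))
			ceph_invalidate_dir_request(req);
	} else {
		err = req->r_err;
	}

	mutex_unlock(&mdsc->mutex);
	return err;
}
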
3082 struct ceph_mds_request *req)
3086 dout("do_request on %p\n", req);
3089 err = ceph_mdsc_submit_request(mdsc, dir, req);
3091 err = ceph_mdsc_wait_request(mdsc, req);
3092 dout("do_request %p done, result %d\n", req, err);
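
ceph_mdsc_do_request() is just submit plus wait. Reconstructed (the one elided line, an if (!err) guard, is an assumption):

int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
			 struct inode *dir,
			 struct ceph_mds_request *req)
{
	int err;

	dout("do_request on %p\n", req);

	/* issue */
	err = ceph_mdsc_submit_request(mdsc, dir, req);
	if (!err)
		err = ceph_mdsc_wait_request(mdsc, req);
	dout("do_request %p done, result %d\n", req, err);
	return err;
}
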
3100 void ceph_invalidate_dir_request(struct ceph_mds_request *req)
3102 struct inode *dir = req->r_parent;
3103 struct inode *old_dir = req->r_old_dentry_dir;
3110 if (req->r_dentry)
3111 ceph_invalidate_dentry_lease(req->r_dentry);
3112 if (req->r_old_dentry)
3113 ceph_invalidate_dentry_lease(req->r_old_dentry);
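
ceph_invalidate_dir_request() invalidates cached dentry state after an aborted write op, since we no longer know whether the MDS applied it. The gap (lines 3104-3109) presumably clears the dir-complete flags; reconstructed:

void ceph_invalidate_dir_request(struct ceph_mds_request *req)
{
	struct inode *dir = req->r_parent;
	struct inode *old_dir = req->r_old_dentry_dir;

	dout("invalidate_dir_request %p %p (complete, lease(s))\n",
	     dir, old_dir);

	ceph_dir_clear_complete(dir);
	if (old_dir)
		ceph_dir_clear_complete(old_dir);
	if (req->r_dentry)
		ceph_invalidate_dentry_lease(req->r_dentry);
	if (req->r_old_dentry)
		ceph_invalidate_dentry_lease(req->r_old_dentry);
}
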
3126 struct ceph_mds_request *req;
3143 req = lookup_get_request(mdsc, tid);
3144 if (!req) {
3149 dout("handle_reply %p\n", req);
3152 if (req->r_session != session) {
3155 req->r_session ? req->r_session->s_mds : -1);
3161 if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
3162 (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
3168 if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
3185 dout("got ESTALE on request %llu\n", req->r_tid);
3186 req->r_resend_mds = -1;
3187 if (req->r_direct_mode != USE_AUTH_MDS) {
3189 req->r_direct_mode = USE_AUTH_MDS;
3190 __do_request(mdsc, req);
3194 int mds = __choose_mds(mdsc, req, NULL);
3195 if (mds >= 0 && mds != req->r_session->s_mds) {
3197 __do_request(mdsc, req);
3202 dout("have to return ESTALE on request %llu\n", req->r_tid);
3207 set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
3208 __unregister_request(mdsc, req);
3214 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3228 set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
3229 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
3233 rinfo = &req->r_reply_info;
3261 mutex_lock(&req->r_fill_mutex);
3262 current->journal_info = req;
3263 err = ceph_fill_trace(mdsc->fsc->sb, req);
3265 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
3266 req->r_op == CEPH_MDS_OP_LSSNAP))
3267 ceph_readdir_prepopulate(req, req->r_session);
3270 mutex_unlock(&req->r_fill_mutex);
3277 if (req->r_target_inode &&
3278 test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3280 ceph_inode(req->r_target_inode);
3282 list_add_tail(&req->r_unsafe_target_item,
3287 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
3291 if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3293 req->r_err = err;
3295 req->r_reply = ceph_msg_get(msg);
3296 set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
3306 complete_request(mdsc, req);
3308 ceph_update_metadata_latency(&mdsc->metric, req->r_start_latency,
3309 req->r_end_latency, err);
3311 ceph_mdsc_put_request(req);
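
handle_reply() is the largest consumer of these fields; a condensed map of its flow (decode, locking, and snap-realm details elided):

/*
 * handle_reply() flow, summarized from the fragments above:
 *
 *  1. lookup_get_request() by tid; drop the reply if the request is
 *     gone or arrived on the wrong session.
 *  2. discard duplicates: an unsafe reply after GOT_UNSAFE, or a safe
 *     reply after GOT_SAFE.
 *  3. on ESTALE, retry: first force r_direct_mode = USE_AUTH_MDS,
 *     then __choose_mds() again; only surface ESTALE once the auth
 *     mds itself has answered.
 *  4. a safe reply sets GOT_SAFE and __unregister_request()s; an
 *     unsafe reply sets GOT_UNSAFE and queues the request on
 *     r_session->s_unsafe.
 *  5. under r_fill_mutex, ceph_fill_trace() (plus
 *     ceph_readdir_prepopulate() for READDIR/LSSNAP) populates the
 *     cache; unsafe replies with a target inode also join that
 *     inode's unsafe-target list; leftover cap reservations are
 *     returned via ceph_unreserve_caps().
 *  6. unless ABORTED, stash the message in r_reply, set GOT_RESULT,
 *     complete_request(), update the latency metrics, and drop the
 *     ref from step 1.
 */
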
3324 struct ceph_mds_request *req;
3337 req = lookup_get_request(mdsc, tid);
3338 if (!req) {
3339 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
3343 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3345 __unregister_request(mdsc, req);
3346 } else if (fwd_seq <= req->r_num_fwd) {
3348 tid, next_mds, req->r_num_fwd, fwd_seq);
3352 BUG_ON(req->r_err);
3353 BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
3354 req->r_attempts = 0;
3355 req->r_num_fwd = fwd_seq;
3356 req->r_resend_mds = next_mds;
3357 put_request_session(req);
3358 __do_request(mdsc, req);
3360 ceph_mdsc_put_request(req);
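
handle_forward() processes an MDS "I forwarded your request" message: stale forwards are ignored, live ones are re-driven at the new mds. Reconstructed from the lookup onward (the tid/next_mds/fwd_seq decoding is elided; gap lines are assumptions):

	mutex_lock(&mdsc->mutex);
	req = lookup_get_request(mdsc, tid);
	if (!req) {
		dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
		goto out;	/* dup reply? */
	}

	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
		dout("forward tid %llu aborted, unregistering\n", tid);
		__unregister_request(mdsc, req);
	} else if (fwd_seq <= req->r_num_fwd) {
		dout("forward tid %llu to mds%d - old seq %d <= %d\n",
		     tid, next_mds, req->r_num_fwd, fwd_seq);
	} else {
		/* resend. forward race not possible; mds would drop */
		dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
		BUG_ON(req->r_err);
		BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
		req->r_attempts = 0;
		req->r_num_fwd = fwd_seq;
		req->r_resend_mds = next_mds;
		put_request_session(req);
		__do_request(mdsc, req);
	}
	ceph_mdsc_put_request(req);
out:
	mutex_unlock(&mdsc->mutex);

Resetting r_attempts and dropping the session makes __do_request() treat the resend as a fresh submission aimed at r_resend_mds.
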
3551 void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
3555 dcaps = xchg(&req->r_dir_caps, 0);
3558 ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps);
3562 void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
3566 dcaps = xchg(&req->r_dir_caps, 0);
3569 ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent),
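
The two dir-caps helpers differ only in which put they use. Reconstructed (the dout lines are assumed):

void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
{
	int dcaps;

	dcaps = xchg(&req->r_dir_caps, 0);
	if (dcaps) {
		dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
		ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps);
	}
}

void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
{
	int dcaps;

	dcaps = xchg(&req->r_dir_caps, 0);
	if (dcaps) {
		dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
		ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent),
						dcaps);
	}
}

The xchg() makes the release idempotent: r_dir_caps can be dropped from either the reply path or the final ceph_mdsc_release_request() without a double put, and the _no_check variant avoids re-entering the cap-check machinery from reconnect and teardown context.
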
3580 struct ceph_mds_request *req, *nreq;
3586 list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
3587 __send_request(mdsc, session, req, true);
3595 req = rb_entry(p, struct ceph_mds_request, r_node);
3597 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
3599 if (req->r_attempts == 0)
3601 if (!req->r_session)
3603 if (req->r_session->s_mds != session->s_mds)
3606 ceph_mdsc_release_dir_caps_no_check(req);
3608 __send_request(mdsc, session, req, true);
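
These lines re-send requests when an MDS enters reconnect, in this vintage a function named replay_unsafe_requests() (the name is inferred, not shown by the matcher): first everything on s_unsafe, then any already-attempted request bound to this session, so the MDS can process completed requests during its clientreplay stage. Reconstructed:

static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_mds_request *req, *nreq;
	struct rb_node *p;

	dout("replay_unsafe_requests mds%d\n", session->s_mds);

	mutex_lock(&mdsc->mutex);
	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
		__send_request(mdsc, session, req, true);

	/*
	 * also re-send old requests so the MDS can process completed
	 * requests in its clientreplay stage
	 */
	p = rb_first(&mdsc->request_tree);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
			continue;
		if (req->r_attempts == 0)
			continue; /* only old requests */
		if (!req->r_session)
			continue;
		if (req->r_session->s_mds != session->s_mds)
			continue;

		ceph_mdsc_release_dir_caps_no_check(req);

		__send_request(mdsc, session, req, true);
	}
	mutex_unlock(&mdsc->mutex);
}
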
4650 struct ceph_mds_request *req;
4662 while ((req = __get_oldest_req(mdsc))) {
4664 req->r_tid);
4665 list_del_init(&req->r_wait);
4666 __unregister_request(mdsc, req);
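
The loop at 4662-4666 is the teardown half of wait_requests() on the umount path: give outstanding requests mount_timeout to drain, then forcibly unregister whatever is left. A sketch, assuming the usual shape of this function:

static void wait_requests(struct ceph_mds_client *mdsc)
{
	struct ceph_options *opts = mdsc->fsc->client->options;
	struct ceph_mds_request *req;

	mutex_lock(&mdsc->mutex);
	if (__get_oldest_req(mdsc)) {
		mutex_unlock(&mdsc->mutex);

		dout("wait_requests waiting for requests\n");
		wait_for_completion_timeout(&mdsc->safe_umount_waiters,
				ceph_timeout_jiffies(opts->mount_timeout));

		/* tear down remaining requests */
		mutex_lock(&mdsc->mutex);
		while ((req = __get_oldest_req(mdsc))) {
			dout("wait_requests timed out on tid %llu\n",
			     req->r_tid);
			list_del_init(&req->r_wait);
			__unregister_request(mdsc, req);
		}
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_requests done\n");
}
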
4725 struct ceph_mds_request *req = NULL, *nextreq;
4731 req = __get_oldest_req(mdsc);
4732 while (req && req->r_tid <= want_tid) {
4734 n = rb_next(&req->r_node);
4739 if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
4740 (req->r_op & CEPH_MDS_OP_WRITE)) {
4742 ceph_mdsc_get_request(req);
4747 req->r_tid, want_tid);
4748 wait_for_completion(&req->r_safe_completion);
4750 ceph_mdsc_put_request(req);
4760 req = nextreq;
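
The final fragments are wait_unsafe_requests(), the ceph_mdsc_sync() helper: walk the tid tree up to want_tid and block on r_safe_completion for every write op, dropping mdsc->mutex around each wait. The nextreq reference juggling (needed because the tree can change while unlocked) is reconstructed:

static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
{
	struct ceph_mds_request *req = NULL, *nextreq;
	struct rb_node *n;

	mutex_lock(&mdsc->mutex);
	dout("wait_unsafe_requests want %lld\n", want_tid);
restart:
	req = __get_oldest_req(mdsc);
	while (req && req->r_tid <= want_tid) {
		/* find next request */
		n = rb_next(&req->r_node);
		if (n)
			nextreq = rb_entry(n, struct ceph_mds_request, r_node);
		else
			nextreq = NULL;
		if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
		    (req->r_op & CEPH_MDS_OP_WRITE)) {
			/* write op */
			ceph_mdsc_get_request(req);
			if (nextreq)
				ceph_mdsc_get_request(nextreq);
			mutex_unlock(&mdsc->mutex);
			dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
			     req->r_tid, want_tid);
			wait_for_completion(&req->r_safe_completion);
			mutex_lock(&mdsc->mutex);
			ceph_mdsc_put_request(req);
			if (!nextreq)
				break;	/* next dne before, so we're done */
			if (RB_EMPTY_NODE(&nextreq->r_node)) {
				/* next request was removed from tree */
				ceph_mdsc_put_request(nextreq);
				goto restart;
			}
			ceph_mdsc_put_request(nextreq);
		}
		req = nextreq;
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_unsafe_requests done\n");
}

SETFILELOCK is skipped here for the same reason it is excluded from oldest_tid in __register_request(): a lock request blocked on a conflicting lock must not stall sync.
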