Lines matching references to req
410 struct ceph_mds_request *req,
413 struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
443 struct inode *inode = d_inode(req->r_dentry);
722 struct ceph_mds_request *req,
725 struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
731 return parse_reply_info_readdir(p, end, req, features);
744 struct ceph_mds_request *req, u64 features)
746 struct ceph_mds_reply_info_parsed *info = &req->r_reply_info;
768 err = parse_reply_info_extra(&p, p+len, req, features, s);
1045 static void put_request_session(struct ceph_mds_request *req)
1047 if (req->r_session) {
1048 ceph_put_mds_session(req->r_session);
1049 req->r_session = NULL;
1082 struct ceph_mds_request *req = container_of(kref,
1085 ceph_mdsc_release_dir_caps_no_check(req);
1086 destroy_reply_info(&req->r_reply_info);
1087 if (req->r_request)
1088 ceph_msg_put(req->r_request);
1089 if (req->r_reply)
1090 ceph_msg_put(req->r_reply);
1091 if (req->r_inode) {
1092 ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
1093 iput(req->r_inode);
1095 if (req->r_parent) {
1096 ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
1097 iput(req->r_parent);
1099 iput(req->r_target_inode);
1100 iput(req->r_new_inode);
1101 if (req->r_dentry)
1102 dput(req->r_dentry);
1103 if (req->r_old_dentry)
1104 dput(req->r_old_dentry);
1105 if (req->r_old_dentry_dir) {
1112 ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
1114 iput(req->r_old_dentry_dir);
1116 kfree(req->r_path1);
1117 kfree(req->r_path2);
1118 put_cred(req->r_cred);
1119 if (req->r_pagelist)
1120 ceph_pagelist_release(req->r_pagelist);
1121 kfree(req->r_fscrypt_auth);
1122 kfree(req->r_altname);
1123 put_request_session(req);
1124 ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
1125 WARN_ON_ONCE(!list_empty(&req->r_wait));
1126 kmem_cache_free(ceph_mds_request_cachep, req);
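
The cluster above (file lines 1082-1126) is a reference-counted release path: inodes, dentries, paths, the session reference and the cap reservation are dropped only once the last reference to the request goes away. Below is a minimal userspace sketch of that get/put shape using C11 atomics; the fake_request type and its fields are invented stand-ins, not the kernel's kref API or the Ceph structures.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for a request object that owns other resources. */
struct fake_request {
	atomic_int refcount;
	char *path;              /* plays the role of r_path1/r_path2 */
};

static struct fake_request *request_alloc(const char *path)
{
	struct fake_request *req = calloc(1, sizeof(*req));

	if (!req)
		return NULL;
	atomic_init(&req->refcount, 1);   /* the caller holds the first ref */
	req->path = strdup(path);
	return req;
}

static void request_get(struct fake_request *req)
{
	atomic_fetch_add(&req->refcount, 1);
}

/* Only the final put releases the resources the request pinned. */
static void request_put(struct fake_request *req)
{
	if (atomic_fetch_sub(&req->refcount, 1) == 1) {
		free(req->path);
		free(req);
	}
}

int main(void)
{
	struct fake_request *req = request_alloc("/some/dir");

	request_get(req);   /* e.g. a request tree keeps its own reference */
	request_put(req);   /* the tree drops it ...                       */
	request_put(req);   /* ... and the caller's final put frees it     */
	printf("released\n");
	return 0;
}
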
1139 struct ceph_mds_request *req;
1141 req = lookup_request(&mdsc->request_tree, tid);
1142 if (req)
1143 ceph_mdsc_get_request(req);
1145 return req;
1155 struct ceph_mds_request *req,
1160 req->r_tid = ++mdsc->last_tid;
1161 if (req->r_num_caps) {
1162 ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
1163 req->r_num_caps);
1166 "failed to reserve caps: %d\n", req, ret);
1167 /* set req->r_err to fail early from __do_request */
1168 req->r_err = ret;
1172 dout("__register_request %p tid %lld\n", req, req->r_tid);
1173 ceph_mdsc_get_request(req);
1174 insert_request(&mdsc->request_tree, req);
1176 req->r_cred = get_current_cred();
1178 if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
1179 mdsc->oldest_tid = req->r_tid;
1185 req->r_unsafe_dir = dir;
1187 list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
1193 struct ceph_mds_request *req)
1195 dout("__unregister_request %p tid %lld\n", req, req->r_tid);
1198 list_del_init(&req->r_unsafe_item);
1200 if (req->r_tid == mdsc->oldest_tid) {
1201 struct rb_node *p = rb_next(&req->r_node);
1214 erase_request(&mdsc->request_tree, req);
1216 if (req->r_unsafe_dir) {
1217 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
1219 list_del_init(&req->r_unsafe_dir_item);
1222 if (req->r_target_inode &&
1223 test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
1224 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
1226 list_del_init(&req->r_unsafe_target_item);
1230 if (req->r_unsafe_dir) {
1231 iput(req->r_unsafe_dir);
1232 req->r_unsafe_dir = NULL;
1235 complete_all(&req->r_safe_completion);
1237 ceph_mdsc_put_request(req);
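
File lines 1139-1237 above come from the request-tree bookkeeping: registration assigns a monotonically increasing tid and takes a reference on behalf of the tree, lookups return the request with an extra reference, and unregistration erases the entry and drops the tree's reference. A simplified userspace analogue of that lifecycle, with a plain linked list standing in for the rbtree and all names hypothetical:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_req {
	uint64_t tid;
	atomic_int ref;
	struct fake_req *next;
};

struct req_tree {                 /* a plain list standing in for an rbtree */
	uint64_t last_tid;
	struct fake_req *head;
};

static void req_get(struct fake_req *r) { atomic_fetch_add(&r->ref, 1); }

static void req_put(struct fake_req *r)
{
	if (atomic_fetch_sub(&r->ref, 1) == 1)
		free(r);
}

/* Registration: hand out the next tid and let the tree hold a reference. */
static void register_req(struct req_tree *t, struct fake_req *r)
{
	r->tid = ++t->last_tid;
	req_get(r);
	r->next = t->head;
	t->head = r;
}

/* Lookup returns the request with an extra reference, so the caller can
 * keep using it even if it is unregistered in the meantime. */
static struct fake_req *lookup_get_req(struct req_tree *t, uint64_t tid)
{
	for (struct fake_req *r = t->head; r; r = r->next) {
		if (r->tid == tid) {
			req_get(r);
			return r;
		}
	}
	return NULL;
}

/* Unregistration removes the entry and drops the tree's reference. */
static void unregister_req(struct req_tree *t, struct fake_req *r)
{
	for (struct fake_req **p = &t->head; *p; p = &(*p)->next) {
		if (*p == r) {
			*p = r->next;
			req_put(r);
			return;
		}
	}
}

int main(void)
{
	struct req_tree tree = { 0, NULL };
	struct fake_req *r = calloc(1, sizeof(*r));

	if (!r)
		return 1;
	atomic_init(&r->ref, 1);          /* caller's reference */
	register_req(&tree, r);
	printf("registered tid %llu\n", (unsigned long long)r->tid);

	struct fake_req *found = lookup_get_req(&tree, r->tid);
	unregister_req(&tree, r);         /* tree drops its reference */
	req_put(found);                   /* drop the lookup reference */
	req_put(r);                       /* caller's final put frees it */
	return 0;
}
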
1271 struct ceph_mds_request *req,
1277 int mode = req->r_direct_mode;
1279 u32 hash = req->r_direct_hash;
1280 bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
1289 if (req->r_resend_mds >= 0 &&
1290 (__have_session(mdsc, req->r_resend_mds) ||
1291 ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
1293 req->r_resend_mds);
1294 return req->r_resend_mds;
1301 if (req->r_inode) {
1302 if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
1303 inode = req->r_inode;
1306 /* req->r_dentry is non-null for LSSNAP request */
1308 inode = get_nonsnap_parent(req->r_dentry);
1312 } else if (req->r_dentry) {
1318 parent = READ_ONCE(req->r_dentry->d_parent);
1319 dir = req->r_parent ? : d_inode_rcu(parent);
1323 inode = d_inode(req->r_dentry);
1333 inode = d_inode(req->r_dentry);
1337 hash = ceph_dentry_hash(dir, req->r_dentry);
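
The lines around 1271-1337 belong to the logic that picks which MDS should receive a request: an explicit resend hint wins when that MDS is still usable, otherwise the request is directed by the inode or dentry it touches, optionally via a hash of the name within its parent directory. The toy sketch below only illustrates the "honour a hint, else hash the name across servers" idea; the real client consults the mdsmap, fragment trees and caps, and uses ceph_dentry_hash() rather than the FNV-1a hash invented here.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy name hash (FNV-1a); purely illustrative, not ceph_dentry_hash(). */
static uint32_t name_hash(uint64_t dir_ino, const char *name)
{
	uint32_t h = 2166136261u;

	for (size_t i = 0; i < sizeof(dir_ino); i++)
		h = (h ^ ((dir_ino >> (8 * i)) & 0xff)) * 16777619u;
	for (const char *p = name; *p; p++)
		h = (h ^ (uint8_t)*p) * 16777619u;
	return h;
}

/* Direct a request: honour a resend hint if one is set, otherwise place
 * it by hashing the (directory, name) pair across the active servers. */
static int choose_server(int resend_hint, int num_servers,
			 uint64_t dir_ino, const char *name)
{
	if (resend_hint >= 0 && resend_hint < num_servers)
		return resend_hint;
	return (int)(name_hash(dir_ino, name) % (uint32_t)num_servers);
}

int main(void)
{
	printf("no hint  -> mds%d\n", choose_server(-1, 4, 0x1000, "file.txt"));
	printf("hint = 2 -> mds%d\n", choose_server(2, 4, 0x1000, "file.txt"));
	return 0;
}
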
1756 struct ceph_mds_request *req;
1762 req = list_first_entry(&session->s_unsafe,
1765 req->r_tid);
1766 if (req->r_target_inode)
1767 mapping_set_error(req->r_target_inode->i_mapping, -EIO);
1768 if (req->r_unsafe_dir)
1769 mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
1770 __unregister_request(mdsc, req);
1775 req = rb_entry(p, struct ceph_mds_request, r_node);
1777 if (req->r_session &&
1778 req->r_session->s_mds == session->s_mds)
1779 req->r_attempts = 0;
2439 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
2443 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
2444 struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
2472 req->r_num_caps = num_entries + 1;
2473 req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
2474 req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
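
Lines 2439-2474 size the readdir reply buffer: the client estimates how many fixed-size entries it wants, clamps that against the buffer it could allocate and the mount's readdir limits, then advertises the result as max_entries/max_bytes and reserves one cap per entry plus one for the directory itself (num_entries + 1). The arithmetic can be sketched as below; entry_size, max_readdir and max_readdir_bytes are hypothetical parameters for the sketch.

#include <stdio.h>

struct readdir_budget {
	unsigned int max_entries;   /* what we ask the server for */
	unsigned int max_bytes;
	unsigned int num_caps;      /* one cap per entry, plus the dir */
};

/* Clamp an estimated entry count to the mount limits and the buffer size. */
static struct readdir_budget size_readdir(unsigned int estimated_entries,
					  unsigned int buf_size,
					  unsigned int entry_size,
					  unsigned int max_readdir,
					  unsigned int max_readdir_bytes)
{
	unsigned int n = estimated_entries ? estimated_entries : 1;

	if (n > max_readdir)
		n = max_readdir;
	if (n > buf_size / entry_size)          /* fit in the reply buffer */
		n = buf_size / entry_size;

	return (struct readdir_budget){
		.max_entries = n,
		.max_bytes = max_readdir_bytes,
		.num_caps = n + 1,
	};
}

int main(void)
{
	/* e.g. a directory believed to hold ~5000 entries */
	struct readdir_budget b = size_readdir(5000, 64 * 1024, 64,
					       1024, 512 * 1024);

	printf("max_entries=%u max_bytes=%u num_caps=%u\n",
	       b.max_entries, b.max_bytes, b.num_caps);
	return 0;
}
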
2484 struct ceph_mds_request *req;
2486 req = kmem_cache_zalloc(ceph_mds_request_cachep, GFP_NOFS);
2487 if (!req)
2490 mutex_init(&req->r_fill_mutex);
2491 req->r_mdsc = mdsc;
2492 req->r_started = jiffies;
2493 req->r_start_latency = ktime_get();
2494 req->r_resend_mds = -1;
2495 INIT_LIST_HEAD(&req->r_unsafe_dir_item);
2496 INIT_LIST_HEAD(&req->r_unsafe_target_item);
2497 req->r_fmode = -1;
2498 req->r_feature_needed = -1;
2499 kref_init(&req->r_kref);
2500 RB_CLEAR_NODE(&req->r_node);
2501 INIT_LIST_HEAD(&req->r_wait);
2502 init_completion(&req->r_completion);
2503 init_completion(&req->r_safe_completion);
2504 INIT_LIST_HEAD(&req->r_unsafe_item);
2506 ktime_get_coarse_real_ts64(&req->r_stamp);
2508 req->r_op = op;
2509 req->r_direct_mode = mode;
2510 return req;
2532 static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
2534 struct inode *dir = req->r_parent;
2535 struct dentry *dentry = req->r_dentry;
2582 static u8 *get_fscrypt_altname(const struct ceph_mds_request *req, u32 *plen)
2808 const struct ceph_mds_request *req)
2813 ceph_encode_timespec64(&ts, &req->r_stamp);
2817 ceph_encode_32(p, req->r_cred->group_info->ngroups);
2818 for (i = 0; i < req->r_cred->group_info->ngroups; i++)
2820 req->r_cred->group_info->gid[i]));
2823 ceph_encode_32(p, req->r_altname_len);
2824 ceph_encode_copy(p, req->r_altname, req->r_altname_len);
2827 if (req->r_fscrypt_auth) {
2828 u32 authlen = ceph_fscrypt_auth_len(req->r_fscrypt_auth);
2831 ceph_encode_copy(p, req->r_fscrypt_auth, authlen);
2835 if (test_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags)) {
2837 ceph_encode_64(p, req->r_fscrypt_file);
2859 struct ceph_mds_request *req,
2880 ret = set_request_path_attr(req->r_inode, req->r_dentry,
2881 req->r_parent, req->r_path1, req->r_ino1.ino,
2884 &req->r_req_flags));
2891 if (req->r_old_dentry &&
2892 !(req->r_old_dentry->d_flags & DCACHE_DISCONNECTED))
2893 old_dentry = req->r_old_dentry;
2895 req->r_old_dentry_dir,
2896 req->r_path2, req->r_ino2.ino,
2903 req->r_altname = get_fscrypt_altname(req, &req->r_altname_len);
2904 if (IS_ERR(req->r_altname)) {
2905 msg = ERR_CAST(req->r_altname);
2906 req->r_altname = NULL;
2930 (!!req->r_inode_drop + !!req->r_dentry_drop +
2931 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
2933 if (req->r_dentry_drop)
2935 if (req->r_old_dentry_drop)
2940 /* req->r_stamp */
2944 len += sizeof(u32) + (sizeof(u64) * req->r_cred->group_info->ngroups);
2947 len += sizeof(u32) + req->r_altname_len;
2951 if (req->r_fscrypt_auth)
2952 len += ceph_fscrypt_auth_len(req->r_fscrypt_auth);
2956 if (test_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags))
2965 msg->hdr.tid = cpu_to_le64(req->r_tid);
2994 lhead->op = cpu_to_le32(req->r_op);
2996 req->r_cred->fsuid));
2998 req->r_cred->fsgid));
2999 lhead->ino = cpu_to_le64(req->r_deleg_ino);
3000 lhead->args = req->r_args;
3006 req->r_request_release_offset = p - msg->front.iov_base;
3010 if (req->r_inode_drop)
3012 req->r_inode ? req->r_inode : d_inode(req->r_dentry),
3013 mds, req->r_inode_drop, req->r_inode_unless,
3014 req->r_op == CEPH_MDS_OP_READDIR);
3015 if (req->r_dentry_drop) {
3016 ret = ceph_encode_dentry_release(&p, req->r_dentry,
3017 req->r_parent, mds, req->r_dentry_drop,
3018 req->r_dentry_unless);
3023 if (req->r_old_dentry_drop) {
3024 ret = ceph_encode_dentry_release(&p, req->r_old_dentry,
3025 req->r_old_dentry_dir, mds,
3026 req->r_old_dentry_drop,
3027 req->r_old_dentry_unless);
3032 if (req->r_old_inode_drop)
3034 d_inode(req->r_old_dentry),
3035 mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
3039 p = msg->front.iov_base + req->r_request_release_offset;
3044 encode_mclientrequest_tail(&p, req);
3055 if (req->r_pagelist) {
3056 struct ceph_pagelist *pagelist = req->r_pagelist;
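
The long run from 2859 to 3056 is part of building the request message: the code first totals the length of every section it will emit (timestamp, supplementary gids, alternate name, fscrypt fields, cap releases), allocates a front buffer of that size, then encodes the fields in the same order, recording offsets such as r_request_release_offset so later code can re-encode the tail on resend. Below is a compact illustration of that two-pass "measure, then encode" pattern; the field set and helper names are invented for the sketch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Tiny fixed-width encoders (host-endian for simplicity; the real code
 * uses explicit little-endian helpers). */
static void put_u32(uint8_t **p, uint32_t v) { memcpy(*p, &v, 4); *p += 4; }
static void put_buf(uint8_t **p, const void *b, uint32_t len)
{
	memcpy(*p, b, len);
	*p += len;
}

struct fake_msg_fields {
	uint32_t op;
	const char *altname;     /* optional, length-prefixed on the wire */
	uint32_t altname_len;
};

/* Pass 1: compute the exact length of everything we will encode. */
static size_t msg_len(const struct fake_msg_fields *f)
{
	size_t len = sizeof(uint32_t);              /* op */

	len += sizeof(uint32_t) + f->altname_len;   /* length-prefixed name */
	return len;
}

/* Pass 2: encode into a buffer of exactly that size. */
static uint8_t *msg_encode(const struct fake_msg_fields *f, size_t *out_len)
{
	size_t len = msg_len(f);
	uint8_t *buf = malloc(len);
	uint8_t *p = buf;

	if (!buf)
		return NULL;
	put_u32(&p, f->op);
	put_u32(&p, f->altname_len);
	put_buf(&p, f->altname, f->altname_len);
	assert((size_t)(p - buf) == len);   /* encode matched the measure */
	*out_len = len;
	return buf;
}

int main(void)
{
	struct fake_msg_fields f = { .op = 1, .altname = "backup",
				     .altname_len = 6 };
	size_t len;
	uint8_t *m = msg_encode(&f, &len);

	printf("encoded %zu bytes\n", len);
	free(m);
	return 0;
}
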
3084 struct ceph_mds_request *req)
3086 req->r_end_latency = ktime_get();
3088 if (req->r_callback)
3089 req->r_callback(mdsc, req);
3090 complete_all(&req->r_completion);
3097 struct ceph_mds_request *req,
3114 if (req->r_attempts) {
3118 if ((old_version && req->r_attempts >= old_max_retry) ||
3119 ((uint32_t)req->r_attempts >= U32_MAX)) {
3121 __func__, req->r_tid);
3126 req->r_attempts++;
3127 if (req->r_inode) {
3129 ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
3132 req->r_sent_on_mseq = cap->mseq;
3134 req->r_sent_on_mseq = -1;
3136 dout("%s %p tid %lld %s (attempt %d)\n", __func__, req,
3137 req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
3139 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3148 msg = req->r_request;
3156 if (req->r_target_inode)
3157 lhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
3159 lhead->num_retry = req->r_attempts - 1;
3162 nhead->ext_num_retry = cpu_to_le32(req->r_attempts - 1);
3168 p = msg->front.iov_base + req->r_request_release_offset;
3169 encode_mclientrequest_tail(&p, req);
3176 if (req->r_request) {
3177 ceph_msg_put(req->r_request);
3178 req->r_request = NULL;
3180 msg = create_request_message(session, req, drop_cap_releases);
3182 req->r_err = PTR_ERR(msg);
3185 req->r_request = msg;
3190 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
3192 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags))
3194 if (req->r_parent)
3197 lhead->num_fwd = req->r_num_fwd;
3198 lhead->num_retry = req->r_attempts - 1;
3201 nhead->ext_num_fwd = cpu_to_le32(req->r_num_fwd);
3202 nhead->ext_num_retry = cpu_to_le32(req->r_attempts - 1);
3205 dout(" r_parent = %p\n", req->r_parent);
3213 struct ceph_mds_request *req,
3218 err = __prepare_send_request(session, req, drop_cap_releases);
3220 ceph_msg_get(req->r_request);
3221 ceph_con_send(&session->s_con, req->r_request);
3231 struct ceph_mds_request *req)
3238 if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
3239 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
3240 __unregister_request(mdsc, req);
3249 if (req->r_timeout &&
3250 time_after_eq(jiffies, req->r_started + req->r_timeout)) {
3268 list_add(&req->r_wait, &mdsc->waiting_for_map);
3279 put_request_session(req);
3281 mds = __choose_mds(mdsc, req, &random);
3284 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
3289 list_add(&req->r_wait, &mdsc->waiting_for_map);
3302 req->r_session = ceph_get_mds_session(session);
3310 if (req->r_feature_needed > 0 &&
3311 !test_bit(req->r_feature_needed, &session->s_features)) {
3323 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
3335 list_add(&req->r_wait, &mdsc->waiting_for_map);
3348 req->r_resend_mds = mds;
3350 list_add(&req->r_wait, &session->s_waiting);
3355 req->r_resend_mds = -1; /* forget any previous mds hint */
3357 if (req->r_request_started == 0) /* note request start time */
3358 req->r_request_started = jiffies;
3368 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) && req->r_num_fwd) {
3369 struct ceph_dentry_info *di = ceph_dentry(req->r_dentry);
3380 if (!d_inode(req->r_dentry)) {
3384 mutex_lock(&req->r_fill_mutex);
3385 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
3386 mutex_unlock(&req->r_fill_mutex);
3391 ci = ceph_inode(d_inode(req->r_dentry));
3418 err = __send_request(session, req, false);
3425 req->r_err = err;
3426 complete_request(mdsc, req);
3427 __unregister_request(mdsc, req);
3438 struct ceph_mds_request *req;
3444 req = list_entry(tmp_list.next,
3446 list_del_init(&req->r_wait);
3447 dout(" wake request %p tid %llu\n", req, req->r_tid);
3448 __do_request(mdsc, req);
3458 struct ceph_mds_request *req;
3463 req = rb_entry(p, struct ceph_mds_request, r_node);
3465 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
3467 if (req->r_attempts > 0)
3469 if (req->r_session &&
3470 req->r_session->s_mds == mds) {
3471 dout(" kicking tid %llu\n", req->r_tid);
3472 list_del_init(&req->r_wait);
3473 __do_request(mdsc, req);
3479 struct ceph_mds_request *req)
3484 if (req->r_inode)
3485 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
3486 if (req->r_parent) {
3487 struct ceph_inode_info *ci = ceph_inode(req->r_parent);
3488 int fmode = (req->r_op & CEPH_MDS_OP_WRITE) ?
3495 if (req->r_old_dentry_dir)
3496 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
3499 if (req->r_inode) {
3500 err = ceph_wait_on_async_create(req->r_inode);
3508 if (!err && req->r_old_inode) {
3509 err = ceph_wait_on_async_create(req->r_old_inode);
3517 dout("submit_request on %p for inode %p\n", req, dir);
3519 __register_request(mdsc, req, dir);
3520 __do_request(mdsc, req);
3521 err = req->r_err;
3527 struct ceph_mds_request *req,
3535 err = wait_func(mdsc, req);
3538 &req->r_completion,
3539 ceph_timeout_jiffies(req->r_timeout));
3551 if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
3552 err = le32_to_cpu(req->r_reply_info.head->result);
3554 dout("aborted request %lld with %d\n", req->r_tid, err);
3561 mutex_lock(&req->r_fill_mutex);
3562 req->r_err = err;
3563 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
3564 mutex_unlock(&req->r_fill_mutex);
3566 if (req->r_parent &&
3567 (req->r_op & CEPH_MDS_OP_WRITE))
3568 ceph_invalidate_dir_request(req);
3570 err = req->r_err;
3583 struct ceph_mds_request *req)
3587 dout("do_request on %p\n", req);
3590 err = ceph_mdsc_submit_request(mdsc, dir, req);
3592 err = ceph_mdsc_wait_request(mdsc, req, NULL);
3593 dout("do_request %p done, result %d\n", req, err);
3601 void ceph_invalidate_dir_request(struct ceph_mds_request *req)
3603 struct inode *dir = req->r_parent;
3604 struct inode *old_dir = req->r_old_dentry_dir;
3611 if (req->r_dentry)
3612 ceph_invalidate_dentry_lease(req->r_dentry);
3613 if (req->r_old_dentry)
3614 ceph_invalidate_dentry_lease(req->r_old_dentry);
3627 struct ceph_mds_request *req;
3645 req = lookup_get_request(mdsc, tid);
3646 if (!req) {
3651 dout("handle_reply %p\n", req);
3654 if (req->r_session != session) {
3657 req->r_session ? req->r_session->s_mds : -1);
3663 if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
3664 (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
3670 if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
3680 set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
3681 __unregister_request(mdsc, req);
3687 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3701 set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
3702 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
3707 err = parse_reply_info(session, msg, req, (u64)-1);
3709 err = parse_reply_info(session, msg, req,
3714 rinfo = &req->r_reply_info;
3716 struct inode *in = xchg(&req->r_new_inode, NULL);
3726 if (req->r_op == CEPH_MDS_OP_CREATE &&
3727 !req->r_reply_info.has_create_ino) {
3729 WARN_ON_ONCE(req->r_deleg_ino);
3740 req->r_target_inode = in;
3771 mutex_lock(&req->r_fill_mutex);
3772 current->journal_info = req;
3773 err = ceph_fill_trace(mdsc->fsc->sb, req);
3775 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
3776 req->r_op == CEPH_MDS_OP_LSSNAP))
3777 err = ceph_readdir_prepopulate(req, req->r_session);
3780 mutex_unlock(&req->r_fill_mutex);
3787 if (req->r_target_inode &&
3788 test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3790 ceph_inode(req->r_target_inode);
3792 list_add_tail(&req->r_unsafe_target_item,
3797 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
3801 if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3803 req->r_err = err;
3805 req->r_reply = ceph_msg_get(msg);
3806 set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
3816 complete_request(mdsc, req);
3818 ceph_update_metadata_metrics(&mdsc->metric, req->r_start_latency,
3819 req->r_end_latency, err);
3821 ceph_mdsc_put_request(req);
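
The reply handler (lines 3627-3821) matches the incoming reply to a pending request by tid and then consults the request's flag bits: a duplicate unsafe reply is dropped, anything arriving after the safe reply is dropped, and an aborted request never has its reply applied. Those checks reduce to a small bitmask test, sketched here with invented flag names.

#include <stdbool.h>
#include <stdio.h>

enum {
	REQ_GOT_UNSAFE = 1 << 0,   /* early, replayable acknowledgement */
	REQ_GOT_SAFE   = 1 << 1,   /* durable acknowledgement           */
	REQ_ABORTED    = 1 << 2,   /* the waiter gave up                */
};

/* Decide whether a newly arrived reply should be applied, mirroring the
 * "already safe / duplicate unsafe / aborted" checks in the lines above. */
static bool should_apply_reply(unsigned int *flags, bool reply_is_safe)
{
	if (*flags & REQ_ABORTED)
		return false;                       /* nobody is waiting   */
	if (*flags & REQ_GOT_SAFE)
		return false;                       /* already final       */
	if (!reply_is_safe && (*flags & REQ_GOT_UNSAFE))
		return false;                       /* duplicate unsafe    */

	*flags |= reply_is_safe ? REQ_GOT_SAFE : REQ_GOT_UNSAFE;
	return true;
}

int main(void)
{
	unsigned int flags = 0;

	printf("unsafe #1: %d\n", should_apply_reply(&flags, false)); /* 1 */
	printf("unsafe #2: %d\n", should_apply_reply(&flags, false)); /* 0 */
	printf("safe:      %d\n", should_apply_reply(&flags, true));  /* 1 */
	printf("late safe: %d\n", should_apply_reply(&flags, true));  /* 0 */
	return 0;
}
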
3838 struct ceph_mds_request *req;
3852 req = lookup_get_request(mdsc, tid);
3853 if (!req) {
3855 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
3859 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3861 __unregister_request(mdsc, req);
3862 } else if (fwd_seq <= req->r_num_fwd || (uint32_t)fwd_seq >= U32_MAX) {
3871 mutex_lock(&req->r_fill_mutex);
3872 req->r_err = -EMULTIHOP;
3873 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
3874 mutex_unlock(&req->r_fill_mutex);
3880 BUG_ON(req->r_err);
3881 BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
3882 req->r_attempts = 0;
3883 req->r_num_fwd = fwd_seq;
3884 req->r_resend_mds = next_mds;
3885 put_request_session(req);
3886 __do_request(mdsc, req);
3892 complete_request(mdsc, req);
3893 ceph_mdsc_put_request(req);
4117 void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
4121 dcaps = xchg(&req->r_dir_caps, 0);
4124 ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps);
4128 void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
4132 dcaps = xchg(&req->r_dir_caps, 0);
4135 ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent),
4146 struct ceph_mds_request *req, *nreq;
4152 list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
4153 __send_request(session, req, true);
4161 req = rb_entry(p, struct ceph_mds_request, r_node);
4163 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
4165 if (req->r_attempts == 0)
4167 if (!req->r_session)
4169 if (req->r_session->s_mds != session->s_mds)
4172 ceph_mdsc_release_dir_caps_no_check(req);
4174 __send_request(session, req, true);
5256 struct ceph_mds_request *req;
5268 while ((req = __get_oldest_req(mdsc))) {
5270 req->r_tid);
5271 list_del_init(&req->r_wait);
5272 __unregister_request(mdsc, req);
5332 struct ceph_mds_request *req = NULL, *nextreq;
5339 req = __get_oldest_req(mdsc);
5340 while (req && req->r_tid <= want_tid) {
5342 n = rb_next(&req->r_node);
5347 if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
5348 (req->r_op & CEPH_MDS_OP_WRITE)) {
5349 struct ceph_mds_session *s = req->r_session;
5352 req = nextreq;
5357 ceph_mdsc_get_request(req);
5372 req->r_tid, want_tid);
5373 wait_for_completion(&req->r_safe_completion);
5376 ceph_mdsc_put_request(req);
5386 req = nextreq;
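
The final cluster (5332-5386) waits for outstanding write requests up to a target tid to become safe. Because each wait can sleep while the tree changes, the loop pins the next entry with a reference before blocking on the current one and resumes from that pinned successor afterwards. Below is a single-threaded sketch of that "pin the successor before letting go" walk over a refcounted list; the unregister call stands in for whatever removes entries while the walker sleeps, and every name here is hypothetical.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int tid;
	atomic_int ref;
	struct node *next;
};

static struct node *head;              /* the "request tree" */

static void node_get(struct node *n) { atomic_fetch_add(&n->ref, 1); }
static void node_put(struct node *n)
{
	if (n && atomic_fetch_sub(&n->ref, 1) == 1)
		free(n);
}

/* What may happen while we "wait": the node is unlinked and the list's
 * reference dropped, so only the walker's pinned references keep it alive. */
static void unregister(struct node *n)
{
	for (struct node **p = &head; *p; p = &(*p)->next)
		if (*p == n) {
			*p = n->next;
			node_put(n);
			return;
		}
}

int main(void)
{
	/* build a small list with one list reference per node */
	for (int tid = 1; tid <= 3; tid++) {
		struct node *n = calloc(1, sizeof(*n));

		if (!n)
			return 1;
		atomic_init(&n->ref, 1);
		n->tid = tid;
		n->next = head;
		head = n;
	}

	struct node *cur = head;

	node_get(cur);                          /* pin the starting node */
	while (cur) {
		struct node *next = cur->next;

		if (next)
			node_get(next);         /* pin the successor first  */

		printf("waiting on tid %d\n", cur->tid);
		unregister(cur);                /* may happen while waiting */

		node_put(cur);                  /* drop our pin on current  */
		cur = next;                     /* safe: successor is pinned */
	}
	return 0;
}
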