Lines matching refs: mdsc
(The fragments below are from the Ceph MDS client, fs/ceph/mds_client.c in the Linux kernel; each entry gives the source line number followed by the matching line.)
62 static void __wake_requests(struct ceph_mds_client *mdsc,
926 * called under mdsc->mutex
928 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
931 if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
933 return ceph_get_mds_session(mdsc->sessions[mds]);
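For orientation, the 928/931/933 fragments above reconstruct to roughly this helper; the NULL return on a miss is filled in by assumption from the guard at line 931:

	/* Sketch reconstructed from the fragments above; takes a reference
	 * on the session it returns. */
	struct ceph_mds_session *
	__ceph_lookup_mds_session(struct ceph_mds_client *mdsc, int mds)
	{
		if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
			return NULL;	/* assumed miss path */
		return ceph_get_mds_session(mdsc->sessions[mds]);
	}
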
936 static bool __have_session(struct ceph_mds_client *mdsc, int mds)
938 if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
944 static int __verify_registered_session(struct ceph_mds_client *mdsc,
947 if (s->s_mds >= mdsc->max_sessions ||
948 mdsc->sessions[s->s_mds] != s)
955 * called under mdsc->mutex.
957 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
962 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
965 if (mds >= mdsc->mdsmap->possible_max_rank)
972 if (mds >= mdsc->max_sessions) {
980 if (mdsc->sessions) {
981 memcpy(sa, mdsc->sessions,
982 mdsc->max_sessions * sizeof(void *));
983 kfree(mdsc->sessions);
985 mdsc->sessions = sa;
986 mdsc->max_sessions = newmax;
990 s->s_mdsc = mdsc;
995 ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
1012 mdsc->sessions[mds] = s;
1013 atomic_inc(&mdsc->num_sessions);
1017 ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
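The 972-986 fragments show register_session() growing the session table on demand. A minimal sketch of that growth step, assuming a power-of-two doubling policy and GFP flags, with error handling elided:

	/* Sketch of the sessions[] growth implied above (doubling policy
	 * and GFP_NOFS are assumptions). */
	if (mds >= mdsc->max_sessions) {
		int newmax = 1;
		struct ceph_mds_session **sa;

		while (newmax <= mds)
			newmax <<= 1;		/* grow in powers of two */
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (!sa)
			return ERR_PTR(-ENOMEM);
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->sessions = sa;
		mdsc->max_sessions = newmax;
	}
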
1027 * called under mdsc->mutex
1029 static void __unregister_session(struct ceph_mds_client *mdsc,
1033 BUG_ON(mdsc->sessions[s->s_mds] != s);
1034 mdsc->sessions[s->s_mds] = NULL;
1037 atomic_dec(&mdsc->num_sessions);
1043 * should be last request ref, or hold mdsc->mutex
1053 void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
1059 mutex_lock(&mdsc->mutex);
1060 for (mds = 0; mds < mdsc->max_sessions; ++mds) {
1063 s = __ceph_lookup_mds_session(mdsc, mds);
1072 mutex_unlock(&mdsc->mutex);
1075 mutex_lock(&mdsc->mutex);
1077 mutex_unlock(&mdsc->mutex);
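Fragments 1053-1077 trace a common locking pattern: scan the session table under mdsc->mutex, but drop the mutex around the callback so the callback is free to sleep or take session locks. A hedged sketch; the callback signature is an assumption:

	/* Sketch of the iterate pattern above: lookup takes a session ref
	 * under mdsc->mutex, then the lock is dropped for the callback. */
	void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
					void (*cb)(struct ceph_mds_session *))
	{
		int mds;

		mutex_lock(&mdsc->mutex);
		for (mds = 0; mds < mdsc->max_sessions; ++mds) {
			struct ceph_mds_session *s;

			s = __ceph_lookup_mds_session(mdsc, mds);
			if (!s)
				continue;
			mutex_unlock(&mdsc->mutex);	/* cb may sleep */
			cb(s);
			ceph_put_mds_session(s);
			mutex_lock(&mdsc->mutex);
		}
		mutex_unlock(&mdsc->mutex);
	}
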
1134 * called under mdsc->mutex.
1137 lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
1141 req = lookup_request(&mdsc->request_tree, tid);
1152 * Called under mdsc->mutex.
1154 static void __register_request(struct ceph_mds_client *mdsc,
1160 req->r_tid = ++mdsc->last_tid;
1162 ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
1174 insert_request(&mdsc->request_tree, req);
1178 if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
1179 mdsc->oldest_tid = req->r_tid;
1192 static void __unregister_request(struct ceph_mds_client *mdsc,
1200 if (req->r_tid == mdsc->oldest_tid) {
1202 mdsc->oldest_tid = 0;
1207 mdsc->oldest_tid = next_req->r_tid;
1214 erase_request(&mdsc->request_tree, req);
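Fragments 1200-1214 show the oldest-tid bookkeeping on unregister: when the departing request was the oldest, oldest_tid advances to the next eligible entry in the tid-ordered tree. A sketch with assumed field names; SETFILELOCK requests are skipped, matching the exclusion at line 1178:

	/* Sketch: recompute oldest_tid when the current oldest goes away. */
	if (req->r_tid == mdsc->oldest_tid) {
		struct rb_node *p = rb_next(&req->r_node);

		mdsc->oldest_tid = 0;
		while (p) {
			struct ceph_mds_request *next_req =
				rb_entry(p, struct ceph_mds_request, r_node);
			if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
				mdsc->oldest_tid = next_req->r_tid;
				break;
			}
			p = rb_next(p);
		}
	}
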
1268 * Called under mdsc->mutex.
1270 static int __choose_mds(struct ceph_mds_client *mdsc,
1290 (__have_session(mdsc, req->r_resend_mds) ||
1291 ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
1321 if (!dir || dir->i_sb != mdsc->fsc->sb) {
1368 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
1370 !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
1383 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
1385 if (!ceph_mdsmap_is_laggy(mdsc->mdsmap,
1418 mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
1523 static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
1530 struct ceph_options *opt = mdsc->fsc->client->options;
1531 struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
1537 {"hostname", mdsc->nodename},
1631 * called under mdsc->mutex
1633 static int __open_session(struct ceph_mds_client *mdsc,
1640 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
1644 mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
1651 msg = create_session_open_msg(mdsc, session->s_seq);
1661 * called under mdsc->mutex
1664 __open_export_target_session(struct ceph_mds_client *mdsc, int target)
1669 session = __ceph_lookup_mds_session(mdsc, target);
1671 session = register_session(mdsc, target);
1677 ret = __open_session(mdsc, session);
1686 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
1692 mutex_lock(&mdsc->mutex);
1693 session = __open_export_target_session(mdsc, target);
1694 mutex_unlock(&mdsc->mutex);
1699 static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
1706 if (mds >= mdsc->mdsmap->possible_max_rank)
1709 mi = &mdsc->mdsmap->m_info[mds];
1714 ts = __open_export_target_session(mdsc, mi->export_targets[i]);
1719 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
1722 mutex_lock(&mdsc->mutex);
1723 __open_export_target_sessions(mdsc, session);
1724 mutex_unlock(&mdsc->mutex);
1741 static void dispose_cap_releases(struct ceph_mds_client *mdsc,
1749 ceph_put_cap(mdsc, cap);
1753 static void cleanup_session_requests(struct ceph_mds_client *mdsc,
1760 mutex_lock(&mdsc->mutex);
1770 __unregister_request(mdsc, req);
1773 p = rb_first(&mdsc->request_tree);
1781 mutex_unlock(&mdsc->mutex);
1896 wake_up_all(&fsc->mdsc->cap_flushing_wq);
1985 static int send_renew_caps(struct ceph_mds_client *mdsc,
1998 state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
2015 static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
2035 static void renewed_caps(struct ceph_mds_client *mdsc,
2045 mdsc->mdsmap->m_session_timeout*HZ;
2085 static int __close_session(struct ceph_mds_client *mdsc,
2206 int ceph_trim_caps(struct ceph_mds_client *mdsc,
2223 ceph_flush_cap_releases(mdsc, session);
2227 static int check_caps_flush(struct ceph_mds_client *mdsc,
2232 spin_lock(&mdsc->cap_dirty_lock);
2233 if (!list_empty(&mdsc->cap_flush_list)) {
2235 list_first_entry(&mdsc->cap_flush_list,
2243 spin_unlock(&mdsc->cap_dirty_lock);
2252 static void wait_caps_flush(struct ceph_mds_client *mdsc,
2257 wait_event(mdsc->cap_flushing_wq,
2258 check_caps_flush(mdsc, want_flush_tid));
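Fragments 2227-2258 outline the cap-flush barrier: a predicate that checks whether any queued flush still carries a tid at or below the wanted one, and a wait_event on it. Sketch with assumed struct and field names:

	/* Sketch of the cap-flush barrier above. */
	static int check_caps_flush(struct ceph_mds_client *mdsc,
				    u64 want_flush_tid)
	{
		int ret = 1;

		spin_lock(&mdsc->cap_dirty_lock);
		if (!list_empty(&mdsc->cap_flush_list)) {
			struct ceph_cap_flush *cf =
				list_first_entry(&mdsc->cap_flush_list,
						 struct ceph_cap_flush, g_list);
			if (cf->tid <= want_flush_tid)
				ret = 0;	/* older flushes in flight */
		}
		spin_unlock(&mdsc->cap_dirty_lock);
		return ret;
	}

	static void wait_caps_flush(struct ceph_mds_client *mdsc,
				    u64 want_flush_tid)
	{
		wait_event(mdsc->cap_flushing_wq,
			   check_caps_flush(mdsc, want_flush_tid));
	}
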
2266 static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
2272 struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
2318 ceph_put_cap(mdsc, cap);
2373 void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
2376 if (mdsc->stopping)
2380 if (queue_work(mdsc->fsc->cap_wq,
2404 struct ceph_mds_client *mdsc =
2406 int ret = ceph_trim_dentries(mdsc);
2408 ceph_queue_cap_reclaim_work(mdsc);
2411 void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
2413 if (mdsc->stopping)
2416 if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) {
2423 void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
2428 val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);
2430 atomic_set(&mdsc->cap_reclaim_pending, 0);
2431 ceph_queue_cap_reclaim_work(mdsc);
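Fragments 2423-2431 batch the dentry-reclaim work: pending counts accumulate atomically and the work item is queued only once a batch threshold is crossed. A sketch; the threshold constant and the exact trigger condition are assumptions:

	/* Sketch of the batched reclaim trigger above. */
	#define RECLAIM_BATCH 256	/* assumed stand-in threshold */

	void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
	{
		int val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);

		if (val >= RECLAIM_BATCH) {
			atomic_set(&mdsc->cap_reclaim_pending, 0);
			ceph_queue_cap_reclaim_work(mdsc);
		}
	}
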
2482 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
2491 req->r_mdsc = mdsc;
2516 * called under mdsc->mutex.
2518 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
2520 if (RB_EMPTY_ROOT(&mdsc->request_tree))
2522 return rb_entry(rb_first(&mdsc->request_tree),
2526 static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
2528 return mdsc->oldest_tid;
2856 * called under mdsc->mutex
2863 struct ceph_mds_client *mdsc = session->s_mdsc;
2993 lhead->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
3080 * called under mdsc->mutex if error, under no mutex if
3083 static void complete_request(struct ceph_mds_client *mdsc,
3089 req->r_callback(mdsc, req);
3094 * called under mdsc->mutex
3101 struct ceph_mds_client *mdsc = session->s_mdsc;
3189 lhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
3210 * called under mdsc->mutex
3230 static void __do_request(struct ceph_mds_client *mdsc,
3240 __unregister_request(mdsc, req);
3244 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) {
3255 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
3260 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
3261 if (mdsc->mdsmap_err) {
3262 err = mdsc->mdsmap_err;
3266 if (mdsc->mdsmap->m_epoch == 0) {
3268 list_add(&req->r_wait, &mdsc->waiting_for_map);
3271 if (!(mdsc->fsc->mount_options->flags &
3273 !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
3281 mds = __choose_mds(mdsc, req, &random);
3283 ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
3289 list_add(&req->r_wait, &mdsc->waiting_for_map);
3294 session = __ceph_lookup_mds_session(mdsc, mds);
3296 session = register_session(mdsc, mds);
3331 * it to the mdsc queue.
3334 if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER))
3335 list_add(&req->r_wait, &mdsc->waiting_for_map);
3343 err = __open_session(mdsc, session);
3426 complete_request(mdsc, req);
3427 __unregister_request(mdsc, req);
3433 * called under mdsc->mutex
3435 static void __wake_requests(struct ceph_mds_client *mdsc,
3448 __do_request(mdsc, req);
3456 static void kick_requests(struct ceph_mds_client *mdsc, int mds)
3459 struct rb_node *p = rb_first(&mdsc->request_tree);
3473 __do_request(mdsc, req);
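Fragments 3456-3473 walk the whole request tree and re-drive requests bound to a given rank, typically after that MDS restarts or a new map arrives. A sketch; the real code also filters on request flags, which is elided here:

	/* Sketch of kick_requests: re-run __do_request for requests
	 * targeting the given mds. */
	static void kick_requests(struct ceph_mds_client *mdsc, int mds)
	{
		struct rb_node *p = rb_first(&mdsc->request_tree);

		while (p) {
			struct ceph_mds_request *req =
				rb_entry(p, struct ceph_mds_request, r_node);

			p = rb_next(p);	/* advance first; __do_request may
					 * unregister the node */
			if (req->r_session && req->r_session->s_mds == mds)
				__do_request(mdsc, req);
		}
	}
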
3478 int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
3492 __ceph_touch_fmode(ci, mdsc, fmode);
3518 mutex_lock(&mdsc->mutex);
3519 __register_request(mdsc, req, dir);
3520 __do_request(mdsc, req);
3522 mutex_unlock(&mdsc->mutex);
3526 int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
3535 err = wait_func(mdsc, req);
3548 mutex_lock(&mdsc->mutex);
3573 mutex_unlock(&mdsc->mutex);
3581 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
3590 err = ceph_mdsc_submit_request(mdsc, dir, req);
3592 err = ceph_mdsc_wait_request(mdsc, req, NULL);
3626 struct ceph_mds_client *mdsc = session->s_mdsc;
3644 mutex_lock(&mdsc->mutex);
3645 req = lookup_get_request(mdsc, tid);
3648 mutex_unlock(&mdsc->mutex);
3658 mutex_unlock(&mdsc->mutex);
3667 mutex_unlock(&mdsc->mutex);
3673 mutex_unlock(&mdsc->mutex);
3681 __unregister_request(mdsc, req);
3684 if (mdsc->stopping && !__get_oldest_req(mdsc))
3685 complete_all(&mdsc->safe_umount_waiters);
3697 mutex_unlock(&mdsc->mutex);
3711 mutex_unlock(&mdsc->mutex);
3734 in = ceph_get_inode(mdsc->fsc->sb, tvino, in);
3753 down_write(&mdsc->snap_rwsem);
3754 err = ceph_update_snap_trace(mdsc, rinfo->snapblob,
3759 up_write(&mdsc->snap_rwsem);
3765 downgrade_write(&mdsc->snap_rwsem);
3767 down_read(&mdsc->snap_rwsem);
3773 err = ceph_fill_trace(mdsc->fsc->sb, req);
3782 up_read(&mdsc->snap_rwsem);
3784 ceph_put_snap_realm(mdsc, realm);
3797 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
3800 mutex_lock(&mdsc->mutex);
3811 mutex_unlock(&mdsc->mutex);
3816 complete_request(mdsc, req);
3818 ceph_update_metadata_metrics(&mdsc->metric, req->r_start_latency,
3825 ceph_mdsc_close_sessions(mdsc);
3834 static void handle_forward(struct ceph_mds_client *mdsc,
3851 mutex_lock(&mdsc->mutex);
3852 req = lookup_get_request(mdsc, tid);
3854 mutex_unlock(&mdsc->mutex);
3861 __unregister_request(mdsc, req);
3886 __do_request(mdsc, req);
3888 mutex_unlock(&mdsc->mutex);
3892 complete_request(mdsc, req);
3935 struct ceph_mds_client *mdsc = session->s_mdsc;
3988 mutex_lock(&mdsc->mutex);
3991 __unregister_session(mdsc, session);
3994 session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
3995 mutex_unlock(&mdsc->mutex);
4018 renewed_caps(mdsc, session, 0);
4021 metric_schedule_delayed(&mdsc->metric);
4033 if (mdsc->stopping)
4034 __close_session(mdsc, session);
4039 renewed_caps(mdsc, session, 1);
4046 cleanup_session_requests(mdsc, session);
4049 wake_up_all(&mdsc->session_close_wq);
4057 send_renew_caps(mdsc, session);
4061 ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
4068 ceph_flush_cap_releases(mdsc, session);
4071 send_flushmsg_ack(mdsc, session, seq);
4086 cleanup_session_requests(mdsc, session);
4089 mdsc->fsc->blocklisted = true;
4100 mutex_lock(&mdsc->mutex);
4101 __wake_requests(mdsc, &session->s_waiting);
4103 kick_requests(mdsc, mds);
4104 mutex_unlock(&mdsc->mutex);
4143 static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
4151 mutex_lock(&mdsc->mutex);
4159 p = rb_first(&mdsc->request_tree);
4176 mutex_unlock(&mdsc->mutex);
4476 static int encode_snap_realms(struct ceph_mds_client *mdsc,
4484 err = ceph_pagelist_encode_32(pagelist, mdsc->num_snap_realms);
4494 for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
4546 static void send_mds_reconnect(struct ceph_mds_client *mdsc,
4590 dispose_cap_releases(mdsc, &dispose);
4593 if (mdsc->fsc->sb->s_root)
4594 shrink_dcache_parent(mdsc->fsc->sb->s_root);
4599 ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
4602 replay_unsafe_requests(mdsc, session);
4604 ceph_early_kick_flushing_caps(mdsc, session);
4606 down_read(&mdsc->snap_rwsem);
4632 if (mdsc->num_snap_realms) {
4635 mdsc->num_snap_realms *
4641 total_len += mdsc->num_snap_realms *
4658 err = encode_snap_realms(mdsc, &recon_state);
4674 WARN_ON(recon_state.nr_realms != mdsc->num_snap_realms);
4693 mutex_lock(&mdsc->mutex);
4694 __wake_requests(mdsc, &session->s_waiting);
4695 mutex_unlock(&mdsc->mutex);
4697 up_read(&mdsc->snap_rwsem);
4703 up_read(&mdsc->snap_rwsem);
4717 * called under mdsc->mutex.
4719 static void check_new_map(struct ceph_mds_client *mdsc,
4738 for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
4739 if (!mdsc->sessions[i])
4741 s = mdsc->sessions[i];
4755 __unregister_session(mdsc, s);
4756 __wake_requests(mdsc, &s->s_waiting);
4757 mutex_unlock(&mdsc->mutex);
4760 cleanup_session_requests(mdsc, s);
4766 mutex_lock(&mdsc->mutex);
4767 kick_requests(mdsc, i);
4775 mutex_unlock(&mdsc->mutex);
4777 mutex_lock(&mdsc->mutex);
4790 mutex_unlock(&mdsc->mutex);
4792 send_mds_reconnect(mdsc, s);
4793 mutex_lock(&mdsc->mutex);
4804 kick_requests(mdsc, i);
4805 mutex_unlock(&mdsc->mutex);
4807 mutex_lock(&mdsc->mutex);
4808 ceph_kick_flushing_caps(mdsc, s);
4837 * the mdsc->mutex's unlock/lock gap below in rare
4842 s = __ceph_lookup_mds_session(mdsc, i);
4844 s = __open_export_target_session(mdsc, i);
4853 mutex_unlock(&mdsc->mutex);
4854 send_mds_reconnect(mdsc, s);
4856 mutex_lock(&mdsc->mutex);
4859 for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
4860 s = mdsc->sessions[i];
4870 __open_export_target_sessions(mdsc, s);
4892 static void handle_lease(struct ceph_mds_client *mdsc,
4896 struct super_block *sb = mdsc->fsc->sb;
4909 if (!ceph_inc_mds_stopping_blocker(mdsc, session))
4992 ceph_dec_mds_stopping_blocker(mdsc);
4996 ceph_dec_mds_stopping_blocker(mdsc);
5043 static void maybe_recover_session(struct ceph_mds_client *mdsc)
5045 struct ceph_fs_client *fsc = mdsc->fsc;
5106 static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay)
5113 schedule_delayed_work(&mdsc->delayed_work,
5119 struct ceph_mds_client *mdsc =
5126 dout("mdsc delayed_work\n");
5128 if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED)
5131 mutex_lock(&mdsc->mutex);
5132 renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
5134 mdsc->last_renew_caps);
5136 mdsc->last_renew_caps = jiffies;
5138 for (i = 0; i < mdsc->max_sessions; i++) {
5139 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
5147 mutex_unlock(&mdsc->mutex);
5151 send_renew_caps(mdsc, s);
5156 ceph_send_cap_releases(mdsc, s);
5160 mutex_lock(&mdsc->mutex);
5162 mutex_unlock(&mdsc->mutex);
5164 delay = ceph_check_delayed_caps(mdsc);
5166 ceph_queue_cap_reclaim_work(mdsc);
5168 ceph_trim_snapid_map(mdsc);
5170 maybe_recover_session(mdsc);
5172 schedule_delayed(mdsc, delay);
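Fragments 5106-5172 form the periodic housekeeping tick: renew caps, send cap releases, trim state, then re-arm. The re-arm helper sketches as follows (the 5-second fallback is an assumption; the normal interval derives from the mdsmap session timeout at line 5132):

	/* Sketch of the self-rearming tick. */
	static void schedule_delayed(struct ceph_mds_client *mdsc,
				     unsigned long delay)
	{
		if (!delay)
			delay = 5 * HZ;	/* assumed default tick */
		schedule_delayed_work(&mdsc->delayed_work,
				      round_jiffies_relative(delay));
	}
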
5178 struct ceph_mds_client *mdsc;
5181 mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
5182 if (!mdsc)
5184 mdsc->fsc = fsc;
5185 mutex_init(&mdsc->mutex);
5186 mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
5187 if (!mdsc->mdsmap) {
5192 init_completion(&mdsc->safe_umount_waiters);
5193 spin_lock_init(&mdsc->stopping_lock);
5194 atomic_set(&mdsc->stopping_blockers, 0);
5195 init_completion(&mdsc->stopping_waiter);
5196 init_waitqueue_head(&mdsc->session_close_wq);
5197 INIT_LIST_HEAD(&mdsc->waiting_for_map);
5198 mdsc->quotarealms_inodes = RB_ROOT;
5199 mutex_init(&mdsc->quotarealms_inodes_mutex);
5200 init_rwsem(&mdsc->snap_rwsem);
5201 mdsc->snap_realms = RB_ROOT;
5202 INIT_LIST_HEAD(&mdsc->snap_empty);
5203 spin_lock_init(&mdsc->snap_empty_lock);
5204 mdsc->request_tree = RB_ROOT;
5205 INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
5206 mdsc->last_renew_caps = jiffies;
5207 INIT_LIST_HEAD(&mdsc->cap_delay_list);
5208 INIT_LIST_HEAD(&mdsc->cap_wait_list);
5209 spin_lock_init(&mdsc->cap_delay_lock);
5210 INIT_LIST_HEAD(&mdsc->snap_flush_list);
5211 spin_lock_init(&mdsc->snap_flush_lock);
5212 mdsc->last_cap_flush_tid = 1;
5213 INIT_LIST_HEAD(&mdsc->cap_flush_list);
5214 INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
5215 spin_lock_init(&mdsc->cap_dirty_lock);
5216 init_waitqueue_head(&mdsc->cap_flushing_wq);
5217 INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
5218 err = ceph_metric_init(&mdsc->metric);
5222 spin_lock_init(&mdsc->dentry_list_lock);
5223 INIT_LIST_HEAD(&mdsc->dentry_leases);
5224 INIT_LIST_HEAD(&mdsc->dentry_dir_leases);
5226 ceph_caps_init(mdsc);
5227 ceph_adjust_caps_max_min(mdsc, fsc->mount_options);
5229 spin_lock_init(&mdsc->snapid_map_lock);
5230 mdsc->snapid_map_tree = RB_ROOT;
5231 INIT_LIST_HEAD(&mdsc->snapid_map_lru);
5233 init_rwsem(&mdsc->pool_perm_rwsem);
5234 mdsc->pool_perm_tree = RB_ROOT;
5236 strscpy(mdsc->nodename, utsname()->nodename,
5237 sizeof(mdsc->nodename));
5239 fsc->mdsc = mdsc;
5243 kfree(mdsc->mdsmap);
5245 kfree(mdsc);
5253 static void wait_requests(struct ceph_mds_client *mdsc)
5255 struct ceph_options *opts = mdsc->fsc->client->options;
5258 mutex_lock(&mdsc->mutex);
5259 if (__get_oldest_req(mdsc)) {
5260 mutex_unlock(&mdsc->mutex);
5263 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
5267 mutex_lock(&mdsc->mutex);
5268 while ((req = __get_oldest_req(mdsc))) {
5272 __unregister_request(mdsc, req);
5275 mutex_unlock(&mdsc->mutex);
5307 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
5310 mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN;
5312 ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
5313 ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false);
5314 ceph_flush_dirty_caps(mdsc);
5315 wait_requests(mdsc);
5323 ceph_cleanup_quotarealms_inodes(mdsc);
5329 static void flush_mdlog_and_wait_mdsc_unsafe_requests(struct ceph_mds_client *mdsc,
5336 mutex_lock(&mdsc->mutex);
5339 req = __get_oldest_req(mdsc);
5361 mutex_unlock(&mdsc->mutex);
5375 mutex_lock(&mdsc->mutex);
5388 mutex_unlock(&mdsc->mutex);
5393 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
5397 if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN)
5401 mutex_lock(&mdsc->mutex);
5402 want_tid = mdsc->last_tid;
5403 mutex_unlock(&mdsc->mutex);
5405 ceph_flush_dirty_caps(mdsc);
5406 spin_lock(&mdsc->cap_dirty_lock);
5407 want_flush = mdsc->last_cap_flush_tid;
5408 if (!list_empty(&mdsc->cap_flush_list)) {
5410 list_last_entry(&mdsc->cap_flush_list,
5414 spin_unlock(&mdsc->cap_dirty_lock);
5419 flush_mdlog_and_wait_mdsc_unsafe_requests(mdsc, want_tid);
5420 wait_caps_flush(mdsc, want_flush);
5426 static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
5428 if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
5430 return atomic_read(&mdsc->num_sessions) <= skipped;
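Fragments 5426-5430 reconstruct to a small predicate used with the timed wait at lines 5462-5463:

	/* Sketch: sessions are "done closing" once only the skipped ones
	 * remain, or the mount was already forced down. */
	static bool done_closing_sessions(struct ceph_mds_client *mdsc,
					  int skipped)
	{
		if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
			return true;
		return atomic_read(&mdsc->num_sessions) <= skipped;
	}
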
5436 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
5438 struct ceph_options *opts = mdsc->fsc->client->options;
5446 mutex_lock(&mdsc->mutex);
5447 for (i = 0; i < mdsc->max_sessions; i++) {
5448 session = __ceph_lookup_mds_session(mdsc, i);
5451 mutex_unlock(&mdsc->mutex);
5453 if (__close_session(mdsc, session) <= 0)
5457 mutex_lock(&mdsc->mutex);
5459 mutex_unlock(&mdsc->mutex);
5462 wait_event_timeout(mdsc->session_close_wq,
5463 done_closing_sessions(mdsc, skipped),
5467 mutex_lock(&mdsc->mutex);
5468 for (i = 0; i < mdsc->max_sessions; i++) {
5469 if (mdsc->sessions[i]) {
5470 session = ceph_get_mds_session(mdsc->sessions[i]);
5471 __unregister_session(mdsc, session);
5472 mutex_unlock(&mdsc->mutex);
5477 mutex_lock(&mdsc->mutex);
5480 WARN_ON(!list_empty(&mdsc->cap_delay_list));
5481 mutex_unlock(&mdsc->mutex);
5483 ceph_cleanup_snapid_map(mdsc);
5484 ceph_cleanup_global_and_empty_realms(mdsc);
5486 cancel_work_sync(&mdsc->cap_reclaim_work);
5487 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
5492 void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
5499 mutex_lock(&mdsc->mutex);
5500 for (mds = 0; mds < mdsc->max_sessions; mds++) {
5501 session = __ceph_lookup_mds_session(mdsc, mds);
5506 __unregister_session(mdsc, session);
5507 __wake_requests(mdsc, &session->s_waiting);
5508 mutex_unlock(&mdsc->mutex);
5511 __close_session(mdsc, session);
5513 cleanup_session_requests(mdsc, session);
5519 mutex_lock(&mdsc->mutex);
5520 kick_requests(mdsc, mds);
5522 __wake_requests(mdsc, &mdsc->waiting_for_map);
5523 mutex_unlock(&mdsc->mutex);
5526 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
5537 flush_delayed_work(&mdsc->delayed_work);
5539 if (mdsc->mdsmap)
5540 ceph_mdsmap_destroy(mdsc->mdsmap);
5541 kfree(mdsc->sessions);
5542 ceph_caps_finalize(mdsc);
5543 ceph_pool_perm_destroy(mdsc);
5548 struct ceph_mds_client *mdsc = fsc->mdsc;
5549 dout("mdsc_destroy %p\n", mdsc);
5551 if (!mdsc)
5557 ceph_mdsc_stop(mdsc);
5559 ceph_metric_destroy(&mdsc->metric);
5561 fsc->mdsc = NULL;
5562 kfree(mdsc);
5563 dout("mdsc_destroy %p done\n", mdsc);
5566 void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
5568 struct ceph_fs_client *fsc = mdsc->fsc;
5626 ceph_umount_begin(mdsc->fsc->sb);
5629 mutex_lock(&mdsc->mutex);
5630 mdsc->mdsmap_err = err;
5631 __wake_requests(mdsc, &mdsc->waiting_for_map);
5632 mutex_unlock(&mdsc->mutex);
5638 void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
5650 if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
5657 mutex_lock(&mdsc->mutex);
5658 if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
5660 epoch, mdsc->mdsmap->m_epoch);
5661 mutex_unlock(&mdsc->mutex);
5665 newmap = ceph_mdsmap_decode(&p, end, ceph_msgr2(mdsc->fsc->client));
5672 if (mdsc->mdsmap) {
5673 oldmap = mdsc->mdsmap;
5674 mdsc->mdsmap = newmap;
5675 check_new_map(mdsc, newmap, oldmap);
5678 mdsc->mdsmap = newmap; /* first mds map */
5680 mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size,
5683 __wake_requests(mdsc, &mdsc->waiting_for_map);
5684 ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
5685 mdsc->mdsmap->m_epoch);
5687 mutex_unlock(&mdsc->mutex);
5688 schedule_delayed(mdsc, 0);
5692 mutex_unlock(&mdsc->mutex);
5695 ceph_umount_begin(mdsc->fsc->sb);
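Fragments 5638-5695 show the mdsmap install discipline: reject stale epochs under mdsc->mutex, install the new map before diffing it against the old one, then wake waiters. A condensed sketch (local variables p, end, epoch are assumed from the decode context; error paths elided):

	/* Sketch of the mdsmap swap path above. */
	mutex_lock(&mdsc->mutex);
	if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
		mutex_unlock(&mdsc->mutex);	/* stale or duplicate epoch */
		return;
	}
	newmap = ceph_mdsmap_decode(&p, end, ceph_msgr2(mdsc->fsc->client));
	oldmap = mdsc->mdsmap;
	mdsc->mdsmap = newmap;			/* install before diffing */
	if (oldmap) {
		check_new_map(mdsc, newmap, oldmap);
		ceph_mdsmap_destroy(oldmap);
	}
	__wake_requests(mdsc, &mdsc->waiting_for_map);
	mutex_unlock(&mdsc->mutex);
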
5723 struct ceph_mds_client *mdsc = s->s_mdsc;
5726 if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO)
5727 send_mds_reconnect(mdsc, s);
5733 struct ceph_mds_client *mdsc = s->s_mdsc;
5736 mutex_lock(&mdsc->mutex);
5737 if (__verify_registered_session(mdsc, s) < 0) {
5738 mutex_unlock(&mdsc->mutex);
5741 mutex_unlock(&mdsc->mutex);
5745 ceph_mdsc_handle_mdsmap(mdsc, msg);
5748 ceph_mdsc_handle_fsmap(mdsc, msg);
5757 handle_forward(mdsc, s, msg);
5763 ceph_handle_snap(mdsc, s, msg);
5766 handle_lease(mdsc, s, msg);
5769 ceph_handle_quota(mdsc, s, msg);
5792 struct ceph_mds_client *mdsc = s->s_mdsc;
5793 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5809 struct ceph_mds_client *mdsc = s->s_mdsc;
5810 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5819 struct ceph_mds_client *mdsc = s->s_mdsc;
5820 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5831 struct ceph_mds_client *mdsc = s->s_mdsc;
5832 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5836 return ceph_monc_validate_auth(&mdsc->fsc->client->monc);