Lines matching defs:osdc in net/ceph/osd_client.c (Linux kernel libceph OSD client); each entry below is prefixed with its line number in that file.
69 static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
71 WARN_ON(!rwsem_is_locked(&osdc->lock));
73 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
75 WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
79 struct ceph_osd_client *osdc = osd->o_osdc;
82 rwsem_is_locked(&osdc->lock)) &&
83 !rwsem_is_wrlocked(&osdc->lock));
90 static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
91 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
540 struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
550 req = mempool_alloc(osdc->req_mempool, gfp_flags);
561 req->r_osdc = osdc;
581 struct ceph_osd_client *osdc = req->r_osdc;
607 msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size,
624 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size,
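
Lines 540-624 are the request/message allocation path: ceph_osdc_alloc_request() takes the request itself either from osdc->req_mempool (when use_mempool is set) or from the slab/kmalloc path, and the OSD op and reply messages likewise come from osdc->msgpool_op / msgpool_op_reply when pooled. A hedged skeleton of the allocation decision; the slab-cache name, size limits, and struct_size() form are from memory and may differ by kernel version, and the field initialization is elided:

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
						 struct ceph_snap_context *snapc,
						 unsigned int num_ops,
						 bool use_mempool, gfp_t gfp_flags)
{
	struct ceph_osd_request *req;

	if (use_mempool) {
		/* reserved memory, fixed-size entries */
		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
	} else {
		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
		req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
	}
	if (unlikely(!req))
		return NULL;

	/* ... kref/list init, req->r_osdc = osdc, snapc, mempool flag, etc. ... */
	return req;
}
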
1043 struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
1064 req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
1097 req->r_flags = flags | osdc->client->options->read_from_replica;
1132 static void for_each_request(struct ceph_osd_client *osdc,
1138 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
1151 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
1209 static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
1217 osd->o_osdc = osdc;
1220 ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
1251 struct ceph_osd_client *osdc = osd->o_osdc;
1256 spin_lock(&osdc->osd_lru_lock);
1257 list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
1258 spin_unlock(&osdc->osd_lru_lock);
1260 osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
1272 struct ceph_osd_client *osdc = osd->o_osdc;
1276 spin_lock(&osdc->osd_lru_lock);
1279 spin_unlock(&osdc->osd_lru_lock);
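
Lines 1251-1279 are the idle-session LRU bookkeeping: moving an osd onto osdc->osd_lru stamps lru_ttl so handle_osds_timeout() (lines 3450-3467 below) can close the session once osd_idle_ttl has passed; taking it back off the list is the mirror operation. Reconstructed sketch; the dout/BUG_ON details are from memory:

static void __move_osd_to_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));

	spin_lock(&osdc->osd_lru_lock);
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	spin_unlock(&osdc->osd_lru_lock);

	/* handle_osds_timeout() closes the session if still idle past this */
	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	spin_lock(&osdc->osd_lru_lock);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
	spin_unlock(&osdc->osd_lru_lock);
}
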
1288 struct ceph_osd_client *osdc = osd->o_osdc;
1291 verify_osdc_wrlocked(osdc);
1304 link_request(&osdc->homeless_osd, req);
1315 link_linger(&osdc->homeless_osd, lreq);
1320 erase_osd(&osdc->osds, osd);
1363 static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
1369 verify_osdc_wrlocked(osdc);
1371 verify_osdc_locked(osdc);
1374 osd = lookup_osd(&osdc->osds, o);
1376 osd = &osdc->homeless_osd;
1381 osd = create_osd(osdc, o);
1382 insert_osd(&osdc->osds, osd);
1384 &osdc->osdmap->osd_addr[osd->o_osd]);
1387 dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
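
Pieced together, lines 1363-1387 encode the session-lookup contract: a caller holding osdc->lock for read may only look up an existing ceph_osd (or fall back to the homeless one), while creating a new session and inserting it into osdc->osds requires the write lock. A hedged reconstruction; the exact ceph_con_open() arguments other than the address (line 1384) are from memory:

static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
					  bool wrlocked)
{
	struct ceph_osd *osd;

	if (wrlocked)
		verify_osdc_wrlocked(osdc);
	else
		verify_osdc_locked(osdc);

	if (o != CEPH_HOMELESS_OSD)
		osd = lookup_osd(&osdc->osds, o);	/* rbtree search */
	else
		osd = &osdc->homeless_osd;
	if (!osd) {
		WARN_ON(!wrlocked);			/* creation needs the write lock */
		osd = create_osd(osdc, o);
		insert_osd(&osdc->osds, osd);
		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
			      &osdc->osdmap->osd_addr[osd->o_osd]);
	}

	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
	return osd;
}
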
1435 static bool have_pool_full(struct ceph_osd_client *osdc)
1439 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
1450 static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
1454 pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
1465 static bool target_should_be_paused(struct ceph_osd_client *osdc,
1469 bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
1470 bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
1471 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1477 (osdc->osdmap->epoch < osdc->epoch_barrier);
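
Lines 1465-1477 decide whether a target must be paused: reads pause under PAUSERD; writes pause under PAUSEWR, the global FULL flag, or a full pool; and everything pauses while the osdmap epoch is still behind the epoch barrier. Joined up it reads roughly as below; the per-pool __pool_full() helper name is assumed from memory:

static bool target_should_be_paused(struct ceph_osd_client *osdc,
				    const struct ceph_osd_request_target *t,
				    struct ceph_pg_pool_info *pi)
{
	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		       __pool_full(pi);		/* per-pool FULL flag; helper name assumed */

	WARN_ON(pi->id != t->target_oloc.pool);
	return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
	       ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
	       (osdc->osdmap->epoch < osdc->epoch_barrier);
}
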
1494 static int pick_closest_replica(struct ceph_osd_client *osdc,
1497 struct ceph_options *opt = osdc->client->options;
1502 locality = ceph_get_crush_locality(osdc->osdmap,
1525 static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1538 bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1539 bool recovery_deletes = ceph_osdmap_flag(osdc,
1543 t->epoch = osdc->osdmap->epoch;
1544 pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
1551 if (osdc->osdmap->epoch == pi->last_force_request_resend) {
1569 pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
1581 ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
1600 if (t->paused && !target_should_be_paused(osdc, t, pi)) {
1612 ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
1632 pos = pick_closest_replica(osdc, &acting);
2293 static void maybe_request_map(struct ceph_osd_client *osdc)
2297 verify_osdc_locked(osdc);
2298 WARN_ON(!osdc->osdmap->epoch);
2300 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2301 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
2302 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2303 dout("%s osdc %p continuous\n", __func__, osdc);
2306 dout("%s osdc %p onetime\n", __func__, osdc);
2309 if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
2310 osdc->osdmap->epoch + 1, continuous))
2311 ceph_monc_renew_subs(&osdc->client->monc);
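
Lines 2293-2311 are nearly the whole of maybe_request_map(): with osdc->lock held, subscribe to the next osdmap epoch from the monitor, continuously while a FULL/PAUSERD/PAUSEWR flag is set and one-shot otherwise. Reconstructed sketch from the matched lines:

static void maybe_request_map(struct ceph_osd_client *osdc)
{
	bool continuous = false;

	verify_osdc_locked(osdc);
	WARN_ON(!osdc->osdmap->epoch);

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
	    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
		dout("%s osdc %p continuous\n", __func__, osdc);
		continuous = true;	/* keep the subscription open while blocked */
	} else {
		dout("%s osdc %p onetime\n", __func__, osdc);
	}

	if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			       osdc->osdmap->epoch + 1, continuous))
		ceph_monc_renew_subs(&osdc->client->monc);
}
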
2319 struct ceph_osd_client *osdc = req->r_osdc;
2330 ct_res = calc_target(osdc, &req->r_t, false);
2334 osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
2340 if (osdc->abort_err) {
2341 dout("req %p abort_err %d\n", req, osdc->abort_err);
2342 err = osdc->abort_err;
2343 } else if (osdc->osdmap->epoch < osdc->epoch_barrier) {
2344 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
2345 osdc->epoch_barrier);
2347 maybe_request_map(osdc);
2349 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2352 maybe_request_map(osdc);
2354 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2357 maybe_request_map(osdc);
2361 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2362 pool_full(osdc, req->r_t.base_oloc.pool))) {
2364 if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
2369 maybe_request_map(osdc);
2374 maybe_request_map(osdc);
2383 req->r_tid = atomic64_inc_return(&osdc->last_tid);
2395 downgrade_write(&osdc->lock);
2399 up_read(&osdc->lock);
2400 down_write(&osdc->lock);
2427 struct ceph_osd_client *osdc = req->r_osdc;
2429 WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
2436 atomic_dec(&osdc->num_requests);
2483 struct ceph_osd_client *osdc = req->r_osdc;
2486 verify_osdc_wrlocked(osdc);
2488 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2493 erase_request_mc(&osdc->map_checks, req);
2527 void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err)
2529 dout("%s osdc %p err %d\n", __func__, osdc, err);
2530 down_write(&osdc->lock);
2531 for_each_request(osdc, abort_fn, &err);
2532 osdc->abort_err = err;
2533 up_write(&osdc->lock);
2537 void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc)
2539 down_write(&osdc->lock);
2540 osdc->abort_err = 0;
2541 up_write(&osdc->lock);
2545 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2547 if (likely(eb > osdc->epoch_barrier)) {
2549 osdc->epoch_barrier, eb);
2550 osdc->epoch_barrier = eb;
2552 if (eb > osdc->osdmap->epoch)
2553 maybe_request_map(osdc);
2557 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2559 down_read(&osdc->lock);
2560 if (unlikely(eb > osdc->epoch_barrier)) {
2561 up_read(&osdc->lock);
2562 down_write(&osdc->lock);
2563 update_epoch_barrier(osdc, eb);
2564 up_write(&osdc->lock);
2566 up_read(&osdc->lock);
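
Lines 2557-2566 show the usual read-then-upgrade idiom: the cheap comparison runs under the read lock, and only when the barrier actually needs raising is the lock dropped and retaken for write; update_epoch_barrier() (lines 2545-2553) repeats the comparison under the write lock, since another writer may have raced in between. Sketch assembled from the matched lines:

void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
{
	down_read(&osdc->lock);
	if (unlikely(eb > osdc->epoch_barrier)) {
		up_read(&osdc->lock);
		down_write(&osdc->lock);
		update_epoch_barrier(osdc, eb);	/* rechecks eb under the write lock */
		up_write(&osdc->lock);
	} else {
		up_read(&osdc->lock);
	}
}
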
2579 struct ceph_osd_client *osdc = req->r_osdc;
2583 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2584 pool_full(osdc, req->r_t.base_oloc.pool))) {
2586 update_epoch_barrier(osdc, osdc->osdmap->epoch);
2598 * osdc->epoch_barrier to the latest map epoch that we've seen if any were
2601 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
2605 if (ceph_test_opt(osdc->client, ABORT_ON_FULL) &&
2606 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
2607 for_each_request(osdc, abort_on_full_fn, &victims);
2612 struct ceph_osd_client *osdc = req->r_osdc;
2613 struct ceph_osdmap *map = osdc->osdmap;
2615 verify_osdc_wrlocked(osdc);
2646 struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2652 down_write(&osdc->lock);
2653 req = lookup_request_mc(&osdc->map_checks, tid);
2663 erase_request_mc(&osdc->map_checks, req);
2668 up_write(&osdc->lock);
2673 struct ceph_osd_client *osdc = req->r_osdc;
2677 verify_osdc_wrlocked(osdc);
2679 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2686 insert_request_mc(&osdc->map_checks, req);
2687 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2734 linger_alloc(struct ceph_osd_client *osdc)
2752 lreq->osdc = osdc;
2806 verify_osdc_locked(lreq->osdc);
2813 struct ceph_osd_client *osdc = lreq->osdc;
2816 down_read(&osdc->lock);
2818 up_read(&osdc->lock);
2825 struct ceph_osd_client *osdc = lreq->osdc;
2827 verify_osdc_wrlocked(osdc);
2831 lreq->linger_id = ++osdc->last_linger_id;
2832 insert_linger_osdc(&osdc->linger_requests, lreq);
2837 struct ceph_osd_client *osdc = lreq->osdc;
2839 verify_osdc_wrlocked(osdc);
2841 erase_linger_osdc(&osdc->linger_requests, lreq);
2906 struct ceph_osd_client *osdc = lreq->osdc;
2913 queue_work(osdc->notify_wq, &lwork->work);
3058 struct ceph_osd_client *osdc = lreq->osdc;
3062 verify_osdc_wrlocked(osdc);
3072 req = ceph_osdc_alloc_request(osdc, NULL, 1, true, GFP_NOIO);
3146 struct ceph_osd_client *osdc = lreq->osdc;
3150 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
3166 req = ceph_osdc_alloc_request(osdc, NULL, 1, true, GFP_NOIO);
3183 req->r_tid = atomic64_inc_return(&osdc->last_tid);
3190 struct ceph_osd_client *osdc = lreq->osdc;
3193 down_write(&osdc->lock);
3196 calc_target(osdc, &lreq->t, false);
3197 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3201 up_write(&osdc->lock);
3206 struct ceph_osd_client *osdc = lreq->osdc;
3209 verify_osdc_wrlocked(osdc);
3211 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3217 erase_linger_mc(&osdc->linger_map_checks, lreq);
3237 struct ceph_osd_client *osdc = lreq->osdc;
3239 down_write(&osdc->lock);
3242 up_write(&osdc->lock);
3249 struct ceph_osd_client *osdc = lreq->osdc;
3250 struct ceph_osdmap *map = osdc->osdmap;
3252 verify_osdc_wrlocked(osdc);
3280 struct ceph_osd_client *osdc = &greq->monc->client->osdc;
3286 down_write(&osdc->lock);
3287 lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
3298 erase_linger_mc(&osdc->linger_map_checks, lreq);
3303 up_write(&osdc->lock);
3308 struct ceph_osd_client *osdc = lreq->osdc;
3312 verify_osdc_wrlocked(osdc);
3314 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3322 insert_linger_mc(&osdc->linger_map_checks, lreq);
3323 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
3361 struct ceph_osd_client *osdc =
3363 struct ceph_options *opts = osdc->client->options;
3369 dout("%s osdc %p\n", __func__, osdc);
3370 down_write(&osdc->lock);
3377 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3418 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
3426 req->r_tid, osdc->homeless_osd.o_osd);
3432 if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
3433 maybe_request_map(osdc);
3443 up_write(&osdc->lock);
3444 schedule_delayed_work(&osdc->timeout_work,
3445 osdc->client->options->osd_keepalive_timeout);
3450 struct ceph_osd_client *osdc =
3453 unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
3456 dout("%s osdc %p\n", __func__, osdc);
3457 down_write(&osdc->lock);
3458 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
3467 up_write(&osdc->lock);
3468 schedule_delayed_work(&osdc->osds_timeout_work,
3681 struct ceph_osd_client *osdc = osd->o_osdc;
3691 down_read(&osdc->lock);
3797 up_read(&osdc->lock);
3807 up_read(&osdc->lock);
3810 static void set_pool_was_full(struct ceph_osd_client *osdc)
3814 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3822 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3826 pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
3836 struct ceph_osd_client *osdc = lreq->osdc;
3839 ct_res = calc_target(osdc, &lreq->t, true);
3843 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3863 struct ceph_osd_client *osdc = osd->o_osdc;
3881 pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3911 ct_res = calc_target(osdc, &req->r_t, false);
3916 pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3935 static int handle_one_map(struct ceph_osd_client *osdc,
3945 was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3946 set_pool_was_full(osdc);
3949 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3955 if (newmap != osdc->osdmap) {
3966 old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3973 if (osdc->osdmap->epoch &&
3974 osdc->osdmap->epoch + 1 < newmap->epoch) {
3979 ceph_osdmap_destroy(osdc->osdmap);
3980 osdc->osdmap = newmap;
3983 was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3984 scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3987 for (n = rb_first(&osdc->osds); n; ) {
3994 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3996 ceph_osd_addr(osdc->osdmap, osd->o_osd),
4004 static void kick_requests(struct ceph_osd_client *osdc,
4019 if (req->r_t.epoch < osdc->osdmap->epoch) {
4020 ct_res = calc_target(osdc, &req->r_t, false);
4036 osd = lookup_create_osd(osdc, req->r_t.osd, true);
4061 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
4075 dout("%s have %u\n", __func__, osdc->osdmap->epoch);
4076 down_write(&osdc->lock);
4081 if (ceph_check_fsid(osdc->client, &fsid) < 0)
4084 was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
4085 was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
4086 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
4087 have_pool_full(osdc);
4097 if (osdc->osdmap->epoch &&
4098 osdc->osdmap->epoch + 1 == epoch) {
4101 err = handle_one_map(osdc, p, p + maplen, true,
4127 } else if (osdc->osdmap->epoch >= epoch) {
4130 osdc->osdmap->epoch);
4133 err = handle_one_map(osdc, p, p + maplen, false,
4148 pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
4149 pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
4150 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
4151 have_pool_full(osdc);
4153 osdc->osdmap->epoch < osdc->epoch_barrier)
4154 maybe_request_map(osdc);
4156 kick_requests(osdc, &need_resend, &need_resend_linger);
4158 ceph_osdc_abort_on_full(osdc);
4159 ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
4160 osdc->osdmap->epoch);
4161 up_write(&osdc->lock);
4162 wake_up_all(&osdc->client->auth_wq);
4166 pr_err("osdc handle_map corrupt msg\n");
4168 up_write(&osdc->lock);
4207 struct ceph_osd_client *osdc = osd->o_osdc;
4211 down_write(&osdc->lock);
4219 maybe_request_map(osdc);
4222 up_write(&osdc->lock);
4441 struct ceph_osd_client *osdc = osd->o_osdc;
4445 down_read(&osdc->lock);
4448 up_read(&osdc->lock);
4477 up_read(&osdc->lock);
4483 static void handle_watch_notify(struct ceph_osd_client *osdc,
4516 down_read(&osdc->lock);
4517 lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
4572 up_read(&osdc->lock);
4576 pr_err("osdc handle_watch_notify corrupt msg\n");
4582 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
4586 down_read(&osdc->lock);
4588 up_read(&osdc->lock);
4600 struct ceph_osd_client *osdc = req->r_osdc;
4602 down_write(&osdc->lock);
4605 up_write(&osdc->lock);
4633 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
4643 void ceph_osdc_sync(struct ceph_osd_client *osdc)
4646 u64 last_tid = atomic64_read(&osdc->last_tid);
4649 down_read(&osdc->lock);
4650 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
4666 up_read(&osdc->lock);
4677 up_read(&osdc->lock);
4686 ceph_osdc_watch(struct ceph_osd_client *osdc,
4696 lreq = linger_alloc(osdc);
4733 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
4736 struct ceph_options *opts = osdc->client->options;
4740 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4755 ceph_osdc_start_request(osdc, req, false);
4798 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
4809 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4826 ceph_osdc_start_request(osdc, req, false);
4827 ret = ceph_osdc_wait_request(osdc, req);
4843 int ceph_osdc_notify(struct ceph_osd_client *osdc,
4861 lreq = linger_alloc(osdc);
4915 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
4921 down_read(&osdc->lock);
4940 up_read(&osdc->lock);
5010 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
5020 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
5043 ceph_osdc_start_request(osdc, req, false);
5044 ret = ceph_osdc_wait_request(osdc, req);
5062 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
5064 dout("%s osdc %p\n", __func__, osdc);
5065 flush_workqueue(osdc->notify_wq);
5069 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
5071 down_read(&osdc->lock);
5072 maybe_request_map(osdc);
5073 up_read(&osdc->lock);
5083 int ceph_osdc_call(struct ceph_osd_client *osdc,
5097 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
5120 ceph_osdc_start_request(osdc, req, false);
5121 ret = ceph_osdc_wait_request(osdc, req);
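
ceph_osdc_call() (lines 5083-5121), notify_ack (4798-4827) and list_watchers (5010-5044) all follow the same synchronous pattern visible in their matched lines: allocate a one-op request, set it up, start it, then block in ceph_osdc_wait_request(). A hypothetical caller doing the same by hand might look like the sketch below; the function name is made up, and helpers other than alloc/start/wait (ceph_oid_copy, ceph_oloc_copy, ceph_osdc_alloc_messages, ceph_osdc_put_request) are assumed from the wider libceph API rather than from the lines above:

static int example_sync_request(struct ceph_osd_client *osdc,
				struct ceph_object_id *oid,
				struct ceph_object_locator *oloc)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);		/* which object */
	ceph_oloc_copy(&req->r_base_oloc, oloc);	/* which pool/namespace */
	req->r_flags = CEPH_OSD_FLAG_READ;

	/* ... set up exactly one op here (osd_req_op_init() and friends) ... */

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put;

	ceph_osdc_start_request(osdc, req, false);	/* queued under osdc->lock */
	ret = ceph_osdc_wait_request(osdc, req);	/* blocks for r_result */

out_put:
	ceph_osdc_put_request(req);
	return ret;
}
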
5137 void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc)
5141 down_write(&osdc->lock);
5142 for (n = rb_first(&osdc->osds); n; ) {
5149 up_write(&osdc->lock);
5155 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
5160 osdc->client = client;
5161 init_rwsem(&osdc->lock);
5162 osdc->osds = RB_ROOT;
5163 INIT_LIST_HEAD(&osdc->osd_lru);
5164 spin_lock_init(&osdc->osd_lru_lock);
5165 osd_init(&osdc->homeless_osd);
5166 osdc->homeless_osd.o_osdc = osdc;
5167 osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
5168 osdc->last_linger_id = CEPH_LINGER_ID_START;
5169 osdc->linger_requests = RB_ROOT;
5170 osdc->map_checks = RB_ROOT;
5171 osdc->linger_map_checks = RB_ROOT;
5172 INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
5173 INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
5176 osdc->osdmap = ceph_osdmap_alloc();
5177 if (!osdc->osdmap)
5180 osdc->req_mempool = mempool_create_slab_pool(10,
5182 if (!osdc->req_mempool)
5185 err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
5189 err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
5196 osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
5197 if (!osdc->notify_wq)
5200 osdc->completion_wq = create_singlethread_workqueue("ceph-completion");
5201 if (!osdc->completion_wq)
5204 schedule_delayed_work(&osdc->timeout_work,
5205 osdc->client->options->osd_keepalive_timeout);
5206 schedule_delayed_work(&osdc->osds_timeout_work,
5207 round_jiffies_relative(osdc->client->options->osd_idle_ttl));
5212 destroy_workqueue(osdc->notify_wq);
5214 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
5216 ceph_msgpool_destroy(&osdc->msgpool_op);
5218 mempool_destroy(osdc->req_mempool);
5220 ceph_osdmap_destroy(osdc->osdmap);
5225 void ceph_osdc_stop(struct ceph_osd_client *osdc)
5227 destroy_workqueue(osdc->completion_wq);
5228 destroy_workqueue(osdc->notify_wq);
5229 cancel_delayed_work_sync(&osdc->timeout_work);
5230 cancel_delayed_work_sync(&osdc->osds_timeout_work);
5232 down_write(&osdc->lock);
5233 while (!RB_EMPTY_ROOT(&osdc->osds)) {
5234 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
5238 up_write(&osdc->lock);
5239 WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
5240 osd_cleanup(&osdc->homeless_osd);
5242 WARN_ON(!list_empty(&osdc->osd_lru));
5243 WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
5244 WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
5245 WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
5246 WARN_ON(atomic_read(&osdc->num_requests));
5247 WARN_ON(atomic_read(&osdc->num_homeless));
5249 ceph_osdmap_destroy(osdc->osdmap);
5250 mempool_destroy(osdc->req_mempool);
5251 ceph_msgpool_destroy(&osdc->msgpool_op);
5252 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
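
Taken together, lines 5155-5252 give the osdc lifecycle: ceph_osdc_init() wires up the rbtrees, the homeless OSD, the request mempool and message pools, the osdmap and the two delayed works, unwinding in reverse order on failure, while ceph_osdc_stop() cancels the works, closes every OSD session and frees the same resources. A hypothetical caller sketch (in the real code this happens when the enclosing ceph_client is created and destroyed; struct ceph_client embeds its ceph_osd_client as ->osdc, per line 2646 above):

static int example_client_bringup(struct ceph_client *client)
{
	int err;

	err = ceph_osdc_init(&client->osdc, client);
	if (err)
		return err;		/* init unwinds its own partial state */

	/* ... issue requests, watches, notifies ... */

	ceph_osdc_stop(&client->osdc);	/* cancel works, close sessions, free pools */
	return 0;
}
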
5292 int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
5306 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
5326 ceph_osdc_start_request(osdc, req, false);
5327 ret = ceph_osdc_wait_request(osdc, req);
5360 struct ceph_osd_client *osdc = osd->o_osdc;
5365 ceph_osdc_handle_map(osdc, msg);
5374 handle_watch_notify(osdc, msg);
5395 struct ceph_osd_client *osdc = osd->o_osdc;
5402 down_read(&osdc->lock);
5448 up_read(&osdc->lock);
5530 struct ceph_osd_client *osdc = o->o_osdc;
5531 struct ceph_auth_client *ac = osdc->client->monc.auth;
5558 struct ceph_osd_client *osdc = o->o_osdc;
5559 struct ceph_auth_client *ac = osdc->client->monc.auth;
5568 struct ceph_osd_client *osdc = o->o_osdc;
5569 struct ceph_auth_client *ac = osdc->client->monc.auth;
5577 struct ceph_osd_client *osdc = o->o_osdc;
5578 struct ceph_auth_client *ac = osdc->client->monc.auth;
5581 return ceph_monc_validate_auth(&osdc->client->monc);