Lines matching refs: lreq — every reference to the linger (watch/notify) request, struct ceph_osd_linger_request, in the Ceph OSD client
52 struct ceph_osd_linger_request *lreq);
54 struct ceph_osd_linger_request *lreq);
85 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
87 WARN_ON(!mutex_is_locked(&lreq->lock));
93 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
1307 struct ceph_osd_linger_request *lreq =
1312 dout(" reassigning lreq %p linger_id %llu\n", lreq,
1313 lreq->linger_id);
1314 unlink_linger(osd, lreq);
1315 link_linger(&osdc->homeless_osd, lreq);
2697 struct ceph_osd_linger_request *lreq =
2700 dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
2701 lreq->reg_req, lreq->ping_req);
2702 WARN_ON(!RB_EMPTY_NODE(&lreq->node));
2703 WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
2704 WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
2705 WARN_ON(!list_empty(&lreq->scan_item));
2706 WARN_ON(!list_empty(&lreq->pending_lworks));
2707 WARN_ON(lreq->osd);
2709 if (lreq->request_pl)
2710 ceph_pagelist_release(lreq->request_pl);
2711 if (lreq->notify_id_pages)
2712 ceph_release_page_vector(lreq->notify_id_pages, 1);
2714 ceph_osdc_put_request(lreq->reg_req);
2715 ceph_osdc_put_request(lreq->ping_req);
2716 target_destroy(&lreq->t);
2717 kfree(lreq);
2720 static void linger_put(struct ceph_osd_linger_request *lreq)
2722 if (lreq)
2723 kref_put(&lreq->kref, linger_release);
2727 linger_get(struct ceph_osd_linger_request *lreq)
2729 kref_get(&lreq->kref);
2730 return lreq;
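Lines 2697-2730 above are the reference-counting core: linger_release() is the kref destructor, and linger_get()/linger_put() are thin wrappers around kref_get()/kref_put(). Below is a minimal, self-contained sketch of that same pattern; every name in it (my_obj and friends) is invented for illustration and is not part of the source.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_obj {
        struct kref kref;
        /* ... per-object state ... */
};

/* called by kref_put() once the last reference is dropped */
static void my_obj_release(struct kref *kref)
{
        struct my_obj *obj = container_of(kref, struct my_obj, kref);

        kfree(obj);
}

static struct my_obj *my_obj_get(struct my_obj *obj)
{
        kref_get(&obj->kref);
        return obj;
}

static void my_obj_put(struct my_obj *obj)
{
        if (obj)                        /* tolerate NULL, like linger_put() */
                kref_put(&obj->kref, my_obj_release);
}

Returning the object from the get helper is what lets line 2886 below take a reference and store it in one step (lwork->lreq = linger_get(lreq)).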
2736 struct ceph_osd_linger_request *lreq;
2738 lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
2739 if (!lreq)
2742 kref_init(&lreq->kref);
2743 mutex_init(&lreq->lock);
2744 RB_CLEAR_NODE(&lreq->node);
2745 RB_CLEAR_NODE(&lreq->osdc_node);
2746 RB_CLEAR_NODE(&lreq->mc_node);
2747 INIT_LIST_HEAD(&lreq->scan_item);
2748 INIT_LIST_HEAD(&lreq->pending_lworks);
2749 init_completion(&lreq->reg_commit_wait);
2750 init_completion(&lreq->notify_finish_wait);
2752 lreq->osdc = osdc;
2753 target_init(&lreq->t);
2755 dout("%s lreq %p\n", __func__, lreq);
2756 return lreq;
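linger_alloc() (lines 2736-2756) clears every rbtree node and list head up front, which is what makes the WARN_ON(!RB_EMPTY_NODE(...)) / WARN_ON(!list_empty(...)) checks in linger_release() (lines 2702-2706) meaningful: a request freed while still linked anywhere trips a warning. A small sketch of that cleared-node invariant, with invented names:

#include <linux/bug.h>
#include <linux/rbtree.h>
#include <linux/slab.h>

struct tracked {
        struct rb_node node;
};

static struct tracked *tracked_alloc(void)
{
        struct tracked *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (t)
                RB_CLEAR_NODE(&t->node);        /* "not in any tree" */
        return t;
}

static void tracked_free(struct tracked *t)
{
        WARN_ON(!RB_EMPTY_NODE(&t->node));      /* must be unlinked first */
        kfree(t);
}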
2766 * @lreq has to be registered, @osd may be homeless.
2769 struct ceph_osd_linger_request *lreq)
2772 WARN_ON(!lreq->linger_id || lreq->osd);
2773 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2774 osd->o_osd, lreq, lreq->linger_id);
2782 insert_linger(&osd->o_linger_requests, lreq);
2783 lreq->osd = osd;
2787 struct ceph_osd_linger_request *lreq)
2790 WARN_ON(lreq->osd != osd);
2791 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2792 osd->o_osd, lreq, lreq->linger_id);
2794 lreq->osd = NULL;
2795 erase_linger(&osd->o_linger_requests, lreq);
2804 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
2806 verify_osdc_locked(lreq->osdc);
2808 return !RB_EMPTY_NODE(&lreq->osdc_node);
2811 static bool linger_registered(struct ceph_osd_linger_request *lreq)
2813 struct ceph_osd_client *osdc = lreq->osdc;
2817 registered = __linger_registered(lreq);
2823 static void linger_register(struct ceph_osd_linger_request *lreq)
2825 struct ceph_osd_client *osdc = lreq->osdc;
2828 WARN_ON(lreq->linger_id);
2830 linger_get(lreq);
2831 lreq->linger_id = ++osdc->last_linger_id;
2832 insert_linger_osdc(&osdc->linger_requests, lreq);
2835 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2837 struct ceph_osd_client *osdc = lreq->osdc;
2841 erase_linger_osdc(&osdc->linger_requests, lreq);
2842 linger_put(lreq);
2847 struct ceph_osd_linger_request *lreq = req->r_priv;
2851 linger_put(lreq);
2856 struct ceph_osd_linger_request *lreq;
2875 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2886 lwork->lreq = linger_get(lreq);
2893 struct ceph_osd_linger_request *lreq = lwork->lreq;
2895 mutex_lock(&lreq->lock);
2897 mutex_unlock(&lreq->lock);
2899 linger_put(lreq);
2905 struct ceph_osd_linger_request *lreq = lwork->lreq;
2906 struct ceph_osd_client *osdc = lreq->osdc;
2908 verify_lreq_locked(lreq);
2912 list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2919 struct ceph_osd_linger_request *lreq = lwork->lreq;
2921 if (!linger_registered(lreq)) {
2922 dout("%s lreq %p not registered\n", __func__, lreq);
2926 WARN_ON(!lreq->is_watch);
2927 dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2928 __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2930 lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2942 struct ceph_osd_linger_request *lreq = lwork->lreq;
2944 if (!linger_registered(lreq)) {
2945 dout("%s lreq %p not registered\n", __func__, lreq);
2949 dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2950 lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2956 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2960 lwork = lwork_alloc(lreq, do_watch_error);
2966 lwork->error.err = lreq->last_error;
2970 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2973 if (!completion_done(&lreq->reg_commit_wait)) {
2974 lreq->reg_commit_error = (result <= 0 ? result : 0);
2975 complete_all(&lreq->reg_commit_wait);
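linger_reg_commit_complete() records the first registration result (folding positive results to 0) and wakes whoever is blocked in linger_reg_commit_wait() (lines 3328-3334 below). A sketch of that completion handshake, under hypothetical names:

#include <linux/completion.h>

struct reg_state {
        struct completion done;         /* init_completion() before use */
        int error;
};

static void reg_complete(struct reg_state *st, int result)
{
        if (!completion_done(&st->done)) {
                st->error = result <= 0 ? result : 0;   /* >0 means success */
                complete_all(&st->done);
        }
}

static int reg_wait(struct reg_state *st)
{
        int ret = wait_for_completion_killable(&st->done);

        return ret ?: st->error;        /* -ERESTARTSYS if killed while waiting */
}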
2981 struct ceph_osd_linger_request *lreq = req->r_priv;
2983 mutex_lock(&lreq->lock);
2984 if (req != lreq->reg_req) {
2985 dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n",
2986 __func__, lreq, lreq->linger_id, req, lreq->reg_req);
2990 dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
2991 lreq->linger_id, req->r_result);
2992 linger_reg_commit_complete(lreq, req->r_result);
2993 lreq->committed = true;
2995 if (!lreq->is_watch) {
3005 lreq->notify_id = ceph_decode_64(&p);
3006 dout("lreq %p notify_id %llu\n", lreq,
3007 lreq->notify_id);
3009 dout("lreq %p no notify_id\n", lreq);
3014 mutex_unlock(&lreq->lock);
3015 linger_put(lreq);
3033 struct ceph_osd_linger_request *lreq = req->r_priv;
3035 mutex_lock(&lreq->lock);
3036 if (req != lreq->reg_req) {
3037 dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n",
3038 __func__, lreq, lreq->linger_id, req, lreq->reg_req);
3042 dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
3043 lreq, lreq->linger_id, req->r_result, lreq->last_error);
3045 if (!lreq->last_error) {
3046 lreq->last_error = normalize_watch_error(req->r_result);
3047 queue_watch_error(lreq);
3052 mutex_unlock(&lreq->lock);
3053 linger_put(lreq);
3056 static void send_linger(struct ceph_osd_linger_request *lreq)
3058 struct ceph_osd_client *osdc = lreq->osdc;
3063 mutex_lock(&lreq->lock);
3064 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3066 if (lreq->reg_req) {
3067 if (lreq->reg_req->r_osd)
3068 cancel_linger_request(lreq->reg_req);
3069 ceph_osdc_put_request(lreq->reg_req);
3075 target_copy(&req->r_t, &lreq->t);
3076 req->r_mtime = lreq->mtime;
3078 if (lreq->is_watch && lreq->committed) {
3080 lreq->linger_id, ++lreq->register_gen);
3081 dout("lreq %p reconnect register_gen %u\n", lreq,
3085 if (lreq->is_watch) {
3087 lreq->linger_id, 0);
3089 lreq->notify_id = 0;
3091 refcount_inc(&lreq->request_pl->refcnt);
3092 osd_req_op_notify_init(req, 0, lreq->linger_id,
3093 lreq->request_pl);
3096 lreq->notify_id_pages, PAGE_SIZE, 0, false, false);
3098 dout("lreq %p register\n", lreq);
3105 req->r_priv = linger_get(lreq);
3107 lreq->reg_req = req;
3108 mutex_unlock(&lreq->lock);
3115 struct ceph_osd_linger_request *lreq = req->r_priv;
3117 mutex_lock(&lreq->lock);
3118 if (req != lreq->ping_req) {
3119 dout("%s lreq %p linger_id %llu unknown req (%p != %p)\n",
3120 __func__, lreq, lreq->linger_id, req, lreq->ping_req);
3124 dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
3125 __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
3126 lreq->last_error);
3127 if (lreq->register_gen == req->r_ops[0].watch.gen) {
3129 lreq->watch_valid_thru = lreq->ping_sent;
3130 } else if (!lreq->last_error) {
3131 lreq->last_error = normalize_watch_error(req->r_result);
3132 queue_watch_error(lreq);
3135 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
3136 lreq->register_gen, req->r_ops[0].watch.gen);
3140 mutex_unlock(&lreq->lock);
3141 linger_put(lreq);
3144 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
3146 struct ceph_osd_client *osdc = lreq->osdc;
3155 lreq->ping_sent = jiffies;
3156 dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
3157 __func__, lreq, lreq->linger_id, lreq->ping_sent,
3158 lreq->register_gen);
3160 if (lreq->ping_req) {
3161 if (lreq->ping_req->r_osd)
3162 cancel_linger_request(lreq->ping_req);
3163 ceph_osdc_put_request(lreq->ping_req);
3169 target_copy(&req->r_t, &lreq->t);
3170 osd_req_op_watch_init(req, 0, CEPH_OSD_WATCH_OP_PING, lreq->linger_id,
3171 lreq->register_gen);
3177 req->r_priv = linger_get(lreq);
3179 lreq->ping_req = req;
3184 link_request(lreq->osd, req);
3188 static void linger_submit(struct ceph_osd_linger_request *lreq)
3190 struct ceph_osd_client *osdc = lreq->osdc;
3194 linger_register(lreq);
3196 calc_target(osdc, &lreq->t, false);
3197 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3198 link_linger(osd, lreq);
3200 send_linger(lreq);
3204 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
3206 struct ceph_osd_client *osdc = lreq->osdc;
3212 lreq->linger_id);
3216 WARN_ON(lookup_lreq != lreq);
3217 erase_linger_mc(&osdc->linger_map_checks, lreq);
3218 linger_put(lreq);
3222 * @lreq has to be both registered and linked.
3224 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
3226 if (lreq->ping_req && lreq->ping_req->r_osd)
3227 cancel_linger_request(lreq->ping_req);
3228 if (lreq->reg_req && lreq->reg_req->r_osd)
3229 cancel_linger_request(lreq->reg_req);
3230 cancel_linger_map_check(lreq);
3231 unlink_linger(lreq->osd, lreq);
3232 linger_unregister(lreq);
3235 static void linger_cancel(struct ceph_osd_linger_request *lreq)
3237 struct ceph_osd_client *osdc = lreq->osdc;
3240 if (__linger_registered(lreq))
3241 __linger_cancel(lreq);
3245 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
3247 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
3249 struct ceph_osd_client *osdc = lreq->osdc;
3255 if (lreq->register_gen) {
3256 lreq->map_dne_bound = map->epoch;
3257 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
3258 lreq, lreq->linger_id);
3260 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
3261 __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3265 if (lreq->map_dne_bound) {
3266 if (map->epoch >= lreq->map_dne_bound) {
3269 lreq->linger_id);
3270 linger_reg_commit_complete(lreq, -ENOENT);
3271 __linger_cancel(lreq);
3274 send_linger_map_check(lreq);
3281 struct ceph_osd_linger_request *lreq;
3287 lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
3288 if (!lreq) {
3293 dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
3294 __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3296 if (!lreq->map_dne_bound)
3297 lreq->map_dne_bound = greq->u.newest;
3298 erase_linger_mc(&osdc->linger_map_checks, lreq);
3299 check_linger_pool_dne(lreq);
3301 linger_put(lreq);
3306 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
3308 struct ceph_osd_client *osdc = lreq->osdc;
3315 lreq->linger_id);
3317 WARN_ON(lookup_lreq != lreq);
3321 linger_get(lreq);
3322 insert_linger_mc(&osdc->linger_map_checks, lreq);
3324 linger_map_check_cb, lreq->linger_id);
3328 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
3332 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3333 ret = wait_for_completion_killable(&lreq->reg_commit_wait);
3334 return ret ?: lreq->reg_commit_error;
3337 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq,
3342 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3343 left = wait_for_completion_killable_timeout(&lreq->notify_finish_wait,
3348 left = lreq->notify_finish_error; /* completed */
3400 struct ceph_osd_linger_request *lreq =
3403 dout(" lreq %p linger_id %llu is served by osd%d\n",
3404 lreq, lreq->linger_id, osd->o_osd);
3407 mutex_lock(&lreq->lock);
3408 if (lreq->is_watch && lreq->committed && !lreq->last_error)
3409 send_linger_ping(lreq);
3410 mutex_unlock(&lreq->lock);
3834 recalc_linger_target(struct ceph_osd_linger_request *lreq)
3836 struct ceph_osd_client *osdc = lreq->osdc;
3839 ct_res = calc_target(osdc, &lreq->t, true);
3843 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3844 if (osd != lreq->osd) {
3845 unlink_linger(lreq->osd, lreq);
3846 link_linger(osd, lreq);
3868 struct ceph_osd_linger_request *lreq =
3874 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3875 lreq->linger_id);
3876 ct_res = recalc_linger_target(lreq);
3881 pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3887 cancel_linger_map_check(lreq);
3893 if (list_empty(&lreq->scan_item))
3894 list_add_tail(&lreq->scan_item, need_resend_linger);
3897 list_del_init(&lreq->scan_item);
3898 check_linger_pool_dne(lreq);
4008 struct ceph_osd_linger_request *lreq, *nlreq;
4046 list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
4047 if (!osd_homeless(lreq->osd))
4048 send_linger(lreq);
4050 list_del_init(&lreq->scan_item);
4194 struct ceph_osd_linger_request *lreq =
4197 send_linger(lreq);
4488 struct ceph_osd_linger_request *lreq;
4517 lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
4518 if (!lreq) {
4524 mutex_lock(&lreq->lock);
4525 dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
4526 opcode, cookie, lreq, lreq->is_watch);
4528 if (!lreq->last_error) {
4529 lreq->last_error = -ENOTCONN;
4530 queue_watch_error(lreq);
4532 } else if (!lreq->is_watch) {
4534 if (lreq->notify_id && lreq->notify_id != notify_id) {
4535 dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
4536 lreq->notify_id, notify_id);
4537 } else if (!completion_done(&lreq->notify_finish_wait)) {
4542 if (lreq->preply_pages) {
4545 *lreq->preply_pages = data->pages;
4546 *lreq->preply_len = data->length;
4550 lreq->notify_finish_error = return_code;
4551 complete_all(&lreq->notify_finish_wait);
4555 lwork = lwork_alloc(lreq, do_watch_notify);
4570 mutex_unlock(&lreq->lock);
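handle_watch_notify() (lines 4488-4570) is where incoming watch-notify messages are dispatched: for a registered watch it queues do_watch_notify()/do_watch_error() work, and for an in-flight notify it captures the reply pages and completes notify_finish_wait. On the watcher side, a notify callback typically answers with the exported ceph_osdc_notify_ack() so that the notifier's ceph_osdc_notify() can complete. A hedged sketch follows; the context struct and handler name are invented, while the exported call and callback signature come from include/linux/ceph/osd_client.h.

#include <linux/ceph/osd_client.h>
#include <linux/printk.h>

/* hypothetical per-watch context kept by the caller */
struct my_watch_ctx {
        struct ceph_osd_client *osdc;
        struct ceph_object_id *oid;
        struct ceph_object_locator *oloc;
};

/* registered as the rados_watchcb2_t; runs from osdc->notify_wq */
static void my_notify_handler(void *arg, u64 notify_id, u64 cookie,
                              u64 notifier_id, void *data, size_t data_len)
{
        struct my_watch_ctx *ctx = arg;
        int ret;

        /* ... act on the payload in data/data_len ... */

        /* empty-payload ack; this is what lets ceph_osdc_notify() finish */
        ret = ceph_osdc_notify_ack(ctx->osdc, ctx->oid, ctx->oloc,
                                   notify_id, cookie, NULL, 0);
        if (ret)
                pr_err("notify_ack failed: %d\n", ret);
}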
4693 struct ceph_osd_linger_request *lreq;
4696 lreq = linger_alloc(osdc);
4697 if (!lreq)
4700 lreq->is_watch = true;
4701 lreq->wcb = wcb;
4702 lreq->errcb = errcb;
4703 lreq->data = data;
4704 lreq->watch_valid_thru = jiffies;
4706 ceph_oid_copy(&lreq->t.base_oid, oid);
4707 ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4708 lreq->t.flags = CEPH_OSD_FLAG_WRITE;
4709 ktime_get_real_ts64(&lreq->mtime);
4711 linger_submit(lreq);
4712 ret = linger_reg_commit_wait(lreq);
4714 linger_cancel(lreq);
4718 return lreq;
4721 linger_put(lreq);
4734 struct ceph_osd_linger_request *lreq)
4744 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4745 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4749 lreq->linger_id, 0);
4756 linger_cancel(lreq);
4757 linger_put(lreq);
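Lines 4693-4757 are the exported entry points: ceph_osdc_watch() allocates an lreq, marks it is_watch, submits it and waits for the registration commit, returning the lreq as the caller's watch handle; ceph_osdc_unwatch() issues the unwatch op and then cancels and drops the lreq. A caller-side sketch, with invented function names (the exported functions and the rados_watchcb2_t/rados_watcherrcb_t typedefs are declared in include/linux/ceph/osd_client.h):

#include <linux/ceph/osd_client.h>
#include <linux/err.h>

static void my_watch_cb(void *arg, u64 notify_id, u64 cookie,
                        u64 notifier_id, void *data, size_t data_len)
{
        /* handle the notify; normally ack it as in the sketch above */
}

static void my_watch_errcb(void *arg, u64 cookie, int err)
{
        /* watch lost (e.g. -ENOTCONN); the caller usually re-watches */
}

static int my_setup_watch(struct ceph_osd_client *osdc,
                          struct ceph_object_id *oid,
                          struct ceph_object_locator *oloc,
                          struct ceph_osd_linger_request **pwatch)
{
        struct ceph_osd_linger_request *handle;

        handle = ceph_osdc_watch(osdc, oid, oloc, my_watch_cb,
                                 my_watch_errcb, NULL);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        *pwatch = handle;
        return 0;               /* later: ceph_osdc_unwatch(osdc, *pwatch) */
}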
4852 struct ceph_osd_linger_request *lreq;
4861 lreq = linger_alloc(osdc);
4862 if (!lreq)
4865 lreq->request_pl = ceph_pagelist_alloc(GFP_NOIO);
4866 if (!lreq->request_pl) {
4871 ret = ceph_pagelist_encode_32(lreq->request_pl, 1); /* prot_ver */
4872 ret |= ceph_pagelist_encode_32(lreq->request_pl, timeout);
4873 ret |= ceph_pagelist_encode_32(lreq->request_pl, payload_len);
4874 ret |= ceph_pagelist_append(lreq->request_pl, payload, payload_len);
4881 lreq->notify_id_pages = ceph_alloc_page_vector(1, GFP_NOIO);
4882 if (IS_ERR(lreq->notify_id_pages)) {
4883 ret = PTR_ERR(lreq->notify_id_pages);
4884 lreq->notify_id_pages = NULL;
4888 lreq->preply_pages = preply_pages;
4889 lreq->preply_len = preply_len;
4891 ceph_oid_copy(&lreq->t.base_oid, oid);
4892 ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4893 lreq->t.flags = CEPH_OSD_FLAG_READ;
4895 linger_submit(lreq);
4896 ret = linger_reg_commit_wait(lreq);
4898 ret = linger_notify_finish_wait(lreq,
4901 dout("lreq %p failed to initiate notify %d\n", lreq, ret);
4903 linger_cancel(lreq);
4905 linger_put(lreq);
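ceph_osdc_notify() (lines 4852-4905) encodes the notify payload into a pagelist, allocates a page for the returned notify_id, submits the lreq read-only, and waits first for the registration commit and then for the watchers to acknowledge (or for the timeout). A hedged caller sketch; the wrapper name and buffers are invented, the timeout is passed in seconds here on the assumption that it matches existing callers, and any reply pages handed back belong to the caller and must be released:

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>

static int my_send_notify(struct ceph_osd_client *osdc,
                          struct ceph_object_id *oid,
                          struct ceph_object_locator *oloc,
                          void *payload, u32 payload_len)
{
        struct page **reply_pages = NULL;
        size_t reply_len = 0;
        int ret;

        ret = ceph_osdc_notify(osdc, oid, oloc, payload, payload_len,
                               5 /* timeout, assumed seconds */,
                               &reply_pages, &reply_len);

        /* reply pages (per-watcher acks), if any, are ours to free */
        if (reply_pages)
                ceph_release_page_vector(reply_pages,
                                         calc_pages_for(0, reply_len));
        return ret;
}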
4916 struct ceph_osd_linger_request *lreq)
4922 mutex_lock(&lreq->lock);
4923 stamp = lreq->watch_valid_thru;
4924 if (!list_empty(&lreq->pending_lworks)) {
4926 list_first_entry(&lreq->pending_lworks,
4934 dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
4935 lreq, lreq->linger_id, age, lreq->last_error);
4937 ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
4939 mutex_unlock(&lreq->lock);
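ceph_osdc_watch_check() (lines 4916-4939) lets the owner of a watch handle poll its health: a negative return is the recorded last_error, while a positive return is one more than the number of milliseconds since the watch was last known good (the matching ping pong at line 3129 advances watch_valid_thru, and an old undelivered notify can push the effective timestamp back). A hypothetical polling caller:

#include <linux/ceph/osd_client.h>
#include <linux/printk.h>

static bool my_watch_is_healthy(struct ceph_osd_client *osdc,
                                struct ceph_osd_linger_request *watch)
{
        int ret = ceph_osdc_watch_check(osdc, watch);

        if (ret < 0) {
                pr_warn("watch broken (%d), re-watch needed\n", ret);
                return false;
        }

        pr_debug("watch last verified ~%d ms ago\n", ret - 1);
        return true;
}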