Lines Matching refs:clt

17 #include "rtrs-clt.h"
19 #include "rtrs-clt-trace.h"
52 static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt)
58 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry)
69 __rtrs_get_permit(struct rtrs_clt_sess *clt, enum rtrs_clt_con_type con_type)
71 size_t max_depth = clt->queue_depth;
83 bit = find_first_zero_bit(clt->permits_map, max_depth);
86 } while (test_and_set_bit_lock(bit, clt->permits_map));
88 permit = get_permit(clt, bit);
96 static inline void __rtrs_put_permit(struct rtrs_clt_sess *clt,
99 clear_bit_unlock(permit->mem_id, clt->permits_map);
104 * @clt: Current session
116 struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *clt,
123 permit = __rtrs_get_permit(clt, con_type);
128 prepare_to_wait(&clt->permits_wait, &wait,
130 permit = __rtrs_get_permit(clt, con_type);
137 finish_wait(&clt->permits_wait, &wait);
145 * @clt: Current session
151 void rtrs_clt_put_permit(struct rtrs_clt_sess *clt,
154 if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
157 __rtrs_put_permit(clt, permit);
160 * rtrs_clt_get_permit() adds itself to the &clt->permits_wait list
162 * it must have added itself to &clt->permits_wait before
166 if (waitqueue_active(&clt->permits_wait))
167 wake_up(&clt->permits_wait);
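The matched lines above (source lines 69-167) are the RTRS client's permit allocator: a free slot is claimed lock-free from a bitmap, and a waiter is woken only when the waitqueue is known to be populated. A minimal sketch of that pattern, reconstructed from the fragments; get_permit() and the struct fields are taken on trust from the matched lines, the sketch_* function names are hypothetical:

    /* Sketch: lock-free permit claim (get side). */
    static struct rtrs_permit *sketch_get_permit(struct rtrs_clt_sess *clt)
    {
            size_t max_depth = clt->queue_depth;
            int bit;

            do {
                    bit = find_first_zero_bit(clt->permits_map, max_depth);
                    if (bit >= max_depth)
                            return NULL;    /* all permits in use */
                    /* retry if another CPU claimed the slot first */
            } while (test_and_set_bit_lock(bit, clt->permits_map));

            return get_permit(clt, bit);    /* map slot index to permit */
    }

    /* Sketch: release side, waking a sleeper only when one exists. */
    static void sketch_put_permit(struct rtrs_clt_sess *clt,
                                  struct rtrs_permit *permit)
    {
            clear_bit_unlock(permit->mem_id, clt->permits_map);
            if (waitqueue_active(&clt->permits_wait))
                    wake_up(&clt->permits_wait);
    }

The bare waitqueue_active() check is safe here because, per the comment fragments at source lines 160-162, a sleeping rtrs_clt_get_permit() adds itself to permits_wait before re-checking the bitmap.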
547 rtrs_err(clt_path->clt,
613 rtrs_err(clt_path->clt, "RDMA failed: %s\n",
691 rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode);
735 rtrs_err(clt_path->clt, "post_recv_io(), err: %d\n",
747 struct rtrs_clt_sess *clt;
752 * rtrs_clt_get_next_path_or_null - get the next clt path from the list, or return NULL
756 * The next clt path is returned in round-robin fashion, i.e. the list head is skipped,
784 struct rtrs_clt_sess *clt;
786 clt = it->clt;
794 ppcpu_path = this_cpu_ptr(clt->pcpu_path);
797 path = list_first_or_null_rcu(&clt->paths_list,
800 path = rtrs_clt_get_next_path_or_null(&clt->paths_list, path);
819 struct rtrs_clt_sess *clt = it->clt;
824 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
870 struct rtrs_clt_sess *clt = it->clt;
875 list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
900 static inline void path_it_init(struct path_it *it, struct rtrs_clt_sess *clt)
903 it->clt = clt;
906 if (clt->mp_policy == MP_POLICY_RR)
908 else if (clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
969 req->mp_policy = clt_path->clt->mp_policy;
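path_it_init() (source line 900) plugs in one of two next-path policies, MP_POLICY_RR or MP_POLICY_MIN_INFLIGHT. The round-robin variant keeps a per-CPU cursor into the RCU-protected paths_list, which is why the bare list head is skipped. A sketch assembled from the matched fragments; only the names visible above are from the source, the function name and body layout are reconstruction:

    static struct rtrs_clt_path *sketch_next_path_rr(struct path_it *it)
    {
            struct rtrs_clt_sess *clt = it->clt;
            struct rtrs_clt_path __rcu **ppcpu_path;
            struct rtrs_clt_path *path;

            ppcpu_path = this_cpu_ptr(clt->pcpu_path);
            path = rcu_dereference(*ppcpu_path);
            if (!path)
                    /* first pick on this CPU: start from the head */
                    path = list_first_or_null_rcu(&clt->paths_list,
                                                  typeof(*path), s.entry);
            else
                    /* advance; wraps around, never lands on the bare head */
                    path = rtrs_clt_get_next_path_or_null(&clt->paths_list,
                                                          path);

            rcu_assign_pointer(*ppcpu_path, path);
            return path;
    }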
1279 * @clt: clt context
1282 static int rtrs_clt_failover_req(struct rtrs_clt_sess *clt,
1291 for (path_it_init(&it, clt);
1292 (alive_path = it.next_path(&it)) && it.i < it.clt->paths_num;
1317 struct rtrs_clt_sess *clt = clt_path->clt;
1335 err = rtrs_clt_failover_req(clt, req);
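rtrs_clt_failover_req() (source line 1282) reuses the same iterator to retry a failed request on another path: it asks the policy for at most paths_num candidates and resends on the first one still connected. A hedged sketch of that loop; sketch_resend() is a hypothetical helper, and path_it_deinit() is assumed as the counterpart of path_it_init():

    struct rtrs_clt_path *alive_path;
    struct path_it it;
    int err = -ECONNABORTED;

    rcu_read_lock();
    for (path_it_init(&it, clt);
         (alive_path = it.next_path(&it)) && it.i < it.clt->paths_num;
         it.i++) {
            if (READ_ONCE(alive_path->state) != RTRS_CLT_CONNECTED)
                    continue;       /* skip paths that are down */
            err = sketch_resend(alive_path, fail_req);  /* hypothetical */
            break;
    }
    path_it_deinit(&it);
    rcu_read_unlock();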
1406 static int alloc_permits(struct rtrs_clt_sess *clt)
1411 clt->permits_map = bitmap_zalloc(clt->queue_depth, GFP_KERNEL);
1412 if (!clt->permits_map) {
1416 clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL);
1417 if (!clt->permits) {
1421 chunk_bits = ilog2(clt->queue_depth - 1) + 1;
1422 for (i = 0; i < clt->queue_depth; i++) {
1425 permit = get_permit(clt, i);
1433 bitmap_free(clt->permits_map);
1434 clt->permits_map = NULL;
1439 static void free_permits(struct rtrs_clt_sess *clt)
1441 if (clt->permits_map)
1442 wait_event(clt->permits_wait,
1443 bitmap_empty(clt->permits_map, clt->queue_depth));
1445 bitmap_free(clt->permits_map);
1446 clt->permits_map = NULL;
1447 kfree(clt->permits);
1448 clt->permits = NULL;
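alloc_permits() (source line 1406) backs the allocator sketched earlier: a zeroed bitmap tracks slot ownership and a flat kcalloc() array holds the permits themselves, while free_permits() waits for the bitmap to empty before tearing anything down. chunk_bits = ilog2(queue_depth - 1) + 1 is the number of bits needed to index queue_depth slots (e.g. queue_depth = 128 gives ilog2(127) + 1 = 7). A condensed sketch of the allocation and unwinding, reconstructed from the matched lines (permit_size() is taken on trust):

    clt->permits_map = bitmap_zalloc(clt->queue_depth, GFP_KERNEL);
    if (!clt->permits_map)
            return -ENOMEM;

    clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL);
    if (!clt->permits) {
            bitmap_free(clt->permits_map);  /* unwind, as lines 1433-1434 do */
            clt->permits_map = NULL;
            return -ENOMEM;
    }

    /* bits needed to index queue_depth slots, one chunk per permit */
    chunk_bits = ilog2(clt->queue_depth - 1) + 1;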
1470 clt_path->clt->max_segments =
1471 min(clt_path->max_pages_per_mr, clt_path->clt->max_segments);
1511 struct rtrs_clt_sess *clt;
1515 clt = clt_path->clt;
1516 delay_ms = clt->reconnect_delay_sec * 1000;
1523 static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
1566 strscpy(clt_path->s.sessname, clt->sessname,
1568 clt_path->clt = clt;
1664 rtrs_wrn(clt_path->clt,
1788 struct rtrs_clt_sess *clt = clt_path->clt;
1810 uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
1814 rtrs_err(clt, "rdma_connect_locked(): %d\n", err);
1823 struct rtrs_clt_sess *clt = clt_path->clt;
1832 rtrs_err(clt, "Invalid RTRS connection response\n");
1836 rtrs_err(clt, "Invalid RTRS magic\n");
1841 rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n",
1847 rtrs_err(clt, "Invalid RTRS message: errno %d\n",
1855 rtrs_err(clt, "Error: queue depth changed\n");
1861 rtrs_err(clt,
1889 mutex_lock(&clt->paths_mutex);
1890 clt->queue_depth = clt_path->queue_depth;
1891 clt->max_io_size = min_not_zero(clt_path->max_io_size,
1892 clt->max_io_size);
1893 mutex_unlock(&clt->paths_mutex);
2102 struct rtrs_clt_sess *clt = clt_path->clt;
2112 mutex_lock(&clt->paths_ev_mutex);
2113 up = ++clt->paths_up;
2119 if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num)
2120 clt->paths_up = clt->paths_num;
2122 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED);
2123 mutex_unlock(&clt->paths_ev_mutex);
2133 struct rtrs_clt_sess *clt = clt_path->clt;
2139 mutex_lock(&clt->paths_ev_mutex);
2140 WARN_ON(!clt->paths_up);
2141 if (--clt->paths_up == 0)
2142 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED);
2143 mutex_unlock(&clt->paths_ev_mutex);
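Source lines 2102-2143 implement link-event accounting around a sentinel: alloc_clt() (line 2707 below) starts paths_up at MAX_PATHS_NUM, so RECONNECTED cannot fire path by path while the initial paths come up; once the counter reaches MAX_PATHS_NUM + paths_num it is clamped to the real count, and from then on a 1 -> 0 transition signals DISCONNECTED. A sketch; the up == 1 branch is an assumption filled in between the matched lines:

    mutex_lock(&clt->paths_ev_mutex);
    up = ++clt->paths_up;
    if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num)
            clt->paths_up = clt->paths_num; /* all initial paths are up */
    else if (up == 1)       /* assumed: first path back after an outage */
            clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED);
    mutex_unlock(&clt->paths_ev_mutex);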
2211 struct rtrs_clt_sess *clt = clt_path->clt;
2216 mutex_lock(&clt->paths_mutex);
2242 * [!CONNECTED path] clt->paths_num--
2244 * load clt->paths_num from 2 to 1
2249 * ends, because the expression i < clt->paths_num is false.
2251 clt->paths_num--;
2258 next = rtrs_clt_get_next_path_or_null(&clt->paths_list, clt_path);
2268 ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
2270 lockdep_is_held(&clt->paths_mutex)) != clt_path)
2296 mutex_unlock(&clt->paths_mutex);
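The removal path (source lines 2211-2296) also has to repair the per-CPU round-robin cursors: after the path is unlinked, any CPU still caching it is pointed at its successor, while racing I/O may legitimately move the cursor concurrently. A hedged sketch of that fixup; the cmpxchg() guard is an assumption based on the surrounding fragments:

    next = rtrs_clt_get_next_path_or_null(&clt->paths_list, clt_path);
    for_each_possible_cpu(cpu) {
            ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
            if (rcu_dereference_protected(*ppcpu_path,
                            lockdep_is_held(&clt->paths_mutex)) != clt_path)
                    continue;   /* this CPU never cached the dying path */
            /* racing I/O may move the cursor itself; don't clobber it */
            cmpxchg((void **)ppcpu_path, clt_path, next);
    }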
2301 struct rtrs_clt_sess *clt = clt_path->clt;
2303 mutex_lock(&clt->paths_mutex);
2304 clt->paths_num++;
2306 list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
2307 mutex_unlock(&clt->paths_mutex);
2388 rtrs_err(clt_path->clt, "Path info request send failed: %s\n",
2405 rtrs_err(clt_path->clt,
2417 rtrs_err(clt_path->clt,
2435 rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n",
2450 rtrs_err(clt_path->clt,
2455 rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len);
2477 rtrs_err(clt_path->clt, "Path info response recv failed: %s\n",
2484 rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
2492 rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n",
2499 rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
2542 rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %d\n", err);
2558 rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %d\n", err);
2608 rtrs_err(clt_path->clt,
2615 rtrs_err(clt_path->clt,
2631 struct rtrs_clt_sess *clt;
2636 clt = clt_path->clt;
2643 if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) {
2668 struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
2671 mutex_destroy(&clt->paths_ev_mutex);
2672 mutex_destroy(&clt->paths_mutex);
2673 kfree(clt);
2683 struct rtrs_clt_sess *clt;
2689 if (strlen(sessname) >= sizeof(clt->sessname))
2692 clt = kzalloc(sizeof(*clt), GFP_KERNEL);
2693 if (!clt)
2696 clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path));
2697 if (!clt->pcpu_path) {
2698 kfree(clt);
2702 clt->dev.class = &rtrs_clt_dev_class;
2703 clt->dev.release = rtrs_clt_dev_release;
2704 uuid_gen(&clt->paths_uuid);
2705 INIT_LIST_HEAD_RCU(&clt->paths_list);
2706 clt->paths_num = paths_num;
2707 clt->paths_up = MAX_PATHS_NUM;
2708 clt->port = port;
2709 clt->pdu_sz = pdu_sz;
2710 clt->max_segments = RTRS_MAX_SEGMENTS;
2711 clt->reconnect_delay_sec = reconnect_delay_sec;
2712 clt->max_reconnect_attempts = max_reconnect_attempts;
2713 clt->priv = priv;
2714 clt->link_ev = link_ev;
2715 clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
2716 strscpy(clt->sessname, sessname, sizeof(clt->sessname));
2717 init_waitqueue_head(&clt->permits_wait);
2718 mutex_init(&clt->paths_ev_mutex);
2719 mutex_init(&clt->paths_mutex);
2720 device_initialize(&clt->dev);
2722 err = dev_set_name(&clt->dev, "%s", sessname);
2730 dev_set_uevent_suppress(&clt->dev, true);
2731 err = device_add(&clt->dev);
2735 clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
2736 if (!clt->kobj_paths) {
2740 err = rtrs_clt_create_sysfs_root_files(clt);
2742 kobject_del(clt->kobj_paths);
2743 kobject_put(clt->kobj_paths);
2746 dev_set_uevent_suppress(&clt->dev, false);
2747 kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
2749 return clt;
2751 device_del(&clt->dev);
2753 free_percpu(clt->pcpu_path);
2754 put_device(&clt->dev);
2758 static void free_clt(struct rtrs_clt_sess *clt)
2760 free_percpu(clt->pcpu_path);
2763 * the release callback will free clt and destroy the mutexes on the last put
2765 device_unregister(&clt->dev);
2794 struct rtrs_clt_sess *clt;
2803 clt = alloc_clt(pathname, paths_num, port, pdu_sz, ops->priv,
2807 if (IS_ERR(clt)) {
2808 err = PTR_ERR(clt);
2814 clt_path = alloc_path(clt, &paths[i], nr_cpu_ids,
2822 list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
2844 err = alloc_permits(clt);
2848 return clt;
2851 list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
2856 rtrs_clt_destroy_sysfs_root(clt);
2857 free_clt(clt);
2866 * @clt: Session handle. Session is freed upon return.
2868 void rtrs_clt_close(struct rtrs_clt_sess *clt)
2873 rtrs_clt_destroy_sysfs_root(clt);
2876 list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
2881 free_permits(clt);
2882 free_clt(clt);
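Taken together, source lines 2683-2882 show the session lifecycle: alloc_clt() sets up the device and defaults, rtrs_clt_open() allocates one path per address plus the permit pool, and rtrs_clt_close() tears both down. A hedged caller-side sketch; the rtrs_clt_open() parameter list is inferred from the fields alloc_clt() fills in, and RTRS_IO_CON / RTRS_PERMIT_WAIT are assumed from the public rtrs.h header, so treat this as an approximation of the real prototypes:

    struct rtrs_clt_ops ops = {
            .priv    = my_priv,             /* hypothetical private data */
            .link_ev = my_link_event,       /* hypothetical event callback */
    };
    struct rtrs_clt_sess *clt;
    struct rtrs_permit *permit;

    clt = rtrs_clt_open(&ops, "my_session", paths, paths_num, port,
                        pdu_sz, reconnect_delay_sec,
                        max_reconnect_attempts, 0);
    if (IS_ERR(clt))
            return PTR_ERR(clt);

    permit = rtrs_clt_get_permit(clt, RTRS_IO_CON, RTRS_PERMIT_WAIT);
    /* ... issue I/O using the permit ... */
    rtrs_clt_put_permit(clt, permit);
    rtrs_clt_close(clt);    /* frees the session, see line 2866 */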
2945 void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value)
2947 clt->max_reconnect_attempts = (unsigned int)value;
2950 int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt)
2952 return (int)clt->max_reconnect_attempts;
2960 * @clt: Session
2980 struct rtrs_clt_sess *clt, struct rtrs_permit *permit,
3006 for (path_it_init(&it, clt);
3007 (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
3012 rtrs_wrn_rl(clt_path->clt,
3040 int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index)
3049 for (path_it_init(&it, clt);
3050 (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
3068 * @clt: session pointer
3074 int rtrs_clt_query(struct rtrs_clt_sess *clt, struct rtrs_attrs *attr)
3076 if (!rtrs_clt_is_connected(clt))
3079 attr->queue_depth = clt->queue_depth;
3080 attr->max_segments = clt->max_segments;
3082 attr->max_io_size = min_t(int, clt->max_io_size,
3083 clt->max_segments * SZ_4K);
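rtrs_clt_query() (source line 3074) only succeeds on a connected session and reports the negotiated limits; note that max_io_size is additionally clamped to max_segments * 4K (lines 3082-3083). A small hedged usage sketch; the struct rtrs_attrs field types and the printed format are assumptions:

    struct rtrs_attrs attr;
    int err;

    err = rtrs_clt_query(clt, &attr);
    if (err)
            return err;     /* session not connected */
    pr_info("queue_depth=%u max_io_size=%u max_segments=%u\n",
            attr.queue_depth, attr.max_io_size, attr.max_segments);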
3089 int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt,
3095 clt_path = alloc_path(clt, addr, nr_cpu_ids, 0);
3099 mutex_lock(&clt->paths_mutex);
3100 if (clt->paths_num == 0) {
3109 mutex_unlock(&clt->paths_mutex);