Lines matching refs:sess (references to the struct rnbd_clt_session pointer sess):
36 static inline bool rnbd_clt_get_sess(struct rnbd_clt_session *sess)
38 return refcount_inc_not_zero(&sess->refcount);
41 static void free_sess(struct rnbd_clt_session *sess);
43 static void rnbd_clt_put_sess(struct rnbd_clt_session *sess)
47 if (refcount_dec_and_test(&sess->refcount))
48 free_sess(sess);
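
Taken together, the first hits (lines 36-48) establish the session lifetime rule: rnbd_clt_get_sess() only succeeds while the count is still non-zero, and whoever drops the last reference in rnbd_clt_put_sess() frees the session. A minimal userspace sketch of the same pattern, using C11 atomics in place of the kernel's refcount_t (struct obj and its helpers are illustrative, not from the driver):

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdlib.h>

        struct obj {
                atomic_int refcount;    /* set to 1 by the creator */
        };

        /* Counterpart of refcount_inc_not_zero(): take a reference only
         * while the object is still alive; fail once it has hit zero. */
        static bool obj_get(struct obj *o)
        {
                int old = atomic_load(&o->refcount);

                while (old > 0)
                        if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
                                return true;
                return false;   /* a concurrent put already dropped it to 0 */
        }

        /* Counterpart of refcount_dec_and_test(): the dropper of the
         * last reference frees the object. */
        static void obj_put(struct obj *o)
        {
                if (atomic_fetch_sub(&o->refcount, 1) == 1)
                        free(o);
        }
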
63 rnbd_clt_put_sess(dev->sess);
76 struct rnbd_clt_session *sess = dev->sess;
92 dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
176 * @sess: Session to find a queue for
185 rnbd_get_cpu_qlist(struct rnbd_clt_session *sess, int cpu)
190 bit = find_next_bit(sess->cpu_queues_bm, nr_cpu_ids, cpu);
192 return per_cpu_ptr(sess->cpu_queues, bit);
195 bit = find_next_bit(sess->cpu_queues_bm, cpu, 0);
197 return per_cpu_ptr(sess->cpu_queues, bit);
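
rnbd_get_cpu_qlist() (lines 185-197) does a wrap-around scan of cpu_queues_bm: one find_next_bit() from the requested CPU to the end of the bitmap, then another from bit 0 up to that CPU. The same scan, sketched without the kernel bitmap API (find_set_bit_wrap() is a made-up name):

        #include <limits.h>

        #define LBITS ((int)(sizeof(unsigned long) * CHAR_BIT))

        /* Search [start, nbits) first, then wrap around to [0, start).
         * Returns the bit index, or -1 if no bit is set at all. */
        static int find_set_bit_wrap(const unsigned long *bm, int nbits, int start)
        {
                for (int i = start; i < nbits; i++)
                        if ((bm[i / LBITS] >> (i % LBITS)) & 1)
                                return i;
                for (int i = 0; i < start; i++)
                        if ((bm[i / LBITS] >> (i % LBITS)) & 1)
                                return i;
                return -1;
        }
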
210 * @sess: Session to rerun a queue on
223 static bool rnbd_rerun_if_needed(struct rnbd_clt_session *sess)
235 cpup = get_cpu_ptr(sess->cpu_rr);
236 for (cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(*cpup)); cpu_q;
237 cpu_q = rnbd_get_cpu_qlist(sess, nxt_cpu(cpu_q->cpu))) {
240 if (unlikely(!test_bit(cpu_q->cpu, sess->cpu_queues_bm)))
252 clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
270 put_cpu_ptr(sess->cpu_rr);
282 * @sess: Session to rerun the queues on
298 * one who observes sess->busy == 0) must wake up all remaining queues.
303 static void rnbd_rerun_all_if_idle(struct rnbd_clt_session *sess)
308 requeued = rnbd_rerun_if_needed(sess);
309 } while (atomic_read(&sess->busy) == 0 && requeued);
312 static struct rtrs_permit *rnbd_get_permit(struct rnbd_clt_session *sess,
318 permit = rtrs_clt_get_permit(sess->rtrs, con_type,
327 atomic_inc(&sess->busy);
332 static void rnbd_put_permit(struct rnbd_clt_session *sess,
335 rtrs_clt_put_permit(sess->rtrs, permit);
336 atomic_dec(&sess->busy);
341 rnbd_rerun_all_if_idle(sess);
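
Lines 312-341 couple the permit pool to the sess->busy counter: taking a permit marks the session busy, and returning one decrements the counter and, per the comment at line 298, the thread that sees the session go idle must rerun the stopped queues. A rough userspace rendering (pool_take(), pool_give() and rerun_stopped_queues() are hypothetical stand-ins):

        #include <stdatomic.h>
        #include <stddef.h>

        struct permit;
        struct permit *pool_take(void);         /* hypothetical permit pool */
        void pool_give(struct permit *p);
        void rerun_stopped_queues(void);        /* hypothetical queue rerun */

        static atomic_int busy;                 /* in-flight permit count */

        static struct permit *get_permit(void)
        {
                struct permit *p = pool_take();

                if (p)
                        atomic_fetch_add(&busy, 1);     /* no longer idle */
                return p;
        }

        static void put_permit(struct permit *p)
        {
                pool_give(p);
                /* Whoever drops busy to 0 owns the wakeup: nobody else
                 * is left in flight to deliver it (cf. line 298). */
                if (atomic_fetch_sub(&busy, 1) == 1)
                        rerun_stopped_queues();
        }
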
344 static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
351 permit = rnbd_get_permit(sess, con_type,
373 static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
376 rnbd_put_permit(sess, iu->permit);
382 struct rnbd_clt_session *sess = dev->sess;
386 rnbd_put_permit(sess, iu->permit);
457 rnbd_put_iu(dev->sess, iu);
463 struct rnbd_clt_session *sess = dev->sess;
472 iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
485 err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 0, NULL, 0,
489 rnbd_put_iu(sess, iu);
494 rnbd_put_iu(sess, iu);
522 rnbd_put_iu(dev->sess, iu);
530 struct rnbd_clt_session *sess = iu->sess;
533 sess->ver = min_t(u8, rsp->ver, RNBD_PROTO_VER_MAJOR);
537 rnbd_put_iu(sess, iu);
538 rnbd_clt_put_sess(sess);
543 struct rnbd_clt_session *sess = dev->sess;
557 iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
573 err = send_usr_msg(sess->rtrs, READ, iu,
578 rnbd_put_iu(sess, iu);
584 rnbd_put_iu(sess, iu);
588 static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
603 iu = rnbd_get_iu(sess, RTRS_ADMIN_CON, RTRS_PERMIT_WAIT);
610 iu->sess = sess;
617 if (!rnbd_clt_get_sess(sess)) {
627 err = send_usr_msg(sess->rtrs, READ, iu,
631 rnbd_clt_put_sess(sess);
633 rnbd_put_iu(sess, iu);
639 rnbd_put_iu(sess, iu);
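
send_msg_sess_info() (lines 588-639) shows how an asynchronous message pins the session: a reference is taken before the message is posted (line 617), the completion handler drops it (line 538), and if posting fails the sender drops the reference itself (line 631) because the completion will never run. Sketched with the obj_get()/obj_put() helpers from the refcount sketch above (post_message() is hypothetical):

        int post_message(struct obj *o);        /* hypothetical async send */

        static int send_async(struct obj *o)
        {
                if (!obj_get(o))        /* session is already dying */
                        return -1;
                if (post_message(o)) {
                        obj_put(o);     /* completion will never fire */
                        return -1;
                }
                return 0;       /* success: the completion calls obj_put() */
        }
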
643 static void set_dev_states_to_disconnected(struct rnbd_clt_session *sess)
647 mutex_lock(&sess->lock);
648 list_for_each_entry(dev, &sess->devs_list, list) {
656 mutex_unlock(&sess->lock);
659 static void remap_devs(struct rnbd_clt_session *sess)
676 err = send_msg_sess_info(sess, NO_WAIT);
678 pr_err("send_msg_sess_info(\"%s\"): %d\n", sess->sessname, err);
682 err = rtrs_clt_query(sess->rtrs, &attrs);
684 pr_err("rtrs_clt_query(\"%s\"): %d\n", sess->sessname, err);
687 mutex_lock(&sess->lock);
688 sess->max_io_size = attrs.max_io_size;
690 list_for_each_entry(dev, &sess->devs_list, list) {
710 mutex_unlock(&sess->lock);
715 struct rnbd_clt_session *sess = priv;
719 set_dev_states_to_disconnected(sess);
722 remap_devs(sess);
726 ev, sess->sessname);
744 static void destroy_mq_tags(struct rnbd_clt_session *sess)
746 if (sess->tag_set.tags)
747 blk_mq_free_tag_set(&sess->tag_set);
750 static inline void wake_up_rtrs_waiters(struct rnbd_clt_session *sess)
752 sess->rtrs_ready = true;
753 wake_up_all(&sess->rtrs_waitq);
756 static void close_rtrs(struct rnbd_clt_session *sess)
760 if (!IS_ERR_OR_NULL(sess->rtrs)) {
761 rtrs_clt_close(sess->rtrs);
762 sess->rtrs = NULL;
763 wake_up_rtrs_waiters(sess);
767 static void free_sess(struct rnbd_clt_session *sess)
769 WARN_ON(!list_empty(&sess->devs_list));
773 close_rtrs(sess);
774 destroy_mq_tags(sess);
775 if (!list_empty(&sess->list)) {
777 list_del(&sess->list);
780 free_percpu(sess->cpu_queues);
781 free_percpu(sess->cpu_rr);
782 mutex_destroy(&sess->lock);
783 kfree(sess);
788 struct rnbd_clt_session *sess;
791 sess = kzalloc_node(sizeof(*sess), GFP_KERNEL, NUMA_NO_NODE);
792 if (!sess)
794 strlcpy(sess->sessname, sessname, sizeof(sess->sessname));
795 atomic_set(&sess->busy, 0);
796 mutex_init(&sess->lock);
797 INIT_LIST_HEAD(&sess->devs_list);
798 INIT_LIST_HEAD(&sess->list);
799 bitmap_zero(sess->cpu_queues_bm, NR_CPUS);
800 init_waitqueue_head(&sess->rtrs_waitq);
801 refcount_set(&sess->refcount, 1);
803 sess->cpu_queues = alloc_percpu(struct rnbd_cpu_qlist);
804 if (!sess->cpu_queues) {
808 rnbd_init_cpu_qlists(sess->cpu_queues);
815 sess->cpu_rr = alloc_percpu(int);
816 if (!sess->cpu_rr) {
821 *per_cpu_ptr(sess->cpu_rr, cpu) = cpu;
823 return sess;
826 free_sess(sess);
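
free_sess() (lines 767-783) is deliberately tolerant of a half-built session: every teardown step checks whether its resource exists (IS_ERR_OR_NULL for the transport, a NULL tag-set check, list_empty before unlinking), which is what lets alloc_sess() bail out with a single free_sess(sess) from any failure point (line 826). The shape of that idiom in plain C (close_transport() is hypothetical):

        #include <stdlib.h>

        struct session {
                void *transport;        /* NULL until the connection is up */
                int  *cpu_rr;           /* NULL until allocated */
        };

        void close_transport(void *t);  /* hypothetical */

        static void free_session(struct session *s)
        {
                /* Every step degrades to a no-op if that stage of
                 * construction never happened, so error paths and
                 * normal teardown can share one function. */
                if (s->transport)
                        close_transport(s->transport);
                free(s->cpu_rr);        /* free(NULL) is defined as a no-op */
                free(s);
        }
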
831 static int wait_for_rtrs_connection(struct rnbd_clt_session *sess)
833 wait_event(sess->rtrs_waitq, sess->rtrs_ready);
834 if (IS_ERR_OR_NULL(sess->rtrs))
840 static void wait_for_rtrs_disconnection(struct rnbd_clt_session *sess)
846 prepare_to_wait(&sess->rtrs_waitq, &wait, TASK_UNINTERRUPTIBLE);
847 if (IS_ERR_OR_NULL(sess->rtrs)) {
848 finish_wait(&sess->rtrs_waitq, &wait);
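
wait_for_rtrs_connection() and wait_for_rtrs_disconnection() (lines 831-848) are classic sleep-until-condition waits on rtrs_waitq. A userspace analogue with a condition variable (field and helper names are illustrative):

        #include <pthread.h>
        #include <stdbool.h>

        struct conn_state {
                pthread_mutex_t mtx;
                pthread_cond_t  cond;
                bool            connected;
        };

        /* Block until another thread clears 'connected' and signals. */
        static void wait_disconnected(struct conn_state *c)
        {
                pthread_mutex_lock(&c->mtx);
                while (c->connected)
                        pthread_cond_wait(&c->cond, &c->mtx);
                pthread_mutex_unlock(&c->mtx);
        }

        /* The wake side, mirroring wake_up_rtrs_waiters() at line 750. */
        static void mark_disconnected(struct conn_state *c)
        {
                pthread_mutex_lock(&c->mtx);
                c->connected = false;
                pthread_cond_broadcast(&c->cond);
                pthread_mutex_unlock(&c->mtx);
        }
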
866 struct rnbd_clt_session *sess, *sn;
870 list_for_each_entry_safe(sess, sn, &sess_list, list) {
871 if (strcmp(sessname, sess->sessname))
874 if (sess->rtrs_ready && IS_ERR_OR_NULL(sess->rtrs))
880 if (rnbd_clt_get_sess(sess)) {
885 err = wait_for_rtrs_connection(sess);
887 rnbd_clt_put_sess(sess);
894 return sess;
900 wait_for_rtrs_disconnection(sess);
914 struct rnbd_clt_session *sess = NULL;
917 sess = __find_and_get_sess(sessname);
918 if (!sess) {
919 sess = alloc_sess(sessname);
920 if (IS_ERR(sess)) {
922 return sess;
924 list_add(&sess->list, &sess_list);
930 return sess;
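
find_or_create_sess() (lines 914-930) is the usual find-or-create-under-lock: look the name up in sess_list while holding the list lock, and only allocate and insert a fresh session if no live one was found. A simplified single-lock sketch (find_locked() and the list helpers are hypothetical):

        #include <pthread.h>
        #include <stddef.h>

        struct session;
        struct session *find_locked(const char *name);   /* takes a ref on hit */
        struct session *alloc_session(const char *name); /* refcount starts at 1 */
        void register_locked(struct session *s);

        static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

        static struct session *find_or_create(const char *name)
        {
                struct session *s;

                pthread_mutex_lock(&list_lock);
                s = find_locked(name);
                if (!s) {
                        s = alloc_session(name);
                        if (s)
                                register_locked(s);     /* visible to later lookups */
                }
                pthread_mutex_unlock(&list_lock);
                return s;
        }
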
1001 struct rtrs_clt *rtrs = dev->sess->rtrs;
1064 struct rnbd_clt_session *sess = dev->sess;
1070 cpu_q = get_cpu_ptr(sess->cpu_queues);
1077 need_set = !test_bit(cpu_q->cpu, sess->cpu_queues_bm);
1079 set_bit(cpu_q->cpu, sess->cpu_queues_bm);
1085 if (likely(atomic_read(&sess->busy))) {
1093 clear_bit(cpu_q->cpu, sess->cpu_queues_bm);
1100 put_cpu_ptr(sess->cpu_queues);
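
The requeue path at lines 1064-1100 holds the subtle part of the scheme: a stopped queue may only be parked on the per-CPU list while the session still has requests in flight, because the wakeup is driven by a completion (put_permit above). If sess->busy turns out to be zero after the bit was set, no completion is coming, so the bit is cleared again and the queue is restarted directly. Condensed (the helpers are placeholders for the set_bit/clear_bit calls above):

        #include <stdatomic.h>
        #include <stdbool.h>

        extern atomic_int busy;                 /* see the permit sketch */
        void mark_cpu_pending(int cpu);         /* stands in for set_bit() */
        void clear_cpu_pending(int cpu);        /* stands in for clear_bit() */

        /* Returns true if the queue was parked and a completion will
         * wake it; false if the caller must rerun the queue itself. */
        static bool park_queue(int cpu)
        {
                mark_cpu_pending(cpu);
                if (atomic_load(&busy))
                        return true;    /* an in-flight request will wake us */
                /* Session went idle under us: nobody is left to deliver
                 * a wakeup, so undo the parking and run the queue now. */
                clear_cpu_pending(cpu);
                return false;
        }
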
1132 iu->permit = rnbd_get_permit(dev->sess, RTRS_IO_CON,
1145 rnbd_put_permit(dev->sess, iu->permit);
1149 rnbd_put_permit(dev->sess, iu->permit);
1168 static int setup_mq_tags(struct rnbd_clt_session *sess)
1170 struct blk_mq_tag_set *tag_set = &sess->tag_set;
1174 tag_set->queue_depth = sess->queue_depth;
1189 struct rnbd_clt_session *sess;
1195 sess = find_or_create_sess(sessname, &first);
1196 if (sess == ERR_PTR(-ENOMEM))
1199 return sess;
1202 .priv = sess,
1208 sess->rtrs = rtrs_clt_open(&rtrs_ops, sessname,
1214 if (IS_ERR(sess->rtrs)) {
1215 err = PTR_ERR(sess->rtrs);
1219 err = rtrs_clt_query(sess->rtrs, &attrs);
1223 sess->max_io_size = attrs.max_io_size;
1224 sess->queue_depth = attrs.queue_depth;
1226 err = setup_mq_tags(sess);
1230 err = send_msg_sess_info(sess, WAIT);
1234 wake_up_rtrs_waiters(sess);
1236 return sess;
1239 close_rtrs(sess);
1241 rnbd_clt_put_sess(sess);
1246 wake_up_rtrs_waiters(sess);
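
The session-open path (lines 1189-1246) unwinds in reverse on failure: close the transport if it was opened, drop the creator's reference, and in all cases wake whoever is blocked in wait_for_rtrs_connection(), on success at line 1234 and on failure at line 1246. A goto-style sketch of that ordering (transport and handshake helpers are hypothetical):

        struct session;
        void *transport_open(const char *name); /* hypothetical */
        void transport_close(void *t);
        int  handshake(struct session *s);
        void wake_waiters(struct session *s);
        void session_put(struct session *s);

        static int open_session(struct session *s, const char *name)
        {
                void *t;
                int err;

                t = transport_open(name);
                if (!t) {
                        err = -1;
                        goto put;
                }
                err = handshake(s);
                if (err)
                        goto close;
                wake_waiters(s);        /* success: unblock waiters (1234) */
                return 0;

        close:
                transport_close(t);
        put:
                session_put(s);         /* drop the creator's reference */
                wake_waiters(s);        /* waiters must learn of failure too (1246) */
                return err;
        }
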
1274 dev->queue = blk_mq_init_queue(&dev->sess->tag_set);
1309 blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
1342 static int rnbd_client_setup_device(struct rnbd_clt_session *sess,
1366 static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
1390 pathname, sess->sessname, ret);
1401 dev->sess = sess;
1411 WARN_ON(!rnbd_clt_get_sess(sess));
1424 struct rnbd_clt_session *sess;
1428 list_for_each_entry(sess, &sess_list, list) {
1429 mutex_lock(&sess->lock);
1430 list_for_each_entry(dev, &sess->devs_list, list) {
1437 mutex_unlock(&sess->lock);
1457 struct rnbd_clt_session *sess,
1465 mutex_lock(&sess->lock);
1466 list_add_tail(&dev->list, &sess->devs_list);
1467 mutex_unlock(&sess->lock);
1476 struct rnbd_clt_session *sess = dev->sess;
1478 mutex_lock(&sess->lock);
1480 mutex_unlock(&sess->lock);
1489 struct rnbd_clt_session *sess;
1496 sess = find_and_get_or_create_sess(sessname, paths, path_cnt, port_nr);
1497 if (IS_ERR(sess))
1498 return ERR_CAST(sess);
1500 dev = init_dev(sess, access_mode, pathname);
1503 pathname, sess->sessname, PTR_ERR(dev));
1507 if (insert_dev_if_not_exists_devpath(pathname, sess, dev)) {
1520 sess->sessname, pathname);
1521 ret = rnbd_client_setup_device(sess, dev, dev->clt_device_id);
1542 rnbd_clt_put_sess(sess);
1553 rnbd_clt_put_sess(sess);
1581 struct rnbd_clt_session *sess = dev->sess;
1606 if (was_mapped && sess->rtrs)
1659 struct rnbd_clt_session *sess, *sn;
1681 list_for_each_entry_safe(sess, sn, &sess_list, list) {
1682 if (!rnbd_clt_get_sess(sess))
1684 close_rtrs(sess);
1685 list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
1695 rnbd_clt_put_sess(sess);
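
Finally, the teardown loop (lines 1659-1695) pins each session before touching it, closes the transport first so no new I/O can start, unmaps the session's devices, and only then drops its pin. Roughly (the iteration and unmap helpers are hypothetical):

        #include <stdbool.h>

        struct session;
        struct session *first_session(void);
        struct session *next_session(struct session *s);
        bool session_get(struct session *s);
        void session_put(struct session *s);
        void transport_shutdown(struct session *s);
        void unmap_all_devices(struct session *s);

        static void shutdown_all(void)
        {
                struct session *s, *next;

                for (s = first_session(); s; s = next) {
                        next = next_session(s); /* safe iteration, cf. _safe */
                        if (!session_get(s))    /* already being freed */
                                continue;
                        transport_shutdown(s);  /* stop new I/O first */
                        unmap_all_devices(s);
                        session_put(s);         /* may free the session */
                }
        }
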