Lines Matching refs:node
81 body->case_sensitive = conn_impl->node->sbi->s_case_sensitive;
88 __u8 sensitive = conn_impl->node->sbi->s_case_sensitive ? 1 : 0;
111 body->features = cpu_to_le64(conn_impl->node->sbi->s_features);
125 conn_impl->node->features = le64_to_cpu(body->features);
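
The feature word is carried little-endian in the handshake body (the cpu_to_le64()/le64_to_cpu() pair above): the sender converts its CPU-order value before putting it on the wire, and the receiver converts the reply back into node->features. A tiny user-space round-trip of the same conversion, assuming a glibc-style <endian.h> whose htole64()/le64toh() stand in for the kernel helpers:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t features = 0x3;             /* hypothetical local feature bits */
        uint64_t wire = htole64(features);   /* what goes into the handshake body */
        uint64_t parsed = le64toh(wire);     /* what ends up in node->features */

        printf("features=%#llx wire=%#llx parsed=%#llx\n",
               (unsigned long long)features,
               (unsigned long long)wire,
               (unsigned long long)parsed);
        return 0;
    }
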
286 data_len, conn_impl->node->device_id);
295 req_len, data_len, conn_impl->node->device_id);
306 ops, data_len, conn_impl->node->device_id, hs_req->len);
409 static int hmdfs_node_waiting_evt_sum(const struct hmdfs_peer *node)
415 sum += node->waiting_evt[i];
420 static int hmdfs_update_node_waiting_evt(struct hmdfs_peer *node, int evt,
427 sum = hmdfs_node_waiting_evt_sum(node);
429 last = !node->pending_evt;
431 last = node->pending_evt;
435 node->dup_evt[evt]++;
439 node->waiting_evt[evt]++;
440 hmdfs_debug("add node->waiting_evt[%d]=%d", evt,
441 node->waiting_evt[evt]);
447 if (node->waiting_evt[RAW_NODE_EVT_OFF] >= 2 &&
448 node->waiting_evt[RAW_NODE_EVT_ON] >= 1) {
449 node->waiting_evt[RAW_NODE_EVT_OFF] -= 1;
450 node->waiting_evt[RAW_NODE_EVT_ON] -= 1;
451 node->seq_wr_idx -= 2;
452 node->merged_evt += 2;
455 next = hmdfs_node_inc_evt_seq(node);
456 node->seq_tbl[(node->seq_wr_idx++) % RAW_NODE_EVT_MAX_NR] = next;
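
Lines 447-452 above merge a redundant OFF/ON pair out of the backlog: once at least two OFFs and one ON are waiting, one pair is dropped, the two sequence-table slots are reclaimed, and merged_evt is bumped. A minimal user-space sketch of that rule, using a simplified stand-in struct rather than the real hmdfs_peer:

    #include <stdio.h>

    enum { RAW_NODE_EVT_OFF, RAW_NODE_EVT_ON, RAW_NODE_EVT_NR };

    struct peer_evt {
        int waiting_evt[RAW_NODE_EVT_NR];
        int seq_wr_idx;
        int merged_evt;
    };

    static void merge_dup_evt(struct peer_evt *p)
    {
        if (p->waiting_evt[RAW_NODE_EVT_OFF] >= 2 &&
            p->waiting_evt[RAW_NODE_EVT_ON] >= 1) {
            p->waiting_evt[RAW_NODE_EVT_OFF] -= 1;
            p->waiting_evt[RAW_NODE_EVT_ON] -= 1;
            p->seq_wr_idx -= 2;    /* give back the two merged slots */
            p->merged_evt += 2;
        }
    }

    int main(void)
    {
        struct peer_evt p = { .waiting_evt = { 2, 1 }, .seq_wr_idx = 3 };

        merge_dup_evt(&p);
        printf("OFF=%d ON=%d wr_idx=%d merged=%d\n",
               p.waiting_evt[RAW_NODE_EVT_OFF], p.waiting_evt[RAW_NODE_EVT_ON],
               p.seq_wr_idx, p.merged_evt);
        return 0;
    }
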
462 static void hmdfs_run_evt_cb_verbosely(struct hmdfs_peer *node, int raw_evt,
469 node->cur_evt[cur_evt_idx] = raw_evt;
470 node->cur_evt_seq[cur_evt_idx] = seq;
471 hmdfs_node_call_evt_cb(node, evt, sync, seq);
472 node->cur_evt[cur_evt_idx] = RAW_NODE_EVT_NR;
477 struct hmdfs_peer *node =
486 mutex_lock(&node->seq_lock);
487 seq = node->seq_tbl[(node->seq_rd_idx++) % RAW_NODE_EVT_MAX_NR];
488 hmdfs_run_evt_cb_verbosely(node, node->pending_evt, false, seq);
489 mutex_unlock(&node->seq_lock);
491 mutex_lock(&node->evt_lock);
492 if (hmdfs_node_waiting_evt_sum(node)) {
493 node->pending_evt = !node->pending_evt;
494 node->pending_evt_seq =
495 node->seq_tbl[node->seq_rd_idx % RAW_NODE_EVT_MAX_NR];
496 node->waiting_evt[node->pending_evt]--;
498 schedule_delayed_work(&node->evt_dwork,
499 node->sbi->async_cb_delay * HZ);
501 node->last_evt = node->pending_evt;
502 node->pending_evt = RAW_NODE_EVT_NR;
504 mutex_unlock(&node->evt_lock);
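
The delayed work above runs the callback for the current pending event, then either promotes the next waiting event or goes idle. Note line 493: the next pending event is taken to be the complement of the current one, which works because only OFF(0) and ON(1) exist and queued events alternate. A simplified single-threaded sketch of that drain loop (locking, sequence numbers and the rescheduling of the work item are omitted):

    #include <stdio.h>

    enum { RAW_NODE_EVT_OFF, RAW_NODE_EVT_ON, RAW_NODE_EVT_NR };

    int main(void)
    {
        int waiting_evt[RAW_NODE_EVT_NR] = { 1, 2 };  /* hypothetical backlog */
        int pending_evt = RAW_NODE_EVT_OFF;

        while (pending_evt != RAW_NODE_EVT_NR) {
            printf("run callback for evt %d\n", pending_evt);

            if (waiting_evt[RAW_NODE_EVT_OFF] + waiting_evt[RAW_NODE_EVT_ON]) {
                pending_evt = !pending_evt;       /* events alternate OFF/ON */
                waiting_evt[pending_evt]--;
            } else {
                pending_evt = RAW_NODE_EVT_NR;    /* nothing left to run */
            }
        }
        return 0;
    }
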
519 static void hmdfs_queue_raw_node_evt(struct hmdfs_peer *node, int evt)
523 mutex_lock(&node->evt_lock);
524 if (node->pending_evt == RAW_NODE_EVT_NR) {
525 if (evt == node->last_evt) {
526 node->dup_evt[evt]++;
527 mutex_unlock(&node->evt_lock);
530 node->pending_evt = evt;
531 seq = hmdfs_node_inc_evt_seq(node);
532 node->seq_tbl[(node->seq_wr_idx++) % RAW_NODE_EVT_MAX_NR] = seq;
533 node->pending_evt_seq = seq;
534 mutex_lock(&node->seq_lock);
535 mutex_unlock(&node->evt_lock);
537 hmdfs_run_evt_cb_verbosely(node, evt, true, seq);
538 mutex_unlock(&node->seq_lock);
539 schedule_delayed_work(&node->evt_dwork,
540 node->sbi->async_cb_delay * HZ);
541 } else if (hmdfs_update_node_waiting_evt(node, evt, &seq) > 0) {
546 mutex_lock(&node->seq_lock);
547 mutex_unlock(&node->evt_lock);
548 hmdfs_run_evt_cb_verbosely(node, evt, true, seq);
549 mutex_unlock(&node->seq_lock);
551 mutex_unlock(&node->evt_lock);
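
hmdfs_queue_raw_node_evt() above picks one of three outcomes under evt_lock: with nothing pending, a repeat of the last delivered event is counted as a duplicate and dropped, while a new event is dispatched synchronously and the delayed work is scheduled; with something already pending, the event goes into the waiting counters for the delayed work to drain. A single-threaded sketch of that decision, with locks, sequence numbers and the waiting-event bookkeeping reduced to bare counters:

    #include <stdio.h>

    enum { RAW_NODE_EVT_OFF, RAW_NODE_EVT_ON, RAW_NODE_EVT_NR };

    static int pending_evt = RAW_NODE_EVT_NR;
    static int last_evt = RAW_NODE_EVT_NR;
    static int waiting_evt[RAW_NODE_EVT_NR];
    static int dup_evt[RAW_NODE_EVT_NR];

    static void queue_raw_evt(int evt)
    {
        if (pending_evt == RAW_NODE_EVT_NR) {
            if (evt == last_evt) {
                dup_evt[evt]++;            /* same state twice in a row: drop */
                return;
            }
            pending_evt = evt;
            printf("dispatch evt %d now, drain the rest later\n", evt);
        } else {
            waiting_evt[evt]++;            /* parked for the delayed work */
            printf("park evt %d (waiting=%d)\n", evt, waiting_evt[evt]);
        }
    }

    int main(void)
    {
        queue_raw_evt(RAW_NODE_EVT_ON);    /* dispatched immediately */
        queue_raw_evt(RAW_NODE_EVT_OFF);   /* parked behind the pending ON */
        queue_raw_evt(RAW_NODE_EVT_ON);    /* parked as well */
        return 0;
    }
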
569 void connection_handshake_notify(struct hmdfs_peer *node, int notify_type)
575 memcpy(param.remote_cid, node->cid, HMDFS_CID_SIZE);
576 notify(node, &param);
593 void connection_to_working(struct hmdfs_peer *node)
598 if (!node)
600 mutex_lock(&node->conn_impl_list_lock);
601 list_for_each_entry(conn_impl, &node->conn_impl_list, list) {
609 mutex_unlock(&node->conn_impl_list_lock);
610 peer_online(node);
625 conn_impl->node->version = head->version;
632 conn_impl->node->device_id, head->datasize, fd);
637 conn_impl->node->status = NODE_STAT_SHAKING;
642 conn_impl->node->device_id, status, fd);
661 peer_online(conn_impl->node);
677 peer_online(conn_impl->node);
688 connection_handshake_notify(conn_impl->node, NOTIFY_OFFLINE);
690 conn_impl->node->device_id, fd);
703 if (hmdfs_message_verify(conn->node, head, data) < 0) {
741 hmdfs_recv_mesg_callback(conn_impl->node, buf, data);
767 mutex_lock(&conn->node->conn_impl_list_lock);
769 mutex_unlock(&conn->node->conn_impl_list_lock);
774 wake_up_interruptible(&conn->node->deleting_list_wq);
823 static void hmdfs_dump_deleting_list(struct hmdfs_peer *node)
829 mutex_lock(&node->conn_impl_list_lock);
830 list_for_each_entry(con, &node->conn_deleting_list, list) {
833 count, node->device_id, tcp ? tcp->fd : -1,
837 mutex_unlock(&node->conn_impl_list_lock);
840 static bool hmdfs_conn_deleting_list_empty(struct hmdfs_peer *node)
844 mutex_lock(&node->conn_impl_list_lock);
845 empty = list_empty(&node->conn_deleting_list);
846 mutex_unlock(&node->conn_impl_list_lock);
851 void hmdfs_disconnect_node(struct hmdfs_peer *node)
858 if (unlikely(!node))
861 hmdfs_node_inc_evt_seq(node);
864 node->status = NODE_STAT_OFFLINE;
865 hmdfs_info("Try to disconnect peer: device_id %llu", node->device_id);
867 mutex_lock(&node->conn_impl_list_lock);
868 if (!list_empty(&node->conn_impl_list))
869 list_replace_init(&node->conn_impl_list, &local_conns);
870 mutex_unlock(&node->conn_impl_list_lock);
888 if (wait_event_interruptible_timeout(node->deleting_list_wq,
889 hmdfs_conn_deleting_list_empty(node),
891 hmdfs_dump_deleting_list(node);
894 spin_lock(&node->idr_lock);
895 while (node->msg_idr_process) {
896 spin_unlock(&node->idr_lock);
899 spin_lock(&node->idr_lock);
901 spin_unlock(&node->idr_lock);
903 hmdfs_queue_raw_node_evt(node, RAW_NODE_EVT_OFF);
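
hmdfs_disconnect_node() above marks the peer offline, detaches the whole conn_impl_list onto a local list while conn_impl_list_lock is held (list_replace_init at line 869), and only then tears the connections down, waiting afterwards for the deleting list and in-flight message IDs to drain before queueing RAW_NODE_EVT_OFF. A user-space pthreads sketch of the detach-then-teardown idiom; the list handling and types are simplified stand-ins (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct conn {
        struct conn *next;
        int fd;
    };

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct conn *conn_list;          /* protected by list_lock */

    static void disconnect_all(void)
    {
        struct conn *local, *c;

        pthread_mutex_lock(&list_lock);     /* detach the whole list ... */
        local = conn_list;
        conn_list = NULL;
        pthread_mutex_unlock(&list_lock);

        while ((c = local)) {               /* ... then tear down unlocked */
            local = c->next;
            printf("closing fd %d\n", c->fd);
            free(c);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct conn *c = malloc(sizeof(*c));

            if (!c)
                break;
            c->fd = i;
            c->next = conn_list;
            conn_list = c;
        }
        disconnect_all();
        return 0;
    }
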
906 static void hmdfs_run_simple_evt_cb(struct hmdfs_peer *node, int evt)
908 unsigned int seq = hmdfs_node_inc_evt_seq(node);
910 mutex_lock(&node->seq_lock);
911 hmdfs_node_call_evt_cb(node, evt, true, seq);
912 mutex_unlock(&node->seq_lock);
915 static void hmdfs_del_peer(struct hmdfs_peer *node)
922 cancel_delayed_work_sync(&node->evt_dwork);
924 hmdfs_run_simple_evt_cb(node, NODE_EVT_DEL);
926 hmdfs_release_peer_sysfs(node);
928 flush_workqueue(node->reget_conn_wq);
929 peer_put(node);
934 struct hmdfs_peer *node = NULL;
938 list_for_each_entry_safe(node, con_tmp, &sbi->connections.node_list,
941 hmdfs_disconnect_node(node);
942 hmdfs_del_peer(node);
948 struct connection *get_conn_impl(struct hmdfs_peer *node, int connect_type)
952 if (!node)
954 mutex_lock(&node->conn_impl_list_lock);
955 list_for_each_entry(conn_impl, &node->conn_impl_list, list) {
959 mutex_unlock(&node->conn_impl_list_lock);
963 mutex_unlock(&node->conn_impl_list_lock);
965 node->device_id, connect_type);
969 void set_conn_sock_quickack(struct hmdfs_peer *node)
975 if (!node)
977 mutex_lock(&node->conn_impl_list_lock);
978 list_for_each_entry(conn_impl, &node->conn_impl_list, list) {
986 mutex_unlock(&node->conn_impl_list_lock);
1033 struct hmdfs_peer *node = NULL;
1035 list_for_each_entry(node, &sbi->connections.node_list, list)
1036 if (!strncmp(node->cid, cid, HMDFS_CID_SIZE)) {
1037 peer_get(node);
1038 return node;
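
The cid lookup above pins the matching peer (peer_get) while the list is still being walked, so the caller never sees a peer it doesn't hold a reference to. A simplified stand-in sketch of that lookup-and-get idiom; the real code uses a kref (see kref_init below) and the kernel list iterators:

    #include <stdio.h>
    #include <string.h>

    #define CID_SIZE 8

    struct peer {
        char cid[CID_SIZE];
        int ref_cnt;
    };

    static struct peer peers[] = {
        { "aaaa", 1 },
        { "bbbb", 1 },
    };

    static struct peer *lookup_peer(const char *cid)
    {
        for (size_t i = 0; i < sizeof(peers) / sizeof(peers[0]); i++) {
            if (!strncmp(peers[i].cid, cid, CID_SIZE)) {
                peers[i].ref_cnt++;          /* pin before returning */
                return &peers[i];
            }
        }
        return NULL;
    }

    int main(void)
    {
        struct peer *p = lookup_peer("bbbb");

        if (p)
            printf("found cid %s, ref_cnt now %d\n", p->cid, p->ref_cnt);
        return 0;
    }
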
1068 struct hmdfs_peer *node = kzalloc(sizeof(*node), GFP_KERNEL);
1070 if (!node)
1073 node->device_id = (u32)atomic_inc_return(&sbi->connections.conn_seq);
1075 node->async_wq = alloc_workqueue("dfs_async%u_%llu", WQ_MEM_RECLAIM, 0,
1076 sbi->seq, node->device_id);
1077 if (!node->async_wq) {
1081 node->req_handle_wq = alloc_workqueue("dfs_req%u_%llu",
1084 sbi->seq, node->device_id);
1085 if (!node->req_handle_wq) {
1089 node->dentry_wq = alloc_workqueue("dfs_dentry%u_%llu",
1091 0, sbi->seq, node->device_id);
1092 if (!node->dentry_wq) {
1096 node->retry_wb_wq = alloc_workqueue("dfs_rwb%u_%llu",
1099 sbi->seq, node->device_id);
1100 if (!node->retry_wb_wq) {
1104 node->reget_conn_wq = alloc_workqueue("dfs_regetcon%u_%llu",
1106 sbi->seq, node->device_id);
1107 if (!node->reget_conn_wq) {
1111 INIT_LIST_HEAD(&node->conn_impl_list);
1112 mutex_init(&node->conn_impl_list_lock);
1113 INIT_LIST_HEAD(&node->conn_deleting_list);
1114 init_waitqueue_head(&node->deleting_list_wq);
1115 idr_init(&node->msg_idr);
1116 spin_lock_init(&node->idr_lock);
1117 idr_init(&node->file_id_idr);
1118 spin_lock_init(&node->file_id_lock);
1119 INIT_LIST_HEAD(&node->list);
1120 kref_init(&node->ref_cnt);
1121 node->owner = sbi->seq;
1122 node->sbi = sbi;
1123 node->version = HMDFS_VERSION;
1124 node->status = NODE_STAT_SHAKING;
1125 node->conn_time = jiffies;
1126 memcpy(node->cid, cid, HMDFS_CID_SIZE);
1127 atomic64_set(&node->sb_dirty_count, 0);
1128 node->fid_cookie = 0;
1129 atomic_set(&node->evt_seq, 0);
1130 mutex_init(&node->seq_lock);
1131 mutex_init(&node->offline_cb_lock);
1132 mutex_init(&node->evt_lock);
1133 node->pending_evt = RAW_NODE_EVT_NR;
1134 node->last_evt = RAW_NODE_EVT_NR;
1135 node->cur_evt[0] = RAW_NODE_EVT_NR;
1136 node->cur_evt[1] = RAW_NODE_EVT_NR;
1137 node->seq_wr_idx = (unsigned char)UINT_MAX;
1138 node->seq_rd_idx = node->seq_wr_idx;
1139 INIT_DELAYED_WORK(&node->evt_dwork, hmdfs_node_evt_work);
1140 node->msg_idr_process = 0;
1141 node->offline_start = false;
1142 spin_lock_init(&node->wr_opened_inode_lock);
1143 INIT_LIST_HEAD(&node->wr_opened_inode_list);
1144 spin_lock_init(&node->stashed_inode_lock);
1145 node->stashed_inode_nr = 0;
1146 atomic_set(&node->rebuild_inode_status_nr, 0);
1147 init_waitqueue_head(&node->rebuild_inode_status_wq);
1148 INIT_LIST_HEAD(&node->stashed_inode_list);
1149 node->need_rebuild_stash_list = false;
1150 node->devsl = devsl;
1152 return node;
1155 if (node->async_wq) {
1156 destroy_workqueue(node->async_wq);
1157 node->async_wq = NULL;
1159 if (node->req_handle_wq) {
1160 destroy_workqueue(node->req_handle_wq);
1161 node->req_handle_wq = NULL;
1163 if (node->dentry_wq) {
1164 destroy_workqueue(node->dentry_wq);
1165 node->dentry_wq = NULL;
1167 if (node->retry_wb_wq) {
1168 destroy_workqueue(node->retry_wb_wq);
1169 node->retry_wb_wq = NULL;
1171 if (node->reget_conn_wq) {
1172 destroy_workqueue(node->reget_conn_wq);
1173 node->reget_conn_wq = NULL;
1175 kfree(node);
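
The constructor above allocates its workqueues one after another and, on any failure, reaches a cleanup tail that destroys only the queues that exist and NULLs the pointers before freeing the peer. A small user-space sketch of that allocate-then-unwind pattern, with malloc() standing in for alloc_workqueue():

    #include <stdlib.h>

    struct peer {
        void *async_wq;
        void *req_handle_wq;
        void *dentry_wq;
    };

    static void free_peer(struct peer *p)
    {
        if (!p)
            return;
        free(p->async_wq);        /* free(NULL) is a no-op, like the checks above */
        free(p->req_handle_wq);
        free(p->dentry_wq);
        free(p);
    }

    static struct peer *alloc_peer(void)
    {
        struct peer *p = calloc(1, sizeof(*p));

        if (!p)
            return NULL;
        p->async_wq = malloc(64);
        if (!p->async_wq)
            goto out_err;
        p->req_handle_wq = malloc(64);
        if (!p->req_handle_wq)
            goto out_err;
        p->dentry_wq = malloc(64);
        if (!p->dentry_wq)
            goto out_err;
        return p;

    out_err:
        free_peer(p);             /* releases only what was actually allocated */
        return NULL;
    }

    int main(void)
    {
        free_peer(alloc_peer());
        return 0;
    }
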