Lines Matching defs:node
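These matches appear to come from the Linux QIPCRTR core (net/qrtr/af_qrtr.c in recent trees, net/qrtr/qrtr.c in older ones); the leading number on each line is its position in that file.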
32 * @src_node_id: source node
36 * @dst_node_id: destination node
57 * @src_node_id: source node
59 * @dst_node_id: destination node
104 /* for node ids */
109 /* lock for qrtr_all_nodes and node reference */
116 * struct qrtr_node - endpoint node
119 * @ref: reference count for node
120 * @nid: node id
121 * @qrtr_tx_flow: tree of qrtr_tx_flow, keyed by node << 32 | port
154 static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
157 static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
163 /* Release node resources and free the node.
166 * kref_put_mutex. As such, the node mutex is expected to be locked on call.
170 struct qrtr_node *node = container_of(kref, struct qrtr_node, ref);
177 /* If the node is a bridge for other nodes, there are possibly
178  * multiple entries pointing to our released node; delete them all.
181 if (*slot == node)
186 list_del(&node->item);
189 skb_queue_purge(&node->rx_queue);
192 radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
194 radix_tree_iter_delete(&node->qrtr_tx_flow, &iter, slot);
197 kfree(node);
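
Together these matches show the kref_put_mutex() teardown pattern: __qrtr_node_release() runs with qrtr_node_lock held, unlinks the node from every shared structure, drops the lock, and frees. A minimal sketch of the same pattern, with illustrative names (my_node, my_node_lock) standing in for the qrtr ones:

    #include <linux/kref.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>

    static DEFINE_MUTEX(my_node_lock);

    struct my_node {
        struct kref ref;
    };

    /* Called by kref_put_mutex() with my_node_lock already held; it
     * must unlink the node, drop the lock, then free the memory. */
    static void __my_node_release(struct kref *kref)
    {
        struct my_node *node = container_of(kref, struct my_node, ref);

        /* ... remove node from shared lookup structures here ... */
        mutex_unlock(&my_node_lock);
        kfree(node);
    }

    static void my_node_release(struct my_node *node)
    {
        if (!node)
            return;
        /* Takes my_node_lock only when the refcount hits zero. */
        kref_put_mutex(&node->ref, __my_node_release, &my_node_lock);
    }
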
200 /* Increment reference to node. */
201 static struct qrtr_node *qrtr_node_acquire(struct qrtr_node *node)
203 if (node)
204 kref_get(&node->ref);
205 return node;
208 /* Decrement reference to node and release as necessary. */
209 static void qrtr_node_release(struct qrtr_node *node)
211 if (!node)
213 kref_put_mutex(&node->ref, __qrtr_node_release, &qrtr_node_lock);
218 * @node: qrtr_node that the QRTR_TYPE_RESUME_TX packet arrived on
221 static void qrtr_tx_resume(struct qrtr_node *node, struct sk_buff *skb)
224 u64 remote_node = le32_to_cpu(pkt->client.node);
232 flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
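
qrtr_tx_resume() looks up the flow that the remote just acknowledged. The key layout matches the struct comment at line 121: node id in the upper 32 bits, port in the lower 32; remote_node is read into a u64 above so the shift by 32 is well defined. A sketch of the packing (the helper name is mine, not from the source):

    static unsigned long qrtr_tx_flow_key(u32 node, u32 port)
    {
        /* node << 32 | port, as used to key node->qrtr_tx_flow */
        return (u64)node << 32 | port;
    }
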
246 * @node: qrtr_node that the packet is to be sent to
247 * @dest_node: node id of the destination
259 static int qrtr_tx_wait(struct qrtr_node *node, int dest_node, int dest_port,
271 mutex_lock(&node->qrtr_tx_lock);
272 flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
277 if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) {
283 mutex_unlock(&node->qrtr_tx_lock);
293 !node->ep);
296 } else if (!node->ep) {
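
qrtr_tx_wait() implements the sender side of flow control: under qrtr_tx_lock it looks up the per-(node, port) flow, allocating and inserting one on first use, then sleeps until the remote sends resume-tx or the endpoint disappears (!node->ep, lines 293 and 296). A sketch of the lookup-or-insert step, assuming struct qrtr_tx_flow carries a resume_tx waitqueue (field names not visible in the matches are assumptions):

    mutex_lock(&node->qrtr_tx_lock);
    flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
    if (!flow) {
        flow = kzalloc(sizeof(*flow), GFP_KERNEL);
        if (flow) {
            init_waitqueue_head(&flow->resume_tx);
            if (radix_tree_insert(&node->qrtr_tx_flow, key, flow)) {
                /* insertion failed: give up the allocation */
                kfree(flow);
                flow = NULL;
            }
        }
    }
    mutex_unlock(&node->qrtr_tx_lock);

The condition matched at line 293 suggests the subsequent sleep is a wait_event_interruptible() on the flow that also aborts once node->ep has been cleared by qrtr_endpoint_unregister().
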
312 * @node: qrtr_node that the packet is to be sent to
313 * @dest_node: node id of the destination
323 static void qrtr_tx_flow_failed(struct qrtr_node *node, int dest_node,
330 flow = radix_tree_lookup(&node->qrtr_tx_flow, key);
340 static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
348 confirm_rx = qrtr_tx_wait(node, to->sq_node, to->sq_port, type);
360 hdr->dst_node_id = cpu_to_le32(node->nid);
373 mutex_lock(&node->ep_lock);
375 if (node->ep)
376 rc = node->ep->xmit(node->ep, skb);
379 mutex_unlock(&node->ep_lock);
384 qrtr_tx_flow_failed(node, to->sq_node, to->sq_port);
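
qrtr_node_enqueue() charges the flow via qrtr_tx_wait(), fills in the wire header (line 360 is the control-port case, where the destination node id is the endpoint's nid), and transmits with ep_lock held so qrtr_endpoint_unregister() cannot clear node->ep mid-call; on failure the flow charge is rolled back. A sketch of the guarded transmit, with the -ENODEV fallback assumed rather than matched:

    mutex_lock(&node->ep_lock);
    rc = -ENODEV;
    if (node->ep)
        rc = node->ep->xmit(node->ep, skb);
    else
        kfree_skb(skb);              /* endpoint already gone */
    mutex_unlock(&node->ep_lock);

    /* Undo the confirm_rx accounting if the packet never left. */
    if (rc)
        qrtr_tx_flow_failed(node, to->sq_node, to->sq_port);
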
389 /* Lookup node by id.
395 struct qrtr_node *node;
400 node = radix_tree_lookup(&qrtr_nodes, nid);
401 node = qrtr_node_acquire(node);
405 return node;
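
Lookup takes the reference while the id tree is still locked, so a concurrent release cannot free the node between the lookup and the acquire. A sketch assembled from the matched lines (locking simplified to the node mutex; recent kernels also guard the id tree with a dedicated spinlock):

    static struct qrtr_node *qrtr_node_lookup(unsigned int nid)
    {
        struct qrtr_node *node;

        mutex_lock(&qrtr_node_lock);
        node = radix_tree_lookup(&qrtr_nodes, nid);
        node = qrtr_node_acquire(node);  /* NULL-tolerant, line 203 */
        mutex_unlock(&qrtr_node_lock);

        return node;
    }
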
408 /* Assign node id to node.
410 * This is mostly useful for automatic node id assignment, based on
413 static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
421 radix_tree_insert(&qrtr_nodes, nid, node);
422 if (node->nid == QRTR_EP_NID_AUTO)
423 node->nid = nid;
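
qrtr_node_assign() publishes the node in the qrtr_nodes id tree and, for endpoints created with QRTR_EP_NID_AUTO (line 599), fixes up the node's own id from the first id observed on the wire (line 517). A sketch; the early return for an auto nid and the locking are assumed from context:

    static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
    {
        if (nid == QRTR_EP_NID_AUTO)
            return;            /* nothing concrete to record yet */

        mutex_lock(&qrtr_node_lock);
        radix_tree_insert(&qrtr_nodes, nid, node);
        if (node->nid == QRTR_EP_NID_AUTO)
            node->nid = nid;   /* adopt the first concrete id */
        mutex_unlock(&qrtr_node_lock);
    }
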
437 struct qrtr_node *node = ep->node;
517 qrtr_node_assign(node, cb->src_node);
520 /* Remote node endpoint can bridge other distant nodes */
524 qrtr_node_assign(node, le32_to_cpu(pkt->server.node));
528 qrtr_tx_resume(node, skb);
580 * @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
587 struct qrtr_node *node;
592 node = kzalloc(sizeof(*node), GFP_KERNEL);
593 if (!node)
596 kref_init(&node->ref);
597 mutex_init(&node->ep_lock);
598 skb_queue_head_init(&node->rx_queue);
599 node->nid = QRTR_EP_NID_AUTO;
600 node->ep = ep;
602 INIT_RADIX_TREE(&node->qrtr_tx_flow, GFP_KERNEL);
603 mutex_init(&node->qrtr_tx_lock);
605 qrtr_node_assign(node, nid);
608 list_add(&node->item, &qrtr_all_nodes);
610 ep->node = node;
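
From a transport driver's point of view, registration is: supply an xmit callback, call qrtr_endpoint_register(), and let the node id be discovered from traffic. A hypothetical usage sketch (my_ep, my_xmit, and my_link_up are illustrative, not from the source):

    static int my_xmit(struct qrtr_endpoint *ep, struct sk_buff *skb)
    {
        /* hand the skb to the underlying link here */
        consume_skb(skb);
        return 0;
    }

    static struct qrtr_endpoint my_ep = {
        .xmit = my_xmit,
    };

    static int my_link_up(void)
    {
        /* QRTR_EP_NID_AUTO: adopt the peer's node id from the
         * first packet instead of fixing one now (line 517). */
        return qrtr_endpoint_register(&my_ep, QRTR_EP_NID_AUTO);
    }
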
622 struct qrtr_node *node = ep->node;
623 struct sockaddr_qrtr src = {AF_QIPCRTR, node->nid, QRTR_PORT_CTRL};
632 mutex_lock(&node->ep_lock);
633 node->ep = NULL;
634 mutex_unlock(&node->ep_lock);
639 if (*slot != node)
650 /* Wake up any transmitters waiting for resume-tx from the node */
651 mutex_lock(&node->qrtr_tx_lock);
652 radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
656 mutex_unlock(&node->qrtr_tx_lock);
658 qrtr_node_release(node);
659 ep->node = NULL;
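
Teardown mirrors the wait in qrtr_tx_wait(): after node->ep is cleared under ep_lock, every sender parked on a flow must be woken, because no resume-tx will ever arrive; the waiters then observe !node->ep and bail out. A sketch of the wake-up loop matched at lines 651-656 (the waitqueue field name is assumed):

    mutex_lock(&node->qrtr_tx_lock);
    radix_tree_for_each_slot(slot, &node->qrtr_tx_flow, &iter, 0) {
        struct qrtr_tx_flow *flow = *slot;

        wake_up_interruptible_all(&flow->resume_tx);
    }
    mutex_unlock(&node->qrtr_tx_lock);
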
704 pkt->client.node = cpu_to_le32(ipc->us.sq_node);
848 static int qrtr_local_enqueue(struct qrtr_node *node, struct sk_buff *skb,
879 static int qrtr_bcast_enqueue(struct qrtr_node *node, struct sk_buff *skb,
886 list_for_each_entry(node, &qrtr_all_nodes, item) {
891 qrtr_node_enqueue(node, skbn, type, from, to);
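
Broadcast fans one skb out to every known endpoint: each node on qrtr_all_nodes gets its own copy, so the per-node enqueue can consume it independently. A sketch of the loop body (the clone step is assumed; only the iteration and the enqueue are matched above):

    mutex_lock(&qrtr_node_lock);
    list_for_each_entry(node, &qrtr_all_nodes, item) {
        struct sk_buff *skbn = skb_clone(skb, GFP_KERNEL);

        if (!skbn)
            break;
        qrtr_node_enqueue(node, skbn, type, from, to);
    }
    mutex_unlock(&qrtr_node_lock);
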
908 struct qrtr_node *node;
945 node = NULL;
956 node = qrtr_node_lookup(addr->sq_node);
957 if (!node) {
992 rc = enqueue_fn(node, skb, type, &ipc->us, addr);
997 qrtr_node_release(node);
1008 struct qrtr_node *node;
1012 node = qrtr_node_lookup(remote.sq_node);
1013 if (!node)
1021 pkt->client.node = cpu_to_le32(cb->dst_node);
1024 ret = qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX, &local, &remote);
1026 qrtr_node_release(node);
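
This last group is the receive-side acknowledgement: a QRTR_TYPE_RESUME_TX control packet is sent back toward the original sender, echoing the (node, port) pair the confirmed packet was addressed to, which is exactly the key the sender used for its qrtr_tx_flow entry (line 121). A sketch of the reply, with the skb/pkt allocation elided:

    struct sockaddr_qrtr remote = { AF_QIPCRTR, cb->src_node, cb->src_port };
    struct sockaddr_qrtr local  = { AF_QIPCRTR, cb->dst_node, cb->dst_port };

    node = qrtr_node_lookup(remote.sq_node);
    if (!node)
        return -EINVAL;

    pkt->cmd = cpu_to_le32(QRTR_TYPE_RESUME_TX);
    /* Echo back the address the confirmed packet was sent to, so
     * the peer can find the matching flow (node << 32 | port). */
    pkt->client.node = cpu_to_le32(cb->dst_node);
    pkt->client.port = cpu_to_le32(cb->dst_port);

    ret = qrtr_node_enqueue(node, skb, QRTR_TYPE_RESUME_TX, &local, &remote);
    qrtr_node_release(node);   /* drop the lookup reference */
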