Lines matching refs: new_ref (drivers/android/binder.c)
1088 * @new_ref: newly allocated binder_ref to be initialized or %NULL
1099 * new_ref. new_ref must be kfree'd by the caller in
1105 struct binder_ref *new_ref)
1124 if (!new_ref)
1128 new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
1129 new_ref->proc = proc;
1130 new_ref->node = node;
1131 rb_link_node(&new_ref->rb_node_node, parent, p);
1132 rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
1134 new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
1137 if (ref->data.desc > new_ref->data.desc)
1139 new_ref->data.desc = ref->data.desc + 1;
1147 if (new_ref->data.desc < ref->data.desc)
1149 else if (new_ref->data.desc > ref->data.desc)
1154 rb_link_node(&new_ref->rb_node_desc, parent, p);
1155 rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
1158 hlist_add_head(&new_ref->node_entry, &node->refs);
1162 proc->pid, new_ref->data.debug_id, new_ref->data.desc,
1165 return new_ref;
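
The matches above (source lines 1088-1165) all fall inside binder_get_ref_for_node_olocked(): when no ref for the node exists yet, the caller-supplied new_ref is initialized, linked into the proc's refs_by_node and refs_by_desc rb-trees and the node's refs hlist, and returned. The descriptor picked at lines 1134-1139 is the smallest unused value, with desc 0 reserved for the context manager's node. The sketch below is a minimal user-space model of just that assignment rule; it walks already-used descriptors in ascending order the way the driver walks refs_by_desc, but the sorted array, pick_desc() and its parameters are invented for illustration and are not driver API.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Model of lines 1134-1139: start at 0 for the context manager's node,
 * otherwise at 1, then bump past every descriptor already in use until
 * a gap is found.
 */
static uint32_t pick_desc(const uint32_t *used_sorted, size_t n, bool is_ctx_mgr)
{
	uint32_t desc = is_ctx_mgr ? 0 : 1;
	size_t i;

	for (i = 0; i < n; i++) {
		if (used_sorted[i] > desc)
			break;			/* gap found: desc is free */
		desc = used_sorted[i] + 1;
	}
	return desc;
}

int main(void)
{
	uint32_t used[] = { 0, 1, 2, 4 };	/* descriptors already handed out */

	printf("%u\n", pick_desc(used, 4, false));	/* prints 3 */
	return 0;
}
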
1420 struct binder_ref *new_ref = NULL;
1427 new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
1428 if (!new_ref)
1431 ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
1435 if (ret && ref == new_ref) {
1441 * the node. The new_ref gets kfree'd below.
1443 binder_cleanup_ref_olocked(new_ref);
1448 if (new_ref && ref != new_ref)
1453 kfree(new_ref);
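
The second group of matches (source lines 1420-1453) is the caller, binder_inc_ref_for_node(): it first looks the ref up with no candidate, and only if that fails does it drop the proc lock, kzalloc() a new_ref, retake the lock and repeat the lookup, now passing the candidate in. If another thread created the ref in that window, the returned ref differs from new_ref and the unused candidate is kfree'd at line 1453; if the subsequent increment fails on a ref we did just create (the ret && ref == new_ref check at line 1435), the ref is torn down with binder_cleanup_ref_olocked() before that same kfree. The sketch below reproduces the allocate-outside-the-lock / free-the-loser shape in user-space C with a pthread mutex and a toy list; struct entry, lookup_or_insert_locked() and get_or_create() are invented names for this illustration, not driver code.

#include <pthread.h>
#include <stdlib.h>

struct entry {
	int key;
	struct entry *next;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table;

/* With table_lock held: return the entry for key, inserting new_e if absent. */
static struct entry *lookup_or_insert_locked(int key, struct entry *new_e)
{
	struct entry *e;

	for (e = table; e; e = e->next)
		if (e->key == key)
			return e;
	if (!new_e)
		return NULL;			/* mirrors the !new_ref early return */
	new_e->key = key;
	new_e->next = table;
	table = new_e;
	return new_e;
}

static struct entry *get_or_create(int key)
{
	struct entry *e, *new_e = NULL;

	pthread_mutex_lock(&table_lock);
	e = lookup_or_insert_locked(key, NULL);
	if (!e) {
		/* Allocate with the lock dropped, as the driver does around kzalloc(). */
		pthread_mutex_unlock(&table_lock);
		new_e = calloc(1, sizeof(*new_e));
		if (!new_e)
			return NULL;
		pthread_mutex_lock(&table_lock);
		e = lookup_or_insert_locked(key, new_e);
	}
	pthread_mutex_unlock(&table_lock);

	/* Another thread created the entry first: free the unused candidate. */
	if (new_e && e != new_e)
		free(new_e);
	return e;
}

int main(void)
{
	return get_or_create(42) ? 0 : 1;
}

Allocating outside the lock matters in the driver because GFP_KERNEL allocations can sleep while the proc lock is a spinlock; the price is the race window, which is exactly what the kfree path at lines 1448-1453 cleans up.
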