Lines Matching refs:node

12  * Each node type has a number of slots for entries and a number of slots for
14 * and are simply the slot index + the minimum of the node.
43 * a slot, but the last offset has an implied pivot from the node above (or
44 * UINT_MAX for the root node).
137 * dead node and restart on updates.
175 struct maple_node *node = container_of(head, struct maple_node, rcu);
177 kmem_cache_free(maple_node_cache, node);
181 * ma_free_rcu() - Use rcu callback to free a maple node
182 * @node: The node to free
184 * The maple tree uses the parent pointer to indicate this node is no longer in
187 static void ma_free_rcu(struct maple_node *node)
189 WARN_ON(node->parent != ma_parent_ptr(node));
190 call_rcu(&node->rcu, mt_free_rcu);
241 mas->node = MA_ERROR(err);
246 return mas->node == MAS_ROOT;
251 return mas->node == MAS_START;
256 return xa_is_err(mas->node);
261 if (unlikely(mas->node == MAS_OVERFLOW))
269 if (unlikely(mas->node == MAS_UNDERFLOW))
292 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
293 * @entry: The maple encoded node
304 * mas_mn() - Get the maple state node.
307 * Return: the maple node (not encoded - bare pointer).
311 return mte_to_node(mas->node);
315 * mte_set_node_dead() - Set a maple encoded node as dead.
316 * @mn: The maple encoded node.
324 /* Bit 1 indicates the root is a node */
331 static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
334 return (void *)((unsigned long)node |
338 static inline void *mte_mk_root(const struct maple_enode *node)
340 return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
343 static inline void *mte_safe_root(const struct maple_enode *node)
345 return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
348 static inline void *mte_set_full(const struct maple_enode *node)
350 return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
353 static inline void *mte_clear_full(const struct maple_enode *node)
355 return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
358 static inline bool mte_has_null(const struct maple_enode *node)
360 return (unsigned long)node & MAPLE_ENODE_NULL;
363 static inline bool ma_is_root(struct maple_node *node)
365 return ((unsigned long)node->parent & MA_ROOT_PARENT);
368 static inline bool mte_is_root(const struct maple_enode *node)
370 return ma_is_root(mte_to_node(node));
388 * a reuse of the last bit in the node type. This is possible by using bit 1 to
449 * Return: The node->parent maple_type
473 * mas_set_parent() - Set the parent node and encode the slot
474 * @enode: The encoded maple node.
475 * @parent: The encoded maple node that is the parent of @enode.
506 val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
513 * @enode: The encoded maple node.
515 * Return: The slot in the parent node where @enode resides.
532 * mte_parent() - Get the parent of @node.
533 * @node: The encoded maple node.
535 * Return: The parent maple node.
545 * @enode: The encoded maple node
549 static inline bool ma_dead_node(const struct maple_node *node)
553 /* Do not reorder reads from the node prior to the parent check */
555 parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
556 return (parent == node);
561 * @enode: The encoded maple node
567 struct maple_node *parent, *node;
569 node = mte_to_node(enode);
570 /* Do not reorder reads from the node prior to the parent check */
573 return (parent == node);
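The fragments above (source lines 549-573) spell out the dead-node convention: a retired node has its parent pointer set back to itself, and lock-free readers check for that before trusting anything else read from the node. A minimal sketch of that convention, with hypothetical helper names that are not taken from the source:

	/* Sketch only: mark_node_dead()/node_is_dead() are illustrative names. */
	static void mark_node_dead(struct maple_node *node)
	{
		/* A dead node's parent pointer refers back to the node itself. */
		node->parent = ma_parent_ptr(node);
	}

	static bool node_is_dead(struct maple_node *node)
	{
		struct maple_node *parent;

		/* Readers order their node reads before this check (see line 553). */
		smp_rmb();
		parent = (void *)((unsigned long)node->parent & ~MAPLE_NODE_MASK);
		return parent == node;
	}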
581 * allocated node or to the number of requested nodes to allocate. If bit 0 is
583 * allocated node, then the total number of allocated nodes is stored in that node.
600 * The requested number of allocations is either in the first allocated node,
602 * no allocated node. Set the request either in the node or do the necessary
623 * @mas->alloc->request_count if there is at least one node allocated. Decode
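Source lines 581-623 above describe the overloaded mas->alloc field: with bit 0 set the pointer value is really an encoded request count, otherwise the first allocated node (if any) holds the totals. A rough decoding sketch, assuming the bit-0 convention stated above; the helper name is made up for illustration:

	/* Sketch of the bit-0 request encoding described above. */
	static unsigned long sketch_alloc_req(const struct ma_state *mas)
	{
		unsigned long bits = (unsigned long)mas->alloc;

		if (bits & 0x1)		/* no node yet: pointer carries a count */
			return bits >> 1;
		if (mas->alloc)		/* count kept in the first allocated node */
			return mas->alloc->request_count;
		return 0;
	}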
638 * ma_pivots() - Get a pointer to the maple node pivots.
639 * @node - the maple node
640 * @type - the node type
642 * In the event of a dead node, this array may be %NULL
644 * Return: A pointer to the maple node pivots
646 static inline unsigned long *ma_pivots(struct maple_node *node,
651 return node->ma64.pivot;
654 return node->mr64.pivot;
662 * ma_gaps() - Get a pointer to the maple node gaps.
663 * @node - the maple node
664 * @type - the node type
666 * Return: A pointer to the maple node gaps
668 static inline unsigned long *ma_gaps(struct maple_node *node,
673 return node->ma64.gap;
683 * mas_pivot() - Get the pivot at @piv of the maple encoded node.
691 struct maple_node *node = mas_mn(mas);
692 enum maple_type type = mte_node_type(mas->node);
701 return node->ma64.pivot[piv];
704 return node->mr64.pivot[piv];
714 * @pivots: The pointer to the maple node pivots
716 * @type: The maple node type
734 * @pivots: The pointer to the maple node pivots
749 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
750 * @mn: The encoded maple node
757 struct maple_node *node = mte_to_node(mn);
765 node->mr64.pivot[piv] = val;
768 node->ma64.pivot[piv] = val;
777 * ma_slots() - Get a pointer to the maple node slots.
778 * @mn: The maple node
779 * @mt: The maple node type
781 * Return: A pointer to the maple node slots
887 * ma_set_meta() - Set the metadata information of a node.
888 * @mn: The maple node
889 * @mt: The maple node type
890 * @offset: The offset of the highest sub-gap in this node.
891 * @end: The end of the data in this node.
903 * mt_clear_meta() - clear the metadata information of a node, if it exists
905 * @mn: The maple node
906 * @type: The maple node type
907 * @offset: The offset of the highest sub-gap in this node.
908 * @end: The end of the data in this node.
927 return; /* no metadata, could be node */
942 * ma_meta_end() - Get the data end of a node from the metadata
943 * @mn: The maple node
944 * @mt: The maple node type
955 * ma_meta_gap() - Get the largest gap location of a node from the metadata
956 * @mn: The maple node
957 * @mt: The maple node type
967 * @mn: The maple node
968 * @mt: The maple node type
983 * @dead_enode - the node to be marked as dead and added to the tail of the list
1014 struct maple_node *node;
1019 node = mte_to_node(mat->head);
1022 call_rcu(&node->rcu, mt_free_walk);
1036 struct maple_node *node;
1039 node = mas_mn(mas);
1040 type = mte_node_type(mas->node);
1041 pivots = ma_pivots(node, type);
1042 slots = ma_slots(node, type);
1047 mas->node = mas_slot(mas, slots, mas->offset);
1051 * mte_set_gap() - Set a maple node gap.
1052 * @mn: The encoded maple node
1074 * May find a dead node which will cause a premature return.
1075 * Return: 1 on dead node, 0 otherwise
1081 struct maple_node *a_node; /* ancestor node. */
1082 struct maple_node *p_node; /* parent node. */
1095 p_node = mte_parent(mas->node);
1099 a_type = mas_parent_type(mas, mas->node);
1100 mas->offset = mte_parent_slot(mas->node);
1104 if (p_node != mte_parent(mas->node))
1107 mas->node = a_enode;
1158 * mas_pop_node() - Get a previously allocated maple node from the maple state.
1161 * Return: A pointer to a maple node.
1165 struct maple_alloc *ret, *node = mas->alloc;
1176 ret = node;
1180 if (node->node_count == 1) {
1181 /* Single allocation in this node. */
1182 mas->alloc = node->slot[0];
1183 mas->alloc->total = node->total - 1;
1184 ret = node;
1187 node->total--;
1188 ret = node->slot[--node->node_count];
1189 node->slot[node->node_count] = NULL;
1203 * mas_push_node() - Push a node back on the maple state allocation.
1205 * @used: The used maple node
1207 * Stores the maple node back into @mas->alloc for reuse. Updates allocated and
1208 * requested node count as necessary.
1247 struct maple_alloc *node;
1265 node = (struct maple_alloc *)mt_alloc_one(gfp);
1266 if (!node)
1270 node->slot[0] = mas->alloc;
1271 node->node_count = 1;
1273 node->node_count = 0;
1276 mas->alloc = node;
1277 node->total = ++allocated;
1281 node = mas->alloc;
1282 node->request_count = 0;
1284 max_req = MAPLE_ALLOC_SLOTS - node->node_count;
1285 slots = (void **)&node->slot[node->node_count];
1291 if (node->node_count == 0) {
1292 node->slot[0]->node_count = 0;
1293 node->slot[0]->request_count = 0;
1296 node->node_count += count;
1298 node = node->slot[0];
1315 * mas_free() - Free an encoded maple node
1317 * @used: The encoded maple node to free.
1366 * If mas->node == MAS_START, then set the min, max and depth to
1370 * - If mas->node is an error or not MAS_START, return NULL.
1371 * - If it's an empty tree: NULL & mas->node == MAS_NONE
1372 * - If it's a single entry: The entry & mas->node == MAS_ROOT
1373 * - If it's a tree: NULL & mas->node == safe root node.
1389 mas->node = mte_safe_root(root);
1391 if (mte_dead_node(mas->node))
1399 mas->node = MAS_NONE;
1405 mas->node = MAS_ROOT;
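The mas_start() outcomes listed at source lines 1366-1405 are what the public walk helpers rely on. A short usage sketch (kernel context assumed, tree already populated); mas_walk() performs the mas_start() handling internally:

	/* Sketch: an RCU-protected lookup exercising the states above. */
	static void *lookup_example(struct maple_tree *mt, unsigned long index)
	{
		MA_STATE(mas, mt, index, index);
		void *entry;

		rcu_read_lock();
		entry = mas_walk(&mas);	/* NULL on an empty tree (MAS_NONE), etc. */
		rcu_read_unlock();

		return entry;
	}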
1419 * ma_data_end() - Find the end of the data in a node.
1420 * @node: The maple node
1421 * @type: The maple node type
1422 * @pivots: The array of pivots in the node
1423 * @max: The maximum value in the node
1428 static inline unsigned char ma_data_end(struct maple_node *node,
1439 return ma_meta_end(node, type);
1443 return ma_meta_end(node, type);
1455 * This method is optimized to check the metadata of a node if the node type
1463 struct maple_node *node;
1467 type = mte_node_type(mas->node);
1468 node = mas_mn(mas);
1470 return ma_meta_end(node, type);
1472 pivots = ma_pivots(node, type);
1473 if (unlikely(ma_dead_node(node)))
1478 return ma_meta_end(node, type);
1487 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
1502 mt = mte_node_type(mas->node);
1538 * node.
1563 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
1564 * @node: The maple node
1566 * @mt: The maple node type
1574 ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
1580 i = offset = ma_meta_end(node, mt);
1593 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
1603 struct maple_node *node;
1605 mt = mte_node_type(mas->node);
1609 node = mas_mn(mas);
1611 offset = ma_meta_gap(node, mt);
1612 gaps = ma_gaps(node, mt);
1623 * of the parent to see if it is necessary to check the node above.
1635 pnode = mte_parent(mas->node);
1636 pmt = mas_parent_type(mas, mas->node);
1663 /* Go to the parent node. */
1685 if (mte_is_root(mas->node))
1690 pslot = mte_parent_slot(mas->node);
1691 p_gap = ma_gaps(mte_parent(mas->node),
1692 mas_parent_type(mas, mas->node))[pslot];
1702 * @parent - the maple encoded node containing the children.
1708 struct maple_node *node = mte_to_node(parent);
1709 void __rcu **slots = ma_slots(node, type);
1710 unsigned long *pivots = ma_pivots(node, type);
1714 offset = ma_data_end(node, type, pivots, mas->max);
1722 * mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old
1723 * node as dead.
1724 * @mas - the maple state with the new node
1725 * @old_enode - The old maple encoded node to replace.
1734 if (mte_is_root(mas->node)) {
1736 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
1740 offset = mte_parent_slot(mas->node);
1741 slots = ma_slots(mte_parent(mas->node),
1742 mas_parent_type(mas, mas->node));
1743 rcu_assign_pointer(slots[offset], mas->node);
1750 * mas_replace_node() - Replace a node by putting it in the tree, marking it
1752 * the parent encoding to locate the maple node in the tree.
1753 * @mas - the ma_state with @mas->node pointing to the new node.
1754 * @old_enode - The old maple encoded node.
1765 * mas_find_child() - Find a child whose parent is @mas->node.
1777 struct maple_node *node;
1780 mt = mte_node_type(mas->node);
1781 node = mas_mn(mas);
1782 slots = ma_slots(node, mt);
1783 pivots = ma_pivots(node, mt);
1784 end = ma_data_end(node, mt, pivots, mas->max);
1787 if (mte_parent(entry) == node) {
1817 * mab_middle_node() - Check if a middle node is needed (unlikely)
1821 * @slot_count: the size that can be stored in a single node being considered.
1823 * Return: true if a middle node is required.
1843 * @slot_count: the number of slots in the node being considered.
1880 * To support gap tracking, all NULL entries are kept together and a node cannot
1882 * limitation means that the split of a node must be checked for this condition
1915 * causes one node to be deficient.
1924 /* Avoid ending a node on a NULL entry */
1947 struct maple_node *node;
1953 node = mas_mn(mas);
1954 mt = mte_node_type(mas->node);
1955 pivots = ma_pivots(node, mt);
1979 slots = ma_slots(node, mt);
1982 gaps = ma_gaps(node, mt);
1991 * @node: The maple node
1992 * @pivots: pointer to the maple node pivots
1998 * node during a write.
2001 struct maple_node *node, unsigned long *pivots,
2012 ma_set_meta(node, mt, 0, end);
2016 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
2020 * @mas: The maple state with the maple encoded node.
2027 enum maple_type mt = mte_node_type(mas->node);
2028 struct maple_node *node = mte_to_node(mas->node);
2029 void __rcu **slots = ma_slots(node, mt);
2030 unsigned long *pivots = ma_pivots(node, mt);
2056 gaps = ma_gaps(node, mt);
2065 ma_set_meta(node, mt, offset, end);
2067 mas_leaf_set_meta(mas, node, pivots, mt, end);
2074 * @end: The maple node end
2075 * @mt: The maple node type
2083 if (mte_is_root(mas->node))
2094 * data from a maple encoded node.
2158 /* Copy end data to the end of the node. */
2168 * mas_prev_sibling() - Find the previous node with the same parent.
2175 unsigned int p_slot = mte_parent_slot(mas->node);
2177 if (mte_is_root(mas->node))
2190 * mas_next_sibling() - Find the next node with the same parent.
2199 if (mte_is_root(mas->node))
2204 parent.offset = mte_parent_slot(mas->node) + 1;
2214 * mte_node_or_none() - Return the encoded node or MAS_NONE.
2215 * @enode: The encoded maple node.
2246 wr_mas->node = mas_mn(wr_mas->mas);
2247 wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
2248 count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
2261 * mast_rebalance_next() - Rebalance against the next node
2263 * @old_r: The encoded maple node to the right (next node).
2269 mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
2275 * mast_rebalance_prev() - Rebalance against the previous node
2277 * @old_l: The encoded maple node to the left (previous node)
2294 * the node to the right. Checking the nodes to the right then the left at each
2334 } while (!mte_is_root(mast->orig_r->node));
2360 wr_mas.type = mte_node_type(mast->orig_r->node);
2366 wr_mas.type = mte_node_type(mast->orig_l->node);
2373 * mas_new_ma_node() - Create and return a new maple node. Helper function.
2377 * Use the node type from the maple_big_node to allocate a new node from the
2380 * Return: A new maple encoded node
2392 * @b_node: the node which contains the data.
2393 * @left: The pointer which will have the left node
2394 * @right: The pointer which may have the right node
2395 * @middle: the pointer which may have the middle node (rare)
2396 * @mid_split: the split location for the middle node
2430 * @b_node - the big node to add the entry
2449 * of @mas->node to either @left or @right, depending on @slot and @split
2451 * @mas - the maple state with the node that needs a parent
2454 * @slot - the slot the mas->node was placed
2466 mas_set_parent(mas, mas->node, left, *slot);
2468 mas_set_parent(mas, mas->node, right, (*slot) - split - 1);
2474 * mte_mid_split_check() - Check if the next node passes the mid-split
2475 * @**l: Pointer to left encoded maple node.
2476 * @**m: Pointer to middle encoded maple node.
2477 * @**r: Pointer to right encoded maple node.
2504 * @left - the left node
2505 * @right - the right node
2538 * mas_topiary_node() - Dispose of a single node
2540 * @enode: The encoded maple node
2543 * The node will either be RCU freed or pushed back on the maple state.
2575 * @old_enode: The maple encoded node being replaced
2586 /* Place data in tree & then mark node as old */
2592 tmp[1].node = MAS_NONE;
2593 tmp[2].node = MAS_NONE;
2594 while (!mte_is_leaf(tmp[0].node)) {
2606 mas_adopt_children(&tmp[i], tmp[i].node);
2613 tmp_next[n++].node = MAS_NONE;
2625 tmp[0].node = old_enode;
2626 tmp[1].node = MAS_NONE;
2627 tmp[2].node = MAS_NONE;
2641 mat_add(&subtrees, tmp_next[n].node);
2642 tmp_next[n].node = MAS_NONE;
2653 tmp_next[n++].node = MAS_NONE;
2656 mas_topiary_node(mas, tmp[i].node, in_rcu);
2659 } while (!mte_is_leaf(tmp[0].node));
2662 mas_topiary_node(mas, tmp[i].node, in_rcu);
2670 * @old: The old maple encoded node that is being replaced.
2680 if (mte_is_leaf(mas->node))
2689 * @left: The left encoded maple node
2690 * @middle: The middle encoded maple node
2691 * @right: The right encoded maple node
2701 mast->l->node = mte_node_or_none(left);
2702 mast->m->node = mte_node_or_none(middle);
2703 mast->r->node = mte_node_or_none(right);
2728 * combined data set in the maple subtree state big node.
2743 * combined data set in the maple subtree state big node.
2752 mt_slot_count(mast->orig_r->node), mast->bn,
2759 * node to create at least one sufficient node
2764 if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
2772 * single node.
2777 if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
2787 struct maple_node *node;
2795 next = mas->node;
2801 node = mte_to_node(next);
2803 pivots = ma_pivots(node, type);
2804 end = ma_data_end(node, type, pivots, max);
2805 if (unlikely(ma_dead_node(node)))
2826 slots = ma_slots(node, type);
2828 if (unlikely(ma_dead_node(node)))
2837 mas->node = last;
2882 l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
2904 mast->bn->type = mte_node_type(mast->orig_l->node);
2919 /* Root already stored in l->node. */
2930 /* Copy anything necessary out of the right node. */
2951 l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
2952 mte_node_type(mast->orig_l->node));
2955 mas_set_parent(mas, left, l_mas.node, slot);
2957 mas_set_parent(mas, middle, l_mas.node, ++slot);
2960 mas_set_parent(mas, right, l_mas.node, ++slot);
2965 while (!mte_is_root(mast->orig_l->node))
2971 old_enode = mast->orig_l->node;
2973 mas->node = l_mas.node;
2983 * mas_rebalance() - Rebalance a given node.
2985 * @b_node: The big maple node.
2987 * Rebalance two nodes into a single node or two new nodes that are sufficient.
3005 * Rebalancing occurs if a node is insufficient. Data is rebalanced
3006 * against the node to the right if it exists, otherwise the node to the
3007 * left of this node is rebalanced against this node. If rebalancing
3008 * causes just one node to be produced instead of two, then the parent
3010 * tries to combine the data in the same way. If one node contains the
3011 * entire range of the tree, then that node is used as a new root node.
3020 mast.bn->type = mte_node_type(mas->node);
3025 mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
3041 * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
3044 * @end: The end of the left-most node.
3047 * rebalance the left-most node when it is not sufficient.
3051 enum maple_type mt = mte_node_type(mas->node);
3052 struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
3064 /* set up node. */
3076 node = mas_mn(mas);
3077 newnode->parent = node->parent;
3090 memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
3091 memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
3095 old_eparent = mt_mk_node(mte_parent(l_mas.node),
3096 mas_parent_type(&l_mas, l_mas.node));
3109 memcpy(node, newnode, sizeof(struct maple_node));
3110 ma_set_meta(node, mt, 0, tmp - 1);
3111 mte_set_pivot(old_eparent, mte_parent_slot(l_mas.node),
3125 mas->node = mt_mk_node(newnode, mt);
3130 mt = mte_node_type(l_mas.node);
3136 l_mas.node = mt_mk_node(new_left, mt);
3139 offset = mte_parent_slot(mas->node);
3140 mt = mas_parent_type(&l_mas, l_mas.node);
3145 rcu_assign_pointer(slots[offset], mas->node);
3146 rcu_assign_pointer(slots[offset - 1], l_mas.node);
3151 mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
3153 mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
3158 mas_adopt_children(mas, mas->node);
3165 * mas_split_final_node() - Split the final node in a subtree operation.
3175 if (mte_is_root(mas->node)) {
3183 * Only a single node is used here, could be root.
3184 * The Big_node data should just fit in a single node.
3187 mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset);
3188 mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset);
3191 mast->l->node = ancestor;
3198 * mast_fill_bnode() - Copy data into the big node in the subtree state
3215 if (mte_is_root(mas->node)) {
3219 mas->offset = mte_parent_slot(mas->node);
3226 mab_set_b_end(mast->bn, mast->l, mast->l->node);
3228 mab_set_b_end(mast->bn, mast->r, mast->r->node);
3233 mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
3237 mast->bn->type = mte_node_type(mas->node);
3241 * mast_split_data() - Split the data in the subtree state big node into regular
3245 * @split: The location to split the big node
3253 mte_set_pivot(mast->r->node, 0, mast->r->max);
3255 mast->l->offset = mte_parent_slot(mas->node);
3258 if (mte_is_leaf(mas->node))
3262 mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
3264 mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
3269 * mas_push_data() - Instead of splitting a node, it is beneficial to push the
3270 * data to the right or left node if there is room.
3297 space = 2 * mt_slot_count(mas->node) - 2;
3321 /* Switch mas to prev node */
3324 tmp_mas.node = mast->l->node;
3327 tmp_mas.node = mast->r->node;
3343 * mas_split() - Split data that is too big for one node into two.
3345 * @b_node: The maple big node
3397 l_mas.node = mas_new_ma_node(mas, b_node);
3398 r_mas.node = mas_new_ma_node(mas, b_node);
3401 * left or right node has space to spare. This is referred to as "pushing left"
3426 /* Set the original node as dead */
3427 old = mas->node;
3428 mas->node = l_mas.node;
3435 * mas_reuse_node() - Reuse the node to store the data.
3437 * @bn: The maple big node
3442 * Return: True if node was reused, false otherwise.
3462 * mas_commit_b_node() - Commit the big node into the tree.
3464 * @b_node: The maple big node
3470 struct maple_node *node;
3475 old_enode = wr_mas->mas->node;
3491 node = mas_pop_node(wr_mas->mas);
3492 node->parent = mas_mn(wr_mas->mas)->parent;
3493 wr_mas->mas->node = mt_mk_node(node, b_type);
3502 * mas_root_expand() - Expand a root to a node
3510 struct maple_node *node;
3519 node = mas_pop_node(mas);
3520 pivots = ma_pivots(node, type);
3521 slots = ma_slots(node, type);
3522 node->parent = ma_parent_ptr(mas_tree_parent(mas));
3523 mas->node = mt_mk_node(node, type);
3542 ma_set_meta(node, maple_leaf_64, 0, slot);
3544 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3556 mas->node = MAS_START;
3562 * spans the node.
3565 * @type: The maple node type
3568 * Spanning writes are writes that start in one node and end in another OR if
3569 * the write of a %NULL will cause the node to end with a %NULL.
3592 * The last entry of a leaf node cannot be NULL unless it is the
3593 * rightmost node (writing ULONG_MAX), otherwise it spans slots.
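As a hypothetical illustration of the spanning case described at source lines 3568-3593: a store whose range starts in one leaf and ends in another (or that would leave a leaf ending in NULL) is routed to the spanning-store path rather than the single-node paths. Whether a given range actually spans depends on the tree's current layout:

	/* Sketch, kernel context assumed: a wide range is likely to cross a
	 * leaf boundary and therefore take the spanning-store handling. */
	static int store_span_example(struct maple_tree *mt, void *entry)
	{
		return mtree_store_range(mt, 100, 100000, entry, GFP_KERNEL);
	}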
3605 wr_mas->type = mte_node_type(wr_mas->mas->node);
3607 wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3614 wr_mas->mas->node = wr_mas->content;
3624 * Return: True if it's contained in a node, false on spanning write.
3722 * Return: The entry for @mas->index or %NULL on dead node.
3728 struct maple_node *node;
3735 next = mas->node;
3739 node = mte_to_node(next);
3741 pivots = ma_pivots(node, type);
3742 end = ma_data_end(node, type, pivots, max);
3743 if (unlikely(ma_dead_node(node)))
3752 slots = ma_slots(node, type);
3754 if (unlikely(ma_dead_node(node)))
3767 * mas_new_root() - Create a new root node that only contains the entry passed
3780 struct maple_node *node;
3788 mas->node = MAS_START;
3796 node = mas_pop_node(mas);
3797 pivots = ma_pivots(node, type);
3798 slots = ma_slots(node, type);
3799 node->parent = ma_parent_ptr(mas_tree_parent(mas));
3800 mas->node = mt_mk_node(node, type);
3805 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3816 * Note that mas is expected to point to the node which caused the store to
3841 * The data in the two nodes are combined into a single node, two nodes,
3843 * written to the last entry of a node is considered a spanning store as
3863 * store to ensure it's not NULL and to combine both the next node and
3864 * the node with the start together.
3913 * mas_wr_node_store() - Attempt to store the value in a node
3916 * Attempts to reuse the node, but may allocate.
3932 if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
3941 /* set up node. */
3972 * this range wrote to the end of the node or it overwrote the rest of
3979 /* Copy to the end of node if necessary. */
3992 struct maple_enode *old_enode = mas->node;
3994 mas->node = mt_mk_node(newnode, wr_mas->type);
3997 memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
4125 * @new_end: The end of the node after the modification
4127 * This is currently unsafe in rcu mode since the end of the node may be cached
4128 * by readers while the node contents may be updated which could result in
4153 ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end);
4215 * new_end exceeds the size of the maple node and cannot enter the fast
4261 /* At this point, we are at the leaf node that needs to be altered. */
4293 * usual, the entry must be written. Most operations require a new node
4294 * to be allocated and replace an existing node to ensure RCU safety,
4295 * when in RCU mode. The exception to requiring a newly allocated node
4296 * is when inserting at the end of a node (appending). When done
4297 * carefully, appending can reuse the node in place.
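A sketch of the append case mentioned at source lines 4293-4297, using the public mas_* API (kernel context assumed, lock taken via mas_lock()); whether the node is reused in place is decided internally:

	/* Sketch: storing just past the current last range may take the
	 * in-place append path described above. */
	static int append_example(struct maple_tree *mt, unsigned long index,
				  void *entry)
	{
		MA_STATE(mas, mt, index, index);
		int ret;

		mas_lock(&mas);
		ret = mas_store_gfp(&mas, entry, GFP_KERNEL);
		mas_unlock(&mas);

		return ret;
	}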
4312 /* At this point, we are at the leaf node that needs to be altered. */
4341 struct maple_node *node, const unsigned long index)
4343 if (unlikely(ma_dead_node(node))) {
4352 * tree. The prev value will be mas->node[mas->offset] or MAS_NONE.
4356 * The prev node value will be mas->node[mas->offset] or MAS_NONE.
4357 * Return: 1 if the node is dead, 0 otherwise.
4364 struct maple_node *node;
4368 node = mas_mn(mas);
4378 if (ma_is_root(node))
4386 node = mas_mn(mas);
4390 mt = mte_node_type(mas->node);
4393 slots = ma_slots(node, mt);
4394 mas->node = mas_slot(mas, slots, offset);
4395 if (unlikely(ma_dead_node(node)))
4398 mt = mte_node_type(mas->node);
4399 node = mas_mn(mas);
4400 pivots = ma_pivots(node, mt);
4401 offset = ma_data_end(node, mt, pivots, max);
4402 if (unlikely(ma_dead_node(node)))
4406 slots = ma_slots(node, mt);
4407 mas->node = mas_slot(mas, slots, offset);
4408 pivots = ma_pivots(node, mt);
4409 if (unlikely(ma_dead_node(node)))
4416 if (unlikely(mte_dead_node(mas->node)))
4422 if (unlikely(ma_dead_node(node)))
4425 mas->node = MAS_NONE;
4435 * @set_underflow: Set the @mas->node to underflow state on limit.
4447 struct maple_node *node;
4451 node = mas_mn(mas);
4452 type = mte_node_type(mas->node);
4453 pivots = ma_pivots(node, type);
4454 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4460 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4482 node = mas_mn(mas);
4483 type = mte_node_type(mas->node);
4484 pivots = ma_pivots(node, type);
4488 slots = ma_slots(node, type);
4490 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4507 mas->node = MAS_UNDERFLOW;
4512 * mas_next_node() - Get the next node at the same level in the tree.
4516 * The next value will be mas->node[mas->offset] or MAS_NONE.
4517 * Return: 1 on dead node, 0 otherwise.
4519 static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
4536 if (ma_is_root(node))
4544 node = mas_mn(mas);
4545 mt = mte_node_type(mas->node);
4546 pivots = ma_pivots(node, mt);
4547 node_end = ma_data_end(node, mt, pivots, mas->max);
4548 if (unlikely(ma_dead_node(node)))
4553 slots = ma_slots(node, mt);
4556 if (unlikely(ma_dead_node(node)))
4564 mas->node = enode;
4565 node = mas_mn(mas);
4566 mt = mte_node_type(mas->node);
4567 slots = ma_slots(node, mt);
4569 if (unlikely(ma_dead_node(node)))
4574 pivots = ma_pivots(node, mt);
4577 if (unlikely(ma_dead_node(node)))
4580 mas->node = enode;
4585 if (unlikely(ma_dead_node(node)))
4588 mas->node = MAS_NONE;
4598 * @set_overflow: Should @mas->node be set to overflow when the limit is
4610 struct maple_node *node;
4616 node = mas_mn(mas);
4617 type = mte_node_type(mas->node);
4618 pivots = ma_pivots(node, type);
4619 data_end = ma_data_end(node, type, pivots, mas->max);
4620 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4629 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4645 if (mas_next_node(mas, node, max)) {
4651 mas->node = MAS_OVERFLOW;
4658 node = mas_mn(mas);
4659 type = mte_node_type(mas->node);
4660 pivots = ma_pivots(node, type);
4664 slots = ma_slots(node, type);
4666 if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4685 mas->node = MAS_OVERFLOW;
4694 * Set the @mas->node to the next entry and the range_start to
4705 mas->node = MAS_OVERFLOW;
4714 * highest gap address of a given size in a given node and descend.
4724 enum maple_type type = mte_node_type(mas->node);
4725 struct maple_node *node = mas_mn(mas);
4741 pivots = ma_pivots(node, type);
4742 slots = ma_slots(node, type);
4743 gaps = ma_gaps(node, type);
4793 mas->node = mas_slot(mas, slots, offset);
4800 if (!mte_is_root(mas->node))
4810 enum maple_type type = mte_node_type(mas->node);
4815 struct maple_node *node;
4823 node = mas_mn(mas);
4824 pivots = ma_pivots(node, type);
4825 slots = ma_slots(node, type);
4826 gaps = ma_gaps(node, type);
4829 data_end = ma_data_end(node, type, pivots, mas->max);
4850 mas->node = mas_slot(mas, slots, offset);
4865 if (mte_is_root(mas->node))
4877 * mas->node is MAS_NONE, reset to MAS_START.
4886 mas->node = MAS_START;
4902 mas->node = MAS_NONE;
4915 if (mte_is_root(mas->node)) {
4930 * mas_skip_node() - Internal function. Skip over a node.
4933 * Return: true if there is another node, false otherwise.
4941 if (mte_is_root(mas->node)) {
4975 if (last == mas->node)
4978 last = mas->node;
5053 return xa_err(mas->node);
5059 mt = mte_node_type(mas->node);
5080 struct maple_enode *last = mas->node;
5106 if (last == mas->node) {
5110 last = mas->node;
5115 return xa_err(mas->node);
5130 * mte_dead_leaves() - Mark all leaves of a node as dead.
5133 * @type: The maple node type
5143 struct maple_node *node;
5151 node = mte_to_node(entry);
5152 /* Use both node and type to catch LE & BE metadata */
5153 if (!node || !type)
5157 node->type = type;
5158 rcu_assign_pointer(slots[offset], node);
5166 * @enode: The maple encoded node
5173 struct maple_node *node, *next;
5179 node = mte_to_node(*enode);
5180 slots = ma_slots(node, node->type);
5191 * @head: The RCU head that's within the node.
5198 struct maple_node *node, *start;
5203 node = container_of(head, struct maple_node, rcu);
5205 if (ma_is_leaf(node->type))
5208 start = node;
5209 enode = mt_mk_node(node, node->type);
5211 node = mte_to_node(enode);
5213 mt_free_bulk(node->slot_len, slots);
5214 offset = node->parent_slot + 1;
5215 enode = node->piv_parent;
5216 if (mte_to_node(enode) == node)
5225 node = mte_to_node(enode);
5226 } while ((node != start) || (node->slot_len < offset));
5228 slots = ma_slots(node, node->type);
5229 mt_free_bulk(node->slot_len, slots);
5232 mt_free_rcu(&node->rcu);
5238 struct maple_node *node;
5246 node = mte_to_node(*enode);
5248 slots = ma_slots(node, type);
5254 node->type = type;
5255 node->piv_parent = prev;
5256 node->parent_slot = offset;
5269 struct maple_node *node = mte_to_node(enode);
5273 node->type = mte_node_type(enode);
5279 node = mte_to_node(enode); // Updated in the above call.
5285 node->slot_len = mte_dead_leaves(enode, mt, slots);
5287 mt_free_bulk(node->slot_len, slots);
5288 offset = node->parent_slot + 1;
5289 enode = node->piv_parent;
5290 if (mte_to_node(enode) == node)
5305 node = mte_to_node(enode);
5308 node = mte_to_node(enode);
5309 node->slot_len = mte_dead_leaves(enode, mt, slots);
5311 mt_free_bulk(node->slot_len, slots);
5315 mt_free_rcu(&node->rcu);
5317 mt_clear_meta(mt, node, node->type);
5322 * @enode: the encoded maple node (maple_enode) to start
5323 * @mt: the tree to free - needed for node types.
5330 struct maple_node *node = mte_to_node(enode);
5334 call_rcu(&node->rcu, mt_free_walk);
5361 * writes within this node. This is to stop partial walks in
5370 if (mte_is_leaf(wr_mas->mas->node) &&
5442 return xa_err(mas->node);
5497 /* At this point, we are at the leaf node that needs to be altered. */
5507 /* reuse node */
5521 /* New root needs a single node */
5522 if (unlikely(mte_is_root(mas->node)))
5525 /* Potential spanning rebalance collapsing a node, use worst-case */
5529 /* node store, slot store needs one node */
5537 ret = xa_err(mas->node);
5549 * Upon completion, check the left-most node and rebalance against the node to
5555 struct maple_alloc *node;
5561 * number. To fix an invalid final node, a check is performed here to
5562 * rebalance the previous node with the final node.
5570 if (end < mt_min_slot_count(mas->node) - 1)
5579 node = mas->alloc;
5580 mas->alloc = node->slot[0];
5581 if (node->node_count > 1) {
5582 size_t count = node->node_count - 1;
5584 mt_free_bulk(count, (void __rcu **)&node->slot[1]);
5587 kmem_cache_free(maple_node_cache, node);
5610 struct maple_enode *enode = mas->node;
5649 ret = xa_err(mas->node);
5650 mas->node = enode;
5663 mas->node = MAS_OVERFLOW;
5671 mas->node = MAS_START;
5674 mas->node = MAS_START;
5676 mas->node = MAS_START;
5693 mas->node = MAS_NONE;
5777 mas->node = MAS_UNDERFLOW;
5785 mas->node = MAS_START;
5792 mas->node = MAS_START;
5795 mas->node = MAS_START;
5813 mas->node = MAS_ROOT;
5823 mas->node = MAS_NONE;
5833 * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not
5856 * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not
5911 mas->node = MAS_PAUSE;
5938 mas->node = MAS_START;
5944 mas->node = MAS_START;
5947 mas->node = MAS_OVERFLOW;
5951 mas->node = MAS_START;
5978 mas->node = MAS_NONE;
5992 * May set @mas->node to MAS_NONE.
6016 * May set @mas->node to MAS_NONE.
6052 mas->node = MAS_NONE;
6055 mas->node = MAS_START;
6062 mas->node = MAS_START;
6065 mas->node = MAS_UNDERFLOW;
6069 mas->node = MAS_START;
6092 mas->node = MAS_ROOT;
6104 mas->node = MAS_NONE;
6117 * May set @mas->node to MAS_NONE.
6143 * May set @mas->node to MAS_NONE.
6176 mas->node = MAS_START;
6205 if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6221 mas->node = MAS_START;
6302 return xa_err(mas.node);
6355 return xa_err(ms.node);
6406 ret = xa_err(mas.node);
6444 ret = xa_err(mas.node);
6635 * mas_dead_node() - Check if the maple state is pointing to a dead node.
6647 if (likely(!mte_dead_node(mas->node)))
6675 * mas_get_slot() - Get the entry in the maple state node stored at @offset.
6684 return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
6692 struct maple_enode *p = MAS_NONE, *mn = mas->node;
6702 mas->node = mn;
6705 p = mas->node;
6711 mas->node = p;
6761 struct maple_range_64 *node = &mte_to_node(entry)->mr64;
6770 pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
6774 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6777 pr_cont("%p\n", node->slot[i]);
6782 last = node->pivot[i];
6783 else if (!node->slot[i] && max != mt_node_max(entry))
6788 mt_dump_entry(mt_slot(mt, node->slot, i),
6790 else if (node->slot[i])
6791 mt_dump_node(mt, mt_slot(mt, node->slot, i),
6799 pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
6800 node, last, max, i);
6804 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6805 node, last, max, i);
6816 struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
6825 pr_cont("%lx ", node->gap[i]);
6829 pr_cont("%lu ", node->gap[i]);
6832 pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
6836 pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
6840 pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6843 pr_cont("%p\n", node->slot[i]);
6848 last = node->pivot[i];
6849 else if (!node->slot[i])
6854 mt_dump_entry(mt_slot(mt, node->slot, i),
6856 else if (node->slot[i])
6857 mt_dump_node(mt, mt_slot(mt, node->slot, i),
6863 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6864 node, last, max, i);
6875 struct maple_node *node = mte_to_node(entry);
6881 pr_cont("node %p depth %d type %d parent %p", node, depth, type,
6882 node ? node->parent : NULL);
6889 mt_dump_entry(mt_slot(mt, node->slot, i),
6920 * Calculate the maximum gap in a node and check if that's what is reported in
6925 struct maple_enode *mte = mas->node;
6926 struct maple_node *p_mn, *node = mte_to_node(mte);
6927 enum maple_type mt = mte_node_type(mas->node);
6932 unsigned long *pivots = ma_pivots(node, mt);
6948 gaps = ma_gaps(node, mt);
6979 offset = ma_meta_gap(node, mt);
6981 pr_err("gap offset %p[%u] is invalid\n", node, offset);
6987 node, offset, max_gap);
6994 pr_err("gap %p[%u] beyond node limit != 0\n",
6995 node, i);
7004 p_slot = mte_parent_slot(mas->node);
7017 struct maple_enode *node;
7023 if (mte_is_root(mas->node))
7026 p_slot = mte_parent_slot(mas->node);
7027 p_type = mas_parent_type(mas, mas->node);
7028 parent = mte_parent(mas->node);
7032 /* Check prev/next parent slot for duplicate node entry */
7035 node = mas_slot(mas, slots, i);
7037 if (node != mas->node)
7040 MT_BUG_ON(mas->tree, node != mas->node);
7041 } else if (node == mas->node) {
7044 MT_BUG_ON(mas->tree, node == mas->node);
7051 enum maple_type type = mte_node_type(mas->node);
7052 void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7053 unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
7057 if (mte_is_leaf(mas->node))
7064 pr_err("Non-leaf node lacks child at %p[%u]\n",
7076 if (mte_parent(child) != mte_to_node(mas->node)) {
7079 mte_to_node(mas->node));
7097 enum maple_type type = mte_node_type(mas->node);
7098 void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7107 pr_err("Missing node limit pivot at %p[%u]",
7134 pr_err("node %p: data_end %u != the last slot offset %u\n",
7169 if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
7172 while (!mte_is_leaf(mas.node))
7175 slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
7189 slots = ma_slots(mte_to_node(mas.node),
7190 mte_node_type(mas.node));
7213 while (!mte_is_leaf(mas.node))
7217 MAS_WARN_ON(&mas, mte_dead_node(mas.node));
7219 if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
7240 pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
7260 pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
7261 wr_mas->node, wr_mas->r_min, wr_mas->r_max);