Lines Matching refs:wr_mas

2095  * @wr_mas: the maple write state
2101 static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
2108 struct ma_state *mas = wr_mas->mas;
2110 b_node->type = wr_mas->type;
2123 b_node->slot[b_end] = wr_mas->content;
2124 if (!wr_mas->content)
2131 b_node->slot[b_end] = wr_mas->entry;
2139 piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
2142 mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
2145 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
2148 b_node->slot[++b_end] = wr_mas->content;
2149 if (!wr_mas->content)
2155 if (slot > wr_mas->node_end)
2159 mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
2231 * @wr_mas: The maple write state
2235 static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
2237 struct ma_state *mas = wr_mas->mas;
2240 if (unlikely(ma_is_dense(wr_mas->type))) {
2241 wr_mas->r_max = wr_mas->r_min = mas->index;
2246 wr_mas->node = mas_mn(wr_mas->mas);
2247 wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
2248 count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
2249 wr_mas->pivots, mas->max);
2252 while (offset < count && mas->index > wr_mas->pivots[offset])
2255 wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
2256 wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
2257 wr_mas->offset_end = mas->offset = offset;
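
The scan at 2252 is the heart of mas_wr_node_walk(): find the first slot whose pivot covers mas->index. A standalone sketch of that search, with pivots/count/index standing in for the node fields (the helper name is illustrative, not the kernel's):

        /*
         * Return the smallest offset with index <= pivots[offset].  If
         * index lies beyond every stored pivot, the entry is in the
         * final slot, whose upper bound is the node's max, not a pivot.
         */
        static unsigned char pivot_search(const unsigned long *pivots,
                                          unsigned char count,
                                          unsigned long index)
        {
                unsigned char offset = 0;

                while (offset < count && index > pivots[offset])
                        offset++;

                return offset;
        }

With the offset in hand, r_max is pivots[offset] (or mas->max when offset == count) and r_min is one past the previous pivot, which is exactly what 2255-2256 record.
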
2350 MA_WR_STATE(wr_mas, mast->orig_r, NULL);
2360 wr_mas.type = mte_node_type(mast->orig_r->node);
2361 mas_wr_node_walk(&wr_mas);
2365 wr_mas.mas = mast->orig_l;
2366 wr_mas.type = mte_node_type(mast->orig_l->node);
2367 mas_wr_node_walk(&wr_mas);
2369 mast->bn->type = wr_mas.type;
3436 * @wr_mas: The maple write state
3444 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
3448 if (mt_in_rcu(wr_mas->mas->tree))
3452 int clear = mt_slots[wr_mas->type] - bn->b_end;
3454 memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
3455 memset(wr_mas->pivots + bn->b_end, 0, sizeof(unsigned long) * clear);
3457 mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
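
The post-decrement between the two memsets at 3454-3455 is deliberate: a node with N slots carries only N - 1 pivots, the last slot being bounded by the node's max, so one fewer pivot than slot needs clearing. A toy version, with SLOTS standing in for mt_slots[type]:

        #include <string.h>

        #define SLOTS 16                        /* illustrative, not mt_slots[] */

        struct toy_node {                       /* stand-in for a maple node */
                void *slot[SLOTS];
                unsigned long pivot[SLOTS - 1]; /* one fewer pivot than slots */
        };

        /* Zero everything past b_end, mirroring mas_reuse_node(). */
        static void clear_tail(struct toy_node *n, unsigned char b_end)
        {
                int clear = SLOTS - b_end;

                if (clear <= 0)
                        return;                 /* nothing to clear */

                memset(n->slot + b_end, 0, sizeof(void *) * clear--);
                memset(n->pivot + b_end, 0, sizeof(unsigned long) * clear);
        }

Reuse is only legal when the tree is not under RCU (3448): concurrent readers could otherwise observe the node mid-rewrite, so the RCU case falls back to allocating a replacement node in mas_commit_b_node().
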
3463 * @wr_mas: The maple write state
3467 static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
3475 old_enode = wr_mas->mas->node;
3478 (mas_mt_height(wr_mas->mas) > 1))
3479 return mas_rebalance(wr_mas->mas, b_node);
3482 return mas_split(wr_mas->mas, b_node);
3484 if (mas_reuse_node(wr_mas, b_node, end))
3487 mas_node_count(wr_mas->mas, 1);
3488 if (mas_is_err(wr_mas->mas))
3491 node = mas_pop_node(wr_mas->mas);
3492 node->parent = mas_mn(wr_mas->mas)->parent;
3493 wr_mas->mas->node = mt_mk_node(node, b_type);
3494 mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
3495 mas_replace_node(wr_mas->mas, old_enode);
3497 mas_update_gap(wr_mas->mas);
3573 static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
3575 unsigned long max = wr_mas->r_max;
3576 unsigned long last = wr_mas->mas->last;
3577 enum maple_type type = wr_mas->type;
3578 void *entry = wr_mas->entry;
3585 max = wr_mas->mas->max;
3599 trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
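
A write "spans" when mas->last runs past the area the current node covers; for a leaf the bound is the node's max (3585) rather than the slot's r_max. A minimal sketch of the test (parameter names are illustrative):

        #include <stdbool.h>

        static bool spans_node(unsigned long last, unsigned long r_max,
                               unsigned long node_max, bool leaf)
        {
                return last > (leaf ? node_max : r_max);
        }

The real function is subtler right at the boundary: a write ending exactly on the node's max only spans when it stores NULL short of ULONG_MAX, because a leaf's last entry may not be NULL unless it is the rightmost node.
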
3603 static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
3605 wr_mas->type = mte_node_type(wr_mas->mas->node);
3606 mas_wr_node_walk(wr_mas);
3607 wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3610 static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
3612 wr_mas->mas->max = wr_mas->r_max;
3613 wr_mas->mas->min = wr_mas->r_min;
3614 wr_mas->mas->node = wr_mas->content;
3615 wr_mas->mas->offset = 0;
3616 wr_mas->mas->depth++;
3620 * @wr_mas: The maple write state
3626 static bool mas_wr_walk(struct ma_wr_state *wr_mas)
3628 struct ma_state *mas = wr_mas->mas;
3631 mas_wr_walk_descend(wr_mas);
3632 if (unlikely(mas_is_span_wr(wr_mas)))
3635 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3637 if (ma_is_leaf(wr_mas->type))
3640 mas_wr_walk_traverse(wr_mas);
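
Pieced together, 3628-3640 form the descent loop: drop one level at a time, bail out to the spanning path when the write crosses a node boundary, and stop at the leaf that will be modified. Reconstructed shape (assembled from the fragments above, not a verbatim copy):

        while (true) {
                mas_wr_walk_descend(wr_mas);
                if (unlikely(mas_is_span_wr(wr_mas)))
                        return false;   /* caller takes the spanning-store path */

                wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
                                                  mas->offset);
                if (ma_is_leaf(wr_mas->type))
                        return true;    /* reached the leaf to modify */

                mas_wr_walk_traverse(wr_mas);   /* descend into the child */
        }

mas_wr_walk_index() at 3646 is the same loop minus the spanning check; the spanning store uses it to walk its left and right boundary states once the split is known.
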
3646 static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
3648 struct ma_state *mas = wr_mas->mas;
3651 mas_wr_walk_descend(wr_mas);
3652 wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3654 if (ma_is_leaf(wr_mas->type))
3656 mas_wr_walk_traverse(wr_mas);
3818 * @wr_mas: The maple write state
3822 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
3832 MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
3833 MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
3847 mas = wr_mas->mas;
3851 return mas_new_root(mas, wr_mas->entry);
3879 if (!wr_mas->entry) {
3889 return mas_new_root(mas, wr_mas->entry);
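
The spanning store is the slow path for writes that cross node boundaries. The two shadow states at 3832-3833 walk to the leaves holding mas->index and mas->last, everything between them is dropped, and the surviving left and right pieces are rebuilt into a new subtree. Worked example: with leaves covering 0-50 and 51-100, storing over 40-60 can be served by neither leaf alone, so both sides are walked and reassembled as 0-39, the new 40-60 entry, and 61-100. A write that ends up covering the entire tree short-circuits to mas_new_root() instead (3851, 3889).
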
3914 * @wr_mas: The maple write state
3920 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
3923 struct ma_state *mas = wr_mas->mas;
3926 unsigned char dst_offset, offset_end = wr_mas->offset_end;
3928 unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
3932 if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
3936 if (mas->last == wr_mas->end_piv)
3938 else if (unlikely(wr_mas->r_max == ULONG_MAX))
3939 mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
3954 dst_pivots = ma_pivots(newnode, wr_mas->type);
3955 dst_slots = ma_slots(newnode, wr_mas->type);
3957 memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
3958 memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);
3961 if (wr_mas->r_min < mas->index) {
3962 rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
3969 rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);
3975 if (offset_end > wr_mas->node_end)
3980 copy_size = wr_mas->node_end - offset_end + 1;
3981 memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
3983 memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
3994 mas->node = mt_mk_node(newnode, wr_mas->type);
3997 memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
3999 trace_ma_write(__func__, mas, 0, wr_mas->entry);
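
The copies at 3957-3983 build the replacement node as prefix + new entry + suffix. A standalone sketch of that slot-copy pattern (names and bounds are illustrative; the real code also writes back the split-off head of the overwritten range at 3961-3962 and copies the pivot array alongside the slots):

        #include <string.h>

        static void store_copy(void **dst, void *const *src,
                               unsigned char offset,      /* where the write lands */
                               unsigned char offset_end,  /* first suffix slot kept */
                               unsigned char src_end,     /* last used slot in src */
                               void *entry)
        {
                unsigned char d = offset;

                memcpy(dst, src, sizeof(void *) * offset);      /* prefix */
                dst[d++] = entry;                               /* the new entry */
                if (offset_end <= src_end)                      /* suffix, if any */
                        memcpy(dst + d, src + offset_end,
                               sizeof(void *) * (src_end - offset_end + 1));
        }

When the tree is not under RCU the finished node is memcpy'd over the old one in place (3997); under RCU the node pointer is swapped instead (3994) so readers never see a half-written node.
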
4006 * @wr_mas: the maple write state
4010 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
4012 struct ma_state *mas = wr_mas->mas;
4014 void __rcu **slots = wr_mas->slots;
4020 if (wr_mas->offset_end - offset == 1) {
4021 if (mas->index == wr_mas->r_min) {
4023 rcu_assign_pointer(slots[offset], wr_mas->entry);
4024 wr_mas->pivots[offset] = mas->last;
4027 rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
4028 wr_mas->pivots[offset] = mas->index - 1;
4037 rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
4038 wr_mas->pivots[offset] = mas->index - 1;
4039 wr_mas->pivots[offset + 1] = mas->last;
4045 trace_ma_write(__func__, mas, 0, wr_mas->entry);
4050 if (!wr_mas->entry || gap)
4056 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
4058 struct ma_state *mas = wr_mas->mas;
4060 if (!wr_mas->slots[wr_mas->offset_end]) {
4062 mas->last = wr_mas->end_piv;
4065 if ((mas->last == wr_mas->end_piv) &&
4066 (wr_mas->node_end != wr_mas->offset_end) &&
4067 !wr_mas->slots[wr_mas->offset_end + 1]) {
4068 wr_mas->offset_end++;
4069 if (wr_mas->offset_end == wr_mas->node_end)
4072 mas->last = wr_mas->pivots[wr_mas->offset_end];
4073 wr_mas->end_piv = mas->last;
4077 if (!wr_mas->content) {
4079 mas->index = wr_mas->r_min;
4082 if (mas->index == wr_mas->r_min && mas->offset &&
4083 !wr_mas->slots[mas->offset - 1]) {
4085 wr_mas->r_min = mas->index =
4086 mas_safe_min(mas, wr_mas->pivots, mas->offset);
4087 wr_mas->r_max = wr_mas->pivots[mas->offset];
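
Worked example for the absorption above, assuming a leaf holding 0-4 => A, 5-10 => B, 11-20 => NULL: storing NULL over 5-10 finds the following slot already NULL (4060), so mas->last is pushed out to 20 (4062) and the write leaves one combined NULL range 5-20 instead of two adjacent NULL slots. The second half (4077-4087) does the mirror-image absorption at the front when the overwritten content or the preceding slot is NULL, pulling mas->index back. Keeping NULL ranges coalesced is what lets a gap be treated as a single entry everywhere else.
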
4092 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
4094 while ((wr_mas->offset_end < wr_mas->node_end) &&
4095 (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
4096 wr_mas->offset_end++;
4098 if (wr_mas->offset_end < wr_mas->node_end)
4099 wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
4101 wr_mas->end_piv = wr_mas->mas->max;
4103 if (!wr_mas->entry)
4104 mas_wr_extend_null(wr_mas);
4107 static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
4109 struct ma_state *mas = wr_mas->mas;
4110 unsigned char new_end = wr_mas->node_end + 2;
4112 new_end -= wr_mas->offset_end - mas->offset;
4113 if (wr_mas->r_min == mas->index)
4116 if (wr_mas->end_piv == mas->last)
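
The arithmetic at 4110-4116 counts the slots the node will hold after the write: start from node_end + 2 (worst case, the overwritten range splits into head + new entry + tail), subtract offset_end - mas->offset for the existing ranges the write consumes, then one more for each end of the write that lands exactly on an existing boundary. Two worked cases:

  - Overwriting one whole range (offset_end == offset, r_min == index,
    end_piv == last): node_end + 2 - 0 - 1 - 1 = node_end, no growth.
  - Writing strictly inside one range, touching neither boundary:
    node_end + 2, two extra slots for the split-off head and tail.
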
4124 * @wr_mas: the maple write state
4133 static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
4140 mas = wr_mas->mas;
4144 if (mas->offset != wr_mas->node_end)
4147 end = wr_mas->node_end;
4151 if (new_end < mt_pivots[wr_mas->type]) {
4152 wr_mas->pivots[new_end] = wr_mas->pivots[end];
4153 ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end);
4156 slots = wr_mas->slots;
4158 if (mas->last == wr_mas->r_max) {
4160 rcu_assign_pointer(slots[new_end], wr_mas->entry);
4161 wr_mas->pivots[end] = mas->index - 1;
4165 rcu_assign_pointer(slots[new_end], wr_mas->content);
4166 wr_mas->pivots[end] = mas->last;
4167 rcu_assign_pointer(slots[end], wr_mas->entry);
4171 rcu_assign_pointer(slots[new_end], wr_mas->content);
4172 wr_mas->pivots[end + 1] = mas->last;
4173 rcu_assign_pointer(slots[end + 1], wr_mas->entry);
4174 wr_mas->pivots[end] = mas->index - 1;
4178 if (!wr_mas->content || !wr_mas->entry)
4181 trace_ma_write(__func__, mas, new_end, wr_mas->entry);
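
These branches are the fast in-place appends to the node's last used slot; they are only tried when the write lands there (4144) and the caller has verified the node has room (4219). Worked example for the two-slot case at 4171-4174: the final slot covers 50-100 => A and B is stored over 60-80. The result is 50-59 => A (pivots[end] = index - 1), 60-80 => B at end + 1, and A re-appended at new_end to keep 81-100, all without rebuilding the node. The one-slot cases (4158-4166) handle writes flush against either end of the old range.
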
4187  * @wr_mas: The maple write state
4191 static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4195 trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4197 mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4198 mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
4201 static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
4203 struct ma_state *mas = wr_mas->mas;
4207 if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
4208 rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4209 if (!!wr_mas->entry ^ !!wr_mas->content)
4218 new_end = mas_wr_new_end(wr_mas);
4219 if (new_end >= mt_slots[wr_mas->type])
4223 if (mas_wr_append(wr_mas, new_end))
4226 if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
4229 if (mas_wr_node_store(wr_mas, new_end))
4236 mas_wr_bnode(wr_mas);
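
Read together, 4203-4236 are a decision ladder ordered from cheapest to most expensive. Reconstructed shape (assembled from the fragments above, not a verbatim copy):

        /* Exact fit: overwrite the slot in place. */
        if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
                rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
                if (!!wr_mas->entry ^ !!wr_mas->content)
                        mas_update_gap(mas);
                return;
        }

        new_end = mas_wr_new_end(wr_mas);
        if (new_end >= mt_slots[wr_mas->type])
                goto slow_path;                 /* node would overflow */

        if (mas_wr_append(wr_mas, new_end))
                return;                         /* append to the last slot */

        if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
                return;                         /* same-size in-place store */

        if (mas_wr_node_store(wr_mas, new_end))
                return;                         /* rebuild this one node */

slow_path:
        mas_wr_bnode(wr_mas);                   /* big-node slow path */

The !!entry ^ !!content test at 4209 fires only when NULL is replaced by a value or vice versa, the one single-slot change that can alter the node's largest gap.
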
4246 static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
4248 struct ma_state *mas = wr_mas->mas;
4250 wr_mas->content = mas_start(mas);
4252 mas_store_root(mas, wr_mas->entry);
4253 return wr_mas->content;
4256 if (unlikely(!mas_wr_walk(wr_mas))) {
4257 mas_wr_spanning_store(wr_mas);
4258 return wr_mas->content;
4262 mas_wr_end_piv(wr_mas);
4265 mas_new_root(mas, wr_mas->entry);
4266 return wr_mas->content;
4269 mas_wr_modify(wr_mas);
4270 return wr_mas->content;
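
4248-4270 dispatch the whole write. Reconstructed shape (assembled from the fragments above, not a verbatim copy):

        wr_mas->content = mas_start(mas);
        if (mas_is_none(mas) || mas_is_ptr(mas)) {
                mas_store_root(mas, wr_mas->entry);     /* empty or single-pointer tree */
                return wr_mas->content;
        }

        if (unlikely(!mas_wr_walk(wr_mas))) {
                mas_wr_spanning_store(wr_mas);          /* write crosses node boundaries */
                return wr_mas->content;
        }

        /* At the leaf that needs altering. */
        mas_wr_end_piv(wr_mas);
        if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
                mas_new_root(mas, wr_mas->entry);       /* write covers the whole range */
                return wr_mas->content;
        }

        mas_wr_modify(wr_mas);
        return wr_mas->content;

In every path the previous contents of the written range's first slot are returned, which is what the public store variants hand back to callers.
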
4283 MA_WR_STATE(wr_mas, mas, entry);
4299 wr_mas.content = mas_start(mas);
4300 if (wr_mas.content)
4309 if (!mas_wr_walk(&wr_mas))
4313 wr_mas.offset_end = mas->offset;
4314 wr_mas.end_piv = wr_mas.r_max;
4316 if (wr_mas.content || (mas->last > wr_mas.r_max))
4322 mas_wr_modify(&wr_mas);
4323 return wr_mas.content;
4327 return wr_mas.content;
5340 static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5342 if (!mas_is_active(wr_mas->mas)) {
5343 if (mas_is_start(wr_mas->mas))
5346 if (unlikely(mas_is_paused(wr_mas->mas)))
5349 if (unlikely(mas_is_none(wr_mas->mas)))
5352 if (unlikely(mas_is_overflow(wr_mas->mas)))
5355 if (unlikely(mas_is_underflow(wr_mas->mas)))
5364 if (wr_mas->mas->last > wr_mas->mas->max)
5367 if (wr_mas->entry)
5370 if (mte_is_leaf(wr_mas->mas->node) &&
5371 wr_mas->mas->last == wr_mas->mas->max)
5377 mas_reset(wr_mas->mas);
5395 MA_WR_STATE(wr_mas, mas, entry);
5415 mas_wr_store_setup(&wr_mas);
5416 mas_wr_store_entry(&wr_mas);
5417 return wr_mas.content;
5432 MA_WR_STATE(wr_mas, mas, entry);
5434 mas_wr_store_setup(&wr_mas);
5437 mas_wr_store_entry(&wr_mas);
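
Typical caller pattern for the public entry points above; a sketch where tree and my_entry are illustrative names, not kernel symbols:

        MA_STATE(mas, &tree, 10, 19);   /* operate on the range [10, 19] */
        int ret;

        mas_lock(&mas);
        ret = mas_store_gfp(&mas, my_entry, GFP_KERNEL);
        mas_unlock(&mas);
        if (ret)
                return ret;             /* e.g. -ENOMEM */

mas_store_gfp() may drop and retake the tree's internal lock to sleep for node allocations (unless the tree uses an external lock); callers that cannot tolerate a failing or sleeping store use the preallocating pair sketched below, after 5526.
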
5456 MA_WR_STATE(wr_mas, mas, entry);
5458 mas_wr_store_setup(&wr_mas);
5460 mas_wr_store_entry(&wr_mas);
5461 MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
5476 MA_WR_STATE(wr_mas, mas, entry);
5485 mas_wr_store_setup(&wr_mas);
5486 wr_mas.content = mas_start(mas);
5491 if (unlikely(!mas_wr_walk(&wr_mas))) {
5499 if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last)
5502 mas_wr_end_piv(&wr_mas);
5503 node_size = mas_wr_new_end(&wr_mas);
5506 if (node_size == wr_mas.node_end) {
5511 if (wr_mas.offset_end - mas->offset == 1)
5515 if (node_size >= mt_slots[wr_mas.type]) {
5526 if (node_size - 1 <= mt_min_slots[wr_mas.type])
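
mas_preallocate() sizes its reservation by simulating the write (5486-5526: walk to the leaf, compute the post-write slot count, and charge for a split or rebalance as needed). The payoff is a two-phase store that cannot fail at commit time; a sketch assuming the three-argument mas_preallocate() of recent kernels, with tree/index/last/entry as illustrative names:

        MA_STATE(mas, &tree, index, last);

        if (mas_preallocate(&mas, entry, GFP_KERNEL))
                return -ENOMEM;         /* reserve worst-case nodes up front */

        /* ...past the point of no return: this store cannot fail... */
        mas_store_prealloc(&mas, entry);

mas_store_prealloc() consumes the reservation and BUGs rather than returning an error (5461); if the store is abandoned instead, mas_destroy() releases the unused nodes. This is the pattern the VMA code relies on for changes that cannot be rolled back halfway.
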
6173 MA_WR_STATE(wr_mas, mas, NULL);
6186 mas_wr_store_setup(&wr_mas);
6187 mas_wr_store_entry(&wr_mas);
6285 MA_WR_STATE(wr_mas, &mas, entry);
6296 mas_wr_store_entry(&wr_mas);
7258 void mas_wr_dump(const struct ma_wr_state *wr_mas)
7261 wr_mas->node, wr_mas->r_min, wr_mas->r_max);
7263 wr_mas->type, wr_mas->offset_end, wr_mas->node_end,
7264 wr_mas->end_piv);