Lines matching refs:keys (each entry: source line number, then the matching line; the file appears to be drivers/md/bcache/btree.c)

18  * as keys are inserted we only sort the pages that have not yet been written.
52 * Check for bad keys in replay
115 if (b->level && b->keys.nsets)
116 bch_btree_sort(&b->keys, &b->c->sort);
118 bch_btree_sort_lazy(&b->keys, &b->c->sort);
121 bch_bset_init_next(&b->keys, write_block(b),
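
The write path above (lines 115-121) fully sorts interior nodes but uses bch_btree_sort_lazy() for leaves, which is what the comment at line 18 is about: bsets already written to disk are immutable, so only the still-unwritten tail is worth resorting. A minimal userspace sketch of that idea, with simplified stand-in types (struct node, sort_lazy() and flat integer keys are hypothetical, not the bcache structures):

    #include <stdlib.h>
    #include <string.h>

    #define MAX_SETS 4

    struct kset {               /* one sorted run of keys (a "bset") */
        unsigned long *keys;
        size_t nkeys;
    };

    struct node {
        struct kset set[MAX_SETS];
        unsigned nsets;         /* sets currently in use */
        unsigned written;       /* sets already on disk: immutable */
    };

    static int cmp_ul(const void *a, const void *b)
    {
        unsigned long x = *(const unsigned long *)a;
        unsigned long y = *(const unsigned long *)b;

        return (x > y) - (x < y);
    }

    /*
     * Merge only the sets that have not been written yet.  Sets below
     * node->written were sorted when they went to disk and never
     * change, so resorting them would be wasted work.
     */
    static void sort_lazy(struct node *b)
    {
        size_t total = 0, n = 0;
        unsigned long *out;
        unsigned i;

        for (i = b->written; i < b->nsets; i++)
            total += b->set[i].nkeys;
        if (b->nsets - b->written < 2 || !total)
            return;

        out = malloc(total * sizeof(*out));
        if (!out)
            return;

        for (i = b->written; i < b->nsets; i++) {
            memcpy(out + n, b->set[i].keys,
                   b->set[i].nkeys * sizeof(*out));
            n += b->set[i].nkeys;
            free(b->set[i].keys);
        }
        qsort(out, n, sizeof(*out), cmp_ul);    /* stands in for the merge */

        b->set[b->written].keys = out;
        b->set[b->written].nkeys = n;
        b->nsets = b->written + 1;
    }
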
164 iter->b = &b->keys;
171 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
199 if (i != b->keys.set[0].data && !i->keys)
209 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
211 if (i->seq == b->keys.set[0].data->seq)
214 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
216 i = b->keys.set[0].data;
218 if (b->keys.set[0].size &&
219 bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
223 bch_bset_init_next(&b->keys, write_block(b),
230 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
232 bset_block_offset(b, i), i->keys);
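
Lines 171-232 are the read-side sanity checks: every bset read from disk must carry the same seq as the node's first bset, and a failure produces the bucket/block/key-count error at line 230. A sketch of the seq-based cutoff, under the simplifying assumption that bsets written for the same incarnation of a node share the first bset's sequence number:

    #include <stdint.h>

    struct dset { uint64_t seq; };      /* stand-in for struct bset */

    /*
     * Walk forward until a bset's seq stops matching the first one;
     * past that point we are reading stale blocks, not valid data.
     */
    static unsigned count_valid_sets(const struct dset *set,
                                     unsigned max_sets)
    {
        unsigned i;

        if (!max_sets)
            return 0;
        for (i = 1; i < max_sets; i++)
            if (set[i].seq != set[0].seq)
                break;
        return i;
    }
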
259 bch_bio_map(bio, b->keys.set[0].data);
373 bset_sector_offset(&b->keys, i));
413 BUG_ON(b->written && !i->keys);
415 bch_check_keys(&b->keys, "writing");
436 unsigned int nsets = b->keys.nsets;
446 if (nsets && !b->keys.nsets)
483 BUG_ON(!i->keys);
528 bch_btree_keys_free(&b->keys);
550 if (!bch_btree_keys_alloc(&b->keys,
597 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
599 if (b->keys.page_order < min_order)
671 * succeed, so that inserting keys into the btree can always succeed and
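
The fragment at line 671 belongs to a comment about guaranteeing that inserting keys into the btree can always succeed. One common way to back such a guarantee is a preallocated reserve that the critical path only ever pops from; a free-standing sketch of that pattern (hypothetical names, not the bcache allocator):

    #include <stddef.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    struct reserve {
        struct node *freelist;
        size_t nfree;
        size_t min_free;        /* nodes we insist on keeping banked */
    };

    /* Refill outside the critical path, where failing is acceptable. */
    static int reserve_refill(struct reserve *r)
    {
        while (r->nfree < r->min_free) {
            struct node *n = malloc(sizeof(*n));

            if (!n)
                return -1;
            n->next = r->freelist;
            r->freelist = n;
            r->nfree++;
        }
        return 0;
    }

    /*
     * The insert path only pops from the reserve, so as long as
     * callers refill before descending it cannot fail.
     */
    static struct node *reserve_pop(struct reserve *r)
    {
        struct node *n = r->freelist;

        if (n) {
            r->freelist = n->next;
            r->nfree--;
        }
        return n;
    }
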
804 c->verify_data->keys.set->data)
923 if (!b->keys.set[0].data)
934 if (!b->keys.set->data)
951 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
954 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
1027 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1028 prefetch(b->keys.set[i].tree);
1029 prefetch(b->keys.set[i].data);
1032 for (; i <= b->keys.nsets; i++)
1033 prefetch(b->keys.set[i].data);
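
Lines 1027-1033 issue prefetches for each set's auxiliary search tree and key data before the lookup touches them, so the cache misses overlap instead of serializing. The same pattern in miniature, using the GCC/Clang builtin that the kernel's generic prefetch() macro falls back to (struct set is a simplified stand-in):

    struct set {
        void *tree;     /* auxiliary search structure */
        void *data;     /* the keys themselves */
    };

    /* Issue all the prefetches up front, then do the actual search. */
    static void prefetch_sets(const struct set *set, unsigned nsets)
    {
        unsigned i;

        for (i = 0; i < nsets; i++) {
            __builtin_prefetch(set[i].tree);
            __builtin_prefetch(set[i].data);
        }
    }
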
1123 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
1152 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1210 * ptr_invalid() can't return true for the keys that mark btree nodes as
1284 unsigned int keys = 0, good_keys = 0;
1291 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1293 keys++;
1295 if (bch_ptr_bad(&b->keys, k))
1305 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1307 bset_written(&b->keys, t) &&
1317 if ((keys - good_keys) * 2 > keys)
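
The check at line 1317 decides whether GC should rewrite the node: it fires when the bad keys form a strict majority. Multiplying instead of dividing keeps the comparison exact for odd counts; a tiny illustration:

    #include <stdbool.h>

    /*
     * (keys - good_keys) * 2 > keys  <=>  bad_keys > keys / 2,
     * i.e. more than half of the node's keys turned out to be bad.
     */
    static bool should_rewrite(unsigned keys, unsigned good_keys)
    {
        return (keys - good_keys) * 2 > keys;
    }
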
1327 unsigned int keys;
1338 unsigned int i, nodes = 0, keys = 0, blocks;
1353 keys += r[nodes++].keys;
1358 __set_blocks(b->keys.set[0].data, keys,
1385 keys = 0;
1391 if (__set_blocks(n1, n1->keys + keys +
1397 keys += bkey_u64s(k);
1403 * the remaining keys into this node; we can't ensure
1405 * length keys (shouldn't be possible in practice,
1408 if (__set_blocks(n1, n1->keys + n2->keys,
1413 keys = n2->keys;
1418 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
1426 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1428 n1->keys += keys;
1429 r[i].keys = n1->keys;
1432 bset_bkey_idx(n2, keys),
1434 (void *) bset_bkey_idx(n2, keys));
1436 n2->keys -= keys;
1452 BUG_ON(btree_bset_first(new_nodes[0])->keys);
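
Lines 1338-1452 are the GC coalescing pass: keys are pulled from the front of n2 into the tail of n1 for as long as the combined node fits the block budget, with the comment at lines 1403-1405 covering the corner case where the leftovers cannot be guaranteed to fit because bkeys are variable length. A fixed-size sketch of the memcpy/memmove dance from lines 1426-1436 (hypothetical struct, no variable-length accounting):

    #include <string.h>

    struct kset {
        unsigned nkeys;
        unsigned long keys[64];
    };

    /*
     * Move leading keys of n2 into the tail of n1 up to the budget,
     * then shift n2's remainder down.  Returns how many keys moved;
     * 0 means this pair cannot be coalesced any further.
     */
    static unsigned coalesce_pair(struct kset *n1, struct kset *n2,
                                  unsigned max_keys)
    {
        unsigned moved = n1->nkeys >= max_keys ? 0 : max_keys - n1->nkeys;

        if (moved > n2->nkeys)
            moved = n2->nkeys;
        if (!moved)
            return 0;

        memcpy(n1->keys + n1->nkeys, n2->keys,
               moved * sizeof(n2->keys[0]));
        memmove(n2->keys, n2->keys + moved,
                (n2->nkeys - moved) * sizeof(n2->keys[0]));
        n1->nkeys += moved;
        n2->nkeys -= moved;
        return moved;
    }
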
1509 struct keylist keys;
1528 bch_keylist_init(&keys);
1529 bch_keylist_add(&keys, &n->key);
1531 make_btree_freeing_key(replace, keys.top);
1532 bch_keylist_push(&keys);
1534 bch_btree_insert_node(b, op, &keys, NULL, NULL);
1535 BUG_ON(!bch_keylist_empty(&keys));
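
Lines 1528-1535 show the node-replacement idiom: a keylist gets the new node's key, the freeing key for the old node is built in place at keys.top and committed with a push, and after inserting into the parent the list must be empty. A simplified model of that build-in-place keylist discipline (stand-in types and names, not the kernel's struct keylist):

    #include <assert.h>
    #include <stdbool.h>

    #define KL_CAP 8

    /* Simplified keylist: append at top, consume from bot. */
    struct keylist {
        unsigned long keys[KL_CAP];
        unsigned bot, top;
    };

    static void keylist_init(struct keylist *l)
    {
        l->bot = l->top = 0;
    }

    static bool keylist_empty(const struct keylist *l)
    {
        return l->bot == l->top;
    }

    /* Copy a finished key in and commit it in one step. */
    static void keylist_add(struct keylist *l, unsigned long k)
    {
        assert(l->top < KL_CAP);
        l->keys[l->top++] = k;
    }

    /* Expose the next free slot so a key can be built in place... */
    static unsigned long *keylist_top(struct keylist *l)
    {
        assert(l->top < KL_CAP);
        return &l->keys[l->top];
    }

    /* ...then commit it, mirroring the make-key/push pairing above. */
    static void keylist_push(struct keylist *l)
    {
        l->top++;
    }
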
1550 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1592 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1598 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1607 r->keys = btree_gc_count_keys(r->b);
1749 /* don't reclaim buckets to which writeback keys point */
1762 &dc->writeback_keys.keys, node)
1775 for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
1890 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1896 bch_btree_iter_init(&b->keys, &iter, NULL);
1899 k = bch_btree_iter_next_filter(&iter, &b->keys,
1935 /* root node keys are checked before thread created */
1936 bch_btree_iter_init(&c->root->keys, &iter, NULL);
1937 k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
1943 * Fetch a root node key index, skip the keys which
1956 &c->root->keys,
1962 * No more keys to check in root node,
2031 /* check and mark root node keys */
2032 for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
2141 status = bch_btree_insert_key(&b->keys, k, replace_key);
2143 bch_check_keys(&b->keys, "%u for %s", status,
2155 long ret = bch_btree_keys_u64s_remaining(&b->keys);
2160 if (b->keys.ops->is_extents)
2171 int oldsize = bch_count_data(&b->keys);
2174 struct bkey *k = insert_keys->keys;
2187 bkey_copy(&temp.key, insert_keys->keys);
2190 bch_cut_front(&b->key, insert_keys->keys);
2204 BUG_ON(bch_count_data(&b->keys) < oldsize);
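
Lines 2171-2204 handle an extent that straddles the end of the node: a temporary copy is clipped at the node boundary and inserted here, while the original key has its front cut off so the remainder carries over to the next node; the count_data checks bracket the whole thing. The clipping pair on half-open ranges (struct extent here is a stand-in for bcache's extent bkeys):

    /* Half-open extent [start, end). */
    struct extent { unsigned long start, end; };

    /* Drop everything before 'where' from the front of e. */
    static void cut_front(unsigned long where, struct extent *e)
    {
        if (e->start < where)
            e->start = where < e->end ? where : e->end;
    }

    /* Drop everything at or after 'where' from the back of e. */
    static void cut_back(unsigned long where, struct extent *e)
    {
        if (e->end > where)
            e->end = where > e->start ? where : e->start;
    }
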
2236 unsigned int keys = 0;
2238 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2260 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2261 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2262 keys));
2265 bset_bkey_idx(btree_bset_first(n1), keys));
2266 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2268 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2269 btree_bset_first(n1)->keys = keys;
2273 btree_bset_first(n2)->keys * sizeof(uint64_t));
2282 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
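
Lines 2236-2269 are the split path: since bkeys are variable length, the code walks whole keys linearly until roughly 3/5 of the bset's u64s stay in n1, then hands the rest to n2 (the ->keys field in these lines counts u64s, not keys). A sketch of finding that cut point (the length-prefixed key format is a stand-in):

    #include <stdint.h>

    /* A variable-length key whose first u64 is its length in u64s. */
    static inline unsigned key_u64s(const uint64_t *k)
    {
        return (unsigned)k[0];
    }

    /*
     * Split roughly 3/5 : 2/5 by u64 count, advancing a whole key at
     * a time so the cut never lands inside a key.  Assumes well-formed
     * keys with nonzero length.  Returns the number of u64s that stay
     * in the left node.
     */
    static unsigned split_point(const uint64_t *keys, unsigned total_u64s)
    {
        unsigned off = 0;

        while (off < total_u64s * 3 / 5)
            off += key_u64s(keys + off);
        return off;
    }
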
2356 b->keys.last_set_unwritten)
2438 struct keylist *keys;
2448 int ret = bch_btree_insert_node(b, &op->op, op->keys,
2450 if (ret && !bch_keylist_empty(op->keys))
2456 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2463 BUG_ON(bch_keylist_empty(keys));
2466 op.keys = keys;
2470 while (!ret && !bch_keylist_empty(keys)) {
2473 &START_KEY(keys->keys),
2482 while ((k = bch_keylist_pop(keys)))
2514 /* Map across nodes or keys */
2526 bch_btree_iter_init(&b->keys, &iter, from);
2528 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2559 bch_btree_iter_init(&b->keys, &iter, from);
2561 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
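
Lines 2514-2561 are the generic traversal behind the map-across-nodes-or-keys helpers: iterate the current node, recursing into children at interior levels and applying the callback otherwise, stopping early on a nonzero return. The shape of that recursion on a toy tree (all types hypothetical):

    /* Toy tree: interior nodes hold children, leaves hold keys. */
    struct bnode {
        int level;              /* 0 = leaf */
        unsigned n;
        struct bnode **child;   /* used when level > 0 */
        unsigned long *key;     /* used when level == 0 */
    };

    typedef int (*key_fn)(unsigned long key, void *arg);

    /*
     * Depth-first map: descend through interior nodes, call fn on
     * each leaf key, and bail out as soon as fn returns nonzero.
     */
    static int btree_map_keys(const struct bnode *b, key_fn fn, void *arg)
    {
        unsigned i;
        int ret = 0;

        for (i = 0; i < b->n && !ret; i++)
            ret = b->level
                ? btree_map_keys(b->child[i], fn, arg)
                : fn(b->key[i], arg);
        return ret;
    }
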
2589 /* Overlapping keys compare equal */
2640 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2679 if (!RB_EMPTY_ROOT(&buf->keys)) {
2682 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2685 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2697 rb_erase(&w->node, &buf->keys);
2721 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2743 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2781 buf->keys = RB_ROOT;
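
Lines 2589-2781 are the keybuf machinery: pending keys live in an RB-tree (buf->keys, initialized to RB_ROOT at line 2781) ordered by a comparator under which overlapping keys compare equal, so one lookup finds any buffered key that touches a given range. That trick in miniature (struct extent and the comparator are simplified stand-ins):

    /* Half-open extent [start, end). */
    struct extent { unsigned long start, end; };

    /*
     * Overlapping extents compare equal.  Keys stored in the tree
     * never overlap each other, so the ordering stays consistent;
     * searching with an arbitrary range then lands on any stored
     * key that overlaps it.
     */
    static int extent_cmp(const struct extent *a, const struct extent *b)
    {
        if (a->end <= b->start)
            return -1;
        if (b->end <= a->start)
            return 1;
        return 0;       /* the ranges overlap */
    }
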