Lines matching refs:keys
18 * as keys are inserted we only sort the pages that have not yet been written.
52 * Check for bad keys in replay
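The comment at source line 18 describes bcache's incremental sorting: a node holds several independently sorted sets, and only the last, still-unwritten set is ever re-sorted on insert. A minimal sketch of that idea with toy types (toy_set, toy_node, toy_insert are illustrative names, not bcache's):

#include <stdlib.h>

/*
 * A node holds several independently sorted sets; sets already written
 * to disk are immutable, so an insert only ever re-sorts the last one.
 */
struct toy_set {
	unsigned nr;		/* keys currently in this set */
	int written;		/* nonzero once flushed: immutable from then on */
	unsigned long keys[64];	/* kept sorted ascending */
};

struct toy_node {
	unsigned nsets;		/* index of the last (active) set */
	struct toy_set set[4];
};

static int cmp_key(const void *a, const void *b)
{
	unsigned long x = *(const unsigned long *) a;
	unsigned long y = *(const unsigned long *) b;

	return x < y ? -1 : x > y;
}

/* Insert into the unwritten set and re-sort just that set. */
static int toy_insert(struct toy_node *n, unsigned long key)
{
	struct toy_set *s = &n->set[n->nsets];

	if (s->written || s->nr == 64)
		return -1;	/* caller would start a fresh set here */

	s->keys[s->nr++] = key;
	qsort(s->keys, s->nr, sizeof(s->keys[0]), cmp_key);
	return 0;
}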
115 if (b->level && b->keys.nsets)
116 bch_btree_sort(&b->keys, &b->c->sort);
118 bch_btree_sort_lazy(&b->keys, &b->c->sort);
121 bch_bset_init_next(&b->keys, write_block(b),
164 iter->b = &b->keys;
171 b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
199 if (i != b->keys.set[0].data && !i->keys)
209 bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
211 if (i->seq == b->keys.set[0].data->seq)
214 bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
216 i = b->keys.set[0].data;
218 if (b->keys.set[0].size &&
219 bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
223 bch_bset_init_next(&b->keys, write_block(b),
230 bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
232 bset_block_offset(b, i), i->keys);
259 bch_bio_map(bio, b->keys.set[0].data);
373 bset_sector_offset(&b->keys, i));
413 BUG_ON(b->written && !i->keys);
415 bch_check_keys(&b->keys, "writing");
436 unsigned int nsets = b->keys.nsets;
446 if (nsets && !b->keys.nsets)
483 BUG_ON(!i->keys);
528 bch_btree_keys_free(&b->keys);
550 if (!bch_btree_keys_alloc(&b->keys,
618 BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
620 if (b->keys.page_order < min_order)
692 * succeed, so that inserting keys into the btree can always succeed and
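The fragment at source line 692 belongs to a comment about keeping node allocation from failing once an insert is underway. A hedged sketch of the general reserve pattern it alludes to; the names (toy_reserve, toy_reserve_fill) are hypothetical and not bcache's API:

#include <stdlib.h>

struct toy_btree_node { struct toy_btree_node *next; };

struct toy_reserve {
	struct toy_btree_node *free;	/* singly linked free list */
	unsigned nr;			/* nodes currently held */
	unsigned min;			/* enough for the deepest split */
};

/* May fail, but only before any btree state has been modified. */
static int toy_reserve_fill(struct toy_reserve *r)
{
	while (r->nr < r->min) {
		struct toy_btree_node *n = malloc(sizeof(*n));

		if (!n)
			return -1;
		n->next = r->free;
		r->free = n;
		r->nr++;
	}
	return 0;
}

/* Called only after a successful fill, so it cannot run dry mid-split. */
static struct toy_btree_node *toy_reserve_get(struct toy_reserve *r)
{
	struct toy_btree_node *n = r->free;

	r->free = n->next;
	r->nr--;
	return n;
}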
825 c->verify_data->keys.set->data)
944 if (!b->keys.set[0].data)
955 if (!b->keys.set->data)
972 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
975 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
1048 for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1049 prefetch(b->keys.set[i].tree);
1050 prefetch(b->keys.set[i].data);
1053 for (; i <= b->keys.nsets; i++)
1054 prefetch(b->keys.set[i].data);
1144 bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));
1173 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1231 * ptr_invalid() can't return true for the keys that mark btree nodes as
1305 unsigned int keys = 0, good_keys = 0;
1312 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1314 keys++;
1316 if (bch_ptr_bad(&b->keys, k))
1326 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1328 bset_written(&b->keys, t) &&
1338 if ((keys - good_keys) * 2 > keys)
1348 unsigned int keys;
1359 unsigned int i, nodes = 0, keys = 0, blocks;
1374 keys += r[nodes++].keys;
1379 __set_blocks(b->keys.set[0].data, keys,
1406 keys = 0;
1412 if (__set_blocks(n1, n1->keys + keys +
1418 keys += bkey_u64s(k);
1424 * the remaining keys into this node; we can't ensure
1426 * length keys (shouldn't be possible in practice,
1429 if (__set_blocks(n1, n1->keys + n2->keys,
1434 keys = n2->keys;
1439 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
1447 (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1449 n1->keys += keys;
1450 r[i].keys = n1->keys;
1453 bset_bkey_idx(n2, keys),
1455 (void *) bset_bkey_idx(n2, keys));
1457 n2->keys -= keys;
1473 BUG_ON(btree_bset_first(new_nodes[0])->keys);
1530 struct keylist keys;
1549 bch_keylist_init(&keys);
1550 bch_keylist_add(&keys, &n->key);
1552 make_btree_freeing_key(replace, keys.top);
1553 bch_keylist_push(&keys);
1555 bch_btree_insert_node(b, op, &keys, NULL, NULL);
1556 BUG_ON(!bch_keylist_empty(&keys));
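Source lines 1549-1556 show the keylist pattern: initialize, add a key, build a second key in place at keys.top, push to commit it, then hand the batch to the insert path and assert it was drained. A simplified sketch, assuming single-word toy keys rather than bcache's variable-length bkeys:

struct toy_keylist {
	unsigned long buf[16];	/* inline storage */
	unsigned long *top;	/* one past the last committed word */
};

static void toy_keylist_init(struct toy_keylist *l)
{
	l->top = l->buf;
}

/* Copy one single-word key into the list and commit it. */
static void toy_keylist_add(struct toy_keylist *l, unsigned long key)
{
	*l->top++ = key;
}

/* Commit a key that the caller built in place at l->top (as
 * make_btree_freeing_key does with keys.top above). */
static void toy_keylist_push(struct toy_keylist *l, unsigned nr_words)
{
	l->top += nr_words;
}

static int toy_keylist_empty(const struct toy_keylist *l)
{
	return l->top == l->buf;
}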
1571 for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1613 bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1619 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1628 r->keys = btree_gc_count_keys(r->b);
1770 /* don't reclaim buckets to which writeback keys point */
1783 &dc->writeback_keys.keys, node)
1796 for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
1911 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1917 bch_btree_iter_init(&b->keys, &iter, NULL);
1920 k = bch_btree_iter_next_filter(&iter, &b->keys,
1956 /* root node keys are checked before thread created */
1957 bch_btree_iter_init(&c->root->keys, &iter, NULL);
1958 k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
1964 * Fetch a root node key index, skip the keys which
1977 &c->root->keys,
1983 * No more keys to check in root node,
2052 /* check and mark root node keys */
2053 for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
2162 status = bch_btree_insert_key(&b->keys, k, replace_key);
2164 bch_check_keys(&b->keys, "%u for %s", status,
2176 long ret = bch_btree_keys_u64s_remaining(&b->keys);
2181 if (b->keys.ops->is_extents)
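Source lines 2176-2181 compute the space left for an insert; for extent btrees some headroom is held back, because a new extent can land in the middle of an existing one and force it to be split in two. A toy version of that check, where TOY_KEY_MAX_U64S stands in for the worst-case size of one key in 64-bit words:

#define TOY_KEY_MAX_U64S 8

static long toy_insert_u64s_remaining(long free_u64s, int is_extents)
{
	/* An overlapped extent may be cut in two, adding one more key. */
	if (is_extents)
		free_u64s -= TOY_KEY_MAX_U64S;

	return free_u64s > 0 ? free_u64s : 0;
}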
2192 int oldsize = bch_count_data(&b->keys);
2195 struct bkey *k = insert_keys->keys;
2208 bkey_copy(&temp.key, insert_keys->keys);
2211 bch_cut_front(&b->key, insert_keys->keys);
2225 BUG_ON(bch_count_data(&b->keys) < oldsize);
2257 unsigned int keys = 0;
2259 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2281 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2282 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2283 keys));
2286 bset_bkey_idx(btree_bset_first(n1), keys));
2287 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2289 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2290 btree_bset_first(n1)->keys = keys;
2294 btree_bset_first(n2)->keys * sizeof(uint64_t));
2303 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
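The split at source lines 2281-2294 walks key by key because keys are variable length: the node cannot be cut at an arbitrary word, so the walk advances one whole key at a time until roughly 3/5 of the words are consumed. A toy version of that walk (toy types, not bcache's bkey):

struct toy_bkey { unsigned u64s; };	/* size of this key, in u64 words */

static unsigned toy_split_point(const struct toy_bkey *keys, unsigned nkeys,
				unsigned total_u64s)
{
	unsigned off = 0, i = 0;

	while (i < nkeys && off < total_u64s * 3 / 5)
		off += keys[i++].u64s;	/* step one whole key at a time */

	return off;	/* word offset of the first key going to n2 */
}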
2377 b->keys.last_set_unwritten)
2459 struct keylist *keys;
2469 int ret = bch_btree_insert_node(b, &op->op, op->keys,
2471 if (ret && !bch_keylist_empty(op->keys))
2477 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2484 BUG_ON(bch_keylist_empty(keys));
2487 op.keys = keys;
2491 while (!ret && !bch_keylist_empty(keys)) {
2494 &START_KEY(keys->keys),
2503 while ((k = bch_keylist_pop(keys)))
2535 /* Map across nodes or keys */
2547 bch_btree_iter_init(&b->keys, &iter, from);
2549 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2580 bch_btree_iter_init(&b->keys, &iter, from);
2582 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
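Source lines 2535-2582 belong to the map functions, which walk the tree and apply a callback per node or per key. A toy recursive sketch of that pattern, not bcache's implementation: interior nodes recurse into each child, leaves hand every key to the callback, and a nonzero return stops the walk early, much like the MAP_DONE convention in the real code.

struct toy_tree_node {
	int level;			/* 0 = leaf */
	unsigned nkeys;			/* entries in key[]/child[] */
	unsigned long key[8];		/* payload keys (leaves only) */
	struct toy_tree_node *child[8];	/* children (interior nodes only) */
};

typedef int (*toy_map_fn)(unsigned long key, void *arg);

static int toy_map_keys(struct toy_tree_node *n, toy_map_fn fn, void *arg)
{
	unsigned i;

	for (i = 0; i < n->nkeys; i++) {
		int ret = n->level
			? toy_map_keys(n->child[i], fn, arg)
			: fn(n->key[i], arg);

		if (ret)
			return ret;	/* propagate early exit */
	}
	return 0;
}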
2610 /* Overlapping keys compare equal */
2661 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2700 if (!RB_EMPTY_ROOT(&buf->keys)) {
2703 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2706 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2718 rb_erase(&w->node, &buf->keys);
2742 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2764 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2802 buf->keys = RB_ROOT;
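The comment at source line 2610, "Overlapping keys compare equal", is the trick that makes the keybuf's red-black tree work on ranges. A sketch with toy half-open ranges, not bcache's bkeys: if the comparison function says two overlapping ranges are equal, an ordinary ordered-tree insert rejects anything that overlaps an existing entry, and an ordinary lookup finds whichever stored range overlaps the query.

struct toy_range { unsigned long start, end; };	/* [start, end) */

/* < 0: a entirely before b; > 0: a entirely after b; 0: overlap. */
static int toy_range_cmp(const struct toy_range *a, const struct toy_range *b)
{
	if (a->end <= b->start)
		return -1;
	if (a->start >= b->end)
		return 1;
	return 0;
}

This is why RB_INSERT at source line 2661 can double as an overlap check, and why RB_GREATER at source line 2742 uses the separate keybuf_nonoverlapping_cmp when a strict ordering is needed.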