Lines Matching defs:item

18 #include "inode-item.h"
21 #include "file-item.h"
314 struct btrfs_delayed_item *item;
316 item = kmalloc(struct_size(item, data, data_len), GFP_NOFS);
317 if (item) {
318 item->data_len = data_len;
319 item->type = type;
320 item->bytes_reserved = 0;
321 item->delayed_node = node;
322 RB_CLEAR_NODE(&item->rb_node);
323 INIT_LIST_HEAD(&item->log_list);
324 item->logged = false;
325 refcount_set(&item->refs, 1);
327 return item;
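
Lines 314-327 are the item constructor: one allocation sized with struct_size() covers the item header and its trailing data, and the refcount starts at 1. A minimal user-space sketch of that flexible-array pattern, with invented names (demo_item, demo_alloc_item) rather than btrfs code:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for struct btrfs_delayed_item. */
struct demo_item {
    uint32_t data_len;  /* payload size in bytes */
    uint32_t refs;      /* simple refcount, starts at 1 */
    char data[];        /* flexible array member, like item->data */
};

/* One allocation covers header plus payload, as struct_size() arranges above. */
struct demo_item *demo_alloc_item(const void *payload, uint32_t data_len)
{
    struct demo_item *item = malloc(sizeof(*item) + data_len);

    if (!item)
        return NULL;
    item->data_len = data_len;
    item->refs = 1;
    memcpy(item->data, payload, data_len);
    return item;
}

The kernel's struct_size() additionally guards the size computation against overflow, which the plain malloc arithmetic in this sketch does not.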
331 * __btrfs_lookup_delayed_item - look up the delayed item by key
335 * Note: if we don't find the right item, we will return the prev item and
336 * the next item.
365 struct btrfs_delayed_item *item;
378 item = rb_entry(parent_node, struct btrfs_delayed_item,
381 if (item->index < ins->index) {
384 } else if (item->index > ins->index) {
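
Lines 365-384 descend the red-black tree by comparing index values, and the note at lines 335-336 says a failed lookup reports the neighbouring items instead. A small sketch of that contract over a sorted array (plain binary search, not the kernel's rb-tree; all names are invented):

#include <stdint.h>
#include <stdio.h>

/*
 * Binary-search a sorted array of index values. On a hit the position is
 * returned; on a miss -1 is returned and *prev/*next receive the positions
 * of the closest smaller and larger entries (-1 when no such neighbour).
 */
int lookup_index(const uint64_t *idx, long n, uint64_t want, long *prev, long *next)
{
    long lo = 0, hi = n - 1;

    *prev = -1;
    *next = -1;
    while (lo <= hi) {
        long mid = lo + (hi - lo) / 2;

        if (idx[mid] < want) {
            *prev = mid;
            lo = mid + 1;
        } else if (idx[mid] > want) {
            *next = mid;
            hi = mid - 1;
        } else {
            return (int)mid;    /* exact match */
        }
    }
    return -1;                  /* miss: neighbours reported */
}

int main(void)
{
    const uint64_t idx[] = { 2, 5, 9, 14 };
    long prev, next;
    int pos = lookup_index(idx, 4, 7, &prev, &next);

    printf("pos=%d prev=%ld next=%ld\n", pos, prev, next); /* pos=-1 prev=1 next=2 */
    return 0;
}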
442 static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
444 if (item) {
445 __btrfs_remove_delayed_item(item);
446 if (refcount_dec_and_test(&item->refs))
447 kfree(item);
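
btrfs_release_delayed_item() (lines 442-447) unlinks the item and frees it only when the last reference is dropped. A minimal sketch of that drop-and-free idiom with C11 atomics; the kernel's refcount_t adds extra sanity checking that this toy omits, and demo_release_item is an invented name:

#include <stdatomic.h>
#include <stdlib.h>

struct demo_item {
    atomic_uint refs;   /* set to 1 when the item is created */
    /* ... payload ... */
};

/* Drop one reference; free the item when the count hits zero. */
void demo_release_item(struct demo_item *item)
{
    if (!item)
        return;
    /* fetch_sub returns the old value, so 1 means we held the last ref. */
    if (atomic_fetch_sub(&item->refs, 1) == 1)
        free(item);
}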
455 struct btrfs_delayed_item *item = NULL;
459 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
461 return item;
468 struct btrfs_delayed_item *item = NULL;
472 item = rb_entry(p, struct btrfs_delayed_item, rb_node);
474 return item;
478 struct btrfs_delayed_item *item)
483 p = rb_next(&item->rb_node);
491 struct btrfs_delayed_item *item)
515 item->delayed_node->inode_id,
522 if (item->type == BTRFS_DELAYED_DELETION_ITEM)
523 item->bytes_reserved = num_bytes;
530 struct btrfs_delayed_item *item)
535 if (!item->bytes_reserved)
544 item->delayed_node->inode_id,
545 item->bytes_reserved, 0);
546 btrfs_block_rsv_release(fs_info, rsv, item->bytes_reserved, NULL);
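
Lines 515-546 show the reservation bookkeeping: a deletion item records what it charged in bytes_reserved (line 523), and the release path returns early when nothing was charged (line 535). A simplified sketch of that accounting against a plain counter; demo_rsv and demo_item are hypothetical stand-ins, and insertion items, which settle their space in batches elsewhere, are left out:

#include <stdint.h>

struct demo_rsv {
    uint64_t reserved;          /* bytes currently held for delayed work */
};

struct demo_item {
    uint64_t bytes_reserved;    /* what this deletion item charged, 0 if nothing */
};

/* Charge num_bytes for a deletion item and remember the amount on the item. */
void demo_item_reserve(struct demo_rsv *rsv, struct demo_item *item, uint64_t num_bytes)
{
    rsv->reserved += num_bytes;
    item->bytes_reserved = num_bytes;
}

/* Undo the charge; a zero bytes_reserved means there is nothing to give back. */
void demo_item_release(struct demo_rsv *rsv, struct demo_item *item)
{
    if (!item->bytes_reserved)
        return;
    rsv->reserved -= item->bytes_reserved;
    item->bytes_reserved = 0;
}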
587 * we always reserve enough to update the inode item.
637 * Insert a single delayed item or a batch of delayed items, as many as possible
838 struct btrfs_delayed_item *item)
840 const u64 ino = item->delayed_node->inode_id;
847 u64 total_reserved_size = item->bytes_reserved;
854 * Our caller always gives us a path pointing to an existing item, so
862 curr = item;
866 * Keep checking if the next delayed item matches the next item in the
925 struct btrfs_delayed_item *item;
928 item = __btrfs_first_delayed_deletion_item(node);
929 if (!item) {
934 key.offset = item->index;
938 * There's no matching item in the leaf. This means we
939 * have already deleted this item in a past run of the
949 * an item in the tree, and move to the next item.
952 btrfs_release_delayed_item(item);
955 ret = btrfs_batch_delete_items(trans, root, path, item);
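
The loop around lines 925-955 takes the first pending deletion, drops it if its key is already gone from the leaf (a previous run deleted it), and otherwise hands a run of consecutive entries to btrfs_batch_delete_items(). A toy sketch of that skip-or-batch flow over arrays; it approximates "matches the next item in the leaf" with consecutive index values and is not the btrfs implementation:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Return true if 'index' is still present in the 'leaf' array. */
static bool leaf_contains(const uint64_t *leaf, size_t n, uint64_t index)
{
    for (size_t j = 0; j < n; j++)
        if (leaf[j] == index)
            return true;
    return false;
}

int main(void)
{
    const uint64_t leaf[]    = { 3, 4, 5, 9 };  /* keys still present in the leaf */
    const uint64_t pending[] = { 2, 3, 4, 9 };  /* delayed deletions, ascending */
    const size_t nleaf = sizeof(leaf) / sizeof(leaf[0]);
    const size_t npending = sizeof(pending) / sizeof(pending[0]);
    size_t i = 0;

    while (i < npending) {
        if (!leaf_contains(leaf, nleaf, pending[i])) {
            /* Already deleted in a past run: just drop the delayed item. */
            printf("skip %llu (already deleted)\n", (unsigned long long)pending[i]);
            i++;
            continue;
        }
        /* Extend the batch while the next pending index is the next leaf key. */
        size_t batch = 1;
        while (i + batch < npending &&
               pending[i + batch] == pending[i] + batch &&
               leaf_contains(leaf, nleaf, pending[i + batch]))
            batch++;
        printf("delete batch of %zu starting at %llu\n",
               batch, (unsigned long long)pending[i]);
        i += batch;
    }
    return 0;
}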
1053 * in the same item doesn't exist.
1442 * Adding the new dir index item does not require touching another
1498 * First attempt to insert the delayed item. This is to make the error
1500 * any other task coming in and running the delayed item before we do
1508 "error adding delayed dir index item, name: %.*s, index: %llu, root: %llu, dir: %llu, dir->index_cnt: %llu, delayed_node->index_cnt: %llu, error: %d",
1530 * Space was reserved for a dir index item insertion when we
1555 struct btrfs_delayed_item *item;
1558 item = __btrfs_lookup_delayed_item(&node->ins_root.rb_root, index);
1559 if (!item) {
1570 ASSERT(item->bytes_reserved == 0);
1574 * If there's only one leaf reserved, we can decrement this item from the
1581 const u32 data_len = item->data_len + sizeof(struct btrfs_item);
1587 btrfs_release_delayed_item(item);
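
Line 1581 computes the leaf footprint of a cancelled insertion as its payload plus one item header, which is what gets handed back to the reservation before the item is released. A tiny sketch of that arithmetic; ITEM_HEADER_SIZE is an assumed stand-in for sizeof(struct btrfs_item) and the numbers are made up:

#include <stdint.h>
#include <stdio.h>

#define ITEM_HEADER_SIZE 25u    /* assumed stand-in for sizeof(struct btrfs_item) */

struct demo_rsv { uint64_t reserved; };

/* A queued insertion that never reached the tree: return its leaf footprint. */
void demo_cancel_insertion(struct demo_rsv *rsv, uint32_t data_len)
{
    uint32_t footprint = data_len + ITEM_HEADER_SIZE;   /* payload + item header */

    rsv->reserved -= footprint;
}

int main(void)
{
    struct demo_rsv rsv = { .reserved = 16384 };

    demo_cancel_insertion(&rsv, 40);    /* e.g. a 40-byte dir index payload */
    printf("reserved now %llu\n", (unsigned long long)rsv.reserved);    /* 16319 */
    return 0;
}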
1603 struct btrfs_delayed_item *item;
1614 item = btrfs_alloc_delayed_item(0, node, BTRFS_DELAYED_DELETION_ITEM);
1615 if (!item) {
1620 item->index = index;
1622 ret = btrfs_delayed_item_reserve_metadata(trans, item);
1629 "metadata reservation failed for delayed dir item deltiona, should have been reserved");
1630 btrfs_release_delayed_item(item);
1635 ret = __btrfs_add_delayed_item(node, item);
1638 "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
1641 btrfs_delayed_item_release_metadata(dir->root, item);
1642 btrfs_release_delayed_item(item);
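
The failure paths in the deletion-queueing code (lines 1614-1642) unwind in reverse order: a failed reservation only drops the item, while a failed insert into the deletion tree must also release the metadata before dropping it. A compact sketch of that goto-style unwind with trivial stand-in helpers (all demo_* names are invented):

#include <stdlib.h>

struct demo_item {
    unsigned long long index;
    unsigned long long bytes_reserved;
};

/* Trivial stand-ins for the reserve / insert / unreserve steps. */
static int demo_reserve(struct demo_item *item)
{
    item->bytes_reserved = 4096;    /* pretend we charged one node's worth */
    return 0;
}

static int demo_insert(struct demo_item *item)
{
    (void)item;                     /* pretend the tree insert succeeded */
    return 0;
}

static void demo_unreserve(struct demo_item *item)
{
    item->bytes_reserved = 0;
}

/* Queue a deletion: on failure, undo only the steps that already happened. */
int demo_queue_deletion(unsigned long long index)
{
    struct demo_item *item = calloc(1, sizeof(*item));
    int ret;

    if (!item)
        return -1;
    item->index = index;

    ret = demo_reserve(item);   /* charge metadata space first */
    if (ret)
        goto free_item;         /* nothing reserved yet: just free the item */

    ret = demo_insert(item);    /* then link into the deletion tree */
    if (ret)
        goto unreserve;         /* give the reservation back, then free */

    return 0;                   /* item now lives in the (imaginary) tree */

unreserve:
    demo_unreserve(item);
free_item:
    free(item);
    return ret;
}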
1678 struct btrfs_delayed_item *item;
1686 * item->readdir_list.
1692 item = __btrfs_first_delayed_insertion_item(delayed_node);
1693 while (item && item->index <= last_index) {
1694 refcount_inc(&item->refs);
1695 list_add_tail(&item->readdir_list, ins_list);
1696 item = __btrfs_next_delayed_item(item);
1699 item = __btrfs_first_delayed_deletion_item(delayed_node);
1700 while (item && item->index <= last_index) {
1701 refcount_inc(&item->refs);
1702 list_add_tail(&item->readdir_list, del_list);
1703 item = __btrfs_next_delayed_item(item);
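
The readdir path (lines 1692-1703) snapshots both trees: every delayed item up to last_index gets an extra reference and is linked onto a private list through readdir_list, so it can be consumed after the node's lock is released. A user-space sketch of that take-a-ref-and-collect pattern over a singly linked list (illustrative only; demo_item is not the kernel structure):

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

struct demo_item {
    uint64_t index;
    atomic_uint refs;
    struct demo_item *next;         /* tree order, ascending index */
    struct demo_item *snap_next;    /* private snapshot list, like readdir_list */
};

/* Collect every item with index <= last_index, pinning each with an extra ref. */
struct demo_item *demo_snapshot(struct demo_item *head, uint64_t last_index)
{
    struct demo_item *snap = NULL, **tail = &snap;

    for (struct demo_item *it = head; it && it->index <= last_index; it = it->next) {
        atomic_fetch_add(&it->refs, 1); /* keep it alive once the lock is dropped */
        it->snap_next = NULL;
        *tail = it;
        tail = &it->snap_next;
    }
    return snap;
}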
1778 * Changing the data of the delayed item is impossible. So
1969 * And in most cases, the inode ref and the inode item are in the
1971 * Since we are sure we will reserve the space for the inode item,
1973 * - If the inode ref and the inode item are not in the same leaf,
2096 struct btrfs_delayed_item *item;
2103 item = __btrfs_first_delayed_insertion_item(node);
2104 while (item) {
2106 * It's possible that the item is already in a log list. This
2128 if (!item->logged && list_empty(&item->log_list)) {
2129 refcount_inc(&item->refs);
2130 list_add_tail(&item->log_list, ins_list);
2132 item = __btrfs_next_delayed_item(item);
2135 item = __btrfs_first_delayed_deletion_item(node);
2136 while (item) {
2138 if (!item->logged && list_empty(&item->log_list)) {
2139 refcount_inc(&item->refs);
2140 list_add_tail(&item->log_list, del_list);
2142 item = __btrfs_next_delayed_item(item);
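
The log path (lines 2104-2142) does the same walk but only pins an item if it has not been logged and is not already on some log list (lines 2128 and 2138). A small variant of the previous sketch showing that guard; the boolean on_log_list stands in for the list_empty(&item->log_list) check and everything here is invented for illustration:

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct demo_item {
    uint64_t index;
    atomic_uint refs;
    bool logged;                    /* already committed to the log */
    bool on_log_list;               /* stand-in for !list_empty(&item->log_list) */
    struct demo_item *next;
    struct demo_item *log_next;     /* private log list */
};

/* Queue items for logging, skipping anything already logged or already queued. */
struct demo_item *demo_log_snapshot(struct demo_item *head)
{
    struct demo_item *list = NULL, **tail = &list;

    for (struct demo_item *it = head; it; it = it->next) {
        if (it->logged || it->on_log_list)
            continue;               /* some other log context already has it */
        atomic_fetch_add(&it->refs, 1);
        it->on_log_list = true;
        it->log_next = NULL;
        *tail = it;
        tail = &it->log_next;
    }
    return list;
}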
2164 struct btrfs_delayed_item *item;
2173 list_for_each_entry_safe(item, next, ins_list, log_list) {
2174 item->logged = true;
2175 list_del_init(&item->log_list);
2176 if (refcount_dec_and_test(&item->refs))
2177 kfree(item);
2180 list_for_each_entry_safe(item, next, del_list, log_list) {
2181 item->logged = true;
2182 list_del_init(&item->log_list);
2183 if (refcount_dec_and_test(&item->refs))
2184 kfree(item);
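
The teardown at lines 2173-2184 uses the _safe iterator because dropping the last reference can free the entry being walked; each item is marked logged, unlinked, and released. A matching sketch for tearing down the snapshot built in the previous examples (the next pointer is saved before the entry may be freed; demo_* names are invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

struct demo_item {
    uint64_t index;
    atomic_uint refs;
    bool logged;
    struct demo_item *log_next;     /* private log list, like item->log_list */
};

/* Mark every queued item as logged, then drop the references the snapshot took. */
void demo_log_snapshot_put(struct demo_item *list)
{
    while (list) {
        /* Save the link first: dropping the last ref frees the entry. */
        struct demo_item *next = list->log_next;

        list->logged = true;
        list->log_next = NULL;
        if (atomic_fetch_sub(&list->refs, 1) == 1)
            free(list);
        list = next;
    }
}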