Lines Matching refs:tree
59 struct hfs_btree *tree;
62 tree = node->tree;
64 tree->attributes & HFS_TREE_VARIDXKEYS ||
65 node->tree->cnid == HFSPLUS_ATTR_CNID)
68 key_len = tree->max_key_len + 2;
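The fragments at 59-68 appear to come from hfs_bnode_read_key() in the Linux kernel's fs/hfsplus/bnode.c. The rule they implement: leaf nodes, trees flagged with variable-size index keys (HFS_TREE_VARIDXKEYS), and the attributes tree (HFSPLUS_ATTR_CNID) store each key's length on disk, while all other index nodes use the tree-wide maximum key length. A minimal user-space sketch of that decision, with stub types standing in for the kernel's hfs_btree/hfs_bnode (constant values chosen for illustration):

#include <stdint.h>

#define HFS_NODE_LEAF		0xff	/* leaf node kind */
#define HFS_TREE_VARIDXKEYS	4	/* variable-size index keys flag */
#define HFSPLUS_ATTR_CNID	8	/* attributes file CNID */

struct tree_stub { uint32_t cnid; uint32_t attributes; int max_key_len; };
struct node_stub { struct tree_stub *tree; int type; };

/* Keys are prefixed by a 16-bit length field on disk, hence the "+ 2". */
static int key_len(const struct node_stub *node, uint16_t stored_len)
{
	const struct tree_stub *tree = node->tree;

	if (node->type == HFS_NODE_LEAF ||
	    (tree->attributes & HFS_TREE_VARIDXKEYS) ||
	    tree->cnid == HFSPLUS_ATTR_CNID)
		return stored_len + 2;	/* per-key length read from disk */
	return tree->max_key_len + 2;	/* fixed-size index key slot */
}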
303 off = node->tree->node_size - 2;
310 if (node->tree->attributes & HFS_TREE_VARIDXKEYS ||
311 node->tree->cnid == HFSPLUS_ATTR_CNID)
314 tmp = node->tree->max_key_len + 2;
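The lines at 303-314 are from the node-dump helper walking the record offset table: HFS+ packs big-endian u16 record offsets at the end of each node, with the last two bytes (node_size - 2) holding the offset of record 0. A sketch of walking that table, assuming the node is already in a byte buffer:

#include <stdint.h>
#include <stdio.h>

static uint16_t read_be16(const uint8_t *p)
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

/* Walk the record-offset table at the end of a node buffer. The entry
 * at node_size - 2 is record 0; earlier entries follow for records
 * 1, 2, ... and the final entry marks the end of the last record. */
static void dump_offsets(const uint8_t *node, int node_size, int num_recs)
{
	int off = node_size - 2;

	for (int i = 0; i <= num_recs; i++, off -= 2)
		printf("record %d boundary at offset %d\n",
		       i, read_be16(node + off));
}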
330 struct hfs_btree *tree;
334 tree = node->tree;
336 tmp = hfs_bnode_find(tree, node->prev);
345 tree->leaf_head = node->next;
348 tmp = hfs_bnode_find(tree, node->next);
357 tree->leaf_tail = node->prev;
363 tree->root = 0;
364 tree->depth = 0;
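Lines 330-364 are hfs_bnode_unlink() splicing a node out of the leaf chain: a missing predecessor means the node was the head (leaf_head moves to node->next), a missing successor means it was the tail, and unlinking a node with no parent empties the tree. In the kernel, prev/next are node numbers resolved through hfs_bnode_find(), and the chain-end updates only apply to leaf nodes; the sketch below uses plain pointers and omits that check:

#include <stdint.h>

struct chain_node { struct chain_node *prev, *next, *parent; };
struct chain_tree {
	struct chain_node *leaf_head, *leaf_tail;
	uint32_t root, depth;
};

static void unlink_node(struct chain_tree *tree, struct chain_node *node)
{
	if (node->prev)
		node->prev->next = node->next;
	else
		tree->leaf_head = node->next;	/* node was the first leaf */

	if (node->next)
		node->next->prev = node->prev;
	else
		tree->leaf_tail = node->prev;	/* node was the last leaf */

	if (!node->parent) {			/* unlinking the root node */
		tree->root = 0;
		tree->depth = 0;
	}
}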
376 struct hfs_bnode *hfs_bnode_findhash(struct hfs_btree *tree, u32 cnid)
380 if (cnid >= tree->node_count) {
386 for (node = tree->node_hash[hfs_bnode_hash(cnid)];
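hfs_bnode_findhash() at 376-386 is a plain open-chained hash lookup keyed by node number, guarded elsewhere by tree->hash_lock, after rejecting a cnid beyond tree->node_count. A self-contained model (table size and hash function mirror the kernel's shape but are illustrative):

#include <stddef.h>
#include <stdint.h>

#define NODE_HASH_SIZE 256

struct bnode_stub {
	uint32_t this_id;		/* node number ("cnid") */
	int refcnt;
	struct bnode_stub *next_hash;	/* hash-chain link */
};

static unsigned bnode_hash(uint32_t cnid)
{
	return cnid % NODE_HASH_SIZE;
}

static struct bnode_stub *find_hash(struct bnode_stub **hash, uint32_t cnid)
{
	struct bnode_stub *node;

	for (node = hash[bnode_hash(cnid)]; node; node = node->next_hash)
		if (node->this_id == cnid)
			return node;
	return NULL;	/* not cached; caller falls back to creating it */
}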
393 static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
401 if (cnid >= tree->node_count) {
407 size = sizeof(struct hfs_bnode) + tree->pages_per_bnode *
412 node->tree = tree;
417 node->tree->cnid, node->this);
419 spin_lock(&tree->hash_lock);
420 node2 = hfs_bnode_findhash(tree, cnid);
423 node->next_hash = tree->node_hash[hash];
424 tree->node_hash[hash] = node;
425 tree->node_hash_cnt++;
427 spin_unlock(&tree->hash_lock);
433 spin_unlock(&tree->hash_lock);
435 mapping = tree->inode->i_mapping;
436 off = (loff_t)cnid << tree->node_size_shift;
439 for (i = 0; i < tree->pages_per_bnode; block++, i++) {
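__hfs_bnode_create() at 393-439 allocates a node sized for pages_per_bnode page pointers, then re-checks the hash under hash_lock: if another thread published the same cnid first, the fresh allocation is discarded and the existing node reused; otherwise the node is chained into the hash and its backing pages are read from the tree inode's mapping at offset cnid << node_size_shift. A sketch of the create-or-reuse step, reusing the bnode_stub/find_hash helpers from the sketch above, with a pthread mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;

static struct bnode_stub *create_node(struct bnode_stub **hash, uint32_t cnid)
{
	struct bnode_stub *node, *node2;

	node = calloc(1, sizeof(*node));
	if (!node)
		return NULL;
	node->this_id = cnid;
	node->refcnt = 1;

	pthread_mutex_lock(&hash_lock);
	node2 = find_hash(hash, cnid);
	if (!node2) {
		/* we won the race: publish the new node in the hash */
		unsigned h = bnode_hash(cnid);

		node->next_hash = hash[h];
		hash[h] = node;
		pthread_mutex_unlock(&hash_lock);
		return node;	/* caller would now read its pages in */
	}
	node2->refcnt++;	/* lost the race: reuse the winner */
	pthread_mutex_unlock(&hash_lock);
	free(node);
	return node2;
}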
457 node->tree->cnid, node->this, atomic_read(&node->refcnt));
458 for (p = &node->tree->node_hash[hfs_bnode_hash(node->this)];
463 node->tree->node_hash_cnt--;
466 /* Load a particular node out of a tree */
467 struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
474 spin_lock(&tree->hash_lock);
475 node = hfs_bnode_findhash(tree, num);
478 spin_unlock(&tree->hash_lock);
485 spin_unlock(&tree->hash_lock);
486 node = __hfs_bnode_create(tree, num);
514 if (node->height <= 1 || node->height > tree->depth)
521 rec_off = tree->node_size - 2;
529 next_off > tree->node_size ||
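hfs_bnode_find() at 467-529 first tries the hash under hash_lock and only calls __hfs_bnode_create() on a miss; a freshly read node is then sanity-checked before use: an index node's height must lie within the tree's depth, and each record offset pulled from the table at the node's end must be even, strictly increasing, and bounded by node_size. A sketch of those checks over a raw node buffer, using read_be16 from the earlier sketch (the 14 matches HFS+'s node descriptor size, record 0 starting right after it):

/* Returns 1 if the node looks sane, 0 on apparent corruption. */
static int node_looks_sane(const uint8_t *buf, int node_size,
			   int height, int tree_depth, int num_recs)
{
	int rec_off = node_size - 2;
	int off = read_be16(buf + rec_off);

	if (height <= 1 || height > tree_depth)
		return 0;	/* index nodes sit above the leaves */
	if (off != 14)
		return 0;	/* record 0 must follow the node header */

	for (int i = 1; i <= num_recs; i++) {
		rec_off -= 2;
		int next_off = read_be16(buf + rec_off);

		if (next_off <= off ||		/* offsets must increase */
		    next_off > node_size ||	/* ...stay inside the node */
		    (next_off & 1))		/* ...on 16-bit boundaries */
			return 0;
		off = next_off;
	}
	return 1;
}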
556 for (i = 0; i < node->tree->pages_per_bnode; i++)
562 struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
568 spin_lock(&tree->hash_lock);
569 node = hfs_bnode_findhash(tree, num);
570 spin_unlock(&tree->hash_lock);
576 node = __hfs_bnode_create(tree, num);
586 min_t(int, PAGE_SIZE, tree->node_size));
588 for (i = 1; i < tree->pages_per_bnode; i++) {
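hfs_bnode_create() at 562-588 obtains a node and zeroes its backing pages: the first page is cleared from the node's offset for min(PAGE_SIZE, node_size) bytes, and every further page of a multi-page node is cleared in full. A buffer-level model of that loop:

#include <string.h>

#define PAGE_SZ 4096	/* stand-in for the kernel's PAGE_SIZE */

/* Zero a freshly created node that may span several pages: the first
 * page from the node's offset within it, subsequent pages in full. */
static void zero_new_node(uint8_t **pages, int page_offset,
			  int node_size, int pages_per_bnode)
{
	int first = node_size < PAGE_SZ ? node_size : PAGE_SZ;

	memset(pages[0] + page_offset, 0, first);
	for (int i = 1; i < pages_per_bnode; i++)
		memset(pages[i], 0, PAGE_SZ);
}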
603 node->tree->cnid, node->this,
612 struct hfs_btree *tree = node->tree;
616 node->tree->cnid, node->this,
619 if (!atomic_dec_and_lock(&node->refcnt, &tree->hash_lock))
621 for (i = 0; i < tree->pages_per_bnode; i++) {
629 spin_unlock(&tree->hash_lock);
630 if (hfs_bnode_need_zeroout(tree))
631 hfs_bnode_clear(node, 0, tree->node_size);
636 spin_unlock(&tree->hash_lock);
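hfs_bnode_put() at 612-636 drops a reference with atomic_dec_and_lock(), so tree->hash_lock is only taken when the count actually reaches zero; a node marked deleted is then unhashed (the pointer-chase shown at 457-463) and released, zeroing the whole node first when hfs_bnode_need_zeroout() says so. A simplified model reusing the stubs and hash_lock above; note the real code keeps undeleted zero-ref nodes cached, a detail this sketch folds into always freeing:

static void put_node(struct bnode_stub **hash, struct bnode_stub *node)
{
	struct bnode_stub **p;

	pthread_mutex_lock(&hash_lock);
	if (--node->refcnt > 0) {
		pthread_mutex_unlock(&hash_lock);
		return;		/* node still has users */
	}

	/* Last reference: unlink from the hash chain, as in
	 * hfs_bnode_unhash(), then free. */
	p = &hash[bnode_hash(node->this_id)];
	while (*p != node)
		p = &(*p)->next_hash;
	*p = node->next_hash;
	pthread_mutex_unlock(&hash_lock);
	free(node);
}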
641 * Unused nodes have to be zeroed if this is the catalog tree and
644 bool hfs_bnode_need_zeroout(struct hfs_btree *tree)
646 struct super_block *sb = tree->inode->i_sb;
650 return tree->cnid == HFSPLUS_CAT_CNID &&
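Finally, hfs_bnode_need_zeroout() at 644-650 gates that zeroing: it applies only to the catalog tree, and only when the volume header advertises the unused-node fix. The shape of the check (the constants mirror HFS+'s reserved catalog CNID and the kernel's attribute bit, but treat the exact values as illustrative):

#include <stdbool.h>
#include <stdint.h>

#define HFSPLUS_CAT_CNID		4		/* catalog file CNID */
#define HFSPLUS_VOL_UNUSED_NODE_FIX	(1u << 31)	/* volume attribute bit */

static bool need_zeroout(uint32_t tree_cnid, uint32_t volume_attributes)
{
	return tree_cnid == HFSPLUS_CAT_CNID &&
	       (volume_attributes & HFSPLUS_VOL_UNUSED_NODE_FIX);
}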