Lines Matching refs:btree
152 to->btree = from3->__btree;
160 to->btree = from->__btree;
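The matches at 152 and 160 are from the node-header "from_disk" helpers: the in-core header keeps a btree pointer aimed directly at the on-disk entry array (__btree), so the rest of the code can index entries without caring which on-disk header layout the block uses. A minimal sketch of that idea, using hypothetical, simplified stand-in types rather than the kernel's real structures:

#include <stdint.h>

/* Hypothetical stand-ins for the on-disk layouts and the in-core header. */
struct disk_node_entry { uint32_t hashval; uint32_t before; };

struct disk_node_short {                 /* short header variant */
	uint16_t count;
	struct disk_node_entry __btree[];    /* entries follow the header */
};

struct disk_node_long {                  /* longer (e.g. CRC-enabled) variant */
	uint16_t count;
	uint32_t crc;
	uint64_t blkno;
	struct disk_node_entry __btree[];
};

struct icnodehdr {                       /* in-core header, format-independent */
	int count;
	struct disk_node_entry *btree;       /* points into the on-disk block */
};

/* Populate the in-core header from whichever on-disk layout is in use. */
static void node_hdr_from_disk(struct icnodehdr *to, void *block, int is_long)
{
	if (is_long) {
		struct disk_node_long *from3 = block;
		to->count = from3->count;
		to->btree = from3->__btree;      /* cf. "to->btree = from3->__btree" */
	} else {
		struct disk_node_short *from = block;
		to->count = from->count;
		to->btree = from->__btree;       /* cf. "to->btree = from->__btree" */
	}
}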
576 * Update the btree to show the new hashval for this child.
655 struct xfs_da_node_entry *btree;
690 btree = icnodehdr.btree;
691 size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
749 btree = nodehdr.btree;
750 btree[0].hashval = cpu_to_be32(blk1->hashval);
751 btree[0].before = cpu_to_be32(blk1->blkno);
752 btree[1].hashval = cpu_to_be32(blk2->hashval);
753 btree[1].before = cpu_to_be32(blk2->blkno);
769 XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));
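The matches around 690-769 are from the root split: the old root's contents are copied into a newly allocated block, and the root (which must stay at a fixed block) is rewritten with exactly two entries, one per child, each keyed by that child's highest hashval. A sketch of the two-entry setup, again with simplified stand-in types and host-endian fields (the kernel stores these big-endian via cpu_to_be32):

#include <stdint.h>

struct node_entry { uint32_t hashval; uint32_t before; };   /* hypothetical stand-in */

struct child_blk {          /* what the split produced for each half */
	uint32_t blkno;         /* block number of the child */
	uint32_t hashval;       /* highest hash stored in that child */
};

/*
 * Reinitialize a root's entry array so it refers to the two blocks produced
 * by splitting the old root (cf. lines 750-753 in the listing). Returns the
 * new entry count; the real code then logs the two-entry range.
 */
static int root_reinit(struct node_entry *btree, const struct child_blk *blk1,
		       const struct child_blk *blk2)
{
	btree[0].hashval = blk1->hashval;
	btree[0].before  = blk1->blkno;
	btree[1].hashval = blk2->hashval;
	btree[1].before  = blk2->blkno;
	return 2;
}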
869 * Balance the btree elements between two intermediate nodes,
900 btree1 = nodehdr1.btree;
901 btree2 = nodehdr2.btree;
914 btree1 = nodehdr1.btree;
915 btree2 = nodehdr2.btree;
997 btree1 = nodehdr1.btree;
998 btree2 = nodehdr2.btree;
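The matches at 869-998 are from the rebalance path, which evens out the entry counts of two adjacent sibling nodes while preserving hash order. The sketch below shows only that balancing step (the kernel also handles reordering the two blocks and logging); same simplified (hashval, before) stand-in type as above:

#include <stdint.h>
#include <string.h>

struct node_entry { uint32_t hashval; uint32_t before; };   /* hypothetical stand-in */

/*
 * Even out two adjacent siblings by moving whole entries across the boundary;
 * node1's hashvals are all <= node2's, so order is preserved.
 */
static void node_rebalance(struct node_entry *btree1, int *count1,
			   struct node_entry *btree2, int *count2)
{
	int move = (*count1 + *count2) / 2 - *count1;   /* >0: move node2 -> node1 */

	if (move > 0) {
		/* Take 'move' entries off the front of node2, append to node1. */
		memcpy(&btree1[*count1], &btree2[0], (size_t)move * sizeof(*btree1));
		memmove(&btree2[0], &btree2[move],
			(size_t)(*count2 - move) * sizeof(*btree2));
		*count1 += move;
		*count2 -= move;
	} else if (move < 0) {
		move = -move;
		/* Take 'move' entries off the back of node1, prepend to node2. */
		memmove(&btree2[move], &btree2[0], (size_t)*count2 * sizeof(*btree2));
		memcpy(&btree2[0], &btree1[*count1 - move],
		       (size_t)move * sizeof(*btree2));
		*count1 -= move;
		*count2 += move;
	}
}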
1023 struct xfs_da_node_entry *btree;
1031 btree = nodehdr.btree;
1044 tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
1045 memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
1047 btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
1048 btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
1050 XFS_DA_LOGRANGE(node, &btree[oldblk->index],
1051 tmp + sizeof(*btree)));
1062 oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
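The matches at 1023-1062 insert a new child entry into a node: entries at and above the chosen index are slid up with memmove, the new (hashval, before) pair is written into the hole, the moved range is logged, and the block's own hashval is re-read from its last entry. A sketch under the same simplified types:

#include <stdint.h>
#include <string.h>

struct node_entry { uint32_t hashval; uint32_t before; };   /* hypothetical stand-in */

/*
 * Insert (hashval, before) at 'index' in a node holding 'count' entries.
 * Returns the new count; the new last entry's hashval is what the parent
 * records for this block.
 */
static int node_add_entry(struct node_entry *btree, int count, int index,
			  uint32_t hashval, uint32_t before)
{
	/* Open a hole at 'index' by sliding the tail up one slot. */
	size_t tmp = (size_t)(count - index) * sizeof(*btree);
	memmove(&btree[index + 1], &btree[index], tmp);

	btree[index].hashval = hashval;
	btree[index].before  = before;
	/* The kernel logs the range [&btree[index], tmp + sizeof(*btree)) here. */

	return count + 1;
}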
1215 child = be32_to_cpu(oldroothdr.btree[0].before);
1386 return be32_to_cpu(nodehdr.btree[nodehdr.count - 1].hashval);
1400 struct xfs_da_node_entry *btree;
1432 btree = nodehdr.btree;
1433 if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
1436 btree[blk->index].hashval = cpu_to_be32(lasthash);
1438 XFS_DA_LOGRANGE(node, &btree[blk->index],
1439 sizeof(*btree)));
1441 lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
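The matches at 1400-1441 are the hash-path fixup: after an insert or delete changes a block's highest hashval, the code walks back up the lookup path rewriting the parent entry that points at the changed block, and stops as soon as a level already holds the right value. A sketch, with a hypothetical path representation:

#include <stdint.h>

struct node_entry { uint32_t hashval; uint32_t before; };   /* hypothetical stand-in */

struct path_blk {               /* one level of the lookup path (hypothetical) */
	struct node_entry *btree;   /* entry array of the node at this level */
	int count;                  /* entries in use */
	int index;                  /* entry followed to reach the level below */
};

/*
 * Propagate 'lasthash' (the child's new highest hashval) up the path,
 * stopping early once a level already records the right value.
 */
static void fix_hash_path(struct path_blk *path, int level, uint32_t lasthash)
{
	for (; level >= 0; level--) {
		struct path_blk *blk = &path[level];

		if (blk->btree[blk->index].hashval == lasthash)
			break;                              /* nothing above can change */
		blk->btree[blk->index].hashval = lasthash;
		/* The kernel logs &btree[blk->index] here. */
		lasthash = blk->btree[blk->count - 1].hashval;
	}
}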
1455 struct xfs_da_node_entry *btree;
1471 btree = nodehdr.btree;
1475 memmove(&btree[index], &btree[index + 1], tmp);
1477 XFS_DA_LOGRANGE(node, &btree[index], tmp));
1480 memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
1482 XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
1491 drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
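The matches at 1455-1491 remove one entry from a node: the tail of the array is slid down over the dead slot, the now-unused last slot is zeroed (both regions are logged in the kernel), and the block's hashval is taken from the new last entry. A sketch:

#include <stdint.h>
#include <string.h>

struct node_entry { uint32_t hashval; uint32_t before; };   /* hypothetical stand-in */

/*
 * Remove the entry at 'index' from a node holding 'count' entries.
 * Returns the new count; the caller reads the new last entry's hashval
 * to fix the parent (cf. line 1491 in the listing).
 */
static int node_remove_entry(struct node_entry *btree, int count, int index)
{
	if (index < count - 1) {
		/* Slide the tail down over the vacated slot. */
		size_t tmp = (size_t)(count - index - 1) * sizeof(*btree);
		memmove(&btree[index], &btree[index + 1], tmp);
		index = count - 1;                      /* last slot is now stale */
	}
	memset(&btree[index], 0, sizeof(btree[index]));
	return count - 1;
}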
1521 drop_btree = drop_hdr.btree;
1522 save_btree = save_hdr.btree;
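The matches at 1521-1522 are from the unbalance path, which folds a nearly-empty "drop" node's entries into its sibling "save" node before the drop block is freed. A sketch of the merge, preserving hash order:

#include <stdint.h>
#include <string.h>

struct node_entry { uint32_t hashval; uint32_t before; };   /* hypothetical stand-in */

/*
 * Fold all entries of the drop node into the save node: lower-hashed entries
 * go in front, otherwise they are appended. Returns the save node's new count.
 */
static int node_unbalance(const struct node_entry *drop_btree, int drop_count,
			  struct node_entry *save_btree, int save_count)
{
	if (drop_count &&
	    drop_btree[0].hashval < save_btree[0].hashval) {
		/* Drop block sorts first: shift save's entries up, copy in front. */
		memmove(&save_btree[drop_count], &save_btree[0],
			(size_t)save_count * sizeof(*save_btree));
		memcpy(&save_btree[0], &drop_btree[0],
		       (size_t)drop_count * sizeof(*save_btree));
	} else {
		/* Drop block sorts last: append its entries. */
		memcpy(&save_btree[save_count], &drop_btree[0],
		       (size_t)drop_count * sizeof(*save_btree));
	}
	return save_count + drop_count;
}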
1590 struct xfs_da_node_entry *btree;
1656 btree = nodehdr.btree;
1674 blk->hashval = be32_to_cpu(btree[max - 1].hashval);
1683 btreehashval = be32_to_cpu(btree[probe].hashval);
1693 (be32_to_cpu(btree[probe].hashval) == hashval));
1700 be32_to_cpu(btree[probe].hashval) >= hashval) {
1704 be32_to_cpu(btree[probe].hashval) < hashval) {
1713 blkno = be32_to_cpu(btree[max - 1].before);
1716 blkno = be32_to_cpu(btree[probe].before);
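The matches at 1590-1716 are the node lookup: a coarse binary search over the hash-ordered entry array, a backup step to the first entry with a matching (possibly duplicated) hashval, and then the child block number is taken from that entry's before field, or from the last entry if every hashval is smaller than the search value. A sketch of the probe, host-endian again:

#include <stdint.h>

struct node_entry { uint32_t hashval; uint32_t before; };   /* hypothetical stand-in */

/*
 * Pick the child to descend into: the first entry whose hashval is >= the
 * search hashval, or the last entry if every hashval is smaller. Returns the
 * entry index and stores the child block number in *blkno.
 */
static int node_lookup_probe(const struct node_entry *btree, int max,
			     uint32_t hashval, uint32_t *blkno)
{
	int probe, span;

	/* Coarse binary search; small nodes skip the loop entirely. */
	probe = span = max / 2;
	while (span > 4) {
		span /= 2;
		if (btree[probe].hashval < hashval)
			probe += span;
		else if (btree[probe].hashval > hashval)
			probe -= span;
		else
			break;
	}

	/* Hashvals may repeat: settle on the first entry with hashval >= target. */
	while (probe > 0 && btree[probe].hashval >= hashval)
		probe--;
	while (probe < max && btree[probe].hashval < hashval)
		probe++;

	if (probe == max) {
		*blkno = btree[max - 1].before;   /* everything hashes lower */
		return max - 1;
	}
	*blkno = btree[probe].before;
	return probe;
}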
1788 btree1 = node1hdr.btree;
1789 btree2 = node2hdr.btree;
1987 struct xfs_da_node_entry *btree;
2014 blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
2018 blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
2071 btree = nodehdr.btree;
2072 blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
2077 blkno = be32_to_cpu(btree[blk->index].before);
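The matches at 1987-2077 are from the path-shift code used to walk to an adjacent leaf: the path is climbed until some level can step one entry sideways, and the lower levels are then re-read down the near edge, following each entry's before pointer and refreshing the per-level hashval. A sketch of both halves, with a hypothetical path representation:

#include <stdint.h>

struct node_entry { uint32_t hashval; uint32_t before; };   /* hypothetical stand-in */

struct path_blk {                       /* one level of the lookup path (hypothetical) */
	const struct node_entry *btree;     /* entry array of the node at this level */
	int count;                          /* entries in use */
	int index;                          /* entry followed to reach the level below */
	uint32_t hashval;                   /* highest hash stored in this block */
};

/*
 * Walk up the path looking for a level that can step one entry forward or
 * backward; return that level and the sibling child block to descend into,
 * or -1 if the path is already at the edge of the tree.
 */
static int path_shift_up(struct path_blk *path, int levels, int forward,
			 uint32_t *child_blkno)
{
	int level;

	for (level = levels - 1; level >= 0; level--) {
		struct path_blk *blk = &path[level];

		if (forward && blk->index < blk->count - 1)
			blk->index++;
		else if (!forward && blk->index > 0)
			blk->index--;
		else
			continue;                   /* this level is pinned at the edge */
		*child_blkno = blk->btree[blk->index].before;
		return level;
	}
	return -1;
}

/*
 * On the way back down, each freshly read node hugs the near edge and
 * records its own highest hashval (cf. lines 2071-2077 in the listing).
 */
static uint32_t path_step_down(struct path_blk *blk, int forward)
{
	blk->index = forward ? 0 : blk->count - 1;
	blk->hashval = blk->btree[blk->count - 1].hashval;
	return blk->btree[blk->index].before;       /* child block to read next */
}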
2235 * Add a block to the btree ahead of the file.
2256 * Ick. We need to always be able to remove a btree block, even
2258 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
2261 * a bmap btree split to do that.
2274 struct xfs_da_node_entry *btree;
2311 * Read the last block in the btree space.
2342 btree = deadhdr.btree;
2344 dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
2404 btree = par_hdr.btree;
2407 be32_to_cpu(btree[entno].hashval) < dead_hash;
2414 par_blkno = be32_to_cpu(btree[entno].before);
2427 be32_to_cpu(btree[entno].before) != last_blkno;
2448 btree = par_hdr.btree;
2454 btree[entno].before = cpu_to_be32(dead_blkno);
2456 XFS_DA_LOGRANGE(par_node, &btree[entno].before,
2457 sizeof(btree[entno].before)));
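The matches at 2235-2457 surround the swap-lastblock logic: to free a block in the middle of the btree space, the physically last block is copied over the "dead" block, its parent is located by walking entries whose hashval is below the moved block's highest hash, and the parent entry whose before still names the old location is repointed at the dead block. A sketch of that final fixup:

#include <stdint.h>

struct node_entry { uint32_t hashval; uint32_t before; };   /* hypothetical stand-in */

/*
 * In the parent node, find the entry that still points at 'last_blkno' (the
 * block just copied away) and repoint it at 'dead_blkno' (the slot it was
 * copied into). Returns the entry index, or -1 if nothing matched, which the
 * real code treats as corruption.
 */
static int repoint_moved_child(struct node_entry *btree, int count,
			       uint32_t last_blkno, uint32_t dead_blkno)
{
	int entno;

	for (entno = 0; entno < count; entno++) {
		if (btree[entno].before == last_blkno) {
			btree[entno].before = dead_blkno;
			/* The kernel logs &btree[entno].before here. */
			return entno;
		}
	}
	return -1;
}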
2471 * Remove a btree block from a directory or attribute.