Lines Matching refs:btree

140 to->btree = from3->__btree;
148 to->btree = from->__btree;
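
The two from_disk hits above copy a pointer, not an array: the in-core header keeps a btree member aimed at the entry array sitting just past the (v4 or v5 sized) on-disk header. A minimal sketch of the shapes involved (the real types are xfs_da_node_entry / xfs_da3_icnode_hdr; big-endian __be32 fields are simplified here to host integers):

#include <stdint.h>

/* one routing entry: the largest hashval found in the child block 'before' names */
struct da_node_entry {
	uint32_t hashval;	/* largest hashval present in that child */
	uint32_t before;	/* block number of the child */
};

/* in-core header; 'btree' points into the mapped on-disk block */
struct da_icnode_hdr {
	uint32_t forw, back;	/* sibling links */
	uint16_t magic;
	uint16_t count;		/* live entries in the array */
	uint16_t level;		/* tree level of this node */
	struct da_node_entry *btree;
};
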
561 * Update the btree to show the new hashval for this child.
640 struct xfs_da_node_entry *btree;
675 btree = icnodehdr.btree;
676 size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
734 btree = nodehdr.btree;
735 btree[0].hashval = cpu_to_be32(blk1->hashval);
736 btree[0].before = cpu_to_be32(blk1->blkno);
737 btree[1].hashval = cpu_to_be32(blk2->hashval);
738 btree[1].before = cpu_to_be32(blk2->blkno);
754 XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));
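
Every XFS_DA_LOGRANGE hit in this listing is the same trick: turn "this field inside this buffer" into a byte range so only the touched bytes get logged, here the two freshly written root entries, elsewhere a single entry. A hypothetical stand-alone illustration of the offset arithmetic; as I read it, the real macro hands a first/last byte pair to xfs_trans_log_buf():

#include <stddef.h>
#include <stdio.h>

/* illustration only: report the inclusive byte span addr..addr+size-1
 * relative to the start of the buffer, i.e. the range a caller would dirty */
static void show_log_range(const void *base, const void *addr, size_t size)
{
	size_t first = (size_t)((const char *)addr - (const char *)base);

	printf("log bytes %zu..%zu of this block\n", first, first + size - 1);
}
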
854 * Balance the btree elements between two intermediate nodes,
886 btree1 = nodehdr1.btree;
887 btree2 = nodehdr2.btree;
902 btree1 = nodehdr1.btree;
903 btree2 = nodehdr2.btree;
985 btree1 = nodehdr1.btree;
986 btree2 = nodehdr2.btree;
1011 struct xfs_da_node_entry *btree;
1019 btree = nodehdr.btree;
1032 tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
1033 memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
1035 btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
1036 btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
1038 XFS_DA_LOGRANGE(node, &btree[oldblk->index],
1039 tmp + sizeof(*btree)));
1050 oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
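
The insert hits above are the classic open-a-hole pattern: shift the tail of the entry array up one slot, drop the new entry in, and remember the node's new largest hashval. A host-endian sketch, without the transaction logging:

#include <stdint.h>
#include <string.h>

struct da_node_entry { uint32_t hashval; uint32_t before; };

/* sketch: insert a routing entry at 'index' in a node holding 'count' entries */
static void node_insert(struct da_node_entry *btree, uint16_t *count,
			int index, uint32_t hashval, uint32_t before)
{
	/* open a hole: move entries at and after 'index' up by one slot */
	memmove(&btree[index + 1], &btree[index],
		(*count - index) * sizeof(*btree));
	btree[index].hashval = hashval;
	btree[index].before = before;
	(*count)++;
}
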
1203 child = be32_to_cpu(oldroothdr.btree[0].before);
1374 return be32_to_cpu(nodehdr.btree[nodehdr.count - 1].hashval);
1388 struct xfs_da_node_entry *btree;
1420 btree = nodehdr.btree;
1421 if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
1424 btree[blk->index].hashval = cpu_to_be32(lasthash);
1426 XFS_DA_LOGRANGE(node, &btree[blk->index],
1427 sizeof(*btree)));
1429 lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
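
These fixhashpath hits show the upward propagation: once a block's largest hashval changes, each ancestor's routing entry is rewritten until one already agrees, and the node's own largest hashval is carried to the next level. A hypothetical sketch over a simplified cached path (path[0] as the immediate parent, rising toward the root):

#include <stdint.h>

struct da_node_entry { uint32_t hashval; uint32_t before; };

/* one level of the cached lookup path: the node's entries, how many,
 * and which entry points at the level below */
struct path_blk {
	struct da_node_entry *btree;
	int count;
	int index;
};

/* sketch: push a changed largest-hashval upward until an ancestor matches */
static void fix_hash_path(struct path_blk *path, int levels, uint32_t lasthash)
{
	for (int level = 0; level < levels; level++) {
		struct path_blk *blk = &path[level];

		if (blk->btree[blk->index].hashval == lasthash)
			break;			/* ancestors already consistent */
		blk->btree[blk->index].hashval = lasthash;
		/* the node's own largest hashval is what its parent records */
		lasthash = blk->btree[blk->count - 1].hashval;
	}
}
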
1443 struct xfs_da_node_entry *btree;
1459 btree = nodehdr.btree;
1463 memmove(&btree[index], &btree[index + 1], tmp);
1465 XFS_DA_LOGRANGE(node, &btree[index], tmp));
1468 memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
1470 XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
1479 drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
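
Removal is the mirror image: close the hole with a memmove, zero the now-stale last slot so it never looks like a live entry, and note the surviving largest hashval. A stand-alone sketch without the buffer logging:

#include <stdint.h>
#include <string.h>

struct da_node_entry { uint32_t hashval; uint32_t before; };

/* sketch: drop the routing entry at 'index' from a node of 'count' entries */
static void node_remove(struct da_node_entry *btree, uint16_t *count, int index)
{
	if (index < *count - 1)
		memmove(&btree[index], &btree[index + 1],
			(*count - index - 1) * sizeof(*btree));
	/* the vacated final slot is cleared rather than left stale */
	memset(&btree[*count - 1], 0, sizeof(*btree));
	(*count)--;
}
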
1509 drop_btree = drop_hdr.btree;
1510 save_btree = save_hdr.btree;
1578 struct xfs_da_node_entry *btree;
1644 btree = nodehdr.btree;
1662 blk->hashval = be32_to_cpu(btree[max - 1].hashval);
1671 btreehashval = be32_to_cpu(btree[probe].hashval);
1681 (be32_to_cpu(btree[probe].hashval) == hashval));
1688 be32_to_cpu(btree[probe].hashval) >= hashval) {
1692 be32_to_cpu(btree[probe].hashval) < hashval) {
1701 blkno = be32_to_cpu(btree[max - 1].before);
1704 blkno = be32_to_cpu(btree[probe].before);
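
The lookup hits above make the per-node descent decision: search the sorted hashvals, back up to the first duplicate, and follow that entry's before pointer, falling through to the last entry when every hashval is smaller than the target. A simplified host-endian stand-in (the kernel narrows with a span-halving probe plus linear steps rather than a plain lower-bound search):

#include <stdint.h>

struct da_node_entry { uint32_t hashval; uint32_t before; };

/* sketch: pick which child block to descend into for 'hashval' */
static uint32_t pick_child(const struct da_node_entry *btree, int count,
			   uint32_t hashval)
{
	int lo = 0, hi = count - 1;

	/* find the first entry whose hashval is >= the one we want */
	while (lo <= hi) {
		int probe = lo + (hi - lo) / 2;

		if (btree[probe].hashval < hashval)
			lo = probe + 1;
		else
			hi = probe - 1;
	}
	if (lo == count)			/* target beyond every entry */
		return btree[count - 1].before;
	return btree[lo].before;
}
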
1776 btree1 = node1hdr.btree;
1777 btree2 = node2hdr.btree;
1975 struct xfs_da_node_entry *btree;
2002 blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
2006 blkno = be32_to_cpu(nodehdr.btree[blk->index].before);
2059 btree = nodehdr.btree;
2060 blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
2065 blkno = be32_to_cpu(btree[blk->index].before);
2223 * Add a block to the btree ahead of the file.
2244 * Ick. We need to always be able to remove a btree block, even
2246 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
2249 * a bmap btree split to do that.
2262 struct xfs_da_node_entry *btree;
2299 * Read the last block in the btree space.
2330 btree = deadhdr.btree;
2332 dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
2392 btree = par_hdr.btree;
2395 be32_to_cpu(btree[entno].hashval) < dead_hash;
2402 par_blkno = be32_to_cpu(btree[entno].before);
2415 be32_to_cpu(btree[entno].before) != last_blkno;
2436 btree = par_hdr.btree;
2442 btree[entno].before = cpu_to_be32(dead_blkno);
2444 XFS_DA_LOGRANGE(par_node, &btree[entno].before,
2445 sizeof(btree[entno].before)));
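
The swap-last-block hits walk the parent node looking for whichever entry still names the block being relocated, then repoint just that one before field and log it. A hypothetical host-endian version of the scan, skipping the hashval-guided positioning the kernel does first:

#include <stdint.h>

struct da_node_entry { uint32_t hashval; uint32_t before; };

/* sketch: in the parent, redirect the entry for 'last_blkno' to 'dead_blkno';
 * returns -1 if no entry references the moved block (treated as corruption) */
static int repoint_child(struct da_node_entry *btree, int count,
			 uint32_t last_blkno, uint32_t dead_blkno)
{
	for (int entno = 0; entno < count; entno++) {
		if (btree[entno].before == last_blkno) {
			btree[entno].before = dead_blkno;
			return 0;
		}
	}
	return -1;
}
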
2459 * Remove a btree block from a directory or attribute.