Lines matching refs: bh (struct buffer_head) in fs/ext2/xattr.c
53 * by the bh lock. No more than a single bh lock is held at any time
69 #define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
71 #define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
81 # define ea_bdebug(bh, f...) do { \
83 bh->b_bdev, (unsigned long) bh->b_blocknr); \
89 # define ea_bdebug(bh, f...) no_printk(f)
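
The HDR() and FIRST_ENTRY() macros above encode the on-disk layout of an ext2 xattr block: the header occupies the start of the buffer's data, and the first entry begins immediately after it, which is exactly what the pointer arithmetic HDR(bh)+1 expresses. Below is a minimal user-space sketch of the same cast idiom; the fields mirror struct ext2_xattr_header from fs/ext2/xattr.h, but xattr_header, xattr_entry and main() here are illustrative stand-ins, not kernel code:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in for struct ext2_xattr_header (32 bytes on disk). */
    struct xattr_header {
            uint32_t h_magic;
            uint32_t h_refcount;
            uint32_t h_blocks;
            uint32_t h_hash;
            uint32_t h_reserved[4];
    };

    /* Stand-in for the fixed part of struct ext2_xattr_entry. */
    struct xattr_entry {
            uint8_t  e_name_len;
            uint8_t  e_name_index;
            uint16_t e_value_offs;
    };

    /* Same idiom as HDR(bh) / FIRST_ENTRY(bh): the header is simply
     * the start of the block, and header + 1 points just past it. */
    #define HDR(data)         ((struct xattr_header *)(data))
    #define FIRST_ENTRY(data) ((struct xattr_entry *)(HDR(data) + 1))

    int main(void)
    {
            unsigned char block[1024] = { 0 };

            printf("first entry at offset %zu\n",
                   (size_t)((char *)FIRST_ENTRY(block) - (char *)block));
            return 0;
    }

This prints 32, the size of the header, confirming that the entry table starts directly behind it.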
199 struct buffer_head *bh = NULL;
220 bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
222 if (!bh)
224 ea_bdebug(bh, "b_count=%d, refcount=%d",
225 atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
226 end = bh->b_data + bh->b_size;
227 if (!ext2_xattr_header_valid(HDR(bh))) {
237 entry = FIRST_ENTRY(bh);
252 if (ext2_xattr_cache_insert(ea_block_cache, bh))
258 if (ext2_xattr_cache_insert(ea_block_cache, bh))
265 memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
271 brelse(bh);
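
Lines 199 through 271 are from ext2_xattr_get(): the EA block named by i_file_acl is read with sb_bread(), its header is validated, the entry list is searched, the value bytes are copied out, and the buffer reference is dropped with brelse() on every exit path. A kernel-style sketch of that read, validate, release shape; read_ea_block() is a hypothetical name, error handling is compressed, and the ext2 helpers are assumed from the listing:

    /* Sketch only: assumes kernel context and the ext2 helpers shown
     * in the listing (EXT2_I, HDR, ext2_xattr_header_valid). */
    static int read_ea_block(struct inode *inode)
    {
            struct buffer_head *bh;
            int error;

            if (!EXT2_I(inode)->i_file_acl)
                    return -ENODATA;        /* inode has no EA block */

            bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
            if (!bh)
                    return -EIO;
            error = -EIO;
            if (!ext2_xattr_header_valid(HDR(bh)))
                    goto cleanup;           /* bad magic or refcount */
            /* ... walk entries from FIRST_ENTRY(bh), copy the value ... */
            error = 0;
    cleanup:
            brelse(bh);                     /* always balance sb_bread() */
            return error;
    }

sb_bread() returns a referenced buffer_head, so every path out of the function must pass through brelse(); the single cleanup label makes that hard to get wrong.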
291 struct buffer_head *bh = NULL;
306 bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
308 if (!bh)
310 ea_bdebug(bh, "b_count=%d, refcount=%d",
311 atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
312 end = bh->b_data + bh->b_size;
313 if (!ext2_xattr_header_valid(HDR(bh))) {
323 entry = FIRST_ENTRY(bh);
330 if (ext2_xattr_cache_insert(ea_block_cache, bh))
334 for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
361 brelse(bh);
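
Lines 291 through 361 show the same open/validate/release frame in ext2_xattr_list(), plus the entry walk itself: entries are visited from FIRST_ENTRY(bh) until IS_LAST_ENTRY() hits the zeroed sentinel that terminates the list. A sketch of that traversal, using the macros from fs/ext2/xattr.h, with the loop body elided:

    /* Each entry is variable length: a fixed header followed by
     * e_name_len bytes of name, padded to a 4-byte boundary.
     * EXT2_XATTR_NEXT() does that size arithmetic. */
    struct ext2_xattr_entry *entry;

    for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
         entry = EXT2_XATTR_NEXT(entry)) {
            /* ... emit the prefix chosen by e_name_index plus the
             * e_name_len-byte name into the caller's buffer ... */
    }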
411 struct buffer_head *bh = NULL;
419 * header -- Points either into bh, or to a temporarily
446 bh = sb_bread(sb, EXT2_I(inode)->i_file_acl);
448 if (!bh)
450 ea_bdebug(bh, "b_count=%d, refcount=%d",
451 atomic_read(&(bh->b_count)),
452 le32_to_cpu(HDR(bh)->h_refcount));
453 header = HDR(bh);
454 end = bh->b_data + bh->b_size;
468 last = FIRST_ENTRY(bh);
520 /* assert(header == HDR(bh)); */
521 lock_buffer(bh);
525 ea_bdebug(bh, "modifying in-place");
531 bh->b_blocknr);
537 unlock_buffer(bh);
538 ea_bdebug(bh, "cloning");
539 header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
545 offset = (char *)here - bh->b_data;
547 offset = (char *)last - bh->b_data;
562 /* Iff we are modifying the block in-place, bh is locked here. */
635 if (bh && header == HDR(bh))
636 unlock_buffer(bh); /* we were modifying in-place. */
637 error = ext2_xattr_set2(inode, bh, NULL);
640 if (bh && header == HDR(bh))
641 unlock_buffer(bh); /* we were modifying in-place. */
642 error = ext2_xattr_set2(inode, bh, header);
646 if (!(bh && header == HDR(bh)))
648 brelse(bh);
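
Lines 411 through 648 come from ext2_xattr_set(), which treats the EA block as copy-on-write: if this inode is the block's only user (h_refcount == 1) it is edited in place under lock_buffer(), and the buffer stays locked until ext2_xattr_set2() writes it; if the block is shared, the buffer is unlocked and the header is cloned with kmemdup() so the edit lands in a private copy destined for a fresh block. The saved byte offsets at lines 545 and 547 let the entry pointers be re-derived inside the clone. A sketch of that decision; edit_header() is a hypothetical wrapper name:

    /* Sketch (kernel context assumed): pick in-place edit or clone. */
    static struct ext2_xattr_header *edit_header(struct buffer_head *bh)
    {
            struct ext2_xattr_header *header = HDR(bh);

            lock_buffer(bh);
            if (header->h_refcount == cpu_to_le32(1)) {
                    ea_bdebug(bh, "modifying in-place");
                    return header;  /* bh stays locked until written */
            }

            /* Shared block: never modify it. Unlock, duplicate the
             * whole block, and edit the private copy instead. */
            unlock_buffer(bh);
            ea_bdebug(bh, "cloning");
            header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
            if (header)
                    header->h_refcount = cpu_to_le32(1);
            return header;          /* NULL means -ENOMEM */
    }

The header == HDR(bh) tests at lines 635, 640 and 646 are how the real code later tells the two cases apart: only the in-place case still holds the buffer lock and must drop it before calling ext2_xattr_set2().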
795 struct buffer_head *bh = NULL;
817 bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
818 if (!bh) {
824 ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
825 if (!ext2_xattr_header_valid(HDR(bh))) {
831 lock_buffer(bh);
832 if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
833 __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
840 bh->b_blocknr);
842 get_bh(bh);
843 bforget(bh);
844 unlock_buffer(bh);
846 le32_add_cpu(&HDR(bh)->h_refcount, -1);
847 ea_bdebug(bh, "refcount now=%d",
848 le32_to_cpu(HDR(bh)->h_refcount));
849 unlock_buffer(bh);
850 mark_buffer_dirty(bh);
852 sync_dirty_buffer(bh);
858 brelse(bh);
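
Lines 795 through 858 show how a reference to an EA block is dropped in ext2_xattr_delete_inode(): the last holder (h_refcount == 1) frees the block and calls get_bh() followed by bforget() so any dirty state is discarded rather than written back, while other holders simply decrement the on-disk refcount with le32_add_cpu() and mark the buffer dirty. A sketch of the two paths; release_ea_block() is a hypothetical name and the allocator/quota calls are elided:

    /* Sketch (kernel context assumed): drop one block reference. */
    static void release_ea_block(struct inode *inode, struct buffer_head *bh)
    {
            lock_buffer(bh);
            if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
                    /* Last user: the block is being freed, so throw
                     * away dirty data instead of writing it. get_bh()
                     * keeps bh valid across bforget(). */
                    get_bh(bh);
                    bforget(bh);
                    unlock_buffer(bh);
                    /* ... ext2_free_blocks(), quota release ... */
            } else {
                    le32_add_cpu(&HDR(bh)->h_refcount, -1);
                    ea_bdebug(bh, "refcount now=%d",
                              le32_to_cpu(HDR(bh)->h_refcount));
                    unlock_buffer(bh);
                    mark_buffer_dirty(bh);
                    if (IS_SYNC(inode))
                            sync_dirty_buffer(bh);
            }
            brelse(bh);
    }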
871 ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
873 __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
876 error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr,
880 ea_bdebug(bh, "already in cache");
884 ea_bdebug(bh, "inserting [%x]", (int)hash);
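
Lines 871 through 884 are ext2_xattr_cache_insert(), the producer side of EA block deduplication: the block's precomputed h_hash is the mbcache key and its block number the value, and -EBUSY from mb_cache_entry_create() is not a failure, it just means an identical entry is already cached. A sketch that keeps the listing's shape; cache_insert() is a hypothetical name:

    /* Sketch (kernel context assumed): register a block in mbcache. */
    static int cache_insert(struct mb_cache *cache, struct buffer_head *bh)
    {
            __u32 hash = le32_to_cpu(HDR(bh)->h_hash);
            int error;

            error = mb_cache_entry_create(cache, GFP_NOFS, hash,
                                          bh->b_blocknr, true);
            if (error == -EBUSY) {
                    ea_bdebug(bh, "already in cache");
                    error = 0;      /* duplicate entry: not an error */
            }
            return error;
    }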
949 struct buffer_head *bh;
951 bh = sb_bread(inode->i_sb, ce->e_value);
952 if (!bh) {
957 lock_buffer(bh);
969 unlock_buffer(bh);
970 brelse(bh);
972 } else if (le32_to_cpu(HDR(bh)->h_refcount) >
976 le32_to_cpu(HDR(bh)->h_refcount),
978 } else if (!ext2_xattr_cmp(header, HDR(bh))) {
979 ea_bdebug(bh, "b_count=%d",
980 atomic_read(&(bh->b_count)));
983 return bh;
985 unlock_buffer(bh);
986 brelse(bh);
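
Lines 949 through 986 are the consumer side, from ext2_xattr_cache_find(): a candidate block found by hash is read back with sb_bread(), checked under lock_buffer(), skipped if its refcount already hit the sharing ceiling (EXT2_XATTR_REFCOUNT_MAX), and byte-compared against the new header with ext2_xattr_cmp(); only on a full match is the still-locked buffer returned so the caller can bump h_refcount safely. A compressed sketch of one candidate check; next_candidate is a hypothetical label for the surrounding search loop:

    bh = sb_bread(inode->i_sb, ce->e_value);   /* candidate block */
    if (!bh)
            goto next_candidate;
    lock_buffer(bh);
    if (le32_to_cpu(HDR(bh)->h_refcount) > EXT2_XATTR_REFCOUNT_MAX) {
            /* already shared by too many inodes; leave it alone */
    } else if (!ext2_xattr_cmp(header, HDR(bh))) {
            return bh;      /* identical block: returned locked, so the
                             * caller can take a reference atomically */
    }
    unlock_buffer(bh);
    brelse(bh);

The full byte compare is what makes hash collisions harmless; the real code (lines 957 through 970) additionally rechecks h_hash under the buffer lock to catch blocks that were freed or rehashed between the cache lookup and the read.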