Lines Matching refs:buffer

47  * Sector aligned buffer routines for buffer create/read/write/access
52 * an operation involving the given XFS log buffer. Returns true if the fields
69 * Allocate a buffer to hold log data. The buffer needs to be able to map to
78 * Pass log block 0 since we don't have an addr yet, buffer will be
82 xfs_warn(log->l_mp, "Invalid block length (0x%x) for buffer",
92 * In addition, the buffer may be used for a non-sector-aligned block
94 * beyond the end of the buffer. If the requested size is only 1 basic
97 * blocks (sector size 1). But otherwise we extend the buffer by one
109 * in a log buffer. The buffer covers a log sector-aligned region.
131 "Invalid log block/length (0x%llx, 0x%x) for buffer",
271 char *buffer,
285 error = xlog_bread(log, mid_blk, 1, buffer, &offset);
321 char *buffer;
327 * Greedily allocate a buffer big enough to handle the full
335 while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
346 error = xlog_bread(log, i, bcount, buffer, &buf);
364 kmem_free(buffer);
386 * of the last block in the given buffer. extra_bblks contains the number
401 char *buffer;
411 buffer = xlog_alloc_buffer(log, num_blks);
412 if (!buffer) {
413 buffer = xlog_alloc_buffer(log, 1);
414 if (!buffer)
418 error = xlog_bread(log, start_blk, num_blks, buffer, &offset);
435 error = xlog_bread(log, i, 1, buffer, &offset);
480 kmem_free(buffer);
502 char *buffer;
532 buffer = xlog_alloc_buffer(log, 1);
533 if (!buffer)
536 error = xlog_bread(log, 0, 1, buffer, &offset);
543 error = xlog_bread(log, last_blk, 1, buffer, &offset);
613 error = xlog_find_cycle_start(log, buffer, first_blk, &head_blk,
630 * in one buffer.
734 kmem_free(buffer);
748 kmem_free(buffer);
759 * records encountered or a negative error code. The log block and buffer
768 char *buffer,
787 error = xlog_bread(log, i, 1, buffer, &offset);
806 error = xlog_bread(log, i, 1, buffer, &offset);
833 * buffer pointer of the last record seen are returned in rblk and rhead
842 char *buffer,
861 error = xlog_bread(log, i, 1, buffer, &offset);
879 error = xlog_bread(log, i, 1, buffer, &offset);
942 char *buffer;
949 buffer = xlog_alloc_buffer(log, 1);
950 if (!buffer)
957 error = xlog_seek_logrec_hdr(log, head_blk, *tail_blk, 1, buffer,
987 buffer, &tmp_tail, &thead, &wrapped);
1002 kmem_free(buffer);
1024 char *buffer,
1040 * that could have been in flight at one time. Use a temporary buffer so
1041 * we don't trash the rhead/buffer pointers from the caller.
1071 * Get the header block and buffer pointer for the last good
1079 buffer, rhead_blk, rhead, wrapped);
1139 char *buffer,
1167 error = xlog_bread(log, umount_data_blk, 1, buffer, &offset);
1235 * We could speed up search by using current head_blk buffer, but it is not
1246 char *buffer;
1260 buffer = xlog_alloc_buffer(log, 1);
1261 if (!buffer)
1264 error = xlog_bread(log, 0, 1, buffer, &offset);
1280 error = xlog_rseek_logrec_hdr(log, *head_blk, *head_blk, 1, buffer,
1302 rhead_blk, buffer, &clean);
1319 error = xlog_verify_head(log, head_blk, tail_blk, buffer,
1330 rhead, rhead_blk, buffer,
1368 kmem_free(buffer);
1396 char *buffer;
1406 buffer = xlog_alloc_buffer(log, 1);
1407 if (!buffer)
1409 error = xlog_bread(log, 0, 1, buffer, &offset);
1416 kmem_free(buffer);
1421 error = xlog_bread(log, log_bbnum-1, 1, buffer, &offset);
1427 kmem_free(buffer);
1433 error = xlog_find_cycle_start(log, buffer, 0, &last_blk, 0);
1474 kmem_free(buffer);
1482 * to initialize a buffer full of empty log record headers and write
1517 char *buffer;
1526 * Greedily allocate a buffer big enough to handle the full
1534 while (!(buffer = xlog_alloc_buffer(log, bufblks))) {
1541 * the buffer in the starting sector not covered by the first
1546 error = xlog_bread_noalign(log, start_block, 1, buffer);
1560 * the buffer in the final sector not covered by the write.
1566 buffer + BBTOB(ealign - start_block));
1572 offset = buffer + xlog_align(log, start_block);
1578 error = xlog_bwrite(log, start_block, endcount, buffer);
1586 kmem_free(buffer);
1828 * cancelled buffer and replaying the cancelled buffer can remove it
1829 * from the cancelled buffer table. Hence they have to be done last.
1832 * read the buffer and replay changes into it. For filesystems using the
1838 * This ensures that inodes are completely flushed to the inode buffer
1844 * But there's a problem with that - we can't tell an inode allocation buffer
1845 * apart from a regular buffer, so we can't separate them. We can, however,
1846 * tell an inode unlink buffer from the others, and so we can separate them out
1851 * - item_list for all non-buffer items
1969 * If the transaction modifies a buffer or inode, do it now. Otherwise,
2373 * The recovered buffer queue is drained only once we know that all
2377 * - Buffer write submission updates the metadata LSN of the buffer.
2380 * - Separate recovery items against the same metadata buffer can share
2386 * In other words, we are allowed to submit a buffer from log recovery
2780 * Unlock the buffer so that it can be acquired in the normal course of
2782 * racing with anyone else here for the AGI buffer, we don't even need
2784 * the buffer. We keep buffer reference though, so that it stays pinned
2785 * in memory while we need the buffer.
2926 * and h_len must not be greater than LR buffer size.
2974 * Read the header of the tail block and get the iclog buffer size from
2997 * log buffer can be too small for the record and cause an
3000 * Detect this condition here. Use lsunit for the buffer size as
3002 * error to avoid a buffer overrun.
3075 * - we increased the buffer size originally
3137 * - we increased the buffer size originally
3312 * Now that we've finished replaying all buffer and inode updates,