Lines Matching defs:rhead
766 * pointer of the last record seen are returned in rblk and rhead respectively.
776 struct xlog_rec_header **rhead,
799 *rhead = (struct xlog_rec_header *) offset;
820 *rhead = (struct xlog_rec_header *) offset;
839 * buffer pointer of the last record seen are returned in rblk and rhead
850 struct xlog_rec_header **rhead,
873 *rhead = (struct xlog_rec_header *) offset;
893 *rhead = (struct xlog_rec_header *) offset;
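The two seek helpers matched above, xlog_rseek_logrec_hdr and xlog_seek_logrec_hdr, share one pattern: walk 512-byte basic blocks until the first word of a block is the record-header magic, then hand back both the block number (rblk) and a pointer to the header (rhead). Below is a simplified userspace sketch of the backward variant, assuming an in-memory copy of the log and ignoring wrap-around and buffered I/O; rseek_logrec_hdr_sketch is a made-up name and ntohl() stands in for be32_to_cpu().

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>                  /* ntohl() as a be32_to_cpu() stand-in */

#define BBSIZE                  512             /* basic block size */
#define XLOG_HEADER_MAGIC_NUM   0xFEEDbabe      /* on-disk record header magic */

/*
 * Scan backwards from start_blk for a block whose first word is the record
 * header magic.  On success return 0 and report the block number and a
 * pointer to the header, mirroring the rblk/rhead out parameters of the
 * kernel helpers; return -1 if no header is found.  log_buf is assumed to
 * be 4-byte aligned.
 */
static int
rseek_logrec_hdr_sketch(
        const uint8_t   *log_buf,       /* in-memory copy of the log */
        int             start_blk,      /* block to start searching from */
        int             *rblk,          /* out: block of the header found */
        const uint32_t  **rhead)        /* out: pointer to that header */
{
        for (int blk = start_blk; blk >= 0; blk--) {
                const uint32_t *word =
                        (const uint32_t *)(log_buf + (size_t)blk * BBSIZE);

                if (ntohl(*word) == XLOG_HEADER_MAGIC_NUM) {
                        *rblk = blk;
                        *rhead = word;
                        return 0;
                }
        }
        return -1;              /* no record header at or before start_blk */
}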
1032 struct xlog_rec_header **rhead, /* ptr to last record */
1047 * we don't trash the rhead/buffer pointers from the caller.
1085 buffer, rhead_blk, rhead, wrapped);
1101 *tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
1111 be32_to_cpu((*rhead)->h_size));
1143 struct xlog_rec_header *rhead,
1166 hblks = xlog_logrec_hblks(log, rhead);
1168 rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));
1171 be32_to_cpu(rhead->h_num_logops) == 1) {
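Lines 1143-1171 come from xlog_check_unmount_rec(), which decides whether the filesystem was unmounted cleanly: the block immediately past the last record (its header blocks plus BTOBB(h_len) data blocks) must be exactly where the head was found, and that record must hold a single log operation. The minimal predicate below captures just that arithmetic; the kernel additionally wraps the block number around the physical log end and inspects the op header's unmount flag, both omitted here, and looks_like_unmount_rec_sketch is an invented name.

#include <stdint.h>
#include <arpa/inet.h>          /* ntohl() as a be32_to_cpu() stand-in */

#define BBSIZE          512
#define BTOBB(bytes)    (((bytes) + BBSIZE - 1) / BBSIZE)

/*
 * The geometric part of the clean-unmount check around lines 1166-1171:
 * the unmount record is a single-op record sitting directly after the last
 * record header, so header blocks + data blocks land exactly on the head.
 */
static int
looks_like_unmount_rec_sketch(
        int             head_blk,        /* where the head was found */
        int             rhead_blk,       /* block of the last record header */
        int             hblks,           /* header blocks (1, or more for v2) */
        uint32_t        h_len_be,        /* rhead->h_len, still big-endian */
        uint32_t        h_num_logops_be) /* rhead->h_num_logops, big-endian */
{
        int after_umount_blk = rhead_blk + hblks + (int)BTOBB(ntohl(h_len_be));

        return head_blk == after_umount_blk && ntohl(h_num_logops_be) == 1;
}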
1201 struct xlog_rec_header *rhead,
1217 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
1220 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
1221 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
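Lines 1201-1221 belong to xlog_set_state(), which seeds the in-core log state from the last record header that was found: the current cycle comes from h_cycle (bumped by one when the head wrapped past the physical end of the log), and the tail and last-sync LSNs come from h_tail_lsn and h_lsn. The sketch below reproduces only that field mapping with userspace types; the struct and function names are invented for illustration, be32toh()/be64toh() stand in for be32_to_cpu()/be64_to_cpu(), and the kernel's atomics, block fields and grant-head updates are left out.

#include <stdint.h>
#include <stdbool.h>
#include <endian.h>     /* be32toh()/be64toh(), assuming a glibc-style environment */

/* Trimmed stand-ins for the on-disk record header and the in-core log. */
struct rec_header_sketch {
        uint32_t        h_cycle;        /* big-endian: write cycle of this record */
        uint64_t        h_lsn;          /* big-endian: LSN of this record */
        uint64_t        h_tail_lsn;     /* big-endian: LSN of the log tail */
};

struct log_state_sketch {
        uint32_t        curr_cycle;     /* cycle the head sits in */
        uint64_t        tail_lsn;       /* oldest LSN still needed */
        uint64_t        last_sync_lsn;  /* LSN of the last record written */
};

/* Mirror the h_cycle/h_tail_lsn/h_lsn assignments seen around lines 1217-1221. */
static void
set_state_sketch(
        struct log_state_sketch         *log,
        const struct rec_header_sketch  *rhead,
        bool                            wrapped)  /* head wrapped past the end? */
{
        log->curr_cycle = be32toh(rhead->h_cycle);
        if (wrapped)
                log->curr_cycle++;      /* head is one cycle ahead of the record */
        log->tail_lsn      = be64toh(rhead->h_tail_lsn);
        log->last_sync_lsn = be64toh(rhead->h_lsn);
}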
1250 xlog_rec_header_t *rhead;
1287 &rhead_blk, &rhead, &wrapped);
1295 *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
1300 xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
1307 error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
1326 &rhead_blk, &rhead, &wrapped);
1332 xlog_set_state(log, *head_blk, rhead, rhead_blk,
1336 rhead, rhead_blk, buffer,
2271 struct xlog_rec_header *rhead,
2300 trans->r_lsn = be64_to_cpu(rhead->h_lsn);
2316 struct xlog_rec_header *rhead,
2346 trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
2400 struct xlog_rec_header *rhead,
2410 end = dp + be32_to_cpu(rhead->h_len);
2411 num_logops = be32_to_cpu(rhead->h_num_logops);
2414 if (xlog_header_check_recover(log->l_mp, rhead))
2417 trace_xfs_log_recover_record(log, rhead, pass);
2425 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
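Lines 2400-2425 are the op-walking loop of xlog_recover_process_data(): dp points at the payload after the record header, end is dp + h_len, and each iteration consumes one fixed-size op header plus oh_len bytes of payload before handing the op to xlog_recover_process_ophdr(). The standalone sketch below shows only the walk and the overrun checks; op_header_sketch is a cut-down stand-in for xlog_op_header_t (the real one also carries client id and flags), and nothing is actually replayed.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>          /* ntohl() as a be32_to_cpu() stand-in */

/* Cut-down op header: just enough to size each op's payload. */
struct op_header_sketch {
        uint32_t        oh_tid;         /* big-endian transaction id */
        uint32_t        oh_len;         /* big-endian payload bytes that follow */
};

/*
 * Walk the op headers packed into one record, the way the loop around
 * lines 2410-2425 does.  Returns the number of ops seen, or -1 if an op
 * header or its payload would run past the end of the record.
 */
static int
walk_ophdrs_sketch(const uint8_t *dp, uint32_t record_len)
{
        const uint8_t           *end = dp + record_len;
        struct op_header_sketch ohead;
        int                     nops = 0;

        while ((size_t)(end - dp) >= sizeof(ohead)) {
                uint32_t len;

                memcpy(&ohead, dp, sizeof(ohead));      /* avoid unaligned access */
                dp += sizeof(ohead);

                len = ntohl(ohead.oh_len);
                if (len > (size_t)(end - dp))
                        return -1;      /* corrupt: payload overruns the record */
                /* a real pass hands the op and its payload to the item code here */
                dp += len;
                nops++;
        }
        return nops;
}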
2800 struct xlog_rec_header *rhead,
2806 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
2808 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
2813 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
2814 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
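Lines 2800-2814 are the body of xlog_unpack_data(). When a record is written, the first 32-bit word of every 512-byte basic block of its payload is overwritten with the cycle number, and the original words are saved in h_cycle_data[] in the record header; for records longer than 32k they spill into the extended headers that the xlog_in_core_2_t cast on line 2813 reaches. Recovery copies the saved words back before the ops are parsed. Below is a standalone sketch of the basic restore loop only, with the extended-header half omitted; saved_words plays the role of h_cycle_data[] and unpack_data_sketch is an invented name.

#include <stdint.h>
#include <string.h>

#define BBSIZE          512                             /* basic block size */
#define BTOBB(bytes)    (((bytes) + BBSIZE - 1) / BBSIZE)
#define HDR_CYCLE_WORDS (32768 / BBSIZE)                /* 64 words fit in the basic header */

/*
 * Put the saved first words back into each 512-byte block of record data.
 * The words were stored verbatim (still big-endian), so a plain copy is
 * enough; no byte swapping happens here, just as in the kernel loop.
 * Blocks beyond the first 64 are restored from the extended headers in the
 * real code, which this sketch leaves out.
 */
static void
unpack_data_sketch(
        uint8_t         *dp,            /* start of the record's data blocks */
        uint32_t        record_len,     /* rhead->h_len in bytes */
        const uint32_t  *saved_words)   /* stand-in for rhead->h_cycle_data[] */
{
        for (uint32_t i = 0; i < BTOBB(record_len) && i < HDR_CYCLE_WORDS; i++) {
                memcpy(dp, &saved_words[i], sizeof(uint32_t));
                dp += BBSIZE;
        }
}

Stamping every block with the cycle number is what lets recovery find the head of the log and spot partially written records, which is why the original words have to be saved at write time and restored here.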
2830 struct xlog_rec_header *rhead,
2835 __le32 old_crc = rhead->h_crc;
2838 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
2878 xlog_unpack_data(rhead, dp, log);
2880 return xlog_recover_process_data(log, rhash, rhead, dp, pass,
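Lines 2830-2880 show the order of operations in xlog_recover_process(): read the checksum the header carries, recompute it over the header and the record data, and only after the comparison proceed to xlog_unpack_data() and xlog_recover_process_data(). The sketch below illustrates just the verify step with a plain bitwise CRC-32C; the kernel's xlog_cksum() differs in seeding, finalization and in also covering v2 extended headers, so treat this as an illustration of "verify before unpack" rather than a byte-compatible reimplementation, and both function names are invented.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <endian.h>             /* le32toh(): h_crc is the one little-endian field */

/* Plain bitwise CRC-32C (Castagnoli), standing in for the kernel's crc32c(). */
static uint32_t
crc32c_sketch(uint32_t crc, const uint8_t *p, size_t len)
{
        crc = ~crc;
        while (len--) {
                crc ^= *p++;
                for (int k = 0; k < 8; k++)
                        crc = (crc & 1) ? (crc >> 1) ^ 0x82F63B78 : crc >> 1;
        }
        return ~crc;
}

/*
 * Verify a record before unpacking it: remember the stored checksum, zero
 * the checksum field (it is not covered by its own CRC), recompute over the
 * header followed by the data blocks, then restore the field and compare.
 * Returns 0 on a match.
 */
static int
verify_record_crc_sketch(
        uint8_t         *hdr,           /* record header bytes */
        size_t          hdr_len,        /* size of the on-disk header */
        size_t          crc_off,        /* offset of the crc field in the header */
        const uint8_t   *data,          /* record data blocks */
        size_t          data_len)       /* rhead->h_len in bytes */
{
        uint32_t        stored_le, computed;

        memcpy(&stored_le, hdr + crc_off, sizeof(stored_le));
        memset(hdr + crc_off, 0, sizeof(uint32_t));

        computed = crc32c_sketch(0, hdr, hdr_len);
        computed = crc32c_sketch(computed, data, data_len); /* continue across both buffers */

        memcpy(hdr + crc_off, &stored_le, sizeof(stored_le));
        return computed == le32toh(stored_le) ? 0 : -1;
}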
2887 struct xlog_rec_header *rhead,
2894 rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
2897 (!rhead->h_version ||
2898 (be32_to_cpu(rhead->h_version) &
2901 __func__, be32_to_cpu(rhead->h_version));
2909 hlen = be32_to_cpu(rhead->h_len);
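Lines 2887-2909 list the checks xlog_valid_rec_header() applies before a record is trusted: the magic number must match, the version word must be non-zero and contain only known version bits, and h_len must be non-zero and no larger than the buffer the caller is prepared to read it into. A compact userspace restatement of those three checks follows; the raw fields are passed in still big-endian, ntohl() stands in for be32_to_cpu(), and valid_rec_header_sketch is an invented name.

#include <stdint.h>
#include <arpa/inet.h>          /* ntohl() as a be32_to_cpu() stand-in */

#define XLOG_HEADER_MAGIC_NUM   0xFEEDbabe
#define XLOG_VERSION_1          1
#define XLOG_VERSION_2          2
#define XLOG_VERSION_OKBITS     (XLOG_VERSION_1 | XLOG_VERSION_2)

/*
 * The three sanity checks seen around lines 2894-2909: magic, version bits,
 * and a record length that fits the buffer it will be read into.
 * Returns 0 when the header looks plausible.
 */
static int
valid_rec_header_sketch(
        uint32_t        h_magicno,      /* big-endian, as read from disk */
        uint32_t        h_version,      /* big-endian */
        uint32_t        h_len,          /* big-endian */
        uint32_t        buf_size)       /* bytes the caller can read */
{
        uint32_t hlen = ntohl(h_len);

        if (ntohl(h_magicno) != XLOG_HEADER_MAGIC_NUM)
                return -1;                              /* not a record header */
        if (!h_version ||
            (ntohl(h_version) & ~XLOG_VERSION_OKBITS))
                return -1;                              /* unknown version bits */
        if (hlen == 0 || hlen > buf_size)
                return -1;                              /* implausible record length */
        return 0;
}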
2935 xlog_rec_header_t *rhead;
2972 rhead = (xlog_rec_header_t *)offset;
2985 h_size = be32_to_cpu(rhead->h_size);
2986 h_len = be32_to_cpu(rhead->h_len);
2988 rhead->h_num_logops == cpu_to_be32(1)) {
2995 error = xlog_valid_rec_header(log, rhead, tail_blk, h_size);
3006 (rhead->h_version & cpu_to_be32(XLOG_VERSION_2))) {
3080 rhead = (xlog_rec_header_t *)offset;
3081 error = xlog_valid_rec_header(log, rhead,
3086 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3142 error = xlog_recover_process(log, rhash, rhead, offset,
3162 rhead = (xlog_rec_header_t *)offset;
3163 error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
3168 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3174 error = xlog_recover_process(log, rhash, rhead, offset, pass,
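Lines 2935-3174 are all from xlog_do_recovery_pass(), the driver that walks the log from the tail to the head: read a record header, validate it, work out how many header blocks (hblks) and data blocks (BTOBB(h_len)) it occupies, read and process the data, then advance to the next record. The sketch below keeps only that advance arithmetic over an in-memory, non-wrapping log region; real I/O, the wrap-around handling that produces the two similar blocks above, and the call into xlog_recover_process() are all left out. The struct and function names are invented, though the field prefix follows the start of the on-disk xlog_rec_header layout.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>          /* ntohl() as a be32_to_cpu() stand-in */

#define BBSIZE          512
#define BTOBB(bytes)    (((bytes) + BBSIZE - 1) / BBSIZE)
#define REC_MAGIC       0xFEEDbabe      /* XLOG_HEADER_MAGIC_NUM */

/* First four words of the on-disk record header, enough for this sketch. */
struct rec_header_prefix {
        uint32_t        h_magicno;
        uint32_t        h_cycle;
        uint32_t        h_version;
        uint32_t        h_len;          /* big-endian payload length in bytes */
};

/*
 * Walk an in-memory, non-wrapping stretch of the log from tail to head:
 * read a record header, check the magic, then step over the header blocks
 * plus BTOBB(h_len) data blocks to reach the next record.  hblks is 1 for
 * v1 logs and can be larger for v2 logs with big log buffers.  Returns the
 * number of records walked, or -1 on a block that is not a record header.
 */
static int
count_records_sketch(const uint8_t *log_buf, int tail_blk, int head_blk, int hblks)
{
        int blk_no = tail_blk;
        int nrecords = 0;

        while (blk_no < head_blk) {
                struct rec_header_prefix rhead;

                memcpy(&rhead, log_buf + (size_t)blk_no * BBSIZE, sizeof(rhead));
                if (ntohl(rhead.h_magicno) != REC_MAGIC)
                        return -1;      /* corrupt range: not a record header */

                /* skip this record: its header block(s) plus its data blocks */
                blk_no += hblks + (int)BTOBB(ntohl(rhead.h_len));
                nrecords++;
        }
        return nrecords;
}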