Lines Matching defs:rhead (fs/xfs/xfs_log_recover.c)
760 * pointer of the last record seen are returned in rblk and rhead respectively.
770 struct xlog_rec_header **rhead,
793 *rhead = (struct xlog_rec_header *) offset;
814 *rhead = (struct xlog_rec_header *) offset;
833 * buffer pointer of the last record seen are returned in rblk and rhead
844 struct xlog_rec_header **rhead,
867 *rhead = (struct xlog_rec_header *) offset;
887 *rhead = (struct xlog_rec_header *) offset;
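
The two groups above (roughly 760-814 and 833-887) are the backward and forward record-header seek helpers: each walks 512-byte basic blocks until it finds a record header, and the block number and buffer pointer of the last header seen come back through rblk and rhead, which is what the repeated "*rhead = (struct xlog_rec_header *) offset" assignments do. Below is a minimal userspace sketch of the backward scan, assuming only that a record header starts with the big-endian magic 0xfeedbabe (XLOG_HEADER_MAGIC_NUM); the torn-write and wrap handling of the real helpers is omitted.

/* Sketch: locate the last record header at or before start_blk in a
 * buffer of 512-byte basic blocks.  Simplified; no wrap handling. */
#include <stdint.h>

#define BBSIZE			512
#define XLOG_HEADER_MAGIC_NUM	0xfeedbabeU	/* stored big-endian on disk */

static uint32_t get_be32(const void *p)
{
	const unsigned char *b = p;

	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}

/* Returns the header's block index, or -1 if none was found.  *rhead ends
 * up pointing into the caller's buffer, as in the kernel helpers. */
static long rseek_logrec_hdr(const char *buf, long start_blk,
			     const char **rhead)
{
	for (long blk = start_blk; blk >= 0; blk--) {
		const char *offset = buf + blk * BBSIZE;

		if (get_be32(offset) == XLOG_HEADER_MAGIC_NUM) {
			*rhead = offset;
			return blk;
		}
	}
	return -1;
}
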
1026 struct xlog_rec_header **rhead, /* ptr to last record */
1041 * we don't trash the rhead/buffer pointers from the caller.
1079 buffer, rhead_blk, rhead, wrapped);
1095 *tail_blk = BLOCK_LSN(be64_to_cpu((*rhead)->h_tail_lsn));
1105 be32_to_cpu((*rhead)->h_size));
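
The 1026-1105 group is the head-verification path: it re-seeks the last record into temporary variables so the caller's rhead/buffer pointers are not trashed, then derives the tail block from that record's h_tail_lsn and forwards its h_size. The BLOCK_LSN() call works because an XFS LSN packs the cycle number in the upper 32 bits and the basic-block number in the lower 32 bits; a small sketch of that split (illustrative helper names, not the kernel macros):

/* Sketch: splitting a 64-bit LSN into cycle and block, the way
 * CYCLE_LSN()/BLOCK_LSN() do after the big-endian load. */
#include <stdint.h>
#include <stdio.h>

static uint32_t lsn_cycle(uint64_t lsn) { return (uint32_t)(lsn >> 32); }
static uint32_t lsn_block(uint64_t lsn) { return (uint32_t)lsn; }

int main(void)
{
	uint64_t tail_lsn = ((uint64_t)7 << 32) | 4096;	/* cycle 7, block 4096 */

	printf("tail: cycle %u, block %u\n",
	       lsn_cycle(tail_lsn), lsn_block(tail_lsn));
	return 0;
}
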
1137 struct xlog_rec_header *rhead,
1160 hblks = xlog_logrec_hblks(log, rhead);
1162 rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)));
1165 be32_to_cpu(rhead->h_num_logops) == 1) {
1195 struct xlog_rec_header *rhead,
1211 log->l_curr_cycle = be32_to_cpu(rhead->h_cycle);
1214 atomic64_set(&log->l_tail_lsn, be64_to_cpu(rhead->h_tail_lsn));
1215 atomic64_set(&log->l_last_sync_lsn, be64_to_cpu(rhead->h_lsn));
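
The 1137-1165 group decides whether the record at rhead is immediately followed by an unmount record: the candidate position is the header block plus the header size in blocks (hblks) plus BTOBB(h_len) payload blocks, and an unmount record carries exactly one log operation. The 1195-1215 group then seeds the in-core state (current cycle, tail_lsn, last_sync_lsn) from that same header. A sketch of the position arithmetic, with illustrative names:

/* Sketch: block arithmetic behind the unmount-record check.
 * BTOBB() rounds a byte count up to 512-byte basic blocks. */
#include <stdint.h>
#include <stdbool.h>

#define BBSHIFT		9
#define BBSIZE		(1 << BBSHIFT)
#define BTOBB(bytes)	(((uint64_t)(bytes) + BBSIZE - 1) >> BBSHIFT)

/* Where the next record header would start, mirroring
 * rhead_blk + hblks + BTOBB(be32_to_cpu(rhead->h_len)). */
static uint64_t after_rec_blk(uint64_t rhead_blk, int hblks, uint32_t h_len)
{
	return rhead_blk + hblks + BTOBB(h_len);
}

/* An unmount record is a single-op record sitting right at the head. */
static bool may_be_unmount_rec(uint64_t head_blk, uint64_t rhead_blk,
			       int hblks, uint32_t h_len,
			       uint32_t h_num_logops)
{
	return head_blk == after_rec_blk(rhead_blk, hblks, h_len) &&
	       h_num_logops == 1;
}
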
1244 xlog_rec_header_t *rhead;
1281 &rhead_blk, &rhead, &wrapped);
1289 *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn));
1294 xlog_set_state(log, *head_blk, rhead, rhead_blk, wrapped);
1301 error = xlog_check_unmount_rec(log, head_blk, tail_blk, rhead,
1320 &rhead_blk, &rhead, &wrapped);
1326 xlog_set_state(log, *head_blk, rhead, rhead_blk,
1330 rhead, rhead_blk, buffer,
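
The 1244-1330 group is the tail-finding routine itself: seek back to the last record header before the head, take the tail block straight from its h_tail_lsn, set the in-core state via xlog_set_state, check for an unmount record, and redo the seek if head verification moved the head. The fields it reads sit at the front of the on-disk record header; below is a simplified reader for that prefix, assuming the abbreviated layout shown and that the struct can be overlaid on the raw block without padding (all fields are stored big-endian).

/* Sketch: the leading fields of the on-disk record header that the
 * tail-finding code consumes.  Abbreviated, assumed layout. */
#include <stdint.h>

struct rec_header_prefix {
	uint32_t h_magicno;	/* 0xfeedbabe */
	uint32_t h_cycle;	/* write cycle of the log */
	uint32_t h_version;
	uint32_t h_len;		/* record payload length in bytes */
	uint64_t h_lsn;		/* LSN of this record */
	uint64_t h_tail_lsn;	/* LSN of the oldest record still needed */
};

static uint64_t get_be64(const void *p)
{
	const unsigned char *b = p;
	uint64_t v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | b[i];
	return v;
}

/* tail_blk is the low 32 bits of the byte-swapped h_tail_lsn, which is
 * what *tail_blk = BLOCK_LSN(be64_to_cpu(rhead->h_tail_lsn)) computes. */
static uint32_t tail_blk_from_header(const struct rec_header_prefix *hdr)
{
	return (uint32_t)get_be64(&hdr->h_tail_lsn);
}
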
2291 struct xlog_rec_header *rhead,
2320 trans->r_lsn = be64_to_cpu(rhead->h_lsn);
2336 struct xlog_rec_header *rhead,
2366 trans = xlog_recover_ophdr_to_trans(rhash, rhead, ohead);
2420 struct xlog_rec_header *rhead,
2430 end = dp + be32_to_cpu(rhead->h_len);
2431 num_logops = be32_to_cpu(rhead->h_num_logops);
2434 if (xlog_header_check_recover(log->l_mp, rhead))
2437 trace_xfs_log_recover_record(log, rhead, pass);
2445 error = xlog_recover_process_ophdr(log, rhash, rhead, ohead,
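
The 2291-2366 and 2420-2445 groups are record processing: h_len bounds the payload, h_num_logops says how many operation headers to expect, and each op header is either attached to an existing transaction or starts a new one via xlog_recover_ophdr_to_trans. A sketch of the walk over operation headers inside one record, assuming only that each op header carries a 32-bit transaction id and a 32-bit big-endian payload length in a 12-byte header, followed by that many payload bytes:

/* Sketch: iterate the log operations packed inside one record.
 * Layout assumption: 12-byte op header (tid, len, clientid, flags, pad)
 * followed by len bytes of payload. */
#include <stdint.h>
#include <stddef.h>

#define OPHDR_SIZE	12

static uint32_t get_be32(const void *p)
{
	const unsigned char *b = p;

	return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
	       ((uint32_t)b[2] << 8) | (uint32_t)b[3];
}

/* Returns the number of ops walked, or -1 if an op runs past h_len,
 * mirroring the dp/end/num_logops loop in the recovery code. */
static int walk_record_ops(const char *dp, uint32_t h_len,
			   uint32_t num_logops)
{
	const char *end = dp + h_len;
	uint32_t n;

	for (n = 0; n < num_logops; n++) {
		uint32_t oh_len;

		if (dp + OPHDR_SIZE > end)
			return -1;
		oh_len = get_be32(dp + 4);	/* second field: payload length */
		dp += OPHDR_SIZE;
		if (oh_len > (size_t)(end - dp))
			return -1;
		/* ... hand (tid, payload) to transaction assembly here ... */
		dp += oh_len;
	}
	return (int)n;
}
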
2819 struct xlog_rec_header *rhead,
2825 for (i = 0; i < BTOBB(be32_to_cpu(rhead->h_len)) &&
2827 *(__be32 *)dp = *(__be32 *)&rhead->h_cycle_data[i];
2832 xlog_in_core_2_t *xhdr = (xlog_in_core_2_t *)rhead;
2833 for ( ; i < BTOBB(be32_to_cpu(rhead->h_len)); i++) {
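
The 2819-2833 group undoes the cycle stamping: when a record is written, the first four bytes of every 512-byte block are overwritten with the cycle number so a torn write is detectable, and the original words are parked in h_cycle_data[]; records larger than the header's cycle-data array spill the extra words into extended headers, which is what the xlog_in_core_2_t cast at 2832 handles. A simplified restore, assuming a caller-supplied array of the saved words and ignoring the extended-header case:

/* Sketch: put the saved first word back at the start of each 512-byte
 * block of the record payload (the inverse of the cycle stamping done
 * at write time).  Extended-header handling for large records omitted. */
#include <stdint.h>
#include <string.h>

#define BBSHIFT	9
#define BBSIZE	(1 << BBSHIFT)
#define BTOBB(bytes)	(((uint64_t)(bytes) + BBSIZE - 1) >> BBSHIFT)

static void unpack_data(char *dp, uint32_t h_len, const uint32_t *cycle_data)
{
	uint64_t nblocks = BTOBB(h_len);

	for (uint64_t i = 0; i < nblocks; i++)
		memcpy(dp + i * BBSIZE, &cycle_data[i], sizeof(uint32_t));
}
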
2849 struct xlog_rec_header *rhead,
2854 __le32 old_crc = rhead->h_crc;
2857 crc = xlog_cksum(log, rhead, dp, be32_to_cpu(rhead->h_len));
2897 xlog_unpack_data(rhead, dp, log);
2899 return xlog_recover_process_data(log, rhash, rhead, dp, pass,
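
The 2849-2899 group verifies and then processes one record: the stored h_crc is saved, the checksum is recomputed over the header and payload, and only then is the payload unpacked and handed to xlog_recover_process_data. A rough userspace outline of the verification step follows; the CRC32c routine is a plain bitwise implementation and the exact seeding and field handling of the kernel's xlog_cksum is not reproduced here.

/* Sketch: recompute a record checksum and compare it with the stored one.
 * Simplified: checksums the header with its CRC field zeroed, then the
 * payload; the kernel's exact seeding differs. */
#include <stdint.h>
#include <string.h>
#include <stdbool.h>

static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
	const unsigned char *p = buf;

	crc = ~crc;
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0x82F63B78U & (0U - (crc & 1)));
	}
	return ~crc;
}

static bool record_crc_ok(const void *hdr, size_t hdr_len, size_t crc_off,
			  const void *payload, size_t payload_len,
			  uint32_t stored_crc)
{
	unsigned char tmp[512];
	uint32_t crc;

	if (hdr_len > sizeof(tmp) || crc_off + sizeof(uint32_t) > hdr_len)
		return false;
	memcpy(tmp, hdr, hdr_len);
	memset(tmp + crc_off, 0, sizeof(uint32_t));	/* CRC field counts as zero */

	crc = crc32c(0, tmp, hdr_len);
	crc = crc32c(crc, payload, payload_len);
	return crc == stored_crc;
}
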
2906 struct xlog_rec_header *rhead,
2913 rhead->h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM)))
2916 (!rhead->h_version ||
2917 (be32_to_cpu(rhead->h_version) &
2920 __func__, be32_to_cpu(rhead->h_version));
2928 hlen = be32_to_cpu(rhead->h_len);
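
The 2906-2928 group is the header sanity check reused throughout the pass: the magic number must match, the version must be set (the kernel additionally rejects unknown version bits), and h_len must be sane for the buffer that will hold the payload. A minimal version of those checks, with the valid-length bound left as a caller-supplied parameter:

/* Sketch: basic record-header sanity checks (magic, version, length).
 * max_len stands in for the size the caller is prepared to read. */
#include <stdint.h>
#include <stdbool.h>

#define XLOG_HEADER_MAGIC_NUM	0xfeedbabeU

static bool rec_header_valid(uint32_t h_magicno, uint32_t h_version,
			     uint32_t h_len, uint32_t max_len)
{
	if (h_magicno != XLOG_HEADER_MAGIC_NUM)
		return false;
	if (h_version == 0)		/* kernel also rejects unknown bits */
		return false;
	if (h_len == 0 || h_len > max_len)
		return false;
	return true;
}
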
2954 xlog_rec_header_t *rhead;
2991 rhead = (xlog_rec_header_t *)offset;
3004 h_size = be32_to_cpu(rhead->h_size);
3005 h_len = be32_to_cpu(rhead->h_len);
3007 rhead->h_num_logops == cpu_to_be32(1)) {
3014 error = xlog_valid_rec_header(log, rhead, tail_blk, h_size);
3018 hblks = xlog_logrec_hblks(log, rhead);
3091 rhead = (xlog_rec_header_t *)offset;
3092 error = xlog_valid_rec_header(log, rhead,
3097 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3153 error = xlog_recover_process(log, rhash, rhead, offset,
3173 rhead = (xlog_rec_header_t *)offset;
3174 error = xlog_valid_rec_header(log, rhead, blk_no, h_size);
3179 bblks = (int)BTOBB(be32_to_cpu(rhead->h_len));
3185 error = xlog_recover_process(log, rhash, rhead, offset, pass,
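
The 2954-3185 group is the main recovery-pass loop: for each record between tail and head it reads the header block, validates it with xlog_valid_rec_header, works out how many header blocks (xlog_logrec_hblks) and payload blocks (BTOBB(h_len)) follow, reads them, and hands the lot to xlog_recover_process; the two branches around 3091 and 3173 are the wrapped and unwrapped halves of the circular log. A skeleton of the unwrapped case with the I/O and processing left as caller-supplied callbacks (illustrative signatures, not the kernel's):

/* Sketch: one pass over the records between tail_blk and head_blk in an
 * unwrapped log region.  read_blks() and process_rec() stand in for the
 * buffer I/O and record processing; wrap handling omitted. */
#include <stdint.h>

#define BBSHIFT	9
#define BBSIZE	(1 << BBSHIFT)
#define BTOBB(bytes)	(((uint64_t)(bytes) + BBSIZE - 1) >> BBSHIFT)

typedef int (*read_blks_fn)(void *ctx, uint64_t blk, uint64_t nblks,
			    char *buf);
typedef int (*process_rec_fn)(void *ctx, const char *hdr, const char *data,
			      uint32_t h_len);

static int do_recovery_pass(void *ctx, uint64_t tail_blk, uint64_t head_blk,
			    int hblks, char *hbuf, char *dbuf,
			    read_blks_fn read_blks, process_rec_fn process_rec,
			    uint32_t (*hdr_len)(const char *hdr))
{
	uint64_t blk_no = tail_blk;

	while (blk_no < head_blk) {
		uint32_t h_len;
		uint64_t bblks;
		int error;

		/* read and decode the record header */
		error = read_blks(ctx, blk_no, hblks, hbuf);
		if (error)
			return error;
		h_len = hdr_len(hbuf);		/* big-endian h_len, validated */
		bblks = BTOBB(h_len);

		/* read the payload and replay the record's operations */
		error = read_blks(ctx, blk_no + hblks, bblks, dbuf);
		if (error)
			return error;
		error = process_rec(ctx, hbuf, dbuf, h_len);
		if (error)
			return error;

		blk_no += hblks + bblks;
	}
	return 0;
}
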