Lines Matching defs:iclog

43 	struct xlog_in_core	*iclog);
50 struct xlog_in_core **iclog,
60 struct xlog_in_core *iclog,
69 struct xlog_in_core *iclog,
74 struct xlog_in_core *iclog);
509 * Run all the pending iclog callbacks and wake log force waiters and iclog
518 while the iclog owner might still be preparing the iclog for IO submission.
520 * again to process any callbacks that may have been added to that iclog.
526 struct xlog_in_core *iclog;
529 iclog = log->l_iclog;
531 if (atomic_read(&iclog->ic_refcnt)) {
532 /* Reference holder will re-run iclog callbacks. */
535 list_splice_init(&iclog->ic_callbacks, &cb_list);
541 wake_up_all(&iclog->ic_write_wait);
542 wake_up_all(&iclog->ic_force_wait);
543 } while ((iclog = iclog->ic_next) != log->l_iclog);
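
The loop above (source lines 526-543) walks the circular iclog ring once, skipping any iclog that still has a reference holder since that holder will re-run the callbacks itself. A minimal, compilable userspace sketch of the same walk, with plain C stand-ins for the kernel's list_head and waitqueue machinery (nothing here is the real XFS API):

    #include <stddef.h>

    struct cb {
            struct cb *next;
            void (*fn)(void *arg);
            void *arg;
    };

    struct iclog {
            struct iclog *ic_next;       /* circular ring link */
            int           ic_refcnt;     /* active reference holders */
            struct cb    *ic_callbacks;  /* pending completion callbacks */
    };

    static void run_pending_callbacks(struct iclog *ring_head)
    {
            struct iclog *ic = ring_head;

            do {
                    if (ic->ic_refcnt == 0) {
                            /* detach the whole list first, as list_splice_init
                             * does, then run the callbacks off-list */
                            struct cb *list = ic->ic_callbacks;

                            ic->ic_callbacks = NULL;
                            while (list) {
                                    struct cb *next = list->next;

                                    list->fn(list->arg);
                                    list = next;
                            }
                            /* the kernel would now wake ic_write_wait and
                             * ic_force_wait sleepers for this iclog */
                    }
                    ic = ic->ic_next;
            } while (ic != ring_head);
    }
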
549 * Flush iclog to disk if this is the last reference to the given iclog and the
552 * If XLOG_ICL_NEED_FUA is already set on the iclog, we need to ensure that the
553 * log tail is updated correctly. NEED_FUA indicates that the iclog will be
555 * within the iclog. We need to ensure that the log tail does not move beyond
556 * the tail that the first commit record in the iclog ordered against, otherwise
558 * performed on this iclog.
560 * Hence if NEED_FUA is set and the current iclog tail lsn is empty, write the
561 * current tail into iclog. Once the iclog tail is set, future operations must
564 * the iclog will get zeroed on activation of the iclog after sync, so we
565 * always capture the tail lsn on the iclog on the first NEED_FUA release
566 * regardless of the number of active reference counts on this iclog.
571 struct xlog_in_core *iclog,
579 trace_xlog_iclog_release(iclog, _RET_IP_);
582 * of the tail LSN into the iclog so we guarantee that the log tail does
583 * not move between the first time we know that the iclog needs to be
586 if ((iclog->ic_state == XLOG_STATE_WANT_SYNC ||
587 (iclog->ic_flags & XLOG_ICL_NEED_FUA)) &&
588 !iclog->ic_header.h_tail_lsn) {
590 iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
593 last_ref = atomic_dec_and_test(&iclog->ic_refcnt);
597 * If there are no more references to this iclog, process the
598 * pending iclog callbacks that were waiting on the release of
599 * this iclog.
609 if (iclog->ic_state != XLOG_STATE_WANT_SYNC) {
610 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
614 iclog->ic_state = XLOG_STATE_SYNCING;
615 xlog_verify_tail_lsn(log, iclog);
616 trace_xlog_iclog_syncing(iclog, _RET_IP_);
619 xlog_sync(log, iclog, ticket);
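
The comment block above (source lines 549-566) is the core of the release ordering: the tail LSN is captured on the first NEED_FUA (or WANT_SYNC) release, and the iclog is only pushed to disk when the final reference is dropped. A hedged sketch of that ordering, using simplified stand-in types and placeholder helpers (current_tail_lsn() and sync_iclog() are inventions for this sketch, not kernel functions):

    #include <stdint.h>

    enum ic_state { IC_ACTIVE, IC_WANT_SYNC, IC_SYNCING };
    #define ICL_NEED_FUA (1u << 0)

    struct iclog {
            enum ic_state state;
            unsigned int  flags;
            int           refcnt;
            uint64_t      h_tail_lsn;    /* 0 means "not captured yet" */
    };

    static uint64_t current_tail_lsn(void) { return 1; }   /* placeholder */
    static void sync_iclog(struct iclog *ic) { (void)ic; } /* submit I/O here */

    static void release_iclog(struct iclog *ic)
    {
            /* Capture the tail exactly once, before the refcount can reach
             * zero, so the recorded tail never moves past the tail that the
             * first commit record in this iclog ordered against. */
            if ((ic->state == IC_WANT_SYNC || (ic->flags & ICL_NEED_FUA)) &&
                !ic->h_tail_lsn)
                    ic->h_tail_lsn = current_tail_lsn();

            if (--ic->refcnt > 0)
                    return;                 /* another holder finishes the job */
            if (ic->state != IC_WANT_SYNC)
                    return;                 /* still ACTIVE: nothing to push */

            ic->state = IC_SYNCING;
            sync_iclog(ic);
    }
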
843 * Flush out the iclog to disk ensuring that device caches are flushed and
844 * the iclog hits stable storage before any completion waiters are woken.
848 struct xlog_in_core *iclog)
850 atomic_inc(&iclog->ic_refcnt);
851 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
852 if (iclog->ic_state == XLOG_STATE_ACTIVE)
853 xlog_state_switch_iclogs(iclog->ic_log, iclog, 0);
854 return xlog_state_release_iclog(iclog->ic_log, iclog, NULL);
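
For the flush-and-FUA guarantee described above (source lines 843-854), a rough stand-alone sketch: take a reference so the iclog cannot be synced underneath us, set both cache-control flags, switch it out of ACTIVE so no new copy-ins land in it, and drop the reference so the normal release path submits it. All names are stand-ins for this sketch:

    enum ic_state { IC_ACTIVE, IC_WANT_SYNC };
    #define ICL_NEED_FLUSH (1u << 0)
    #define ICL_NEED_FUA   (1u << 1)

    struct iclog {
            enum ic_state state;
            unsigned int  flags;
            int           refcnt;
    };

    static void switch_iclog(struct iclog *ic)  { ic->state = IC_WANT_SYNC; }
    static void release_iclog(struct iclog *ic) { --ic->refcnt; /* may submit I/O */ }

    static void force_iclog(struct iclog *ic)
    {
            ic->refcnt++;                               /* prevents early sync */
            ic->flags |= ICL_NEED_FLUSH | ICL_NEED_FUA; /* flush cache + FUA write */
            if (ic->state == IC_ACTIVE)
                    switch_iclog(ic);                   /* stop new copy-ins */
            release_iclog(ic);                          /* last ref submits it */
    }
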
865 struct xlog_in_core *iclog = log->l_iclog;
868 down(&iclog->ic_sema);
869 up(&iclog->ic_sema);
870 iclog = iclog->ic_next;
875 * Wait for the iclog and all prior iclogs to be written to disk as required by the
876 * log force state machine. Waiting on ic_force_wait ensures iclog completions
882 struct xlog_in_core *iclog)
883 __releases(iclog->ic_log->l_icloglock)
885 struct xlog *log = iclog->ic_log;
887 trace_xlog_iclog_wait_on(iclog, _RET_IP_);
889 iclog->ic_state != XLOG_STATE_ACTIVE &&
890 iclog->ic_state != XLOG_STATE_DIRTY) {
892 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
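
The wait above (source lines 875-892) sleeps until the iclog has cycled back to ACTIVE or DIRTY, dropping the icloglock while asleep. A userspace analogue using a pthread condition variable to model xlog_wait() (the loop is the usual condvar idiom; the types are stand-ins):

    #include <pthread.h>

    enum ic_state { IC_ACTIVE, IC_DIRTY, IC_WANT_SYNC, IC_SYNCING };

    struct iclog {
            enum ic_state    state;
            pthread_mutex_t *lock;        /* models l_icloglock */
            pthread_cond_t   force_wait;  /* models ic_force_wait */
    };

    /* Called with *lock held; returns with it released, like the kernel code. */
    static void wait_on_iclog(struct iclog *ic)
    {
            while (ic->state != IC_ACTIVE && ic->state != IC_DIRTY)
                    pthread_cond_wait(&ic->force_wait, ic->lock);
            pthread_mutex_unlock(ic->lock);
    }
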
956 struct xlog_in_core *iclog;
974 iclog = log->l_iclog;
975 error = xlog_force_iclog(iclog);
976 xlog_wait_on_iclog(iclog);
988 struct xlog_in_core *iclog = log->l_iclog;
991 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
992 ASSERT(iclog->ic_offset == 0);
993 } while ((iclog = iclog->ic_next) != log->l_iclog);
1100 * If shutdown has come from iclog IO context, the log
1102 * for the iclog to complete shutdown processing before we
1224 * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
1276 * We may be holding the log iclog lock upon entering this routine.
1378 struct xlog_in_core *iclog =
1380 struct xlog *log = iclog->ic_log;
1383 error = blk_status_to_errno(iclog->ic_bio.bi_status);
1386 if (iclog->ic_fail_crc)
1398 xlog_state_done_syncing(iclog);
1399 bio_uninit(&iclog->ic_bio);
1403 * iclog after this, so an unmount waiting on this lock can now tear it
1404 * down safely. As such, it is unsafe to reference the iclog after the
1407 up(&iclog->ic_sema);
1526 xlog_in_core_t *iclog, *prev_iclog=NULL;
1596 * The amount of memory to allocate for the iclog structure is
1607 iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL);
1608 if (!iclog)
1611 *iclogp = iclog;
1612 iclog->ic_prev = prev_iclog;
1613 prev_iclog = iclog;
1615 iclog->ic_data = kvzalloc(log->l_iclog_size,
1617 if (!iclog->ic_data)
1619 head = &iclog->ic_header;
1629 iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
1630 iclog->ic_state = XLOG_STATE_ACTIVE;
1631 iclog->ic_log = log;
1632 atomic_set(&iclog->ic_refcnt, 0);
1633 INIT_LIST_HEAD(&iclog->ic_callbacks);
1634 iclog->ic_datap = (void *)iclog->ic_data + log->l_iclog_hsize;
1636 init_waitqueue_head(&iclog->ic_force_wait);
1637 init_waitqueue_head(&iclog->ic_write_wait);
1638 INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
1639 sema_init(&iclog->ic_sema, 1);
1641 iclogp = &iclog->ic_next;
1661 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
1662 prev_iclog = iclog->ic_next;
1663 kmem_free(iclog->ic_data);
1664 kmem_free(iclog);
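
The allocation fragments above (source lines 1526-1664) build the iclog ring: each iclog gets a header-plus-data buffer, is linked to its predecessor, and the chain is closed into a circle at the end, with the partial chain torn down on failure. A simplified, compilable model of that construction, using plain calloc/free instead of the kernel allocators and only the ring-relevant fields:

    #include <stdlib.h>

    struct iclog {
            struct iclog *ic_next;
            struct iclog *ic_prev;
            void         *ic_data;     /* log buffer: header + payload */
    };

    static struct iclog *alloc_iclog_ring(int count, size_t bufsize)
    {
            struct iclog *head = NULL, *prev = NULL, **link = &head;

            if (count <= 0)
                    return NULL;

            for (int i = 0; i < count; i++) {
                    struct iclog *ic = calloc(1, sizeof(*ic));

                    if (!ic)
                            goto out_free;
                    *link = ic;                /* link before anything can fail */
                    ic->ic_prev = prev;
                    prev = ic;
                    link = &ic->ic_next;

                    ic->ic_data = calloc(1, bufsize);
                    if (!ic->ic_data)
                            goto out_free;
            }
            *link = head;                      /* close the ring both ways */
            head->ic_prev = prev;
            return head;

    out_free:
            /* partial chain is NULL-terminated, so walk and free it */
            while (head) {
                    struct iclog *next = head->ic_next;

                    free(head->ic_data);
                    free(head);
                    head = next;
            }
            return NULL;
    }
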
1763 struct xlog_in_core *iclog,
1767 int size = iclog->ic_offset + roundoff;
1771 cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
1773 dp = iclog->ic_datap;
1777 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
1783 xlog_in_core_2_t *xhdr = iclog->ic_data;
1842 struct xlog_in_core *iclog = bio->bi_private;
1844 queue_work(iclog->ic_log->l_ioend_workqueue,
1845 &iclog->ic_end_io_work);
1872 struct xlog_in_core *iclog,
1877 trace_xlog_iclog_write(iclog, _RET_IP_);
1887 down(&iclog->ic_sema);
1905 bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec,
1908 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
1909 iclog->ic_bio.bi_end_io = xlog_bio_end_io;
1910 iclog->ic_bio.bi_private = iclog;
1912 if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
1913 iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
1917 * by the LSN in this iclog is on stable storage. This is slow,
1929 if (iclog->ic_flags & XLOG_ICL_NEED_FUA)
1930 iclog->ic_bio.bi_opf |= REQ_FUA;
1932 iclog->ic_flags &= ~(XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA);
1934 if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count))
1937 if (is_vmalloc_addr(iclog->ic_data))
1938 flush_kernel_vmap_range(iclog->ic_data, count);
1947 split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
1949 bio_chain(split, &iclog->ic_bio);
1953 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
1956 submit_bio(&iclog->ic_bio);
1961 xlog_state_done_syncing(iclog);
1962 up(&iclog->ic_sema);
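
The flag handling above (source lines 1905-1932) is what turns the iclog's cache-control state into I/O request flags: NEED_FLUSH becomes a preflush, NEED_FUA becomes a forced-unit-access write, and both iclog flags are cleared once consumed so a reused iclog starts clean. A small sketch of that translation, with invented IO_* constants standing in for the block layer's REQ_PREFLUSH/REQ_FUA:

    #define ICL_NEED_FLUSH (1u << 0)
    #define ICL_NEED_FUA   (1u << 1)

    #define IO_WRITE       (1u << 0)
    #define IO_PREFLUSH    (1u << 1)   /* flush the volatile cache first */
    #define IO_FUA         (1u << 2)   /* data durable before completion */

    static unsigned int iclog_io_flags(unsigned int *ic_flags)
    {
            unsigned int opf = IO_WRITE;

            if (*ic_flags & ICL_NEED_FLUSH)
                    opf |= IO_PREFLUSH;
            if (*ic_flags & ICL_NEED_FUA)
                    opf |= IO_FUA;

            /* the flags are consumed by this submission */
            *ic_flags &= ~(ICL_NEED_FLUSH | ICL_NEED_FUA);
            return opf;
    }
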
1966 * We need to bump cycle number for the part of the iclog that is
1992 struct xlog_in_core *iclog,
1998 count_init = log->l_iclog_hsize + iclog->ic_offset;
2009 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
2010 fashion. By the time this runs, the caller should already have moved the current iclog
2011 ptr in the log to point to the next available iclog. This allows further
2012 writes to continue while this code syncs out an iclog ready to go.
2022 * This routine is single threaded on the iclog. No other thread can be in
2023 * this routine with the same iclog. Changing contents of iclog can there-
2034 struct xlog_in_core *iclog,
2042 ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2043 trace_xlog_iclog_sync(iclog, _RET_IP_);
2045 count = xlog_calc_iclog_size(log, iclog, &roundoff);
2060 xlog_pack_data(log, iclog, roundoff);
2063 size = iclog->ic_offset;
2066 iclog->ic_header.h_len = cpu_to_be32(size);
2071 bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn));
2075 xlog_split_iclog(log, &iclog->ic_header, bno, count);
2078 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
2079 iclog->ic_datap, size);
2089 iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
2090 iclog->ic_fail_crc = true;
2093 be64_to_cpu(iclog->ic_header.h_lsn));
2096 xlog_verify_iclog(log, iclog, count);
2097 xlog_write_iclog(log, iclog, bno, count);
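
In the sync path above (source lines 2034-2097) the header length is finalised and the CRC is computed over the header, with its own crc field treated as zero, followed by the data region, then stored back into the header before submission. A self-contained sketch of that checksum pattern; checksum32() is a trivial placeholder, not the crc32c the log actually uses:

    #include <stdint.h>
    #include <stddef.h>

    struct iclog_header {
            uint32_t h_crc;
            uint32_t h_len;          /* bytes of payload in this iclog */
            uint64_t h_lsn;
    };

    /* trivial stand-in so the sketch is self-contained */
    static uint32_t checksum32(uint32_t seed, const void *p, size_t len)
    {
            const unsigned char *c = p;

            while (len--)
                    seed = (seed << 5) + seed + *c++;
            return seed;
    }

    static uint32_t iclog_cksum(const struct iclog_header *hdr,
                                const void *data, size_t data_len)
    {
            struct iclog_header tmp = *hdr;
            uint32_t crc;

            tmp.h_crc = 0;                        /* crc field excluded */
            crc = checksum32(~0u, &tmp, sizeof(tmp));
            return checksum32(crc, data, data_len);
    }
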
2107 xlog_in_core_t *iclog, *next_iclog;
2111 * Destroy the CIL after waiting for iclog IO completion because an
2112 * iclog EIO error will try to shut down the log, which accesses the
2117 iclog = log->l_iclog;
2119 next_iclog = iclog->ic_next;
2120 kmem_free(iclog->ic_data);
2121 kmem_free(iclog);
2122 iclog = next_iclog;
2136 struct xlog_in_core *iclog,
2142 be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
2143 iclog->ic_offset += copy_bytes;
2214 struct xlog_in_core *iclog,
2222 ASSERT(*log_offset < iclog->ic_log->l_iclog_size);
2226 memcpy(iclog->ic_datap + *log_offset, data, write_len);
2234 * Write log vectors into a single iclog which is guaranteed by the caller
2241 struct xlog_in_core *iclog,
2249 ASSERT(*log_offset + *len <= iclog->ic_size ||
2250 iclog->ic_state == XLOG_STATE_WANT_SYNC);
2261 xlog_write_iovec(iclog, log_offset, reg->i_addr,
2275 struct xlog_in_core *iclog = *iclogp;
2276 struct xlog *log = iclog->ic_log;
2280 ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC);
2281 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2282 error = xlog_state_release_iclog(log, iclog, ticket);
2287 error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2293 *iclogp = iclog;
2298 * Write log vectors into a single iclog which is smaller than the current chain
2301 * wholly fit in the iclog.
2313 struct xlog_in_core *iclog = *iclogp;
2319 /* walk the logvec, copying until we run out of space in the iclog */
2333 * opheader, then we need to start afresh with a new iclog.
2335 if (iclog->ic_size - *log_offset <=
2338 &iclog, log_offset, *len, record_cnt,
2345 rlen = min_t(uint32_t, reg->i_len, iclog->ic_size - *log_offset);
2352 xlog_write_iovec(iclog, log_offset, reg->i_addr,
2361 * multiple iclogs so we loop here. First we release the iclog
2362 * we currently have, then we get a new iclog and add a new
2364 * we either complete the iovec or fill the iclog. If we
2366 * back to the top of the outer loop. If we fill the iclog, we
2370 * space in an iclog and hence requiring us to release the iclog
2374 * iclog, hence we cannot just terminate the loop at the end
2376 * space left in the current iclog, and check for the end of the
2377 * continuation after getting a new iclog.
2382 * space we need in the new iclog by adding that size
2389 &iclog, log_offset,
2395 ophdr = iclog->ic_datap + *log_offset;
2406 * If rlen fits in the iclog, then end the region
2411 if (rlen <= iclog->ic_size - *log_offset)
2416 rlen = min_t(uint32_t, rlen, iclog->ic_size - *log_offset);
2419 xlog_write_iovec(iclog, log_offset,
2430 *iclogp = iclog;
2444 * 3. While writing to this iclog
2445 * A. Reserve as much space in this iclog as we can get
2450 * 3. Find out if we can fit the entire region into this iclog
2453 * 6. If partial copy, release iclog; otherwise, continue
2454 * copying more regions into current iclog
2456 * 5. Release iclog for potential flush to on-disk log.
2469 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
2483 struct xlog_in_core *iclog = NULL;
2497 error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2502 ASSERT(log_offset <= iclog->ic_size - 1);
2505 * If we have a context pointer, pass it the first iclog we are
2506 * writing to so it can record state needed for iclog write
2510 xlog_cil_set_ctx_write_state(ctx, iclog);
2514 * If the entire log vec does not fit in the iclog, punt it to
2518 lv->lv_bytes > iclog->ic_size - log_offset) {
2519 error = xlog_write_partial(lv, ticket, &iclog,
2524 * We have no iclog to release, so just return
2530 xlog_write_full(lv, ticket, iclog, &log_offset,
2538 * the current iclog, and hence it will already have the space used by
2540 * iclog with the number of bytes written here.
2543 xlog_state_finish_copy(log, iclog, record_cnt, 0);
2544 error = xlog_state_release_iclog(log, iclog, ticket);
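
The xlog_write() fragments above (source lines 2444-2544) describe the copy loop: a vector that fits in the remaining iclog space is copied whole, otherwise it is copied partially, the full iclog is released for I/O, and the remainder continues in the next iclog. A compressed model of that loop; switch_logbuf() is a placeholder for the get-space/release dance, and op headers and tickets are omitted:

    #include <string.h>

    struct logbuf {
            char  *data;
            size_t size;
            size_t offset;
    };

    /* placeholder: hand the full buffer to I/O and return an empty one */
    static struct logbuf *switch_logbuf(struct logbuf *full)
    {
            full->offset = 0;       /* pretend the I/O completed and it was reused */
            return full;
    }

    static struct logbuf *write_vector(struct logbuf *buf,
                                       const char *vec, size_t len)
    {
            while (len) {
                    size_t space = buf->size - buf->offset;
                    size_t copy  = len < space ? len : space;

                    memcpy(buf->data + buf->offset, vec, copy);
                    buf->offset += copy;
                    vec += copy;
                    len -= copy;

                    if (len)                 /* partial copy: need a new buffer */
                            buf = switch_logbuf(buf);
            }
            return buf;
    }
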
2552 struct xlog_in_core *iclog,
2555 ASSERT(list_empty_careful(&iclog->ic_callbacks));
2556 trace_xlog_iclog_activate(iclog, _RET_IP_);
2559 * If the number of ops in this iclog indicates it just contains the
2565 iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
2575 iclog->ic_state = XLOG_STATE_ACTIVE;
2576 iclog->ic_offset = 0;
2577 iclog->ic_header.h_num_logops = 0;
2578 memset(iclog->ic_header.h_cycle_data, 0,
2579 sizeof(iclog->ic_header.h_cycle_data));
2580 iclog->ic_header.h_lsn = 0;
2581 iclog->ic_header.h_tail_lsn = 0;
2586 * ACTIVE after iclog I/O has completed.
2593 struct xlog_in_core *iclog = log->l_iclog;
2596 if (iclog->ic_state == XLOG_STATE_DIRTY)
2597 xlog_state_activate_iclog(iclog, iclogs_changed);
2600 * an iclog doesn't become ACTIVE beyond one that is SYNCING.
2602 else if (iclog->ic_state != XLOG_STATE_ACTIVE)
2604 } while ((iclog = iclog->ic_next) != log->l_iclog);
2665 struct xlog_in_core *iclog = log->l_iclog;
2669 if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2670 iclog->ic_state == XLOG_STATE_DIRTY)
2673 lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2676 } while ((iclog = iclog->ic_next) != log->l_iclog);
2682 * Completion of an iclog IO does not imply that a transaction has completed, as
2687 * should only update the last_sync_lsn if this iclog contains transaction
2706 struct xlog_in_core *iclog,
2709 trace_xlog_iclog_callback(iclog, _RET_IP_);
2710 iclog->ic_state = XLOG_STATE_CALLBACK;
2715 if (list_empty_careful(&iclog->ic_callbacks))
2724 * iclog. The caller will need to run callbacks if the iclog is returned in the
2730 struct xlog_in_core *iclog)
2735 switch (iclog->ic_state) {
2744 * Now that we have an iclog that is in the DONE_SYNC state, do
2746 * If this is not the lowest lsn iclog, then we will leave it
2749 header_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2753 xlog_state_set_callback(log, iclog, header_lsn);
2757 * Can only perform callbacks in order. Since this iclog is not
2779 struct xlog_in_core *iclog = first_iclog;
2785 if (xlog_state_iodone_process_iclog(log, iclog))
2787 if (iclog->ic_state != XLOG_STATE_CALLBACK) {
2788 iclog = iclog->ic_next;
2791 list_splice_init(&iclog->ic_callbacks, &cb_list);
2794 trace_xlog_iclog_callbacks_start(iclog, _RET_IP_);
2796 trace_xlog_iclog_callbacks_done(iclog, _RET_IP_);
2800 xlog_state_clean_iclog(log, iclog);
2801 iclog = iclog->ic_next;
2802 } while (iclog != first_iclog);
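
The completion loop above (source lines 2706-2802) enforces ordering: an iclog's callbacks may only run once it is the lowest-LSN iclog still outstanding, and each processed iclog is then cleaned for reuse. A rough model of that rule; LSNs are assumed nonzero once assigned, and the states and helpers are stand-ins:

    #include <stdint.h>

    enum ic_state { IC_ACTIVE, IC_DIRTY, IC_DONE_SYNC, IC_CALLBACK };

    struct iclog {
            struct iclog *ic_next;
            enum ic_state state;
            uint64_t      lsn;
    };

    static void run_callbacks(struct iclog *ic) { (void)ic; /* placeholder */ }

    /* lowest LSN among iclogs that are neither ACTIVE nor DIRTY */
    static uint64_t lowest_pending_lsn(struct iclog *head)
    {
            struct iclog *ic = head;
            uint64_t low = 0;

            do {
                    if (ic->state != IC_ACTIVE && ic->state != IC_DIRTY &&
                        (!low || ic->lsn < low))
                            low = ic->lsn;
                    ic = ic->ic_next;
            } while (ic != head);
            return low;
    }

    static void process_ring(struct iclog *head)
    {
            struct iclog *ic = head;

            do {
                    if (ic->state == IC_DONE_SYNC &&
                        ic->lsn <= lowest_pending_lsn(head)) {
                            ic->state = IC_CALLBACK;
                            run_callbacks(ic);      /* kernel drops the lock here */
                            ic->state = IC_DIRTY;   /* ready for reactivation */
                    }
                    ic = ic->ic_next;
            } while (ic != head);
    }
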
2809 * Loop running iclog completion callbacks until there are no more iclogs in a
2841 * Finish transitioning this iclog to the dirty state.
2848 struct xlog_in_core *iclog)
2850 struct xlog *log = iclog->ic_log;
2853 ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2854 trace_xlog_iclog_sync_done(iclog, _RET_IP_);
2862 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
2863 iclog->ic_state = XLOG_STATE_DONE_SYNC;
2868 * iclog buffer, we wake them all, one will get to do the
2871 wake_up_all(&iclog->ic_write_wait);
2878 * sleep. We wait on the flush queue on the head iclog as that should be
2879 * the first iclog to complete flushing. Hence if all iclogs are syncing,
2883 * out-of-order even when an iclog past the head is free.
2904 xlog_in_core_t *iclog;
2913 iclog = log->l_iclog;
2914 if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2922 head = &iclog->ic_header;
2924 atomic_inc(&iclog->ic_refcnt); /* prevents sync */
2925 log_offset = iclog->ic_offset;
2927 trace_xlog_iclog_get_space(iclog, _RET_IP_);
2929 /* On the 1st write to an iclog, figure out lsn. This works
2951 if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
2954 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2957 * If we are the only one writing to this iclog, sync it to
2961 * reference to the iclog.
2963 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
2964 error = xlog_state_release_iclog(log, iclog, ticket);
2972 * of this iclog? Or must we continue a write on the next iclog and
2973 * mark this iclog as completely taken? In the case where we switch
2974 * iclogs (to mark it taken), this particular iclog will release/sync
2977 if (len <= iclog->ic_size - iclog->ic_offset)
2978 iclog->ic_offset += len;
2980 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2981 *iclogp = iclog;
2983 ASSERT(iclog->ic_offset <= iclog->ic_size);
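
The space-grant fragments above (source lines 2904-2983) boil down to one decision: if the current iclog has too little room left to be useful it is switched out and the caller retries on the next one, if the request fits the offset is advanced, and otherwise the iclog is switched but still handed back so the caller can start a continuation in it. A loose sketch of that decision; the 64-byte threshold is a placeholder for two op-header sizes:

    #include <stdbool.h>
    #include <stddef.h>

    struct logbuf {
            size_t size;
            size_t offset;
            bool   want_sync;     /* models XLOG_STATE_WANT_SYNC */
    };

    #define MIN_USEFUL_SPACE 64   /* placeholder for 2 * op-header size */

    /* Returns true if the caller got space in *buf, false if it must retry
     * on the next buffer once this one has been pushed out. */
    static bool get_logbuf_space(struct logbuf *buf, size_t len)
    {
            if (buf->size - buf->offset < MIN_USEFUL_SPACE) {
                    buf->want_sync = true;        /* push it, retry next buffer */
                    return false;
            }
            if (len <= buf->size - buf->offset) {
                    buf->offset += len;           /* request fits entirely */
                    return true;
            }
            buf->want_sync = true;                /* caller continues in the next */
            return true;
    }
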
3075 * This routine will mark the current iclog in the ring as WANT_SYNC and move
3076 * the current iclog pointer to the next iclog in the ring.
3081 struct xlog_in_core *iclog,
3084 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
3086 trace_xlog_iclog_switch(iclog, _RET_IP_);
3089 eventual_size = iclog->ic_offset;
3090 iclog->ic_state = XLOG_STATE_WANT_SYNC;
3091 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
3119 ASSERT(iclog == log->l_iclog);
3120 log->l_iclog = iclog->ic_next;
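
Switching an iclog (source lines 3075-3120) is mostly bookkeeping: record how much of it was used, mark it WANT_SYNC so no further copy-ins are allowed, and advance the ring's current pointer so new writes land in the next iclog. A tiny stand-in-typed sketch:

    struct logbuf {
            struct logbuf *next;
            unsigned int   offset;
            unsigned int   eventual_size;
            int            want_sync;
    };

    struct ring {
            struct logbuf *current;    /* models log->l_iclog */
    };

    static void switch_logbuf(struct ring *r)
    {
            struct logbuf *buf = r->current;

            buf->eventual_size = buf->offset;  /* final size once writers drain */
            buf->want_sync = 1;                /* no new copy-ins allowed */
            r->current = buf->next;            /* new writes go to the next one */
    }
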
3124 * Force the iclog to disk and check if the iclog has been completed before
3128 * unnecessary wait on the iclog.
3132 struct xlog_in_core *iclog,
3135 xfs_lsn_t lsn = be64_to_cpu(iclog->ic_header.h_lsn);
3139 error = xlog_force_iclog(iclog);
3144 * If the iclog has already been completed and reused the header LSN
3147 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
3163 * 1. the current iclog is active and has no data; the previous iclog
3165 * 2. the current iclog is dirty, and the previous iclog is in the
3170 * 1. the current iclog is not in the active or dirty state.
3171 * 2. the current iclog is dirty, and the previous iclog is not in the
3173 * 3. the current iclog is active, and there is another thread writing
3174 * to this particular iclog.
3175 * 4. a) the current iclog is active and has no other writers
3176 * b) when we return from flushing out this iclog, it is still
3185 struct xlog_in_core *iclog;
3196 iclog = log->l_iclog;
3197 trace_xlog_iclog_force(iclog, _RET_IP_);
3199 if (iclog->ic_state == XLOG_STATE_DIRTY ||
3200 (iclog->ic_state == XLOG_STATE_ACTIVE &&
3201 atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
3204 * look at the previous iclog.
3206 * If the previous iclog is active or dirty we are done. There
3208 * previous iclog and go to sleep.
3210 iclog = iclog->ic_prev;
3211 } else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3212 if (atomic_read(&iclog->ic_refcnt) == 0) {
3213 /* We have exclusive access to this iclog. */
3216 if (xlog_force_and_check_iclog(iclog, &completed))
3223 * Someone else is still writing to this iclog, so we
3224 * need to ensure that when they release the iclog it
3227 xlog_state_switch_iclogs(log, iclog, 0);
3232 * The iclog we are about to wait on may contain the checkpoint pushed
3235 * are flushed when this iclog is written.
3237 if (iclog->ic_state == XLOG_STATE_WANT_SYNC)
3238 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
3241 return xlog_wait_on_iclog(iclog);
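
The xfs_log_force() fragments above (source lines 3163-3241) encode a small decision tree over the head iclog. A hedged sketch of just that tree, with placeholder helpers (force_and_check(), switch_iclog(), wait_on_iclog()) standing in for the real force/release/wait machinery:

    #include <stdbool.h>

    enum ic_state { IC_ACTIVE, IC_DIRTY, IC_WANT_SYNC, IC_SYNCING };

    struct iclog {
            struct iclog *prev;
            enum ic_state state;
            int           refcnt;
            unsigned int  offset;
    };

    static bool force_and_check(struct iclog *ic) { (void)ic; return false; }
    static void switch_iclog(struct iclog *ic)    { ic->state = IC_WANT_SYNC; }
    static void wait_on_iclog(struct iclog *ic)   { (void)ic; }

    static void log_force(struct iclog *head)
    {
            struct iclog *ic = head;

            if (ic->state == IC_DIRTY ||
                (ic->state == IC_ACTIVE && ic->refcnt == 0 && ic->offset == 0)) {
                    /* Nothing of ours in the head iclog: anything interesting
                     * is in the previous one, so wait on that instead. */
                    ic = ic->prev;
            } else if (ic->state == IC_ACTIVE) {
                    if (ic->refcnt == 0) {
                            /* Exclusive access: push it out ourselves. */
                            if (force_and_check(ic))
                                    return;        /* already on disk */
                    } else {
                            /* Someone is still copying in: mark it so their
                             * release pushes it out for us. */
                            switch_iclog(ic);
                    }
            }
            wait_on_iclog(ic);
    }
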
3253 * If an iclog with that lsn can be found:
3272 struct xlog_in_core *iclog;
3279 iclog = log->l_iclog;
3280 while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
3281 trace_xlog_iclog_force_lsn(iclog, _RET_IP_);
3282 iclog = iclog->ic_next;
3283 if (iclog == log->l_iclog)
3287 switch (iclog->ic_state) {
3291 * first time we've looked at the correct iclog buf) and the
3295 * transactions into this iclog before we close it down.
3305 (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
3306 iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
3307 xlog_wait(&iclog->ic_prev->ic_write_wait,
3311 if (xlog_force_and_check_iclog(iclog, &completed))
3320 * This iclog may contain the checkpoint pushed by the
3324 * when this iclog is written.
3326 iclog->ic_flags |= XLOG_ICL_NEED_FLUSH | XLOG_ICL_NEED_FUA;
3339 return xlog_wait_on_iclog(iclog);
3353 * the iclog that needs to be flushed to stable storage. If the caller needs
3354 * a synchronous log force, we will wait on the iclog with the LSN returned by
3457 * for LR headers - the space for data in an iclog is the size minus
3458 * the space used for the headers. If we use the iclog size, then we
3466 * transaction is the first in an iclog and hence has the LR headers
3467 * accounted to it, then the remaining space in the iclog is
3469 * than the iclog, it will be the only thing in that iclog.
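
The reservation comment above (source lines 3457-3469) is about sizing: the usable payload of an iclog is its size minus the header, so the number of log-record headers a unit of reservation must cover is the unit size divided by that payload, rounded up. A one-function sketch of that arithmetic with illustrative numbers:

    #include <stdint.h>

    static uint32_t lr_headers_needed(uint32_t unit_bytes,
                                      uint32_t iclog_size,
                                      uint32_t iclog_hdr_size)
    {
            uint32_t payload = iclog_size - iclog_hdr_size;

            /* round up: a partially used iclog still needs a full header */
            return (unit_bytes + payload - 1) / payload;
    }

    /* e.g. a 60 KiB unit with 32 KiB iclogs and a 512-byte header spans
     * two iclogs, so two LR headers are reserved for it. */
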
3577 struct xlog_in_core *iclog)
3579 xfs_lsn_t tail_lsn = be64_to_cpu(iclog->ic_header.h_tail_lsn);
3585 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
3594 if (blocks < BTOBB(iclog->ic_offset) + 1)
3600 * Perform a number of checks on the iclog before writing to disk.
3617 struct xlog_in_core *iclog,
3629 /* check validity of iclog pointers */
3636 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
3640 if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3643 base_ptr = ptr = &iclog->ic_header;
3644 p = &iclog->ic_header;
3652 len = be32_to_cpu(iclog->ic_header.h_num_logops);
3653 base_ptr = ptr = iclog->ic_datap;
3655 xhdr = iclog->ic_data;
3665 idx = BTOBBT((void *)&ophead->oh_clientid - iclog->ic_datap);
3673 iclog->ic_header.h_cycle_data[idx]);
3689 idx = BTOBBT((void *)&ophead->oh_len - iclog->ic_datap);
3695 op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
3815 xlog_in_core_t *iclog;
3817 iclog = log->l_iclog;
3822 if (iclog->ic_header.h_num_logops)
3824 iclog = iclog->ic_next;
3825 } while (iclog != log->l_iclog);