Lines Matching defs:iclog

43 	struct xlog_in_core	*iclog);
48 struct xlog_in_core **iclog,
55 struct xlog_in_core *iclog,
64 struct xlog_in_core *iclog);
76 struct xlog_in_core *iclog,
81 struct xlog_in_core *iclog,
488 struct xlog_in_core *iclog)
492 if (iclog->ic_state == XLOG_STATE_WANT_SYNC) {
493 /* update tail before writing to iclog */
496 iclog->ic_state = XLOG_STATE_SYNCING;
497 iclog->ic_header.h_tail_lsn = cpu_to_be64(tail_lsn);
498 xlog_verify_tail_lsn(log, iclog, tail_lsn);
503 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
508 * Flush iclog to disk if this is the last reference to the given iclog and the
514 struct xlog_in_core *iclog)
518 if (iclog->ic_state == XLOG_STATE_IOERROR)
521 if (atomic_dec_and_test(&iclog->ic_refcnt) &&
522 __xlog_state_release_iclog(log, iclog)) {
524 xlog_sync(log, iclog);
533 struct xlog_in_core *iclog)
535 struct xlog *log = iclog->ic_log;
538 if (atomic_dec_and_lock(&iclog->ic_refcnt, &log->l_icloglock)) {
539 if (iclog->ic_state != XLOG_STATE_IOERROR)
540 sync = __xlog_state_release_iclog(log, iclog);
545 xlog_sync(log, iclog);
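Both release paths above (source lines 508-545) follow the same idiom: drop one reference and, if that was the last one and the iclog wants syncing, push it out with xlog_sync(). A minimal user-space sketch of that drop-last-reference-then-flush pattern, using C11 atomics in place of atomic_dec_and_test(); the struct and function names here are illustrative, not the kernel API:

    #include <stdatomic.h>
    #include <stdio.h>

    struct buf {
        atomic_int refcnt;      /* writers still holding this buffer */
        int        want_sync;   /* analogue of XLOG_STATE_WANT_SYNC */
    };

    static void buf_sync(struct buf *b)
    {
        printf("syncing buffer\n");   /* stand-in for xlog_sync() */
    }

    /* Drop one reference; whoever drops the last one does the flush. */
    static void buf_release(struct buf *b)
    {
        /* atomic_fetch_sub() returns the old value, so 1 means "last reference" */
        if (atomic_fetch_sub(&b->refcnt, 1) == 1 && b->want_sync)
            buf_sync(b);
    }

    int main(void)
    {
        struct buf b = { .want_sync = 1 };

        atomic_init(&b.refcnt, 2);
        buf_release(&b);   /* one holder left, nothing happens */
        buf_release(&b);   /* last reference: the buffer is synced */
        return 0;
    }

The kernel variants additionally juggle l_icloglock around the state change, which this sketch leaves out.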
787 * Wait for the iclog to be written to disk, or return an error if the log has been
792 struct xlog_in_core *iclog)
793 __releases(iclog->ic_log->l_icloglock)
795 struct xlog *log = iclog->ic_log;
798 iclog->ic_state != XLOG_STATE_ACTIVE &&
799 iclog->ic_state != XLOG_STATE_DIRTY) {
801 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
850 struct xlog_in_core *iclog;
870 iclog = log->l_iclog;
871 atomic_inc(&iclog->ic_refcnt);
872 if (iclog->ic_state == XLOG_STATE_ACTIVE)
873 xlog_state_switch_iclogs(log, iclog, 0);
875 ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
876 iclog->ic_state == XLOG_STATE_IOERROR);
877 error = xlog_state_release_iclog(log, iclog);
878 xlog_wait_on_iclog(iclog);
890 struct xlog_in_core *iclog = log->l_iclog;
893 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
894 ASSERT(iclog->ic_offset == 0);
895 } while ((iclog = iclog->ic_next) != log->l_iclog);
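The do/while at source line 895 is the shape every iclog-ring walk in this file takes: start at log->l_iclog, follow ic_next, and stop when the walk returns to the starting element (the same loop reappears at lines 2547, 2613 and 3803 below). A self-contained sketch of that circular singly linked list traversal; the node type and ring size are illustrative:

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int          id;
        struct node *next;   /* analogue of iclog->ic_next */
    };

    int main(void)
    {
        struct node *head = NULL, *prev = NULL, *n;
        int i;

        /* Build a small ring, linking each new node after the previous one. */
        for (i = 0; i < 3; i++) {
            n = malloc(sizeof(*n));
            n->id = i;
            n->next = NULL;
            if (prev)
                prev->next = n;
            else
                head = n;
            prev = n;
        }
        prev->next = head;   /* close the ring */

        /* Visit every element exactly once, the same shape as the iclog loops. */
        n = head;
        do {
            printf("visiting node %d\n", n->id);
        } while ((n = n->next) != head);

        /* Teardown walk, mirroring the free loop in the deallocation code below. */
        n = head;
        do {
            struct node *next = n->next;
            free(n);
            n = next;
        } while (n != head);

        return 0;
    }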
1095 * We may be holding the log iclog lock upon entering this routine.
1194 struct xlog_in_core *iclog =
1196 struct xlog *log = iclog->ic_log;
1199 error = blk_status_to_errno(iclog->ic_bio.bi_status);
1202 if (iclog->ic_fail_crc)
1214 xlog_state_done_syncing(iclog);
1215 bio_uninit(&iclog->ic_bio);
1219 * iclog after this, so an unmount waiting on this lock can now tear it
1220 * down safely. As such, it is unsafe to reference the iclog after the
1223 up(&iclog->ic_sema);
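The comment ending at source line 1220 describes the lifetime rule around ic_sema: the I/O completion path takes its last look at the iclog and then does up(), while teardown (the down()/up() pair at lines 1936-1937 further on) waits on the same semaphore before freeing the ring, so nothing may touch the iclog after the up(). A small POSIX-semaphore analogue of that drain handshake; the thread layout and names are illustrative, not the kernel code:

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>
    #include <unistd.h>

    static sem_t io_sema;   /* analogue of iclog->ic_sema, initialised to 1 */

    /* Completion path: the post is the last touch of the object. */
    static void *io_completion(void *arg)
    {
        usleep(100 * 1000);   /* pretend the write was in flight for a while */
        printf("io completed\n");
        sem_post(&io_sema);   /* analogue of up(&iclog->ic_sema) */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        sem_init(&io_sema, 0, 1);

        /* Submission path: take the semaphore before the I/O is issued. */
        sem_wait(&io_sema);   /* analogue of down(&iclog->ic_sema) */
        pthread_create(&t, NULL, io_completion, NULL);

        /*
         * Teardown path: a wait/post pair drains any in-flight I/O, the same
         * idiom as the per-iclog loop in the deallocation code shown later in
         * this listing.  Once it returns, no completion can still be
         * referencing the object and it may be freed.
         */
        sem_wait(&io_sema);
        sem_post(&io_sema);
        printf("safe to free\n");

        pthread_join(t, NULL);
        return 0;
    }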
1315 xlog_in_core_t *iclog, *prev_iclog=NULL;
1378 * The amount of memory to allocate for the iclog structure is
1390 iclog = kmem_zalloc(sizeof(*iclog) + bvec_size, KM_MAYFAIL);
1391 if (!iclog)
1394 *iclogp = iclog;
1395 iclog->ic_prev = prev_iclog;
1396 prev_iclog = iclog;
1398 iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask,
1400 if (!iclog->ic_data)
1403 log->l_iclog_bak[i] = &iclog->ic_header;
1405 head = &iclog->ic_header;
1415 iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
1416 iclog->ic_state = XLOG_STATE_ACTIVE;
1417 iclog->ic_log = log;
1418 atomic_set(&iclog->ic_refcnt, 0);
1419 spin_lock_init(&iclog->ic_callback_lock);
1420 INIT_LIST_HEAD(&iclog->ic_callbacks);
1421 iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;
1423 init_waitqueue_head(&iclog->ic_force_wait);
1424 init_waitqueue_head(&iclog->ic_write_wait);
1425 INIT_WORK(&iclog->ic_end_io_work, xlog_ioend_work);
1426 sema_init(&iclog->ic_sema, 1);
1428 iclogp = &iclog->ic_next;
1447 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
1448 prev_iclog = iclog->ic_next;
1449 kmem_free(iclog->ic_data);
1450 kmem_free(iclog);
1468 struct xlog_in_core **iclog,
1485 error = xlog_write(log, &vec, ticket, lsn, iclog, XLOG_COMMIT_TRANS,
1581 struct xlog_in_core *iclog,
1585 int size = iclog->ic_offset + roundoff;
1589 cycle_lsn = CYCLE_LSN_DISK(iclog->ic_header.h_lsn);
1591 dp = iclog->ic_datap;
1595 iclog->ic_header.h_cycle_data[i] = *(__be32 *)dp;
1601 xlog_in_core_2_t *xhdr = iclog->ic_data;
1660 struct xlog_in_core *iclog = bio->bi_private;
1662 queue_work(iclog->ic_log->l_ioend_workqueue,
1663 &iclog->ic_end_io_work);
1690 struct xlog_in_core *iclog,
1705 down(&iclog->ic_sema);
1706 if (unlikely(iclog->ic_state == XLOG_STATE_IOERROR)) {
1714 xlog_state_done_syncing(iclog);
1715 up(&iclog->ic_sema);
1719 bio_init(&iclog->ic_bio, iclog->ic_bvec, howmany(count, PAGE_SIZE));
1720 bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev);
1721 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
1722 iclog->ic_bio.bi_end_io = xlog_bio_end_io;
1723 iclog->ic_bio.bi_private = iclog;
1731 iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC |
1734 iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
1736 if (xlog_map_iclog_data(&iclog->ic_bio, iclog->ic_data, count)) {
1740 if (is_vmalloc_addr(iclog->ic_data))
1741 flush_kernel_vmap_range(iclog->ic_data, count);
1750 split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
1752 bio_chain(split, &iclog->ic_bio);
1756 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
1759 submit_bio(&iclog->ic_bio);
1763 * We need to bump cycle number for the part of the iclog that is
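The xlog_write_iclog fragments just above show how a write that would run past the physical end of the log is split: bio_split() carves off the part that fits before the end, bio_chain() ties the two halves together, and the remainder restarts at l_logBBstart (source lines 1750-1759). A user-space analogue of that wrap-around write, assuming a plain file stands in for the log device; write_wrapped() and the sizes used are illustrative:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    /*
     * Write "count" bytes at offset "off" inside a circular region of "size"
     * bytes, splitting the write in two when it would run past the end.
     */
    static void write_wrapped(int fd, const char *buf, size_t count,
                              off_t off, off_t size)
    {
        if (off + (off_t)count > size) {
            size_t first = size - off;

            pwrite(fd, buf, first, off);
            pwrite(fd, buf + first, count - first, 0);   /* remainder at the start */
        } else {
            pwrite(fd, buf, count, off);
        }
    }

    int main(void)
    {
        int  fd = open("demo.log", O_CREAT | O_RDWR | O_TRUNC, 0644);
        char rec[24];

        memset(rec, 'A', sizeof(rec));
        ftruncate(fd, 64);                               /* pretend the log is 64 bytes long */
        write_wrapped(fd, rec, sizeof(rec), 50, 64);     /* 14 bytes at the end, 10 at the start */
        close(fd);
        return 0;
    }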
1789 struct xlog_in_core *iclog,
1799 count_init = log->l_iclog_hsize + iclog->ic_offset;
1820 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
1821 * fashion. Before this is called, the current iclog ptr in the log should
1822 * already have been moved to point to the next available iclog. This allows
1823 * further writes to continue while this code syncs out an iclog ready to go.
1833 * This routine is single threaded on the iclog. No other thread can be in
1834 * this routine with the same iclog. Changing contents of iclog can there-
1845 struct xlog_in_core *iclog)
1853 ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
1855 count = xlog_calc_iclog_size(log, iclog, &roundoff);
1862 xlog_pack_data(log, iclog, roundoff);
1865 size = iclog->ic_offset;
1868 iclog->ic_header.h_len = cpu_to_be32(size);
1873 bno = BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn));
1877 xlog_split_iclog(log, &iclog->ic_header, bno, count);
1882 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
1883 iclog->ic_datap, size);
1893 iclog->ic_header.h_crc &= cpu_to_le32(0xAAAAAAAA);
1894 iclog->ic_fail_crc = true;
1897 be64_to_cpu(iclog->ic_header.h_lsn));
1914 xlog_verify_iclog(log, iclog, count);
1915 xlog_write_iclog(log, iclog, bno, count, need_flush);
1925 xlog_in_core_t *iclog, *next_iclog;
1934 iclog = log->l_iclog;
1936 down(&iclog->ic_sema);
1937 up(&iclog->ic_sema);
1938 iclog = iclog->ic_next;
1941 iclog = log->l_iclog;
1943 next_iclog = iclog->ic_next;
1944 kmem_free(iclog->ic_data);
1945 kmem_free(iclog);
1946 iclog = next_iclog;
1960 struct xlog_in_core *iclog,
1966 be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
1967 iclog->ic_offset += copy_bytes;
2223 struct xlog_in_core *iclog,
2236 * This iclog has already been marked WANT_SYNC by
2240 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2249 if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
2250 /* no more space in this iclog - push it. */
2252 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2256 if (iclog->ic_state == XLOG_STATE_ACTIVE)
2257 xlog_state_switch_iclogs(log, iclog, 0);
2259 ASSERT(iclog->ic_state == XLOG_STATE_WANT_SYNC ||
2260 iclog->ic_state == XLOG_STATE_IOERROR);
2265 *commit_iclog = iclog;
2271 error = xlog_state_release_iclog(log, iclog);
2286 * 3. While writing to this iclog
2287 * A. Reserve as much space in this iclog as we can get
2292 * 3. Find out if we can fit entire region into this iclog
2295 * 6. If partial copy, release iclog; otherwise, continue
2296 * copying more regions into current iclog
2298 * 5. Release iclog for potential flush to on-disk log.
2311 * 3. When xlog_state_get_iclog_space() grabs the rest of the current iclog,
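The pseudocode comment above (source lines 2286-2298) describes the core of xlog_write(): reserve space in the current iclog, copy as much of the region as fits, and if the copy is partial, release that iclog and continue in the next one. A simplified, self-contained analogue of that partial-copy loop; the buffer size and helper name are illustrative, and the sketch ignores op headers, tickets, and state transitions:

    #include <stdio.h>
    #include <string.h>

    #define ICLOG_SIZE 16   /* illustrative, far smaller than a real iclog */

    /*
     * Copy "len" bytes into a sequence of fixed-size buffers, switching to a
     * fresh buffer whenever the current one fills up.
     */
    static void write_regions(const char *data, size_t len)
    {
        char   buf[ICLOG_SIZE];
        size_t offset = 0;      /* analogue of iclog->ic_offset */
        size_t copied = 0;

        while (copied < len) {
            size_t space = ICLOG_SIZE - offset;
            size_t chunk = len - copied < space ? len - copied : space;

            memcpy(buf + offset, data + copied, chunk);
            offset += chunk;
            copied += chunk;

            if (offset == ICLOG_SIZE) {
                printf("buffer full, pushing %d bytes\n", ICLOG_SIZE);
                offset = 0;     /* continue the partial copy in the next buffer */
            }
        }
        if (offset)
            printf("partial buffer holds %zu bytes\n", offset);
    }

    int main(void)
    {
        const char msg[] = "a log region longer than one buffer";

        write_regions(msg, sizeof(msg) - 1);
        return 0;
    }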
2326 struct xlog_in_core *iclog = NULL;
2358 error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2363 ASSERT(log_offset <= iclog->ic_size - 1);
2364 ptr = iclog->ic_datap + log_offset;
2368 *start_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2395 * iclog we write to.
2411 iclog->ic_size-log_offset,
2441 error = xlog_write_copy_finish(log, iclog, flags,
2451 * if we had a partial copy, we need to get more iclog
2457 * the iclog (indicated by resetting of the record
2483 xlog_state_finish_copy(log, iclog, record_cnt, data_cnt);
2486 *commit_iclog = iclog;
2488 error = xlog_state_release_iclog(log, iclog);
2497 struct xlog_in_core *iclog,
2500 ASSERT(list_empty_careful(&iclog->ic_callbacks));
2503 * If the number of ops in this iclog indicate it just contains the
2509 iclog->ic_header.h_num_logops == cpu_to_be32(XLOG_COVER_OPS)) {
2519 iclog->ic_state = XLOG_STATE_ACTIVE;
2520 iclog->ic_offset = 0;
2521 iclog->ic_header.h_num_logops = 0;
2522 memset(iclog->ic_header.h_cycle_data, 0,
2523 sizeof(iclog->ic_header.h_cycle_data));
2524 iclog->ic_header.h_lsn = 0;
2529 * ACTIVE after iclog I/O has completed.
2536 struct xlog_in_core *iclog = log->l_iclog;
2539 if (iclog->ic_state == XLOG_STATE_DIRTY)
2540 xlog_state_activate_iclog(iclog, iclogs_changed);
2543 * an iclog doesn't become ACTIVE beyond one that is SYNCING.
2545 else if (iclog->ic_state != XLOG_STATE_ACTIVE)
2547 } while ((iclog = iclog->ic_next) != log->l_iclog);
2602 struct xlog_in_core *iclog = log->l_iclog;
2606 if (iclog->ic_state == XLOG_STATE_ACTIVE ||
2607 iclog->ic_state == XLOG_STATE_DIRTY)
2610 lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2613 } while ((iclog = iclog->ic_next) != log->l_iclog);
2619 * Completion of an iclog IO does not imply that a transaction has completed, as
2624 * should only update the last_sync_lsn if this iclog contains transaction
2643 struct xlog_in_core *iclog,
2646 iclog->ic_state = XLOG_STATE_CALLBACK;
2651 if (list_empty_careful(&iclog->ic_callbacks))
2660 * iclog. The caller will need to run callbacks if the iclog is returned in the
2666 struct xlog_in_core *iclog,
2672 switch (iclog->ic_state) {
2690 * Now that we have an iclog that is in the DONE_SYNC state, do
2692 * If this is not the lowest lsn iclog, then we will leave it
2695 header_lsn = be64_to_cpu(iclog->ic_header.h_lsn);
2699 xlog_state_set_callback(log, iclog, header_lsn);
2703 * Can only perform callbacks in order. Since this iclog is not
2712 * Keep processing entries in the iclog callback list until we come around and
2723 struct xlog_in_core *iclog)
2728 spin_lock(&iclog->ic_callback_lock);
2729 while (!list_empty(&iclog->ic_callbacks)) {
2732 list_splice_init(&iclog->ic_callbacks, &tmp);
2734 spin_unlock(&iclog->ic_callback_lock);
2736 spin_lock(&iclog->ic_callback_lock);
2741 * serialise against anyone trying to add more callbacks to this iclog
2745 spin_unlock(&iclog->ic_callback_lock);
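The loop above (source lines 2728-2745) runs an iclog's callback list by repeatedly splicing the whole pending list out under ic_callback_lock, dropping the lock to run the callbacks (which may queue more work), and re-taking it; the final unlock is what later adders serialise against. A self-contained sketch of that splice-under-lock, run-unlocked pattern, using a pthread mutex and a plain singly linked list in place of the kernel primitives:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct callback {
        void            (*fn)(void *arg);
        void            *arg;
        struct callback *next;
    };

    static pthread_mutex_t cb_lock = PTHREAD_MUTEX_INITIALIZER; /* analogue of ic_callback_lock */
    static struct callback *cb_list;                            /* analogue of ic_callbacks */

    static void add_callback(void (*fn)(void *), void *arg)
    {
        struct callback *cb = malloc(sizeof(*cb));

        cb->fn = fn;
        cb->arg = arg;
        pthread_mutex_lock(&cb_lock);
        cb->next = cb_list;
        cb_list = cb;
        pthread_mutex_unlock(&cb_lock);
    }

    /* Detach the pending list under the lock, run it unlocked, repeat until empty. */
    static void run_callbacks(void)
    {
        pthread_mutex_lock(&cb_lock);
        while (cb_list) {
            struct callback *tmp = cb_list;   /* splice the whole list out */

            cb_list = NULL;
            pthread_mutex_unlock(&cb_lock);

            while (tmp) {
                struct callback *next = tmp->next;

                tmp->fn(tmp->arg);
                free(tmp);
                tmp = next;
            }
            pthread_mutex_lock(&cb_lock);
        }
        pthread_mutex_unlock(&cb_lock);       /* later adders serialise against this */
    }

    static void hello(void *arg)
    {
        printf("callback: %s\n", (const char *)arg);
    }

    int main(void)
    {
        add_callback(hello, (void *)"first");
        add_callback(hello, (void *)"second");
        run_callbacks();
        return 0;
    }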
2752 struct xlog_in_core *iclog;
2770 iclog = log->l_iclog;
2776 if (xlog_state_iodone_process_iclog(log, iclog,
2780 if (iclog->ic_state != XLOG_STATE_CALLBACK &&
2781 iclog->ic_state != XLOG_STATE_IOERROR) {
2782 iclog = iclog->ic_next;
2791 xlog_state_do_iclog_callbacks(log, iclog);
2793 wake_up_all(&iclog->ic_force_wait);
2795 xlog_state_clean_iclog(log, iclog);
2796 iclog = iclog->ic_next;
2797 } while (first_iclog != iclog);
2817 * Finish transitioning this iclog to the dirty state.
2820 * the last call to the iclog. There is a good chance that iclog flushes,
2822 * calls to bwrite. Hence, one iclog flush could generate two calls to this
2831 struct xlog_in_core *iclog)
2833 struct xlog *log = iclog->ic_log;
2836 ASSERT(atomic_read(&iclog->ic_refcnt) == 0);
2844 ASSERT(iclog->ic_state == XLOG_STATE_SYNCING);
2845 iclog->ic_state = XLOG_STATE_DONE_SYNC;
2850 * iclog buffer, we wake them all, one will get to do the
2853 wake_up_all(&iclog->ic_write_wait);
2860 * sleep. We wait on the flush queue on the head iclog as that should be
2861 * the first iclog to complete flushing. Hence if all iclogs are syncing,
2865 * out-of-order even when an iclog past the head is free.
2887 xlog_in_core_t *iclog;
2896 iclog = log->l_iclog;
2897 if (iclog->ic_state != XLOG_STATE_ACTIVE) {
2905 head = &iclog->ic_header;
2907 atomic_inc(&iclog->ic_refcnt); /* prevents sync */
2908 log_offset = iclog->ic_offset;
2910 /* On the 1st write to an iclog, figure out lsn. This works
2935 if (iclog->ic_size - iclog->ic_offset < 2*sizeof(xlog_op_header_t)) {
2938 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2941 * If we are the only one writing to this iclog, sync it to
2945 * reference to the iclog.
2947 if (!atomic_add_unless(&iclog->ic_refcnt, -1, 1))
2948 error = xlog_state_release_iclog(log, iclog);
2956 * of this iclog? Or must we continue a write on the next iclog and
2957 * mark this iclog as completely taken? In the case where we switch
2958 * iclogs (to mark it taken), this particular iclog will release/sync
2961 if (len <= iclog->ic_size - iclog->ic_offset) {
2963 iclog->ic_offset += len;
2966 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2968 *iclogp = iclog;
2970 ASSERT(iclog->ic_offset <= iclog->ic_size);
3064 * This routine will mark the current iclog in the ring as WANT_SYNC and move
3065 * the current iclog pointer to the next iclog in the ring.
3070 struct xlog_in_core *iclog,
3073 ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
3077 eventual_size = iclog->ic_offset;
3078 iclog->ic_state = XLOG_STATE_WANT_SYNC;
3079 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
3108 ASSERT(iclog == log->l_iclog);
3109 log->l_iclog = iclog->ic_next;
3123 * 1. the current iclog is active and has no data; the previous iclog
3125 * 2. the current iclog is dirty, and the previous iclog is in the
3130 * 1. the current iclog is not in the active nor dirty state.
3131 * 2. the current iclog is dirty, and the previous iclog is not in the
3133 * 3. the current iclog is active, and there is another thread writing
3134 * to this particular iclog.
3135 * 4. a) the current iclog is active and has no other writers
3136 * b) when we return from flushing out this iclog, it is still
3145 struct xlog_in_core *iclog;
3154 iclog = log->l_iclog;
3155 if (iclog->ic_state == XLOG_STATE_IOERROR)
3158 if (iclog->ic_state == XLOG_STATE_DIRTY ||
3159 (iclog->ic_state == XLOG_STATE_ACTIVE &&
3160 atomic_read(&iclog->ic_refcnt) == 0 && iclog->ic_offset == 0)) {
3163 * look at the previous iclog.
3165 * If the previous iclog is active or dirty we are done. There
3167 * previous iclog and go to sleep.
3169 iclog = iclog->ic_prev;
3170 } else if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3171 if (atomic_read(&iclog->ic_refcnt) == 0) {
3173 * We are the only one with access to this iclog.
3179 atomic_inc(&iclog->ic_refcnt);
3180 lsn = be64_to_cpu(iclog->ic_header.h_lsn);
3181 xlog_state_switch_iclogs(log, iclog, 0);
3182 if (xlog_state_release_iclog(log, iclog))
3185 if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn)
3189 * Someone else is writing to this iclog.
3195 xlog_state_switch_iclogs(log, iclog, 0);
3199 * If the head iclog is not active nor dirty, we just attach
3206 return xlog_wait_on_iclog(iclog);
3223 struct xlog_in_core *iclog;
3226 iclog = log->l_iclog;
3227 if (iclog->ic_state == XLOG_STATE_IOERROR)
3230 while (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
3231 iclog = iclog->ic_next;
3232 if (iclog == log->l_iclog)
3236 if (iclog->ic_state == XLOG_STATE_ACTIVE) {
3239 * first time we've looked at the correct iclog buf) and the
3243 * transactions into this iclog before we close it down.
3253 (iclog->ic_prev->ic_state == XLOG_STATE_WANT_SYNC ||
3254 iclog->ic_prev->ic_state == XLOG_STATE_SYNCING)) {
3255 xlog_wait(&iclog->ic_prev->ic_write_wait,
3259 atomic_inc(&iclog->ic_refcnt);
3260 xlog_state_switch_iclogs(log, iclog, 0);
3261 if (xlog_state_release_iclog(log, iclog))
3268 return xlog_wait_on_iclog(iclog);
3391 * for LR headers - the space for data in an iclog is the size minus
3392 * the space used for the headers. If we use the iclog size, then we
3400 * transaction is the first in an iclog and hence has the LR headers
3401 * accounted to it, then the remaining space in the iclog is
3403 * than the iclog, it will be the only thing in that iclog.
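The reservation comment above (source lines 3391-3403) turns on the distinction between an iclog's raw size and its usable payload: as line 1415 earlier in this listing shows, ic_size is l_iclog_size minus l_iclog_hsize, so a reservation must charge one log record header per usable-size chunk rather than per iclog-size chunk. A small worked example with hypothetical geometry (32 KiB iclogs and a single 512-byte header; the real values depend on the mount):

    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical geometry; real values come from the mount parameters. */
        int l_iclog_size  = 32 * 1024;
        int l_iclog_hsize = 512;

        /* Usable payload per iclog: ic_size = l_iclog_size - l_iclog_hsize. */
        int ic_size = l_iclog_size - l_iclog_hsize;

        /* A transaction needing 96 KiB of log space. */
        int payload = 96 * 1024;

        int by_usable = (payload + ic_size - 1) / ic_size;             /* 4 */
        int by_raw    = (payload + l_iclog_size - 1) / l_iclog_size;   /* 3 */

        printf("headers to reserve (usable size): %d\n", by_usable);
        printf("headers if raw size were used:    %d\n", by_raw);
        return 0;
    }

Reserving by the raw iclog size would under-count by one header here, which is the under-counting the fragment above alludes to.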
3536 struct xlog_in_core *iclog,
3544 if (blocks < BTOBB(iclog->ic_offset)+BTOBB(log->l_iclog_hsize))
3553 if (blocks < BTOBB(iclog->ic_offset) + 1)
3559 * Perform a number of checks on the iclog before writing to disk.
3576 struct xlog_in_core *iclog,
3588 /* check validity of iclog pointers */
3595 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
3599 if (iclog->ic_header.h_magicno != cpu_to_be32(XLOG_HEADER_MAGIC_NUM))
3602 base_ptr = ptr = &iclog->ic_header;
3603 p = &iclog->ic_header;
3611 len = be32_to_cpu(iclog->ic_header.h_num_logops);
3612 base_ptr = ptr = iclog->ic_datap;
3614 xhdr = iclog->ic_data;
3624 idx = BTOBBT((char *)&ophead->oh_clientid - iclog->ic_datap);
3632 iclog->ic_header.h_cycle_data[idx]);
3648 (uintptr_t)iclog->ic_datap);
3654 op_len = be32_to_cpu(iclog->ic_header.h_cycle_data[idx]);
3669 xlog_in_core_t *iclog, *ic;
3671 iclog = log->l_iclog;
3672 if (iclog->ic_state != XLOG_STATE_IOERROR) {
3677 ic = iclog;
3681 } while (ic != iclog);
3704 * otherwise the iclog writes will fail.
3793 xlog_in_core_t *iclog;
3795 iclog = log->l_iclog;
3800 if (iclog->ic_header.h_num_logops)
3802 iclog = iclog->ic_next;
3803 } while (iclog != log->l_iclog);