Lines matching refs: sdp

35 static void gfs2_log_shutdown(struct gfs2_sbd *sdp);
39 * @sdp: the filesystem
48 unsigned int gfs2_struct2blk(struct gfs2_sbd *sdp, unsigned int nstruct)
55 first = sdp->sd_ldptrs;
59 second = sdp->sd_inptrs;
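
The gfs2_struct2blk() fragments above convert a count of log structures into journal blocks: the first sd_ldptrs entries fit in the initial log descriptor block, and each further block holds sd_inptrs entries. A minimal userspace sketch of that calculation, with the two capacities passed as plain parameters instead of being read from sdp (the helper name and standalone form are assumptions for illustration):

	/*
	 * Sketch of the block count implied by the gfs2_struct2blk() fragments:
	 * one descriptor block holds "ldptrs" entries, each continuation block
	 * holds "inptrs" entries.
	 */
	static unsigned int struct2blk_sketch(unsigned int nstruct,
					      unsigned int ldptrs,
					      unsigned int inptrs)
	{
		unsigned int blks = 1;		/* the initial descriptor block */

		if (nstruct > ldptrs)		/* spill into continuation blocks */
			blks += (nstruct - ldptrs + inptrs - 1) / inptrs;
		return blks;
	}
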
94 * @sdp: The superblock
100 static int gfs2_ail1_start_one(struct gfs2_sbd *sdp,
103 __releases(&sdp->sd_ail_lock)
104 __acquires(&sdp->sd_ail_lock)
115 gfs2_assert(sdp, bd->bd_tr == tr);
123 if (!cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
124 gfs2_io_error_bh(sdp, bh);
125 gfs2_withdraw_delayed(sdp);
129 if (gfs2_withdrawn(sdp)) {
142 spin_unlock(&sdp->sd_ail_lock);
149 spin_lock(&sdp->sd_ail_lock);
160 static void dump_ail_list(struct gfs2_sbd *sdp)
166 list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
170 fs_err(sdp, "bd %p: blk:0x%llx bh=%p ", bd,
173 fs_err(sdp, "\n");
176 fs_err(sdp, "0x%llx up2:%d dirt:%d lkd:%d req:%d "
194 * @sdp: The super block
201 void gfs2_ail1_flush(struct gfs2_sbd *sdp, struct writeback_control *wbc)
203 struct list_head *head = &sdp->sd_ail1_list;
209 trace_gfs2_ail_flush(sdp, wbc, 1);
211 spin_lock(&sdp->sd_ail_lock);
215 fs_err(sdp, "Error: In %s for ten minutes! t=%d\n",
217 dump_ail_list(sdp);
223 ret = gfs2_ail1_start_one(sdp, wbc, tr, &plug);
231 spin_unlock(&sdp->sd_ail_lock);
234 gfs2_lm(sdp, "gfs2_ail1_start_one returned: %d\n", ret);
235 gfs2_withdraw(sdp);
237 trace_gfs2_ail_flush(sdp, wbc, 0);
242 * @sdp: The superblock
245 static void gfs2_ail1_start(struct gfs2_sbd *sdp)
254 return gfs2_ail1_flush(sdp, &wbc);
257 static void gfs2_log_update_flush_tail(struct gfs2_sbd *sdp)
259 unsigned int new_flush_tail = sdp->sd_log_head;
262 if (!list_empty(&sdp->sd_ail1_list)) {
263 tr = list_last_entry(&sdp->sd_ail1_list,
267 sdp->sd_log_flush_tail = new_flush_tail;
270 static void gfs2_log_update_head(struct gfs2_sbd *sdp)
272 unsigned int new_head = sdp->sd_log_flush_head;
274 if (sdp->sd_log_flush_tail == sdp->sd_log_head)
275 sdp->sd_log_flush_tail = new_head;
276 sdp->sd_log_head = new_head;
283 static void gfs2_ail_empty_tr(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
291 gfs2_assert(sdp, bd->bd_tr == tr);
298 * @sdp: the filesystem
305 static int gfs2_ail1_empty_one(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
315 gfs2_assert(sdp, bd->bd_tr == tr);
325 if (!sdp->sd_log_error && buffer_busy(bh)) {
330 !cmpxchg(&sdp->sd_log_error, 0, -EIO)) {
331 gfs2_io_error_bh(sdp, bh);
332 gfs2_withdraw_delayed(sdp);
341 gfs2_add_revoke(sdp, bd);
352 * @sdp: The superblock
358 static int gfs2_ail1_empty(struct gfs2_sbd *sdp, int max_revokes)
364 spin_lock(&sdp->sd_ail_lock);
365 list_for_each_entry_safe_reverse(tr, s, &sdp->sd_ail1_list, tr_list) {
366 if (!gfs2_ail1_empty_one(sdp, tr, &max_revokes) && oldest_tr)
367 list_move(&tr->tr_list, &sdp->sd_ail2_list);
371 gfs2_log_update_flush_tail(sdp);
372 ret = list_empty(&sdp->sd_ail1_list);
373 spin_unlock(&sdp->sd_ail_lock);
375 if (test_bit(SDF_WITHDRAWING, &sdp->sd_flags)) {
376 gfs2_lm(sdp, "fatal: I/O error(s)\n");
377 gfs2_withdraw(sdp);
383 static void gfs2_ail1_wait(struct gfs2_sbd *sdp)
389 spin_lock(&sdp->sd_ail_lock);
390 list_for_each_entry_reverse(tr, &sdp->sd_ail1_list, tr_list) {
396 spin_unlock(&sdp->sd_ail_lock);
402 spin_unlock(&sdp->sd_ail_lock);
405 static void __ail2_empty(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
407 gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
409 gfs2_assert_warn(sdp, list_empty(&tr->tr_ail1_list));
410 gfs2_assert_warn(sdp, list_empty(&tr->tr_ail2_list));
411 gfs2_trans_free(sdp, tr);
414 static void ail2_empty(struct gfs2_sbd *sdp, unsigned int new_tail)
416 struct list_head *ail2_list = &sdp->sd_ail2_list;
417 unsigned int old_tail = sdp->sd_log_tail;
420 spin_lock(&sdp->sd_ail_lock);
424 __ail2_empty(sdp, tr);
429 __ail2_empty(sdp, tr);
432 spin_unlock(&sdp->sd_ail_lock);
437 * @sdp: The GFS2 superblock
440 bool gfs2_log_is_empty(struct gfs2_sbd *sdp) {
441 return atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks;
444 static bool __gfs2_log_try_reserve_revokes(struct gfs2_sbd *sdp, unsigned int revokes)
448 available = atomic_read(&sdp->sd_log_revokes_available);
450 if (atomic_try_cmpxchg(&sdp->sd_log_revokes_available,
459 * @sdp: The GFS2 superblock
462 * sdp->sd_log_flush_lock must be held.
464 void gfs2_log_release_revokes(struct gfs2_sbd *sdp, unsigned int revokes)
467 atomic_add(revokes, &sdp->sd_log_revokes_available);
472 * @sdp: The GFS2 superblock
477 void gfs2_log_release(struct gfs2_sbd *sdp, unsigned int blks)
479 atomic_add(blks, &sdp->sd_log_blks_free);
480 trace_gfs2_log_blocks(sdp, blks);
481 gfs2_assert_withdraw(sdp, atomic_read(&sdp->sd_log_blks_free) <=
482 sdp->sd_jdesc->jd_blocks);
483 if (atomic_read(&sdp->sd_log_blks_needed))
484 wake_up(&sdp->sd_log_waitq);
489 * @sdp: The GFS2 superblock
496 static bool __gfs2_log_try_reserve(struct gfs2_sbd *sdp, unsigned int blks,
502 free_blocks = atomic_read(&sdp->sd_log_blks_free);
504 if (atomic_try_cmpxchg(&sdp->sd_log_blks_free, &free_blocks,
506 trace_gfs2_log_blocks(sdp, -blks);
515 * @sdp: The GFS2 superblock
532 static void __gfs2_log_reserve(struct gfs2_sbd *sdp, unsigned int blks,
538 atomic_add(blks, &sdp->sd_log_blks_needed);
540 if (current != sdp->sd_logd_process)
541 wake_up(&sdp->sd_logd_waitq);
542 io_wait_event(sdp->sd_log_waitq,
543 (free_blocks = atomic_read(&sdp->sd_log_blks_free),
546 if (atomic_try_cmpxchg(&sdp->sd_log_blks_free,
554 trace_gfs2_log_blocks(sdp, -blks);
555 if (atomic_sub_return(blks, &sdp->sd_log_blks_needed))
556 wake_up(&sdp->sd_log_waitq);
561 * @sdp: The GFS2 superblock
565 * This is similar to gfs2_log_reserve, but sdp->sd_log_flush_lock must be
569 bool gfs2_log_try_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
577 if (revokes && !__gfs2_log_try_reserve_revokes(sdp, revokes)) {
578 revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs);
579 *extra_revokes = revoke_blks * sdp->sd_inptrs - revokes;
584 if (__gfs2_log_try_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS))
587 gfs2_log_release_revokes(sdp, revokes);
593 * @sdp: The GFS2 superblock
597 * sdp->sd_log_flush_lock must not be held.
600 void gfs2_log_reserve(struct gfs2_sbd *sdp, struct gfs2_trans *tr,
609 revoke_blks = DIV_ROUND_UP(revokes, sdp->sd_inptrs);
610 *extra_revokes = revoke_blks * sdp->sd_inptrs - revokes;
613 __gfs2_log_reserve(sdp, blks, GFS2_LOG_FLUSH_MIN_BLOCKS);
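
Both gfs2_log_try_reserve() and gfs2_log_reserve() round a requested revoke count up to whole revoke blocks of sd_inptrs entries and hand the slack back through *extra_revokes. A small sketch of that arithmetic with sd_inptrs passed in directly (the helper name is made up for illustration):

	/* Round "revokes" up to whole revoke blocks of "inptrs" entries each,
	 * reporting how many unused revoke slots the caller gets on top. */
	static unsigned int revoke_blocks_sketch(unsigned int revokes,
						 unsigned int inptrs,
						 unsigned int *extra_revokes)
	{
		unsigned int revoke_blks = (revokes + inptrs - 1) / inptrs;	/* DIV_ROUND_UP */

		*extra_revokes = revoke_blks * inptrs - revokes;
		return revoke_blks;
	}

For example, with 500 revokes and a hypothetical capacity of 128 entries per block this reserves four blocks and reports 12 extra revoke slots.
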
618 * @sdp: The GFS2 superblock
628 static inline unsigned int log_distance(struct gfs2_sbd *sdp, unsigned int newer,
635 dist += sdp->sd_jdesc->jd_blocks;
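
log_distance() measures how far "newer" is ahead of "older" in the circular journal; when the head has wrapped past the end, the journal length is added back, which is the "dist += sdp->sd_jdesc->jd_blocks" line above. A minimal sketch of the same computation, written without the signed intermediate and with the journal size as a plain parameter:

	/* Circular distance from "older" to "newer" inside a journal of
	 * "jd_blocks" blocks; mirrors the wraparound handling shown above. */
	static unsigned int log_distance_sketch(unsigned int newer,
						unsigned int older,
						unsigned int jd_blocks)
	{
		if (newer >= older)
			return newer - older;
		return newer + jd_blocks - older;	/* wrapped past the end */
	}
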
642 * @sdp: The GFS2 superblock
661 static unsigned int calc_reserved(struct gfs2_sbd *sdp)
665 struct gfs2_trans *tr = sdp->sd_log_tr;
669 reserved += blocks + DIV_ROUND_UP(blocks, buf_limit(sdp));
671 reserved += blocks + DIV_ROUND_UP(blocks, databuf_limit(sdp));
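
The calc_reserved() fragments show the per-type cost: a set of journaled buffers needs the buffer blocks themselves plus one descriptor block per buf_limit() (or databuf_limit(), for data buffers) entries. A hedged sketch of that term only, with the limit passed as a number; fixed costs the real function also adds (such as the log header) are left out here:

	/* Reservation for one group of "blocks" journaled buffers whose
	 * descriptors hold "limit" entries each (DIV_ROUND_UP spelled out). */
	static unsigned int reserved_for_sketch(unsigned int blocks, unsigned int limit)
	{
		return blocks + (blocks + limit - 1) / limit;
	}
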
676 static void log_pull_tail(struct gfs2_sbd *sdp)
678 unsigned int new_tail = sdp->sd_log_flush_tail;
681 if (new_tail == sdp->sd_log_tail)
683 dist = log_distance(sdp, new_tail, sdp->sd_log_tail);
684 ail2_empty(sdp, new_tail);
685 gfs2_log_release(sdp, dist);
686 sdp->sd_log_tail = new_tail;
690 void log_flush_wait(struct gfs2_sbd *sdp)
694 if (atomic_read(&sdp->sd_log_in_flight)) {
696 prepare_to_wait(&sdp->sd_log_flush_wait, &wait,
698 if (atomic_read(&sdp->sd_log_in_flight))
700 } while(atomic_read(&sdp->sd_log_in_flight));
701 finish_wait(&sdp->sd_log_flush_wait, &wait);
725 static void gfs2_ordered_write(struct gfs2_sbd *sdp)
730 spin_lock(&sdp->sd_ordered_lock);
731 list_sort(NULL, &sdp->sd_log_ordered, &ip_cmp);
732 while (!list_empty(&sdp->sd_log_ordered)) {
733 ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
739 spin_unlock(&sdp->sd_ordered_lock);
741 spin_lock(&sdp->sd_ordered_lock);
743 list_splice(&written, &sdp->sd_log_ordered);
744 spin_unlock(&sdp->sd_ordered_lock);
747 static void gfs2_ordered_wait(struct gfs2_sbd *sdp)
751 spin_lock(&sdp->sd_ordered_lock);
752 while (!list_empty(&sdp->sd_log_ordered)) {
753 ip = list_first_entry(&sdp->sd_log_ordered, struct gfs2_inode, i_ordered);
757 spin_unlock(&sdp->sd_ordered_lock);
759 spin_lock(&sdp->sd_ordered_lock);
761 spin_unlock(&sdp->sd_ordered_lock);
766 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
768 spin_lock(&sdp->sd_ordered_lock);
770 spin_unlock(&sdp->sd_ordered_lock);
773 void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
778 sdp->sd_log_num_revoke++;
786 list_add(&bd->bd_list, &sdp->sd_log_revokes);
799 * @sdp: The GFS2 superblock
809 void gfs2_flush_revokes(struct gfs2_sbd *sdp)
812 unsigned int max_revokes = atomic_read(&sdp->sd_log_revokes_available);
814 gfs2_log_lock(sdp);
815 gfs2_ail1_empty(sdp, max_revokes);
816 gfs2_log_unlock(sdp);
821 * @sdp: The GFS2 superblock
832 void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
839 struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
841 struct super_block *sb = sdp->sd_vfs;
844 if (gfs2_withdrawn(sdp))
855 lh->lh_header.mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
874 if (gfs2_assert_withdraw(sdp, ret == 0))
885 cpu_to_be64(GFS2_I(sdp->sd_sc_inode)->i_no_addr);
887 cpu_to_be64(GFS2_I(sdp->sd_qc_inode)->i_no_addr);
889 spin_lock(&sdp->sd_statfs_spin);
893 spin_unlock(&sdp->sd_statfs_spin);
902 gfs2_log_write(sdp, jd, page, sb->s_blocksize, 0, dblock);
908 * @sdp: The GFS2 superblock
914 static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
918 gfs2_assert_withdraw(sdp, !test_bit(SDF_FROZEN, &sdp->sd_flags));
920 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
921 gfs2_ordered_wait(sdp);
922 log_flush_wait(sdp);
925 sdp->sd_log_idle = (sdp->sd_log_flush_tail == sdp->sd_log_flush_head);
926 gfs2_write_log_header(sdp, sdp->sd_jdesc, sdp->sd_log_sequence++,
927 sdp->sd_log_flush_tail, sdp->sd_log_flush_head,
929 gfs2_log_incr_head(sdp);
930 log_flush_wait(sdp);
931 log_pull_tail(sdp);
932 gfs2_log_update_head(sdp);
937 * @sdp: Pointer to GFS2 superblock
939 void gfs2_ail_drain(struct gfs2_sbd *sdp)
943 spin_lock(&sdp->sd_ail_lock);
951 while (!list_empty(&sdp->sd_ail1_list)) {
952 tr = list_first_entry(&sdp->sd_ail1_list, struct gfs2_trans,
954 gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail1_list);
955 gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
957 gfs2_trans_free(sdp, tr);
959 while (!list_empty(&sdp->sd_ail2_list)) {
960 tr = list_first_entry(&sdp->sd_ail2_list, struct gfs2_trans,
962 gfs2_ail_empty_tr(sdp, tr, &tr->tr_ail2_list);
964 gfs2_trans_free(sdp, tr);
966 gfs2_drain_revokes(sdp);
967 spin_unlock(&sdp->sd_ail_lock);
972 * @sdp: Pointer to GFS2 superblock
974 static void empty_ail1_list(struct gfs2_sbd *sdp)
980 fs_err(sdp, "Error: In %s for 10 minutes! t=%d\n",
982 dump_ail_list(sdp);
985 gfs2_ail1_start(sdp);
986 gfs2_ail1_wait(sdp);
987 if (gfs2_ail1_empty(sdp, 0))
1028 * @sdp: The filesystem
1034 void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags)
1038 bool frozen = test_bit(SDF_FROZEN, &sdp->sd_flags);
1042 down_write(&sdp->sd_log_flush_lock);
1043 trace_gfs2_log_flush(sdp, 1, flags);
1050 if (gfs2_withdrawn(sdp) || !test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
1057 first_log_head = sdp->sd_log_head;
1058 sdp->sd_log_flush_head = first_log_head;
1060 tr = sdp->sd_log_tr;
1061 if (tr || sdp->sd_log_num_revoke) {
1063 gfs2_log_release(sdp, reserved_blocks);
1064 reserved_blocks = sdp->sd_log_blks_reserved;
1065 reserved_revokes = sdp->sd_log_num_revoke;
1067 sdp->sd_log_tr = NULL;
1070 if (gfs2_assert_withdraw_delayed(sdp,
1079 if (current == sdp->sd_logd_process)
1082 if (!__gfs2_log_try_reserve(sdp, reserved_blocks, taboo_blocks)) {
1083 up_write(&sdp->sd_log_flush_lock);
1084 __gfs2_log_reserve(sdp, reserved_blocks, taboo_blocks);
1085 down_write(&sdp->sd_log_flush_lock);
1088 BUG_ON(sdp->sd_log_num_revoke);
1092 clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
1095 if (gfs2_assert_withdraw_delayed(sdp, !reserved_revokes))
1098 gfs2_ordered_write(sdp);
1099 if (gfs2_withdrawn(sdp))
1101 lops_before_commit(sdp, tr);
1102 if (gfs2_withdrawn(sdp))
1104 gfs2_log_submit_bio(&sdp->sd_jdesc->jd_log_bio, REQ_OP_WRITE);
1105 if (gfs2_withdrawn(sdp))
1108 if (sdp->sd_log_head != sdp->sd_log_flush_head) {
1109 log_write_header(sdp, flags);
1110 } else if (sdp->sd_log_tail != sdp->sd_log_flush_tail && !sdp->sd_log_idle) {
1111 log_write_header(sdp, flags);
1113 if (gfs2_withdrawn(sdp))
1115 lops_after_commit(sdp, tr);
1117 gfs2_log_lock(sdp);
1118 sdp->sd_log_blks_reserved = 0;
1120 spin_lock(&sdp->sd_ail_lock);
1122 list_add(&tr->tr_list, &sdp->sd_ail1_list);
1125 spin_unlock(&sdp->sd_ail_lock);
1126 gfs2_log_unlock(sdp);
1129 if (!sdp->sd_log_idle) {
1130 empty_ail1_list(sdp);
1131 if (gfs2_withdrawn(sdp))
1133 log_write_header(sdp, flags);
1137 gfs2_log_shutdown(sdp);
1141 used_blocks = log_distance(sdp, sdp->sd_log_flush_head, first_log_head);
1142 reserved_revokes += atomic_read(&sdp->sd_log_revokes_available);
1143 atomic_set(&sdp->sd_log_revokes_available, sdp->sd_ldptrs);
1144 gfs2_assert_withdraw(sdp, reserved_revokes % sdp->sd_inptrs <= sdp->sd_ldptrs);
1145 if (reserved_revokes > sdp->sd_ldptrs)
1146 reserved_blocks += (reserved_revokes - sdp->sd_ldptrs + sdp->sd_inptrs - 1) / sdp->sd_inptrs;
1149 gfs2_assert_withdraw_delayed(sdp, used_blocks < reserved_blocks);
1150 gfs2_log_release(sdp, reserved_blocks - used_blocks);
1152 up_write(&sdp->sd_log_flush_lock);
1153 gfs2_trans_free(sdp, tr);
1154 if (gfs2_withdrawing(sdp))
1155 gfs2_withdraw(sdp);
1156 trace_gfs2_log_flush(sdp, 0, flags);
1167 spin_lock(&sdp->sd_ail_lock);
1169 list_add(&tr->tr_list, &sdp->sd_ail1_list);
1170 spin_unlock(&sdp->sd_ail_lock);
1177 * @sdp: the filesystem
1181 static void gfs2_merge_trans(struct gfs2_sbd *sdp, struct gfs2_trans *new)
1183 struct gfs2_trans *old = sdp->sd_log_tr;
1197 spin_lock(&sdp->sd_ail_lock);
1200 spin_unlock(&sdp->sd_ail_lock);
1203 static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
1209 gfs2_log_lock(sdp);
1211 if (sdp->sd_log_tr) {
1212 gfs2_merge_trans(sdp, tr);
1214 gfs2_assert_withdraw(sdp, !test_bit(TR_ONSTACK, &tr->tr_flags));
1215 sdp->sd_log_tr = tr;
1219 reserved = calc_reserved(sdp);
1220 maxres = sdp->sd_log_blks_reserved + tr->tr_reserved;
1221 gfs2_assert_withdraw(sdp, maxres >= reserved);
1224 gfs2_log_release(sdp, unused);
1225 sdp->sd_log_blks_reserved = reserved;
1227 gfs2_log_unlock(sdp);
1230 static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
1232 return atomic_read(&sdp->sd_log_pinned) +
1233 atomic_read(&sdp->sd_log_blks_needed) >=
1234 atomic_read(&sdp->sd_log_thresh1);
1237 static inline int gfs2_ail_flush_reqd(struct gfs2_sbd *sdp)
1239 return sdp->sd_jdesc->jd_blocks -
1240 atomic_read(&sdp->sd_log_blks_free) +
1241 atomic_read(&sdp->sd_log_blks_needed) >=
1242 atomic_read(&sdp->sd_log_thresh2);
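
Together, gfs2_jrnl_flush_reqd() and gfs2_ail_flush_reqd() above are the conditions the logd thread polls: flush the journal once pinned plus needed blocks reach thresh1, and push the AIL once the blocks in use (journal size minus free, plus needed) reach thresh2. A compact sketch of both tests with the atomics replaced by plain values:

	/* Sketch of the logd wake-up tests; every counter is passed in
	 * directly instead of being read from atomics in sdp. */
	static int jrnl_flush_reqd_sketch(unsigned int pinned, unsigned int needed,
					  unsigned int thresh1)
	{
		return pinned + needed >= thresh1;
	}

	static int ail_flush_reqd_sketch(unsigned int jd_blocks, unsigned int free_blks,
					 unsigned int needed, unsigned int thresh2)
	{
		return jd_blocks - free_blks + needed >= thresh2;
	}
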
1247 * @sdp: the filesystem
1260 void gfs2_log_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
1262 log_refund(sdp, tr);
1264 if (gfs2_ail_flush_reqd(sdp) || gfs2_jrnl_flush_reqd(sdp))
1265 wake_up(&sdp->sd_logd_waitq);
1270 * @sdp: the filesystem
1274 static void gfs2_log_shutdown(struct gfs2_sbd *sdp)
1276 gfs2_assert_withdraw(sdp, !sdp->sd_log_blks_reserved);
1277 gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);
1278 gfs2_assert_withdraw(sdp, list_empty(&sdp->sd_ail1_list));
1280 log_write_header(sdp, GFS2_LOG_HEAD_UNMOUNT | GFS2_LFC_SHUTDOWN);
1281 log_pull_tail(sdp);
1283 gfs2_assert_warn(sdp, sdp->sd_log_head == sdp->sd_log_tail);
1284 gfs2_assert_warn(sdp, list_empty(&sdp->sd_ail2_list));
1297 struct gfs2_sbd *sdp = data;
1301 if (gfs2_withdrawn(sdp))
1305 if (sdp->sd_log_error) {
1306 gfs2_lm(sdp,
1310 sdp->sd_fsname, sdp->sd_log_error);
1311 gfs2_withdraw(sdp);
1315 if (gfs2_jrnl_flush_reqd(sdp) || t == 0) {
1316 gfs2_ail1_empty(sdp, 0);
1317 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
1321 if (test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
1322 gfs2_ail_flush_reqd(sdp)) {
1323 clear_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
1324 gfs2_ail1_start(sdp);
1325 gfs2_ail1_wait(sdp);
1326 gfs2_ail1_empty(sdp, 0);
1327 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
1331 t = gfs2_tune_get(sdp, gt_logd_secs) * HZ;
1335 t = wait_event_interruptible_timeout(sdp->sd_logd_waitq,
1336 test_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags) ||
1337 gfs2_ail_flush_reqd(sdp) ||
1338 gfs2_jrnl_flush_reqd(sdp) ||
1339 sdp->sd_log_error ||
1340 gfs2_withdrawn(sdp) ||