Lines matching refs: sbp

90 struct nilfs_super_block **sbp;
95 sbp = nilfs_prepare_super(sb, 0);
96 if (likely(sbp)) {
97 sbp[0]->s_state |= cpu_to_le16(NILFS_ERROR_FS);
98 if (sbp[1])
99 sbp[1]->s_state |= cpu_to_le16(NILFS_ERROR_FS);
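
The hits at lines 95-99 show the update pattern that recurs throughout this listing: nilfs_prepare_super() hands back the pair of superblock images, the caller modifies sbp[0] and mirrors the change into sbp[1] when that copy exists, and the checksum-and-write code that lines 275-290 below belong to then puts the result on disk. A minimal userspace sketch of the mirroring step, with hypothetical demo_* names standing in for the kernel structures and for NILFS_ERROR_FS, and glibc's <endian.h> for the byte-order helpers:

#include <stdint.h>
#include <endian.h>

#define DEMO_ERROR_FS 0x0002           /* hypothetical; stands in for NILFS_ERROR_FS */

struct demo_super {                    /* stand-in for struct nilfs_super_block; */
        uint16_t s_state;              /* only the field this sketch needs */
};

/* Set the error flag in the primary superblock image and mirror it into
 * the secondary copy when that copy is available. */
static void demo_set_error(struct demo_super *sbp[2])
{
        sbp[0]->s_state |= htole16(DEMO_ERROR_FS);
        if (sbp[1])
                sbp[1]->s_state |= htole16(DEMO_ERROR_FS);
}
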
191 * sbp[0] points to newer log than sbp[1],
192 * so copy sbp[0] to sbp[1] to take over sbp[0].
200 struct nilfs_super_block *sbp = nilfs->ns_sbp[0];
219 sbp = nilfs->ns_sbp[1];
223 nilfs->ns_prot_seq = le64_to_cpu(sbp->s_last_seq);
230 void nilfs_set_log_cursor(struct nilfs_super_block *sbp,
237 sbp->s_free_blocks_count = cpu_to_le64(nfreeblocks);
240 sbp->s_last_seq = cpu_to_le64(nilfs->ns_last_seq);
241 sbp->s_last_pseg = cpu_to_le64(nilfs->ns_last_pseg);
242 sbp->s_last_cno = cpu_to_le64(nilfs->ns_last_cno);
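
Lines 230-242 are the body of nilfs_set_log_cursor(): it copies the in-memory position of the last log (sequence number, partial-segment address, checkpoint number) plus the free-block count into the on-disk superblock, converting each field to little-endian. A self-contained sketch of that conversion, using hypothetical demo_* types and glibc's htole64():

#include <stdint.h>
#include <endian.h>

struct demo_super {                    /* stand-in for struct nilfs_super_block */
        uint64_t s_free_blocks_count;  /* on-disk fields are little-endian */
        uint64_t s_last_seq;
        uint64_t s_last_pseg;
        uint64_t s_last_cno;
};

struct demo_fs_state {                 /* stand-in for the in-memory state */
        uint64_t last_seq;             /* sequence number of the last log */
        uint64_t last_pseg;            /* block address of the last partial segment */
        uint64_t last_cno;             /* last checkpoint number */
        uint64_t free_blocks;
};

/* Record the current log cursor in the superblock image, converting each
 * native-endian counter to the on-disk little-endian layout. */
static void demo_set_log_cursor(struct demo_super *sbp,
                                const struct demo_fs_state *fs)
{
        sbp->s_free_blocks_count = htole64(fs->free_blocks);
        sbp->s_last_seq  = htole64(fs->last_seq);
        sbp->s_last_pseg = htole64(fs->last_pseg);
        sbp->s_last_cno  = htole64(fs->last_cno);
}
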
250 struct nilfs_super_block **sbp = nilfs->ns_sbp;
253 if (sbp[0]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) {
254 if (sbp[1] &&
255 sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC)) {
256 memcpy(sbp[0], sbp[1], nilfs->ns_sbsize);
261 } else if (sbp[1] &&
262 sbp[1]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) {
263 memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
266 if (flip && sbp[1])
269 return sbp;
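
Lines 250-269 (nilfs_prepare_super) validate the magic number of both superblock copies and repair a corrupted copy by memcpy()ing the valid one over it before returning the pair; the flip handling at line 266, which swaps the roles of the two copies, is omitted from the sketch. A hedged userspace version of the repair logic, with a hypothetical magic value in place of NILFS_SUPER_MAGIC:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <endian.h>

#define DEMO_MAGIC 0x3434u             /* hypothetical; NILFS has its own NILFS_SUPER_MAGIC */

struct demo_super {                    /* stand-in; only the field this sketch needs */
        uint16_t s_magic;              /* little-endian magic number */
};

/*
 * Hand back the superblock pair ready for modification.  If one copy has a
 * bad magic number, overwrite it with the other, valid copy -- the same
 * repair the listing performs with memcpy() at lines 256 and 263.
 */
static struct demo_super **demo_prepare_super(struct demo_super **sbp,
                                              size_t sbsize)
{
        if (sbp[0]->s_magic != htole16(DEMO_MAGIC)) {
                if (!sbp[1] || sbp[1]->s_magic != htole16(DEMO_MAGIC))
                        return NULL;               /* both copies unusable */
                memcpy(sbp[0], sbp[1], sbsize);    /* repair primary from backup */
        } else if (sbp[1] && sbp[1]->s_magic != htole16(DEMO_MAGIC)) {
                memcpy(sbp[1], sbp[0], sbsize);    /* repair backup from primary */
        }
        return sbp;
}
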
275 struct nilfs_super_block **sbp = nilfs->ns_sbp;
281 sbp[0]->s_wtime = cpu_to_le64(t);
282 sbp[0]->s_sum = 0;
283 sbp[0]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed,
284 (unsigned char *)sbp[0],
286 if (flag == NILFS_SB_COMMIT_ALL && sbp[1]) {
287 sbp[1]->s_wtime = sbp[0]->s_wtime;
288 sbp[1]->s_sum = 0;
289 sbp[1]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed,
290 (unsigned char *)sbp[1],
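
Lines 281-290 show how a superblock image is signed before being written back: stamp s_wtime, clear s_sum, checksum the whole block with the per-filesystem seed, then repeat for the second copy when all copies are being committed. The sketch below reproduces the "clear the checksum field before summing" order with a plain bit-at-a-time CRC32 (reflected polynomial 0xedb88320); it is illustrative only and is not claimed to match the kernel's crc32_le() output or the real nilfs_super_block layout.

#include <stdint.h>
#include <stddef.h>
#include <endian.h>

/* Minimal bit-at-a-time CRC32, used here only to illustrate the
 * checksum-over-the-whole-block pattern. */
static uint32_t demo_crc32_le(uint32_t seed, const unsigned char *p, size_t len)
{
        uint32_t crc = seed;

        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1));
        }
        return crc;
}

struct demo_super {                    /* stand-in; only the fields this sketch needs */
        uint64_t s_wtime;              /* write time, little-endian */
        uint32_t s_sum;                /* checksum of the whole block, little-endian */
};

/* Stamp the write time, then checksum the block with s_sum cleared so the
 * stored sum never includes itself (same order as lines 281-284 above). */
static void demo_sign_super(struct demo_super *sbp, size_t sbsize,
                            uint32_t crc_seed, uint64_t now)
{
        sbp->s_wtime = htole64(now);
        sbp->s_sum = 0;
        sbp->s_sum = htole32(demo_crc32_le(crc_seed,
                                           (const unsigned char *)sbp, sbsize));
}
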
311 struct nilfs_super_block **sbp;
315 sbp = nilfs_prepare_super(sb, 0);
316 if (sbp) {
317 sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state);
318 nilfs_set_log_cursor(sbp[0], nilfs);
319 if (sbp[1] && sbp[0]->s_last_cno == sbp[1]->s_last_cno) {
325 sbp[1]->s_state = sbp[0]->s_state;
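
Lines 311-325 are the cleanup path: the saved mount state is written back into s_state and the log cursor refreshed, but the state is propagated to sbp[1] only when both copies already point at the same checkpoint (their s_last_cno fields match); otherwise the older copy keeps its own state. A sketch of that guard, reusing the demo_* stand-ins from the sketches above:

#include <stdint.h>
#include <endian.h>

struct demo_super {                    /* stand-in; only the fields this sketch needs */
        uint16_t s_state;
        uint64_t s_last_cno;           /* checkpoint number of the last log */
};

/* Propagate the final state to the secondary copy only when it already
 * refers to the same checkpoint as the primary. */
static int demo_cleanup_super(struct demo_super *sbp[2], uint16_t mount_state)
{
        int commit_both = 0;

        sbp[0]->s_state = htole16(mount_state);
        if (sbp[1] && sbp[0]->s_last_cno == sbp[1]->s_last_cno) {
                sbp[1]->s_state = sbp[0]->s_state;
                commit_both = 1;       /* caller would then commit both copies */
        }
        return commit_both;
}
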
422 struct nilfs_super_block **sbp;
464 sbp = nilfs_prepare_super(sb, 0);
465 if (likely(sbp)) {
466 nilfs_set_log_cursor(sbp[0], nilfs);
472 sbp[0]->s_state = cpu_to_le16(le16_to_cpu(sbp[0]->s_state) &
474 sbp[0]->s_dev_size = cpu_to_le64(newsize);
475 sbp[0]->s_nsegments = cpu_to_le64(nilfs->ns_nsegments);
476 if (sbp[1])
477 memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
518 struct nilfs_super_block **sbp;
527 sbp = nilfs_prepare_super(sb, nilfs_sb_will_flip(nilfs));
528 if (likely(sbp)) {
529 nilfs_set_log_cursor(sbp[0], nilfs);
818 struct nilfs_super_block *sbp)
829 struct nilfs_super_block **sbp;
834 sbp = nilfs_prepare_super(sb, 0);
835 if (!sbp)
841 max_mnt_count = le16_to_cpu(sbp[0]->s_max_mnt_count);
842 mnt_count = le16_to_cpu(sbp[0]->s_mnt_count);
852 sbp[0]->s_max_mnt_count = cpu_to_le16(NILFS_DFL_MAX_MNT_COUNT);
854 sbp[0]->s_mnt_count = cpu_to_le16(mnt_count + 1);
855 sbp[0]->s_mtime = cpu_to_le64(ktime_get_real_seconds());
858 sbp[0]->s_state =
859 cpu_to_le16(le16_to_cpu(sbp[0]->s_state) & ~NILFS_VALID_FS);
860 /* synchronize sbp[1] with sbp[0] */
861 if (sbp[1])
862 memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
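
Lines 829-862 come from mount-time setup: the le16 mount counters are read into CPU order, a default s_max_mnt_count is installed at line 852, the mount count is bumped, the mount time is stamped, and the "valid (cleanly unmounted)" bit is cleared while the filesystem is mounted; finally sbp[1] is memcpy()d from sbp[0] so both copies stay identical. A sketch of the little-endian read-modify-write part, with hypothetical demo_* constants in place of NILFS_VALID_FS and NILFS_DFL_MAX_MNT_COUNT:

#include <stdint.h>
#include <endian.h>

#define DEMO_VALID_FS       0x0001     /* hypothetical; stands in for NILFS_VALID_FS */
#define DEMO_DFL_MAX_MNT    65535      /* hypothetical default mount-count limit */

struct demo_super {                    /* stand-in; only the fields this sketch needs */
        uint16_t s_state;
        uint16_t s_mnt_count;
        uint16_t s_max_mnt_count;
        uint64_t s_mtime;
};

/* Mount-time bookkeeping: bump the mount counter, stamp the mount time and
 * clear the "cleanly unmounted" bit, using the usual little-endian
 * read-modify-write (convert to CPU order, change, convert back). */
static void demo_setup_super(struct demo_super *sbp, uint64_t now)
{
        uint16_t mnt_count = le16toh(sbp->s_mnt_count);

        /* The listing shows only the assignment at line 852; guarding it on
         * an unset limit is an assumption of this sketch. */
        if (!le16toh(sbp->s_max_mnt_count))
                sbp->s_max_mnt_count = htole16(DEMO_DFL_MAX_MNT);

        sbp->s_mnt_count = htole16((uint16_t)(mnt_count + 1));
        sbp->s_mtime = htole64(now);
        sbp->s_state = htole16(le16toh(sbp->s_state) & ~DEMO_VALID_FS);
}
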
881 struct nilfs_super_block *sbp,
886 sb->s_magic = le16_to_cpu(sbp->s_magic);
893 nilfs_set_default_options(sb, sbp);
895 nilfs->ns_resuid = le16_to_cpu(sbp->s_def_resuid);
896 nilfs->ns_resgid = le16_to_cpu(sbp->s_def_resgid);
897 nilfs->ns_interval = le32_to_cpu(sbp->s_c_interval);
898 nilfs->ns_watermark = le32_to_cpu(sbp->s_c_block_max);
904 struct nilfs_super_block *sbp)
908 features = le64_to_cpu(sbp->s_feature_incompat) &
916 features = le64_to_cpu(sbp->s_feature_compat_ro) &
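
Lines 904-916 belong to the feature check done while reading the superblock: the on-disk feature bitmaps are masked against the sets this code supports, any unknown incompat bit refuses the mount outright, and any unknown read-only-compat bit refuses a writable mount. A sketch of that masking, with hypothetical demo_* supported sets standing in for the NILFS_FEATURE_*_SUPP masks:

#include <stdint.h>
#include <stdbool.h>
#include <endian.h>

#define DEMO_FEATURE_INCOMPAT_SUPP   0x0ULL   /* hypothetical supported sets */
#define DEMO_FEATURE_COMPAT_RO_SUPP  0x1ULL

struct demo_super {                    /* stand-in; only the fields this sketch needs */
        uint64_t s_feature_compat_ro;  /* little-endian feature bitmaps */
        uint64_t s_feature_incompat;
};

/* Refuse the mount if the superblock advertises incompat features this code
 * does not know, and refuse a writable mount on unknown RO-compat features,
 * following the masking pattern at lines 908 and 916 above. */
static bool demo_features_ok(const struct demo_super *sbp, bool readonly)
{
        uint64_t unknown;

        unknown = le64toh(sbp->s_feature_incompat) & ~DEMO_FEATURE_INCOMPAT_SUPP;
        if (unknown)
                return false;

        unknown = le64toh(sbp->s_feature_compat_ro) & ~DEMO_FEATURE_COMPAT_RO_SUPP;
        if (unknown && !readonly)
                return false;

        return true;
}
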