Commit bc98a42c authored by David Howells

VFS: Convert sb->s_flags & MS_RDONLY to sb_rdonly(sb)

Firstly by applying the following with coccinelle's spatch:

	@@ expression SB; @@
	-SB->s_flags & MS_RDONLY
	+sb_rdonly(SB)

to effect the conversion to sb_rdonly(sb), then by applying:

	@@ expression A, SB; @@
	(
	-(!sb_rdonly(SB)) && A
	+!sb_rdonly(SB) && A
	|
	-A != (sb_rdonly(SB))
	+A != sb_rdonly(SB)
	|
	-A == (sb_rdonly(SB))
	+A == sb_rdonly(SB)
	|
	-!(sb_rdonly(SB))
	+!sb_rdonly(SB)
	|
	-A && (sb_rdonly(SB))
	+A && sb_rdonly(SB)
	|
	-A || (sb_rdonly(SB))
	+A || sb_rdonly(SB)
	|
	-(sb_rdonly(SB)) != A
	+sb_rdonly(SB) != A
	|
	-(sb_rdonly(SB)) == A
	+sb_rdonly(SB) == A
	|
	-(sb_rdonly(SB)) && A
	+sb_rdonly(SB) && A
	|
	-(sb_rdonly(SB)) || A
	+sb_rdonly(SB) || A
	)

	@@ expression A, B, SB; @@
	(
	-(sb_rdonly(SB)) ? 1 : 0
	+sb_rdonly(SB)
	|
	-(sb_rdonly(SB)) ? A : B
	+sb_rdonly(SB) ? A : B
	)

to remove left-over excess brackets, and finally by applying:

	@@ expression A, SB; @@
	(
	-(A & MS_RDONLY) != sb_rdonly(SB)
	+(bool)(A & MS_RDONLY) != sb_rdonly(SB)
	|
	-(A & MS_RDONLY) == sb_rdonly(SB)
	+(bool)(A & MS_RDONLY) == sb_rdonly(SB)
	)

to make comparisons against the result of sb_rdonly() (which is a bool)
work correctly.
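
For illustration only (not part of the change): sb_rdonly() is a one-line
inline helper in include/linux/fs.h that returns bool, roughly
"return sb->s_flags & MS_RDONLY;".  The (bool) cast added by the last rule
matters because a masked flag test evaluates to 0 or the flag's bit value,
not necessarily 0 or 1, so comparing it directly against a bool is only
safe when the flag happens to occupy bit 0.  The hypothetical userspace
sketch below (with a made-up flag value deliberately not at bit 0, unlike
the real MS_RDONLY) makes the hazard visible:

	/* Illustrative only; not kernel code.  EXAMPLE_RDONLY and
	 * example_sb_rdonly() are stand-ins for MS_RDONLY and sb_rdonly(). */
	#include <stdbool.h>
	#include <stdio.h>

	#define EXAMPLE_RDONLY 0x10	/* hypothetical flag, not bit 0 */

	struct example_sb { unsigned long s_flags; };

	static inline bool example_sb_rdonly(const struct example_sb *sb)
	{
		return sb->s_flags & EXAMPLE_RDONLY;	/* bool narrows to 0/1 */
	}

	int main(void)
	{
		struct example_sb sb = { .s_flags = EXAMPLE_RDONLY };
		unsigned long flags = EXAMPLE_RDONLY;	/* caller also asked for ro */

		/* Without the cast: 0x10 != 1, so a "change" is reported. */
		printf("no cast:   %d\n",
		       (flags & EXAMPLE_RDONLY) != example_sb_rdonly(&sb));

		/* With the cast both sides are 0 or 1 and compare equal. */
		printf("with cast: %d\n",
		       (bool)(flags & EXAMPLE_RDONLY) != example_sb_rdonly(&sb));
		return 0;
	}

Built with any C99 compiler, the first comparison prints 1 (a spurious
mismatch) and the second prints 0, which is why the remount-style checks
in the diff below gain the explicit cast.
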
Signed-off-by: David Howells <dhowells@redhat.com>
parent 94e92e7a
@@ -210,7 +210,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
 	data->ocd_ibits_known = MDS_INODELOCK_FULL;
 	data->ocd_version = LUSTRE_VERSION_CODE;
-	if (sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(sb))
 		data->ocd_connect_flags |= OBD_CONNECT_RDONLY;
 	if (sbi->ll_flags & LL_SBI_USER_XATTR)
 		data->ocd_connect_flags |= OBD_CONNECT_XATTR;
@@ -2033,7 +2033,7 @@ int ll_remount_fs(struct super_block *sb, int *flags, char *data)
 	int err;
 	__u32 read_only;
-	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
+	if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
 		read_only = *flags & MS_RDONLY;
 		err = obd_set_info_async(NULL, sbi->ll_md_exp,
 					 sizeof(KEY_READ_ONLY),
@@ -562,8 +562,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
 		}
 	}
-	if (it->it_op & IT_OPEN && it->it_flags & FMODE_WRITE &&
-	    dentry->d_sb->s_flags & MS_RDONLY)
+	if (it->it_op & IT_OPEN && it->it_flags & FMODE_WRITE && sb_rdonly(dentry->d_sb))
 		return ERR_PTR(-EROFS);
 	if (it->it_op & IT_CREAT)
@@ -450,7 +450,7 @@ affs_error(struct super_block *sb, const char *function, const char *fmt, ...)
 	vaf.fmt = fmt;
 	vaf.va = &args;
 	pr_crit("error (device %s): %s(): %pV\n", sb->s_id, function, &vaf);
-	if (!(sb->s_flags & MS_RDONLY))
+	if (!sb_rdonly(sb))
 		pr_warn("Remounting filesystem read-only\n");
 	sb->s_flags |= MS_RDONLY;
 	va_end(args);
@@ -19,7 +19,7 @@ affs_count_free_blocks(struct super_block *sb)
 	pr_debug("%s()\n", __func__);
-	if (sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(sb))
 		return 0;
 	mutex_lock(&AFFS_SB(sb)->s_bmlock);
@@ -80,7 +80,7 @@ void affs_mark_sb_dirty(struct super_block *sb)
 	struct affs_sb_info *sbi = AFFS_SB(sb);
 	unsigned long delay;
-	if (sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(sb))
 		return;
 	spin_lock(&sbi->work_lock);
@@ -464,7 +464,7 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
 	 * not recommended.
 	 */
 	if ((chksum == FS_DCFFS || chksum == MUFS_DCFFS || chksum == FS_DCOFS
-	    || chksum == MUFS_DCOFS) && !(sb->s_flags & MS_RDONLY)) {
+	    || chksum == MUFS_DCOFS) && !sb_rdonly(sb)) {
 		pr_notice("Dircache FS - mounting %s read only\n", sb->s_id);
 		sb->s_flags |= MS_RDONLY;
 	}
@@ -596,7 +596,7 @@ affs_remount(struct super_block *sb, int *flags, char *data)
 	memcpy(sbi->s_volume, volume, 32);
 	spin_unlock(&sbi->symlink_lock);
-	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
+	if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
 		return 0;
 	if (*flags & MS_RDONLY)
@@ -838,7 +838,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
 	befs_debug(sb, "---> %s", __func__);
-	if (!(sb->s_flags & MS_RDONLY)) {
+	if (!sb_rdonly(sb)) {
 		befs_warning(sb,
 			     "No write support. Marking filesystem read-only");
 		sb->s_flags |= MS_RDONLY;
@@ -690,7 +690,7 @@ static u64 __btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info)
 	u64 result;
 	int ret;
-	if (fs_info->sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(fs_info->sb))
 		return -EROFS;
 	mutex_lock(&dev_replace->lock_finishing_cancel_unmount);
@@ -2478,7 +2478,7 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
 		return ret;
 	}
-	if (fs_info->sb->s_flags & MS_RDONLY) {
+	if (sb_rdonly(fs_info->sb)) {
 		ret = btrfs_commit_super(fs_info);
 		if (ret)
 			return ret;
@@ -2874,7 +2874,7 @@ int open_ctree(struct super_block *sb,
 	features = btrfs_super_compat_ro_flags(disk_super) &
 		~BTRFS_FEATURE_COMPAT_RO_SUPP;
-	if (!(sb->s_flags & MS_RDONLY) && features) {
+	if (!sb_rdonly(sb) && features) {
 		btrfs_err(fs_info,
 	"cannot mount read-write because of unsupported optional features (%llx)",
 			features);
@@ -3039,7 +3039,7 @@ int open_ctree(struct super_block *sb,
 		btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
 	if (fs_info->fs_devices->missing_devices >
 	    fs_info->num_tolerated_disk_barrier_failures &&
-	    !(sb->s_flags & MS_RDONLY)) {
+	    !sb_rdonly(sb)) {
 		btrfs_warn(fs_info,
 	"missing devices (%llu) exceeds the limit (%d), writeable mount is not allowed",
 			fs_info->fs_devices->missing_devices,
@@ -3102,7 +3102,7 @@ int open_ctree(struct super_block *sb,
 	if (ret)
 		goto fail_qgroup;
-	if (!(sb->s_flags & MS_RDONLY)) {
+	if (!sb_rdonly(sb)) {
 		ret = btrfs_cleanup_fs_roots(fs_info);
 		if (ret)
 			goto fail_qgroup;
@@ -3128,7 +3128,7 @@ int open_ctree(struct super_block *sb,
 			goto fail_qgroup;
 	}
-	if (sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(sb))
 		return 0;
 	if (btrfs_test_opt(fs_info, CLEAR_CACHE) &&
@@ -3928,7 +3928,7 @@ void close_ctree(struct btrfs_fs_info *fs_info)
 	cancel_work_sync(&fs_info->async_reclaim_work);
-	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
+	if (!sb_rdonly(fs_info->sb)) {
 		/*
 		 * If the cleaner thread is stopped and there are
 		 * block groups queued for removal, the deletion will be
@@ -2061,7 +2061,7 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
 	unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
 	int ret = 0;
-	if (fs_info->sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(fs_info->sb))
 		return -EROFS;
 	for (i = 0; i < num_pages; i++) {
@@ -2111,7 +2111,7 @@ int clean_io_failure(struct btrfs_fs_info *fs_info,
 			 failrec->start);
 		goto out;
 	}
-	if (fs_info->sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(fs_info->sb))
 		goto out;
 	spin_lock(&io_tree->lock);
@@ -5817,7 +5817,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
 	if (!IS_ERR(inode) && root != sub_root) {
 		down_read(&fs_info->cleanup_work_sem);
-		if (!(inode->i_sb->s_flags & MS_RDONLY))
+		if (!sb_rdonly(inode->i_sb))
 			ret = btrfs_orphan_cleanup(sub_root);
 		up_read(&fs_info->cleanup_work_sem);
 		if (ret) {
@@ -4438,7 +4438,7 @@ static long btrfs_ioctl_dev_replace(struct btrfs_fs_info *fs_info,
 	switch (p->cmd) {
 	case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
-		if (fs_info->sb->s_flags & MS_RDONLY) {
+		if (sb_rdonly(fs_info->sb)) {
 			ret = -EROFS;
 			goto out;
 		}
@@ -228,7 +228,7 @@ int btrfs_find_orphan_roots(struct btrfs_fs_info *fs_info)
 	int ret;
 	bool can_recover = true;
-	if (fs_info->sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(fs_info->sb))
 		can_recover = false;
 	path = btrfs_alloc_path();
@@ -102,7 +102,7 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
 {
 	struct super_block *sb = fs_info->sb;
-	if (sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(sb))
 		return;
 	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
@@ -138,7 +138,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
 	 * Special case: if the error is EROFS, and we're already
 	 * under MS_RDONLY, then it is safe here.
 	 */
-	if (errno == -EROFS && (sb->s_flags & MS_RDONLY))
+	if (errno == -EROFS && sb_rdonly(sb))
 		return;
 #ifdef CONFIG_PRINTK
@@ -1687,8 +1687,7 @@ static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
 	 * close or the filesystem is read only.
 	 */
 	if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
-	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
-	     (fs_info->sb->s_flags & MS_RDONLY))) {
+	    (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) || sb_rdonly(fs_info->sb))) {
 		btrfs_cleanup_defrag_inodes(fs_info);
 	}
@@ -1735,7 +1734,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 	btrfs_resize_thread_pool(fs_info,
 		fs_info->thread_pool_size, old_thread_pool_size);
-	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
+	if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
 		goto out;
 	if (*flags & MS_RDONLY) {
@@ -1835,7 +1834,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 restore:
 	/* We've hit an error - don't reset MS_RDONLY */
-	if (sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(sb))
 		old_flags |= MS_RDONLY;
 	sb->s_flags = old_flags;
 	fs_info->mount_opt = old_opts;
@@ -120,7 +120,7 @@ static ssize_t btrfs_feature_attr_store(struct kobject *kobj,
 	if (!fs_info)
 		return -EPERM;
-	if (fs_info->sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(fs_info->sb))
 		return -EROFS;
 	ret = kstrtoul(skip_spaces(buf), 0, &val);
@@ -388,7 +388,7 @@ static ssize_t btrfs_label_store(struct kobject *kobj,
 	if (!fs_info)
 		return -EPERM;
-	if (fs_info->sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(fs_info->sb))
 		return -EROFS;
 	/*
@@ -2337,7 +2337,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
 	int seeding_dev = 0;
 	int ret = 0;
-	if ((sb->s_flags & MS_RDONLY) && !fs_info->fs_devices->seeding)
+	if (sb_rdonly(sb) && !fs_info->fs_devices->seeding)
 		return -EROFS;
 	bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
@@ -4085,7 +4085,7 @@ int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
 {
-	if (fs_info->sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(fs_info->sb))
 		return -EROFS;
 	mutex_lock(&fs_info->balance_mutex);
@@ -133,7 +133,7 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
 		goto error_unsupported;
 	ret = -EROFS;
-	if (root->d_sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(root->d_sb))
 		goto error_unsupported;
 	/* determine the security of the on-disk cache as this governs
@@ -568,8 +568,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
 	 * 1) The lower mount is ro
 	 * 2) The ecryptfs_encrypted_view mount option is specified
 	 */
-	if (path.dentry->d_sb->s_flags & MS_RDONLY ||
-	    mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
+	if (sb_rdonly(path.dentry->d_sb) || mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
 		s->s_flags |= MS_RDONLY;
 	s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
@@ -306,7 +306,7 @@ static int efs_fill_super(struct super_block *s, void *d, int silent)
 	}
 	brelse(bh);
-	if (!(s->s_flags & MS_RDONLY)) {
+	if (!sb_rdonly(s)) {
 #ifdef DEBUG
 		pr_info("forcing read-only mode\n");
 #endif
@@ -52,7 +52,7 @@ void ext2_error(struct super_block *sb, const char *function,
 	struct ext2_sb_info *sbi = EXT2_SB(sb);
 	struct ext2_super_block *es = sbi->s_es;
-	if (!(sb->s_flags & MS_RDONLY)) {
+	if (!sb_rdonly(sb)) {
 		spin_lock(&sbi->s_lock);
 		sbi->s_mount_state |= EXT2_ERROR_FS;
 		es->s_state |= cpu_to_le16(EXT2_ERROR_FS);
@@ -151,7 +151,7 @@ static void ext2_put_super (struct super_block * sb)
 		ext2_xattr_destroy_cache(sbi->s_ea_block_cache);
 		sbi->s_ea_block_cache = NULL;
 	}
-	if (!(sb->s_flags & MS_RDONLY)) {
+	if (!sb_rdonly(sb)) {
 		struct ext2_super_block *es = sbi->s_es;
 		spin_lock(&sbi->s_lock);
@@ -940,8 +940,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
 			le32_to_cpu(features));
 		goto failed_mount;
 	}
-	if (!(sb->s_flags & MS_RDONLY) &&
-	    (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
+	if (!sb_rdonly(sb) && (features = EXT2_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP))){
 		ext2_msg(sb, KERN_ERR, "error: couldn't mount RDWR because of "
 			"unsupported optional features (%x)",
 			le32_to_cpu(features));
@@ -1170,7 +1169,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
 	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
 		ext2_msg(sb, KERN_WARNING,
 			"warning: mounting ext3 filesystem as ext2");
-	if (ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY))
+	if (ext2_setup_super (sb, es, sb_rdonly(sb)))
 		sb->s_flags |= MS_RDONLY;
 	ext2_write_super(sb);
 	return 0;
@@ -1301,7 +1300,7 @@ static int ext2_unfreeze(struct super_block *sb)
 static void ext2_write_super(struct super_block *sb)
 {
-	if (!(sb->s_flags & MS_RDONLY))
+	if (!sb_rdonly(sb))
 		ext2_sync_fs(sb, 1);
 }
@@ -1339,7 +1338,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
 			"dax flag with busy inodes while remounting");
 		sbi->s_mount_opt ^= EXT2_MOUNT_DAX;
 	}
-	if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
+	if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb)) {
 		spin_unlock(&sbi->s_lock);
 		return 0;
 	}
@@ -47,7 +47,7 @@ static int ext4_journal_check_start(struct super_block *sb)
 	if (unlikely(ext4_forced_shutdown(EXT4_SB(sb))))
 		return -EIO;
-	if (sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(sb))
 		return -EROFS;
 	WARN_ON(sb->s_writers.frozen == SB_FREEZE_COMPLETE);
 	journal = EXT4_SB(sb)->s_journal;
@@ -388,7 +388,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
 		return -EIO;
 	if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) &&
-		     !(sb->s_flags & MS_RDONLY))) {
+		     !sb_rdonly(sb))) {
 		sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED;
 		/*
 		 * Sample where the filesystem has been mounted and
@@ -107,7 +107,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	trace_ext4_sync_file_enter(file, datasync);
-	if (inode->i_sb->s_flags & MS_RDONLY) {
+	if (sb_rdonly(inode->i_sb)) {
 		/* Make sure that we read updated s_mount_flags value */
 		smp_rmb();
 		if (EXT4_SB(inode->i_sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
@@ -1355,7 +1355,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
 	int num, ret = 0, used_blks = 0;
 	/* This should not happen, but just to be sure check this */
-	if (sb->s_flags & MS_RDONLY) {
+	if (sb_rdonly(sb)) {
 		ret = 1;
 		goto out;
 	}
@@ -185,7 +185,7 @@ static int kmmpd(void *data)
 		goto exit_thread;
 	}
-	if (sb->s_flags & MS_RDONLY) {
+	if (sb_rdonly(sb)) {
 		ext4_warning(sb, "kmmpd being stopped since filesystem "
 			     "has been remounted as readonly.");
 		goto exit_thread;
@@ -405,7 +405,7 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
 static void ext4_handle_error(struct super_block *sb)
 {
-	if (sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(sb))
 		return;
 	if (!test_opt(sb, ERRORS_CONT)) {
@@ -587,8 +587,7 @@ void __ext4_std_error(struct super_block *sb, const char *function,
 	/* Special case: if the error is EROFS, and we're not already
 	 * inside a transaction, then there's really no point in logging
 	 * an error. */
-	if (errno == -EROFS && journal_current_handle() == NULL &&
-	    (sb->s_flags & MS_RDONLY))
+	if (errno == -EROFS && journal_current_handle() == NULL && sb_rdonly(sb))
 		return;
 	if (ext4_error_ratelimit(sb)) {
@@ -628,7 +627,7 @@ void __ext4_abort(struct super_block *sb, const char *function,
 		sb->s_id, function, line, &vaf);
 	va_end(args);
-	if ((sb->s_flags & MS_RDONLY) == 0) {
+	if (sb_rdonly(sb) == 0) {
 		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
 		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
 		/*
@@ -889,11 +888,11 @@ static void ext4_put_super(struct super_block *sb)
 	ext4_mb_release(sb);
 	ext4_ext_release(sb);
-	if (!(sb->s_flags & MS_RDONLY) && !aborted) {
+	if (!sb_rdonly(sb) && !aborted) {
 		ext4_clear_feature_journal_needs_recovery(sb);
 		es->s_state = cpu_to_le16(sbi->s_mount_state);
 	}
-	if (!(sb->s_flags & MS_RDONLY))
+	if (!sb_rdonly(sb))
 		ext4_commit_super(sb, 1);
 	for (i = 0; i < sbi->s_gdb_count; i++)
@@ -2101,7 +2100,7 @@ int ext4_seq_options_show(struct seq_file *seq, void *offset)
 	struct super_block *sb = seq->private;
 	int rc;
-	seq_puts(seq, (sb->s_flags & MS_RDONLY) ? "ro" : "rw");
+	seq_puts(seq, sb_rdonly(sb) ? "ro" : "rw");
 	rc = _ext4_show_options(seq, sb, 1);
 	seq_puts(seq, "\n");
 	return rc;
@@ -2369,7 +2368,7 @@ static int ext4_check_descriptors(struct super_block *sb,
 				 "Checksum for group %u failed (%u!=%u)",
 				 i, le16_to_cpu(ext4_group_desc_csum(sb, i,
 						gdp)), le16_to_cpu(gdp->bg_checksum));
-			if (!(sb->s_flags & MS_RDONLY)) {
+			if (!sb_rdonly(sb)) {
 				ext4_unlock_group(sb, i);
 				return 0;
 			}
@@ -3116,8 +3115,7 @@ int ext4_register_li_request(struct super_block *sb,
 		goto out;
 	}
-	if (first_not_zeroed == ngroups ||
-	    (sb->s_flags & MS_RDONLY) ||
+	if (first_not_zeroed == ngroups || sb_rdonly(sb) ||
 	    !test_opt(sb, INIT_INODE_TABLE))
 		goto out;
@@ -3661,7 +3659,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	 * previously didn't change the revision level when setting the flags,
 	 * so there is a chance incompat flags are set on a rev 0 filesystem.
 	 */
-	if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
+	if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
 		goto failed_mount;
 	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
@@ -3790,12 +3788,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		sbi->s_hash_unsigned = 3;
 	else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
 #ifdef __CHAR_UNSIGNED__
-		if (!(sb->s_flags & MS_RDONLY))
+		if (!sb_rdonly(sb))
 			es->s_flags |=
 				cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
 		sbi->s_hash_unsigned = 3;
 #else
-		if (!(sb->s_flags & MS_RDONLY))
+		if (!sb_rdonly(sb))
 			es->s_flags |=
 				cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
 #endif
@@ -3995,7 +3993,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	needs_recovery = (es->s_last_orphan != 0 ||
 			  ext4_has_feature_journal_needs_recovery(sb));
-	if (ext4_has_feature_mmp(sb) && !(sb->s_flags & MS_RDONLY))
+	if (ext4_has_feature_mmp(sb) && !sb_rdonly(sb))
 		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
 			goto failed_mount3a;
@@ -4007,7 +4005,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		err = ext4_load_journal(sb, es, journal_devnum);
 		if (err)
 			goto failed_mount3a;
-	} else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
+	} else if (test_opt(sb, NOLOAD) && !sb_rdonly(sb) &&
 		   ext4_has_feature_journal_needs_recovery(sb)) {
 		ext4_msg(sb, KERN_ERR, "required journal recovery "
 			 "suppressed and not mounted read-only");
@@ -4121,7 +4119,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		goto failed_mount_wq;
 	}
-	if (DUMMY_ENCRYPTION_ENABLED(sbi) && !(sb->s_flags & MS_RDONLY) &&
+	if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
 	    !ext4_has_feature_encrypt(sb)) {
 		ext4_set_feature_encrypt(sb);
 		ext4_commit_super(sb, 1);
@@ -4175,7 +4173,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		goto failed_mount4;
 	}
-	if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
+	if (ext4_setup_super(sb, es, sb_rdonly(sb)))
 		sb->s_flags |= MS_RDONLY;
 	/* determine the minimum size of new large inodes, if present */
@@ -4263,7 +4261,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 #ifdef CONFIG_QUOTA
 	/* Enable quota usage during mount. */
-	if (ext4_has_feature_quota(sb) && !(sb->s_flags & MS_RDONLY)) {
+	if (ext4_has_feature_quota(sb) && !sb_rdonly(sb)) {
 		err = ext4_enable_quotas(sb);
 		if (err)
 			goto failed_mount8;
@@ -4586,7 +4584,7 @@ static int ext4_load_journal(struct super_block *sb,
 	 * can get read-write access to the device.
 	 */
 	if (ext4_has_feature_journal_needs_recovery(sb)) {
-		if (sb->s_flags & MS_RDONLY) {
+		if (sb_rdonly(sb)) {
 			ext4_msg(sb, KERN_INFO, "INFO: recovery "
 					"required on readonly filesystem");
 			if (really_read_only) {
@@ -4741,8 +4739,7 @@ static void ext4_mark_recovery_complete(struct super_block *sb,
 	if (jbd2_journal_flush(journal) < 0)
 		goto out;
-	if (ext4_has_feature_journal_needs_recovery(sb) &&
-	    sb->s_flags & MS_RDONLY) {
+	if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
 		ext4_clear_feature_journal_needs_recovery(sb);
 		ext4_commit_super(sb, 1);
 	}
@@ -4798,7 +4795,7 @@ int ext4_force_commit(struct super_block *sb)
 {
 	journal_t *journal;
-	if (sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(sb))
 		return 0;
 	journal = EXT4_SB(sb)->s_journal;
@@ -4863,7 +4860,7 @@ static int ext4_freeze(struct super_block *sb)
 	int error = 0;
 	journal_t *journal;
-	if (sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(sb))
 		return 0;
 	journal = EXT4_SB(sb)->s_journal;
@@ -4898,7 +4895,7 @@ static int ext4_freeze(struct super_block *sb)
  */
 static int ext4_unfreeze(struct super_block *sb)
 {
-	if ((sb->s_flags & MS_RDONLY) || ext4_forced_shutdown(EXT4_SB(sb)))
+	if (sb_rdonly(sb) || ext4_forced_shutdown(EXT4_SB(sb)))
 		return 0;
 	if (EXT4_SB(sb)->s_journal) {
@@ -5036,7 +5033,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 	if (*flags & MS_LAZYTIME)
 		sb->s_flags |= MS_LAZYTIME;
-	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY)) {
+	if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
 		if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
 			err = -EROFS;
 			goto restore_opts;
@@ -5131,7 +5128,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
 	 * Reinitialize lazy itable initialization thread based on
 	 * current settings
 	 */
-	if ((sb->s_flags & MS_RDONLY) || !test_opt(sb, INIT_INODE_TABLE))
+	if (sb_rdonly(sb) || !test_opt(sb, INIT_INODE_TABLE))
 		ext4_unregister_li_request(sb);
 	else {
 		ext4_group_t first_not_zeroed;
@@ -5703,7 +5700,7 @@ static inline int ext2_feature_set_ok(struct super_block *sb)
 {
 	if (ext4_has_unknown_ext2_incompat_features(sb))
 		return 0;
-	if (sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(sb))
 		return 1;
 	if (ext4_has_unknown_ext2_ro_compat_features(sb))
 		return 0;
@@ -5734,7 +5731,7 @@ static inline int ext3_feature_set_ok(struct super_block *sb)
 		return 0;
 	if (!ext4_has_feature_journal(sb))
 		return 0;
-	if (sb->s_flags & MS_RDONLY)
+	if (sb_rdonly(sb))
 		return 1;
 	if (ext4_has_unknown_ext3_ro_compat_features(sb))
 		return 0;
@@ -309,7 +309,7 @@ static void mark_fsinfo_dirty(struct super_block *sb)
 {
 	struct msdos_sb_info *sbi = MSDOS_SB(sb);
-	if (sb->s_flags & MS_RDONLY || sbi->fat_bits != 32)
+	if (sb_rdonly(sb) || sbi->fat_bits != 32)
 		return;
 	__mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
@@ -657,7 +657,7 @@ static void fat_set_state(struct super_block *sb,
 	struct msdos_sb_info *sbi = MSDOS_SB(sb);
 	/* do not change any thing if mounted read only */
-	if ((sb->s_flags & MS_RDONLY) && !force)
+	if (sb_rdonly(sb) && !force)
 		return;
 	/* do not change state if fs was dirty */
@@ -787,7 +787,7 @@ static int fat_remount(struct super_block *sb, int *flags, char *data)
 	/* make sure we update state on remount. */
 	new_rdonly = *flags & MS_RDONLY;
-	if (new_rdonly != (sb->s_flags & MS_RDONLY)) {
+	if (new_rdonly != sb_rdonly(sb)) {
 		if (new_rdonly)
 			fat_set_state(sb, 0, 0);
 		else
@@ -32,7 +32,7 @@ void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...)
 	if (opts->errors == FAT_ERRORS_PANIC)
 		panic("FAT-fs (%s): fs panic from previous error\n", sb->s_id);
-	else if (opts->errors == FAT_ERRORS_RO && !(sb->s_flags & MS_RDONLY)) {
+	else if (opts->errors == FAT_ERRORS_RO && !sb_rdonly(sb)) {
 		sb->s_flags |= MS_RDONLY;
 		fat_msg(sb, KERN_ERR, "Filesystem has been set read-only");
 	}
@@ -580,7 +580,7 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 	struct gfs2_inode *ip = gl->gl_object;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
+	if (!remote || sb_rdonly(sdp->sd_vfs))
 		return;
 	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
@@ -1037,7 +1037,7 @@ void gfs2_online_uevent(struct gfs2_sbd *sdp)
 	char ro[20];
 	char spectator[20];
 	char *envp[] = { ro, spectator, NULL };
-	sprintf(ro, "RDONLY=%d", (sb->s_flags & MS_RDONLY) ? 1 : 0);
+	sprintf(ro, "RDONLY=%d", sb_rdonly(sb));
 	sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);
 	kobject_uevent_env(&sdp->sd_kobj, KOBJ_ONLINE, envp);
 }
@@ -1179,7 +1179,7 @@ static int fill_super(struct super_block *sb, struct gfs2_args *args, int silent
 		goto fail_per_node;
 	}
-	if (!(sb->s_flags & MS_RDONLY)) {
+	if (!sb_rdonly(sb)) {
 		error = gfs2_make_fs_rw(sdp);
 		if (error) {
 			fs_err(sdp, "can't make FS RW: %d\n", error);
@@ -452,7 +452,7 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
 	*qdp = NULL;
-	if (sdp->sd_vfs->s_flags & MS_RDONLY)
+	if (sb_rdonly(sdp->sd_vfs))
 		return 0;
 	spin_lock(&qd_lock);