// SPDX-License-Identifier: GPL-2.0
/*
* fs/f2fs/super.c
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/sched/mm.h>
#include <linux/statfs.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>
#include <linux/quota.h>
#include <linux/unicode.h>
#include <linux/part_stat.h>
#include <linux/zstd.h>
#include <linux/lz4.h>
#include <linux/ctype.h>
#include <linux/fs_parser.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "iostat.h"
#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
static struct kmem_cache *f2fs_inode_cachep;
#ifdef CONFIG_F2FS_FAULT_INJECTION
const char *f2fs_fault_name[FAULT_MAX] = {
[FAULT_KMALLOC] =
"kmalloc",
[FAULT_KVMALLOC] =
"kvmalloc",
[FAULT_PAGE_ALLOC] =
"page alloc",
[FAULT_PAGE_GET] =
"page get",
[FAULT_ALLOC_BIO] =
"alloc bio(obsolete)",
[FAULT_ALLOC_NID] =
"alloc nid",
[FAULT_ORPHAN] =
"orphan",
[FAULT_BLOCK] =
"no more block",
[FAULT_DIR_DEPTH] =
"too big dir depth",
[FAULT_EVICT_INODE] =
"evict_inode fail",
[FAULT_TRUNCATE] =
"truncate fail",
[FAULT_READ_IO] =
"read IO error",
[FAULT_CHECKPOINT] =
"checkpoint error",
[FAULT_DISCARD] =
"discard error",
[FAULT_WRITE_IO] =
"write IO error",
[FAULT_SLAB_ALLOC] =
"slab alloc",
[FAULT_DQUOT_INIT] =
"dquot initialize",
[FAULT_LOCK_OP] =
"lock_op",
[FAULT_BLKADDR_VALIDITY] =
"invalid blkaddr",
[FAULT_BLKADDR_CONSISTENCE] =
"inconsistent blkaddr",
[FAULT_NO_SEGMENT] =
"no free segment",
[FAULT_INCONSISTENT_FOOTER] =
"inconsistent footer",
[FAULT_TIMEOUT] =
"timeout",
[FAULT_VMALLOC] =
"vmalloc",
};
int f2fs_build_fault_attr(
struct f2fs_sb_info *sbi,
unsigned long rate,
unsigned long type,
enum fault_option fo)
{
struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
if (fo & FAULT_ALL) {
memset(ffi, 0,
sizeof(
struct f2fs_fault_info));
return 0;
}
if (fo & FAULT_RATE) {
if (rate > INT_MAX)
return -EINVAL;
atomic_set(&ffi->inject_ops, 0);
ffi->inject_rate = (
int)rate;
f2fs_info(sbi,
"build fault injection rate: %lu", rate);
}
if (fo & FAULT_TYPE) {
if (type >= BIT(FAULT_MAX))
return -EINVAL;
ffi->inject_type = (
unsigned int)type;
f2fs_info(sbi,
"build fault injection type: 0x%lx", type);
}
return 0;
}
#endif
/* f2fs-wide shrinker description */
static struct shrinker *f2fs_shrinker_info;
static int __init f2fs_init_shrinker(
void)
{
f2fs_shrinker_info = shrinker_alloc(0,
"f2fs-shrinker");
if (!f2fs_shrinker_info)
return -ENOMEM;
f2fs_shrinker_info->count_objects = f2fs_shrink_count;
f2fs_shrinker_info->scan_objects = f2fs_shrink_scan;
shrinker_register(f2fs_shrinker_info);
return 0;
}
static void f2fs_exit_shrinker(
void)
{
shrinker_free(f2fs_shrinker_info);
}
enum {
Opt_gc_background,
Opt_disable_roll_forward,
Opt_norecovery,
Opt_discard,
Opt_noheap,
Opt_heap,
Opt_user_xattr,
Opt_acl,
Opt_active_logs,
Opt_disable_ext_identify,
Opt_inline_xattr,
Opt_inline_xattr_size,
Opt_inline_data,
Opt_inline_dentry,
Opt_flush_merge,
Opt_barrier,
Opt_fastboot,
Opt_extent_cache,
Opt_data_flush,
Opt_reserve_root,
Opt_resgid,
Opt_resuid,
Opt_mode,
Opt_fault_injection,
Opt_fault_type,
Opt_lazytime,
Opt_quota,
Opt_usrquota,
Opt_grpquota,
Opt_prjquota,
Opt_usrjquota,
Opt_grpjquota,
Opt_prjjquota,
Opt_alloc,
Opt_fsync,
Opt_test_dummy_encryption,
Opt_inlinecrypt,
Opt_checkpoint_disable,
Opt_checkpoint_disable_cap,
Opt_checkpoint_disable_cap_perc,
Opt_checkpoint_enable,
Opt_checkpoint_merge,
Opt_compress_algorithm,
Opt_compress_log_size,
Opt_nocompress_extension,
Opt_compress_extension,
Opt_compress_chksum,
Opt_compress_mode,
Opt_compress_cache,
Opt_atgc,
Opt_gc_merge,
Opt_discard_unit,
Opt_memory_mode,
Opt_age_extent_cache,
Opt_errors,
Opt_nat_bits,
Opt_jqfmt,
Opt_checkpoint,
Opt_err,
};
static const struct constant_table f2fs_param_background_gc[] = {
{
"on", BGGC_MODE_ON},
{
"off", BGGC_MODE_OFF},
{
"sync", BGGC_MODE_SYNC},
{}
};
static const struct constant_table f2fs_param_mode[] = {
{
"adaptive", FS_MODE_ADAPTIVE},
{
"lfs", FS_MODE_LFS},
{
"fragment:segment", FS_MODE_FRAGMENT_SEG},
{
"fragment:block", FS_MODE_FRAGMENT_BLK},
{}
};
static const struct constant_table f2fs_param_jqfmt[] = {
{
"vfsold", QFMT_VFS_OLD},
{
"vfsv0", QFMT_VFS_V0},
{
"vfsv1", QFMT_VFS_V1},
{}
};
static const struct constant_table f2fs_param_alloc_mode[] = {
{
"default", ALLOC_MODE_DEFAULT},
{
"reuse", ALLOC_MODE_REUSE},
{}
};
static const struct constant_table f2fs_param_fsync_mode[] = {
{
"posix", FSYNC_MODE_POSIX},
{
"strict", FSYNC_MODE_STRICT},
{
"nobarrier", FSYNC_MODE_NOBARRIER},
{}
};
static const struct constant_table f2fs_param_compress_mode[] = {
{
"fs", COMPR_MODE_FS},
{
"user", COMPR_MODE_USER},
{}
};
static const struct constant_table f2fs_param_discard_unit[] = {
{
"block", DISCARD_UNIT_BLOCK},
{
"segment", DISCARD_UNIT_SEGMENT},
{
"section", DISCARD_UNIT_SECTION},
{}
};
static const struct constant_table f2fs_param_memory_mode[] = {
{
"normal", MEMORY_MODE_NORMAL},
{
"low", MEMORY_MODE_LOW},
{}
};
static const struct constant_table f2fs_param_errors[] = {
{
"remount-ro", MOUNT_ERRORS_READONLY},
{
"continue", MOUNT_ERRORS_CONTINUE},
{
"panic", MOUNT_ERRORS_PANIC},
{}
};
static const struct fs_parameter_spec f2fs_param_specs[] = {
fsparam_enum(
"background_gc", Opt_gc_background, f2fs_param_background_gc),
fsparam_flag(
"disable_roll_forward", Opt_disable_roll_forward),
fsparam_flag(
"norecovery", Opt_norecovery),
fsparam_flag_no(
"discard", Opt_discard),
fsparam_flag(
"no_heap", Opt_noheap),
fsparam_flag(
"heap", Opt_heap),
fsparam_flag_no(
"user_xattr", Opt_user_xattr),
fsparam_flag_no(
"acl", Opt_acl),
fsparam_s32(
"active_logs", Opt_active_logs),
fsparam_flag(
"disable_ext_identify", Opt_disable_ext_identify),
fsparam_flag_no(
"inline_xattr", Opt_inline_xattr),
fsparam_s32(
"inline_xattr_size", Opt_inline_xattr_size),
fsparam_flag_no(
"inline_data", Opt_inline_data),
fsparam_flag_no(
"inline_dentry", Opt_inline_dentry),
fsparam_flag_no(
"flush_merge", Opt_flush_merge),
fsparam_flag_no(
"barrier", Opt_barrier),
fsparam_flag(
"fastboot", Opt_fastboot),
fsparam_flag_no(
"extent_cache", Opt_extent_cache),
fsparam_flag(
"data_flush", Opt_data_flush),
fsparam_u32(
"reserve_root", Opt_reserve_root),
fsparam_gid(
"resgid", Opt_resgid),
fsparam_uid(
"resuid", Opt_resuid),
fsparam_enum(
"mode", Opt_mode, f2fs_param_mode),
fsparam_s32(
"fault_injection", Opt_fault_injection),
fsparam_u32(
"fault_type", Opt_fault_type),
fsparam_flag_no(
"lazytime", Opt_lazytime),
fsparam_flag_no(
"quota", Opt_quota),
fsparam_flag(
"usrquota", Opt_usrquota),
fsparam_flag(
"grpquota", Opt_grpquota),
fsparam_flag(
"prjquota", Opt_prjquota),
fsparam_string_empty(
"usrjquota", Opt_usrjquota),
fsparam_string_empty(
"grpjquota", Opt_grpjquota),
fsparam_string_empty(
"prjjquota", Opt_prjjquota),
fsparam_flag(
"nat_bits", Opt_nat_bits),
fsparam_enum(
"jqfmt", Opt_jqfmt, f2fs_param_jqfmt),
fsparam_enum(
"alloc_mode", Opt_alloc, f2fs_param_alloc_mode),
fsparam_enum(
"fsync_mode", Opt_fsync, f2fs_param_fsync_mode),
fsparam_string(
"test_dummy_encryption", Opt_test_dummy_encryption),
fsparam_flag(
"test_dummy_encryption", Opt_test_dummy_encryption),
fsparam_flag(
"inlinecrypt", Opt_inlinecrypt),
fsparam_string(
"checkpoint", Opt_checkpoint),
fsparam_flag_no(
"checkpoint_merge", Opt_checkpoint_merge),
fsparam_string(
"compress_algorithm", Opt_compress_algorithm),
fsparam_u32(
"compress_log_size", Opt_compress_log_size),
fsparam_string(
"compress_extension", Opt_compress_extension),
fsparam_string(
"nocompress_extension", Opt_nocompress_extension),
fsparam_flag(
"compress_chksum", Opt_compress_chksum),
fsparam_enum(
"compress_mode", Opt_compress_mode, f2fs_param_compress_mode),
fsparam_flag(
"compress_cache", Opt_compress_cache),
fsparam_flag(
"atgc", Opt_atgc),
fsparam_flag_no(
"gc_merge", Opt_gc_merge),
fsparam_enum(
"discard_unit", Opt_discard_unit, f2fs_param_discard_unit),
fsparam_enum(
"memory", Opt_memory_mode, f2fs_param_memory_mode),
fsparam_flag(
"age_extent_cache", Opt_age_extent_cache),
fsparam_enum(
"errors", Opt_errors, f2fs_param_errors),
{}
};
/* Resort to a match_table for this interestingly formatted option */
static match_table_t f2fs_checkpoint_tokens = {
{Opt_checkpoint_disable,
"disable"},
{Opt_checkpoint_disable_cap,
"disable:%u"},
{Opt_checkpoint_disable_cap_perc,
"disable:%u%%"},
{Opt_checkpoint_enable,
"enable"},
{Opt_err, NULL},
};
#define F2FS_SPEC_background_gc (1 << 0)
#define F2FS_SPEC_inline_xattr_size (1 << 1)
#define F2FS_SPEC_active_logs (1 << 2)
#define F2FS_SPEC_reserve_root (1 << 3)
#define F2FS_SPEC_resgid (1 << 4)
#define F2FS_SPEC_resuid (1 << 5)
#define F2FS_SPEC_mode (1 << 6)
#define F2FS_SPEC_fault_injection (1 << 7)
#define F2FS_SPEC_fault_type (1 << 8)
#define F2FS_SPEC_jqfmt (1 << 9)
#define F2FS_SPEC_alloc_mode (1 << 10)
#define F2FS_SPEC_fsync_mode (1 << 11)
#define F2FS_SPEC_checkpoint_disable_cap (1 << 12)
#define F2FS_SPEC_checkpoint_disable_cap_perc (1 << 13)
#define F2FS_SPEC_compress_level (1 << 14)
#define F2FS_SPEC_compress_algorithm (1 << 15)
#define F2FS_SPEC_compress_log_size (1 << 16)
#define F2FS_SPEC_compress_extension (1 << 17)
#define F2FS_SPEC_nocompress_extension (1 << 18)
#define F2FS_SPEC_compress_chksum (1 << 19)
#define F2FS_SPEC_compress_mode (1 << 20)
#define F2FS_SPEC_discard_unit (1 << 21)
#define F2FS_SPEC_memory_mode (1 << 22)
#define F2FS_SPEC_errors (1 << 23)
struct f2fs_fs_context {
struct f2fs_mount_info info;
unsigned int opt_mask;
/* Bits changed */
unsigned int spec_mask;
unsigned short qname_mask;
};
#define F2FS_CTX_INFO(ctx) ((ctx)->info)
static inline void ctx_set_opt(
struct f2fs_fs_context *ctx,
unsigned int flag)
{
ctx->info.opt |= flag;
ctx->opt_mask |= flag;
}
static inline void ctx_clear_opt(
struct f2fs_fs_context *ctx,
unsigned int flag)
{
ctx->info.opt &= ~flag;
ctx->opt_mask |= flag;
}
static inline bool ctx_test_opt(
struct f2fs_fs_context *ctx,
unsigned int flag)
{
return ctx->info.opt & flag;
}
void f2fs_printk(
struct f2fs_sb_info *sbi,
bool limit_rate,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
int level;
va_start(args, fmt);
level = printk_get_level(fmt);
vaf.fmt = printk_skip_level(fmt);
vaf.va = &args;
if (limit_rate)
if (sbi)
printk_ratelimited(
"%c%cF2FS-fs (%s): %pV\n",
KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
else
printk_ratelimited(
"%c%cF2FS-fs: %pV\n",
KERN_SOH_ASCII, level, &vaf);
else
if (sbi)
printk(
"%c%cF2FS-fs (%s): %pV\n",
KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
else
printk(
"%c%cF2FS-fs: %pV\n",
KERN_SOH_ASCII, level, &vaf);
va_end(args);
}
#if IS_ENABLED(CONFIG_UNICODE)
static const struct f2fs_sb_encodings {
__u16 magic;
char *name;
unsigned int version;
} f2fs_sb_encoding_map[] = {
{F2FS_ENC_UTF8_12_1,
"utf8", UNICODE_AGE(12, 1, 0)},
};
static const struct f2fs_sb_encodings *
f2fs_sb_read_encoding(
const struct f2fs_super_block *sb)
{
__u16 magic = le16_to_cpu(sb->s_encoding);
int i;
for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
if (magic == f2fs_sb_encoding_map[i].magic)
return &f2fs_sb_encoding_map[i];
return NULL;
}
struct kmem_cache *f2fs_cf_name_slab;
static int __init f2fs_create_casefold_cache(
void)
{
f2fs_cf_name_slab = f2fs_kmem_cache_create(
"f2fs_casefolded_name",
F2FS_NAME_LEN);
return f2fs_cf_name_slab ? 0 : -ENOMEM;
}
static void f2fs_destroy_casefold_cache(
void)
{
kmem_cache_destroy(f2fs_cf_name_slab);
}
#else
static int __init f2fs_create_casefold_cache(
void) {
return 0; }
static void f2fs_destroy_casefold_cache(
void) { }
#endif
static inline void limit_reserve_root(
struct f2fs_sb_info *sbi)
{
block_t limit = min((sbi->user_block_count >> 3),
sbi->user_block_count - sbi->reserved_blocks);
/* limit is 12.5% */
if (test_opt(sbi, RESERVE_ROOT) &&
F2FS_OPTION(sbi).root_reserved_blocks > limit) {
F2FS_OPTION(sbi).root_reserved_blocks = limit;
f2fs_info(sbi,
"Reduce reserved blocks for root = %u",
F2FS_OPTION(sbi).root_reserved_blocks);
}
if (!test_opt(sbi, RESERVE_ROOT) &&
(!uid_eq(F2FS_OPTION(sbi).s_resuid,
make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
!gid_eq(F2FS_OPTION(sbi).s_resgid,
make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
f2fs_info(sbi,
"Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
from_kuid_munged(&init_user_ns,
F2FS_OPTION(sbi).s_resuid),
from_kgid_munged(&init_user_ns,
F2FS_OPTION(sbi).s_resgid));
}
static inline void adjust_unusable_cap_perc(
struct f2fs_sb_info *sbi)
{
if (!F2FS_OPTION(sbi).unusable_cap_perc)
return;
if (F2FS_OPTION(sbi).unusable_cap_perc == 100)
F2FS_OPTION(sbi).unusable_cap = sbi->user_block_count;
else
F2FS_OPTION(sbi).unusable_cap = (sbi->user_block_count / 100) *
F2FS_OPTION(sbi).unusable_cap_perc;
f2fs_info(sbi,
"Adjust unusable cap for checkpoint=disable = %u / %u%%",
F2FS_OPTION(sbi).unusable_cap,
F2FS_OPTION(sbi).unusable_cap_perc);
}
static void init_once(
void *foo)
{
struct f2fs_inode_info *fi = (
struct f2fs_inode_info *) foo;
inode_init_once(&fi->vfs_inode);
}
#ifdef CONFIG_QUOTA
static const char *
const quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
/*
* Note the name of the specified quota file.
*/
static int f2fs_note_qf_name(
struct fs_context *fc,
int qtype,
struct fs_parameter *param)
{
struct f2fs_fs_context *ctx = fc->fs_private;
char *qname;
if (param->size < 1) {
f2fs_err(NULL,
"Missing quota name");
return -EINVAL;
}
if (strchr(param->string,
'/')) {
f2fs_err(NULL,
"quotafile must be on filesystem root");
return -EINVAL;
}
if (ctx->info.s_qf_names[qtype]) {
if (strcmp(ctx->info.s_qf_names[qtype], param->string) != 0) {
f2fs_err(NULL,
"Quota file already specified");
return -EINVAL;
}
return 0;
}
qname = kmemdup_nul(param->string, param->size, GFP_KERNEL);
if (!qname) {
f2fs_err(NULL,
"Not enough memory for storing quotafile name");
return -ENOMEM;
}
F2FS_CTX_INFO(ctx).s_qf_names[qtype] = qname;
ctx->qname_mask |= 1 << qtype;
return 0;
}
/*
* Clear the name of the specified quota file.
*/
static int f2fs_unnote_qf_name(
struct fs_context *fc,
int qtype)
{
struct f2fs_fs_context *ctx = fc->fs_private;
kfree(ctx->info.s_qf_names[qtype]);
ctx->info.s_qf_names[qtype] = NULL;
ctx->qname_mask |= 1 << qtype;
return 0;
}
static void f2fs_unnote_qf_name_all(
struct fs_context *fc)
{
int i;
for (i = 0; i < MAXQUOTAS; i++)
f2fs_unnote_qf_name(fc, i);
}
#endif
static int f2fs_parse_test_dummy_encryption(
const struct fs_parameter *param,
struct f2fs_fs_context *ctx)
{
int err;
if (!IS_ENABLED(CONFIG_FS_ENCRYPTION)) {
f2fs_warn(NULL,
"test_dummy_encryption option not supported");
return -EINVAL;
}
err = fscrypt_parse_test_dummy_encryption(param,
&ctx->info.dummy_enc_policy);
if (err) {
if (err == -EINVAL)
f2fs_warn(NULL,
"Value of option \"%s\
" is unrecognized",
param->key);
else if (err == -EEXIST)
f2fs_warn(NULL,
"Conflicting test_dummy_encryption options");
else
f2fs_warn(NULL,
"Error processing option \"%s\
" [%d]",
param->key, err);
return -EINVAL;
}
return 0;
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
static bool is_compress_extension_exist(
struct f2fs_mount_info *info,
const char *new_ext,
bool is_ext)
{
unsigned char (*ext)[F2FS_EXTENSION_LEN];
int ext_cnt;
int i;
if (is_ext) {
ext = info->extensions;
ext_cnt = info->compress_ext_cnt;
}
else {
ext = info->noextensions;
ext_cnt = info->nocompress_ext_cnt;
}
for (i = 0; i < ext_cnt; i++) {
if (!strcasecmp(new_ext, ext[i]))
return true;
}
return false;
}
/*
* 1. The same extension name cannot not appear in both compress and non-compress extension
* at the same time.
* 2. If the compress extension specifies all files, the types specified by the non-compress
* extension will be treated as special cases and will not be compressed.
* 3. Don't allow the non-compress extension specifies all files.
*/
static int f2fs_test_compress_extension(
unsigned char (*noext)[F2FS_EXTENSION_LEN],
int noext_cnt,
unsigned char (*ext)[F2FS_EXTENSION_LEN],
int ext_cnt)
{
int index = 0, no_index = 0;
if (!noext_cnt)
return 0;
for (no_index = 0; no_index < noext_cnt; no_index++) {
if (strlen(noext[no_index]) == 0)
continue;
if (!strcasecmp(
"*", noext[no_index])) {
f2fs_info(NULL,
"Don't allow the nocompress extension specifies all files");
return -EINVAL;
}
for (index = 0; index < ext_cnt; index++) {
if (strlen(ext[index]) == 0)
continue;
if (!strcasecmp(ext[index], noext[no_index])) {
f2fs_info(NULL,
"Don't allow the same extension %s appear in both compress and nocompress extension",
ext[index]);
return -EINVAL;
}
}
}
return 0;
}
#ifdef CONFIG_F2FS_FS_LZ4
static int f2fs_set_lz4hc_level(
struct f2fs_fs_context *ctx,
const char *str)
{
#ifdef CONFIG_F2FS_FS_LZ4HC
unsigned int level;
if (strlen(str) == 3) {
F2FS_CTX_INFO(ctx).compress_level = 0;
ctx->spec_mask |= F2FS_SPEC_compress_level;
return 0;
}
str += 3;
if (str[0] !=
':') {
f2fs_info(NULL,
"wrong format, e.g. :");
return -EINVAL;
}
if (kstrtouint(str + 1, 10, &level))
return -EINVAL;
if (!f2fs_is_compress_level_valid(COMPRESS_LZ4, level)) {
f2fs_info(NULL,
"invalid lz4hc compress level: %d", level);
return -EINVAL;
}
F2FS_CTX_INFO(ctx).compress_level = level;
ctx->spec_mask |= F2FS_SPEC_compress_level;
return 0;
#else
if (strlen(str) == 3) {
F2FS_CTX_INFO(ctx).compress_level = 0;
ctx->spec_mask |= F2FS_SPEC_compress_level;
return 0;
}
f2fs_info(NULL,
"kernel doesn't support lz4hc compression");
return -EINVAL;
#endif
}
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
static int f2fs_set_zstd_level(
struct f2fs_fs_context *ctx,
const char *str)
{
int level;
int len = 4;
if (strlen(str) == len) {
F2FS_CTX_INFO(ctx).compress_level = F2FS_ZSTD_DEFAULT_CLEVEL;
ctx->spec_mask |= F2FS_SPEC_compress_level;
return 0;
}
str += len;
if (str[0] !=
':') {
f2fs_info(NULL,
"wrong format, e.g. :");
return -EINVAL;
}
if (kstrtoint(str + 1, 10, &level))
return -EINVAL;
/* f2fs does not support negative compress level now */
if (level < 0) {
f2fs_info(NULL,
"do not support negative compress level: %d", level);
return -ERANGE;
}
if (!f2fs_is_compress_level_valid(COMPRESS_ZSTD, level)) {
f2fs_info(NULL,
"invalid zstd compress level: %d", level);
return -EINVAL;
}
F2FS_CTX_INFO(ctx).compress_level = level;
ctx->spec_mask |= F2FS_SPEC_compress_level;
return 0;
}
#endif
#endif
static int f2fs_parse_param(
struct fs_context *fc,
struct fs_parameter *param)
{
struct f2fs_fs_context *ctx = fc->fs_private;
#ifdef CONFIG_F2FS_FS_COMPRESSION
unsigned char (*ext)[F2FS_EXTENSION_LEN];
unsigned char (*noext)[F2FS_EXTENSION_LEN];
int ext_cnt, noext_cnt;
char *name;
#endif
substring_t args[MAX_OPT_ARGS];
struct fs_parse_result result;
int token, ret, arg;
token = fs_parse(fc, f2fs_param_specs, param, &result);
if (token < 0)
return token;
switch (token) {
case Opt_gc_background:
F2FS_CTX_INFO(ctx).bggc_mode = result.uint_32;
ctx->spec_mask |= F2FS_SPEC_background_gc;
break;
case Opt_disable_roll_forward:
ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_ROLL_FORWARD);
break;
case Opt_norecovery:
/* requires ro mount, checked in f2fs_validate_options */
ctx_set_opt(ctx, F2FS_MOUNT_NORECOVERY);
break;
case Opt_discard:
if (result.negated)
ctx_clear_opt(ctx, F2FS_MOUNT_DISCARD);
else
ctx_set_opt(ctx, F2FS_MOUNT_DISCARD);
break;
case Opt_noheap:
case Opt_heap:
f2fs_warn(NULL,
"heap/no_heap options were deprecated");
break;
#ifdef CONFIG_F2FS_FS_XATTR
case Opt_user_xattr:
if (result.negated)
ctx_clear_opt(ctx, F2FS_MOUNT_XATTR_USER);
else
ctx_set_opt(ctx, F2FS_MOUNT_XATTR_USER);
break;
case Opt_inline_xattr:
if (result.negated)
ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_XATTR);
else
ctx_set_opt(ctx, F2FS_MOUNT_INLINE_XATTR);
break;
case Opt_inline_xattr_size:
if (result.int_32 < MIN_INLINE_XATTR_SIZE ||
result.int_32 > MAX_INLINE_XATTR_SIZE) {
f2fs_err(NULL,
"inline xattr size is out of range: %u ~ %u",
(u32)MIN_INLINE_XATTR_SIZE, (u32)MAX_INLINE_XATTR_SIZE);
return -EINVAL;
}
ctx_set_opt(ctx, F2FS_MOUNT_INLINE_XATTR_SIZE);
F2FS_CTX_INFO(ctx).inline_xattr_size = result.int_32;
ctx->spec_mask |= F2FS_SPEC_inline_xattr_size;
break;
#else
case Opt_user_xattr:
case Opt_inline_xattr:
case Opt_inline_xattr_size:
f2fs_info(NULL,
"%s options not supported", param->key);
break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
case Opt_acl:
if (result.negated)
ctx_clear_opt(ctx, F2FS_MOUNT_POSIX_ACL);
else
ctx_set_opt(ctx, F2FS_MOUNT_POSIX_ACL);
break;
#else
case Opt_acl:
f2fs_info(NULL,
"%s options not supported", param->key);
break;
#endif
case Opt_active_logs:
if (result.int_32 != 2 && result.int_32 != 4 &&
result.int_32 != NR_CURSEG_PERSIST_TYPE)
return -EINVAL;
ctx->spec_mask |= F2FS_SPEC_active_logs;
F2FS_CTX_INFO(ctx).active_logs = result.int_32;
break;
case Opt_disable_ext_identify:
ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_EXT_IDENTIFY);
break;
case Opt_inline_data:
if (result.negated)
ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_DATA);
else
ctx_set_opt(ctx, F2FS_MOUNT_INLINE_DATA);
break;
case Opt_inline_dentry:
if (result.negated)
ctx_clear_opt(ctx, F2FS_MOUNT_INLINE_DENTRY);
else
ctx_set_opt(ctx, F2FS_MOUNT_INLINE_DENTRY);
break;
case Opt_flush_merge:
if (result.negated)
ctx_clear_opt(ctx, F2FS_MOUNT_FLUSH_MERGE);
else
ctx_set_opt(ctx, F2FS_MOUNT_FLUSH_MERGE);
break;
case Opt_barrier:
if (result.negated)
ctx_set_opt(ctx, F2FS_MOUNT_NOBARRIER);
else
ctx_clear_opt(ctx, F2FS_MOUNT_NOBARRIER);
break;
case Opt_fastboot:
ctx_set_opt(ctx, F2FS_MOUNT_FASTBOOT);
break;
case Opt_extent_cache:
if (result.negated)
ctx_clear_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE);
else
ctx_set_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE);
break;
case Opt_data_flush:
ctx_set_opt(ctx, F2FS_MOUNT_DATA_FLUSH);
break;
case Opt_reserve_root:
ctx_set_opt(ctx, F2FS_MOUNT_RESERVE_ROOT);
F2FS_CTX_INFO(ctx).root_reserved_blocks = result.uint_32;
ctx->spec_mask |= F2FS_SPEC_reserve_root;
break;
case Opt_resuid:
F2FS_CTX_INFO(ctx).s_resuid = result.uid;
ctx->spec_mask |= F2FS_SPEC_resuid;
break;
case Opt_resgid:
F2FS_CTX_INFO(ctx).s_resgid = result.gid;
ctx->spec_mask |= F2FS_SPEC_resgid;
break;
case Opt_mode:
F2FS_CTX_INFO(ctx).fs_mode = result.uint_32;
ctx->spec_mask |= F2FS_SPEC_mode;
break;
#ifdef CONFIG_F2FS_FAULT_INJECTION
case Opt_fault_injection:
F2FS_CTX_INFO(ctx).fault_info.inject_rate = result.int_32;
ctx->spec_mask |= F2FS_SPEC_fault_injection;
ctx_set_opt(ctx, F2FS_MOUNT_FAULT_INJECTION);
break;
case Opt_fault_type:
if (result.uint_32 > BIT(FAULT_MAX))
return -EINVAL;
F2FS_CTX_INFO(ctx).fault_info.inject_type = result.uint_32;
ctx->spec_mask |= F2FS_SPEC_fault_type;
ctx_set_opt(ctx, F2FS_MOUNT_FAULT_INJECTION);
break;
#else
case Opt_fault_injection:
case Opt_fault_type:
f2fs_info(NULL,
"%s options not supported", param->key);
break;
#endif
case Opt_lazytime:
if (result.negated)
ctx_clear_opt(ctx, F2FS_MOUNT_LAZYTIME);
else
ctx_set_opt(ctx, F2FS_MOUNT_LAZYTIME);
break;
#ifdef CONFIG_QUOTA
case Opt_quota:
if (result.negated) {
ctx_clear_opt(ctx, F2FS_MOUNT_QUOTA);
ctx_clear_opt(ctx, F2FS_MOUNT_USRQUOTA);
ctx_clear_opt(ctx, F2FS_MOUNT_GRPQUOTA);
ctx_clear_opt(ctx, F2FS_MOUNT_PRJQUOTA);
}
else
ctx_set_opt(ctx, F2FS_MOUNT_USRQUOTA);
break;
case Opt_usrquota:
ctx_set_opt(ctx, F2FS_MOUNT_USRQUOTA);
break;
case Opt_grpquota:
ctx_set_opt(ctx, F2FS_MOUNT_GRPQUOTA);
break;
case Opt_prjquota:
ctx_set_opt(ctx, F2FS_MOUNT_PRJQUOTA);
break;
case Opt_usrjquota:
if (!*param->string)
ret = f2fs_unnote_qf_name(fc, USRQUOTA);
else
ret = f2fs_note_qf_name(fc, USRQUOTA, param);
if (ret)
return ret;
break;
case Opt_grpjquota:
if (!*param->string)
ret = f2fs_unnote_qf_name(fc, GRPQUOTA);
else
ret = f2fs_note_qf_name(fc, GRPQUOTA, param);
if (ret)
return ret;
break;
case Opt_prjjquota:
if (!*param->string)
ret = f2fs_unnote_qf_name(fc, PRJQUOTA);
else
ret = f2fs_note_qf_name(fc, PRJQUOTA, param);
if (ret)
return ret;
break;
case Opt_jqfmt:
F2FS_CTX_INFO(ctx).s_jquota_fmt = result.int_32;
ctx->spec_mask |= F2FS_SPEC_jqfmt;
break;
#else
case Opt_quota:
case Opt_usrquota:
case Opt_grpquota:
case Opt_prjquota:
case Opt_usrjquota:
case Opt_grpjquota:
case Opt_prjjquota:
f2fs_info(NULL,
"quota operations not supported");
break;
#endif
case Opt_alloc:
F2FS_CTX_INFO(ctx).alloc_mode = result.uint_32;
ctx->spec_mask |= F2FS_SPEC_alloc_mode;
break;
case Opt_fsync:
F2FS_CTX_INFO(ctx).fsync_mode = result.uint_32;
ctx->spec_mask |= F2FS_SPEC_fsync_mode;
break;
case Opt_test_dummy_encryption:
ret = f2fs_parse_test_dummy_encryption(param, ctx);
if (ret)
return ret;
break;
case Opt_inlinecrypt:
#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
ctx_set_opt(ctx, F2FS_MOUNT_INLINECRYPT);
#else
f2fs_info(NULL,
"inline encryption not supported");
#endif
break;
case Opt_checkpoint:
/*
* Initialize args struct so we know whether arg was
* found; some options take optional arguments.
*/
args[0].from = args[0].to = NULL;
arg = 0;
/* revert to match_table for checkpoint= options */
token = match_token(param->string, f2fs_checkpoint_tokens, args);
switch (token) {
case Opt_checkpoint_disable_cap_perc:
if (args->from && match_int(args, &arg))
return -EINVAL;
if (arg < 0 || arg > 100)
return -EINVAL;
F2FS_CTX_INFO(ctx).unusable_cap_perc = arg;
ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap_perc;
ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
break;
case Opt_checkpoint_disable_cap:
if (args->from && match_int(args, &arg))
return -EINVAL;
F2FS_CTX_INFO(ctx).unusable_cap = arg;
ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap;
ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
break;
case Opt_checkpoint_disable:
ctx_set_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
break;
case Opt_checkpoint_enable:
F2FS_CTX_INFO(ctx).unusable_cap_perc = 0;
ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap_perc;
F2FS_CTX_INFO(ctx).unusable_cap = 0;
ctx->spec_mask |= F2FS_SPEC_checkpoint_disable_cap;
ctx_clear_opt(ctx, F2FS_MOUNT_DISABLE_CHECKPOINT);
break;
default:
return -EINVAL;
}
break;
case Opt_checkpoint_merge:
if (result.negated)
ctx_clear_opt(ctx, F2FS_MOUNT_MERGE_CHECKPOINT);
else
ctx_set_opt(ctx, F2FS_MOUNT_MERGE_CHECKPOINT);
break;
#ifdef CONFIG_F2FS_FS_COMPRESSION
case Opt_compress_algorithm:
name = param->string;
if (!strcmp(name,
"lzo")) {
#ifdef CONFIG_F2FS_FS_LZO
F2FS_CTX_INFO(ctx).compress_level = 0;
F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZO;
ctx->spec_mask |= F2FS_SPEC_compress_level;
ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
#else
f2fs_info(NULL,
"kernel doesn't support lzo compression");
#endif
}
else if (!strncmp(name,
"lz4", 3)) {
#ifdef CONFIG_F2FS_FS_LZ4
ret = f2fs_set_lz4hc_level(ctx, name);
if (ret)
return -EINVAL;
F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZ4;
ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
#else
f2fs_info(NULL,
"kernel doesn't support lz4 compression");
#endif
}
else if (!strncmp(name,
"zstd", 4)) {
#ifdef CONFIG_F2FS_FS_ZSTD
ret = f2fs_set_zstd_level(ctx, name);
if (ret)
return -EINVAL;
F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_ZSTD;
ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
#else
f2fs_info(NULL,
"kernel doesn't support zstd compression");
#endif
}
else if (!strcmp(name,
"lzo-rle")) {
#ifdef CONFIG_F2FS_FS_LZORLE
F2FS_CTX_INFO(ctx).compress_level = 0;
F2FS_CTX_INFO(ctx).compress_algorithm = COMPRESS_LZORLE;
ctx->spec_mask |= F2FS_SPEC_compress_level;
ctx->spec_mask |= F2FS_SPEC_compress_algorithm;
#else
f2fs_info(NULL,
"kernel doesn't support lzorle compression");
#endif
}
else
return -EINVAL;
break;
case Opt_compress_log_size:
if (result.uint_32 < MIN_COMPRESS_LOG_SIZE ||
result.uint_32 > MAX_COMPRESS_LOG_SIZE) {
f2fs_err(NULL,
"Compress cluster log size is out of range");
return -EINVAL;
}
F2FS_CTX_INFO(ctx).compress_log_size = result.uint_32;
ctx->spec_mask |= F2FS_SPEC_compress_log_size;
break;
case Opt_compress_extension:
name = param->string;
ext = F2FS_CTX_INFO(ctx).extensions;
ext_cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt;
if (strlen(name) >= F2FS_EXTENSION_LEN ||
ext_cnt >= COMPRESS_EXT_NUM) {
f2fs_err(NULL,
"invalid extension length/number");
return -EINVAL;
}
if (is_compress_extension_exist(&ctx->info, name,
true))
break;
ret = strscpy(ext[ext_cnt], name, F2FS_EXTENSION_LEN);
if (ret < 0)
return ret;
F2FS_CTX_INFO(ctx).compress_ext_cnt++;
ctx->spec_mask |= F2FS_SPEC_compress_extension;
break;
case Opt_nocompress_extension:
name = param->string;
noext = F2FS_CTX_INFO(ctx).noextensions;
noext_cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt;
if (strlen(name) >= F2FS_EXTENSION_LEN ||
noext_cnt >= COMPRESS_EXT_NUM) {
f2fs_err(NULL,
"invalid extension length/number");
return -EINVAL;
}
if (is_compress_extension_exist(&ctx->info, name,
false))
break;
ret = strscpy(noext[noext_cnt], name, F2FS_EXTENSION_LEN);
if (ret < 0)
return ret;
F2FS_CTX_INFO(ctx).nocompress_ext_cnt++;
ctx->spec_mask |= F2FS_SPEC_nocompress_extension;
break;
case Opt_compress_chksum:
F2FS_CTX_INFO(ctx).compress_chksum =
true;
ctx->spec_mask |= F2FS_SPEC_compress_chksum;
break;
case Opt_compress_mode:
F2FS_CTX_INFO(ctx).compress_mode = result.uint_32;
ctx->spec_mask |= F2FS_SPEC_compress_mode;
break;
case Opt_compress_cache:
ctx_set_opt(ctx, F2FS_MOUNT_COMPRESS_CACHE);
break;
#else
case Opt_compress_algorithm:
case Opt_compress_log_size:
case Opt_compress_extension:
case Opt_nocompress_extension:
case Opt_compress_chksum:
case Opt_compress_mode:
case Opt_compress_cache:
f2fs_info(NULL,
"compression options not supported");
break;
#endif
case Opt_atgc:
ctx_set_opt(ctx, F2FS_MOUNT_ATGC);
break;
case Opt_gc_merge:
if (result.negated)
ctx_clear_opt(ctx, F2FS_MOUNT_GC_MERGE);
else
ctx_set_opt(ctx, F2FS_MOUNT_GC_MERGE);
break;
case Opt_discard_unit:
F2FS_CTX_INFO(ctx).discard_unit = result.uint_32;
ctx->spec_mask |= F2FS_SPEC_discard_unit;
break;
case Opt_memory_mode:
F2FS_CTX_INFO(ctx).memory_mode = result.uint_32;
ctx->spec_mask |= F2FS_SPEC_memory_mode;
break;
case Opt_age_extent_cache:
ctx_set_opt(ctx, F2FS_MOUNT_AGE_EXTENT_CACHE);
break;
case Opt_errors:
F2FS_CTX_INFO(ctx).errors = result.uint_32;
ctx->spec_mask |= F2FS_SPEC_errors;
break;
case Opt_nat_bits:
ctx_set_opt(ctx, F2FS_MOUNT_NAT_BITS);
break;
}
return 0;
}
/*
* Check quota settings consistency.
*/
static int f2fs_check_quota_consistency(
struct fs_context *fc,
struct super_block *sb)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
#ifdef CONFIG_QUOTA
struct f2fs_fs_context *ctx = fc->fs_private;
bool quota_feature = f2fs_sb_has_quota_ino(sbi);
bool quota_turnon = sb_any_quota_loaded(sb);
char *old_qname, *new_qname;
bool usr_qf_name, grp_qf_name, prj_qf_name, usrquota, grpquota, prjquota;
int i;
/*
* We do the test below only for project quotas. 'usrquota' and
* 'grpquota' mount options are allowed even without quota feature
* to support legacy quotas in quota files.
*/
if (ctx_test_opt(ctx, F2FS_MOUNT_PRJQUOTA) &&
!f2fs_sb_has_project_quota(sbi)) {
f2fs_err(sbi,
"Project quota feature not enabled. Cannot enable project quota enforcement.");
return -EINVAL;
}
if (ctx->qname_mask) {
for (i = 0; i < MAXQUOTAS; i++) {
if (!(ctx->qname_mask & (1 << i)))
continue;
old_qname = F2FS_OPTION(sbi).s_qf_names[i];
new_qname = F2FS_CTX_INFO(ctx).s_qf_names[i];
if (quota_turnon &&
!!old_qname != !!new_qname)
goto err_jquota_change;
if (old_qname) {
if (!new_qname) {
f2fs_info(sbi,
"remove qf_name %s",
old_qname);
continue;
}
else if (strcmp(old_qname, new_qname) == 0) {
ctx->qname_mask &= ~(1 << i);
continue;
}
goto err_jquota_specified;
}
if (quota_feature) {
f2fs_info(sbi,
"QUOTA feature is enabled, so ignore qf_name");
ctx->qname_mask &= ~(1 << i);
kfree(F2FS_CTX_INFO(ctx).s_qf_names[i]);
F2FS_CTX_INFO(ctx).s_qf_names[i] = NULL;
}
}
}
/* Make sure we don't mix old and new quota format */
usr_qf_name = F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
F2FS_CTX_INFO(ctx).s_qf_names[USRQUOTA];
grp_qf_name = F2FS_OPTION(sbi).s_qf_names[GRPQUOTA] ||
F2FS_CTX_INFO(ctx).s_qf_names[GRPQUOTA];
prj_qf_name = F2FS_OPTION(sbi).s_qf_names[PRJQUOTA] ||
F2FS_CTX_INFO(ctx).s_qf_names[PRJQUOTA];
usrquota = test_opt(sbi, USRQUOTA) ||
ctx_test_opt(ctx, F2FS_MOUNT_USRQUOTA);
grpquota = test_opt(sbi, GRPQUOTA) ||
ctx_test_opt(ctx, F2FS_MOUNT_GRPQUOTA);
prjquota = test_opt(sbi, PRJQUOTA) ||
ctx_test_opt(ctx, F2FS_MOUNT_PRJQUOTA);
if (usr_qf_name) {
ctx_clear_opt(ctx, F2FS_MOUNT_USRQUOTA);
usrquota =
false;
}
if (grp_qf_name) {
ctx_clear_opt(ctx, F2FS_MOUNT_GRPQUOTA);
grpquota =
false;
}
if (prj_qf_name) {
ctx_clear_opt(ctx, F2FS_MOUNT_PRJQUOTA);
prjquota =
false;
}
if (usr_qf_name || grp_qf_name || prj_qf_name) {
if (grpquota || usrquota || prjquota) {
f2fs_err(sbi,
"old and new quota format mixing");
return -EINVAL;
}
if (!(ctx->spec_mask & F2FS_SPEC_jqfmt ||
F2FS_OPTION(sbi).s_jquota_fmt)) {
f2fs_err(sbi,
"journaled quota format not specified");
return -EINVAL;
}
}
return 0;
err_jquota_change:
f2fs_err(sbi,
"Cannot change journaled quota options when quota turned on");
return -EINVAL;
err_jquota_specified:
f2fs_err(sbi,
"%s quota file already specified",
QTYPE2NAME(i));
return -EINVAL;
#else
if (f2fs_readonly(sbi->sb))
return 0;
if (f2fs_sb_has_quota_ino(sbi)) {
f2fs_info(sbi,
"Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
return -EINVAL;
}
if (f2fs_sb_has_project_quota(sbi)) {
f2fs_err(sbi,
"Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
return -EINVAL;
}
return 0;
#endif
}
static int f2fs_check_test_dummy_encryption(
struct fs_context *fc,
struct super_block *sb)
{
struct f2fs_fs_context *ctx = fc->fs_private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
if (!fscrypt_is_dummy_policy_set(&F2FS_CTX_INFO(ctx).dummy_enc_policy))
return 0;
if (!f2fs_sb_has_encrypt(sbi)) {
f2fs_err(sbi,
"Encrypt feature is off");
return -EINVAL;
}
/*
* This mount option is just for testing, and it's not worthwhile to
* implement the extra complexity (e.g. RCU protection) that would be
* needed to allow it to be set or changed during remount. We do allow
* it to be specified during remount, but only if there is no change.
*/
if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
if (fscrypt_dummy_policies_equal(&F2FS_OPTION(sbi).dummy_enc_policy,
&F2FS_CTX_INFO(ctx).dummy_enc_policy))
return 0;
f2fs_warn(sbi,
"Can't set or change test_dummy_encryption on remount");
return -EINVAL;
}
return 0;
}
static inline bool test_compression_spec(
unsigned int mask)
{
return mask & (F2FS_SPEC_compress_algorithm
| F2FS_SPEC_compress_log_size
| F2FS_SPEC_compress_extension
| F2FS_SPEC_nocompress_extension
| F2FS_SPEC_compress_chksum
| F2FS_SPEC_compress_mode);
}
static inline void clear_compression_spec(
struct f2fs_fs_context *ctx)
{
ctx->spec_mask &= ~(F2FS_SPEC_compress_algorithm
| F2FS_SPEC_compress_log_size
| F2FS_SPEC_compress_extension
| F2FS_SPEC_nocompress_extension
| F2FS_SPEC_compress_chksum
| F2FS_SPEC_compress_mode);
}
static int f2fs_check_compression(
struct fs_context *fc,
struct super_block *sb)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
struct f2fs_fs_context *ctx = fc->fs_private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
int i, cnt;
if (!f2fs_sb_has_compression(sbi)) {
if (test_compression_spec(ctx->spec_mask) ||
ctx_test_opt(ctx, F2FS_MOUNT_COMPRESS_CACHE))
f2fs_info(sbi,
"Image doesn't support compression");
clear_compression_spec(ctx);
ctx->opt_mask &= ~F2FS_MOUNT_COMPRESS_CACHE;
return 0;
}
if (ctx->spec_mask & F2FS_SPEC_compress_extension) {
cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt;
for (i = 0; i < F2FS_CTX_INFO(ctx).compress_ext_cnt; i++) {
if (is_compress_extension_exist(&F2FS_OPTION(sbi),
F2FS_CTX_INFO(ctx).extensions[i],
true)) {
F2FS_CTX_INFO(ctx).extensions[i][0] =
'\0';
cnt--;
}
}
if (F2FS_OPTION(sbi).compress_ext_cnt + cnt > COMPRESS_EXT_NUM) {
f2fs_err(sbi,
"invalid extension length/number");
return -EINVAL;
}
}
if (ctx->spec_mask & F2FS_SPEC_nocompress_extension) {
cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt;
for (i = 0; i < F2FS_CTX_INFO(ctx).nocompress_ext_cnt; i++) {
if (is_compress_extension_exist(&F2FS_OPTION(sbi),
F2FS_CTX_INFO(ctx).noextensions[i],
false)) {
F2FS_CTX_INFO(ctx).noextensions[i][0] =
'\0';
cnt--;
}
}
if (F2FS_OPTION(sbi).nocompress_ext_cnt + cnt > COMPRESS_EXT_NUM) {
f2fs_err(sbi,
"invalid noextension length/number");
return -EINVAL;
}
}
if (f2fs_test_compress_extension(F2FS_CTX_INFO(ctx).noextensions,
F2FS_CTX_INFO(ctx).nocompress_ext_cnt,
F2FS_CTX_INFO(ctx).extensions,
F2FS_CTX_INFO(ctx).compress_ext_cnt)) {
f2fs_err(sbi,
"new noextensions conflicts with new extensions");
return -EINVAL;
}
if (f2fs_test_compress_extension(F2FS_CTX_INFO(ctx).noextensions,
F2FS_CTX_INFO(ctx).nocompress_ext_cnt,
F2FS_OPTION(sbi).extensions,
F2FS_OPTION(sbi).compress_ext_cnt)) {
f2fs_err(sbi,
"new noextensions conflicts with old extensions");
return -EINVAL;
}
if (f2fs_test_compress_extension(F2FS_OPTION(sbi).noextensions,
F2FS_OPTION(sbi).nocompress_ext_cnt,
F2FS_CTX_INFO(ctx).extensions,
F2FS_CTX_INFO(ctx).compress_ext_cnt)) {
f2fs_err(sbi,
"new extensions conflicts with old noextensions");
return -EINVAL;
}
#endif
return 0;
}
static int f2fs_check_opt_consistency(
struct fs_context *fc,
struct super_block *sb)
{
struct f2fs_fs_context *ctx = fc->fs_private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
int err;
if (ctx_test_opt(ctx, F2FS_MOUNT_NORECOVERY) && !f2fs_readonly(sb))
return -EINVAL;
if (f2fs_hw_should_discard(sbi) &&
(ctx->opt_mask & F2FS_MOUNT_DISCARD) &&
!ctx_test_opt(ctx, F2FS_MOUNT_DISCARD)) {
f2fs_warn(sbi,
"discard is required for zoned block devices");
return -EINVAL;
}
if (!f2fs_hw_support_discard(sbi) &&
(ctx->opt_mask & F2FS_MOUNT_DISCARD) &&
ctx_test_opt(ctx, F2FS_MOUNT_DISCARD)) {
f2fs_warn(sbi,
"device does not support discard");
ctx_clear_opt(ctx, F2FS_MOUNT_DISCARD);
ctx->opt_mask &= ~F2FS_MOUNT_DISCARD;
}
if (f2fs_sb_has_device_alias(sbi) &&
(ctx->opt_mask & F2FS_MOUNT_READ_EXTENT_CACHE) &&
!ctx_test_opt(ctx, F2FS_MOUNT_READ_EXTENT_CACHE)) {
f2fs_err(sbi,
"device aliasing requires extent cache");
return -EINVAL;
}
if (test_opt(sbi, RESERVE_ROOT) &&
(ctx->opt_mask & F2FS_MOUNT_RESERVE_ROOT) &&
ctx_test_opt(ctx, F2FS_MOUNT_RESERVE_ROOT)) {
f2fs_info(sbi,
"Preserve previous reserve_root=%u",
F2FS_OPTION(sbi).root_reserved_blocks);
ctx_clear_opt(ctx, F2FS_MOUNT_RESERVE_ROOT);
ctx->opt_mask &= ~F2FS_MOUNT_RESERVE_ROOT;
}
err = f2fs_check_test_dummy_encryption(fc, sb);
if (err)
return err;
err = f2fs_check_compression(fc, sb);
if (err)
return err;
err = f2fs_check_quota_consistency(fc, sb);
if (err)
return err;
if (!IS_ENABLED(CONFIG_UNICODE) && f2fs_sb_has_casefold(sbi)) {
f2fs_err(sbi,
"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
return -EINVAL;
}
/*
* The BLKZONED feature indicates that the drive was formatted with
* zone alignment optimization. This is optional for host-aware
* devices, but mandatory for host-managed zoned block devices.
*/
if (f2fs_sb_has_blkzoned(sbi)) {
if (F2FS_CTX_INFO(ctx).bggc_mode == BGGC_MODE_OFF) {
f2fs_warn(sbi,
"zoned devices need bggc");
return -EINVAL;
}
#ifdef CONFIG_BLK_DEV_ZONED
if ((ctx->spec_mask & F2FS_SPEC_discard_unit) &&
F2FS_CTX_INFO(ctx).discard_unit != DISCARD_UNIT_SECTION) {
f2fs_info(sbi,
"Zoned block device doesn't need small discard, set discard_unit=section by default");
F2FS_CTX_INFO(ctx).discard_unit = DISCARD_UNIT_SECTION;
}
if ((ctx->spec_mask & F2FS_SPEC_mode) &&
F2FS_CTX_INFO(ctx).fs_mode != FS_MODE_LFS) {
f2fs_info(sbi,
"Only lfs mode is allowed with zoned block device feature");
return -EINVAL;
}
#else
f2fs_err(sbi,
"Zoned block device support is not enabled");
return -EINVAL;
#endif
}
if (ctx_test_opt(ctx, F2FS_MOUNT_INLINE_XATTR_SIZE)) {
if (!f2fs_sb_has_extra_attr(sbi) ||
!f2fs_sb_has_flexible_inline_xattr(sbi)) {
f2fs_err(sbi,
"extra_attr or flexible_inline_xattr feature is off");
return -EINVAL;
}
if (!ctx_test_opt(ctx, F2FS_MOUNT_INLINE_XATTR) && !test_opt(sbi, INLINE_XATTR)) {
f2fs_err(sbi,
"inline_xattr_size option should be set with inline_xattr option");
return -EINVAL;
}
}
if (ctx_test_opt(ctx, F2FS_MOUNT_ATGC) &&
F2FS_CTX_INFO(ctx).fs_mode == FS_MODE_LFS) {
f2fs_err(sbi,
"LFS is not compatible with ATGC");
return -EINVAL;
}
if (f2fs_is_readonly(sbi) && ctx_test_opt(ctx, F2FS_MOUNT_FLUSH_MERGE)) {
f2fs_err(sbi,
"FLUSH_MERGE not compatible with readonly mode");
return -EINVAL;
}
if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
f2fs_err(sbi,
"Allow to mount readonly mode only");
return -EROFS;
}
return 0;
}
static void f2fs_apply_quota_options(
struct fs_context *fc,
struct super_block *sb)
{
#ifdef CONFIG_QUOTA
struct f2fs_fs_context *ctx = fc->fs_private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
bool quota_feature = f2fs_sb_has_quota_ino(sbi);
char *qname;
int i;
if (quota_feature)
return;
for (i = 0; i < MAXQUOTAS; i++) {
if (!(ctx->qname_mask & (1 << i)))
continue;
qname = F2FS_CTX_INFO(ctx).s_qf_names[i];
if (qname) {
qname = kstrdup(F2FS_CTX_INFO(ctx).s_qf_names[i],
GFP_KERNEL | __GFP_NOFAIL);
set_opt(sbi, QUOTA);
}
F2FS_OPTION(sbi).s_qf_names[i] = qname;
}
if (ctx->spec_mask & F2FS_SPEC_jqfmt)
F2FS_OPTION(sbi).s_jquota_fmt = F2FS_CTX_INFO(ctx).s_jquota_fmt;
if (quota_feature && F2FS_OPTION(sbi).s_jquota_fmt) {
f2fs_info(sbi,
"QUOTA feature is enabled, so ignore jquota_fmt");
F2FS_OPTION(sbi).s_jquota_fmt = 0;
}
#endif
}
static void f2fs_apply_test_dummy_encryption(
struct fs_context *fc,
struct super_block *sb)
{
struct f2fs_fs_context *ctx = fc->fs_private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
if (!fscrypt_is_dummy_policy_set(&F2FS_CTX_INFO(ctx).dummy_enc_policy) ||
/* if already set, it was already verified to be the same */
fscrypt_is_dummy_policy_set(&F2FS_OPTION(sbi).dummy_enc_policy))
return;
swap(F2FS_OPTION(sbi).dummy_enc_policy, F2FS_CTX_INFO(ctx).dummy_enc_policy);
f2fs_warn(sbi,
"Test dummy encryption mode enabled");
}
static void f2fs_apply_compression(
struct fs_context *fc,
struct super_block *sb)
{
#ifdef CONFIG_F2FS_FS_COMPRESSION
struct f2fs_fs_context *ctx = fc->fs_private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
unsigned char (*ctx_ext)[F2FS_EXTENSION_LEN];
unsigned char (*sbi_ext)[F2FS_EXTENSION_LEN];
int ctx_cnt, sbi_cnt, i;
if (ctx->spec_mask & F2FS_SPEC_compress_level)
F2FS_OPTION(sbi).compress_level =
F2FS_CTX_INFO(ctx).compress_level;
if (ctx->spec_mask & F2FS_SPEC_compress_algorithm)
F2FS_OPTION(sbi).compress_algorithm =
F2FS_CTX_INFO(ctx).compress_algorithm;
if (ctx->spec_mask & F2FS_SPEC_compress_log_size)
F2FS_OPTION(sbi).compress_log_size =
F2FS_CTX_INFO(ctx).compress_log_size;
if (ctx->spec_mask & F2FS_SPEC_compress_chksum)
F2FS_OPTION(sbi).compress_chksum =
F2FS_CTX_INFO(ctx).compress_chksum;
if (ctx->spec_mask & F2FS_SPEC_compress_mode)
F2FS_OPTION(sbi).compress_mode =
F2FS_CTX_INFO(ctx).compress_mode;
if (ctx->spec_mask & F2FS_SPEC_compress_extension) {
ctx_ext = F2FS_CTX_INFO(ctx).extensions;
ctx_cnt = F2FS_CTX_INFO(ctx).compress_ext_cnt;
sbi_ext = F2FS_OPTION(sbi).extensions;
sbi_cnt = F2FS_OPTION(sbi).compress_ext_cnt;
for (i = 0; i < ctx_cnt; i++) {
if (strlen(ctx_ext[i]) == 0)
continue;
strscpy(sbi_ext[sbi_cnt], ctx_ext[i]);
sbi_cnt++;
}
F2FS_OPTION(sbi).compress_ext_cnt = sbi_cnt;
}
if (ctx->spec_mask & F2FS_SPEC_nocompress_extension) {
ctx_ext = F2FS_CTX_INFO(ctx).noextensions;
ctx_cnt = F2FS_CTX_INFO(ctx).nocompress_ext_cnt;
sbi_ext = F2FS_OPTION(sbi).noextensions;
sbi_cnt = F2FS_OPTION(sbi).nocompress_ext_cnt;
for (i = 0; i < ctx_cnt; i++) {
if (strlen(ctx_ext[i]) == 0)
continue;
strscpy(sbi_ext[sbi_cnt], ctx_ext[i]);
sbi_cnt++;
}
F2FS_OPTION(sbi).nocompress_ext_cnt = sbi_cnt;
}
#endif
}
static void f2fs_apply_options(
struct fs_context *fc,
struct super_block *sb)
{
struct f2fs_fs_context *ctx = fc->fs_private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
F2FS_OPTION(sbi).opt &= ~ctx->opt_mask;
F2FS_OPTION(sbi).opt |= F2FS_CTX_INFO(ctx).opt;
if (ctx->spec_mask & F2FS_SPEC_background_gc)
F2FS_OPTION(sbi).bggc_mode = F2FS_CTX_INFO(ctx).bggc_mode;
if (ctx->spec_mask & F2FS_SPEC_inline_xattr_size)
F2FS_OPTION(sbi).inline_xattr_size =
F2FS_CTX_INFO(ctx).inline_xattr_size;
if (ctx->spec_mask & F2FS_SPEC_active_logs)
F2FS_OPTION(sbi).active_logs = F2FS_CTX_INFO(ctx).active_logs;
if (ctx->spec_mask & F2FS_SPEC_reserve_root)
F2FS_OPTION(sbi).root_reserved_blocks =
F2FS_CTX_INFO(ctx).root_reserved_blocks;
if (ctx->spec_mask & F2FS_SPEC_resgid)
F2FS_OPTION(sbi).s_resgid = F2FS_CTX_INFO(ctx).s_resgid;
if (ctx->spec_mask & F2FS_SPEC_resuid)
F2FS_OPTION(sbi).s_resuid = F2FS_CTX_INFO(ctx).s_resuid;
if (ctx->spec_mask & F2FS_SPEC_mode)
F2FS_OPTION(sbi).fs_mode = F2FS_CTX_INFO(ctx).fs_mode;
#ifdef CONFIG_F2FS_FAULT_INJECTION
if (ctx->spec_mask & F2FS_SPEC_fault_injection)
(
void)f2fs_build_fault_attr(sbi,
F2FS_CTX_INFO(ctx).fault_info.inject_rate, 0, FAULT_RATE);
if (ctx->spec_mask & F2FS_SPEC_fault_type)
(
void)f2fs_build_fault_attr(sbi, 0,
F2FS_CTX_INFO(ctx).fault_info.inject_type, FAULT_TYPE);
#endif
if (ctx->spec_mask & F2FS_SPEC_alloc_mode)
F2FS_OPTION(sbi).alloc_mode = F2FS_CTX_INFO(ctx).alloc_mode;
if (ctx->spec_mask & F2FS_SPEC_fsync_mode)
F2FS_OPTION(sbi).fsync_mode = F2FS_CTX_INFO(ctx).fsync_mode;
if (ctx->spec_mask & F2FS_SPEC_checkpoint_disable_cap)
F2FS_OPTION(sbi).unusable_cap = F2FS_CTX_INFO(ctx).unusable_cap;
if (ctx->spec_mask & F2FS_SPEC_checkpoint_disable_cap_perc)
F2FS_OPTION(sbi).unusable_cap_perc =
F2FS_CTX_INFO(ctx).unusable_cap_perc;
if (ctx->spec_mask & F2FS_SPEC_discard_unit)
F2FS_OPTION(sbi).discard_unit = F2FS_CTX_INFO(ctx).discard_unit;
if (ctx->spec_mask & F2FS_SPEC_memory_mode)
F2FS_OPTION(sbi).memory_mode = F2FS_CTX_INFO(ctx).memory_mode;
if (ctx->spec_mask & F2FS_SPEC_errors)
F2FS_OPTION(sbi).errors = F2FS_CTX_INFO(ctx).errors;
f2fs_apply_compression(fc, sb);
f2fs_apply_test_dummy_encryption(fc, sb);
f2fs_apply_quota_options(fc, sb);
}
static int f2fs_sanity_check_options(
struct f2fs_sb_info *sbi,
bool remount)
{
if (f2fs_sb_has_device_alias(sbi) &&
!test_opt(sbi, READ_EXTENT_CACHE)) {
f2fs_err(sbi,
"device aliasing requires extent cache");
return -EINVAL;
}
if (!remount)
return 0;
#ifdef CONFIG_BLK_DEV_ZONED
if (f2fs_sb_has_blkzoned(sbi) &&
sbi->max_open_zones < F2FS_OPTION(sbi).active_logs) {
f2fs_err(sbi,
"zoned: max open zones %u is too small, need at least %u open zones",
sbi->max_open_zones, F2FS_OPTION(sbi).active_logs);
return -EINVAL;
}
#endif
if (f2fs_lfs_mode(sbi) && !IS_F2FS_IPU_DISABLE(sbi)) {
f2fs_warn(sbi,
"LFS is not compatible with IPU");
return -EINVAL;
}
return 0;
}
static struct inode *f2fs_alloc_inode(
struct super_block *sb)
{
struct f2fs_inode_info *fi;
if (time_to_inject(F2FS_SB(sb), FAULT_SLAB_ALLOC))
return NULL;
fi = alloc_inode_sb(sb, f2fs_inode_cachep, GFP_F2FS_ZERO);
if (!fi)
return NULL;
init_once((
void *) fi);
/* Initialize f2fs-specific inode info */
atomic_set(&fi->dirty_pages, 0);
atomic_set(&fi->i_compr_blocks, 0);
atomic_set(&fi->open_count, 0);
init_f2fs_rwsem(&fi->i_sem);
spin_lock_init(&fi->i_size_lock);
INIT_LIST_HEAD(&fi->dirty_list);
INIT_LIST_HEAD(&fi->gdirty_list);
INIT_LIST_HEAD(&fi->gdonate_list);
init_f2fs_rwsem(&fi->i_gc_rwsem[READ]);
init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]);
init_f2fs_rwsem(&fi->i_xattr_sem);
/* Will be used by directory only */
fi->i_dir_level = F2FS_SB(sb)->dir_level;
return &fi->vfs_inode;
}
static int f2fs_drop_inode(
struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int ret;
/*
* during filesystem shutdown, if checkpoint is disabled,
* drop useless meta/node dirty pages.
*/
if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
if (inode->i_ino == F2FS_NODE_INO(sbi) ||
inode->i_ino == F2FS_META_INO(sbi)) {
trace_f2fs_drop_inode(inode, 1);
return 1;
}
}
/*
* This is to avoid a deadlock condition like below.
* writeback_single_inode(inode)
* - f2fs_write_data_page
* - f2fs_gc -> iput -> evict
* - inode_wait_for_writeback(inode)
*/
if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
if (!inode->i_nlink && !is_bad_inode(inode)) {
/* to avoid evict_inode call simultaneously */
atomic_inc(&inode->i_count);
spin_unlock(&inode->i_lock);
/* should remain fi->extent_tree for writepage */
f2fs_destroy_extent_node(inode);
sb_start_intwrite(inode->i_sb);
f2fs_i_size_write(inode, 0);
f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
inode, NULL, 0, DATA);
truncate_inode_pages_final(inode->i_mapping);
if (F2FS_HAS_BLOCKS(inode))
f2fs_truncate(inode);
sb_end_intwrite(inode->i_sb);
spin_lock(&inode->i_lock);
atomic_dec(&inode->i_count);
}
trace_f2fs_drop_inode(inode, 0);
return 0;
}
ret = generic_drop_inode(inode);
if (!ret)
ret = fscrypt_drop_inode(inode);
trace_f2fs_drop_inode(inode, ret);
return ret;
}
int f2fs_inode_dirtied(
struct inode *inode,
bool sync)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int ret = 0;
spin_lock(&sbi->inode_lock[DIRTY_META]);
if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
ret = 1;
}
else {
set_inode_flag(inode, FI_DIRTY_INODE);
stat_inc_dirty_inode(sbi, DIRTY_META);
}
if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
list_add_tail(&F2FS_I(inode)->gdirty_list,
&sbi->inode_list[DIRTY_META]);
inc_page_count(sbi, F2FS_DIRTY_IMETA);
}
spin_unlock(&sbi->inode_lock[DIRTY_META]);
/* if atomic write is not committed, set inode w/ atomic dirty */
if (!ret && f2fs_is_atomic_file(inode) &&
!is_inode_flag_set(inode, FI_ATOMIC_COMMITTED))
set_inode_flag(inode, FI_ATOMIC_DIRTIED);
return ret;
}
void f2fs_inode_synced(
struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
spin_lock(&sbi->inode_lock[DIRTY_META]);
if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
spin_unlock(&sbi->inode_lock[DIRTY_META]);
return;
}
if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
list_del_init(&F2FS_I(inode)->gdirty_list);
dec_page_count(sbi, F2FS_DIRTY_IMETA);
}
clear_inode_flag(inode, FI_DIRTY_INODE);
clear_inode_flag(inode, FI_AUTO_RECOVER);
stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
/*
* f2fs_dirty_inode() is called from __mark_inode_dirty()
*
* We should call set_dirty_inode to write the dirty inode through write_inode.
*/
static void f2fs_dirty_inode(
struct inode *inode,
int flags)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
if (inode->i_ino == F2FS_NODE_INO(sbi) ||
inode->i_ino == F2FS_META_INO(sbi))
return;
if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
clear_inode_flag(inode, FI_AUTO_RECOVER);
f2fs_inode_dirtied(inode,
false);
}
static void f2fs_free_inode(
struct inode *inode)
{
fscrypt_free_inode(inode);
kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}
static void destroy_percpu_info(
struct f2fs_sb_info *sbi)
{
percpu_counter_destroy(&sbi->total_valid_inode_count);
percpu_counter_destroy(&sbi->rf_node_block_count);
percpu_counter_destroy(&sbi->alloc_valid_block_count);
}
static void destroy_device_list(
struct f2fs_sb_info *sbi)
{
int i;
for (i = 0; i < sbi->s_ndevs; i++) {
if (i > 0)
bdev_fput(FDEV(i).bdev_file);
#ifdef CONFIG_BLK_DEV_ZONED
kvfree(FDEV(i).blkz_seq);
#endif
}
kvfree(sbi->devs);
}
static void f2fs_put_super(
struct super_block *sb)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
int i;
int err = 0;
bool done;
/* unregister procfs/sysfs entries in advance to avoid race case */
f2fs_unregister_sysfs(sbi);
f2fs_quota_off_umount(sb);
/* prevent remaining shrinker jobs */
mutex_lock(&sbi->umount_mutex);
/*
* flush all issued checkpoints and stop checkpoint issue thread.
* after then, all checkpoints should be done by each process context.
*/
f2fs_stop_ckpt_thread(sbi);
/*
* We don't need to do checkpoint when superblock is clean.
* But, the previous checkpoint was not done by umount, it needs to do
* clean checkpoint again.
*/
if ((is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG))) {
struct cp_control cpc = {
.reason = CP_UMOUNT,
};
stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
}
/* be sure to wait for any on-going discard commands */
done = f2fs_issue_discard_timeout(sbi);
if (f2fs_realtime_discard_enable(sbi) && !sbi->discard_blks && done) {
struct cp_control cpc = {
.reason = CP_UMOUNT | CP_TRIMMED,
};
stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_write_checkpoint(sbi, &cpc);
}
/*
* normally superblock is clean, so we need to release this.
* In addition, EIO will skip do checkpoint, we need this as well.
*/
f2fs_release_ino_entry(sbi,
true);
f2fs_leave_shrinker(sbi);
mutex_unlock(&sbi->umount_mutex);
/* our cp_error case, we can wait for any writeback page */
f2fs_flush_merged_writes(sbi);
f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
if (err || f2fs_cp_error(sbi)) {
truncate_inode_pages_final(NODE_MAPPING(sbi));
truncate_inode_pages_final(META_MAPPING(sbi));
}
for (i = 0; i < NR_COUNT_TYPE; i++) {
if (!get_pages(sbi, i))
continue;
f2fs_err(sbi,
"detect filesystem reference count leak during "
"umount, type: %d, count: %lld", i, get_pages(sbi, i));
f2fs_bug_on(sbi, 1);
}
f2fs_bug_on(sbi, sbi->fsync_node_num);
f2fs_destroy_compress_inode(sbi);
iput(sbi->node_inode);
sbi->node_inode = NULL;
iput(sbi->meta_inode);
sbi->meta_inode = NULL;
/*
* iput() can update stat information, if f2fs_write_checkpoint()
* above failed with error.
*/
f2fs_destroy_stats(sbi);
/* destroy f2fs internal modules */
f2fs_destroy_node_manager(sbi);
f2fs_destroy_segment_manager(sbi);
/* flush s_error_work before sbi destroy */
flush_work(&sbi->s_error_work);
f2fs_destroy_post_read_wq(sbi);
kvfree(sbi->ckpt);
kfree(sbi->raw_super);
f2fs_destroy_page_array_cache(sbi);
f2fs_destroy_xattr_caches(sbi);
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
kfree(F2FS_OPTION(sbi).s_qf_names[i]);
#endif
fscrypt_free_dummy_policy(&F2FS_OPTION(sbi).dummy_enc_policy);
destroy_percpu_info(sbi);
f2fs_destroy_iostat(sbi);
for (i = 0; i < NR_PAGE_TYPE; i++)
kfree(sbi->write_io[i]);
#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(sb->s_encoding);
#endif
}
int f2fs_sync_fs(
struct super_block *sb,
int sync)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
int err = 0;
if (unlikely(f2fs_cp_error(sbi)))
return 0;
if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
return 0;
trace_f2fs_sync_fs(sb, sync);
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return -EAGAIN;
if (sync) {
stat_inc_cp_call_count(sbi, TOTAL_CALL);
err = f2fs_issue_checkpoint(sbi);
}
return err;
}
static int f2fs_freeze(
struct super_block *sb)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
if (f2fs_readonly(sb))
return 0;
/* IO error happened before */
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
/* must be clean, since sync_filesystem() was already called */
if (is_sbi_flag_set(sbi, SBI_IS_DIRTY))
return -EINVAL;
sbi->umount_lock_holder = current;
/* Let's flush checkpoints and stop the thread. */
f2fs_flush_ckpt_thread(sbi);
sbi->umount_lock_holder = NULL;
/* to avoid deadlock on f2fs_evict_inode->SB_FREEZE_FS */
set_sbi_flag(sbi, SBI_IS_FREEZING);
return 0;
}
static int f2fs_unfreeze(
struct super_block *sb)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
/*
* It will update discard_max_bytes of mounted lvm device to zero
* after creating snapshot on this lvm device, let's drop all
* remained discards.
* We don't need to disable real-time discard because discard_max_bytes
* will recover after removal of snapshot.
*/
if (test_opt(sbi, DISCARD) && !f2fs_hw_support_discard(sbi))
f2fs_issue_discard_timeout(sbi);
clear_sbi_flag(F2FS_SB(sb), SBI_IS_FREEZING);
return 0;
}
#ifdef CONFIG_QUOTA
static int f2fs_statfs_project(
struct super_block *sb,
kprojid_t projid,
struct kstatfs *buf)
{
struct kqid qid;
struct dquot *dquot;
u64 limit;
u64 curblock;
qid = make_kqid_projid(projid);
dquot = dqget(sb, qid);
if (IS_ERR(dquot))
return PTR_ERR(dquot);
spin_lock(&dquot->dq_dqb_lock);
limit = min_not_zero(dquot->dq_dqb.dqb_bsoftlimit,
dquot->dq_dqb.dqb_bhardlimit);
limit >>= sb->s_blocksize_bits;
if (limit) {
uint64_t remaining = 0;
curblock = (dquot->dq_dqb.dqb_curspace +
dquot->dq_dqb.dqb_rsvspace) >> sb->s_blocksize_bits;
if (limit > curblock)
remaining = limit - curblock;
buf->f_blocks = min(buf->f_blocks, limit);
buf->f_bfree = min(buf->f_bfree, remaining);
buf->f_bavail = min(buf->f_bavail, remaining);
}
limit = min_not_zero(dquot->dq_dqb.dqb_isoftlimit,
dquot->dq_dqb.dqb_ihardlimit);
if (limit) {
uint64_t remaining = 0;
if (limit > dquot->dq_dqb.dqb_curinodes)
remaining = limit - dquot->dq_dqb.dqb_curinodes;
buf->f_files = min(buf->f_files, limit);
buf->f_ffree = min(buf->f_ffree, remaining);
}
spin_unlock(&dquot->dq_dqb_lock);
dqput(dquot);
return 0;
}
#endif
static int f2fs_statfs(
struct dentry *dentry,
struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
block_t total_count, user_block_count, start_count;
u64 avail_node_count;
unsigned int total_valid_node_count;
total_count = le64_to_cpu(sbi->raw_super->block_count);
start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
buf->f_type = F2FS_SUPER_MAGIC;
buf->f_bsize = sbi->blocksize;
buf->f_blocks = total_count - start_count;
spin_lock(&sbi->stat_lock);
if (sbi->carve_out)
buf->f_blocks -= sbi->current_reserved_blocks;
user_block_count = sbi->user_block_count;
total_valid_node_count = valid_node_count(sbi);
avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
sbi->current_reserved_blocks;
if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
buf->f_bfree = 0;
else
buf->f_bfree -= sbi->unusable_block_count;
spin_unlock(&sbi->stat_lock);
if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
buf->f_bavail = buf->f_bfree -
F2FS_OPTION(sbi).root_reserved_blocks;
else
buf->f_bavail = 0;
if (avail_node_count > user_block_count) {
buf->f_files = user_block_count;
buf->f_ffree = buf->f_bavail;
}
else {
buf->f_files = avail_node_count;
buf->f_ffree = min(avail_node_count - total_valid_node_count,
buf->f_bavail);
}
buf->f_namelen = F2FS_NAME_LEN;
buf->f_fsid = u64_to_fsid(id);
#ifdef CONFIG_QUOTA
if (is_inode_flag_set(d_inode(dentry), FI_PROJ_INHERIT) &&
sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
f2fs_statfs_project(sb, F2FS_I(d_inode(dentry))->i_projid, buf);
}
#endif
return 0;
}
static inline void f2fs_show_quota_options(
struct seq_file *seq,
struct super_block *sb)
{
#ifdef CONFIG_QUOTA
struct f2fs_sb_info *sbi = F2FS_SB(sb);
if (F2FS_OPTION(sbi).s_jquota_fmt) {
char *fmtname =
"";
switch (F2FS_OPTION(sbi).s_jquota_fmt) {
case QFMT_VFS_OLD:
fmtname =
"vfsold";
break;
case QFMT_VFS_V0:
fmtname =
"vfsv0";
break;
case QFMT_VFS_V1:
fmtname =
"vfsv1";
break;
}
seq_printf(seq,
",jqfmt=%s", fmtname);
}
if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA])
seq_show_option(seq,
"usrjquota",
F2FS_OPTION(sbi).s_qf_names[USRQUOTA]);
if (F2FS_OPTION(sbi).s_qf_names[GRPQUOTA])
seq_show_option(seq,
"grpjquota",
F2FS_OPTION(sbi).s_qf_names[GRPQUOTA]);
if (F2FS_OPTION(sbi).s_qf_names[PRJQUOTA])
seq_show_option(seq,
"prjjquota",
F2FS_OPTION(sbi).s_qf_names[PRJQUOTA]);
#endif
}
#ifdef CONFIG_F2FS_FS_COMPRESSION
static inline void f2fs_show_compress_options(
struct seq_file *seq,
struct super_block *sb)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
char *algtype =
"";
int i;
if (!f2fs_sb_has_compression(sbi))
return;
switch (F2FS_OPTION(sbi).compress_algorithm) {
case COMPRESS_LZO:
algtype =
"lzo";
break;
case COMPRESS_LZ4:
algtype =
"lz4";
break;
case COMPRESS_ZSTD:
algtype =
"zstd";
break;
case COMPRESS_LZORLE:
algtype =
"lzo-rle";
break;
}
seq_printf(seq,
",compress_algorithm=%s", algtype);
if (F2FS_OPTION(sbi).compress_level)
seq_printf(seq,
":%d", F2FS_OPTION(sbi).compress_level);
seq_printf(seq,
",compress_log_size=%u",
F2FS_OPTION(sbi).compress_log_size);
for (i = 0; i < F2FS_OPTION(sbi).compress_ext_cnt; i++) {
seq_printf(seq,
",compress_extension=%s",
F2FS_OPTION(sbi).extensions[i]);
}
for (i = 0; i < F2FS_OPTION(sbi).nocompress_ext_cnt; i++) {
seq_printf(seq,
",nocompress_extension=%s",
F2FS_OPTION(sbi).noextensions[i]);
}
if (F2FS_OPTION(sbi).compress_chksum)
seq_puts(seq,
",compress_chksum");
if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_FS)
seq_printf(seq,
",compress_mode=%s",
"fs");
else if (F2FS_OPTION(sbi).compress_mode == COMPR_MODE_USER)
seq_printf(seq,
",compress_mode=%s",
"user");
if (test_opt(sbi, COMPRESS_CACHE))
seq_puts(seq,
",compress_cache");
}
#endif
static int f2fs_show_options(
struct seq_file *seq,
struct dentry *root)
{
struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);
if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_SYNC)
seq_printf(seq,
",background_gc=%s",
"sync");
else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_ON)
seq_printf(seq,
",background_gc=%s",
"on");
else if (F2FS_OPTION(sbi).bggc_mode == BGGC_MODE_OFF)
seq_printf(seq,
",background_gc=%s",
"off");
if (test_opt(sbi, GC_MERGE))
seq_puts(seq,
",gc_merge");
else
seq_puts(seq,
",nogc_merge");
if (test_opt(sbi, DISABLE_ROLL_FORWARD))
seq_puts(seq,
",disable_roll_forward");
if (test_opt(sbi, NORECOVERY))
seq_puts(seq,
",norecovery");
if (test_opt(sbi, DISCARD)) {
seq_puts(seq,
",discard");
if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_BLOCK)
seq_printf(seq,
",discard_unit=%s",
"block");
else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SEGMENT)
seq_printf(seq,
",discard_unit=%s",
"segment");
else if (F2FS_OPTION(sbi).discard_unit == DISCARD_UNIT_SECTION)
seq_printf(seq,
",discard_unit=%s",
"section");
}
else {
seq_puts(seq,
",nodiscard");
}
#ifdef CONFIG_F2FS_FS_XATTR
if (test_opt(sbi, XATTR_USER))
seq_puts(seq,
",user_xattr");
else
seq_puts(seq,
",nouser_xattr");
if (test_opt(sbi, INLINE_XATTR))
seq_puts(seq,
",inline_xattr");
else
seq_puts(seq,
",noinline_xattr");
if (test_opt(sbi, INLINE_XATTR_SIZE))
seq_printf(seq,
",inline_xattr_size=%u",
F2FS_OPTION(sbi).inline_xattr_size);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
if (test_opt(sbi, POSIX_ACL))
seq_puts(seq,
",acl");
else
seq_puts(seq,
",noacl");
#endif
if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
seq_puts(seq,
",disable_ext_identify");
if (test_opt(sbi, INLINE_DATA))
seq_puts(seq,
",inline_data");
else
seq_puts(seq,
",noinline_data");
if (test_opt(sbi, INLINE_DENTRY))
seq_puts(seq,
",inline_dentry");
else
seq_puts(seq,
",noinline_dentry");
if (test_opt(sbi, FLUSH_MERGE))
seq_puts(seq,
",flush_merge");
else
seq_puts(seq,
",noflush_merge");
if (test_opt(sbi, NOBARRIER))
seq_puts(seq,
",nobarrier");
else
seq_puts(seq,
",barrier");
if (test_opt(sbi, FASTBOOT))
seq_puts(seq,
",fastboot");
if (test_opt(sbi, READ_EXTENT_CACHE))
seq_puts(seq,
",extent_cache");
else
--> --------------------
--> maximum size reached
--> --------------------