Commit f40f31ca authored by Linus Torvalds

Merge tag 'f2fs-for-5.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs updates from Jaegeuk Kim:
 "In this round, we've mainly focused on fixing bugs and addressing
  issues in recently introduced compression support.

  Enhancement:
   - add zstd support, and set LZ4 by default
   - add ioctl() to show # of compressed blocks (see the sketch after this message)
   - show mount time in debugfs
   - replace rwsem with spinlock
   - avoid lock contention in DIO reads

  Some major bug fixes wrt compression:
   - compressed block count
   - memory access and leak
   - remove obsolete fields
   - flag controls

  Other bug fixes and clean ups:
   - fix overflow when handling .flags in inode_info
   - fix SPO (sudden power-off) issue during resize FS flow
   - fix compression with fsverity enabled
   - potential deadlock when writing compressed pages
   - show missing mount options"
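
A minimal userspace sketch of the ioctl() mentioned above, which reports how
many on-disk blocks back a compressed file. The request name and encoding
below (F2FS_IOCTL_MAGIC 0xf5, command 17, __u64 payload) are assumptions
based on the 5.7 f2fs UAPI headers; verify them against your tree before
relying on this:

    /* compr_blocks.c: print the compressed block count of an f2fs file */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>

    #ifndef F2FS_IOC_GET_COMPRESS_BLOCKS
    #define F2FS_IOCTL_MAGIC             0xf5
    /* assumed encoding, copied from fs/f2fs/f2fs.h as of 5.7 */
    #define F2FS_IOC_GET_COMPRESS_BLOCKS _IOR(F2FS_IOCTL_MAGIC, 17, __u64)
    #endif

    int main(int argc, char **argv)
    {
            __u64 blocks = 0;
            int fd;

            if (argc != 2) {
                    fprintf(stderr, "usage: %s <file>\n", argv[0]);
                    return 1;
            }
            fd = open(argv[1], O_RDONLY);
            if (fd < 0 || ioctl(fd, F2FS_IOC_GET_COMPRESS_BLOCKS, &blocks) < 0) {
                    perror("F2FS_IOC_GET_COMPRESS_BLOCKS");
                    return 1;
            }
            printf("%s: %llu compressed blocks\n", argv[1],
                   (unsigned long long)blocks);
            close(fd);
            return 0;
    }

The count is in filesystem blocks rather than bytes, and a file that was
never compressed should report zero.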

* tag 'f2fs-for-5.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (66 commits)
  f2fs: keep inline_data when compression conversion
  f2fs: fix to disable compression on directory
  f2fs: add missing CONFIG_F2FS_FS_COMPRESSION
  f2fs: switch discard_policy.timeout to bool type
  f2fs: fix to verify tpage before releasing in f2fs_free_dic()
  f2fs: show compression in statx
  f2fs: clean up dic->tpages assignment
  f2fs: compress: support zstd compress algorithm
  f2fs: compress: add .{init,destroy}_decompress_ctx callback
  f2fs: compress: fix to call missing destroy_compress_ctx()
  f2fs: change default compression algorithm
  f2fs: clean up {cic,dic}.ref handling
  f2fs: fix to use f2fs_readpage_limit() in f2fs_read_multi_pages()
  f2fs: xattr.h: Make stub helpers inline
  f2fs: fix to avoid double unlock
  f2fs: fix potential .flags overflow on 32bit architecture
  f2fs: fix NULL pointer dereference in f2fs_verity_work()
  f2fs: fix to clear PG_error if fsverity failed
  f2fs: don't call fscrypt_get_encryption_info() explicitly in f2fs_tmpfile()
  f2fs: don't trigger data flush in foreground operation
  ...
parents 763dede1 531dfae5
@@ -318,3 +318,8 @@ Date: September 2019
Contact: "Hridya Valsaraju" <hridya@google.com>
Description: Average number of valid blocks.
Available when CONFIG_F2FS_STAT_FS=y.
What: /sys/fs/f2fs/<disk>/mounted_time_sec
Date: February 2020
Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
Description: Show the mounted time in secs of this partition.
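The new node can be read like any other f2fs sysfs attribute. A short C
sketch, assuming a partition named sda1 and that the value is a plain
decimal count of seconds, per the description above:

    /* print how long the f2fs partition sda1 has been mounted */
    #include <stdio.h>

    int main(void)
    {
            unsigned long long secs;
            FILE *f = fopen("/sys/fs/f2fs/sda1/mounted_time_sec", "r");

            if (!f) {
                    perror("mounted_time_sec");
                    return 1;
            }
            if (fscanf(f, "%llu", &secs) != 1) {
                    fclose(f);
                    return 1;
            }
            fclose(f);
            printf("mounted for %llu seconds\n", secs);
            return 0;
    }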
@@ -243,8 +243,8 @@ checkpoint=%s[:%u[%]] Set to "disable" to turn off checkpointing. Set to "enabl
hide up to all remaining free space. The actual space that
would be unusable can be viewed at /sys/fs/f2fs/<disk>/unusable
This space is reclaimed once checkpoint=enable.
-compress_algorithm=%s Control compress algorithm, currently f2fs supports "lzo"
-and "lz4" algorithm.
+compress_algorithm=%s Control compress algorithm, currently f2fs supports "lzo",
+"lz4" and "zstd" algorithm.
compress_log_size=%u Support configuring compress cluster size, the size will
be 4KB * (1 << %u), 16KB is minimum size, also it's
default size.
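
Taken together, the new algorithm can be selected at mount time roughly as
in the sketch below. The device and mount point are placeholders, and
compress_log_size=2 requests 4KB * (1 << 2) = 16KB clusters, the minimum
and default noted above. The image must also carry the compression feature
(e.g. mkfs.f2fs -O extra_attr,compression), and compression is still applied
per file via the compression attribute or compress_extension; treat those
details as assumptions to check against the full documentation:

    /* mount an f2fs volume with zstd compression selected */
    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            const char *opts = "compress_algorithm=zstd,compress_log_size=2";

            if (mount("/dev/sdb1", "/mnt/f2fs", "f2fs", 0, opts)) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }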
@@ -118,3 +118,12 @@ config F2FS_FS_LZ4
default y
help
Support LZ4 compress algorithm, if unsure, say Y.
config F2FS_FS_ZSTD
bool "ZSTD compression support"
depends on F2FS_FS_COMPRESSION
select ZSTD_COMPRESS
select ZSTD_DECOMPRESS
default y
help
Support ZSTD compress algorithm, if unsure, say Y.
@@ -50,9 +50,6 @@ struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
return page;
}
-/*
- * We guarantee no failure on the returned page.
- */
static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
bool is_meta)
{
@@ -206,7 +203,7 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
}
/*
- * Readahead CP/NAT/SIT/SSA pages
+ * Readahead CP/NAT/SIT/SSA/POR pages
*/
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
int type, bool sync)
@@ -898,7 +895,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
return -ENOMEM;
/*
* Finding out valid cp block involves read both
- * sets( cp pack1 and cp pack 2)
+ * sets( cp pack 1 and cp pack 2)
*/
cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);
@@ -1250,20 +1247,20 @@ static void unblock_operations(struct f2fs_sb_info *sbi)
f2fs_unlock_all(sbi);
}
-void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
+void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
{
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
-if (!get_pages(sbi, F2FS_WB_CP_DATA))
+if (!get_pages(sbi, type))
break;
if (unlikely(f2fs_cp_error(sbi)))
break;
-io_schedule_timeout(5*HZ);
+io_schedule_timeout(DEFAULT_IO_TIMEOUT);
}
finish_wait(&sbi->cp_wait, &wait);
}
@@ -1301,10 +1298,14 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
else
__clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
-if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
-is_sbi_flag_set(sbi, SBI_IS_RESIZEFS))
+if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
__set_ckpt_flags(ckpt, CP_FSCK_FLAG);
if (is_sbi_flag_set(sbi, SBI_IS_RESIZEFS))
__set_ckpt_flags(ckpt, CP_RESIZEFS_FLAG);
else
__clear_ckpt_flags(ckpt, CP_RESIZEFS_FLAG);
if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
__set_ckpt_flags(ckpt, CP_DISABLED_FLAG);
else
@@ -1384,13 +1385,8 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* Flush all the NAT/SIT pages */
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
-f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
-!f2fs_cp_error(sbi));
-/*
- * modify checkpoint
- * version number is already updated
- */
+/* start to update checkpoint, cp ver is already updated previously */
ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true));
ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
@@ -1493,11 +1489,11 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* Here, we have one bio having CP pack except cp pack 2 page */
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
-f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_META) &&
-!f2fs_cp_error(sbi));
+/* Wait for all dirty meta pages to be submitted for IO */
+f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META);
/* wait for previous submitted meta pages writeback */
-f2fs_wait_on_all_pages_writeback(sbi);
+f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
/* flush all device cache */
err = f2fs_flush_device_cache(sbi);
@@ -1506,7 +1502,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
/* barrier and flush checkpoint cp pack 2 page if it can */
commit_checkpoint(sbi, ckpt, start_blk);
-f2fs_wait_on_all_pages_writeback(sbi);
+f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);
/*
* invalidate intermediate page cache borrowed from meta inode which are
@@ -1543,9 +1539,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
}
-/*
- * We guarantee that this checkpoint procedure will not fail.
- */
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
@@ -1613,7 +1606,6 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
f2fs_flush_sit_entries(sbi, cpc);
/* unlock all the fs_lock[] in do_checkpoint() */
err = do_checkpoint(sbi, cpc);
if (err)
f2fs_release_discard_addrs(sbi);
@@ -1626,7 +1618,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
if (cpc->reason & CP_RECOVERY)
f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);
-/* do checkpoint periodically */
+/* update CP_TIME to trigger checkpoint periodically */
f2fs_update_time(sbi, CP_TIME);
trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
@@ -11,6 +11,7 @@
#include <linux/backing-dev.h>
#include <linux/lzo.h>
#include <linux/lz4.h>
#include <linux/zstd.h>
#include "f2fs.h"
#include "node.h"
@@ -20,6 +21,8 @@ struct f2fs_compress_ops {
int (*init_compress_ctx)(struct compress_ctx *cc);
void (*destroy_compress_ctx)(struct compress_ctx *cc);
int (*compress_pages)(struct compress_ctx *cc);
int (*init_decompress_ctx)(struct decompress_io_ctx *dic);
void (*destroy_decompress_ctx)(struct decompress_io_ctx *dic);
int (*decompress_pages)(struct decompress_io_ctx *dic);
};
@@ -52,7 +55,7 @@ bool f2fs_is_compressed_page(struct page *page)
}
static void f2fs_set_compressed_page(struct page *page,
-struct inode *inode, pgoff_t index, void *data, refcount_t *r)
+struct inode *inode, pgoff_t index, void *data)
{
SetPagePrivate(page);
set_page_private(page, (unsigned long)data);
@@ -60,8 +63,6 @@ static void f2fs_set_compressed_page(struct page *page,
/* i_crypto_info and iv index */
page->index = index;
page->mapping = inode->i_mapping;
-if (r)
-refcount_inc(r);
}
static void f2fs_put_compressed_page(struct page *page)
@@ -291,6 +292,165 @@ static const struct f2fs_compress_ops f2fs_lz4_ops = {
};
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
#define F2FS_ZSTD_DEFAULT_CLEVEL 1
static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
ZSTD_parameters params;
ZSTD_CStream *stream;
void *workspace;
unsigned int workspace_size;
params = ZSTD_getParams(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen, 0);
workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
workspace_size, GFP_NOFS);
if (!workspace)
return -ENOMEM;
stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
if (!stream) {
printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
__func__);
kvfree(workspace);
return -EIO;
}
cc->private = workspace;
cc->private2 = stream;
cc->clen = cc->rlen - PAGE_SIZE - COMPRESS_HEADER_SIZE;
return 0;
}
static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
{
kvfree(cc->private);
cc->private = NULL;
cc->private2 = NULL;
}
static int zstd_compress_pages(struct compress_ctx *cc)
{
ZSTD_CStream *stream = cc->private2;
ZSTD_inBuffer inbuf;
ZSTD_outBuffer outbuf;
int src_size = cc->rlen;
int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
int ret;
inbuf.pos = 0;
inbuf.src = cc->rbuf;
inbuf.size = src_size;
outbuf.pos = 0;
outbuf.dst = cc->cbuf->cdata;
outbuf.size = dst_size;
ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
if (ZSTD_isError(ret)) {
printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
__func__, ZSTD_getErrorCode(ret));
return -EIO;
}
ret = ZSTD_endStream(stream, &outbuf);
if (ZSTD_isError(ret)) {
printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
__func__, ZSTD_getErrorCode(ret));
return -EIO;
}
cc->clen = outbuf.pos;
return 0;
}
static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
ZSTD_DStream *stream;
void *workspace;
unsigned int workspace_size;
workspace_size = ZSTD_DStreamWorkspaceBound(MAX_COMPRESS_WINDOW_SIZE);
workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
workspace_size, GFP_NOFS);
if (!workspace)
return -ENOMEM;
stream = ZSTD_initDStream(MAX_COMPRESS_WINDOW_SIZE,
workspace, workspace_size);
if (!stream) {
printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
__func__);
kvfree(workspace);
return -EIO;
}
dic->private = workspace;
dic->private2 = stream;
return 0;
}
static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
{
kvfree(dic->private);
dic->private = NULL;
dic->private2 = NULL;
}
static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
ZSTD_DStream *stream = dic->private2;
ZSTD_inBuffer inbuf;
ZSTD_outBuffer outbuf;
int ret;
inbuf.pos = 0;
inbuf.src = dic->cbuf->cdata;
inbuf.size = dic->clen;
outbuf.pos = 0;
outbuf.dst = dic->rbuf;
outbuf.size = dic->rlen;
ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
if (ZSTD_isError(ret)) {
printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
__func__, ZSTD_getErrorCode(ret));
return -EIO;
}
if (dic->rlen != outbuf.pos) {
printk_ratelimited("%sF2FS-fs (%s): %s ZSTD invalid rlen:%zu, "
"expected:%lu\n", KERN_ERR,
F2FS_I_SB(dic->inode)->sb->s_id,
__func__, dic->rlen,
PAGE_SIZE << dic->log_cluster_size);
return -EIO;
}
return 0;
}
static const struct f2fs_compress_ops f2fs_zstd_ops = {
.init_compress_ctx = zstd_init_compress_ctx,
.destroy_compress_ctx = zstd_destroy_compress_ctx,
.compress_pages = zstd_compress_pages,
.init_decompress_ctx = zstd_init_decompress_ctx,
.destroy_decompress_ctx = zstd_destroy_decompress_ctx,
.decompress_pages = zstd_decompress_pages,
};
#endif
static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#ifdef CONFIG_F2FS_FS_LZO
&f2fs_lzo_ops,
@@ -302,6 +462,11 @@ static const struct f2fs_compress_ops *f2fs_cops[COMPRESS_MAX] = {
#else
NULL,
#endif
#ifdef CONFIG_F2FS_FS_ZSTD
&f2fs_zstd_ops,
#else
NULL,
#endif
};
bool f2fs_is_compress_backend_ready(struct inode *inode)
@@ -334,9 +499,11 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
trace_f2fs_compress_pages_start(cc->inode, cc->cluster_idx,
cc->cluster_size, fi->i_compress_algorithm);
ret = cops->init_compress_ctx(cc);
if (ret)
goto out;
if (cops->init_compress_ctx) {
ret = cops->init_compress_ctx(cc);
if (ret)
goto out;
}
max_len = COMPRESS_HEADER_SIZE + cc->clen;
cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
@@ -380,21 +547,27 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
}
cc->cbuf->clen = cpu_to_le32(cc->clen);
cc->cbuf->chksum = cpu_to_le32(0);
for (i = 0; i < COMPRESS_DATA_RESERVED_SIZE; i++)
cc->cbuf->reserved[i] = cpu_to_le32(0);
+nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
+/* zero out any unused part of the last page */
+memset(&cc->cbuf->cdata[cc->clen], 0,
+(nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));
vunmap(cc->cbuf);
vunmap(cc->rbuf);
-nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
for (i = nr_cpages; i < cc->nr_cpages; i++) {
f2fs_put_compressed_page(cc->cpages[i]);
cc->cpages[i] = NULL;
}
if (cops->destroy_compress_ctx)
cops->destroy_compress_ctx(cc);
cc->nr_cpages = nr_cpages;
trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
@@ -413,7 +586,8 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
kfree(cc->cpages);
cc->cpages = NULL;
destroy_compress_ctx:
cops->destroy_compress_ctx(cc);
if (cops->destroy_compress_ctx)
cops->destroy_compress_ctx(cc);
out:
trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
cc->clen, ret);
@@ -447,10 +621,16 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
goto out_free_dic;
}
if (cops->init_decompress_ctx) {
ret = cops->init_decompress_ctx(dic);
if (ret)
goto out_free_dic;
}
dic->rbuf = vmap(dic->tpages, dic->cluster_size, VM_MAP, PAGE_KERNEL);
if (!dic->rbuf) {
ret = -ENOMEM;
-goto out_free_dic;
+goto destroy_decompress_ctx;
}
dic->cbuf = vmap(dic->cpages, dic->nr_cpages, VM_MAP, PAGE_KERNEL_RO);
@@ -473,7 +653,12 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
vunmap(dic->cbuf);
out_vunmap_rbuf:
vunmap(dic->rbuf);
destroy_decompress_ctx:
if (cops->destroy_decompress_ctx)
cops->destroy_decompress_ctx(dic);
out_free_dic:
if (verity)
refcount_set(&dic->ref, dic->nr_cpages);
if (!verity)
f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
ret, false);
@@ -532,8 +717,7 @@ static bool __cluster_may_compress(struct compress_ctx *cc)
return true;
}
-/* return # of compressed block addresses */
-static int f2fs_compressed_blocks(struct compress_ctx *cc)
+static int __f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
{
struct dnode_of_data dn;
int ret;
@@ -554,10 +738,15 @@ static int f2fs_compressed_blocks(struct compress_ctx *cc)
for (i = 1; i < cc->cluster_size; i++) {
block_t blkaddr;
-blkaddr = datablock_addr(dn.inode,
+blkaddr = data_blkaddr(dn.inode,
dn.node_page, dn.ofs_in_node + i);
-if (blkaddr != NULL_ADDR)
-ret++;
+if (compr) {
+if (__is_valid_data_blkaddr(blkaddr))
+ret++;
+} else {
+if (blkaddr != NULL_ADDR)
+ret++;
+}
}
}
fail:
@@ -565,6 +754,18 @@ static int f2fs_compressed_blocks(struct compress_ctx *cc)
return ret;
}
/* return # of compressed blocks in compressed cluster */
static int f2fs_compressed_blocks(struct compress_ctx *cc)
{
return __f2fs_cluster_blocks(cc, true);
}
/* return # of valid blocks in compressed cluster */
static int f2fs_cluster_blocks(struct compress_ctx *cc, bool compr)
{
return __f2fs_cluster_blocks(cc, false);
}
int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
{
struct compress_ctx cc = {
@@ -574,7 +775,7 @@ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index)
.cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size,
};
return f2fs_compressed_blocks(&cc);
return f2fs_cluster_blocks(&cc, false);
}
static bool cluster_may_compress(struct compress_ctx *cc)
@@ -623,7 +824,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
bool prealloc;
retry:
-ret = f2fs_compressed_blocks(cc);
+ret = f2fs_cluster_blocks(cc, false);
if (ret <= 0)
return ret;
@@ -653,7 +854,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
struct bio *bio = NULL;
ret = f2fs_read_multi_pages(cc, &bio, cc->cluster_size,
&last_block_in_bio, false);
&last_block_in_bio, false, true);
f2fs_destroy_compress_ctx(cc);
if (ret)
goto release_pages;
@@ -772,7 +973,6 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
.encrypted_page = NULL,
.compressed_page = NULL,
.submitted = false,
-.need_lock = LOCK_RETRY,
.io_type = io_type,
.io_wbc = wbc,
.encrypted = f2fs_encrypted_file(cc->inode),
@@ -785,16 +985,17 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
loff_t psize;
int i, err;
set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
if (!f2fs_trylock_op(sbi))
return -EAGAIN;
f2fs_lock_op(sbi);
set_new_dnode(&dn, cc->inode, NULL, NULL, 0);
err = f2fs_get_dnode_of_data(&dn, start_idx, LOOKUP_NODE);
if (err)
goto out_unlock_op;
for (i = 0; i < cc->cluster_size; i++) {
-if (datablock_addr(dn.inode, dn.node_page,
+if (data_blkaddr(dn.inode, dn.node_page,
dn.ofs_in_node + i) == NULL_ADDR)
goto out_put_dnode;
}
@@ -813,7 +1014,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
cic->inode = inode;
refcount_set(&cic->ref, 1);
refcount_set(&cic->ref, cc->nr_cpages);
cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
cc->log_cluster_size, GFP_NOFS);
if (!cic->rpages)
@@ -823,8 +1024,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
for (i = 0; i < cc->nr_cpages; i++) {
f2fs_set_compressed_page(cc->cpages[i], inode,
cc->rpages[i + 1]->index,
cic, i ? &cic->ref : NULL);
cc->rpages[i + 1]->index, cic);
fio.compressed_page = cc->cpages[i];
if (fio.encrypted) {
fio.page = cc->rpages[i + 1];
@@ -843,9 +1043,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
for (i = 0; i < cc->cluster_size; i++, dn.ofs_in_node++) {
block_t blkaddr;
-blkaddr = datablock_addr(dn.inode, dn.node_page,
-dn.ofs_in_node);
-fio.page = cic->rpages[i];
+blkaddr = f2fs_data_blkaddr(&dn);
+fio.page = cc->rpages[i];
fio.old_blkaddr = blkaddr;
/* cluster header */
@@ -895,10 +1094,10 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
f2fs_put_dnode(&dn);
f2fs_unlock_op(sbi);
down_write(&fi->i_sem);
spin_lock(&fi->i_size_lock);
if (fi->last_disk_size < psize)
fi->last_disk_size = psize;
up_write(&fi->i_sem);
spin_unlock(&fi->i_size_lock);
f2fs_put_rpages(cc);
f2fs_destroy_compress_ctx(cc);
@@ -984,24 +1183,30 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
unlock_page(cc->rpages[i]);
ret = 0;
} else if (ret == -EAGAIN) {
/*
* for quota file, just redirty left pages to
* avoid deadlock caused by cluster update race
* from foreground operation.
*/
if (IS_NOQUOTA(cc->inode)) {
err = 0;
goto out_err;
}
ret = 0;
cond_resched();
-congestion_wait(BLK_RW_ASYNC, HZ/50);
+congestion_wait(BLK_RW_ASYNC,
+DEFAULT_IO_TIMEOUT);
lock_page(cc->rpages[i]);
clear_page_dirty_for_io(cc->rpages[i]);
goto retry_write;
}
err = ret;