Commit 25c4e6c3 authored by Linus Torvalds
Browse files

Merge tag 'for-f2fs-4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs

Pull f2fs updates from Jaegeuk Kim:
 "This round introduces several interesting features such as on-disk NAT
  bitmaps, IO alignment, and a discard thread. And it includes a couple
  of major bug fixes as below.

  Enhancements:

   - introduce on-disk bitmaps to avoid scanning NAT blocks when getting
     free nids

   - support IO alignment to prepare open-channel SSD integration in
     future

   - introduce a discard thread to avoid long latency during checkpoint
     and fstrim

   - use SSR for warm node and enable inline_xattr by default

   - introduce in-memory bitmaps to check FS consistency for debugging

   - improve write_begin by avoiding needless read IO

  Bug fixes:

   - fix broken zone_reset behavior for SMR drive

   - fix wrong victim selection policy during GC

   - fix missing behavior when preparing discard commands

   - fix bugs in atomic write support and fiemap

   - workaround to handle multiple f2fs_add_link calls having same name

  ... and it includes a bunch of clean-up patches as well"

* tag 'for-f2fs-4.11' of git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs: (97 commits)
  f2fs: avoid to flush nat journal entries
  f2fs: avoid to issue redundant discard commands
  f2fs: fix a plint compile warning
  f2fs: add f2fs_drop_inode tracepoint
  f2fs: Fix zoned block device support
  f2fs: remove redundant set_page_dirty()
  f2fs: fix to enlarge size of write_io_dummy mempool
  f2fs: fix memory leak of write_io_dummy mempool during umount
  f2fs: fix to update F2FS_{CP_}WB_DATA count correctly
  f2fs: use MAX_FREE_NIDS for the free nids target
  f2fs: introduce free nid bitmap
  f2fs: new helper cur_cp_crc() getting crc in f2fs_checkpoint
  f2fs: update the comment of default nr_pages to skipping
  f2fs: drop the duplicate pval in f2fs_getxattr
  f2fs: Don't update the xattr data that same as the exist
  f2fs: kill __is_extent_same
  f2fs: avoid bggc->fggc when enough free segments are avaliable after cp
  f2fs: select target segment with closer temperature in SSR mode
  f2fs: show simple call stack in fault injection message
  f2fs: no need lock_op in f2fs_write_inline_data
  ...
parents 6053dc98 900f7362
...@@ -125,13 +125,14 @@ active_logs=%u Support configuring the number of active logs. In the ...@@ -125,13 +125,14 @@ active_logs=%u Support configuring the number of active logs. In the
disable_ext_identify Disable the extension list configured by mkfs, so f2fs disable_ext_identify Disable the extension list configured by mkfs, so f2fs
does not aware of cold files such as media files. does not aware of cold files such as media files.
inline_xattr Enable the inline xattrs feature. inline_xattr Enable the inline xattrs feature.
noinline_xattr Disable the inline xattrs feature.
inline_data Enable the inline data feature: New created small(<~3.4k) inline_data Enable the inline data feature: New created small(<~3.4k)
files can be written into inode block. files can be written into inode block.
inline_dentry Enable the inline dir feature: data in new created inline_dentry Enable the inline dir feature: data in new created
directory entries can be written into inode block. The directory entries can be written into inode block. The
space of inode block which is used to store inline space of inode block which is used to store inline
dentries is limited to ~3.4k. dentries is limited to ~3.4k.
noinline_dentry Diable the inline dentry feature. noinline_dentry Disable the inline dentry feature.
flush_merge Merge concurrent cache_flush commands as much as possible flush_merge Merge concurrent cache_flush commands as much as possible
to eliminate redundant command issues. If the underlying to eliminate redundant command issues. If the underlying
device handles the cache_flush command relatively slowly, device handles the cache_flush command relatively slowly,
...@@ -157,6 +158,8 @@ data_flush Enable data flushing before checkpoint in order to ...@@ -157,6 +158,8 @@ data_flush Enable data flushing before checkpoint in order to
mode=%s Control block allocation mode which supports "adaptive" mode=%s Control block allocation mode which supports "adaptive"
and "lfs". In "lfs" mode, there should be no random and "lfs". In "lfs" mode, there should be no random
writes towards main area. writes towards main area.
io_bits=%u Set the bit size of write IO requests. It should be set
with "mode=lfs".
================================================================================ ================================================================================
DEBUGFS ENTRIES DEBUGFS ENTRIES
...@@ -174,7 +177,7 @@ f2fs. Each file shows the whole f2fs information. ...@@ -174,7 +177,7 @@ f2fs. Each file shows the whole f2fs information.
SYSFS ENTRIES SYSFS ENTRIES
================================================================================ ================================================================================
Information about mounted f2f2 file systems can be found in Information about mounted f2fs file systems can be found in
/sys/fs/f2fs. Each mounted filesystem will have a directory in /sys/fs/f2fs. Each mounted filesystem will have a directory in
/sys/fs/f2fs based on its device name (i.e., /sys/fs/f2fs/sda). /sys/fs/f2fs based on its device name (i.e., /sys/fs/f2fs/sda).
The files in each per-device directory are shown in table below. The files in each per-device directory are shown in table below.
......
...@@ -249,7 +249,8 @@ static int f2fs_write_meta_page(struct page *page, ...@@ -249,7 +249,8 @@ static int f2fs_write_meta_page(struct page *page,
dec_page_count(sbi, F2FS_DIRTY_META); dec_page_count(sbi, F2FS_DIRTY_META);
if (wbc->for_reclaim) if (wbc->for_reclaim)
f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, META, WRITE); f2fs_submit_merged_bio_cond(sbi, page->mapping->host,
0, page->index, META, WRITE);
unlock_page(page); unlock_page(page);
...@@ -493,6 +494,7 @@ int acquire_orphan_inode(struct f2fs_sb_info *sbi) ...@@ -493,6 +494,7 @@ int acquire_orphan_inode(struct f2fs_sb_info *sbi)
#ifdef CONFIG_F2FS_FAULT_INJECTION #ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_ORPHAN)) { if (time_to_inject(sbi, FAULT_ORPHAN)) {
spin_unlock(&im->ino_lock); spin_unlock(&im->ino_lock);
f2fs_show_injection_info(FAULT_ORPHAN);
return -ENOSPC; return -ENOSPC;
} }
#endif #endif
...@@ -681,8 +683,7 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr, ...@@ -681,8 +683,7 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
return -EINVAL; return -EINVAL;
} }
crc = le32_to_cpu(*((__le32 *)((unsigned char *)*cp_block crc = cur_cp_crc(*cp_block);
+ crc_offset)));
if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) { if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value"); f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
return -EINVAL; return -EINVAL;
...@@ -891,7 +892,7 @@ int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type) ...@@ -891,7 +892,7 @@ int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA)); F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
return 0; return 0;
} }
fi = list_entry(head->next, struct f2fs_inode_info, dirty_list); fi = list_first_entry(head, struct f2fs_inode_info, dirty_list);
inode = igrab(&fi->vfs_inode); inode = igrab(&fi->vfs_inode);
spin_unlock(&sbi->inode_lock[type]); spin_unlock(&sbi->inode_lock[type]);
if (inode) { if (inode) {
...@@ -924,7 +925,7 @@ int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi) ...@@ -924,7 +925,7 @@ int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
spin_unlock(&sbi->inode_lock[DIRTY_META]); spin_unlock(&sbi->inode_lock[DIRTY_META]);
return 0; return 0;
} }
fi = list_entry(head->next, struct f2fs_inode_info, fi = list_first_entry(head, struct f2fs_inode_info,
gdirty_list); gdirty_list);
inode = igrab(&fi->vfs_inode); inode = igrab(&fi->vfs_inode);
spin_unlock(&sbi->inode_lock[DIRTY_META]); spin_unlock(&sbi->inode_lock[DIRTY_META]);
...@@ -998,8 +999,6 @@ static int block_operations(struct f2fs_sb_info *sbi) ...@@ -998,8 +999,6 @@ static int block_operations(struct f2fs_sb_info *sbi)
static void unblock_operations(struct f2fs_sb_info *sbi) static void unblock_operations(struct f2fs_sb_info *sbi)
{ {
up_write(&sbi->node_write); up_write(&sbi->node_write);
build_free_nids(sbi, false);
f2fs_unlock_all(sbi); f2fs_unlock_all(sbi);
} }
...@@ -1025,6 +1024,10 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -1025,6 +1024,10 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
spin_lock(&sbi->cp_lock); spin_lock(&sbi->cp_lock);
if (cpc->reason == CP_UMOUNT && ckpt->cp_pack_total_block_count >
sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks)
disable_nat_bits(sbi, false);
if (cpc->reason == CP_UMOUNT) if (cpc->reason == CP_UMOUNT)
__set_ckpt_flags(ckpt, CP_UMOUNT_FLAG); __set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
else else
...@@ -1137,6 +1140,28 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -1137,6 +1140,28 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
start_blk = __start_cp_next_addr(sbi); start_blk = __start_cp_next_addr(sbi);
/* write nat bits */
if (enabled_nat_bits(sbi, cpc)) {
__u64 cp_ver = cur_cp_version(ckpt);
unsigned int i;
block_t blk;
cp_ver |= ((__u64)crc32 << 32);
*(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver);
blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks;
for (i = 0; i < nm_i->nat_bits_blocks; i++)
update_meta_page(sbi, nm_i->nat_bits +
(i << F2FS_BLKSIZE_BITS), blk + i);
/* Flush all the NAT BITS pages */
while (get_pages(sbi, F2FS_DIRTY_META)) {
sync_meta_pages(sbi, META, LONG_MAX);
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
}
}
/* need to wait for end_io results */ /* need to wait for end_io results */
wait_on_all_pages_writeback(sbi); wait_on_all_pages_writeback(sbi);
if (unlikely(f2fs_cp_error(sbi))) if (unlikely(f2fs_cp_error(sbi)))
...@@ -1248,15 +1273,20 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -1248,15 +1273,20 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
f2fs_flush_merged_bios(sbi); f2fs_flush_merged_bios(sbi);
/* this is the case of multiple fstrims without any changes */ /* this is the case of multiple fstrims without any changes */
if (cpc->reason == CP_DISCARD && !is_sbi_flag_set(sbi, SBI_IS_DIRTY)) { if (cpc->reason == CP_DISCARD) {
f2fs_bug_on(sbi, NM_I(sbi)->dirty_nat_cnt); if (!exist_trim_candidates(sbi, cpc)) {
f2fs_bug_on(sbi, SIT_I(sbi)->dirty_sentries); unblock_operations(sbi);
f2fs_bug_on(sbi, prefree_segments(sbi)); goto out;
flush_sit_entries(sbi, cpc); }
clear_prefree_segments(sbi, cpc);
f2fs_wait_all_discard_bio(sbi); if (NM_I(sbi)->dirty_nat_cnt == 0 &&
unblock_operations(sbi); SIT_I(sbi)->dirty_sentries == 0 &&
goto out; prefree_segments(sbi) == 0) {
flush_sit_entries(sbi, cpc);
clear_prefree_segments(sbi, cpc);
unblock_operations(sbi);
goto out;
}
} }
/* /*
...@@ -1268,17 +1298,15 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) ...@@ -1268,17 +1298,15 @@ int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver); ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);
/* write cached NAT/SIT entries to NAT/SIT area */ /* write cached NAT/SIT entries to NAT/SIT area */
flush_nat_entries(sbi); flush_nat_entries(sbi, cpc);
flush_sit_entries(sbi, cpc); flush_sit_entries(sbi, cpc);
/* unlock all the fs_lock[] in do_checkpoint() */ /* unlock all the fs_lock[] in do_checkpoint() */
err = do_checkpoint(sbi, cpc); err = do_checkpoint(sbi, cpc);
if (err) { if (err)
release_discard_addrs(sbi); release_discard_addrs(sbi);
} else { else
clear_prefree_segments(sbi, cpc); clear_prefree_segments(sbi, cpc);
f2fs_wait_all_discard_bio(sbi);
}
unblock_operations(sbi); unblock_operations(sbi);
stat_inc_cp_count(sbi->stat_info); stat_inc_cp_count(sbi->stat_info);
......
...@@ -55,8 +55,10 @@ static void f2fs_read_end_io(struct bio *bio) ...@@ -55,8 +55,10 @@ static void f2fs_read_end_io(struct bio *bio)
int i; int i;
#ifdef CONFIG_F2FS_FAULT_INJECTION #ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
f2fs_show_injection_info(FAULT_IO);
bio->bi_error = -EIO; bio->bi_error = -EIO;
}
#endif #endif
if (f2fs_bio_encrypted(bio)) { if (f2fs_bio_encrypted(bio)) {
...@@ -93,6 +95,17 @@ static void f2fs_write_end_io(struct bio *bio) ...@@ -93,6 +95,17 @@ static void f2fs_write_end_io(struct bio *bio)
struct page *page = bvec->bv_page; struct page *page = bvec->bv_page;
enum count_type type = WB_DATA_TYPE(page); enum count_type type = WB_DATA_TYPE(page);
if (IS_DUMMY_WRITTEN_PAGE(page)) {
set_page_private(page, (unsigned long)NULL);
ClearPagePrivate(page);
unlock_page(page);
mempool_free(page, sbi->write_io_dummy);
if (unlikely(bio->bi_error))
f2fs_stop_checkpoint(sbi, true);
continue;
}
fscrypt_pullback_bio_page(&page, true); fscrypt_pullback_bio_page(&page, true);
if (unlikely(bio->bi_error)) { if (unlikely(bio->bi_error)) {
...@@ -171,10 +184,46 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi, ...@@ -171,10 +184,46 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
struct bio *bio, enum page_type type) struct bio *bio, enum page_type type)
{ {
if (!is_read_io(bio_op(bio))) { if (!is_read_io(bio_op(bio))) {
unsigned int start;
if (f2fs_sb_mounted_blkzoned(sbi->sb) && if (f2fs_sb_mounted_blkzoned(sbi->sb) &&
current->plug && (type == DATA || type == NODE)) current->plug && (type == DATA || type == NODE))
blk_finish_plug(current->plug); blk_finish_plug(current->plug);
if (type != DATA && type != NODE)
goto submit_io;
start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
start %= F2FS_IO_SIZE(sbi);
if (start == 0)
goto submit_io;
/* fill dummy pages */
for (; start < F2FS_IO_SIZE(sbi); start++) {
struct page *page =
mempool_alloc(sbi->write_io_dummy,
GFP_NOIO | __GFP_ZERO | __GFP_NOFAIL);
f2fs_bug_on(sbi, !page);
SetPagePrivate(page);
set_page_private(page, (unsigned long)DUMMY_WRITTEN_PAGE);
lock_page(page);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE)
f2fs_bug_on(sbi, 1);
}
/*
* In the NODE case, we lose next block address chain. So, we
* need to do checkpoint in f2fs_sync_file.
*/
if (type == NODE)
set_sbi_flag(sbi, SBI_NEED_CP);
} }
submit_io:
if (is_read_io(bio_op(bio)))
trace_f2fs_submit_read_bio(sbi->sb, type, bio);
else
trace_f2fs_submit_write_bio(sbi->sb, type, bio);
submit_bio(bio); submit_bio(bio);
} }
...@@ -185,19 +234,19 @@ static void __submit_merged_bio(struct f2fs_bio_info *io) ...@@ -185,19 +234,19 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
if (!io->bio) if (!io->bio)
return; return;
bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
if (is_read_io(fio->op)) if (is_read_io(fio->op))
trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio); trace_f2fs_prepare_read_bio(io->sbi->sb, fio->type, io->bio);
else else
trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio); trace_f2fs_prepare_write_bio(io->sbi->sb, fio->type, io->bio);
bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
__submit_bio(io->sbi, io->bio, fio->type); __submit_bio(io->sbi, io->bio, fio->type);
io->bio = NULL; io->bio = NULL;
} }
static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode, static bool __has_merged_page(struct f2fs_bio_info *io,
struct page *page, nid_t ino) struct inode *inode, nid_t ino, pgoff_t idx)
{ {
struct bio_vec *bvec; struct bio_vec *bvec;
struct page *target; struct page *target;
...@@ -206,7 +255,7 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode, ...@@ -206,7 +255,7 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
if (!io->bio) if (!io->bio)
return false; return false;
if (!inode && !page && !ino) if (!inode && !ino)
return true; return true;
bio_for_each_segment_all(bvec, io->bio, i) { bio_for_each_segment_all(bvec, io->bio, i) {
...@@ -216,10 +265,11 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode, ...@@ -216,10 +265,11 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
else else
target = fscrypt_control_page(bvec->bv_page); target = fscrypt_control_page(bvec->bv_page);
if (idx != target->index)
continue;
if (inode && inode == target->mapping->host) if (inode && inode == target->mapping->host)
return true; return true;
if (page && page == target)
return true;
if (ino && ino == ino_of_node(target)) if (ino && ino == ino_of_node(target))
return true; return true;
} }
...@@ -228,22 +278,21 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode, ...@@ -228,22 +278,21 @@ static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
} }
static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode, static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
struct page *page, nid_t ino, nid_t ino, pgoff_t idx, enum page_type type)
enum page_type type)
{ {
enum page_type btype = PAGE_TYPE_OF_BIO(type); enum page_type btype = PAGE_TYPE_OF_BIO(type);
struct f2fs_bio_info *io = &sbi->write_io[btype]; struct f2fs_bio_info *io = &sbi->write_io[btype];
bool ret; bool ret;
down_read(&io->io_rwsem); down_read(&io->io_rwsem);
ret = __has_merged_page(io, inode, page, ino); ret = __has_merged_page(io, inode, ino, idx);
up_read(&io->io_rwsem); up_read(&io->io_rwsem);
return ret; return ret;
} }
static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
struct inode *inode, struct page *page, struct inode *inode, nid_t ino, pgoff_t idx,
nid_t ino, enum page_type type, int rw) enum page_type type, int rw)
{ {
enum page_type btype = PAGE_TYPE_OF_BIO(type); enum page_type btype = PAGE_TYPE_OF_BIO(type);
struct f2fs_bio_info *io; struct f2fs_bio_info *io;
...@@ -252,16 +301,16 @@ static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, ...@@ -252,16 +301,16 @@ static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
down_write(&io->io_rwsem); down_write(&io->io_rwsem);
if (!__has_merged_page(io, inode, page, ino)) if (!__has_merged_page(io, inode, ino, idx))
goto out; goto out;
/* change META to META_FLUSH in the checkpoint procedure */ /* change META to META_FLUSH in the checkpoint procedure */
if (type >= META_FLUSH) { if (type >= META_FLUSH) {
io->fio.type = META_FLUSH; io->fio.type = META_FLUSH;
io->fio.op = REQ_OP_WRITE; io->fio.op = REQ_OP_WRITE;
io->fio.op_flags = REQ_PREFLUSH | REQ_META | REQ_PRIO; io->fio.op_flags = REQ_META | REQ_PRIO;
if (!test_opt(sbi, NOBARRIER)) if (!test_opt(sbi, NOBARRIER))
io->fio.op_flags |= REQ_FUA; io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
} }
__submit_merged_bio(io); __submit_merged_bio(io);
out: out:
...@@ -271,15 +320,15 @@ static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, ...@@ -271,15 +320,15 @@ static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type, void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
int rw) int rw)
{ {
__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw); __f2fs_submit_merged_bio(sbi, NULL, 0, 0, type, rw);
} }
void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi, void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
struct inode *inode, struct page *page, struct inode *inode, nid_t ino, pgoff_t idx,
nid_t ino, enum page_type type, int rw) enum page_type type, int rw)
{ {
if (has_merged_page(sbi, inode, page, ino, type)) if (has_merged_page(sbi, inode, ino, idx, type))
__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw); __f2fs_submit_merged_bio(sbi, inode, ino, idx, type, rw);
} }
void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi) void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
...@@ -315,13 +364,14 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) ...@@ -315,13 +364,14 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
return 0; return 0;
} }
void f2fs_submit_page_mbio(struct f2fs_io_info *fio) int f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{ {
struct f2fs_sb_info *sbi = fio->sbi; struct f2fs_sb_info *sbi = fio->sbi;
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
struct f2fs_bio_info *io; struct f2fs_bio_info *io;
bool is_read = is_read_io(fio->op); bool is_read = is_read_io(fio->op);
struct page *bio_page; struct page *bio_page;
int err = 0;
io = is_read ? &sbi->read_io : &sbi->write_io[btype]; io = is_read ? &sbi->read_io : &sbi->write_io[btype];
...@@ -331,6 +381,9 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio) ...@@ -331,6 +381,9 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page; bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
/* set submitted = 1 as a return value */
fio->submitted = 1;
if (!is_read) if (!is_read)
inc_page_count(sbi, WB_DATA_TYPE(bio_page)); inc_page_count(sbi, WB_DATA_TYPE(bio_page));
...@@ -342,6 +395,13 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio) ...@@ -342,6 +395,13 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
__submit_merged_bio(io); __submit_merged_bio(io);
alloc_new: alloc_new:
if (io->bio == NULL) { if (io->bio == NULL) {
if ((fio->type == DATA || fio->type == NODE) &&
fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
err = -EAGAIN;
if (!is_read)
dec_page_count(sbi, WB_DATA_TYPE(bio_page));
goto out_fail;
}
io->bio = __bio_alloc(sbi, fio->new_blkaddr, io->bio = __bio_alloc(sbi, fio->new_blkaddr,
BIO_MAX_PAGES, is_read); BIO_MAX_PAGES, is_read);
io->fio = *fio; io->fio = *fio;
...@@ -355,9 +415,10 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio) ...@@ -355,9 +415,10 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
io->last_block_in_bio = fio->new_blkaddr; io->last_block_in_bio = fio->new_blkaddr;
f2fs_trace_ios(fio, 0); f2fs_trace_ios(fio, 0);
out_fail:
up_write(&io->io_rwsem); up_write(&io->io_rwsem);
trace_f2fs_submit_page_mbio(fio->page, fio); trace_f2fs_submit_page_mbio(fio->page, fio);
return err;
} }
static void __set_data_blkaddr(struct dnode_of_data *dn) static void __set_data_blkaddr(struct dnode_of_data *dn)
...@@ -453,7 +514,7 @@ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index) ...@@ -453,7 +514,7 @@ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index) int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{ {
struct extent_info ei; struct extent_info ei = {0,0,0};
struct inode *inode = dn->inode; struct inode *inode = dn->inode;
if (f2fs_lookup_extent_cache(inode, index, &ei)) { if (f2fs_lookup_extent_cache(inode, index, &ei)) {
...@@ -470,7 +531,7 @@ struct page *get_read_data_page(struct inode *inode, pgoff_t index, ...@@ -470,7 +531,7 @@ struct page *get_read_data_page(struct inode *inode, pgoff_t index,
struct address_space *mapping = inode->i_mapping; struct address_space *mapping = inode->i_mapping;
struct dnode_of_data dn; struct dnode_of_data dn;
struct page *page; struct page *page;
struct extent_info ei; struct extent_info ei = {0,0,0};
int err; int err;
struct f2fs_io_info fio = { struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode), .sbi = F2FS_I_SB(inode),
...@@ -694,6 +755,9 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from) ...@@ -694,6 +755,9 @@ int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
struct f2fs_map_blocks map; struct f2fs_map_blocks map;