// SPDX-License-Identifier: GPL-2.0
/*
 * fs/mpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to preparing and submitting BIOs which contain
 * multiple pagecache pages.
 *
 * 15May2002	Andrew Morton
 *		Initial version
 * 27Jun2002	axboe@suse.de
 *		use bio_add_page() to build bio's just the right size
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/mm_inline.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>
#include "internal.h"

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		page_endio(page, op_is_write(bio_op(bio)),
				blk_status_to_errno(bio->bi_status));
	}

	bio_put(bio);
}

static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
{
	bio->bi_end_io = mpage_end_io;
	bio_set_op_attrs(bio, op, op_flags);
	guard_bio_eod(op, bio);
	submit_bio(bio);
	return NULL;
}

static struct bio *
mpage_alloc(struct block_device *bdev,
		sector_t first_sector, int nr_vecs,
		gfp_t gfp_flags)
{
	struct bio *bio;

	/* Restrict the given (page cache) mask for slab allocations */
	gfp_flags &= GFP_KERNEL;
	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio_set_dev(bio, bdev);
		bio->bi_iter.bi_sector = first_sector;
	}
	return bio;
}

/*
 * support function for mpage_readpages.  The fs-supplied get_block might
 * return an up-to-date buffer.  This is used to map that buffer into
 * the page, which allows readpage to avoid triggering a duplicate call
 * to get_block.
 *
 * The idea is to avoid adding buffers to pages that don't already have
 * them.  So when the buffer is up to date and the page size == block size,
 * this marks the page up to date instead of adding new buffers.
 */
static void
map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_bh, *head;
	int block = 0;

	if (!page_has_buffers(page)) {
		/*
		 * don't make any buffers if there is only one buffer on
		 * the page and the page just needs to be set up to date
		 */
		if (inode->i_blkbits == PAGE_SHIFT &&
		    buffer_uptodate(bh)) {
			SetPageUptodate(page);
			return;
		}
		create_empty_buffers(page, i_blocksize(inode), 0);
	}
	head = page_buffers(page);
	page_bh = head;
	do {
		if (block == page_block) {
			page_bh->b_state = bh->b_state;
			page_bh->b_bdev = bh->b_bdev;
			page_bh->b_blocknr = bh->b_blocknr;
			break;
		}
		page_bh = page_bh->b_this_page;
		block++;
	} while (page_bh != head);
}

/*
 * This is the worker routine which does all the work of mapping the disk
 * blocks and constructing the largest possible BIOs, submitting them for
 * I/O when the blocks are not contiguous on disk.
 *
 * We pass a buffer_head back and forth and use its buffer_mapped() flag to
 * represent the validity of its disk mapping and to decide when to do the next
 * get_block() call.
 */
static struct bio *
do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
		sector_t *last_block_in_bio, struct buffer_head *map_bh,
		unsigned long *first_logical_block, get_block_t get_block,
		gfp_t gfp)
{
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_hole = blocks_per_page;
	struct block_device *bdev = NULL;
	int length;
	int fully_mapped = 1;
	unsigned nblocks;
	unsigned relative_block;

	if (page_has_buffers(page))
		goto confused;

	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
	last_block = block_in_file + nr_pages * blocks_per_page;
	last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
	if (last_block > last_block_in_file)
		last_block = last_block_in_file;
	page_block = 0;

	/*
	 * Map blocks using the result from the previous get_blocks call first.
	 */
	nblocks = map_bh->b_size >> blkbits;
	if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
			block_in_file < (*first_logical_block + nblocks)) {
		unsigned map_offset = block_in_file - *first_logical_block;
		unsigned last = nblocks - map_offset;

		for (relative_block = 0; ; relative_block++) {
			if (relative_block == last) {
				clear_buffer_mapped(map_bh);
				break;
			}
			if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr + map_offset +
						relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	/*
	 * Then do more get_blocks calls until we are done with this page.
	 */
	map_bh->b_page = page;
	while (page_block < blocks_per_page) {
		map_bh->b_state = 0;
		map_bh->b_size = 0;

		if (block_in_file < last_block) {
			map_bh->b_size = (last_block-block_in_file) << blkbits;
			if (get_block(inode, block_in_file, map_bh, 0))
				goto confused;
			*first_logical_block = block_in_file;
		}

		if (!buffer_mapped(map_bh)) {
			fully_mapped = 0;
			if (first_hole == blocks_per_page)
				first_hole = page_block;
			page_block++;
			block_in_file++;
			continue;
		}

		/* some filesystems will copy data into the page during
		 * the get_block call, in which case we don't want to
		 * read it again.  map_buffer_to_page copies the data
		 * we just collected from get_block into the page's buffers
		 * so readpage doesn't have to repeat the get_block call
		 */
		if (buffer_uptodate(map_bh)) {
			map_buffer_to_page(page, map_bh, page_block);
			goto confused;
		}

		if (first_hole != blocks_per_page)
			goto confused;		/* hole -> non-hole */

		/* Contiguous blocks? */
		if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
			goto confused;
		nblocks = map_bh->b_size >> blkbits;
		for (relative_block = 0; ; relative_block++) {
			if (relative_block == nblocks) {
				clear_buffer_mapped(map_bh);
				break;
			} else if (page_block == blocks_per_page)
				break;
			blocks[page_block] = map_bh->b_blocknr+relative_block;
			page_block++;
			block_in_file++;
		}
		bdev = map_bh->b_bdev;
	}

	if (first_hole != blocks_per_page) {
		zero_user_segment(page, first_hole << blkbits, PAGE_SIZE);
		if (first_hole == 0) {
			SetPageUptodate(page);
			unlock_page(page);
			goto out;
		}
	} else if (fully_mapped) {
		SetPageMappedToDisk(page);
	}

	if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
	    cleancache_get_page(page) == 0) {
		SetPageUptodate(page);
		goto confused;
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && (*last_block_in_bio != blocks[0] - 1))
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);

alloc_new:
	if (bio == NULL) {
		if (first_hole == blocks_per_page) {
			if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9),
								page))
				goto out;
		}
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				min_t(int, nr_pages, BIO_MAX_PAGES), gfp);
		if (bio == NULL)
			goto confused;
	}

	length = first_hole << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
		goto alloc_new;
	}

	relative_block = block_in_file - *first_logical_block;
	nblocks = map_bh->b_size >> blkbits;
	if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
	    (first_hole != blocks_per_page))
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
	else
		*last_block_in_bio = blocks[blocks_per_page - 1];
out:
	return bio;

confused:
	if (bio)
		bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
	if (!PageUptodate(page))
		block_read_full_page(page, get_block);
	else
		unlock_page(page);
	goto out;
}

/**
 * mpage_readpages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
 * This function walks the pages and the blocks within each page, building and
 * emitting large BIOs.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 * BH_Boundary explanation:
 *
 * There is a problem.  The mpage read code assembles several pages, gets all
 * their disk mappings, and then submits them all.  That's fine, but obtaining
 * the disk mappings may require I/O.  Reads of indirect blocks, for example.
 *
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 *
 * 	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
 *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
 * So what we do is allow the filesystem's get_block() function to set
 * BH_Boundary when it maps block 11.  BH_Boundary says: mapping of the block
 * after this one will require I/O against a block which is probably close to
 * this one.  So you should push what I/O you have currently accumulated.
 *
 * This all causes the disk requests to be issued in the correct order.
 */
int
mpage_readpages(struct address_space *mapping, struct list_head *pages,
				unsigned nr_pages, get_block_t get_block)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;
	gfp_t gfp = readahead_gfp_mask(mapping);

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = lru_to_page(pages);

		prefetchw(&page->flags);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index,
					gfp)) {
			bio = do_mpage_readpage(bio, page,
					nr_pages - page_idx,
					&last_block_in_bio, &map_bh,
					&first_logical_block,
					get_block, gfp);
		}
		put_page(page);
	}
	BUG_ON(!list_empty(pages));
	if (bio)
		mpage_bio_submit(REQ_OP_READ, 0, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpages);
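
/*
 * Usage sketch (illustrative, not part of the original file): a filesystem
 * normally calls mpage_readpages() from its ->readpages() address_space
 * operation, passing its own get_block_t mapper.  The names
 * example_readpages/example_get_block are hypothetical; ext2, for
 * instance, passes ext2_get_block here:
 *
 *	static int example_readpages(struct file *file,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return mpage_readpages(mapping, pages, nr_pages,
 *					example_get_block);
 *	}
 *
 * To benefit from the BH_Boundary optimisation described above, the mapper
 * can call set_buffer_boundary() on the buffer_head when the block it just
 * mapped is followed on disk by an indirect (metadata) block.
 */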

/*
 * This isn't called much at all
 */
int mpage_readpage(struct page *page, get_block_t get_block)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;
	struct buffer_head map_bh;
	unsigned long first_logical_block = 0;
	gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);

	map_bh.b_state = 0;
	map_bh.b_size = 0;
	bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
			&map_bh, &first_logical_block, get_block, gfp);
	if (bio)
		mpage_bio_submit(REQ_OP_READ, 0, bio);
	return 0;
}
EXPORT_SYMBOL(mpage_readpage);
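
/*
 * Usage sketch (illustrative, not part of the original file): the
 * ->readpage() wiring is a one-liner, with example_get_block a
 * hypothetical stand-in for the filesystem's real mapper:
 *
 *	static int example_readpage(struct file *file, struct page *page)
 *	{
 *		return mpage_readpage(page, example_get_block);
 *	}
 */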

/*
 * Writing is not so simple.
 *
 * If the page has buffers then they will be used for obtaining the disk
 * mapping.  We only support pages which are fully mapped-and-dirty, with a
 * special case for pages which are unmapped at the end: end-of-file.
 *
 * If the page has no buffers (preferred) then the page is mapped here.
 *
 * If all blocks are found to be contiguous then the page can go into the
 * BIO.  Otherwise fall back to the mapping's writepage().
 *
 * FIXME: This code wants an estimate of how many pages are still to be
 * written, so it can intelligently allocate a suitably-sized BIO.  For now,
 * just allocate full-size (16-page) BIOs.
 */

struct mpage_data {
	struct bio *bio;
	sector_t last_block_in_bio;
	get_block_t *get_block;
	unsigned use_writepage;
};

/*
 * We have our BIO, so we can now mark the buffers clean.  Make
 * sure to only clean buffers which we know we'll be writing.
 */
static void clean_buffers(struct page *page, unsigned first_unmapped)
{
	unsigned buffer_counter = 0;
	struct buffer_head *bh, *head;
	if (!page_has_buffers(page))
		return;
	head = page_buffers(page);
	bh = head;

	do {
		if (buffer_counter++ == first_unmapped)
			break;
		clear_buffer_dirty(bh);
		bh = bh->b_this_page;
	} while (bh != head);

	/*
	 * we cannot drop the bh if the page is not uptodate or a concurrent
	 * readpage would fail to serialize with the bh and it would read from
	 * disk before we reach the platter.
	 */
	if (buffer_heads_over_limit && PageUptodate(page))
		try_to_free_buffers(page);
}

/*
 * For situations where we want to clean all buffers attached to a page.
 * We don't need to calculate how many buffers are attached to the page,
 * we just need to specify a number larger than the maximum number of buffers.
 */
void clean_page_buffers(struct page *page)
{
	clean_buffers(page, ~0U);
}

static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
		      void *data)
{
	struct mpage_data *mpd = data;
	struct bio *bio = mpd->bio;
	struct address_space *mapping = page->mapping;
	struct inode *inode = page->mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	unsigned long end_index;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	sector_t last_block;
	sector_t block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	unsigned first_unmapped = blocks_per_page;
	struct block_device *bdev = NULL;
	int boundary = 0;
	sector_t boundary_block = 0;
	struct block_device *boundary_bdev = NULL;
	int length;
	struct buffer_head map_bh;
	loff_t i_size = i_size_read(inode);
	int ret = 0;
	int op_flags = wbc_to_write_flags(wbc);

	if (page_has_buffers(page)) {
		struct buffer_head *head = page_buffers(page);
		struct buffer_head *bh = head;

		/* If they're all mapped and dirty, do it */
		page_block = 0;
		do {
			BUG_ON(buffer_locked(bh));
			if (!buffer_mapped(bh)) {
				/*
				 * unmapped dirty buffers are created by
				 * __set_page_dirty_buffers -> mmapped data
				 */
				if (buffer_dirty(bh))
					goto confused;
				if (first_unmapped == blocks_per_page)
					first_unmapped = page_block;
				continue;
			}

			if (first_unmapped != blocks_per_page)
				goto confused;	/* hole -> non-hole */

			if (!buffer_dirty(bh) || !buffer_uptodate(bh))
				goto confused;
			if (page_block) {
				if (bh->b_blocknr != blocks[page_block-1] + 1)
					goto confused;
			}
			blocks[page_block++] = bh->b_blocknr;
			boundary = buffer_boundary(bh);
			if (boundary) {
				boundary_block = bh->b_blocknr;
				boundary_bdev = bh->b_bdev;
			}
			bdev = bh->b_bdev;
		} while ((bh = bh->b_this_page) != head);

		if (first_unmapped)
			goto page_is_mapped;

		/*
		 * Page has buffers, but they are all unmapped. The page was
		 * created by pagein or read over a hole which was handled by
		 * block_read_full_page().  If this address_space is also
		 * using mpage_readpages then this can rarely happen.
		 */
		goto confused;
	}

	/*
	 * The page has no buffers: map it to disk
	 */
	BUG_ON(!PageUptodate(page));
	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
	last_block = (i_size - 1) >> blkbits;
	map_bh.b_page = page;
	for (page_block = 0; page_block < blocks_per_page; ) {

		map_bh.b_state = 0;
		map_bh.b_size = 1 << blkbits;
		if (mpd->get_block(inode, block_in_file, &map_bh, 1))
			goto confused;
		if (buffer_new(&map_bh))
			clean_bdev_bh_alias(&map_bh);
		if (buffer_boundary(&map_bh)) {
			boundary_block = map_bh.b_blocknr;
			boundary_bdev = map_bh.b_bdev;
		}
		if (page_block) {
			if (map_bh.b_blocknr != blocks[page_block-1] + 1)
				goto confused;
		}
		blocks[page_block++] = map_bh.b_blocknr;
		boundary = buffer_boundary(&map_bh);
		bdev = map_bh.b_bdev;
		if (block_in_file == last_block)
			break;
		block_in_file++;
	}
	BUG_ON(page_block == 0);

	first_unmapped = page_block;

page_is_mapped:
	end_index = i_size >> PAGE_SHIFT;
	if (page->index >= end_index) {
		/*
		 * The page straddles i_size.  It must be zeroed out on each
		 * and every writepage invocation because it may be mmapped.
		 * "A file is mapped in multiples of the page size.  For a file
		 * that is not a multiple of the page size, the remaining memory
		 * is zeroed when mapped, and writes to that region are not
		 * written out to the file."
		 */
		unsigned offset = i_size & (PAGE_SIZE - 1);

		if (page->index > end_index || !offset)
			goto confused;
		zero_user_segment(page, offset, PAGE_SIZE);
	}

	/*
	 * This page will go to BIO.  Do we need to send this BIO off first?
	 */
	if (bio && mpd->last_block_in_bio != blocks[0] - 1)
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);

alloc_new:
	if (bio == NULL) {
		if (first_unmapped == blocks_per_page) {
			if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9),
								page, wbc))
				goto out;
		}
		bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
				BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH);
		if (bio == NULL)
			goto confused;

		wbc_init_bio(wbc, bio);
		bio->bi_write_hint = inode->i_write_hint;
	}

	/*
	 * Must try to add the page before marking the buffer clean or
	 * the confused fail path above (OOM) will be very confused when
	 * it finds all bh marked clean (i.e. it will not write anything)
	 */
	wbc_account_io(wbc, page, PAGE_SIZE);
	length = first_unmapped << blkbits;
	if (bio_add_page(bio, page, length, 0) < length) {
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
		goto alloc_new;
	}

	clean_buffers(page, first_unmapped);

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	unlock_page(page);
	if (boundary || (first_unmapped != blocks_per_page)) {
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
		if (boundary_block) {
			write_boundary_block(boundary_bdev,
					boundary_block, 1 << blkbits);
		}
	} else {
		mpd->last_block_in_bio = blocks[blocks_per_page - 1];
	}
	goto out;

confused:
	if (bio)
		bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);

	if (mpd->use_writepage) {
		ret = mapping->a_ops->writepage(page, wbc);
	} else {
		ret = -EAGAIN;
		goto out;
	}
	/*
	 * The caller has a ref on the inode, so *mapping is stable
	 */
	mapping_set_error(mapping, ret);
out:
	mpd->bio = bio;
	return ret;
}

/**
 * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
 *             If this is NULL then use a_ops->writepage.  Otherwise, go
 *             direct-to-BIO.
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 *
 * If a page is already under I/O, generic_writepages() skips it, even
 * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them.  If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 */
int
mpage_writepages(struct address_space *mapping,
		struct writeback_control *wbc, get_block_t get_block)
{
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);

	if (!get_block)
		ret = generic_writepages(mapping, wbc);
	else {
		struct mpage_data mpd = {
			.bio = NULL,
			.last_block_in_bio = 0,
			.get_block = get_block,
			.use_writepage = 1,
		};

		ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
		if (mpd.bio) {
			int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
				  REQ_SYNC : 0);
			mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
		}
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(mpage_writepages);
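
/*
 * Usage sketch (illustrative, not part of the original file): a
 * filesystem's ->writepages() can hand the whole job to mpage_writepages()
 * together with its block mapper (hypothetical names below; ext2 passes
 * ext2_get_block here):
 *
 *	static int example_writepages(struct address_space *mapping,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepages(mapping, wbc, example_get_block);
 *	}
 */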

int mpage_writepage(struct page *page, get_block_t get_block,
	struct writeback_control *wbc)
{
	struct mpage_data mpd = {
		.bio = NULL,
		.last_block_in_bio = 0,
		.get_block = get_block,
		.use_writepage = 0,
	};
	int ret = __mpage_writepage(page, wbc, &mpd);
	if (mpd.bio) {
		int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
			  REQ_SYNC : 0);
		mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
	}
	return ret;
}
EXPORT_SYMBOL(mpage_writepage);
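
/*
 * Usage sketch (illustrative, not part of the original file): ->writepage()
 * wiring, with example_get_block standing in for the filesystem's real
 * mapper:
 *
 *	static int example_writepage(struct page *page,
 *			struct writeback_control *wbc)
 *	{
 *		return mpage_writepage(page, example_get_block, wbc);
 *	}
 */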