tree-log.c 171 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0
2
3
4
5
6
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
7
#include <linux/slab.h>
8
#include <linux/blkdev.h>
Josef Bacik's avatar
Josef Bacik committed
9
#include <linux/list_sort.h>
10
#include <linux/iversion.h>
11
#include "misc.h"
12
#include "ctree.h"
13
#include "tree-log.h"
14
15
16
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
Mark Fasheh's avatar
Mark Fasheh committed
17
#include "backref.h"
18
#include "compression.h"
19
#include "qgroup.h"
20
#include "inode-map.h"
21
22
#include "block-group.h"
#include "space-info.h"
23
24
25
26
27
28
29

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
30
31
32
33
34
35
enum {
	/* log everything for the inode (all items, all extents) */
	LOG_INODE_ALL,
	/* log just enough to recreate the inode during log replay */
	LOG_INODE_EXISTS,
	/*
	 * NOTE(review): values below are used when logging an inode pulls in
	 * other inodes; exact semantics not visible in this chunk — confirm
	 * against the callers of btrfs_log_inode().
	 */
	LOG_OTHER_INODE,
	LOG_OTHER_INODE_ALL,
};
36

37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 *  2a is actually the more important variant.  With the extra logging
 *  a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */

80
81
82
83
84
85
86
87
88
/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
89
90
91
92
93
94
enum {
	/* stage 0: only pin down the extents the log tree uses */
	LOG_WALK_PIN_ONLY,
	/* stage 1: make sure all inodes found in the log exist in the subvolume */
	LOG_WALK_REPLAY_INODES,
	/* stage 2: replay the dir index items */
	LOG_WALK_REPLAY_DIR_INDEX,
	/* stage 3: everything else (dir items, refs, extents, xattrs) */
	LOG_WALK_REPLAY_ALL,
};
95

96
/* Forward declarations for helpers defined later in this file. */

/* copy an inode's items into its log tree; inode_only is a LOG_INODE_* value */
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx);
/* link an inode into the orphan fixup dir used during replay */
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
/* remove dir entries from the subvolume that are not present in the log */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree an 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times, once to pin down all the extents it is
 * using in ram and once, once to create all the inodes logged in the tree
 * and once to do all the other items.
 */

/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&root->log_mutex);

	if (root->log_root) {
		/*
		 * Something already decided this transaction needs a full
		 * commit; the log cannot be used, tell the caller to retry
		 * after committing.
		 */
		if (btrfs_need_log_full_commit(trans)) {
			ret = -EAGAIN;
			goto out;
		}

		/*
		 * Track whether more than one task is logging under this
		 * root in the current log transaction.
		 */
		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		/*
		 * First logger for this root: make sure the global log root
		 * tree exists (guarded by tree_log_mutex), then create this
		 * root's own log tree.
		 */
		mutex_lock(&fs_info->tree_log_mutex);
		if (!fs_info->log_root_tree)
			ret = btrfs_init_log_root_tree(trans, fs_info);
		mutex_unlock(&fs_info->tree_log_mutex);
		if (ret)
			goto out;

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	/* count ourselves in so log sync waits for us to finish */
	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	if (ctx) {
		/* log transactions alternate between two ctx lists */
		int index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there were not transactions
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

Yan Zheng's avatar
Yan Zheng committed
198
	mutex_lock(&root->log_mutex);
199
200
	if (root->log_root) {
		ret = 0;
Yan Zheng's avatar
Yan Zheng committed
201
		atomic_inc(&root->log_writers);
202
	}
Yan Zheng's avatar
Yan Zheng committed
203
	mutex_unlock(&root->log_mutex);
204
205
206
	return ret;
}

207
208
209
210
211
/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
212
void btrfs_pin_log_trans(struct btrfs_root *root)
{
	/*
	 * Taking a log_writers reference under log_mutex blocks any log sync
	 * from completing until btrfs_end_log_trans() drops it.
	 */
	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
}

219
220
221
222
/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
223
void btrfs_end_log_trans(struct btrfs_root *root)
{
	/* drop our writer reference; the last one out wakes the log syncer */
	if (atomic_dec_and_test(&root->log_writers)) {
		/* atomic_dec_and_test implies a barrier */
		cond_wake_up_nomb(&root->log_writer_wait);
	}
}

231
232
233
234
235
236
237
238
239
240
241
static int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
			        buf->start, buf->start + buf->len - 1);
}
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272

/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in (LOG_WALK_*) */
	int stage;

	/*
	 * Ignore any items from the inode currently being processed. Needs
	 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
	 * the LOG_WALK_REPLAY_INODES stage.
	 */
	bool ignore_cur_inode;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen, int level);
};

/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen, int level)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen, level, NULL);
		if (ret)
			return ret;
	}

	/* pin-only walk: record this block so it isn't reallocated mid-replay */
	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(wc->trans, eb->start,
						      eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		/* leaves may reference data extents that must be excluded too */
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}

/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	/* copying into a subvolume (log replay) rather than into a log tree */
	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		/* zero-sized items of equal size are trivially identical */
		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(path, item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before. In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0) {
				struct btrfs_map_token token;

				btrfs_init_map_token(&token, dst_eb);
				btrfs_set_token_inode_size(dst_eb, dst_item,
							   ino_size, &token);
			}
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	/* restore the directory i_size that the copy just clobbered */
	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}

/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 *
 * Returns the inode, or NULL if btrfs_iget() failed (the error code
 * is discarded, matching the original behavior).
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct inode *inode;
	struct btrfs_key key = {
		.objectid = objectid,
		.type = BTRFS_INODE_ITEM_KEY,
		.offset = 0,
	};

	inode = btrfs_iget(root->fs_info->sb, &key, root);
	return IS_ERR(inode) ? NULL : inode;
}

/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_ram_bytes(eb, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		/* inline extents end at the next sector boundary */
		extent_end = ALIGN(start + size,
				   fs_info->sectorsize);
	} else {
		/* unknown extent type: silently skip it */
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path,
			btrfs_ino(BTRFS_I(inode)), start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		/* holes need no file extent item when NO_HOLES is enabled */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				(unsigned long)item,  sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Manually record dirty extent, as here we did a shallow
		 * file extent item copy and skip normal backref update,
		 * but modifying extent tree all by ourselves.
		 * So need to manually record dirty extent for qgroup,
		 * as the owner of the file extent changed from log tree
		 * (doesn't affect qgroup) to fs/file tree(affects qgroup)
		 */
		ret = btrfs_qgroup_trace_extent(trans,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;

		if (ins.objectid > 0) {
			struct btrfs_ref ref = { 0 };
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);

			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						ins.offset);
			if (ret == 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						ins.objectid, ins.offset, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key->objectid, offset);
				ret = btrfs_inc_extent_ref(trans, &ref);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			/*
			 * checksums in the log cover the on-disk extent; for
			 * compressed extents the whole disk extent is
			 * checksummed, otherwise only the referenced slice.
			 */
			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range.  An extent can be fully referenced
			 * by one file extent item and partially referenced by
			 * another (e.g. after clone/extent_same ioctls).  If
			 * the partial reference is replayed first and we did
			 * not delete the old csums here, the csum tree could
			 * end up with two overlapping csum items for the same
			 * disk extent, e.g.:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * A later csum lookup for an offset covered only by
			 * the first item would then find the second item and
			 * miss the checksum entirely.  Deleting first and
			 * re-inserting the log's csums avoids the overlap.
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						struct btrfs_ordered_sum,
						list);
				if (!ret)
					ret = btrfs_del_csums(trans,
							      fs_info->csum_root,
							      sums->bytenr,
							      sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
						fs_info->csum_root, sums);
				/* always drain the list, even after an error */
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			/* hole extent (disk_bytenr == 0): nothing to reference */
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start,
						extent_end - start);
	if (ret)
		goto out;

	inode_add_bytes(inode, nbytes);
update_inode:
	ret = btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}

/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	/* path currently points at the leaf holding 'di' */
	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	/* copy the name out before releasing the path invalidates di/leaf */
	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * Keep the inode reachable via the fixup dir in case this unlink
	 * drops its last link; the fixup pass will sort out the link count.
	 */
	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
			name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans);
out:
	/* iput(NULL) is a no-op, so the !inode error path is safe here */
	kfree(name);
	iput(inode);
	return ret;
}

/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode.
 *
 * Returns 1 only when both the dir index item (at @index) and the dir
 * item (by @name) exist and reference @objectid; returns 0 otherwise.
 * NOTE(review): lookup errors (IS_ERR) are treated the same as "not
 * present" here — the caller only cares about a confirmed match.
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	/* first check the dir index key for this sequence number */
	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(path);

	/* then check the plain dir item for the name */
	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}

/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
Mark Fasheh's avatar
Mark Fasheh committed
952
				   u64 ref_objectid,
953
				   const char *name, int namelen)
954
955
956
957
958
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
959
960
961
	if (!path)
		return -ENOMEM;

962
	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
963
964
965
	if (ret < 0) {
		goto out;
	} else if (ret == 1) {
966
		ret = 0;
Mark Fasheh's avatar
Mark Fasheh committed
967
968
969
		goto out;
	}

970
971
972
973
974
975
976
977
978
	if (key->type == BTRFS_INODE_EXTREF_KEY)
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
						       path->slots[0],
						       ref_objectid,
						       name, namelen);
	else
		ret = !!btrfs_find_name_in_backref(path->nodes[0],
						   path->slots[0],
						   name, namelen);
979
980
out:
	btrfs_free_path(path);
981
	return ret;
982
983
}

984
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
985
986
				  struct btrfs_root *root,
				  struct btrfs_path *path,
987
				  struct btrfs_root *log_root,
988
989
				  struct btrfs_inode *dir,
				  struct btrfs_inode *inode,
Mark Fasheh's avatar
Mark Fasheh committed
990
991
992
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
993
{
liubo's avatar
liubo committed
994
	int ret;
Mark Fasheh's avatar
Mark Fasheh committed
995
996
997
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
998
	struct btrfs_dir_item *di;
Mark Fasheh's avatar
Mark Fasheh committed
999
1000
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;
1001

Mark Fasheh's avatar
Mark Fasheh committed
1002
1003
1004
1005
1006
1007
again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
1008
1009
1010
1011
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;
Mark Fasheh's avatar
Mark Fasheh committed
1012
1013

		leaf = path->nodes[0];
1014
1015
1016
1017

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
Mark Fasheh's avatar
Mark Fasheh committed
1018
		if (search_key.objectid == search_key.offset)
1019
			return 1;
1020
1021
1022
1023
1024
1025
1026

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
1027
		while (ptr < ptr_end) {
1028
1029
1030
1031
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
1032
1033
			if (!victim_name)
				return -ENOMEM;
1034
1035
1036
1037
1038

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

1039
1040
1041
1042
1043
1044
1045
			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, victim_name,
					     victim_name_len);
			if (ret < 0) {
				kfree(victim_name);
				return ret;
			} else if (!ret) {
1046
				inc_nlink(&inode->vfs_inode);
1047
				btrfs_release_path(path);
1048

1049
				ret = btrfs_unlink_inode(trans, root, dir, inode,
1050
						victim_name, victim_name_len);
Mark Fasheh's avatar
Mark Fasheh committed
1051
				kfree(victim_name);
1052
1053
				if (ret)
					return ret;
1054
				ret = btrfs_run_delayed_items(trans);
1055
1056
				if (ret)
					return ret;
Mark Fasheh's avatar
Mark Fasheh committed
1057
1058
				*search_done = 1;
				goto again;
1059
1060
			}
			kfree(victim_name);
Mark Fasheh's avatar
Mark Fasheh committed
1061

1062
1063
1064
			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

1065
1066
		/*
		 * NOTE: we have searched root tree and checked the
1067
		 * corresponding ref, it does not need to check again.
1068
		 */
1069
		*search_done = 1;
1070
	}
1071
	btrfs_release_path(path);
1072

Mark Fasheh's avatar
Mark Fasheh committed
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
1089
			extref = (struct btrfs_inode_extref *)(base + cur_offset);
Mark Fasheh's avatar
Mark Fasheh committed
1090
1091
1092
1093
1094
1095
1096

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
1097
1098
			if (!victim_name)
				return -ENOMEM;
Mark Fasheh's avatar
Mark Fasheh committed
1099
1100
1101
1102
1103
1104
1105
1106
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
1107
1108
1109
1110
1111
1112
			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, victim_name,
					     victim_name_len);
			if (ret < 0) {
				return ret;
			} else if (!ret) {
Mark Fasheh's avatar
Mark Fasheh committed
1113
1114
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
1115
						parent_objectid);
Mark Fasheh's avatar
Mark Fasheh committed
1116
				if (victim_parent) {
1117
					inc_nlink(&inode->vfs_inode);
Mark Fasheh's avatar
Mark Fasheh committed
1118
1119
1120
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
1121
							BTRFS_I(victim_parent),
1122
							inode,
1123
1124
							victim_name,
							victim_name_len);
1125
1126
					if (!ret)
						ret = btrfs_run_delayed_items(
1127
								  trans);
Mark Fasheh's avatar
Mark Fasheh committed
1128
1129
1130
				}
				iput(victim_parent);
				kfree(victim_name);
1131
1132
				if (ret)
					return ret;
Mark Fasheh's avatar
Mark Fasheh committed
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

liubo's avatar
liubo committed
1144
	/* look for a conflicting sequence number */
1145
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
Mark Fasheh's avatar
Mark Fasheh committed
1146
					 ref_index, name, namelen, 0);
liubo's avatar
liubo committed
1147
	if (di && !IS_ERR(di)) {
1148
		ret = drop_one_dir_item(trans, root, path, dir, di);
1149
1150
		if (ret)
			return ret;
liubo's avatar
liubo committed
1151
1152
1153
	}
	btrfs_release_path(path);

1154
	/* look for a conflicting name */
1155
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
liubo's avatar
liubo committed
1156
1157
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
1158
		ret = drop_one_dir_item(trans, root, path, dir, di);
1159
1160
		if (ret)
			return ret;
liubo's avatar
liubo committed
1161
1162
1163
	}
	btrfs_release_path(path);

1164
1165
	return 0;
}
1166

1167
1168
1169
static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
Mark Fasheh's avatar
Mark Fasheh committed
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

1183
1184
	if (index)
		*index = btrfs_inode_extref_index(eb, extref);
Mark Fasheh's avatar
Mark Fasheh committed
1185
1186
1187
1188
1189
1190
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}

1191
1192
static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
Mark Fasheh's avatar
Mark Fasheh committed
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

1205
1206
	if (index)
		*index = btrfs_inode_ref_index(eb, ref);
Mark Fasheh's avatar
Mark Fasheh committed
1207
1208
1209
1210

	return 0;
}

1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
/*
 * Take an inode reference item from the log tree and iterate all names from the
 * inode reference item in the subvolume tree with the same key (if it exists).
 * For any name that is not in the inode reference item from the log tree, do a
 * proper unlink of that name (that is, remove its entry from the inode
 * reference item and both dir index keys).
 */
static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_inode *inode,
				 struct extent_buffer *log_eb,
				 int log_slot,
				 struct btrfs_key *key)
{
	int ret;
	unsigned long ref_ptr;
	unsigned long ref_end;
	struct extent_buffer *eb;

again:
	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
	while (ref_ptr < ref_end) {
		char *name = NULL;
		int namelen;
		u64 parent_id;

		if (key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						NULL, &parent_id);
		} else {
			parent_id = key->offset;
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     NULL);
		}
		if (ret)
			goto out;

		if (key->type == BTRFS_INODE_EXTREF_KEY)
1261
1262
1263
			ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
							       parent_id, name,
							       namelen);
1264
		else
1265
1266
			ret = !!btrfs_find_name_in_backref(log_eb, log_slot,
							   name, namelen);
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299

		if (!ret) {
			struct inode *dir;

			btrfs_release_path(path);
			dir = read_one_inode(root, parent_id);
			if (!dir) {
				ret = -ENOENT;
				kfree(name);
				goto out;
			}
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
						 inode, name, namelen);
			kfree(name);
			iput(dir);
			if (ret)
				goto out;
			goto again;
		}

		kfree(name);
		ref_ptr += namelen;
		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ref_ptr += sizeof(struct btrfs_inode_extref);
		else
			ref_ptr += sizeof(struct btrfs_inode_ref);
	}
	ret = 0;
 out:
	btrfs_release_path(path);
	return ret;
}

1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
				  const u8 ref_type, const char *name,
				  const int namelen)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	const u64 parent_id = btrfs_ino(BTRFS_I(dir));
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = ref_type;
	if (key.type == BTRFS_INODE_REF_KEY)
		key.offset = parent_id;
	else
		key.offset = btrfs_extref_hash(parent_id, name, namelen);

	ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (key.type == BTRFS_INODE_EXTREF_KEY)
1328
1329
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
				path->slots[0], parent_id, name, namelen);
1330
	else
1331
1332
		ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
						   name, namelen);
1333
1334
1335
1336
1337
1338

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * During log replay, add a hard link for @inode in @dir under @name at
 * dir index @ref_index.  If @name already exists in the subvolume but
 * points at a different inode (a collision with a not-yet-replayed,
 * higher-numbered inode from the log), that stale dentry is unlinked
 * first.  Returns 0 on success or a negative errno.
 */
static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct inode *dir, struct inode *inode, const char *name,
		    int namelen, u64 ref_index)
{
	struct btrfs_dir_item *dir_item;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct inode *other_inode = NULL;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* NULL trans: read-only lookup of any existing dir item for @name */
	dir_item = btrfs_lookup_dir_item(NULL, root, path,
					 btrfs_ino(BTRFS_I(dir)),
					 name, namelen, 0);
	if (!dir_item) {
		/* no collision, go straight to adding the link */
		btrfs_release_path(path);
		goto add_link;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		goto out;
	}

	/*
	 * Our inode's dentry collides with the dentry of another inode which is
	 * in the log but not yet processed since it has a higher inode number.
	 * So delete that other dentry.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
	btrfs_release_path(path);
	other_inode = read_one_inode(root, key.objectid);
	if (!other_inode) {
		ret = -ENOENT;
		goto out;
	}
	ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode),
				 name, namelen);
	if (ret)
		goto out;
	/*
	 * If we dropped the link count to 0, bump it so that later the iput()
	 * on the inode will not free it. We will fixup the link count later.
	 */
	if (other_inode->i_nlink == 0)
		inc_nlink(other_inode);

	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto out;
add_link:
	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     name, namelen, 0, ref_index);
out:
	/* iput() handles the NULL other_inode of the no-collision path */
	iput(other_inode);
	btrfs_free_path(path);

	return ret;
}
