/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode_sb_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode_sb_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */
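
/*
 * Illustration of the ordering above (a sketch, not code from this
 * file): a walker of sb->s_inodes takes inode_sb_list_lock first and
 * nests each inode's i_lock inside it, as evict_inodes() and
 * invalidate_inodes() below do:
 *
 *	spin_lock(&inode_sb_list_lock);
 *	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 *		spin_lock(&inode->i_lock);
 *		...inspect or update inode->i_state...
 *		spin_unlock(&inode->i_lock);
 *	}
 *	spin_unlock(&inode_sb_list_lock);
 */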

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif
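
/*
 * For reference, the matching sysctl table entry lives in
 * kernel/sysctl.c; sketched from mainline, it looks like:
 *
 *	{
 *		.procname	= "inode-nr",
 *		.data		= &inodes_stat,
 *		.maxlen		= 2*sizeof(long),
 *		.mode		= 0444,
 *		.proc_handler	= proc_nr_inodes,
 *	},
 */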

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations empty_fops;
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &empty_fops;
	inode->__i_nlink = 1;
	inode->i_opflags = 0;
	i_uid_write(inode, 0);
	i_gid_write(inode, 0);
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
#ifdef CONFIG_QUOTA
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	atomic_set(&mapping->i_mmap_writable, 0);
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->private_data = NULL;
	mapping->backing_dev_info = &default_backing_dev_info;
	mapping->writeback_index = 0;

	/*
	 * If the block_device provides a backing_dev_info for client
	 * inodes then use that.  Otherwise the inode shares the bdev's
	 * backing_dev_info.
	 */
	if (sb->s_bdev) {
		struct backing_dev_info *bdi;

		bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
		mapping->backing_dev_info = bdi;
	}
	inode->i_private = NULL;
	inode->i_mapping = mapping;
	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif

	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}

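/*
 * Sketch of the ->alloc_inode() convention relied on above
 * (hypothetical filesystem; "example_inode_info" and
 * "example_inode_cachep" are made-up names): the filesystem embeds
 * struct inode in its own structure and returns the embedded inode,
 * which alloc_inode() then passes to inode_init_always().
 *
 *	struct example_inode_info {
 *		unsigned long		key;
 *		struct inode		vfs_inode;
 *	};
 *
 *	static struct inode *example_alloc_inode(struct super_block *sb)
 *	{
 *		struct example_inode_info *ei;
 *
 *		ei = kmem_cache_alloc(example_inode_cachep, GFP_KERNEL);
 *		if (!ei)
 *			return NULL;
 *		return &ei->vfs_inode;
 *	}
 */
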
void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
	if (!inode->i_nlink) {
		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
	WARN_ON(inode->i_nlink == 0);
	inode->__i_nlink--;
	if (!inode->i_nlink)
		atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);
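
/*
 * Usage sketch, modelled on simple_unlink() in fs/libfs.c: a typical
 * ->unlink() implementation drops the victim's link count through
 * drop_nlink() rather than writing i_nlink directly.
 *
 *	static int example_unlink(struct inode *dir, struct dentry *dentry)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *
 *		inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
 *		drop_nlink(inode);
 *		dput(dentry);
 *		return 0;
 *	}
 */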

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
	if (inode->i_nlink) {
		inode->__i_nlink = 0;
		atomic_long_inc(&inode->i_sb->s_remove_count);
	}
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
	if (!nlink) {
		clear_nlink(inode);
	} else {
		/* Yes, some filesystems do change nlink from zero to one */
		if (inode->i_nlink == 0)
			atomic_long_dec(&inode->i_sb->s_remove_count);

		inode->__i_nlink = nlink;
	}
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.  Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
	if (unlikely(inode->i_nlink == 0)) {
		WARN_ON(!(inode->i_state & I_LINKABLE));
		atomic_long_dec(&inode->i_sb->s_remove_count);
	}

	inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	mutex_init(&mapping->i_mmap_mutex);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	mapping->i_mmap = RB_ROOT;
	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);
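
/*
 * Usage sketch, modelled on simple_link() in fs/libfs.c: creating a
 * hard link bumps the link count with inc_nlink() and takes the extra
 * inode reference for the new dentry with ihold().
 *
 *	static int example_link(struct dentry *old_dentry, struct inode *dir,
 *				struct dentry *dentry)
 *	{
 *		struct inode *inode = old_dentry->d_inode;
 *
 *		inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
 *		inc_nlink(inode);
 *		ihold(inode);
 *		dget(dentry);
 *		d_instantiate(dentry, inode);
 *		return 0;
 *	}
 */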

static void inode_lru_list_add(struct inode *inode)
{
	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_inc(nr_unused);
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
	if (!(inode->i_state & (I_DIRTY | I_SYNC | I_FREEING | I_WILL_FREE)) &&
	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
		inode_lru_list_add(inode);
}


static void inode_lru_list_del(struct inode *inode)
{
	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
		this_cpu_dec(nr_unused);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode_sb_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode_sb_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 *	__insert_inode_hash - hash an inode
 *	@inode: unhashed inode
 *	@hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 *	Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);
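
/*
 * Most callers go through the insert_inode_hash() wrapper in
 * <linux/fs.h>, which hashes by inode number:
 *
 *	static inline void insert_inode_hash(struct inode *inode)
 *	{
 *		__insert_inode_hash(inode, inode->i_ino);
 *	}
 *
 * Filesystems whose identifying key is wider than i_ino pass their own
 * @hashval instead.
 */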

/**
 *	__remove_inode_hash - remove an inode from the hash
 *	@inode: inode to unhash
 *
 *	Remove an inode from the superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void clear_inode(struct inode *inode)
{
	might_sleep();
	/*
	 * We have to cycle tree_lock here because reclaim can be still in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free mapping under it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	BUG_ON(inode->i_data.nrshadows);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_wb_list))
		inode_wb_list_del(inode);

	inode_sb_list_del(inode);

	/*
	 * Wait for flusher thread to be done with the inode so that filesystem
	 * does not start destroying it while writeback is still running. Since
	 * the inode has I_FREEING set, flusher thread won't start new work on
	 * the inode.  We just have to wait for running writeback to finish.
	 */
	inode_wait_for_writeback(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
	}
}

/**
 * evict_inodes	- evict all evictable inodes for a superblock
 * @sb:		superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);
}

/**
 * invalidate_inodes	- attempt to free all inodes on a superblock
 * @sb:		superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);

	return busy;
}

/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static enum lru_status
inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct inode	*inode = container_of(item, struct inode, i_lru);

	/*
	 * we are inverting the lru lock/inode->i_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	/*
	 * Referenced or dirty inodes are still in use. Give them another pass
	 * through the LRU as we cannot reclaim them now.
	 */
	if (atomic_read(&inode->i_count) ||
	    (inode->i_state & ~I_REFERENCED)) {
		list_del_init(&inode->i_lru);
		spin_unlock(&inode->i_lock);
		this_cpu_dec(nr_unused);
		return LRU_REMOVED;
	}

	/* recently referenced inodes get one more pass */
	if (inode->i_state & I_REFERENCED) {
		inode->i_state &= ~I_REFERENCED;
		spin_unlock(&inode->i_lock);
		return LRU_ROTATE;
	}

	if (inode_has_buffers(inode) || inode->i_data.nrpages) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(lru_lock);
		if (remove_inode_buffers(inode)) {
			unsigned long reap;
			reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
			if (current_is_kswapd())
				__count_vm_events(KSWAPD_INODESTEAL, reap);
			else
				__count_vm_events(PGINODESTEAL, reap);
			if (current->reclaim_state)
				current->reclaim_state->reclaimed_slab += reap;
		}
		iput(inode);
		spin_lock(lru_lock);
		return LRU_RETRY;
	}

	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	list_move(&inode->i_lru, freeable);
	spin_unlock(&inode->i_lock);

	this_cpu_dec(nr_unused);
	return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan,
		     int nid)
{
	LIST_HEAD(freeable);
	long freed;

	freed = list_lru_walk_node(&sb->s_inode_lru, nid, inode_lru_isolate,
				       &freeable, &nr_to_scan);
	dispose_list(&freeable);
	return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	*p = ++res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);

/**
 *	new_inode_pseudo	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for the given superblock.
 *	The inode won't be chained in the superblock's s_inodes list.
 *	This means:
 *	- fs can't be unmounted
 *	- quotas, fsnotify, writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}

/**
 *	new_inode	- obtain an inode
 *	@sb: superblock
 *
 *	Allocates a new inode for the given superblock. The default gfp_mask
 *	for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 *	If HIGHMEM pages are unsuitable or it is known that pages allocated
 *	for the page cache are not reclaimable or migratable,
 *	mapping_set_gfp_mask() must be called with suitable flags on the
 *	newly created inode's mapping.
 *
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&inode_sb_list_lock);

	inode = new_inode_pseudo(sb);
	if (inode)
		inode_sb_list_add(inode);
	return inode;
}
EXPORT_SYMBOL(new_inode);
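
/*
 * Usage sketch, modelled on ramfs_get_inode() in fs/ramfs/inode.c: an
 * in-memory filesystem pairs new_inode() with get_next_ino() and fills
 * in the rest itself.
 *
 *	struct inode *inode = new_inode(sb);
 *
 *	if (inode) {
 *		inode->i_ino = get_next_ino();
 *		inode_init_owner(inode, dir, mode);
 *		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 *	}
 */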

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
	}
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode:	new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
	lockdep_annotate_inode_mutex_key(inode);
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 > inode2)
		swap(inode1, inode2);

	if (inode1 && !S_ISDIR(inode1->i_mode))
		mutex_lock(&inode1->i_mutex);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
	if (inode1 && !S_ISDIR(inode1->i_mode))
		mutex_unlock(&inode1->i_mutex);
	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
		mutex_unlock(&inode2->i_mutex);
}
EXPORT_SYMBOL(unlock_two_nondirectories);
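
/*
 * Usage sketch (hypothetical caller): an operation acting on two
 * non-directory inodes at once brackets the work with this pair; the
 * address-based ordering in lock_two_nondirectories() prevents ABBA
 * deadlocks between concurrent callers.
 *
 *	lock_two_nondirectories(inode1, inode2);
 *	...swap or compare the two files' private state...
 *	unlock_two_nondirectories(inode1, inode2);
 */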

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present it is returned with an increased reference count. This is
 * a generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);
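
/*
 * Sketch of the @test/@set contract (hypothetical filesystem;
 * EXAMPLE_I() maps an inode to its private container): both callbacks
 * run under inode_hash_lock, so neither may sleep or take sleeping
 * locks.
 *
 *	static int example_test(struct inode *inode, void *data)
 *	{
 *		return EXAMPLE_I(inode)->object_id == *(u64 *)data;
 *	}
 *
 *	static int example_set(struct inode *inode, void *data)
 *	{
 *		EXAMPLE_I(inode)->object_id = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, hashval, example_test, example_set,
 *			     &object_id);
 */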

/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
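
/*
 * Usage sketch of the canonical iget pattern; example_read_inode()
 * stands in for the filesystem's routine that reads the on-disk inode:
 *
 *	inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (inode->i_state & I_NEW) {
 *		example_read_inode(inode);
 *		unlock_new_inode(inode);
 *	}
 *	return inode;
 */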

/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb) {
			spin_unlock(&inode_hash_lock);
			return 0;
		}
	}
	spin_unlock(&inode_hash_lock);

	return 1;
}

/**
 *	iunique - get a unique inode number
 *	@sb: superblock
 *	@max_reserved: highest reserved inode number
 *
 *	Obtain an inode number that is unique on the system for a given
 *	superblock. This is used by file systems that have no natural
 *	permanent inode numbering system. An inode number is returned that
 *	is higher than the reserved limit but unique.
 *
 *	BUGS:
 *	With a large number of inodes live on the file system this function
 *	currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);
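
/*
 * Usage sketch (hypothetical): a filesystem with no natural inode
 * numbering assigns one at inode creation time, keeping a reserved
 * range for internal objects:
 *
 *	inode->i_ino = iunique(sb, EXAMPLE_MAX_RESERVED);
 */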

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);
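
/*
 * Usage sketch: igrab() returns NULL when the inode is already being
 * freed, so callers must check the result and balance a successful
 * grab with iput():
 *
 *	struct inode *pinned = igrab(inode);
 *
 *	if (!pinned)
 *		return -ESTALE;		(error code is the caller's choice)
 *	...use pinned...
 *	iput(pinned);
 */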

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */