spinlock_debug.c

    Stephen Boyd authored and Linus Torvalds committed
    When a spinlock warning is printed we usually get
    
     BUG: spinlock bad magic on CPU#0, modprobe/111
      lock: 0xdff09f38, .magic: 00000000, .owner: /0, .owner_cpu: 0
    
    but it's nicer to print the symbol for the lock if we have it so that we
    can avoid 'grep dff09f38 /proc/kallsyms' to find out which lock it was.
    Use kallsyms to print the symbol name so we get something a bit easier to
    read:
    
     BUG: spinlock bad magic on CPU#0, modprobe/112
      lock: test_lock, .magic: 00000000, .owner: <none>/-1, .owner_cpu: 0
    
    If the lock is not in kallsyms %ps will fall back to printing the address
    directly.
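    
    As an aside, a minimal sketch of how %ps behaves here (a hypothetical test
    module; the lock name test_lock mirrors the example above, and data symbols
    are typically only resolvable with CONFIG_KALLSYMS_ALL):
    
     #include <linux/module.h>
     #include <linux/spinlock.h>
     
     static DEFINE_SPINLOCK(test_lock);
     
     static int __init lock_name_demo_init(void)
     {
     	/* %ps prints the kallsyms name of the lock ("test_lock") when the
     	 * symbol is resolvable, and falls back to the raw address otherwise. */
     	pr_info("lock: %ps\n", &test_lock);
     	return 0;
     }
     module_init(lock_name_demo_init);
     MODULE_LICENSE("GPL");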
    
    Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
    Cc: Ingo Molnar <mingo@elte.hu>
    Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    fd0a3735
    spinlock_debug.c
    /*
     * Copyright 2005, Red Hat, Inc., Ingo Molnar
     * Released under the General Public License (GPL).
     *
     * This file contains the spinlock/rwlock implementations for
     * DEBUG_SPINLOCK.
     */
    
    #include <linux/spinlock.h>
    #include <linux/nmi.h>
    #include <linux/interrupt.h>
    #include <linux/debug_locks.h>
    #include <linux/delay.h>
    #include <linux/export.h>
    
    void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
    			  struct lock_class_key *key)
    {
    #ifdef CONFIG_DEBUG_LOCK_ALLOC
    	/*
    	 * Make sure we are not reinitializing a held lock:
    	 */
    	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
    	lockdep_init_map(&lock->dep_map, name, key, 0);
    #endif
    	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
    	lock->magic = SPINLOCK_MAGIC;
    	lock->owner = SPINLOCK_OWNER_INIT;
    	lock->owner_cpu = -1;
    }
    
    EXPORT_SYMBOL(__raw_spin_lock_init);
    
    void __rwlock_init(rwlock_t *lock, const char *name,
    		   struct lock_class_key *key)
    {
    #ifdef CONFIG_DEBUG_LOCK_ALLOC
    	/*
    	 * Make sure we are not reinitializing a held lock:
    	 */
    	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
    	lockdep_init_map(&lock->dep_map, name, key, 0);
    #endif
    	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
    	lock->magic = RWLOCK_MAGIC;
    	lock->owner = SPINLOCK_OWNER_INIT;
    	lock->owner_cpu = -1;
    }
    
    EXPORT_SYMBOL(__rwlock_init);
    
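    /*
     * Dump the lock state: the lock symbol (or raw address, via %ps),
     * its magic value and the recorded owner task/CPU, then the stack.
     */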
    static void spin_dump(raw_spinlock_t *lock, const char *msg)
    {
    	struct task_struct *owner = NULL;
    
    	if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
    		owner = lock->owner;
    	printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
    		msg, raw_smp_processor_id(),
    		current->comm, task_pid_nr(current));
    	printk(KERN_EMERG " lock: %ps, .magic: %08x, .owner: %s/%d, "
    			".owner_cpu: %d\n",
    		lock, lock->magic,
    		owner ? owner->comm : "<none>",
    		owner ? task_pid_nr(owner) : -1,
    		lock->owner_cpu);
    	dump_stack();
    }
    
    static void spin_bug(raw_spinlock_t *lock, const char *msg)
    {
    	if (!debug_locks_off())
    		return;
    
    	spin_dump(lock, msg);
    }
    
    #define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
    
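    /*
     * Sanity checks before taking the lock: the magic must be valid and
     * the current task/CPU must not already hold it.
     */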
    static inline void
    debug_spin_lock_before(raw_spinlock_t *lock)
    {
    	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
    	SPIN_BUG_ON(lock->owner == current, lock, "recursion");
    	SPIN_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
    							lock, "cpu recursion");
    }
    
    static inline void debug_spin_lock_after(raw_spinlock_t *lock)
    {
    	lock->owner_cpu = raw_smp_processor_id();
    	lock->owner = current;
    }
    
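    /*
     * Sanity checks on release: valid magic, lock actually held, and
     * released by the same task/CPU that took it; then clear the owner.
     */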
    static inline void debug_spin_unlock(raw_spinlock_t *lock)
    {
    	SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
    	SPIN_BUG_ON(!raw_spin_is_locked(lock), lock, "already unlocked");
    	SPIN_BUG_ON(lock->owner != current, lock, "wrong owner");
    	SPIN_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
    							lock, "wrong CPU");
    	lock->owner = SPINLOCK_OWNER_INIT;
    	lock->owner_cpu = -1;
    }
    
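    /*
     * Contended slow path: spin with trylock for about one second
     * (loops_per_jiffy * HZ delay steps), then report a suspected
     * lockup once and keep spinning.
     */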
    static void __spin_lock_debug(raw_spinlock_t *lock)
    {
    	u64 i;
    	u64 loops = loops_per_jiffy * HZ;
    	int print_once = 1;
    
    	for (;;) {
    		for (i = 0; i < loops; i++) {
    			if (arch_spin_trylock(&lock->raw_lock))
    				return;
    			__delay(1);
    		}
    		/* lockup suspected: */
    		if (print_once) {
    			print_once = 0;
    			spin_dump(lock, "lockup");
    #ifdef CONFIG_SMP
    			trigger_all_cpu_backtrace();
    #endif
    		}
    	}
    }
    
    void do_raw_spin_lock(raw_spinlock_t *lock)
    {
    	debug_spin_lock_before(lock);
    	if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
    		__spin_lock_debug(lock);
    	debug_spin_lock_after(lock);
    }
    
    int do_raw_spin_trylock(raw_spinlock_t *lock)
    {
    	int ret = arch_spin_trylock(&lock->raw_lock);
    
    	if (ret)
    		debug_spin_lock_after(lock);
    #ifndef CONFIG_SMP
    	/*
    	 * Must not happen on UP:
    	 */
    	SPIN_BUG_ON(!ret, lock, "trylock failure on UP");
    #endif
    	return ret;
    }
    
    void do_raw_spin_unlock(raw_spinlock_t *lock)
    {
    	debug_spin_unlock(lock);
    	arch_spin_unlock(&lock->raw_lock);
    }
    
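    /*
     * Report an rwlock problem; rwlocks have no spin_dump() equivalent,
     * so only the raw lock address is printed before the stack dump.
     */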
    static void rwlock_bug(rwlock_t *lock, const char *msg)
    {
    	if (!debug_locks_off())
    		return;
    
    	printk(KERN_EMERG "BUG: rwlock %s on CPU#%d, %s/%d, %p\n",
    		msg, raw_smp_processor_id(), current->comm,
    		task_pid_nr(current), lock);
    	dump_stack();
    }
    
    #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
    
    #if 0		/* __write_lock_debug() can lock up - maybe this can too? */
    static void __read_lock_debug(rwlock_t *lock)
    {
    	u64 i;
    	u64 loops = loops_per_jiffy * HZ;
    	int print_once = 1;
    
    	for (;;) {
    		for (i = 0; i < loops; i++) {
    			if (arch_read_trylock(&lock->raw_lock))
    				return;
    			__delay(1);
    		}
    		/* lockup suspected: */
    		if (print_once) {
    			print_once = 0;
    			printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
    					"%s/%d, %p\n",
    				raw_smp_processor_id(), current->comm,
    				current->pid, lock);
    			dump_stack();
    		}
    	}
    }
    #endif
    
    void do_raw_read_lock(rwlock_t *lock)
    {
    	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
    	arch_read_lock(&lock->raw_lock);
    }
    
    int do_raw_read_trylock(rwlock_t *lock)
    {
    	int ret = arch_read_trylock(&lock->raw_lock);
    
    #ifndef CONFIG_SMP
    	/*
    	 * Must not happen on UP:
    	 */
    	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
    #endif
    	return ret;
    }
    
    void do_raw_read_unlock(rwlock_t *lock)
    {
    	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
    	arch_read_unlock(&lock->raw_lock);
    }
    
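    /*
     * Write-side sanity checks mirror the spinlock ones: valid magic and
     * no recursive locking by the current task/CPU.
     */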
    static inline void debug_write_lock_before(rwlock_t *lock)
    {
    	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
    	RWLOCK_BUG_ON(lock->owner == current, lock, "recursion");
    	RWLOCK_BUG_ON(lock->owner_cpu == raw_smp_processor_id(),
    							lock, "cpu recursion");
    }
    
    static inline void debug_write_lock_after(rwlock_t *lock)
    {
    	lock->owner_cpu = raw_smp_processor_id();
    	lock->owner = current;
    }
    
    static inline void debug_write_unlock(rwlock_t *lock)
    {
    	RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
    	RWLOCK_BUG_ON(lock->owner != current, lock, "wrong owner");
    	RWLOCK_BUG_ON(lock->owner_cpu != raw_smp_processor_id(),
    							lock, "wrong CPU");
    	lock->owner = SPINLOCK_OWNER_INIT;
    	lock->owner_cpu = -1;
    }
    
    #if 0		/* This can cause lockups */
    static void __write_lock_debug(rwlock_t *lock)
    {
    	u64 i;
    	u64 loops = loops_per_jiffy * HZ;
    	int print_once = 1;
    
    	for (;;) {
    		for (i = 0; i < loops; i++) {
    			if (arch_write_trylock(&lock->raw_lock))
    				return;
    			__delay(1);
    		}
    		/* lockup suspected: */
    		if (print_once) {
    			print_once = 0;
    			printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
    					"%s/%d, %p\n",
    				raw_smp_processor_id(), current->comm,
    				current->pid, lock);
    			dump_stack();
    		}
    	}
    }
    #endif
    
    void do_raw_write_lock(rwlock_t *lock)
    {
    	debug_write_lock_before(lock);
    	arch_write_lock(&lock->raw_lock);
    	debug_write_lock_after(lock);
    }
    
    int do_raw_write_trylock(rwlock_t *lock)
    {
    	int ret = arch_write_trylock(&lock->raw_lock);
    
    	if (ret)
    		debug_write_lock_after(lock);
    #ifndef CONFIG_SMP
    	/*
    	 * Must not happen on UP:
    	 */
    	RWLOCK_BUG_ON(!ret, lock, "trylock failure on UP");
    #endif
    	return ret;
    }
    
    void do_raw_write_unlock(rwlock_t *lock)
    {
    	debug_write_unlock(lock);
    	arch_write_unlock(&lock->raw_lock);
    }