memblock.c

      mm/memblock.c: do not complain about top-down allocations for !MEMORY_HOTREMOVE · e3d301ca
      Michal Hocko authored
      Mike Rapoport is converting architectures from the bootmem to the
      nobootmem allocator.  While doing so for m68k, Geert noticed that he
      gets a scary-looking warning:
      
        WARNING: CPU: 0 PID: 0 at mm/memblock.c:230
        memblock_find_in_range_node+0x11c/0x1be
        memblock: bottom-up allocation failed, memory hotunplug may be affected
        Modules linked in:
        CPU: 0 PID: 0 Comm: swapper Not tainted
        4.18.0-rc3-atari-01343-gf2fb5f2e09a97a3c-dirty #7
        Call Trace: __warn+0xa8/0xc2
          kernel_pg_dir+0x0/0x1000
          netdev_lower_get_next+0x2/0x22
          warn_slowpath_fmt+0x2e/0x36
          memblock_find_in_range_node+0x11c/0x1be
          memblock_find_in_range_node+0x11c/0x1be
          memblock_find_in_range_node+0x0/0x1be
          vprintk_func+0x66/0x6e
          memblock_virt_alloc_internal+0xd0/0x156
          netdev_lower_get_next+0x2/0x22
          netdev_lower_get_next+0x2/0x22
          kernel_pg_dir+0x0/0x1000
          memblock_virt_alloc_try_nid_nopanic+0x58/0x7a
          netdev_lower_get_next+0x2/0x22
          kernel_pg_dir+0x0/0x1000
          kernel_pg_dir+0x0/0x1000
          EXPTBL+0x234/0x400
          EXPTBL+0x234/0x400
          alloc_node_mem_map+0x4a/0x66
          netdev_lower_get_next+0x2/0x22
          free_area_init_node+0xe2/0x29e
          EXPTBL+0x234/0x400
          paging_init+0x430/0x462
          kernel_pg_dir+0x0/0x1000
          printk+0x0/0x1a
          EXPTBL+0x234/0x400
          setup_arch+0x1b8/0x22c
          start_kernel+0x4a/0x40a
          _sinittext+0x344/0x9e8
      
      The warning is basically saying that a top-down allocation can break
      memory hotremove because the memblock allocation is not movable.  But
      m68k doesn't even support MEMORY_HOTREMOVE, so there is no point in
      warning about it.

      Make the warning conditional on the configurations that care.
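      In sketch form, the fix keys the WARN_ONCE() condition on the config
      option instead of warning unconditionally (the committed hunk may
      differ in detail):

        WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
                  "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");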
      
      Link: http://lkml.kernel.org/r/20180706061750.GH32658@dhcp22.suse.cz
      
      
      Signed-off-by: Michal Hocko <mhocko@suse.com>
      Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
      Tested-by: Geert Uytterhoeven <geert@linux-m68k.org>
      Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
      Cc: Vlastimil Babka <vbabka@suse.cz>
      Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
      Cc: Greg Ungerer <gerg@linux-m68k.org>
      Cc: Sam Creasey <sammy@sammy.net>
      Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
      Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    percpu.h
    #ifndef _ASM_X86_PERCPU_H
    #define _ASM_X86_PERCPU_H
    
    #ifdef CONFIG_X86_64
    #define __percpu_seg		gs
    #define __percpu_mov_op		movq
    #else
    #define __percpu_seg		fs
    #define __percpu_mov_op		movl
    #endif
    
    #ifdef __ASSEMBLY__
    
    /*
     * PER_CPU finds an address of a per-cpu variable.
     *
     * Args:
     *    var - variable name
     *    reg - 32bit register
     *
     * The resulting address is stored in the "reg" argument.
     *
     * Example:
     *    PER_CPU(cpu_gdt_descr, %ebx)
     */
    #ifdef CONFIG_SMP
    #define PER_CPU(var, reg)						\
    	__percpu_mov_op %__percpu_seg:this_cpu_off, reg;		\
    	lea var(reg), reg
    #define PER_CPU_VAR(var)	%__percpu_seg:var
    #else /* ! SMP */
    #define PER_CPU(var, reg)	__percpu_mov_op $var, reg
    #define PER_CPU_VAR(var)	var
    #endif	/* SMP */
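    /*
     * Illustrative expansion of the example above on a 32-bit SMP build,
     * where __percpu_seg is fs and __percpu_mov_op is movl:
     *
     *    PER_CPU(cpu_gdt_descr, %ebx)
     *
     * becomes roughly
     *
     *    movl %fs:this_cpu_off, %ebx
     *    lea cpu_gdt_descr(%ebx), %ebx
     */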
    
    #ifdef CONFIG_X86_64_SMP
    #define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
    #else
    #define INIT_PER_CPU_VAR(var)  var
    #endif
    
    #else /* ...!ASSEMBLY */
    
    #include <linux/kernel.h>
    #include <linux/stringify.h>
    
    #ifdef CONFIG_SMP
    #define __percpu_prefix		"%%"__stringify(__percpu_seg)":"
    #define __my_cpu_offset		this_cpu_read(this_cpu_off)
    
    /*
     * Compared to the generic __my_cpu_offset version, the following
     * saves one instruction and avoids clobbering a temp register.
     */
    #define arch_raw_cpu_ptr(ptr)				\
    ({							\
    	unsigned long tcp_ptr__;			\
    	asm volatile("add " __percpu_arg(1) ", %0"	\
    		     : "=r" (tcp_ptr__)			\
    		     : "m" (this_cpu_off), "0" (ptr));	\
    	(typeof(*(ptr)) __kernel __force *)tcp_ptr__;	\
    })
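    /*
     * Illustrative use (demo_count is a hypothetical per-cpu variable, not
     * defined in this header):
     *
     *    DEFINE_PER_CPU(int, demo_count);
     *    ...
     *    int *p = arch_raw_cpu_ptr(&demo_count);
     *    (*p)++;
     *
     * The increment operates on this CPU's instance; the single
     * "add %gs:this_cpu_off, %0" (%fs: on 32-bit) folds the this_cpu_off
     * read and the pointer adjustment into one instruction.
     */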
    #else
    #define __percpu_prefix		""
    #endif
    
    #define __percpu_arg(x)		__percpu_prefix "%" #x
    
    /*
     * Initialized pointers to per-cpu variables needed for the boot
     * processor need to use these macros to get the proper address
     * offset from __per_cpu_load on SMP.
     *
     * There also must be an entry in vmlinux_64.lds.S
     */
    #define DECLARE_INIT_PER_CPU(var) \
           extern typeof(var) init_per_cpu_var(var)
    
    #ifdef CONFIG_X86_64_SMP
    #define init_per_cpu_var(var)  init_per_cpu__##var
    #else
    #define init_per_cpu_var(var)  var
    #endif
    
    /* For arch-specific code, we can use direct single-insn ops (they
     * don't give an lvalue though). */
    extern void __bad_percpu_size(void);
    
    #define percpu_to_op(op, var, val)			\
    do {							\
    	typedef typeof(var) pto_T__;			\
    	if (0) {					\
    		pto_T__ pto_tmp__;			\
    		pto_tmp__ = (val);			\
    		(void)pto_tmp__;			\
    	}						\
    	switch (sizeof(var)) {				\
    	case 1:						\
    		asm(op "b %1,"__percpu_arg(0)		\
    		    : "+m" (var)			\
    		    : "qi" ((pto_T__)(val)));		\
    		break;					\
    	case 2:						\
    		asm(op "w %1,"__percpu_arg(0)		\
    		    : "+m" (var)			\
    		    : "ri" ((pto_T__)(val)));		\
    		break;					\
    	case 4:						\
    		asm(op "l %1,"__percpu_arg(0)		\
    		    : "+m" (var)			\
    		    : "ri" ((pto_T__)(val)));		\
    		break;					\
    	case 8:						\
    		asm(op "q %1,"__percpu_arg(0)		\
    		    : "+m" (var)			\
    		    : "re" ((pto_T__)(val)));		\
    		break;					\
    	default: __bad_percpu_size();			\
    	}						\
    } while (0)
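    /*
     * Illustrative expansion (demo_val is a hypothetical 4-byte per-cpu
     * variable): percpu_to_op("mov", demo_val, 5) selects the "l" suffix
     * from sizeof() and emits roughly
     *
     *    movl $5, %gs:demo_val
     *
     * (%fs: on 32-bit SMP; no segment prefix at all on UP builds).
     */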
    
    /*
     * Generate a percpu add-to-memory instruction, optimizing to
     * inc/dec when the constant 1 or -1 is added.
     */
    #define percpu_add_op(var, val)						\
    do {									\
    	typedef typeof(var) pao_T__;					\
    	const int pao_ID__ = (__builtin_constant_p(val) &&		\
    			      ((val) == 1 || (val) == -1)) ?		\
    				(int)(val) : 0;				\
    	if (0) {							\
    		pao_T__ pao_tmp__;					\
    		pao_tmp__ = (val);					\
    		(void)pao_tmp__;					\
    	}								\
    	switch (sizeof(var)) {						\
    	case 1:								\
    		if (pao_ID__ == 1)					\
    			asm("incb "__percpu_arg(0) : "+m" (var));	\
    		else if (pao_ID__ == -1)				\
    			asm("decb "__percpu_arg(0) : "+m" (var));	\
    		else							\
    			asm("addb %1, "__percpu_arg(0)			\
    			    : "+m" (var)				\
    			    : "qi" ((pao_T__)(val)));			\
    		break;							\
    	case 2:								\
    		if (pao_ID__ == 1)					\
    			asm("incw "__percpu_arg(0) : "+m" (var));	\
    		else if (pao_ID__ == -1)				\
    			asm("decw "__percpu_arg(0) : "+m" (var));	\
    		else							\
    			asm("addw %1, "__percpu_arg(0)			\
    			    : "+m" (var)				\
    			    : "ri" ((pao_T__)(val)));			\
    		break;							\
    	case 4:								\
    		if (pao_ID__ == 1)					\
    			asm("incl "__percpu_arg(0) : "+m" (var));	\
    		else if (pao_ID__ == -1)				\
    			asm("decl "__percpu_arg(0) : "+m" (var));	\
    		else							\
    			asm("addl %1, "__percpu_arg(0)			\
    			    : "+m" (var)				\
    			    : "ri" ((pao_T__)(val)));			\
    		break;							\
    	case 8:								\
    		if (pao_ID__ == 1)					\
    			asm("incq "__percpu_arg(0) : "+m" (var));	\
    		else if (pao_ID__ == -1)				\
    			asm("decq "__percpu_arg(0) : "+m" (var));	\
    		else							\
    			asm("addq %1, "__percpu_arg(0)			\
    			    : "+m" (var)				\
    			    : "re" ((pao_T__)(val)));			\
    		break;							\
    	default: __bad_percpu_size();					\
    	}								\
    } while (0)
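    /*
     * Illustrative: for a hypothetical 4-byte per-cpu variable demo_val,
     * percpu_add_op(demo_val, 1) emits "incl %gs:demo_val",
     * percpu_add_op(demo_val, -1) emits "decl %gs:demo_val", and any other
     * value falls back to "addl $val, %gs:demo_val".
     */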
    
    #define percpu_from_op(op, var)				\
    ({							\
    	typeof(var) pfo_ret__;				\
    	switch (sizeof(var)) {				\
    	case 1:						\
    		asm(op "b "__percpu_arg(1)",%0"		\
    		    : "=q" (pfo_ret__)			\
    		    : "m" (var));			\
    		break;					\
    	case 2:						\
    		asm(op "w "__percpu_arg(1)",%0"		\
    		    : "=r" (pfo_ret__)			\
    		    : "m" (var));			\
    		break;					\
    	case 4:						\
    		asm(op "l "__percpu_arg(1)",%0"		\
    		    : "=r" (pfo_ret__)			\
    		    : "m" (var));			\
    		break;					\
    	case 8:						\
    		asm(op "q "__percpu_arg(1)",%0"		\
    		    : "=r" (pfo_ret__)			\
    		    : "m" (var));			\
    		break;					\
    	default: __bad_percpu_size();			\
    	}						\
    	pfo_ret__;					\
    })
    
    #define percpu_stable_op(op, var)			\
    ({							\
    	typeof(var) pfo_ret__;				\
    	switch (sizeof(var)) {				\
    	case 1:						\
    		asm(op "b "__percpu_arg(P1)",%0"	\
    		    : "=q" (pfo_ret__)			\
    		    : "p" (&(var)));			\
    		break;					\
    	case 2:						\
    		asm(op "w "__percpu_arg(P1)",%0"	\
    		    : "=r" (pfo_ret__)			\
    		    : "p" (&(var)));			\
    		break;					\
    	case 4:						\
    		asm(op "l "__percpu_arg(P1)",%0"	\
    		    : "=r" (pfo_ret__)			\
    		    : "p" (&(var)));			\
    		break;					\
    	case 8:						\
    		asm(op "q "__percpu_arg(P1)",%0"	\
    		    : "=r" (pfo_ret__)			\
    		    : "p" (&(var)));			\
    		break;					\
    	default: __bad_percpu_size();			\
    	}						\
    	pfo_ret__;					\
    })
    
    #define percpu_unary_op(op, var)			\
    ({							\
    	switch (sizeof(var)) {				\
    	case 1:						\
    		asm(op "b "__percpu_arg(0)		\
    		    : "+m" (var));			\
    		break;					\
    	case 2:						\
    		asm(op "w "__percpu_arg(0)		\
    		    : "+m" (var));			\
    		break;					\
    	case 4:						\
    		asm(op "l "__percpu_arg(0)		\
    		    : "+m" (var));			\
    		break;					\
    	case 8:						\
    		asm(op "q "__percpu_arg(0)		\
    		    : "+m" (var));			\
    		break;					\
    	default: __bad_percpu_size();			\
    	}						\
    })
    
    /*
     * Add return operation
     */
    #define percpu_add_return_op(var, val)					\
    ({									\
    	typeof(var) paro_ret__ = val;					\
    	switch (sizeof(var)) {						\
    	case 1:								\
    		asm("xaddb %0, "__percpu_arg(1)				\
    			    : "+q" (paro_ret__), "+m" (var)		\
    			    : : "memory");				\
    		break;							\
    	case 2:								\
    		asm("xaddw %0, "__percpu_arg(1)				\
    			    : "+r" (paro_ret__), "+m" (var)		\
    			    : : "memory");				\
    		break;							\
    	case 4:								\
    		asm("xaddl %0, "__percpu_arg(1)				\
    			    : "+r" (paro_ret__), "+m" (var)		\
    			    : : "memory");				\
    		break;							\
    	case 8:								\
    		asm("xaddq %0, "__percpu_arg(1)				\
    			    : "+re" (paro_ret__), "+m" (var)		\
    			    : : "memory");				\
    		break;							\
    	default: __bad_percpu_size();					\
    	}								\
    	paro_ret__ += val;						\
    	paro_ret__;							\
    })
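    /*
     * Note on the return value: xadd leaves the old contents of the per-cpu
     * slot in paro_ret__ and stores old + val back to memory, so the
     * trailing "paro_ret__ += val" makes the macro evaluate to the new
     * value, matching the this_cpu_add_return() contract.
     */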
    
    /*
     * xchg is implemented here using cmpxchg without a lock prefix: a real
     * xchg is expensive because of its implied lock prefix, and the
     * processor cannot prefetch cachelines around a locked xchg.
     */
    #define percpu_xchg_op(var, nval)					\
    ({									\
    	typeof(var) pxo_ret__;						\
    	typeof(var) pxo_new__ = (nval);					\
    	switch (sizeof(var)) {						\
    	case 1:								\
    		asm("\n\tmov "__percpu_arg(1)",%%al"			\
    		    "\n1:\tcmpxchgb %2, "__percpu_arg(1)		\
    		    "\n\tjnz 1b"					\
    			    : "=&a" (pxo_ret__), "+m" (var)		\
    			    : "q" (pxo_new__)				\
    			    : "memory");				\
    		break;							\
    	case 2:								\
    		asm("\n\tmov "__percpu_arg(1)",%%ax"			\
    		    "\n1:\tcmpxchgw %2, "__percpu_arg(1)		\
    		    "\n\tjnz 1b"					\
    			    : "=&a" (pxo_ret__), "+m" (var)		\
    			    : "r" (pxo_new__)				\
    			    : "memory");				\
    		break;							\
    	case 4:								\
    		asm("\n\tmov "__percpu_arg(1)",%%eax"			\
    		    "\n1:\tcmpxchgl %2, "__percpu_arg(1)		\
    		    "\n\tjnz 1b"					\
    			    : "=&a" (pxo_ret__), "+m" (var)		\
    			    : "r" (pxo_new__)				\
    			    : "memory");				\
    		break;							\
    	case 8:								\
    		asm("\n\tmov "__percpu_arg(1)",%%rax"			\
    		    "\n1:\tcmpxchgq %2, "__percpu_arg(1)		\
    		    "\n\tjnz 1b"					\
    			    : "=&a" (pxo_ret__), "+m" (var)		\
    			    : "r" (pxo_new__)				\
    			    : "memory");				\
    		break;							\
    	default: __bad_percpu_size();					\
    	}								\
    	pxo_ret__;							\
    })
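    /*
     * The mov/cmpxchg loop above works as follows: load the current value
     * into the accumulator, then cmpxchg the new value in; if something on
     * this CPU (e.g. an interrupt) changed the slot in between, ZF is clear
     * and the loop retries with the freshly loaded value.  pxo_ret__ ends
     * up holding the value that was actually replaced.
     */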
    
    /*
     * cmpxchg has no such implied lock semantics; as a result it is much
     * more efficient for CPU-local operations.
     */
    #define percpu_cmpxchg_op(var, oval, nval)				\
    ({									\
    	typeof(var) pco_ret__;						\
    	typeof(var) pco_old__ = (oval);					\
    	typeof(var) pco_new__ = (nval);					\
    	switch (sizeof(var)) {						\
    	case 1:								\
    		asm("cmpxchgb %2, "__percpu_arg(1)			\
    			    : "=a" (pco_ret__), "+m" (var)		\
    			    : "q" (pco_new__), "0" (pco_old__)		\
    			    : "memory");				\
    		break;							\
    	case 2:								\
    		asm("cmpxchgw %2, "__percpu_arg(1)			\
    			    : "=a" (pco_ret__), "+m" (var)		\
    			    : "r" (pco_new__), "0" (pco_old__)		\
    			    : "memory");				\
    		break;							\
    	case 4:								\
    		asm("cmpxchgl %2, "__percpu_arg(1)			\
    			    : "=a" (pco_ret__), "+m" (var)		\
    			    : "r" (pco_new__), "0" (pco_old__)		\
    			    : "memory");				\
    		break;							\
    	case 8:								\
    		asm("cmpxchgq %2, "__percpu_arg(1)			\
    			    : "=a" (pco_ret__), "+m" (var)		\
    			    : "r" (pco_new__), "0" (pco_old__)		\
    			    : "memory");				\
    		break;							\
    	default: __bad_percpu_size();					\
    	}								\
    	pco_ret__;							\
    })
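    /*
     * Illustrative use (demo_state, IDLE and BUSY are hypothetical):
     *
     *    old = this_cpu_cmpxchg(demo_state, IDLE, BUSY);
     *    if (old == IDLE)
     *            ...;    this CPU's demo_state now holds BUSY
     *
     * The macro evaluates to the value found in the slot, whether or not
     * the exchange happened.
     */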
    
    /*
     * this_cpu_read() makes gcc load the percpu variable every time it is
     * accessed, while this_cpu_read_stable() allows the value to be cached.
     * this_cpu_read_stable() is more efficient and can be used if its value
     * is guaranteed to be valid across cpus.  The current users include
     * get_current() and get_thread_info(), both of which are actually
     * per-thread variables implemented as per-cpu variables and thus
     * stable for the duration of the respective task.
     */
    #define this_cpu_read_stable(var)	percpu_stable_op("mov", var)
    
    #define raw_cpu_read_1(pcp)		percpu_from_op("mov", pcp)
    #define raw_cpu_read_2(pcp)		percpu_from_op("mov", pcp)
    #define raw_cpu_read_4(pcp)		percpu_from_op("mov", pcp)
    
    #define raw_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
    #define raw_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
    #define raw_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
    #define raw_cpu_add_1(pcp, val)		percpu_add_op((pcp), val)
    #define raw_cpu_add_2(pcp, val)		percpu_add_op((pcp), val)
    #define raw_cpu_add_4(pcp, val)		percpu_add_op((pcp), val)
    #define raw_cpu_and_1(pcp, val)		percpu_to_op("and", (pcp), val)
    #define raw_cpu_and_2(pcp, val)		percpu_to_op("and", (pcp), val)
    #define raw_cpu_and_4(pcp, val)		percpu_to_op("and", (pcp), val)
    #define raw_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
    #define raw_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
    #define raw_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
    #define raw_cpu_xchg_1(pcp, val)	percpu_xchg_op(pcp, val)
    #define raw_cpu_xchg_2(pcp, val)	percpu_xchg_op(pcp, val)
    #define raw_cpu_xchg_4(pcp, val)	percpu_xchg_op(pcp, val)
    
    #define this_cpu_read_1(pcp)		percpu_from_op("mov", pcp)
    #define this_cpu_read_2(pcp)		percpu_from_op("mov", pcp)
    #define this_cpu_read_4(pcp)		percpu_from_op("mov", pcp)
    #define this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
    #define this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
    #define this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
    #define this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
    #define this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
    #define this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
    #define this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
    #define this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
    #define this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
    #define this_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
    #define this_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
    #define this_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
    #define this_cpu_xchg_1(pcp, nval)	percpu_xchg_op(pcp, nval)
    #define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
    #define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)
    
    #define raw_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
    #define raw_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
    #define raw_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
    #define raw_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
    #define raw_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
    #define raw_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
    
    #define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
    #define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
    #define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
    #define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
    #define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
    #define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
    
    #ifdef CONFIG_X86_CMPXCHG64
    #define percpu_cmpxchg8b_double(pcp1, pcp2, o1, o2, n1, n2)		\
    ({									\
    	bool __ret;							\
    	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
    	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
    	asm volatile("cmpxchg8b "__percpu_arg(1)"\n\tsetz %0\n\t"	\
    		    : "=a" (__ret), "+m" (pcp1), "+m" (pcp2), "+d" (__o2) \
    		    :  "b" (__n1), "c" (__n2), "a" (__o1));		\
    	__ret;								\
    })
    
    #define raw_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
    #define this_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
    #endif /* CONFIG_X86_CMPXCHG64 */
    
    /*
     * Per cpu atomic 64 bit operations are only available under 64 bit.
     * 32 bit must fall back to generic operations.
     */
    #ifdef CONFIG_X86_64
    #define raw_cpu_read_8(pcp)			percpu_from_op("mov", pcp)
    #define raw_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
    #define raw_cpu_add_8(pcp, val)			percpu_add_op((pcp), val)
    #define raw_cpu_and_8(pcp, val)			percpu_to_op("and", (pcp), val)
    #define raw_cpu_or_8(pcp, val)			percpu_to_op("or", (pcp), val)
    #define raw_cpu_add_return_8(pcp, val)		percpu_add_return_op(pcp, val)
    #define raw_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
    #define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
    
    #define this_cpu_read_8(pcp)			percpu_from_op("mov", pcp)
    #define this_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
    #define this_cpu_add_8(pcp, val)		percpu_add_op((pcp), val)
    #define this_cpu_and_8(pcp, val)		percpu_to_op("and", (pcp), val)
    #define this_cpu_or_8(pcp, val)			percpu_to_op("or", (pcp), val)
    #define this_cpu_add_return_8(pcp, val)		percpu_add_return_op(pcp, val)
    #define this_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
    #define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
    
    /*
     * Pretty complex macro to generate the cmpxchg16b instruction.  The
     * instruction is not supported on early AMD64 processors, so we must be
     * able to emulate it in software.  The address used by cmpxchg16b must
     * be aligned to a 16-byte boundary.
     */
     */
    #define percpu_cmpxchg16b_double(pcp1, pcp2, o1, o2, n1, n2)		\
    ({									\
    	bool __ret;							\
    	typeof(pcp1) __o1 = (o1), __n1 = (n1);				\
    	typeof(pcp2) __o2 = (o2), __n2 = (n2);				\
    	alternative_io("leaq %P1,%%rsi\n\tcall this_cpu_cmpxchg16b_emu\n\t", \
    		       "cmpxchg16b " __percpu_arg(1) "\n\tsetz %0\n\t",	\
    		       X86_FEATURE_CX16,				\
    		       ASM_OUTPUT2("=a" (__ret), "+m" (pcp1),		\
    				   "+m" (pcp2), "+d" (__o2)),		\
    		       "b" (__n1), "c" (__n2), "a" (__o1) : "rsi");	\
    	__ret;								\
    })
    
    #define raw_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
    #define this_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
    
    #endif
    
    /* This is not atomic against other CPUs -- CPU preemption needs to be off */
    #define x86_test_and_clear_bit_percpu(bit, var)				\
    ({									\
    	unsigned char old__;						\
    	asm volatile("btr %2,"__percpu_arg(1)"\n\tsetc %0"		\
    		     : "=qm" (old__), "+m" (var)			\
    		     : "dIr" (bit));					\
    	old__;								\
    })
    
    static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
                            const unsigned long __percpu *addr)
    {
    	unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;
    
    #ifdef CONFIG_X86_64
    	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
    #else
    	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
    #endif
    }
    
    static inline int x86_this_cpu_variable_test_bit(int nr,
                            const unsigned long __percpu *addr)
    {
    	unsigned char oldbit;
    
    	asm volatile("bt "__percpu_arg(2)",%1\n\t"
    			"setc %0"
    			: "=qm" (oldbit)
    			: "m" (*(unsigned long *)addr), "Ir" (nr));
    
    	return oldbit;
    }
    
    #define x86_this_cpu_test_bit(nr, addr)			\
    	(__builtin_constant_p((nr))			\
    	 ? x86_this_cpu_constant_test_bit((nr), (addr))	\
    	 : x86_this_cpu_variable_test_bit((nr), (addr)))
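    /*
     * Illustrative: x86_this_cpu_test_bit(3, &demo_mask), with a
     * compile-time constant bit number, reads this CPU's demo_mask word via
     * raw_cpu_read_*() and masks bit 3, while a runtime bit number goes
     * through the "bt"-based variable path above.  (demo_mask is a
     * hypothetical per-cpu unsigned long.)
     */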
    
    
    #include <asm-generic/percpu.h>
    
    /* We can use this directly for local CPU (faster). */
    DECLARE_PER_CPU_READ_MOSTLY(unsigned long, this_cpu_off);
    
    #endif /* !__ASSEMBLY__ */
    
    #ifdef CONFIG_SMP
    
    /*
     * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
     * variables that are initialized and accessed before there are per_cpu
     * areas allocated.
     */
    
    #define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)			\
    	DEFINE_PER_CPU(_type, _name) = _initvalue;			\
    	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
    				{ [0 ... NR_CPUS-1] = _initvalue };	\
    	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
    
    #define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
    	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue;		\
    	__typeof__(_type) _name##_early_map[NR_CPUS] __initdata =	\
    				{ [0 ... NR_CPUS-1] = _initvalue };	\
    	__typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map
    
    #define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
    	EXPORT_PER_CPU_SYMBOL(_name)
    
    #define DECLARE_EARLY_PER_CPU(_type, _name)			\
    	DECLARE_PER_CPU(_type, _name);				\
    	extern __typeof__(_type) *_name##_early_ptr;		\
    	extern __typeof__(_type)  _name##_early_map[]
    
    #define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
    	DECLARE_PER_CPU_READ_MOSTLY(_type, _name);		\
    	extern __typeof__(_type) *_name##_early_ptr;		\
    	extern __typeof__(_type)  _name##_early_map[]
    
    #define	early_per_cpu_ptr(_name) (_name##_early_ptr)
    #define	early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
    #define	early_per_cpu(_name, _cpu) 				\
    	*(early_per_cpu_ptr(_name) ?				\
    		&early_per_cpu_ptr(_name)[_cpu] :		\
    		&per_cpu(_name, _cpu))
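    /*
     * Illustrative use (demo_apicid is a hypothetical early per-cpu
     * variable):
     *
     *    DEFINE_EARLY_PER_CPU(u16, demo_apicid, 0);
     *    ...
     *    id = early_per_cpu(demo_apicid, cpu);
     *
     * Until the real per-cpu areas are set up, the access goes through
     * demo_apicid_early_map[]; once setup code clears demo_apicid_early_ptr
     * the same expression falls through to the normal per_cpu() access.
     */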
    
    #else	/* !CONFIG_SMP */
    #define	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		\
    	DEFINE_PER_CPU(_type, _name) = _initvalue
    
    #define DEFINE_EARLY_PER_CPU_READ_MOSTLY(_type, _name, _initvalue)	\
    	DEFINE_PER_CPU_READ_MOSTLY(_type, _name) = _initvalue
    
    #define EXPORT_EARLY_PER_CPU_SYMBOL(_name)			\
    	EXPORT_PER_CPU_SYMBOL(_name)
    
    #define DECLARE_EARLY_PER_CPU(_type, _name)			\
    	DECLARE_PER_CPU(_type, _name)
    
    #define DECLARE_EARLY_PER_CPU_READ_MOSTLY(_type, _name)		\
    	DECLARE_PER_CPU_READ_MOSTLY(_type, _name)
    
    #define	early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
    #define	early_per_cpu_ptr(_name) NULL
    /* no early_per_cpu_map() */
    
    #endif	/* !CONFIG_SMP */
    
    #endif /* _ASM_X86_PERCPU_H */