diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index ba5b23f547641e5222537b63f06893f5b6a55b41..817cea1b5ad3c6bd395d74a73248aea49f6656f2 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -17,7 +17,6 @@
 #include <linux/module.h>
 
 #include <asm/oprofile_impl.h>
-#include <asm/code-patching.h>
 #include <asm/cputable.h>
 #include <asm/prom.h>		/* for PTRRELOC on ARCH=ppc */
 
@@ -1638,38 +1637,3 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
 	BUG();
 	return NULL;
 }
-
-void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
-{
-	struct fixup_entry {
-		unsigned long	mask;
-		unsigned long	value;
-		long		start_off;
-		long		end_off;
-	} *fcur, *fend;
-
-	fcur = fixup_start;
-	fend = fixup_end;
-
-	for (; fcur < fend; fcur++) {
-		unsigned int *pstart, *pend, *p;
-
-		if ((value & fcur->mask) == fcur->value)
-			continue;
-
-		/* These PTRRELOCs will disappear once the new scheme for
-		 * modules and vdso is implemented
-		 */
-		pstart = ((unsigned int *)fcur) + (fcur->start_off / 4);
-		pend = ((unsigned int *)fcur) + (fcur->end_off / 4);
-
-		for (p = pstart; p < pend; p++) {
-			*p = PPC_NOP_INSTR;
-			asm volatile ("dcbst 0, %0" : : "r" (p));
-		}
-		asm volatile ("sync" : : : "memory");
-		for (p = pstart; p < pend; p++)
-			asm volatile ("icbi 0,%0" : : "r" (p));
-		asm volatile ("sync; isync" : : : "memory");
-	}
-}
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index e7f7042b9f61eb934be06b60a0410d30471cb6ab..fc52771f0cdbe7ad4cd36e7d45cc6c7db1d7843b 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -26,3 +26,4 @@ endif
 obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o
 
 obj-y			+= code-patching.o
+obj-y			+= feature-fixups.o
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
new file mode 100644
index 0000000000000000000000000000000000000000..f6fd5d2ff10deb14d2e9741c30c2eda40f338af8
--- /dev/null
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -0,0 +1,68 @@
+/*
+ *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
+ *
+ *  Modifications for ppc64:
+ *      Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
+ *
+ *  Copyright 2008 Michael Ellerman, IBM Corporation.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <asm/cputable.h>
+#include <asm/code-patching.h>
+
+
+/*
+ * One entry of the feature-fixup table.  start_off/end_off are byte
+ * offsets relative to the address of the entry itself, delimiting the
+ * instructions to be nop'ed out when the CPU lacks the feature.
+ */
+struct fixup_entry {
+	unsigned long	mask;
+	unsigned long	value;
+	long		start_off;
+	long		end_off;
+};
+
+static void patch_feature_section(unsigned long value, struct fixup_entry *fcur)
+{
+	unsigned int *pstart, *pend, *p;
+
+	/* Feature is present, leave the section's code as-is */
+	if ((value & fcur->mask) == fcur->value)
+		return;
+
+	/* Offsets are in bytes, entry-relative; convert to instruction
+	 * (word) pointers.
+	 */
+	pstart = ((unsigned int *)fcur) + (fcur->start_off / 4);
+	pend = ((unsigned int *)fcur) + (fcur->end_off / 4);
+
+	/* Replace the section with nops, then make the stores visible
+	 * to instruction fetch: dcbst flushes the new words out of the
+	 * d-cache, sync orders the flushes, icbi invalidates any stale
+	 * i-cache copies, and sync; isync forces a refetch before any
+	 * further execution.
+	 */
+	for (p = pstart; p < pend; p++) {
+		*p = PPC_NOP_INSTR;
+		asm volatile ("dcbst 0, %0" : : "r" (p));
+	}
+	asm volatile ("sync" : : : "memory");
+	for (p = pstart; p < pend; p++)
+		asm volatile ("icbi 0,%0" : : "r" (p));
+	asm volatile ("sync; isync" : : : "memory");
+}
+
+void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+{
+	struct fixup_entry *fcur, *fend;
+
+	fcur = fixup_start;
+	fend = fixup_end;
+
+	for (; fcur < fend; fcur++)
+		patch_feature_section(value, fcur);
+}