diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 610a3db680ae498b4eeb027f351e8e4a885a337c..8af0d4fa683db56c0ba0695c1e8939b57fd15fe4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2341,7 +2341,16 @@ void drain_local_pages(struct zone *zone)
 
 static void drain_local_pages_wq(struct work_struct *work)
 {
+	/*
+	 * drain_all_pages doesn't use proper cpu hotplug protection, so
+	 * we can race with cpu offline when the WQ moves this work from
+	 * a cpu-pinned worker to an unbound one. We can operate on a different
+	 * cpu, which is all right, but we also have to make sure not to move
+	 * to a different one.
+	 */
+	preempt_disable();
 	drain_local_pages(NULL);
+	preempt_enable();
 }
 
 /*
@@ -2366,11 +2375,6 @@ void drain_all_pages(struct zone *zone)
 	if (current->flags & PF_WQ_WORKER)
 		return;
 
-	/*
-	 * As this can be called from reclaim context, do not reenter reclaim.
-	 * An allocation failure can be handled, it's simply slower
-	 */
-	get_online_cpus();
 	works = alloc_percpu_gfp(struct work_struct, GFP_ATOMIC);
 
 	/*
@@ -2421,7 +2425,6 @@ void drain_all_pages(struct zone *zone)
 			flush_work(&work);
 		}
 	}
-	put_online_cpus();
 }
 
 #ifdef CONFIG_HIBERNATION