diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f5f0cc91518cf64508f04a83e5b1d7310be3b8e7..45943b2fa82b9c978128263ff32574b108ece180 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1151,7 +1151,6 @@ static void task_numa_compare(struct task_numa_env *env,
 	struct rq *src_rq = cpu_rq(env->src_cpu);
 	struct rq *dst_rq = cpu_rq(env->dst_cpu);
 	struct task_struct *cur;
-	struct task_group *tg;
 	long src_load, dst_load;
 	long load;
 	long imp = env->p->numa_group ? groupimp : taskimp;
@@ -1223,14 +1222,9 @@ static void task_numa_compare(struct task_numa_env *env,
 	 * In the overloaded case, try and keep the load balanced.
 	 */
 balance:
-	src_load = env->src_stats.load;
-	dst_load = env->dst_stats.load;
-
-	/* Calculate the effect of moving env->p from src to dst. */
-	load = env->p->se.load.weight;
-	tg = task_group(env->p);
-	src_load += effective_load(tg, env->src_cpu, -load, -load);
-	dst_load += effective_load(tg, env->dst_cpu, load, load);
+	load = task_h_load(env->p);
+	dst_load = env->dst_stats.load + load;
+	src_load = env->src_stats.load - load;
 
 	if (moveimp > imp && moveimp > env->best_imp) {
 		/*
@@ -1250,11 +1244,9 @@ static void task_numa_compare(struct task_numa_env *env,
 		goto unlock;
 
 	if (cur) {
-		/* Cur moves in the opposite direction. */
-		load = cur->se.load.weight;
-		tg = task_group(cur);
-		src_load += effective_load(tg, env->src_cpu, load, load);
-		dst_load += effective_load(tg, env->dst_cpu, -load, -load);
+		load = task_h_load(cur);
+		dst_load -= load;
+		src_load += load;
 	}
 
 	if (load_too_imbalanced(src_load, dst_load, env))
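
For context, the hunks above replace the effective_load()-based estimate with task_h_load(): the moving task's hierarchical load is added to the destination and subtracted from the source, and the adjustment is reversed for the candidate task cur that would move the other way before the result is checked for imbalance. The following user-space sketch mirrors that arithmetic so it can be run outside the kernel tree. The struct names, the precomputed h_load field, and the 25% imbalance threshold are illustrative assumptions, not the kernel's definitions; the real task_h_load() walks the cfs_rq hierarchy and load_too_imbalanced() uses env->imbalance_pct.

/*
 * Minimal sketch of the swapped-load computation introduced by this
 * patch: p moves src -> dst, cur (if any) moves dst -> src.
 * Types and the threshold are assumptions for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

struct sketch_task {
	long h_load;		/* stand-in for task_h_load(p) */
};

struct sketch_env {
	long src_load;		/* env->src_stats.load */
	long dst_load;		/* env->dst_stats.load */
};

/*
 * Rough stand-in for load_too_imbalanced(): reject moves that would
 * leave the destination more than 25% busier than the source.
 */
static bool too_imbalanced(long src_load, long dst_load)
{
	long imb = dst_load - src_load;

	return imb > src_load / 4;
}

static bool swap_would_balance(const struct sketch_env *env,
			       const struct sketch_task *p,
			       const struct sketch_task *cur)
{
	/* Effect of moving p from src to dst. */
	long dst_load = env->dst_load + p->h_load;
	long src_load = env->src_load - p->h_load;

	if (cur) {
		/* cur moves in the opposite direction. */
		dst_load -= cur->h_load;
		src_load += cur->h_load;
	}

	return !too_imbalanced(src_load, dst_load);
}

int main(void)
{
	struct sketch_env env = { .src_load = 3072, .dst_load = 2048 };
	struct sketch_task p = { .h_load = 1024 };
	struct sketch_task cur = { .h_load = 512 };

	/* Moving p alone overloads dst; swapping with cur stays balanced. */
	printf("move p alone:    %s\n",
	       swap_would_balance(&env, &p, NULL) ? "ok" : "too imbalanced");
	printf("swap p with cur: %s\n",
	       swap_would_balance(&env, &p, &cur) ? "ok" : "too imbalanced");
	return 0;
}

With the sample numbers, moving p by itself pushes dst_load to 3072 against a src_load of 2048 and is rejected, while swapping p with cur leaves both sides at 2560, which is the balanced-swap case the patched code is trying to find.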