From fabb657005edbbcb0d13ee49a40f1f4b042a1d19 Mon Sep 17 00:00:00 2001
From: Maxim Shchetynin <maxim@de.ibm.com>
Date: Sat, 5 Jul 2008 05:05:39 +1000
Subject: [PATCH] powerpc/spufs: add atomic busy_spus counter to struct
 cbe_spu_info

As the nr_active counter also includes SPUs that are waiting for syscalls to
return, we need a separate counter that only counts SPUs currently running
on the SPU side. This counter will be used by a cpufreq governor that targets
a frequency dependent on the number of running SPUs.
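
For illustration only (not part of this patch), such a governor could sample
the per-node counter with atomic_read() roughly as follows; spu_gov_target()
and the min/max policy choice are hypothetical:

	int busy = atomic_read(&cbe_spu_info[node].busy_spus);

	/* request the highest frequency as soon as any SPU runs user code */
	spu_gov_target(policy, busy ? policy->max : policy->min);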

Signed-off-by: Christian Krafft <krafft@de.ibm.com>
Acked-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
---
 arch/powerpc/platforms/cell/spufs/sched.c | 6 ++++++
 include/asm-powerpc/spu.h                 | 1 +
 2 files changed, 7 insertions(+)

diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index afb92d4fbcf5d..34654743363dc 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -994,6 +994,7 @@ void spuctx_switch_state(struct spu_context *ctx,
 	struct timespec ts;
 	struct spu *spu;
 	enum spu_utilization_state old_state;
+	int node;
 
 	ktime_get_ts(&ts);
 	curtime = timespec_to_ns(&ts);
@@ -1015,6 +1016,11 @@ void spuctx_switch_state(struct spu_context *ctx,
 		spu->stats.times[old_state] += delta;
 		spu->stats.util_state = new_state;
 		spu->stats.tstamp = curtime;
+		node = spu->node;
+		if (old_state == SPU_UTIL_USER)
+			atomic_dec(&cbe_spu_info[node].busy_spus);
+		if (new_state == SPU_UTIL_USER)
+			atomic_inc(&cbe_spu_info[node].busy_spus);
 	}
 }
 
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index 99348c1f4cab3..8b2eb044270ad 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -191,6 +191,7 @@ struct cbe_spu_info {
 	struct list_head spus;
 	int n_spus;
 	int nr_active;
+	atomic_t busy_spus;
 	atomic_t reserved_spus;
 };
 
-- 
GitLab