1 Subject: Improve search of node for contexts with SPU affinity
2 From: Andre Detsch <adetsch@br.ibm.com>
3 References: 447133 - LTC50070
5 This patch improves readability of the code responsible for trying to find
6 a node with enough SPUs not committed to other affinity gangs.
8 An additional check is also added, to avoid taking into account gangs that
do not have their reference context placed yet.
11 Signed-off-by: Andre Detsch <adetsch@br.ibm.com>
12 Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
13 Signed-off-by: Olaf Hering <olh@suse.de>
15 arch/powerpc/platforms/cell/spufs/sched.c | 19 +++++++++++++------
16 1 file changed, 13 insertions(+), 6 deletions(-)
18 --- a/arch/powerpc/platforms/cell/spufs/sched.c
19 +++ b/arch/powerpc/platforms/cell/spufs/sched.c
20 @@ -312,6 +312,15 @@ static struct spu *aff_ref_location(stru
22 node = cpu_to_node(raw_smp_processor_id());
23 for (n = 0; n < MAX_NUMNODES; n++, node++) {
25 + * "available_spus" counts how many spus are not potentially
26 + * going to be used by other affinity gangs whose reference
27 + * context is already in place. Although this code seeks to
28 + * avoid having affinity gangs with a summed amount of
29 + * contexts bigger than the amount of spus in the node,
30 + * this may happen sporadically. In this case, available_spus
31 + * becomes negative, which is harmless.
35 node = (node < MAX_NUMNODES) ? node : 0;
36 @@ -321,12 +330,10 @@ static struct spu *aff_ref_location(stru
38 mutex_lock(&cbe_spu_info[node].list_mutex);
39 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
40 - if (spu->ctx && spu->ctx->gang
41 - && spu->ctx->aff_offset == 0)
43 - (spu->ctx->gang->contexts - 1);
46 + if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
47 + && spu->ctx->gang->aff_ref_spu)
48 + available_spus -= spu->ctx->gang->contexts;
51 if (available_spus < ctx->gang->contexts) {
52 mutex_unlock(&cbe_spu_info[node].list_mutex);