/* Copyright (C) 2015-2018 Free Software Foundation, Inc.
   Contributed by Alexander Monakov <amonakov@ispras.ru>

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles maintenance of threads on NVPTX.  */

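/* This port is only built when the compiler provides the soft-stack and
   uniform-SIMT code generation modes used for OpenMP offloading to NVPTX;
   the corresponding predefined macros are tested below.  */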
#if defined __nvptx_softstack__ && defined __nvptx_unisimt__

#include "libgomp.h"
#include <stdlib.h>
#include <string.h>

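/* Per-CTA array of thread descriptors, one slot per OpenMP thread in the
   team.  The pointer itself lives in .shared memory ("shared") so every
   thread of the CTA sees the same value; "nocommon" makes this a proper
   definition rather than a common symbol.  */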
struct gomp_thread *nvptx_thrs __attribute__((shared,nocommon));

static void gomp_thread_start (struct gomp_thread_pool *);


/* This externally visible function handles target region entry.  It
   sets up a per-team thread pool and transfers control by calling FN (FN_DATA)
   in the master thread or gomp_thread_start in other threads.

   The name of this function is part of the interface with the compiler: for
   each target region, GCC emits a PTX .kernel function that sets up soft-stack
   and uniform-simt state and calls this function, passing in FN the original
   function outlined for the target region.  */

void
gomp_nvptx_main (void (*fn) (void *), void *fn_data)
{
  int tid, ntids;
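  /* Read the PTX special registers %tid.y and %ntid.y: the launch geometry
     places OpenMP threads along the y dimension of the CTA, so these give
     this thread's index and the total number of threads in the team.  */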
  asm ("mov.u32 %0, %%tid.y;" : "=r" (tid));
  asm ("mov.u32 %0, %%ntid.y;" : "=r" (ntids));
  if (tid == 0)
    {
      gomp_global_icv.nthreads_var = ntids;
      /* Starting additional threads is not supported.  */
      gomp_global_icv.dyn_var = true;

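      /* All of the pool data structures are carved out of the master
         thread's soft stack with alloca; they stay live for the whole
         target region and are never freed individually.  The soft stack
         is ordinary device memory rather than .local, so the other
         threads of the CTA can access these structures as well.  */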
      nvptx_thrs = alloca (ntids * sizeof (*nvptx_thrs));
      memset (nvptx_thrs, 0, ntids * sizeof (*nvptx_thrs));

      struct gomp_thread_pool *pool = alloca (sizeof (*pool));
      pool->threads = alloca (ntids * sizeof (*pool->threads));
      for (tid = 0; tid < ntids; tid++)
        pool->threads[tid] = nvptx_thrs + tid;
      pool->threads_size = ntids;
      pool->threads_used = ntids;
      pool->threads_busy = 1;
      pool->last_team = NULL;
      gomp_simple_barrier_init (&pool->threads_dock, ntids);

      nvptx_thrs[0].thread_pool = pool;
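      /* CTA-wide barrier: the other threads are blocked on the matching
         bar.sync in the else branch below and must not read
         nvptx_thrs[0].thread_pool until the pool set up above is complete.  */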
      asm ("bar.sync 0;");
      fn (fn_data);

      gomp_free_thread (nvptx_thrs);
    }
  else
    {
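      /* Non-master threads wait here until the master has built the thread
         pool, then enter the idle loop and wait to be given work.  */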
      asm ("bar.sync 0;");
      gomp_thread_start (nvptx_thrs[0].thread_pool);
    }
}

/* This function contains the idle loop in which a thread waits
   to be called up to become part of a team.  */

static void
gomp_thread_start (struct gomp_thread_pool *pool)
{
  struct gomp_thread *thr = gomp_thread ();

  gomp_sem_init (&thr->release, 0);
  thr->thread_pool = pool;

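  /* Idle loop: block on the pool's dock barrier until gomp_team_start
     releases the pool by joining the barrier.  A NULL fn means this thread
     was released without being given work, so it simply waits again.  */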
  do
    {
      gomp_simple_barrier_wait (&pool->threads_dock);
      if (!thr->fn)
        continue;
      thr->fn (thr->data);
      thr->fn = NULL;

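      /* The work function has returned; wait for the rest of the team at
         the final team barrier, then clean up this thread's implicit task
         before going back to sleep in the dock barrier.  */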
      struct gomp_task *task = thr->task;
      gomp_team_barrier_wait_final (&thr->ts.team->barrier);
      gomp_finish_task (task);
    }
  /* Work around an NVIDIA driver bug: when generating sm_50 machine code,
     it can trash stack pointer R1 in loops lacking exit edges.  Add a cheap
     artificial exit that the driver would not be able to optimize out.  */
  while (nvptx_thrs);
}

/* Launch a team.  */

void
gomp_team_start (void (*fn) (void *), void *data, unsigned nthreads,
                 unsigned flags, struct gomp_team *team)
{
  struct gomp_thread *thr, *nthr;
  struct gomp_task *task;
  struct gomp_task_icv *icv;
  struct gomp_thread_pool *pool;
  unsigned long nthreads_var;

  thr = gomp_thread ();
  pool = thr->thread_pool;
  task = thr->task;
  icv = task ? &task->icv : &gomp_global_icv;

  /* Always save the previous state, even if this isn't a nested team.
     In particular, we should save any work share state from an outer
     orphaned work share construct.  */
  team->prev_ts = thr->ts;

  thr->ts.team = team;
  thr->ts.team_id = 0;
  ++thr->ts.level;
  if (nthreads > 1)
    ++thr->ts.active_level;
  thr->ts.work_share = &team->work_shares[0];
  thr->ts.last_work_share = NULL;
  thr->ts.single_count = 0;
  thr->ts.static_trip = 0;
  thr->task = &team->implicit_task[0];
  nthreads_var = icv->nthreads_var;
  gomp_init_task (thr->task, task, icv);
  team->implicit_task[0].icv.nthreads_var = nthreads_var;

  if (nthreads == 1)
    return;

  /* Release existing idle threads.  */
  for (unsigned i = 1; i < nthreads; ++i)
    {
      nthr = pool->threads[i];
      nthr->ts.team = team;
      nthr->ts.work_share = &team->work_shares[0];
      nthr->ts.last_work_share = NULL;
      nthr->ts.team_id = i;
      nthr->ts.level = team->prev_ts.level + 1;
      nthr->ts.active_level = thr->ts.active_level;
      nthr->ts.single_count = 0;
      nthr->ts.static_trip = 0;
      nthr->task = &team->implicit_task[i];
      gomp_init_task (nthr->task, task, icv);
      team->implicit_task[i].icv.nthreads_var = nthreads_var;
      nthr->fn = fn;
      nthr->data = data;
      team->ordered_release[i] = &nthr->release;
    }

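  /* The pool threads are all parked in gomp_thread_start at the dock
     barrier; joining it here releases them to run FN.  Threads with an
     index of nthreads or above were not given a work function and go
     straight back to waiting.  */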
  gomp_simple_barrier_wait (&pool->threads_dock);
}

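/* Textually include the generic team.c so the target-independent team
   management code is shared with this port; its pthread-specific parts are
   conditionally compiled and not used on nvptx.  */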
#include "../../team.c"
#endif