/* Copyright (C) 2015 Free Software Foundation, Inc.
   Contributed by Jakub Jelinek <jakub@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the taskloop construct.  It is included twice, once
   for the long and once for unsigned long long variant.  */
29 | /* Called when encountering an explicit task directive. If IF_CLAUSE is | |
30 | false, then we must not delay in executing the task. If UNTIED is true, | |
31 | then the task may be executed by any member of the team. */ | |
32 | ||
33 | void | |
34 | GOMP_taskloop (void (*fn) (void *), void *data, void (*cpyfn) (void *, void *), | |
35 | long arg_size, long arg_align, unsigned flags, | |
36 | unsigned long num_tasks, int priority, | |
37 | TYPE start, TYPE end, TYPE step) | |
38 | { | |
39 | struct gomp_thread *thr = gomp_thread (); | |
40 | struct gomp_team *team = thr->ts.team; | |
41 | ||
42 | #ifdef HAVE_BROKEN_POSIX_SEMAPHORES | |
43 | /* If pthread_mutex_* is used for omp_*lock*, then each task must be | |
44 | tied to one thread all the time. This means UNTIED tasks must be | |
45 | tied and if CPYFN is non-NULL IF(0) must be forced, as CPYFN | |
46 | might be running on different thread than FN. */ | |
47 | if (cpyfn) | |
48 | flags &= ~GOMP_TASK_FLAG_IF; | |
49 | flags &= ~GOMP_TASK_FLAG_UNTIED; | |
50 | #endif | |
51 | ||
52 | /* If parallel or taskgroup has been cancelled, don't start new tasks. */ | |
53 | if (team && gomp_team_barrier_cancelled (&team->barrier)) | |
54 | return; | |
55 | ||
56 | #ifdef TYPE_is_long | |
57 | TYPE s = step; | |
58 | if (step > 0) | |
59 | { | |
60 | if (start >= end) | |
61 | return; | |
62 | s--; | |
63 | } | |
64 | else | |
65 | { | |
66 | if (start <= end) | |
67 | return; | |
68 | s++; | |
69 | } | |
70 | UTYPE n = (end - start + s) / step; | |
71 | #else | |
72 | UTYPE n; | |
73 | if (flags & GOMP_TASK_FLAG_UP) | |
74 | { | |
75 | if (start >= end) | |
76 | return; | |
77 | n = (end - start + step - 1) / step; | |
78 | } | |
79 | else | |
80 | { | |
81 | if (start <= end) | |
82 | return; | |
83 | n = (start - end - step - 1) / -step; | |
84 | } | |
85 | #endif | |
86 | ||
87 | TYPE task_step = step; | |
88 | unsigned long nfirst = n; | |
89 | if (flags & GOMP_TASK_FLAG_GRAINSIZE) | |
90 | { | |
91 | unsigned long grainsize = num_tasks; | |
92 | #ifdef TYPE_is_long | |
93 | num_tasks = n / grainsize; | |
94 | #else | |
95 | UTYPE ndiv = n / grainsize; | |
96 | num_tasks = ndiv; | |
97 | if (num_tasks != ndiv) | |
98 | num_tasks = ~0UL; | |
99 | #endif | |
100 | if (num_tasks <= 1) | |
101 | { | |
102 | num_tasks = 1; | |
103 | task_step = end - start; | |
104 | } | |
105 | else if (num_tasks >= grainsize | |
106 | #ifndef TYPE_is_long | |
107 | && num_tasks != ~0UL | |
108 | #endif | |
109 | ) | |
110 | { | |
111 | UTYPE mul = num_tasks * grainsize; | |
112 | task_step = (TYPE) grainsize * step; | |
113 | if (mul != n) | |
114 | { | |
115 | task_step += step; | |
116 | nfirst = n - mul - 1; | |
117 | } | |
118 | } | |
119 | else | |
120 | { | |
121 | UTYPE div = n / num_tasks; | |
122 | UTYPE mod = n % num_tasks; | |
123 | task_step = (TYPE) div * step; | |
124 | if (mod) | |
125 | { | |
126 | task_step += step; | |
127 | nfirst = mod - 1; | |
128 | } | |
129 | } | |
130 | } | |
131 | else | |
132 | { | |
133 | if (num_tasks == 0) | |
134 | num_tasks = team ? team->nthreads : 1; | |
135 | if (num_tasks >= n) | |
136 | num_tasks = n; | |
137 | else | |
138 | { | |
139 | UTYPE div = n / num_tasks; | |
140 | UTYPE mod = n % num_tasks; | |
141 | task_step = (TYPE) div * step; | |
142 | if (mod) | |
143 | { | |
144 | task_step += step; | |
145 | nfirst = mod - 1; | |
146 | } | |
147 | } | |
148 | } | |
149 | ||
150 | if (flags & GOMP_TASK_FLAG_NOGROUP) | |
151 | { | |
152 | if (thr->task && thr->task->taskgroup && thr->task->taskgroup->cancelled) | |
153 | return; | |
154 | } | |
155 | else | |
156 | ialias_call (GOMP_taskgroup_start) (); | |
157 | ||
158 | /* FIXME, use priority. */ | |
159 | (void) priority; | |
160 | ||
161 | if ((flags & GOMP_TASK_FLAG_IF) == 0 || team == NULL | |
162 | || (thr->task && thr->task->final_task) | |
163 | || team->task_count + num_tasks > 64 * team->nthreads) | |
164 | { | |
165 | unsigned long i; | |
166 | if (__builtin_expect (cpyfn != NULL, 0)) | |
167 | { | |
168 | struct gomp_task task[num_tasks]; | |
169 | struct gomp_task *parent = thr->task; | |
170 | arg_size = (arg_size + arg_align - 1) & ~(arg_align - 1); | |
171 | char buf[num_tasks * arg_size + arg_align - 1]; | |
172 | char *arg = (char *) (((uintptr_t) buf + arg_align - 1) | |
173 | & ~(uintptr_t) (arg_align - 1)); | |
174 | char *orig_arg = arg; | |
175 | for (i = 0; i < num_tasks; i++) | |
176 | { | |
177 | gomp_init_task (&task[i], parent, gomp_icv (false)); | |
178 | task[i].kind = GOMP_TASK_UNDEFERRED; | |
179 | task[i].final_task = (thr->task && thr->task->final_task) | |
180 | || (flags & GOMP_TASK_FLAG_FINAL); | |
181 | if (thr->task) | |
182 | { | |
183 | task[i].in_tied_task = thr->task->in_tied_task; | |
184 | task[i].taskgroup = thr->task->taskgroup; | |
185 | } | |
186 | thr->task = &task[i]; | |
187 | cpyfn (arg, data); | |
188 | arg += arg_size; | |
189 | } | |
190 | arg = orig_arg; | |
191 | for (i = 0; i < num_tasks; i++) | |
192 | { | |
193 | thr->task = &task[i]; | |
194 | ((TYPE *)arg)[0] = start; | |
195 | start += task_step; | |
196 | ((TYPE *)arg)[1] = start; | |
197 | if (i == nfirst) | |
198 | task_step -= step; | |
199 | fn (arg); | |
200 | arg += arg_size; | |
201 | if (task[i].children != NULL) | |
202 | { | |
203 | gomp_mutex_lock (&team->task_lock); | |
204 | gomp_clear_parent (task[i].children); | |
205 | gomp_mutex_unlock (&team->task_lock); | |
206 | } | |
207 | gomp_end_task (); | |
208 | } | |
209 | } | |
210 | else | |
211 | for (i = 0; i < num_tasks; i++) | |
212 | { | |
213 | struct gomp_task task; | |
214 | ||
215 | gomp_init_task (&task, thr->task, gomp_icv (false)); | |
216 | task.kind = GOMP_TASK_UNDEFERRED; | |
217 | task.final_task = (thr->task && thr->task->final_task) | |
218 | || (flags & GOMP_TASK_FLAG_FINAL); | |
219 | if (thr->task) | |
220 | { | |
221 | task.in_tied_task = thr->task->in_tied_task; | |
222 | task.taskgroup = thr->task->taskgroup; | |
223 | } | |
224 | thr->task = &task; | |
225 | ((TYPE *)data)[0] = start; | |
226 | start += task_step; | |
227 | ((TYPE *)data)[1] = start; | |
228 | if (i == nfirst) | |
229 | task_step -= step; | |
230 | fn (data); | |
231 | if (task.children != NULL) | |
232 | { | |
233 | gomp_mutex_lock (&team->task_lock); | |
234 | gomp_clear_parent (task.children); | |
235 | gomp_mutex_unlock (&team->task_lock); | |
236 | } | |
237 | gomp_end_task (); | |
238 | } | |
239 | } | |
240 | else | |
241 | { | |
242 | struct gomp_task *tasks[num_tasks]; | |
243 | struct gomp_task *parent = thr->task; | |
244 | struct gomp_taskgroup *taskgroup = parent->taskgroup; | |
245 | char *arg; | |
246 | int do_wake; | |
247 | unsigned long i; | |
248 | ||
249 | for (i = 0; i < num_tasks; i++) | |
250 | { | |
251 | struct gomp_task *task | |
252 | = gomp_malloc (sizeof (*task) + arg_size + arg_align - 1); | |
253 | tasks[i] = task; | |
254 | arg = (char *) (((uintptr_t) (task + 1) + arg_align - 1) | |
255 | & ~(uintptr_t) (arg_align - 1)); | |
256 | gomp_init_task (task, parent, gomp_icv (false)); | |
257 | task->kind = GOMP_TASK_UNDEFERRED; | |
258 | task->in_tied_task = parent->in_tied_task; | |
259 | task->taskgroup = taskgroup; | |
260 | thr->task = task; | |
261 | if (cpyfn) | |
262 | { | |
263 | cpyfn (arg, data); | |
264 | task->copy_ctors_done = true; | |
265 | } | |
266 | else | |
267 | memcpy (arg, data, arg_size); | |
268 | ((TYPE *)arg)[0] = start; | |
269 | start += task_step; | |
270 | ((TYPE *)arg)[1] = start; | |
271 | if (i == nfirst) | |
272 | task_step -= step; | |
273 | thr->task = parent; | |
274 | task->kind = GOMP_TASK_WAITING; | |
275 | task->fn = fn; | |
276 | task->fn_data = arg; | |
277 | task->final_task = (flags & GOMP_TASK_FLAG_FINAL) >> 1; | |
278 | } | |
279 | gomp_mutex_lock (&team->task_lock); | |
280 | /* If parallel or taskgroup has been cancelled, don't start new | |
281 | tasks. */ | |
282 | if (__builtin_expect ((gomp_team_barrier_cancelled (&team->barrier) | |
283 | || (taskgroup && taskgroup->cancelled)) | |
284 | && cpyfn == NULL, 0)) | |
285 | { | |
286 | gomp_mutex_unlock (&team->task_lock); | |
287 | for (i = 0; i < num_tasks; i++) | |
288 | { | |
289 | gomp_finish_task (tasks[i]); | |
290 | free (tasks[i]); | |
291 | } | |
292 | if ((flags & GOMP_TASK_FLAG_NOGROUP) == 0) | |
293 | ialias_call (GOMP_taskgroup_end) (); | |
294 | return; | |
295 | } | |
296 | if (taskgroup) | |
297 | taskgroup->num_children += num_tasks; | |
298 | for (i = 0; i < num_tasks; i++) | |
299 | { | |
300 | struct gomp_task *task = tasks[i]; | |
301 | if (parent->children) | |
302 | { | |
303 | task->next_child = parent->children; | |
304 | task->prev_child = parent->children->prev_child; | |
305 | task->next_child->prev_child = task; | |
306 | task->prev_child->next_child = task; | |
307 | } | |
308 | else | |
309 | { | |
310 | task->next_child = task; | |
311 | task->prev_child = task; | |
312 | } | |
313 | parent->children = task; | |
314 | if (taskgroup) | |
315 | { | |
316 | if (taskgroup->children) | |
317 | { | |
318 | task->next_taskgroup = taskgroup->children; | |
319 | task->prev_taskgroup = taskgroup->children->prev_taskgroup; | |
320 | task->next_taskgroup->prev_taskgroup = task; | |
321 | task->prev_taskgroup->next_taskgroup = task; | |
322 | } | |
323 | else | |
324 | { | |
325 | task->next_taskgroup = task; | |
326 | task->prev_taskgroup = task; | |
327 | } | |
328 | taskgroup->children = task; | |
329 | } | |
330 | if (team->task_queue) | |
331 | { | |
332 | task->next_queue = team->task_queue; | |
333 | task->prev_queue = team->task_queue->prev_queue; | |
334 | task->next_queue->prev_queue = task; | |
335 | task->prev_queue->next_queue = task; | |
336 | } | |
337 | else | |
338 | { | |
339 | task->next_queue = task; | |
340 | task->prev_queue = task; | |
341 | team->task_queue = task; | |
342 | } | |
343 | ++team->task_count; | |
344 | ++team->task_queued_count; | |
345 | } | |
346 | gomp_team_barrier_set_task_pending (&team->barrier); | |
347 | if (team->task_running_count + !parent->in_tied_task | |
348 | < team->nthreads) | |
349 | { | |
350 | do_wake = team->nthreads - team->task_running_count | |
351 | - !parent->in_tied_task; | |
352 | if ((unsigned long) do_wake > num_tasks) | |
353 | do_wake = num_tasks; | |
354 | } | |
355 | else | |
356 | do_wake = 0; | |
357 | gomp_mutex_unlock (&team->task_lock); | |
358 | if (do_wake) | |
359 | gomp_team_barrier_wake (&team->barrier, do_wake); | |
360 | } | |
361 | if ((flags & GOMP_TASK_FLAG_NOGROUP) == 0) | |
362 | ialias_call (GOMP_taskgroup_end) (); | |
363 | } |