/* libcilkrts/runtime/scheduler.h — GCC mirror of the Intel Cilk Plus runtime */
/* scheduler.h                  -*-C++-*-
 *
 *************************************************************************
 *
 *  Copyright (C) 2009-2016, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *    * Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in
 *      the documentation and/or other materials provided with the
 *      distribution.
 *    * Neither the name of Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived
 *      from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *  HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 *  OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 *  AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 *  WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 *  *********************************************************************
 *
 *  PLEASE NOTE: This file is a downstream copy of a file maintained in
 *  a repository at cilkplus.org. Changes made to this file that are not
 *  submitted through the contribution process detailed at
 *  http://www.cilkplus.org/submit-cilk-contribution will be lost the next
 *  time that a new version is released. Changes only submitted to the
 *  GNU compiler collection or posted to the git repository at
 *  https://bitbucket.org/intelcilkruntime/intel-cilk-runtime.git are
 *  not tracked.
 *
 *  We welcome your contributions to this open source project. Thank you
 *  for your assistance in helping us improve Cilk Plus.
 **************************************************************************/
/**
 * @file scheduler.h
 *
 * @brief scheduler.h declares routines for the Intel Cilk Plus scheduler,
 * making it the heart of the Intel Cilk Plus implementation.
 */
57 #ifndef INCLUDED_SCHEDULER_DOT_H
58 #define INCLUDED_SCHEDULER_DOT_H
60 #include <cilk/common.h>
61 #include <internal/abi.h>
63 #include "rts-common.h"
64 #include "full_frame.h"
65 #include "reducer_impl.h"
66 #include "global_state.h"
68 #ifdef CILK_RECORD_REPLAY
69 #include "record-replay.h"
72 __CILKRTS_BEGIN_EXTERN_C
/**
 * @brief Flag to disable parallel reductions.
 *
 * Set to 0 to allow parallel reductions.
 */
#define DISABLE_PARALLEL_REDUCERS 0

/**
 * @brief Debugging level for parallel reductions.
 *
 * Print debugging messages and assertions for parallel reducers. 0 is
 * no debugging.  A higher value generates more output.
 */
#define REDPAR_DEBUG 0
94 * @brief Lock the worker mutex to allow exclusive access to the
95 * values in the @c __cilkrts_worker and local_state structures.
97 * @pre @c w->l->do_not_steal must not be set. Essentially this
98 * condition asserts that the worker is not locked recursively.
100 * @param w The worker to lock.
103 void __cilkrts_worker_lock(__cilkrts_worker
*w
);
106 * @brief Unlock the worker mutex.
108 * @pre @c w->l->do_not_steal must be set. Essentially this condition
109 * asserts that the worker has been previously locked.
111 * @param w The worker to unlock.
114 void __cilkrts_worker_unlock(__cilkrts_worker
*w
);
117 * @brief Push the next full frame to be made active in this worker
118 * and increment its join counter.
120 * __cilkrts_push_next_frame and pop_next_frame work on a one-element queue.
121 * This queue is used to communicate across the runtime from the code that
122 * wants to activate a frame to the code that can actually begin execution
123 * on that frame. They are asymetrical in that push increments the join
124 * counter but pop does not decrement it. Rather, a single push/pop
125 * combination makes a frame active and increments its join counter once.
127 * @note A system worker may chose to push work onto a user worker if
128 * the work is the continuation from a sync which only the user worker
131 * @param w The worker which the frame is to be pushed onto.
132 * @param ff The full_frame which is to be continued by the worker.
135 void __cilkrts_push_next_frame(__cilkrts_worker
*w
,
139 * @brief Sync on this worker.
141 * If this worker is the last to reach the sync, execution may resume
142 * on this worker after the sync.
144 * If this worker is not the last spawned child to reach the sync,
145 * then execution is suspended and the worker will re-enter the
146 * scheduling loop, looking for work it can steal.
148 * This function will jump into the runtime to switch to the scheduling
149 * stack to implement most of its logic.
151 * @param w The worker which is executing the sync.
152 * @param sf The __cilkrts_stack_frame containing the sync.
155 NORETURN
__cilkrts_c_sync(__cilkrts_worker
*w
,
156 __cilkrts_stack_frame
*sf
);
159 * @brief Worker @c w completely promotes its own deque, simulating the case
160 * where the whole deque is stolen.
162 * We use this mechanism to force the allocation of new storage for
163 * reducers for race-detection purposes.
165 * This method is called from the reducer lookup logic when
166 * @c g->force_reduce is set.
168 * @warning Use of "force_reduce" is known to have bugs when run with
169 * more than 1 worker.
171 * @param w The worker which is to have all entries in its deque
172 * promoted to full frames.
175 void __cilkrts_promote_own_deque(__cilkrts_worker
*w
);
178 * Called when a spawned function attempts to return and
179 * __cilkrts_undo_detach() fails. This can happen for two reasons:
181 * @li If another worker is considering stealing our parent, it bumps the
182 * exception pointer while it did so, which will cause __cilkrts_undo_detach()
183 * to fail. If the other worker didn't complete the steal of our parent, we
184 * still may be able to return to it, either because the steal attempt failed,
185 * or we won the race for the tail pointer.
187 * @li If the function's parent has been stolen then we cannot return. Instead
188 * we'll longjmp into the runtime to switch onto the scheduling stack to
189 * execute do_return_from_spawn() and determine what to do. Either this
190 * worker is the last one to the sync, in which case we need to jump to the
191 * sync, or this worker is not the last one to the sync, in which case we'll
192 * abandon this work and jump to the scheduling loop to search for more work
195 * @param w The worker which attempting to return from a spawn to
197 * @param returning_sf The stack frame which is returning.
200 void __cilkrts_c_THE_exception_check(__cilkrts_worker
*w
,
201 __cilkrts_stack_frame
*returning_sf
);
204 * @brief Return an exception to an stolen parent.
206 * Used by the gcc implementation of exceptions to return an exception
209 * @param w The worker which attempting to return from a spawn with an
210 * exception to a stolen parent.
211 * @param returning_sf The stack frame which is returning.
214 NORETURN
__cilkrts_exception_from_spawn(__cilkrts_worker
*w
,
215 __cilkrts_stack_frame
*returning_sf
);
218 * @brief Used by the Windows implementations of exceptions to migrate an exception
221 * Call this function when an exception has been thrown and has to
222 * traverse across a steal. The exception has already been wrapped
223 * up, so all that remains is to longjmp() into the continuation,
224 * sync, and re-raise it.
226 * @param sf The __cilkrts_stack_frame for the frame that is attempting to
227 * return an exception to a stolen parent.
229 void __cilkrts_migrate_exception (__cilkrts_stack_frame
*sf
);
232 * @brief Return from a call, not a spawn, where this frame has ever
235 * @param w The worker that is returning from a frame which was ever stolen.
238 void __cilkrts_return(__cilkrts_worker
*w
);
241 * @brief Special return from the initial frame.
243 * This method will be called from @c __cilkrts_leave_frame if
244 * @c CILK_FRAME_LAST is set.
246 * This function will do the things necessary to cleanup, and unbind the
247 * thread from the Intel Cilk Plus runtime. If this is the last user
248 * worker unbinding from the runtime, all system worker threads will be
251 * @pre @c w must be the currently executing worker, and must be a user
254 * @param w The worker that's returning from the initial frame.
257 void __cilkrts_c_return_from_initial(__cilkrts_worker
*w
);
260 * @brief Used by exception handling code to pop an entry from the
263 * @param w Worker to pop the entry from
265 * @return __cilkrts_stack_frame of parent call
266 * @return NULL if the deque is empty
269 __cilkrts_stack_frame
*__cilkrts_pop_tail(__cilkrts_worker
*w
);
272 * @brief Modifies the worker's protected_tail to prevent frames from
275 * The Dekker protocol has been extended to only steal if head+1 is also
276 * less than protected_tail.
278 * @param w The worker to be modified.
279 * @param new_protected_tail The new setting for protected_tail, or NULL if the
280 * entire deque is to be protected
282 * @return Previous value of protected tail.
285 __cilkrts_stack_frame
*volatile *__cilkrts_disallow_stealing(
287 __cilkrts_stack_frame
*volatile *new_protected_tail
);
290 * @brief Restores the protected tail to a previous state, possibly
291 * allowing frames to be stolen.
293 * @param w The worker to be modified.
294 * @param saved_protected_tail A previous setting for protected_tail that is
298 void __cilkrts_restore_stealing(
300 __cilkrts_stack_frame
*volatile *saved_protected_tail
);
303 * @brief Initialize a @c __cilkrts_worker.
305 * @note The memory for the worker must have been allocated outside
308 * @param g The global_state_t.
309 * @param self The index into the global_state's array of workers for this
310 * worker, or -1 if this worker was allocated from the heap and cannot be
312 * @param w The worker to be initialized.
314 * @return The initialized __cilkrts_worker.
317 __cilkrts_worker
*make_worker(global_state_t
*g
,
319 __cilkrts_worker
*w
);
322 * @brief Free up any resources allocated for a worker.
324 * @note The memory for the @c __cilkrts_worker itself must be
325 * deallocated outside this call.
327 * @param w The worker to be destroyed.
330 void destroy_worker (__cilkrts_worker
*w
);
/**
 * @brief Initialize the runtime.
 *
 * If necessary, allocates and initializes the global state.  If
 * necessary, unsuspends the system workers.
 *
 * @param start Specifies whether the workers are to be unsuspended if
 * they are suspended.  Allows __cilkrts_init() to start up the runtime
 * without releasing the system threads.
 */
void __cilkrts_init_internal(int start);
346 * @brief Part of the sequence to shutdown the runtime.
348 * Specifically, this call frees the @c global_state_t for the runtime.
350 * @param g The global_state_t.
353 void __cilkrts_deinit_internal(global_state_t
*g
);
356 * Obsolete. We no longer need to import or export reducer maps.
359 cilkred_map
*__cilkrts_xchg_reducer(
360 __cilkrts_worker
*w
, cilkred_map
*newmap
) cilk_nothrow
;
363 * @brief Called when a user thread is bound to the runtime.
365 * If this action increments the count of bound user threads from 0 to
366 * 1, the system worker threads are unsuspended.
368 * If this action increments the count of bound user threads from 0 to
369 * 1, the system worker threads are unsuspended.
371 * @pre Global lock must be held.
372 * @param g The runtime global state.
375 void __cilkrts_enter_cilk(global_state_t
*g
);
378 * @brief Called when a user thread is unbound from the runtime.
380 * If this action decrements the count of bound user threads to 0, the
381 * system worker threads are suspended.
384 * @pre Global lock must be held.
386 * @param g The runtime global state.
389 void __cilkrts_leave_cilk(global_state_t
*g
);
393 * @brief cilk_fiber_proc that runs the main scheduler loop on a
396 * @pre fiber's owner field should be set to the correct __cilkrts_worker
397 * @pre fiber must be a user worker.
399 * @param fiber The scheduling fiber object.
401 void scheduler_fiber_proc_for_user_worker(cilk_fiber
*fiber
);
405 * @brief Prints out Cilk runtime statistics.
407 * @param g The runtime global state.
409 * This method is useful only for debugging purposes. No guarantees
410 * are made as to the validity of this data. :)
413 void __cilkrts_dump_stats_to_stderr(global_state_t
*g
);
415 #ifdef CILK_RECORD_REPLAY
417 char * walk_pedigree_nodes(char *p
, const __cilkrts_pedigree
*pnode
);
420 * @brief Used by exception handling code to simulate the popping of
421 * an entry from the worker's deque.
423 * @param w Worker whose deque we want to check
425 * @return @c __cilkrts_stack_frame of parent call
426 * @return NULL if the deque is empty
429 __cilkrts_stack_frame
*simulate_pop_tail(__cilkrts_worker
*w
);
433 __CILKRTS_END_EXTERN_C
435 #endif // ! defined(INCLUDED_SCHEDULER_DOT_H)