/*
 * (c) Copyright 1990-1996 OPEN SOFTWARE FOUNDATION, INC.
 * (c) Copyright 1990-1996 HEWLETT-PACKARD COMPANY
 * (c) Copyright 1990-1996 DIGITAL EQUIPMENT CORPORATION
 * (c) Copyright 1991, 1992 Siemens-Nixdorf Information Systems
 * To anyone who acknowledges that this file is provided "AS IS" without
 * any express or implied warranty: permission to use, copy, modify, and
 * distribute this file for any purpose is hereby granted without fee,
 * provided that the above copyright notices and this notice appears in
 * all source code copies, and that none of the names listed above be used
 * in advertising or publicity pertaining to distribution of the software
 * without specific, written prior permission. None of these organizations
 * makes any representations about the suitability of this software for
 * any purpose.
 */
/*
 * Header file for priority scheduling
 */


#ifndef CMA_SCHED
#define CMA_SCHED

/*
 * INCLUDE FILES
 */

/*
 * CONSTANTS AND MACROS
 */

/*
 * Scaling factor for integer priority calculations
 */
#define cma__c_prio_scale   8

#if _CMA_VENDOR_ == _CMA__APOLLO
/*
 * FIX-ME: Apollo cc 6.8 blows constant folded "<<" and ">>"
 */
# define cma__scale_up(exp)     ((exp) * 256)
# define cma__scale_dn(exp)     ((exp) / 256)
#else
# define cma__scale_up(exp)     ((exp) << cma__c_prio_scale)
# define cma__scale_dn(exp)     ((exp) >> cma__c_prio_scale)
#endif
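/*
 * Illustrative note (not part of the original header): with
 * cma__c_prio_scale == 8, cma__scale_up() and cma__scale_dn() convert
 * between plain integers and 8-bit fixed-point values, e.g.
 *
 *     cma__scale_up (3)   == 768     (3 << 8)
 *     cma__scale_dn (768) == 3
 *
 * The Apollo branch multiplies/divides by 256 to produce the same results
 * without relying on constant-folded shifts.
 */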


/*
 * Min. num. of ticks between self-adjustments for priority adjusting policies.
 */
#define cma__c_prio_interval    10


/*
 * Number of queues in each class of queues
 */
#define cma__c_prio_n_id    1   /* Very-low-priority class threads */
#define cma__c_prio_n_bg    8   /* Background class threads */
#define cma__c_prio_n_0     1   /* Very low priority throughput quartile */
#define cma__c_prio_n_1     2   /* Low priority throughput quartile */
#define cma__c_prio_n_2     3   /* Medium priority throughput quartile */
#define cma__c_prio_n_3     4   /* High priority throughput quartile */
#define cma__c_prio_n_rt    1   /* Real Time priority queues */

/*
 * Number of queues to skip (offset) to get to the queues in this section of LA
 */
#define cma__c_prio_o_id    0
#define cma__c_prio_o_bg    cma__c_prio_o_id + cma__c_prio_n_id
#define cma__c_prio_o_0     cma__c_prio_o_bg + cma__c_prio_n_bg
#define cma__c_prio_o_1     cma__c_prio_o_0 + cma__c_prio_n_0
#define cma__c_prio_o_2     cma__c_prio_o_1 + cma__c_prio_n_1
#define cma__c_prio_o_3     cma__c_prio_o_2 + cma__c_prio_n_2
#define cma__c_prio_o_rt    cma__c_prio_o_3 + cma__c_prio_n_3
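/*
 * Illustrative note (not part of the original header): expanding the
 * offsets above with the queue counts defined earlier gives
 *
 *     cma__c_prio_o_id == 0
 *     cma__c_prio_o_bg == 1
 *     cma__c_prio_o_0  == 9
 *     cma__c_prio_o_1  == 10
 *     cma__c_prio_o_2  == 12
 *     cma__c_prio_o_3  == 15
 *     cma__c_prio_o_rt == 19
 *
 * i.e. each offset is the index of the first ready queue in its section.
 */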

/*
 * Ada_low: These threads are queued in the background queues, thus there
 * must be enough queues to allow one queue for each Ada priority below the
 * Ada default.
 */
#define cma__c_prio_o_al    cma__c_prio_o_bg

/*
 * Total number of ready queues, for declaration purposes
 */
#define cma__c_prio_n_tot \
    cma__c_prio_n_id + cma__c_prio_n_bg + cma__c_prio_n_rt \
    + cma__c_prio_n_0 + cma__c_prio_n_1 + cma__c_prio_n_2 + cma__c_prio_n_3
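/*
 * Illustrative note (not part of the original header): with the counts
 * above, cma__c_prio_n_tot expands to 1 + 8 + 1 + 1 + 2 + 3 + 4 == 20,
 * so the single real-time queue (offset 19) is the last ready queue.
 */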

/*
 * Formulae for determining a thread's priority. Variable priorities (such
 * as foreground and background) are scaled values.
 */
#define cma__sched_priority(tcb) \
    ((tcb)->sched.class == cma__c_class_fore ? cma__sched_prio_fore (tcb) \
    :((tcb)->sched.class == cma__c_class_back ? cma__sched_prio_back (tcb) \
    :((tcb)->sched.class == cma__c_class_rt ? cma__sched_prio_rt (tcb) \
    :((tcb)->sched.class == cma__c_class_idle ? cma__sched_prio_idle (tcb) \
    :(cma__bugcheck ("cma__sched_priority: unrecognized class"), 0) ))))

#define cma__sched_prio_fore(tcb)   cma__sched_prio_fore_var (tcb)
#define cma__sched_prio_back(tcb)   ((tcb)->sched.fixed_prio \
    ? cma__sched_prio_back_fix (tcb) : cma__sched_prio_back_var (tcb) )
#define cma__sched_prio_rt(tcb)     ((tcb)->sched.priority)
#define cma__sched_prio_idle(tcb)   ((tcb)->sched.priority)

#define cma__sched_prio_back_fix(tcb) \
    (cma__g_prio_bg_min + (cma__g_prio_bg_max - cma__g_prio_bg_min) \
    * ((tcb)->sched.priority + cma__c_prio_o_al - cma__c_prio_o_bg) \
    / cma__c_prio_n_bg)
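/*
 * Illustrative note (not part of the original header): since
 * cma__c_prio_o_al equals cma__c_prio_o_bg, the offset terms cancel and
 * the fixed background priority is a linear interpolation of
 * sched.priority into [cma__g_prio_bg_min, cma__g_prio_bg_max]. For
 * example, with the hypothetical scaled bounds bg_min == 256 and
 * bg_max == 2304, a thread with sched.priority == 4 gets
 *
 *     256 + (2304 - 256) * 4 / 8 == 1280    (i.e. 5, scaled)
 */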

/*
 * FIX-ME: Enable after modeling (if we like it)
 */
#if 1
# define cma__sched_prio_fore_var(tcb) \
    ((cma__g_prio_fg_max + cma__g_prio_fg_min)/2)
# define cma__sched_prio_back_var(tcb) \
    ((cma__g_prio_bg_max + cma__g_prio_bg_min)/2)
#else
# define cma__sched_prio_back_var(tcb)  cma__sched_prio_fore_var (tcb)

# if 1
/*
 * Re-scale, since the division removes the scale factor.
 * Scale and multiply before dividing to avoid loss of precision.
 */
# define cma__sched_prio_fore_var(tcb) \
    ((cma__g_vp_count * cma__scale_up((tcb)->sched.tot_time)) \
    / (tcb)->sched.cpu_time)
# else
/*
 * Re-scale, since the division removes the scale factor.
 * Scale and multiply before dividing to avoid loss of precision.
 * Left shift the numerator to multiply by two.
 */
# define cma__sched_prio_fore_var(tcb) \
    (((cma__g_vp_count * cma__scale_up((tcb)->sched.tot_time) \
    * (tcb)->sched.priority * cma__g_init_frac_sum) << 1) \
    / ((tcb)->sched.cpu_time * (tcb)->sched.priority * cma__g_init_frac_sum \
    + (tcb)->sched.tot_time))
# endif
#endif
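/*
 * Illustrative note (not part of the original header): the branch that is
 * currently compiled in simply uses the midpoint of the scaled foreground
 * (or background) priority range. The disabled variants compute a
 * usage-based priority; scaling before the division preserves the
 * fraction that plain integer division would discard. For example, with
 * the hypothetical values cma__g_vp_count == 4, tot_time == 100 and
 * cpu_time == 30:
 *
 *     unscaled:  4 * 100 / 30     == 13
 *     scaled:    4 * 25600 / 30   == 3413   (about 13.33, scaled)
 */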

/*
 * Update weighted-averaged, scaled tick counters
 */
#define cma__sched_update_time(ave, new) \
    (ave) = (ave) - ((cma__scale_dn((ave)) - (new)) << (cma__c_prio_scale - 4))
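/*
 * Illustrative note (not part of the original header): with
 * cma__c_prio_scale == 8 the shift is 4, so the update is an exponential
 * moving average in which each new sample carries 1/16 of the weight:
 *
 *     ave = ave - 16 * (ave/256 - new)      [ave scaled, new unscaled]
 *
 * e.g. ave == 2560 (10.0 scaled) and new == 26 gives
 * 2560 - 16 * (10 - 26) == 2816, i.e. 11.0 scaled, which matches
 * 10 * 15/16 + 26/16 == 11.
 */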

#define cma__sched_parameterize(tcb, policy) { \
    switch (policy) { \
        case cma_c_sched_fifo : { \
            (tcb)->sched.rtb = cma_c_true; \
            (tcb)->sched.spp = cma_c_true; \
            (tcb)->sched.fixed_prio = cma_c_true; \
            (tcb)->sched.class = cma__c_class_rt; \
            break; \
            } \
        case cma_c_sched_rr : { \
            (tcb)->sched.rtb = cma_c_false; \
            (tcb)->sched.spp = cma_c_true; \
            (tcb)->sched.fixed_prio = cma_c_true; \
            (tcb)->sched.class = cma__c_class_rt; \
            break; \
            } \
        case cma_c_sched_throughput : { \
            (tcb)->sched.rtb = cma_c_false; \
            (tcb)->sched.spp = cma_c_false; \
            (tcb)->sched.fixed_prio = cma_c_false; \
            (tcb)->sched.class = cma__c_class_fore; \
            break; \
            } \
        case cma_c_sched_background : { \
            (tcb)->sched.rtb = cma_c_false; \
            (tcb)->sched.spp = cma_c_false; \
            (tcb)->sched.fixed_prio = cma_c_false; \
            (tcb)->sched.class = cma__c_class_back; \
            break; \
            } \
        case cma_c_sched_ada_low : { \
            (tcb)->sched.rtb = cma_c_false; \
            (tcb)->sched.spp = cma_c_true; \
            (tcb)->sched.fixed_prio = cma_c_true; \
            (tcb)->sched.class = cma__c_class_back; \
            break; \
            } \
        case cma_c_sched_idle : { \
            (tcb)->sched.rtb = cma_c_false; \
            (tcb)->sched.spp = cma_c_false; \
            (tcb)->sched.fixed_prio = cma_c_false; \
            (tcb)->sched.class = cma__c_class_idle; \
            break; \
            } \
        default : { \
            cma__bugcheck ("cma__sched_parameterize: bad scheduling Policy"); \
            break; \
            } \
        } \
    }
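/*
 * Illustrative note (not part of the original header): a hypothetical
 * caller that owns a TCB would use the macro as, e.g.,
 *
 *     cma__sched_parameterize (tcb, cma_c_sched_rr);
 *         => rtb false, spp true, fixed_prio true, class cma__c_class_rt
 *     cma__sched_parameterize (tcb, cma_c_sched_throughput);
 *         => rtb, spp, fixed_prio all false, class cma__c_class_fore
 */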

/*
 * TYPEDEFS
 */

/*
 * Scheduling classes
 */
typedef enum CMA__T_SCHED_CLASS {
    cma__c_class_rt,
    cma__c_class_fore,
    cma__c_class_back,
    cma__c_class_idle
    } cma__t_sched_class;

/*
 * GLOBAL DATA
 */

/*
 * Minimum and maximum priorities for foreground and background threads,
 * as of the last time the scheduler ran. (Scaled once.)
 */
extern cma_t_integer cma__g_prio_fg_min;
extern cma_t_integer cma__g_prio_fg_max;
extern cma_t_integer cma__g_prio_bg_min;
extern cma_t_integer cma__g_prio_bg_max;

/*
 * The "m" values are the slopes of the four sections of linear approximation.
 *
 * cma__g_prio_m_I = 4*N(I)/cma__g_prio_range (Scaled once.)
 */
extern cma_t_integer cma__g_prio_m_0,
                     cma__g_prio_m_1,
                     cma__g_prio_m_2,
                     cma__g_prio_m_3;

/*
 * The "b" values are the intercepts of the four sections of linear approx.
 * (Not scaled.)
 *
 * cma__g_prio_b_I = -N(I)*(I*prio_max + (4-I)*prio_min)/prio_range + prio_o_I
 */
extern cma_t_integer cma__g_prio_b_0,
                     cma__g_prio_b_1,
                     cma__g_prio_b_2,
                     cma__g_prio_b_3;

/*
 * The "p" values are the end points of the four sections of linear approx.
 *
 * cma__g_prio_p_I = cma__g_prio_fg_min + (I/4)*cma__g_prio_range
 *
 * [cma__g_prio_p_0 is not defined since it is not used (also, it is the same
 * as cma__g_prio_fg_min).] (Scaled once.)
 */
extern cma_t_integer cma__g_prio_p_1,
                     cma__g_prio_p_2,
                     cma__g_prio_p_3;
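/*
 * Illustrative note (not part of the original header): assuming
 * cma__g_prio_range is cma__g_prio_fg_max - cma__g_prio_fg_min, these
 * slopes, intercepts and end points map a foreground priority onto the
 * queue indices of its quartile (scaling ignored here for clarity). For
 * example, with hypothetical bounds fg_min == 16 and fg_max == 32
 * (range 16):
 *
 *     m_1 = 4*2/16 = 1/2,  b_1 = -2*(32 + 3*16)/16 + 10 = 0,  p_1 = 20
 *
 * so a priority of 20 maps to queue m_1*20 + b_1 == 10 (cma__c_prio_o_1)
 * and a priority of 24 maps to queue 12 (cma__c_prio_o_2).
 */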

/*
 * Points to the next queue for the dispatcher to check for ready threads.
 */
extern cma_t_integer cma__g_next_ready_queue;

/*
 * Points to the queues of virtual processors (for preempt victim search)
 */
extern cma__t_queue cma__g_run_vps;
extern cma__t_queue cma__g_susp_vps;
extern cma_t_integer cma__g_vp_count;

/*
 * INTERNAL INTERFACES
 */

#endif