]> git.ipfire.org Git - thirdparty/gcc.git/blame - libgomp/config/linux/proc.c
Update copyright years.
[thirdparty/gcc.git] / libgomp / config / linux / proc.c
CommitLineData
/* Copyright (C) 2005-2016 Free Software Foundation, Inc.
   Contributed by Jakub Jelinek <jakub@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file contains system specific routines related to counting
   online processors and dynamic load balancing.  */
28
29#ifndef _GNU_SOURCE
30#define _GNU_SOURCE 1
31#endif
32#include "libgomp.h"
e0b23d9f 33#include "proc.h"
acf0174b 34#include <errno.h>
12aac30b
JJ
35#include <stdlib.h>
36#include <unistd.h>
37#ifdef HAVE_GETLOADAVG
38# ifdef HAVE_SYS_LOADAVG_H
39# include <sys/loadavg.h>
40# endif
41#endif
42
#ifdef HAVE_PTHREAD_AFFINITY_NP
/* Size in bytes of the cpu_set_t buffer currently used for
   pthread_getaffinity_np calls; 0 if affinity could not be queried.  */
unsigned long gomp_cpuset_size;
/* Cpuset size (in bytes) that last succeeded with
   pthread_getaffinity_np in gomp_init_num_threads; get_num_procs
   reuses it for later affinity queries.  */
static unsigned long gomp_get_cpuset_size;
/* Affinity mask of the initial thread, allocated in
   gomp_init_num_threads; NULL if it could not be determined.  */
cpu_set_t *gomp_cpusetp;

/* Return the number of CPUs set in the CPUSETSIZE-byte affinity mask
   CPUSETP.  */
unsigned long
gomp_cpuset_popcount (unsigned long cpusetsize, cpu_set_t *cpusetp)
{
#ifdef CPU_COUNT_S
  /* glibc 2.7 and above provide a macro for this.  */
  return CPU_COUNT_S (cpusetsize, cpusetp);
#else
#ifdef CPU_COUNT
  if (cpusetsize == sizeof (cpu_set_t))
    /* glibc 2.6 and above provide a macro for this.  */
    return CPU_COUNT (cpusetp);
#endif
  size_t i;
  unsigned long ret = 0;
  /* Compile-time assertion that each word of the mask is exactly an
     unsigned long, as the popcount loop below assumes.  */
  extern int check[sizeof (cpusetp->__bits[0]) == sizeof (unsigned long int)
		   ? 1 : -1] __attribute__((unused));

  /* Fallback for older glibc: count the set bits one word at a time.  */
  for (i = 0; i < cpusetsize / sizeof (cpusetp->__bits[0]); i++)
    {
      unsigned long int mask = cpusetp->__bits[i];
      if (mask == 0)
	continue;
      ret += __builtin_popcountl (mask);
    }
  return ret;
#endif
}
#endif
76
77/* At startup, determine the default number of threads. It would seem
78 this should be related to the number of cpus online. */
79
void
gomp_init_num_threads (void)
{
#ifdef HAVE_PTHREAD_AFFINITY_NP
#if defined (_SC_NPROCESSORS_CONF) && defined (CPU_ALLOC_SIZE)
  /* Start with a cpuset sized for the number of configured CPUs.  */
  gomp_cpuset_size = sysconf (_SC_NPROCESSORS_CONF);
  gomp_cpuset_size = CPU_ALLOC_SIZE (gomp_cpuset_size);
#else
  gomp_cpuset_size = sizeof (cpu_set_t);
#endif

  gomp_cpusetp = (cpu_set_t *) gomp_malloc (gomp_cpuset_size);
  do
    {
      int ret = pthread_getaffinity_np (pthread_self (), gomp_cpuset_size,
					gomp_cpusetp);
      if (ret == 0)
	{
	  /* Count only the CPUs this process can use.  */
	  gomp_global_icv.nthreads_var
	    = gomp_cpuset_popcount (gomp_cpuset_size, gomp_cpusetp);
	  if (gomp_global_icv.nthreads_var == 0)
	    break;
	  gomp_get_cpuset_size = gomp_cpuset_size;
#ifdef CPU_ALLOC_SIZE
	  /* Shrink gomp_cpuset_size to just cover the highest CPU that
	     is actually set in the mask.  */
	  unsigned long i;
	  for (i = gomp_cpuset_size * 8; i; i--)
	    if (CPU_ISSET_S (i - 1, gomp_cpuset_size, gomp_cpusetp))
	      break;
	  gomp_cpuset_size = CPU_ALLOC_SIZE (i);
#endif
	  return;
	}
      if (ret != EINVAL)
	break;
#ifdef CPU_ALLOC_SIZE
      /* EINVAL means the buffer was too small for the kernel's cpumask;
	 grow it and retry.  */
      if (gomp_cpuset_size < sizeof (cpu_set_t))
	gomp_cpuset_size = sizeof (cpu_set_t);
      else
	gomp_cpuset_size = gomp_cpuset_size * 2;
      if (gomp_cpuset_size < 8 * sizeof (cpu_set_t))
	gomp_cpusetp
	  = (cpu_set_t *) gomp_realloc (gomp_cpusetp, gomp_cpuset_size);
      else
	{
	  /* Avoid gomp_fatal if too large memory allocation would be
	     requested, e.g. kernel returning EINVAL all the time.  */
	  void *p = realloc (gomp_cpusetp, gomp_cpuset_size);
	  if (p == NULL)
	    break;
	  gomp_cpusetp = (cpu_set_t *) p;
	}
#else
      break;
#endif
    }
  while (1);
  /* Affinity could not be queried: drop the cpuset and fall back to
     the online-processor count below (or 1 if that is unavailable).  */
  gomp_cpuset_size = 0;
  gomp_global_icv.nthreads_var = 1;
  free (gomp_cpusetp);
  gomp_cpusetp = NULL;
#endif
#ifdef _SC_NPROCESSORS_ONLN
  gomp_global_icv.nthreads_var = sysconf (_SC_NPROCESSORS_ONLN);
#endif
}
146
/* Return the number of processors the current process may run on,
   used by omp_get_num_procs and gomp_dynamic_max_threads.  */
static int
get_num_procs (void)
{
#ifdef HAVE_PTHREAD_AFFINITY_NP
  if (gomp_places_list == NULL)
    {
      /* Count only the CPUs this process can use.  */
      if (gomp_cpusetp
	  && pthread_getaffinity_np (pthread_self (), gomp_get_cpuset_size,
				     gomp_cpusetp) == 0)
	{
	  int ret = gomp_cpuset_popcount (gomp_get_cpuset_size, gomp_cpusetp);
	  /* Never report zero processors.  */
	  return ret != 0 ? ret : 1;
	}
    }
  else
    {
      /* We can't use pthread_getaffinity_np in this case
	 (we have changed it ourselves, it binds to just one CPU).
	 Count instead the number of different CPUs we are
	 using.  gomp_init_affinity updated gomp_available_cpus to
	 the number of CPUs in the GOMP_AFFINITY mask that we are
	 allowed to use though.  */
      return gomp_available_cpus;
    }
#endif
  /* Fallbacks when affinity information is unavailable.  */
#ifdef _SC_NPROCESSORS_ONLN
  return sysconf (_SC_NPROCESSORS_ONLN);
#else
  return gomp_icv (false)->nthreads_var;
#endif
}
179
180/* When OMP_DYNAMIC is set, at thread launch determine the number of
181 threads we should spawn for this team. */
182/* ??? I have no idea what best practice for this is. Surely some
183 function of the number of processors that are *still* online and
184 the load average. Here I use the number of processors online
185 minus the 15 minute load average. */
186
187unsigned
188gomp_dynamic_max_threads (void)
189{
a68ab351 190 unsigned n_onln, loadavg, nthreads_var = gomp_icv (false)->nthreads_var;
12aac30b
JJ
191
192 n_onln = get_num_procs ();
a68ab351
JJ
193 if (n_onln > nthreads_var)
194 n_onln = nthreads_var;
12aac30b
JJ
195
196 loadavg = 0;
197#ifdef HAVE_GETLOADAVG
198 {
199 double dloadavg[3];
200 if (getloadavg (dloadavg, 3) == 3)
201 {
202 /* Add 0.1 to get a kind of biased rounding. */
203 loadavg = dloadavg[2] + 0.1;
204 }
205 }
206#endif
207
208 if (loadavg >= n_onln)
209 return 1;
210 else
211 return n_onln - loadavg;
212}
213
/* OpenMP API entry point: report the number of processors available
   to the program, honoring the current affinity mask.  */
int
omp_get_num_procs (void)
{
  return get_num_procs ();
}

ialias (omp_get_num_procs)