libgfortran/generated/matmul_r4.c
/* Implementation of the MATMUL intrinsic
   Copyright 2002, 2005, 2006, 2007 Free Software Foundation, Inc.
   Contributed by Paul Brook <paul@nowt.org>

This file is part of the GNU Fortran 95 runtime library (libgfortran).

Libgfortran is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.

In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file.  (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)

Libgfortran is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public
License along with libgfortran; see the file COPYING.  If not,
write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */

#include "libgfortran.h"
#include <stdlib.h>
#include <string.h>
#include <assert.h>


#if defined (HAVE_GFC_REAL_4)

/* Prototype for the BLAS ?gemm subroutine, a pointer to which can be
   passed to us by the front-end, in which case we'll call it for large
   matrices.  */

typedef void (*blas_call)(const char *, const char *, const int *, const int *,
                          const int *, const GFC_REAL_4 *, const GFC_REAL_4 *,
                          const int *, const GFC_REAL_4 *, const int *,
                          const GFC_REAL_4 *, GFC_REAL_4 *, const int *,
                          int, int);

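/* For this REAL(KIND=4) variant the pointer is expected to refer to the
   BLAS routine sgemm.  Purely as an illustration (mirroring the actual call
   made further below), an invocation through the pointer looks like

     gemm ("N", "N", &m, &n, &k, &alpha, a, &lda, b, &ldb, &beta, c, &ldc,
           1, 1);

   where the two trailing integers are the hidden lengths of the character
   arguments.  */
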
/* The order of loops is different in the case of plain matrix
   multiplication C=MATMUL(A,B), and in the frequent special case where
   the argument A is the temporary result of a TRANSPOSE intrinsic:
   C=MATMUL(TRANSPOSE(A),B).  Transposed temporaries are detected by
   looking at their strides.

   The equivalent Fortran pseudo-code is:

   DIMENSION A(M,COUNT), B(COUNT,N), C(M,N)
   IF (.NOT.IS_TRANSPOSED(A)) THEN
     C = 0
     DO J=1,N
       DO K=1,COUNT
         DO I=1,M
           C(I,J) = C(I,J)+A(I,K)*B(K,J)
   ELSE
     DO J=1,N
       DO I=1,M
         S = 0
         DO K=1,COUNT
           S = S+A(I,K)*B(K,J)
         C(I,J) = S
   ENDIF
*/

/* If try_blas is set to a nonzero value, then the matmul function will
   see if there is a way to perform the matrix multiplication by a call
   to the BLAS gemm function.  */

extern void matmul_r4 (gfc_array_r4 * const restrict retarray,
        gfc_array_r4 * const restrict a, gfc_array_r4 * const restrict b, int try_blas,
        int blas_limit, blas_call gemm);
export_proto(matmul_r4);

void
matmul_r4 (gfc_array_r4 * const restrict retarray,
        gfc_array_r4 * const restrict a, gfc_array_r4 * const restrict b, int try_blas,
        int blas_limit, blas_call gemm)
{
  const GFC_REAL_4 * restrict abase;
  const GFC_REAL_4 * restrict bbase;
  GFC_REAL_4 * restrict dest;

  index_type rxstride, rystride, axstride, aystride, bxstride, bystride;
  index_type x, y, n, count, xcount, ycount;

  assert (GFC_DESCRIPTOR_RANK (a) == 2
          || GFC_DESCRIPTOR_RANK (b) == 2);

  /* C[xcount,ycount] = A[xcount, count] * B[count,ycount]

     Either A or B (but not both) can be rank 1:

     o One-dimensional argument A is implicitly treated as a row matrix
       dimensioned [1,count], so xcount=1.

     o One-dimensional argument B is implicitly treated as a column matrix
       dimensioned [count, 1], so ycount=1.
  */

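  /* If the caller did not supply storage for the result, fill in the
     bounds and strides of the result descriptor and allocate its data
     here.  */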
  if (retarray->data == NULL)
    {
      if (GFC_DESCRIPTOR_RANK (a) == 1)
        {
          retarray->dim[0].lbound = 0;
          retarray->dim[0].ubound = b->dim[1].ubound - b->dim[1].lbound;
          retarray->dim[0].stride = 1;
        }
      else if (GFC_DESCRIPTOR_RANK (b) == 1)
        {
          retarray->dim[0].lbound = 0;
          retarray->dim[0].ubound = a->dim[0].ubound - a->dim[0].lbound;
          retarray->dim[0].stride = 1;
        }
      else
        {
          retarray->dim[0].lbound = 0;
          retarray->dim[0].ubound = a->dim[0].ubound - a->dim[0].lbound;
          retarray->dim[0].stride = 1;

          retarray->dim[1].lbound = 0;
          retarray->dim[1].ubound = b->dim[1].ubound - b->dim[1].lbound;
          retarray->dim[1].stride = retarray->dim[0].ubound+1;
        }

      retarray->data
        = internal_malloc_size (sizeof (GFC_REAL_4) * size0 ((array_t *) retarray));
      retarray->offset = 0;
    }
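  /* With bounds checking enabled (-fbounds-check), verify that the extents
     of a caller-supplied result array match the extents implied by the
     arguments.  */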
  else if (unlikely (compile_options.bounds_check))
    {
      index_type ret_extent, arg_extent;

      if (GFC_DESCRIPTOR_RANK (a) == 1)
        {
          arg_extent = b->dim[1].ubound + 1 - b->dim[1].lbound;
          ret_extent = retarray->dim[0].ubound + 1 - retarray->dim[0].lbound;
          if (arg_extent != ret_extent)
            runtime_error ("Incorrect extent in return array in"
                           " MATMUL intrinsic: is %ld, should be %ld",
                           (long int) ret_extent, (long int) arg_extent);
        }
      else if (GFC_DESCRIPTOR_RANK (b) == 1)
        {
          arg_extent = a->dim[0].ubound + 1 - a->dim[0].lbound;
          ret_extent = retarray->dim[0].ubound + 1 - retarray->dim[0].lbound;
          if (arg_extent != ret_extent)
            runtime_error ("Incorrect extent in return array in"
                           " MATMUL intrinsic: is %ld, should be %ld",
                           (long int) ret_extent, (long int) arg_extent);
        }
      else
        {
          arg_extent = a->dim[0].ubound + 1 - a->dim[0].lbound;
          ret_extent = retarray->dim[0].ubound + 1 - retarray->dim[0].lbound;
          if (arg_extent != ret_extent)
            runtime_error ("Incorrect extent in return array in"
                           " MATMUL intrinsic for dimension 1:"
                           " is %ld, should be %ld",
                           (long int) ret_extent, (long int) arg_extent);

          arg_extent = b->dim[1].ubound + 1 - b->dim[1].lbound;
          ret_extent = retarray->dim[1].ubound + 1 - retarray->dim[1].lbound;
          if (arg_extent != ret_extent)
            runtime_error ("Incorrect extent in return array in"
                           " MATMUL intrinsic for dimension 2:"
                           " is %ld, should be %ld",
                           (long int) ret_extent, (long int) arg_extent);
        }
    }


  if (GFC_DESCRIPTOR_RANK (retarray) == 1)
    {
      /* One-dimensional result may be addressed in the code below
         either as a row or a column matrix.  We want both cases to
         work.  */
      rxstride = rystride = retarray->dim[0].stride;
    }
  else
    {
      rxstride = retarray->dim[0].stride;
      rystride = retarray->dim[1].stride;
    }


  if (GFC_DESCRIPTOR_RANK (a) == 1)
    {
      /* Treat it as a row matrix A[1,count].  */
      axstride = a->dim[0].stride;
      aystride = 1;

      xcount = 1;
      count = a->dim[0].ubound + 1 - a->dim[0].lbound;
    }
  else
    {
      axstride = a->dim[0].stride;
      aystride = a->dim[1].stride;

      count = a->dim[1].ubound + 1 - a->dim[1].lbound;
      xcount = a->dim[0].ubound + 1 - a->dim[0].lbound;
    }

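  /* The inner dimensions must conform.  A mismatch is only reported when
     at least one of the two extents is positive; two empty extents are
     accepted.  */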
  if (count != b->dim[0].ubound + 1 - b->dim[0].lbound)
    {
      if (count > 0 || b->dim[0].ubound + 1 - b->dim[0].lbound > 0)
        runtime_error ("dimension of array B incorrect in MATMUL intrinsic");
    }

  if (GFC_DESCRIPTOR_RANK (b) == 1)
    {
      /* Treat it as a column matrix B[count,1].  */
      bxstride = b->dim[0].stride;

      /* bystride should never be used for 1-dimensional b.
         In case it is, we want it to cause a segfault rather than
         an incorrect result.  */
      bystride = 0xDEADBEEF;
      ycount = 1;
    }
  else
    {
      bxstride = b->dim[0].stride;
      bystride = b->dim[1].stride;
      ycount = b->dim[1].ubound + 1 - b->dim[1].lbound;
    }

  abase = a->data;
  bbase = b->data;
  dest = retarray->data;


  /* Now that everything is set up, we perform the multiplication
     itself.  */

#define POW3(x) (((float) (x)) * ((float) (x)) * ((float) (x)))

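  /* Call the BLAS gemm routine instead of the loops below if the caller
     requested it, the result has unit stride in its first dimension, A and
     B each have unit stride in one of their dimensions, and the amount of
     work xcount*ycount*count exceeds blas_limit cubed.  */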
  if (try_blas && rxstride == 1 && (axstride == 1 || aystride == 1)
      && (bxstride == 1 || bystride == 1)
      && (((float) xcount) * ((float) ycount) * ((float) count)
          > POW3(blas_limit)))
    {
      const int m = xcount, n = ycount, k = count, ldc = rystride;
      const GFC_REAL_4 one = 1, zero = 0;
      const int lda = (axstride == 1) ? aystride : axstride,
                ldb = (bxstride == 1) ? bystride : bxstride;

      if (lda > 0 && ldb > 0 && ldc > 0 && m > 1 && n > 1 && k > 1)
        {
          assert (gemm != NULL);
          gemm (axstride == 1 ? "N" : "T", bxstride == 1 ? "N" : "T", &m, &n, &k,
                &one, abase, &lda, bbase, &ldb, &zero, dest, &ldc, 1, 1);
          return;
        }
    }

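  /* A, B and the result all have unit stride in their first dimension:
     plain C=MATMUL(A,B).  Clear the result, then accumulate it column by
     column with the X loop innermost, as in the first branch of the
     pseudo-code above.  */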
  if (rxstride == 1 && axstride == 1 && bxstride == 1)
    {
      const GFC_REAL_4 * restrict bbase_y;
      GFC_REAL_4 * restrict dest_y;
      const GFC_REAL_4 * restrict abase_n;
      GFC_REAL_4 bbase_yn;

      if (rystride == xcount)
        memset (dest, 0, (sizeof (GFC_REAL_4) * xcount * ycount));
      else
        {
          for (y = 0; y < ycount; y++)
            for (x = 0; x < xcount; x++)
              dest[x + y*rystride] = (GFC_REAL_4)0;
        }

      for (y = 0; y < ycount; y++)
        {
          bbase_y = bbase + y*bystride;
          dest_y = dest + y*rystride;
          for (n = 0; n < count; n++)
            {
              abase_n = abase + n*aystride;
              bbase_yn = bbase_y[n];
              for (x = 0; x < xcount; x++)
                {
                  dest_y[x] += abase_n[x] * bbase_yn;
                }
            }
        }
    }
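  /* A has unit stride along its second dimension, i.e. it is the transposed
     temporary from C=MATMUL(TRANSPOSE(A),B).  Compute each result element
     as a dot product, as in the second branch of the pseudo-code above.  */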
  else if (rxstride == 1 && aystride == 1 && bxstride == 1)
    {
      if (GFC_DESCRIPTOR_RANK (a) != 1)
        {
          const GFC_REAL_4 *restrict abase_x;
          const GFC_REAL_4 *restrict bbase_y;
          GFC_REAL_4 *restrict dest_y;
          GFC_REAL_4 s;

          for (y = 0; y < ycount; y++)
            {
              bbase_y = &bbase[y*bystride];
              dest_y = &dest[y*rystride];
              for (x = 0; x < xcount; x++)
                {
                  abase_x = &abase[x*axstride];
                  s = (GFC_REAL_4) 0;
                  for (n = 0; n < count; n++)
                    s += abase_x[n] * bbase_y[n];
                  dest_y[x] = s;
                }
            }
        }
      else
        {
          const GFC_REAL_4 *restrict bbase_y;
          GFC_REAL_4 s;

          for (y = 0; y < ycount; y++)
            {
              bbase_y = &bbase[y*bystride];
              s = (GFC_REAL_4) 0;
              for (n = 0; n < count; n++)
                s += abase[n*axstride] * bbase_y[n];
              dest[y*rystride] = s;
            }
        }
    }
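  /* A's first dimension varies fastest in memory, but none of the strides
     is known to be 1.  Clear the result, then accumulate with X as the
     innermost loop, applying the full strides on every access.  */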
  else if (axstride < aystride)
    {
      for (y = 0; y < ycount; y++)
        for (x = 0; x < xcount; x++)
          dest[x*rxstride + y*rystride] = (GFC_REAL_4)0;

      for (y = 0; y < ycount; y++)
        for (n = 0; n < count; n++)
          for (x = 0; x < xcount; x++)
            /* dest[x,y] += a[x,n] * b[n,y] */
            dest[x*rxstride + y*rystride] += abase[x*axstride + n*aystride] * bbase[n*bxstride + y*bystride];
    }
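  /* Rank-1 A times matrix B: the result is a vector, computed as one dot
     product per column of B, honouring the strides of every operand.  */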
  else if (GFC_DESCRIPTOR_RANK (a) == 1)
    {
      const GFC_REAL_4 *restrict bbase_y;
      GFC_REAL_4 s;

      for (y = 0; y < ycount; y++)
        {
          bbase_y = &bbase[y*bystride];
          s = (GFC_REAL_4) 0;
          for (n = 0; n < count; n++)
            s += abase[n*axstride] * bbase_y[n*bxstride];
          dest[y*rxstride] = s;
        }
    }
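  /* Generic fallback: compute each element of the result as a dot product,
     using the full strides of A, B and the result.  */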
  else
    {
      const GFC_REAL_4 *restrict abase_x;
      const GFC_REAL_4 *restrict bbase_y;
      GFC_REAL_4 *restrict dest_y;
      GFC_REAL_4 s;

      for (y = 0; y < ycount; y++)
        {
          bbase_y = &bbase[y*bystride];
          dest_y = &dest[y*rystride];
          for (x = 0; x < xcount; x++)
            {
              abase_x = &abase[x*axstride];
              s = (GFC_REAL_4) 0;
              for (n = 0; n < count; n++)
                s += abase_x[n*aystride] * bbase_y[n*bxstride];
              dest_y[x*rxstride] = s;
            }
        }
    }
}

#endif