]> git.ipfire.org Git - thirdparty/gcc.git/blob - libphobos/libdruntime/core/stdc/stdarg.d
Add D front-end, libphobos library, and D2 testsuite.
[thirdparty/gcc.git] / libphobos / libdruntime / core / stdc / stdarg.d
1 /**
2 * D header file for C99.
3 *
4 * $(C_HEADER_DESCRIPTION pubs.opengroup.org/onlinepubs/009695399/basedefs/_stdarg.h.html, _stdarg.h)
5 *
6 * Copyright: Copyright Digital Mars 2000 - 2009.
7 * License: $(HTTP www.boost.org/LICENSE_1_0.txt, Boost License 1.0).
8 * Authors: Walter Bright, Hauke Duden
9 * Standards: ISO/IEC 9899:1999 (E)
10 * Source: $(DRUNTIMESRC core/stdc/_stdarg.d)
11 */
12
13 /* NOTE: This file has been patched from the original DMD distribution to
14 * work with the GDC compiler.
15 */
16 module core.stdc.stdarg;
17
18 @system:
19 //@nogc: // Not yet, need to make TypeInfo's member functions @nogc first
20 nothrow:
21
version (GNU)
{
    // GDC: the va_list machinery comes from GCC's target-specific builtins
    // rather than the hand-written DMD implementations further below.
    import gcc.builtins;
    alias __builtin_va_list __gnuc_va_list;


    /*********************
     * The argument pointer type (GCC's target-specific builtin va_list).
     */
    alias __gnuc_va_list va_list;


    /**********
     * Initialize ap.
     * parmn should be the last named parameter.
     * Declaration only — presumably recognized and expanded by the compiler,
     * like the Win64 intrinsic below (TODO confirm against gdc front end).
     */
    void va_start(T)(out va_list ap, ref T parmn);


    /************
     * Retrieve and return the next value that is type T.
     * (Declaration only; no body here.)
     */
    T va_arg(T)(ref va_list ap);


    /*************
     * Retrieve and store through parmn the next value that is of type T.
     * (Declaration only; no body here.)
     */
    void va_arg(T)(ref va_list ap, ref T parmn);
51
52
/*************
 * Retrieve and store through parmn the next value that is of TypeInfo ti.
 * Used when the static type is not known.
 */
version (X86)
{
    ///
    void va_arg()(ref va_list ap, TypeInfo ti, void* parmn)
    {
        // Remember where the argument lives, then advance ap past it,
        // with the size rounded up to the machine word size.
        auto src = ap;
        auto argSize = ti.tsize;
        ap = cast(va_list)(cast(size_t)src + ((argSize + size_t.sizeof - 1) & ~(size_t.sizeof - 1)));
        parmn[0..argSize] = src[0..argSize];
    }
}
else version (X86_64)
{
    /// Layout of this struct must match __builtin_va_list for C ABI compatibility
    struct __va_list
    {
        uint offset_regs = 6 * 8;            // no regs: default is past the 6 x 8-byte GP save area
        uint offset_fpregs = 6 * 8 + 8 * 16; // no fp regs: default is past the 8 x 16-byte XMM save area
        void* stack_args;                    // next stack-passed argument
        void* reg_args;                      // base of the register save area
    }

    /// Copy the next vararg, classified per the System V x86-64 ABI, into parmn.
    void va_arg()(ref va_list apx, TypeInfo ti, void* parmn)
    {
        __va_list* ap = cast(__va_list*)apx;
        // argTypes splits the type into up to two 8-byte pieces; returns
        // non-zero when the type cannot be passed as varargs.
        TypeInfo arg1, arg2;
        if (!ti.argTypes(arg1, arg2))
        {
            // Flag bit 2 marks a piece that travels in an XMM register.
            bool inXMMregister(TypeInfo arg) pure nothrow @safe
            {
                return (arg.flags & 2) != 0;
            }

            TypeInfo_Vector v1 = arg1 ? cast(TypeInfo_Vector)arg1 : null;
            if (arg1 && (arg1.tsize() <= 8 || v1))
            {   // Arg is passed in one register
                auto tsize = arg1.tsize();
                void* p;
                bool stack = false;
                // Saved so we can rewind if the second piece forces the
                // whole argument onto the stack (both pieces must agree).
                auto offset_fpregs_save = ap.offset_fpregs;
                auto offset_regs_save = ap.offset_regs;
            L1:
                if (inXMMregister(arg1) || v1)
                {   // Passed in XMM register
                    if (ap.offset_fpregs < (6 * 8 + 16 * 8) && !stack)
                    {
                        p = ap.reg_args + ap.offset_fpregs;
                        ap.offset_fpregs += 16;
                    }
                    else
                    {
                        p = ap.stack_args;
                        ap.stack_args += (tsize + size_t.sizeof - 1) & ~(size_t.sizeof - 1);
                        stack = true;
                    }
                }
                else
                {   // Passed in regular register
                    if (ap.offset_regs < 6 * 8 && !stack)
                    {
                        p = ap.reg_args + ap.offset_regs;
                        ap.offset_regs += 8;
                    }
                    else
                    {
                        p = ap.stack_args;
                        ap.stack_args += 8;
                        stack = true;
                    }
                }
                parmn[0..tsize] = p[0..tsize];

                if (arg2)
                {
                    if (inXMMregister(arg2))
                    {   // Passed in XMM register
                        if (ap.offset_fpregs < (6 * 8 + 16 * 8) && !stack)
                        {
                            p = ap.reg_args + ap.offset_fpregs;
                            ap.offset_fpregs += 16;
                        }
                        else
                        {
                            if (!stack)
                            {   // arg1 is really on the stack, so rewind and redo
                                ap.offset_fpregs = offset_fpregs_save;
                                ap.offset_regs = offset_regs_save;
                                stack = true;
                                goto L1;
                            }
                            p = ap.stack_args;
                            ap.stack_args += (arg2.tsize() + size_t.sizeof - 1) & ~(size_t.sizeof - 1);
                        }
                    }
                    else
                    {   // Passed in regular register
                        if (ap.offset_regs < 6 * 8 && !stack)
                        {
                            p = ap.reg_args + ap.offset_regs;
                            ap.offset_regs += 8;
                        }
                        else
                        {
                            if (!stack)
                            {   // arg1 is really on the stack, so rewind and redo
                                ap.offset_fpregs = offset_fpregs_save;
                                ap.offset_regs = offset_regs_save;
                                stack = true;
                                goto L1;
                            }
                            p = ap.stack_args;
                            ap.stack_args += 8;
                        }
                    }
                    // Second piece is everything past the first 8 bytes.
                    auto sz = ti.tsize() - 8;
                    (parmn + 8)[0..sz] = p[0..sz];
                }
            }
            else
            {   // Always passed in memory
                // The arg may have more strict alignment than the stack
                auto talign = ti.talign();
                auto tsize = ti.tsize();
                auto p = cast(void*)((cast(size_t)ap.stack_args + talign - 1) & ~(talign - 1));
                ap.stack_args = cast(void*)(cast(size_t)p + ((tsize + size_t.sizeof - 1) & ~(size_t.sizeof - 1)));
                parmn[0..tsize] = p[0..tsize];
            }
        }
        else
        {
            assert(false, "not a valid argument type for va_arg");
        }
    }
}
else version (ARM)
{
    ///
    void va_arg()(ref va_list ap, TypeInfo ti, void* parmn)
    {
        // Assumes the builtin va_list is a single pointer under the hood;
        // arguments occupy word-aligned stack slots.
        auto src = *cast(void**) &ap;
        auto argSize = ti.tsize();
        *cast(void**) &ap += (argSize + size_t.sizeof - 1) & ~(size_t.sizeof - 1);
        parmn[0..argSize] = src[0..argSize];
    }
}
else
{
    /// Fallback stub: instantiating va_arg on an unsupported target
    /// is a compile-time error (template, so only errors if used).
    void va_arg()(ref va_list ap, TypeInfo ti, void* parmn)
    {
        static assert(false, "Unsupported platform");
    }
}


/***********************
 * End use of ap.
 */
alias __builtin_va_end va_end;


/***********************
 * Make a copy of ap.
 */
alias __builtin_va_copy va_copy;

}
else version (X86)
{
    /*********************
     * The argument pointer type: a raw byte pointer into the
     * caller's argument area on the stack.
     */
    alias char* va_list;
231
/**********
 * Initialize ap.
 * For 32 bit code, parmn should be the last named parameter.
 * For 64 bit code, parmn should be __va_argsave.
 */
void va_start(T)(out va_list ap, ref T parmn)
{
    // The first variadic argument sits immediately after parmn,
    // with parmn's size rounded up to the 4-byte stack slot size.
    enum slotSize = (T.sizeof + int.sizeof - 1) & ~(int.sizeof - 1);
    ap = cast(va_list)(cast(void*) &parmn + slotSize);
}
241
/************
 * Retrieve and return the next value that is type T.
 * Should use the other va_arg instead, as this won't work for 64 bit code.
 */
T va_arg(T)(ref va_list ap)
{
    // Read the current slot, then advance past it (rounded to 4 bytes).
    auto slot = cast(void*) ap;
    ap = cast(va_list)(slot + ((T.sizeof + int.sizeof - 1) & ~(int.sizeof - 1)));
    return *cast(T*) slot;
}
252
/************
 * Retrieve and store through parmn the next value that is type T.
 * This is the preferred version.
 */
void va_arg(T)(ref va_list ap, ref T parmn)
{
    // Read the current slot, then advance past it (rounded to 4 bytes).
    auto slot = cast(void*) ap;
    ap = cast(va_list)(slot + ((T.sizeof + int.sizeof - 1) & ~(int.sizeof - 1)));
    parmn = *cast(T*) slot;
}
262
/*************
 * Retrieve and store through parmn the next value that is of TypeInfo ti.
 * Used when the static type is not known.
 */
void va_arg()(ref va_list ap, TypeInfo ti, void* parmn)
{
    // TODO: round the source pointer up to ti.talign once TypeInfo.talign
    // is available everywhere; for now slots are word-aligned only.
    auto src = ap;
    auto argSize = ti.tsize;
    ap = cast(va_list)(cast(size_t)src + ((argSize + size_t.sizeof - 1) & ~(size_t.sizeof - 1)));
    parmn[0..argSize] = src[0..argSize];
}
277
/***********************
 * End use of ap.
 */
void va_end(va_list ap)
{
    // Nothing to release: va_list is just a stack pointer on X86.
}

/// Copy src to dest; safe because the list state is a single pointer.
void va_copy(out va_list dest, va_list src)
{
    dest = src;
}
}
else version (Windows) // Win64
{   /* Win64 is characterized by all arguments fitting into a register size.
     * Smaller ones are padded out to register size, and larger ones are passed by
     * reference.
     */

    /*********************
     * The argument pointer type: a raw byte pointer into the
     * sequence of 8-byte argument slots.
     */
    alias char* va_list;

    /**********
     * Initialize ap.
     * parmn should be the last named parameter.
     */
    void va_start(T)(out va_list ap, ref T parmn); // Compiler intrinsic
307
/************
 * Retrieve and return the next value that is type T.
 */
T va_arg(T)(ref va_list ap)
{
    // Every Win64 argument occupies exactly one register-sized slot.
    auto slot = cast(void*) ap;
    ap = cast(va_list)(slot + ((size_t.sizeof + size_t.sizeof - 1) & ~(size_t.sizeof - 1)));
    // Arguments bigger than a register are passed by reference.
    static if (T.sizeof > size_t.sizeof)
        return **cast(T**) slot;
    else
        return *cast(T*) slot;
}
320
/************
 * Retrieve and store through parmn the next value that is type T.
 * This is the preferred version.
 */
void va_arg(T)(ref va_list ap, ref T parmn)
{
    // Every Win64 argument occupies exactly one register-sized slot.
    auto slot = cast(void*) ap;
    ap = cast(va_list)(slot + ((size_t.sizeof + size_t.sizeof - 1) & ~(size_t.sizeof - 1)));
    // Arguments bigger than a register are passed by reference.
    static if (T.sizeof > size_t.sizeof)
        parmn = **cast(T**) slot;
    else
        parmn = *cast(T*) slot;
}
333
/*************
 * Retrieve and store through parmn the next value that is of TypeInfo ti.
 * Used when the static type is not known.
 */
void va_arg()(ref va_list ap, TypeInfo ti, void* parmn)
{
    // TODO: honour ti.talign once TypeInfo.talign is available everywhere.
    auto slot = ap;
    auto argSize = ti.tsize;
    // Advance by exactly one register-sized slot regardless of type size.
    ap = cast(va_list)(cast(size_t)slot + ((size_t.sizeof + size_t.sizeof - 1) & ~(size_t.sizeof - 1)));
    // Larger-than-register values are passed by reference through the slot.
    void* src = (argSize > size_t.sizeof) ? *cast(void**)slot : slot;
    parmn[0..argSize] = src[0..argSize];
}
349
/***********************
 * End use of ap.
 */
void va_end(va_list ap)
{
    // Nothing to release: va_list is just a slot pointer on Win64.
}

/// Copy src to dest; safe because the list state is a single pointer.
void va_copy(out va_list dest, va_list src)
{
    dest = src;
}
}
else version (X86_64)
{
    // Determine if type is a vector type (matches __vector(T[N]) only).
    template isVectorType(T)
    {
        enum isVectorType = false;
    }

    template isVectorType(T : __vector(T[N]), size_t N)
    {
        enum isVectorType = true;
    }

    // Layout of this struct must match __gnuc_va_list for C ABI compatibility
    struct __va_list_tag
    {
        uint offset_regs = 6 * 8;            // no regs: default is past the 6 x 8-byte GP save area
        uint offset_fpregs = 6 * 8 + 8 * 16; // no fp regs: default is past the 8 x 16-byte XMM save area
        void* stack_args;                    // next stack-passed argument
        void* reg_args;                      // base of the register save area
    }
    alias __va_list = __va_list_tag;

    // Register save area spilled by the callee's prologue; referenced by
    // name (__va_argsave) from va_start in 64-bit code.
    align(16) struct __va_argsave_t
    {
        size_t[6] regs;   // RDI,RSI,RDX,RCX,R8,R9
        real[8] fpregs;   // XMM0..XMM7
        __va_list va;
    }

    /*
     * va_list is a pointer to the state struct, so that passing a va_list
     * to a function passes it by reference — mirroring C, where
     * __builtin_va_list is a one-element array and decays to a pointer.
     */
    alias va_list = __va_list*;

    ///
    void va_start(T)(out va_list ap, ref T parmn); // Compiler intrinsic
401
/// Fetch the next argument of type T and return it by value.
/// Thin wrapper over the by-reference overload below.
T va_arg(T)(va_list ap)
{
    T result;
    va_arg(ap, result);
    return result;
}
408
/// Retrieve and store through parmn the next value of type T, using the
/// compiler-computed System V x86-64 classification of T.
void va_arg(T)(va_list apx, ref T parmn)
{
    __va_list* ap = cast(__va_list*)apx;
    // U is the compiler-supplied sequence of up to two "eightbyte"
    // pieces the ABI assigns to T (empty when T is always in memory).
    static if (is(T U == __argTypes))
    {
        static if (U.length == 0 || T.sizeof > 16 || (U[0].sizeof > 8 && !isVectorType!(U[0])))
        {   // Always passed in memory
            // The arg may have more strict alignment than the stack
            auto p = (cast(size_t)ap.stack_args + T.alignof - 1) & ~(T.alignof - 1);
            ap.stack_args = cast(void*)(p + ((T.sizeof + size_t.sizeof - 1) & ~(size_t.sizeof - 1)));
            parmn = *cast(T*)p;
        }
        else static if (U.length == 1)
        {   // Arg is passed in one register
            alias U[0] T1;
            static if (is(T1 == double) || is(T1 == float) || isVectorType!(T1))
            {   // Passed in XMM register
                if (ap.offset_fpregs < (6 * 8 + 16 * 8))
                {
                    parmn = *cast(T*)(ap.reg_args + ap.offset_fpregs);
                    ap.offset_fpregs += 16;
                }
                else
                {
                    parmn = *cast(T*)ap.stack_args;
                    ap.stack_args += (T1.sizeof + size_t.sizeof - 1) & ~(size_t.sizeof - 1);
                }
            }
            else
            {   // Passed in regular register
                if (ap.offset_regs < 6 * 8 && T.sizeof <= 8)
                {
                    parmn = *cast(T*)(ap.reg_args + ap.offset_regs);
                    ap.offset_regs += 8;
                }
                else
                {
                    auto p = (cast(size_t)ap.stack_args + T.alignof - 1) & ~(T.alignof - 1);
                    ap.stack_args = cast(void*)(p + ((T.sizeof + size_t.sizeof - 1) & ~(size_t.sizeof - 1)));
                    parmn = *cast(T*)p;
                }
            }
        }
        else static if (U.length == 2)
        {   // Arg is passed in two registers
            alias U[0] T1;
            alias U[1] T2;
            // p points at the second 8-byte half of parmn.
            auto p = cast(void*)&parmn + 8;

            // Both must be in registers, or both on stack, hence 4 cases

            static if ((is(T1 == double) || is(T1 == float)) &&
                       (is(T2 == double) || is(T2 == float)))
            {
                // Need room for TWO XMM slots, hence the "- 16".
                if (ap.offset_fpregs < (6 * 8 + 16 * 8) - 16)
                {
                    *cast(T1*)&parmn = *cast(T1*)(ap.reg_args + ap.offset_fpregs);
                    *cast(T2*)p = *cast(T2*)(ap.reg_args + ap.offset_fpregs + 16);
                    ap.offset_fpregs += 32;
                }
                else
                {
                    *cast(T1*)&parmn = *cast(T1*)ap.stack_args;
                    ap.stack_args += (T1.sizeof + size_t.sizeof - 1) & ~(size_t.sizeof - 1);
                    *cast(T2*)p = *cast(T2*)ap.stack_args;
                    ap.stack_args += (T2.sizeof + size_t.sizeof - 1) & ~(size_t.sizeof - 1);
                }
            }
            else static if (is(T1 == double) || is(T1 == float))
            {   // First half in XMM, second half in a GP register
                void* a = void;
                if (ap.offset_fpregs < (6 * 8 + 16 * 8) &&
                    ap.offset_regs < 6 * 8 && T2.sizeof <= 8)
                {
                    *cast(T1*)&parmn = *cast(T1*)(ap.reg_args + ap.offset_fpregs);
                    ap.offset_fpregs += 16;
                    a = ap.reg_args + ap.offset_regs;
                    ap.offset_regs += 8;
                }
                else
                {
                    *cast(T1*)&parmn = *cast(T1*)ap.stack_args;
                    ap.stack_args += (T1.sizeof + size_t.sizeof - 1) & ~(size_t.sizeof - 1);
                    a = ap.stack_args;
                    ap.stack_args += 8;
                }
                // Be careful not to go past the size of the actual argument
                const sz2 = T.sizeof - 8;
                p[0..sz2] = a[0..sz2];
            }
            else static if (is(T2 == double) || is(T2 == float))
            {   // First half in a GP register, second half in XMM
                if (ap.offset_regs < 6 * 8 && T1.sizeof <= 8 &&
                    ap.offset_fpregs < (6 * 8 + 16 * 8))
                {
                    *cast(T1*)&parmn = *cast(T1*)(ap.reg_args + ap.offset_regs);
                    ap.offset_regs += 8;
                    *cast(T2*)p = *cast(T2*)(ap.reg_args + ap.offset_fpregs);
                    ap.offset_fpregs += 16;
                }
                else
                {
                    *cast(T1*)&parmn = *cast(T1*)ap.stack_args;
                    ap.stack_args += 8;
                    *cast(T2*)p = *cast(T2*)ap.stack_args;
                    ap.stack_args += (T2.sizeof + size_t.sizeof - 1) & ~(size_t.sizeof - 1);
                }
            }
            else // both in regular registers
            {
                void* a = void;
                // Need room for TWO GP slots, hence "5 * 8".
                if (ap.offset_regs < 5 * 8 && T1.sizeof <= 8 && T2.sizeof <= 8)
                {
                    *cast(T1*)&parmn = *cast(T1*)(ap.reg_args + ap.offset_regs);
                    ap.offset_regs += 8;
                    a = ap.reg_args + ap.offset_regs;
                    ap.offset_regs += 8;
                }
                else
                {
                    *cast(T1*)&parmn = *cast(T1*)ap.stack_args;
                    ap.stack_args += 8;
                    a = ap.stack_args;
                    ap.stack_args += 8;
                }
                // Be careful not to go past the size of the actual argument
                const sz2 = T.sizeof - 8;
                p[0..sz2] = a[0..sz2];
            }
        }
        else
        {
            static assert(false);
        }
    }
    else
    {
        static assert(false, "not a valid argument type for va_arg");
    }
}
550
/// Retrieve and store through parmn the next value described by runtime
/// TypeInfo ti, classified per the System V x86-64 ABI. Used when the
/// static type is not known.
void va_arg()(va_list apx, TypeInfo ti, void* parmn)
{
    __va_list* ap = cast(__va_list*)apx;
    // argTypes splits the type into up to two 8-byte pieces; returns
    // non-zero when the type cannot be passed as varargs.
    TypeInfo arg1, arg2;
    if (!ti.argTypes(arg1, arg2))
    {
        // Flag bit 2 marks a piece that travels in an XMM register.
        bool inXMMregister(TypeInfo arg) pure nothrow @safe
        {
            return (arg.flags & 2) != 0;
        }

        TypeInfo_Vector v1 = arg1 ? cast(TypeInfo_Vector)arg1 : null;
        if (arg1 && (arg1.tsize <= 8 || v1))
        {   // Arg is passed in one register
            auto tsize = arg1.tsize;
            void* p;
            bool stack = false;
            // Saved so we can rewind if the second piece forces the
            // whole argument onto the stack (both pieces must agree).
            auto offset_fpregs_save = ap.offset_fpregs;
            auto offset_regs_save = ap.offset_regs;
        L1:
            if (inXMMregister(arg1) || v1)
            {   // Passed in XMM register
                if (ap.offset_fpregs < (6 * 8 + 16 * 8) && !stack)
                {
                    p = ap.reg_args + ap.offset_fpregs;
                    ap.offset_fpregs += 16;
                }
                else
                {
                    p = ap.stack_args;
                    ap.stack_args += (tsize + size_t.sizeof - 1) & ~(size_t.sizeof - 1);
                    stack = true;
                }
            }
            else
            {   // Passed in regular register
                if (ap.offset_regs < 6 * 8 && !stack)
                {
                    p = ap.reg_args + ap.offset_regs;
                    ap.offset_regs += 8;
                }
                else
                {
                    p = ap.stack_args;
                    ap.stack_args += 8;
                    stack = true;
                }
            }
            parmn[0..tsize] = p[0..tsize];

            if (arg2)
            {
                if (inXMMregister(arg2))
                {   // Passed in XMM register
                    if (ap.offset_fpregs < (6 * 8 + 16 * 8) && !stack)
                    {
                        p = ap.reg_args + ap.offset_fpregs;
                        ap.offset_fpregs += 16;
                    }
                    else
                    {
                        if (!stack)
                        {   // arg1 is really on the stack, so rewind and redo
                            ap.offset_fpregs = offset_fpregs_save;
                            ap.offset_regs = offset_regs_save;
                            stack = true;
                            goto L1;
                        }
                        p = ap.stack_args;
                        ap.stack_args += (arg2.tsize + size_t.sizeof - 1) & ~(size_t.sizeof - 1);
                    }
                }
                else
                {   // Passed in regular register
                    if (ap.offset_regs < 6 * 8 && !stack)
                    {
                        p = ap.reg_args + ap.offset_regs;
                        ap.offset_regs += 8;
                    }
                    else
                    {
                        if (!stack)
                        {   // arg1 is really on the stack, so rewind and redo
                            ap.offset_fpregs = offset_fpregs_save;
                            ap.offset_regs = offset_regs_save;
                            stack = true;
                            goto L1;
                        }
                        p = ap.stack_args;
                        ap.stack_args += 8;
                    }
                }
                // Second piece is everything past the first 8 bytes.
                auto sz = ti.tsize - 8;
                (parmn + 8)[0..sz] = p[0..sz];
            }
        }
        else
        {   // Always passed in memory
            // The arg may have more strict alignment than the stack
            auto talign = ti.talign;
            auto tsize = ti.tsize;
            auto p = cast(void*)((cast(size_t)ap.stack_args + talign - 1) & ~(talign - 1));
            ap.stack_args = cast(void*)(cast(size_t)p + ((tsize + size_t.sizeof - 1) & ~(size_t.sizeof - 1)));
            parmn[0..tsize] = p[0..tsize];
        }
    }
    else
    {
        assert(false, "not a valid argument type for va_arg");
    }
}
663
/// End use of ap. Nothing to release: the state lives in storage owned
/// by the caller (see va_copy below).
void va_end(va_list ap)
{
}

import core.stdc.stdlib : alloca;

/// Make an independent copy of src in dest. The storage for the copy is
/// supplied by the default argument.
void va_copy(out va_list dest, va_list src, void* storage = alloca(__va_list_tag.sizeof))
{
    // Instead of copying the pointers, and aliasing the source va_list,
    // the default argument alloca will allocate storage in the caller's
    // stack frame. This is still not correct (it should be allocated in
    // the place where the va_list variable is declared) but most of the
    // time the caller's stack frame _is_ the place where the va_list is
    // allocated, so in most cases this will now work.
    dest = cast(va_list)storage;
    *dest = *src;
}
}
else
{
    // No varargs ABI implemented for this target.
    static assert(false, "Unsupported platform");
}