/* Subroutines used for code generation for eBPF.
   Copyright (C) 2019-2020 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "recog.h"
#include "output.h"
#include "alias.h"
#include "tree.h"
#include "stringpool.h"
#include "attribs.h"
#include "varasm.h"
#include "stor-layout.h"
#include "calls.h"
#include "function.h"
#include "explow.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "reload.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "basic-block.h"
#include "expr.h"
#include "optabs.h"
#include "bitmap.h"
#include "df.h"
#include "c-family/c-common.h"
#include "diagnostic.h"
#include "builtins.h"
#include "predict.h"
#include "langhooks.h"

/* Per-function machine data.  */
struct GTY(()) machine_function
{
  /* Number of bytes saved on the stack for local variables.  */
  int local_vars_size;

  /* Number of bytes saved on the stack for callee-saved
     registers.  */
  int callee_saved_reg_size;
};

/* Data structures for the eBPF specific built-ins.  */

/* Maximum number of arguments taken by a builtin function, plus
   one.  */
#define BPF_BUILTIN_MAX_ARGS 5

enum bpf_builtins
{
  BPF_BUILTIN_UNUSED = 0,
  /* Built-ins for kernel helpers.  */
#define DEF_HELPER(V,D,N,T) BPF_BUILTIN_HELPER_##D,
# include "bpf-helpers.def"
#undef DEF_HELPER
  BPF_BUILTIN_HELPER_MAX,
  /* Built-ins for non-generic loads and stores.  */
  BPF_BUILTIN_LOAD_BYTE = BPF_BUILTIN_HELPER_MAX,
  BPF_BUILTIN_LOAD_HALF,
  BPF_BUILTIN_LOAD_WORD,
  BPF_BUILTIN_MAX,
};

/* This table is indexed by an enum bpf_builtin.  */
static const char *bpf_helper_names[] =
{
  NULL,
#define DEF_HELPER(V,D,N,T) #N,
# include "bpf-helpers.def"
#undef DEF_HELPER
  NULL,
  NULL,
  NULL,
  NULL
};

/* Return the builtin code corresponding to the kernel helper builtin
   __builtin_bpf_helper_NAME, or 0 if NAME doesn't correspond to a
   kernel helper builtin.  */

static inline int
bpf_helper_code (const char *name)
{
  int i;

  for (i = 1; i < BPF_BUILTIN_HELPER_MAX; ++i)
    if (strcmp (name, bpf_helper_names[i]) == 0)
      return i;

  return 0;
}

static GTY (()) tree bpf_builtins[(int) BPF_BUILTIN_MAX];

/* Initialize the per-function machine status.  */

static struct machine_function *
bpf_init_machine_status (void)
{
  /* Note this initializes all fields to 0, which is just OK for
     us.  */
  return ggc_cleared_alloc<machine_function> ();
}

/* Override options and do some other initialization.  */

static void
bpf_option_override (void)
{
  /* Set the initializer for the per-function status structure.  */
  init_machine_status = bpf_init_machine_status;
}

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE bpf_option_override

/* Define target-specific CPP macros.  This function is used in the
   definition of TARGET_CPU_CPP_BUILTINS in bpf.h.  */

#define builtin_define(TXT) cpp_define (pfile, TXT)

void
bpf_target_macros (cpp_reader *pfile)
{
  builtin_define ("__BPF__");

  if (TARGET_BIG_ENDIAN)
    builtin_define ("__BPF_BIG_ENDIAN__");
  else
    builtin_define ("__BPF_LITTLE_ENDIAN__");

  /* Define BPF_KERNEL_VERSION_CODE */
  {
    const char *version_code;
    char *kernel_version_code;

    switch (bpf_kernel)
      {
      case LINUX_V4_0: version_code = "0x40000"; break;
      case LINUX_V4_1: version_code = "0x40100"; break;
      case LINUX_V4_2: version_code = "0x40200"; break;
      case LINUX_V4_3: version_code = "0x40300"; break;
      case LINUX_V4_4: version_code = "0x40400"; break;
      case LINUX_V4_5: version_code = "0x40500"; break;
      case LINUX_V4_6: version_code = "0x40600"; break;
      case LINUX_V4_7: version_code = "0x40700"; break;
      case LINUX_V4_8: version_code = "0x40800"; break;
      case LINUX_V4_9: version_code = "0x40900"; break;
      case LINUX_V4_10: version_code = "0x40a00"; break;
      case LINUX_V4_11: version_code = "0x40b00"; break;
      case LINUX_V4_12: version_code = "0x40c00"; break;
      case LINUX_V4_13: version_code = "0x40d00"; break;
      case LINUX_V4_14: version_code = "0x40e00"; break;
      case LINUX_V4_15: version_code = "0x40f00"; break;
      case LINUX_V4_16: version_code = "0x41000"; break;
      case LINUX_V4_17: version_code = "0x41100"; break;
      case LINUX_V4_18: version_code = "0x41200"; break;
      case LINUX_V4_19: version_code = "0x41300"; break;
      case LINUX_V4_20: version_code = "0x41400"; break;
      case LINUX_V5_0: version_code = "0x50000"; break;
      case LINUX_V5_1: version_code = "0x50100"; break;
      case LINUX_V5_2: version_code = "0x50200"; break;
      default:
        gcc_unreachable ();
      }

    kernel_version_code = ACONCAT (("__BPF_KERNEL_VERSION_CODE__=",
                                    version_code, NULL));
    builtin_define (kernel_version_code);
  }
}
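
/* As an illustration (not part of the port itself), a BPF program
   compiled with this backend can test the macros defined above; the
   version check against a hypothetical 4.19 kernel is only an
   example:

     #ifdef __BPF_LITTLE_ENDIAN__
       // little-endian specific code ...
     #endif

     #if __BPF_KERNEL_VERSION_CODE__ >= 0x41300   // >= Linux 4.19
       // use features available up to 4.19 ...
     #endif
*/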

/* Output assembly directives to switch to section NAME.  The section
   should have attributes as specified by FLAGS, which is a bit mask
   of the 'SECTION_*' flags defined in 'output.h'.  If DECL is
   non-NULL, it is the 'VAR_DECL' or 'FUNCTION_DECL' with which this
   section is associated.  */

static void
bpf_asm_named_section (const char *name,
                       unsigned int flags ATTRIBUTE_UNUSED,
                       tree decl ATTRIBUTE_UNUSED)
{
  fprintf (asm_out_file, "\t.section\t%s\n", name);
}

#undef TARGET_ASM_NAMED_SECTION
#define TARGET_ASM_NAMED_SECTION bpf_asm_named_section

/* Return an RTX representing the place where a function returns or
   receives a value of data type RET_TYPE, a tree node representing a
   data type.  */

static rtx
bpf_function_value (const_tree ret_type,
                    const_tree fntype_or_decl,
                    bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  int unsignedp;

  mode = TYPE_MODE (ret_type);
  if (INTEGRAL_TYPE_P (ret_type))
    mode = promote_function_mode (ret_type, mode, &unsignedp,
                                  fntype_or_decl, 1);

  return gen_rtx_REG (mode, BPF_R0);
}

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE bpf_function_value

/* Return true if REGNO is the number of a hard register in which the
   values of a called function may come back.  */

static bool
bpf_function_value_regno_p (const unsigned int regno)
{
  return (regno == BPF_R0);
}

#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P bpf_function_value_regno_p

/* Compute the size of the function's stack frame, including the local
   area and the register-save area.  */

static void
bpf_compute_frame_layout (void)
{
  int stack_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
  int padding_locals, regno;

  /* Set the space used in the stack by local variables.  This is
     rounded up to respect the minimum stack alignment.  */
  cfun->machine->local_vars_size = get_frame_size ();

  padding_locals = cfun->machine->local_vars_size % stack_alignment;
  if (padding_locals)
    padding_locals = stack_alignment - padding_locals;

  cfun->machine->local_vars_size += padding_locals;

  /* Set the space used in the stack by the callee-saved registers
     that are used in the current function.  There is no need to round
     up, since the registers are all 8 bytes wide.  */
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if ((df_regs_ever_live_p (regno)
         && !call_used_or_fixed_reg_p (regno))
        || (cfun->calls_alloca
            && regno == STACK_POINTER_REGNUM))
      cfun->machine->callee_saved_reg_size += 8;

  /* Check that the total size of the frame doesn't exceed the limit
     imposed by eBPF.  */
  if ((cfun->machine->local_vars_size
       + cfun->machine->callee_saved_reg_size) > bpf_frame_limit)
    {
      static int stack_limit_exceeded = 0;

      if (!stack_limit_exceeded)
        error ("eBPF stack limit exceeded");
      stack_limit_exceeded = 1;
    }
}
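
/* A worked example of the computation above (illustrative only,
   assuming the default 512-byte eBPF frame limit): a function with 12
   bytes of locals and an 8-byte stack alignment gets local_vars_size
   rounded up to 16; if it also needs two callee-saved registers,
   callee_saved_reg_size is 16, for a total frame of 32 bytes, well
   under the limit.  */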

#undef TARGET_COMPUTE_FRAME_LAYOUT
#define TARGET_COMPUTE_FRAME_LAYOUT bpf_compute_frame_layout

/* Expand to the instructions in a function prologue.  This function
   is called when expanding the 'prologue' pattern in bpf.md.  */

void
bpf_expand_prologue (void)
{
  int regno, fp_offset;
  rtx insn;
  HOST_WIDE_INT size;

  size = (cfun->machine->local_vars_size
          + cfun->machine->callee_saved_reg_size);
  fp_offset = -cfun->machine->local_vars_size;

  /* Save callee-saved hard registers.  The register save area starts
     right after the local variables.  */
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    {
      if ((df_regs_ever_live_p (regno)
           && !call_used_or_fixed_reg_p (regno))
          || (cfun->calls_alloca
              && regno == STACK_POINTER_REGNUM))
        {
          rtx mem;

          if (!IN_RANGE (fp_offset, -1 - 0x7fff, 0x7fff))
            /* This has already been reported as an error in
               bpf_compute_frame_layout.  */
            break;
          else
            {
              mem = gen_frame_mem (DImode,
                                   plus_constant (DImode,
                                                  hard_frame_pointer_rtx,
                                                  fp_offset - 8));
              insn = emit_move_insn (mem, gen_rtx_REG (DImode, regno));
              RTX_FRAME_RELATED_P (insn) = 1;
              fp_offset -= 8;
            }
        }
    }

  /* Set the stack pointer, if the function allocates space
     dynamically.  Note that the value of %sp should be directly
     derived from %fp, for the kernel verifier to track it as a stack
     accessor.  */
  if (cfun->calls_alloca)
    {
      insn = emit_move_insn (stack_pointer_rtx,
                             hard_frame_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;

      if (size > 0)
        {
          insn = emit_insn (gen_rtx_SET (stack_pointer_rtx,
                                         gen_rtx_PLUS (Pmode,
                                                       stack_pointer_rtx,
                                                       GEN_INT (-size))));
          RTX_FRAME_RELATED_P (insn) = 1;
        }
    }
}

/* Expand to the instructions in a function epilogue.  This function
   is called when expanding the 'epilogue' pattern in bpf.md.  */

void
bpf_expand_epilogue (void)
{
  int regno, fp_offset;
  rtx insn;

  fp_offset = -cfun->machine->local_vars_size;

  /* Restore callee-saved hard registers from the stack.  */
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    {
      if ((df_regs_ever_live_p (regno)
           && !call_used_or_fixed_reg_p (regno))
          || (cfun->calls_alloca
              && regno == STACK_POINTER_REGNUM))
        {
          rtx mem;

          if (!IN_RANGE (fp_offset, -1 - 0x7fff, 0x7fff))
            /* This has already been reported as an error in
               bpf_compute_frame_layout.  */
            break;
          else
            {
              mem = gen_frame_mem (DImode,
                                   plus_constant (DImode,
                                                  hard_frame_pointer_rtx,
                                                  fp_offset - 8));
              insn = emit_move_insn (gen_rtx_REG (DImode, regno), mem);
              RTX_FRAME_RELATED_P (insn) = 1;
              fp_offset -= 8;
            }
        }
    }

  emit_jump_insn (gen_exit ());
}

/* Return the initial difference between the specified pair of
   registers.  The registers that may appear as FROM and TO are
   specified by ELIMINABLE_REGS in bpf.h.

   This function is used in the definition of
   INITIAL_ELIMINATION_OFFSET in bpf.h.  */

HOST_WIDE_INT
bpf_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT ret;

  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    ret = (cfun->machine->local_vars_size
           + cfun->machine->callee_saved_reg_size);
  else if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    ret = 0;
  else
    gcc_unreachable ();

  return ret;
}

/* Return the number of consecutive hard registers, starting at
   register number REGNO, required to hold a value of mode MODE.  */

static unsigned int
bpf_hard_regno_nregs (unsigned int regno ATTRIBUTE_UNUSED,
                      enum machine_mode mode)
{
  return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
}

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS bpf_hard_regno_nregs

/* Return true if it is permissible to store a value of mode MODE in
   hard register number REGNO, or in several registers starting with
   that one.  */

static bool
bpf_hard_regno_mode_ok (unsigned int regno ATTRIBUTE_UNUSED,
                        enum machine_mode mode)
{
  switch (mode)
    {
    case E_SImode:
    case E_DImode:
    case E_HImode:
    case E_QImode:
    case E_TImode:
    case E_SFmode:
    case E_DFmode:
      return true;
    default:
      return false;
    }
}

#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK bpf_hard_regno_mode_ok

/* Return true if a function must have and use a frame pointer.  */

static bool
bpf_frame_pointer_required (void)
{
  /* We do not have a stack pointer, so we absolutely depend on the
     frame-pointer in order to access the stack... and fishes walk and
     pigs fly glglgl */
  return true;
}

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED bpf_frame_pointer_required

/* Return `true' if the given RTX X is a valid base for an indirect
   memory access.  STRICT has the same meaning as in
   bpf_legitimate_address_p.  */

static inline bool
bpf_address_base_p (rtx x, bool strict)
{
  return (GET_CODE (x) == REG
          && (REGNO (x) < 11
              || (!strict && REGNO (x) >= FIRST_PSEUDO_REGISTER)));
}

/* Return true if X (a RTX) is a legitimate memory address on the
   target machine for a memory operand of mode MODE.  */

static bool
bpf_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
                          rtx x,
                          bool strict)
{
  switch (GET_CODE (x))
    {
    case REG:
      return bpf_address_base_p (x, strict);

    case PLUS:
      {
        /* Accept (PLUS ADDR_BASE CONST_INT), provided CONST_INT fits
           in a signed 16-bit offset.

           Note that LABEL_REF and SYMBOL_REF are not allowed in
           REG+IMM addresses, because it is almost certain they will
           overflow the offset field.  */

        rtx x0 = XEXP (x, 0);
        rtx x1 = XEXP (x, 1);

        if (bpf_address_base_p (x0, strict) && GET_CODE (x1) == CONST_INT)
          return IN_RANGE (INTVAL (x1), -1 - 0x7fff, 0x7fff);

        break;
      }
    default:
      break;
    }

  return false;
}
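
/* For illustration (not part of the address machinery itself), the
   predicate above accepts addresses such as

     (reg:DI %r1)
     (plus:DI (reg:DI %r1) (const_int 8))
     (plus:DI (reg:DI %r10) (const_int -16))

   while it rejects a REG+REG sum, a bare SYMBOL_REF, or an offset
   outside the signed 16-bit range, e.g.
   (plus:DI (reg:DI %r1) (const_int 40000)).  */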

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P bpf_legitimate_address_p

/* Describe the relative costs of RTL expressions.  Return true when
   all subexpressions of X have been processed, and false when
   `rtx_cost' should recurse.  */

static bool
bpf_rtx_costs (rtx x ATTRIBUTE_UNUSED,
               enum machine_mode mode ATTRIBUTE_UNUSED,
               int outer_code ATTRIBUTE_UNUSED,
               int opno ATTRIBUTE_UNUSED,
               int *total ATTRIBUTE_UNUSED,
               bool speed ATTRIBUTE_UNUSED)
{
  /* To be written.  */
  return false;
}

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS bpf_rtx_costs

/* Return true if an argument at the position indicated by CUM should
   be passed by reference.  If the hook returns true, a copy of that
   argument is made in memory and a pointer to the argument is passed
   instead of the argument itself.  */

static bool
bpf_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
                       const function_arg_info &arg)
{
  unsigned num_bytes = arg.type_size_in_bytes ();

  /* Pass aggregates and values bigger than 5 words by reference.
     Everything else is passed by copy.  */
  return (arg.aggregate_type_p () || (num_bytes > 8*5));
}
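
/* To illustrate the rule above (purely an example, not part of the
   hook): with 8-byte words, a 48-byte struct or any aggregate is
   passed by reference, i.e. the caller makes a copy in its frame and
   passes a pointer to it, while a plain unsigned long long (8 bytes)
   is passed by value in a register.  */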

#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE bpf_pass_by_reference

/* Return a RTX indicating whether a function argument is passed in a
   register and if so, which register.  */

static rtx
bpf_function_arg (cumulative_args_t ca, const function_arg_info &arg)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (ca);

  if (*cum < 5)
    return gen_rtx_REG (arg.mode, *cum + 1);
  else
    /* An error will be emitted for this in
       bpf_function_arg_advance.  */
    return NULL_RTX;
}

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG bpf_function_arg

/* Update the summarizer variable pointed by CA to advance past an
   argument in the argument list.  */

static void
bpf_function_arg_advance (cumulative_args_t ca,
                          const function_arg_info &arg)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (ca);
  unsigned num_bytes = arg.type_size_in_bytes ();
  unsigned num_words = CEIL (num_bytes, UNITS_PER_WORD);

  if (*cum <= 5 && *cum + num_words > 5)
    error ("too many function arguments for eBPF");

  *cum += num_words;
}
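
/* As an illustrative example of the two hooks above (not part of the
   port): for a C declaration like

     int f (int a, long b, void *c);

   the arguments are passed in %r1, %r2 and %r3 respectively, since
   each fits in one 8-byte word.  A sixth word-sized argument would
   not fit in %r1-%r5 and triggers the "too many function arguments
   for eBPF" error above.  */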

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE bpf_function_arg_advance

/* Output the assembly code for a constructor.  Since eBPF doesn't
   support indirect calls, constructors are not supported.  */

static void
bpf_output_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  tree decl = SYMBOL_REF_DECL (symbol);

  if (decl)
    sorry_at (DECL_SOURCE_LOCATION (decl),
              "no constructors");
  else
    sorry ("no constructors");
}

#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR bpf_output_constructor

/* Output the assembly code for a destructor.  Since eBPF doesn't
   support indirect calls, destructors are not supported.  */

static void
bpf_output_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  tree decl = SYMBOL_REF_DECL (symbol);

  if (decl)
    sorry_at (DECL_SOURCE_LOCATION (decl),
              "no destructors");
  else
    sorry ("no destructors");
}

#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR bpf_output_destructor

/* Return the appropriate instruction to CALL to a function.  TARGET
   is an RTX denoting the address of the called function.

   The main purposes of this function are:
   - To reject indirect CALL instructions, which are not supported by
     eBPF.
   - To recognize calls to kernel helper functions and emit the
     corresponding CALL N instruction.

   This function is called from the expansion of the 'call' pattern in
   bpf.md.  */

const char *
bpf_output_call (rtx target)
{
  rtx xops[1];

  switch (GET_CODE (target))
    {
    case CONST_INT:
      output_asm_insn ("call\t%0", &target);
      break;
    case SYMBOL_REF:
      {
        const char *function_name = XSTR (target, 0);
        int code;

        if (strncmp (function_name, "__builtin_bpf_helper_", 21) == 0
            && ((code = bpf_helper_code (function_name + 21)) != 0))
          {
            xops[0] = GEN_INT (code);
            output_asm_insn ("call\t%0", xops);
          }
        else
          output_asm_insn ("call\t%0", &target);

        break;
      }
    default:
      error ("indirect calls are not supported by eBPF");
      output_asm_insn ("call 0", NULL);
      break;
    }

  return "";
}
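
/* For example (illustrative only; the helper numbers belong to the
   kernel ABI and the helper names come from bpf-helpers.def): a call
   to __builtin_bpf_helper_map_lookup_elem is emitted by the code
   above as a "call N" instruction, where N is the numeric code the
   kernel associates with that helper, while a call to an ordinary
   function defined in the program is emitted as a call to its
   symbol.  */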

/* Print an instruction operand.  This function is called in the macro
   PRINT_OPERAND defined in bpf.h */

void
bpf_print_operand (FILE *file, rtx op, int code ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case REG:
      fprintf (file, "%s", reg_names[REGNO (op)]);
      break;
    case MEM:
      output_address (GET_MODE (op), XEXP (op, 0));
      break;
    case CONST_DOUBLE:
      if (CONST_DOUBLE_HIGH (op))
        fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
                 CONST_DOUBLE_HIGH (op), CONST_DOUBLE_LOW (op));
      else if (CONST_DOUBLE_LOW (op) < 0)
        fprintf (file, HOST_WIDE_INT_PRINT_HEX, CONST_DOUBLE_LOW (op));
      else
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (op));
      break;
    default:
      output_addr_const (file, op);
    }
}

/* Print an operand which is an address.  This function should handle
   any legitimate address, as accepted by bpf_legitimate_address_p,
   and also addresses that are valid in CALL instructions.

   This function is called in the PRINT_OPERAND_ADDRESS macro defined
   in bpf.h.  */

void
bpf_print_operand_address (FILE *file, rtx addr)
{
  switch (GET_CODE (addr))
    {
    case REG:
      fprintf (file, "[%s+0]", reg_names[REGNO (addr)]);
      break;
    case PLUS:
      {
        rtx op0 = XEXP (addr, 0);
        rtx op1 = XEXP (addr, 1);

        if (GET_CODE (op0) == REG && GET_CODE (op1) == CONST_INT)
          {
            fprintf (file, "[%s+", reg_names[REGNO (op0)]);
            output_addr_const (file, op1);
            fputs ("]", file);
          }
        else
          fatal_insn ("invalid address in operand", addr);
        break;
      }
    case MEM:
      /* Fallthrough.  */
    case LABEL_REF:
      fatal_insn ("unsupported operand", addr);
      break;
    default:
      output_addr_const (file, addr);
      break;
    }
}
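
/* As an example of the output produced above (illustrative only):
   the RTX (mem:DI (plus:DI (reg:DI %r1) (const_int 8))) has its
   address printed as "[%r1+8]", and a bare register base is printed
   as "[%r1+0]".  */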

/* Add a BPF builtin function with NAME, CODE and TYPE.  Return
   the function decl or NULL_TREE if the builtin was not added.  */

static tree
def_builtin (const char *name, enum bpf_builtins code, tree type)
{
  tree t
    = add_builtin_function (name, type, code, BUILT_IN_MD, NULL, NULL_TREE);

  bpf_builtins[code] = t;
  return t;
}

/* Define machine-specific built-in functions.  */

static void
bpf_init_builtins (void)
{
  /* Built-ins for calling kernel helpers.  */

  tree pt = build_pointer_type (void_type_node);
  tree const_void_type
    = build_qualified_type (void_type_node, TYPE_QUAL_CONST);
  tree cpt = build_pointer_type (const_void_type);
  tree st = short_integer_type_node;
  tree ust = uint16_type_node;
  tree it = integer_type_node;
  tree ut = unsigned_type_node;
  tree const_char_type
    = build_qualified_type (char_type_node, TYPE_QUAL_CONST);
  tree cst = build_pointer_type (const_char_type);
  tree vt = void_type_node;
  tree ult = long_unsigned_type_node;
  tree u32t = uint32_type_node;
  tree u64t = uint64_type_node;
  tree llt = long_long_integer_type_node;
  tree ullt = long_long_unsigned_type_node;

#define TYPES build_function_type_list
#define VTYPES build_varargs_function_type_list
#define DEF_HELPER(V,D,N,T)                             \
  do                                                    \
    {                                                   \
      if (bpf_kernel >= (V))                            \
        def_builtin ("__builtin_bpf_helper_" #N,        \
                     BPF_BUILTIN_HELPER_##D,            \
                     T);                                \
    } while (0);
# include "bpf-helpers.def"
#undef TYPES
#undef VTYPES
#undef DEF_HELPER

  /* Built-ins for BPF_LD_ABS and BPF_LD_IND instructions.  */

  def_builtin ("__builtin_bpf_load_byte", BPF_BUILTIN_LOAD_BYTE,
               build_function_type_list (ullt, ullt, 0));
  def_builtin ("__builtin_bpf_load_half", BPF_BUILTIN_LOAD_HALF,
               build_function_type_list (ullt, ullt, 0));
  def_builtin ("__builtin_bpf_load_word", BPF_BUILTIN_LOAD_WORD,
               build_function_type_list (ullt, ullt, 0));
}
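
/* Usage sketch for the load built-ins defined above (illustrative
   only; whether such loads are accepted depends on the BPF program
   type and kernel):

     unsigned long long
     first_byte (void)
     {
       // Loads the byte at offset 0 of the context's packet data,
       // using a BPF_LD_ABS-style instruction; the result comes back
       // in %r0.
       return __builtin_bpf_load_byte (0);
     }
*/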

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS bpf_init_builtins

/* Expand a call to a BPF-specific built-in function that was set up
   with bpf_init_builtins.  */

static rtx
bpf_expand_builtin (tree exp, rtx target,
                    rtx subtarget ATTRIBUTE_UNUSED,
                    machine_mode mode ATTRIBUTE_UNUSED,
                    int ignore)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  int code = DECL_MD_FUNCTION_CODE (fndecl);

  if (code >= 1 && code < BPF_BUILTIN_HELPER_MAX)
    {
      /* This is a builtin to call a kernel helper function.

         For these builtins, we just expand the function call normally
         with expand_call like we would do for a libcall.  The function
         bpf_output_call above will then do The Right Thing (TM),
         recognizing the name of the called __builtin_bpf_helper_*
         symbol and emitting the corresponding CALL N instruction
         whenever necessary.  */

      return expand_call (exp, target, ignore);
    }
  else if (code == BPF_BUILTIN_LOAD_BYTE
           || code == BPF_BUILTIN_LOAD_HALF
           || code == BPF_BUILTIN_LOAD_WORD)
    {
      /* Expand an indirect load from the sk_buff in the context.
         There is just one argument to the builtin, which is the
         offset.

         We try first to expand a ldabs* instruction.  In case this
         fails, we try a ldind* instruction.  */

      enum insn_code abs_icode
        = (code == BPF_BUILTIN_LOAD_BYTE ? CODE_FOR_ldabsb
           : code == BPF_BUILTIN_LOAD_HALF ? CODE_FOR_ldabsh
           : CODE_FOR_ldabsw);

      enum insn_code ind_icode
        = (code == BPF_BUILTIN_LOAD_BYTE ? CODE_FOR_ldindb
           : code == BPF_BUILTIN_LOAD_HALF ? CODE_FOR_ldindh
           : CODE_FOR_ldindw);

      tree offset_arg = CALL_EXPR_ARG (exp, 0);
      struct expand_operand ops[2];

      create_input_operand (&ops[0], expand_normal (offset_arg),
                            TYPE_MODE (TREE_TYPE (offset_arg)));
      create_input_operand (&ops[1], const0_rtx, SImode);

      if (!maybe_expand_insn (abs_icode, 2, ops)
          && !maybe_expand_insn (ind_icode, 2, ops))
        {
          error ("invalid argument to built-in function");
          return gen_rtx_REG (ops[0].mode, BPF_R0);
        }

      /* The result of the load is in R0.  */
      return gen_rtx_REG (ops[0].mode, BPF_R0);
    }

  gcc_unreachable ();
}

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN bpf_expand_builtin

/* Initialize target-specific function library calls.  This is mainly
   used to call library-provided soft-fp operations, since eBPF
   doesn't support floating-point in "hardware".  */

static void
bpf_init_libfuncs (void)
{
  set_conv_libfunc (sext_optab, DFmode, SFmode,
                    "__bpf_extendsfdf2");
  set_conv_libfunc (trunc_optab, SFmode, DFmode,
                    "__bpf_truncdfsf2");
  set_conv_libfunc (sfix_optab, SImode, DFmode,
                    "__bpf_fix_truncdfsi");
  set_conv_libfunc (sfloat_optab, DFmode, SImode,
                    "__bpf_floatsidf");
  set_conv_libfunc (ufloat_optab, DFmode, SImode,
                    "__bpf_floatunsidf");
}
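
/* For instance (illustrative; the routine itself must come from the
   program or a support library, not from this file): with the
   conversions above registered, a C statement like

     double d = some_float;

   is expanded as a call to __bpf_extendsfdf2.  */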

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS bpf_init_libfuncs

/* Define the mechanism that will be used for describing frame unwind
   information to the debugger.  In eBPF it is not possible to unwind
   frames.  */

static enum unwind_info_type
bpf_debug_unwind_info ()
{
  return UI_NONE;
}

#undef TARGET_DEBUG_UNWIND_INFO
#define TARGET_DEBUG_UNWIND_INFO bpf_debug_unwind_info

/* Output assembly directives to assemble data of various sizes and
   alignments.  */

#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\t.byte\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"

/* Finally, build the GCC target.  */

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-bpf.h"