/* Subroutines used for code generation for eBPF.
   Copyright (C) 2019-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "recog.h"
#include "output.h"
#include "alias.h"
#include "tree.h"
#include "stringpool.h"
#include "attribs.h"
#include "varasm.h"
#include "stor-layout.h"
#include "calls.h"
#include "function.h"
#include "explow.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "reload.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "basic-block.h"
#include "expr.h"
#include "optabs.h"
#include "bitmap.h"
#include "df.h"
#include "c-family/c-common.h"
#include "diagnostic.h"
#include "builtins.h"
#include "predict.h"
#include "langhooks.h"
#include "flags.h"

#include "cfg.h" /* needed for struct control_flow_graph used in BB macros */
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-pass.h"
#include "tree-iterator.h"

#include "context.h"
#include "pass_manager.h"

#include "gimplify.h"
#include "gimplify-me.h"

#include "ctfc.h"
#include "btf.h"

#include "coreout.h"

/* Per-function machine data.  */
struct GTY(()) machine_function
{
  /* Number of bytes saved on the stack for local variables.  */
  int local_vars_size;

  /* Number of bytes saved on the stack for callee-saved
     registers.  */
  int callee_saved_reg_size;
};

/* Handle an attribute requiring a FUNCTION_DECL;
   arguments as in struct attribute_spec.handler.  */

static tree
bpf_handle_fndecl_attribute (tree *node, tree name,
                             tree args,
                             int flags ATTRIBUTE_UNUSED,
                             bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }

  if (is_attribute_p ("kernel_helper", name))
    {
      if (args)
        {
          tree cst = TREE_VALUE (args);
          if (TREE_CODE (cst) != INTEGER_CST)
            {
              warning (OPT_Wattributes,
                       "%qE attribute requires an integer argument", name);
              *no_add_attrs = true;
            }
        }
      else
        {
          warning (OPT_Wattributes, "%qE requires an argument", name);
          *no_add_attrs = true;
        }
    }

  return NULL_TREE;
}

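/* For instance (an illustrative declaration; helper numbers live in the
   Linux UAPI, where the map-update helper is number 2), a program can
   declare:

     void *bpf_map_update_elem (void *map, void *key, void *value,
                                unsigned long long flags)
       __attribute__ ((kernel_helper (2)));

   Calls to this function are then emitted as `call 2' by
   bpf_output_call below.  */
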
/* Handle preserve_access_index attribute, which can be applied to structs,
   unions and classes.  Actually adding the attribute to the TYPE_DECL is
   taken care of for us, so just warn for types that aren't supported.  */

static tree
bpf_handle_preserve_access_index_attribute (tree *node, tree name,
                                            tree args ATTRIBUTE_UNUSED,
                                            int flags ATTRIBUTE_UNUSED,
                                            bool *no_add_attrs)
{
  if (TREE_CODE (*node) != RECORD_TYPE && TREE_CODE (*node) != UNION_TYPE)
    {
      warning (OPT_Wattributes,
               "%qE attribute only applies to structure, union and class types",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

/* Target-specific attributes.  */

static const struct attribute_spec bpf_attribute_table[] =
{
  /* Syntax: { name, min_len, max_len, decl_required, type_required,
               function_type_required, affects_type_identity, handler,
               exclude } */

  /* Attribute to mark function prototypes as kernel helpers.  */
  { "kernel_helper", 1, 1, true, false, false, false,
    bpf_handle_fndecl_attribute, NULL },

  /* CO-RE support: attribute to mark that all accesses to the declared
     struct/union/array should be recorded.  */
  { "preserve_access_index", 0, -1, false, true, false, true,
    bpf_handle_preserve_access_index_attribute, NULL },

  /* The last attribute spec is set to be NULL.  */
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};

#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE bpf_attribute_table

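/* An illustrative use of the type attribute (hypothetical user code):

     struct task_info
     {
       int pid;
       int tgid;
     } __attribute__ ((preserve_access_index));

   Every access like `ti->tgid' on a `struct task_info *ti' is then
   recorded as a CO-RE relocation by the pass_bpf_core_attr pass below,
   without wrapping the access in __builtin_preserve_access_index.  */
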
/* Data structures for the eBPF specific built-ins.  */

/* Maximum number of arguments taken by a builtin function, plus
   one.  */
#define BPF_BUILTIN_MAX_ARGS 5

enum bpf_builtins
{
  BPF_BUILTIN_UNUSED = 0,
  /* Built-ins for non-generic loads and stores.  */
  BPF_BUILTIN_LOAD_BYTE,
  BPF_BUILTIN_LOAD_HALF,
  BPF_BUILTIN_LOAD_WORD,

  /* Compile Once - Run Everywhere (CO-RE) support.  */
  BPF_BUILTIN_PRESERVE_ACCESS_INDEX,
  BPF_BUILTIN_PRESERVE_FIELD_INFO,

  BPF_BUILTIN_MAX,
};

static GTY (()) tree bpf_builtins[(int) BPF_BUILTIN_MAX];

void bpf_register_coreattr_pass (void);

/* Initialize the per-function machine status.  */

static struct machine_function *
bpf_init_machine_status (void)
{
  /* Note this initializes all fields to 0, which is just OK for
     us.  */
  return ggc_cleared_alloc<machine_function> ();
}

/* Override options and do some other initialization.  */

static void
bpf_option_override (void)
{
  /* Set the initializer for the per-function status structure.  */
  init_machine_status = bpf_init_machine_status;

  /* BPF CO-RE support requires BTF debug info generation.  */
  if (TARGET_BPF_CORE && !btf_debuginfo_p ())
    error ("BPF CO-RE requires BTF debugging information, use %<-gbtf%>");

  /* To support the portability needs of the BPF CO-RE approach, BTF debug
     information includes the BPF CO-RE relocations.  */
  if (TARGET_BPF_CORE)
    write_symbols |= BTF_WITH_CORE_DEBUG;

  /* Unlike much of the other BTF debug information, the information necessary
     for CO-RE relocations is added to the CTF container by the BPF backend.
     Enabling LTO adds some complications in the generation of the BPF CO-RE
     relocations because if LTO is in effect, the relocations need to be
     generated late in the LTO link phase.  This poses a new challenge for the
     compiler to now provide means to combine the early BTF and late BTF CO-RE
     debug info, similar to DWARF debug info.  BTF/CO-RE debug info is not
     amenable to such a split generation and a later merging.

     In any case, in absence of linker support for BTF sections at this time,
     it is acceptable to simply disallow LTO for BPF CO-RE compilations.  */

  if (flag_lto && TARGET_BPF_CORE)
    sorry ("BPF CO-RE does not support LTO");

  /* -gbtf implies -mco-re when using the BPF backend, unless -mno-co-re
     is specified.  */
  if (btf_debuginfo_p () && !(target_flags_explicit & MASK_BPF_CORE))
    {
      target_flags |= MASK_BPF_CORE;
      write_symbols |= BTF_WITH_CORE_DEBUG;
    }

  /* Determine available features from ISA setting (-mcpu=).  */
  if (bpf_has_jmpext == -1)
    bpf_has_jmpext = (bpf_isa >= ISA_V2);

  if (bpf_has_alu32 == -1)
    bpf_has_alu32 = (bpf_isa >= ISA_V3);

  if (bpf_has_jmp32 == -1)
    bpf_has_jmp32 = (bpf_isa >= ISA_V3);
}

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE bpf_option_override

/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
bpf_asm_init_sections (void)
{
  if (TARGET_BPF_CORE)
    btf_ext_init ();
}

#undef TARGET_ASM_INIT_SECTIONS
#define TARGET_ASM_INIT_SECTIONS bpf_asm_init_sections

/* Implement TARGET_ASM_FILE_END.  */

static void
bpf_file_end (void)
{
  if (TARGET_BPF_CORE)
    btf_ext_output ();
}

#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END bpf_file_end

/* Define target-specific CPP macros.  This function is used in the
   definition of TARGET_CPU_CPP_BUILTINS in bpf.h.  */

#define builtin_define(TXT) cpp_define (pfile, TXT)

void
bpf_target_macros (cpp_reader *pfile)
{
  builtin_define ("__BPF__");
  builtin_define ("__bpf__");

  if (TARGET_BIG_ENDIAN)
    builtin_define ("__BPF_BIG_ENDIAN__");
  else
    builtin_define ("__BPF_LITTLE_ENDIAN__");

  /* Define __BPF_KERNEL_VERSION_CODE__.  */
  {
    const char *version_code;
    char *kernel_version_code;

    switch (bpf_kernel)
      {
      case LINUX_V4_0: version_code = "0x40000"; break;
      case LINUX_V4_1: version_code = "0x40100"; break;
      case LINUX_V4_2: version_code = "0x40200"; break;
      case LINUX_V4_3: version_code = "0x40300"; break;
      case LINUX_V4_4: version_code = "0x40400"; break;
      case LINUX_V4_5: version_code = "0x40500"; break;
      case LINUX_V4_6: version_code = "0x40600"; break;
      case LINUX_V4_7: version_code = "0x40700"; break;
      case LINUX_V4_8: version_code = "0x40800"; break;
      case LINUX_V4_9: version_code = "0x40900"; break;
      case LINUX_V4_10: version_code = "0x40a00"; break;
      case LINUX_V4_11: version_code = "0x40b00"; break;
      case LINUX_V4_12: version_code = "0x40c00"; break;
      case LINUX_V4_13: version_code = "0x40d00"; break;
      case LINUX_V4_14: version_code = "0x40e00"; break;
      case LINUX_V4_15: version_code = "0x40f00"; break;
      case LINUX_V4_16: version_code = "0x41000"; break;
      case LINUX_V4_17: version_code = "0x41100"; break;
      case LINUX_V4_18: version_code = "0x41200"; break;
      case LINUX_V4_19: version_code = "0x41300"; break;
      case LINUX_V4_20: version_code = "0x41400"; break;
      case LINUX_V5_0: version_code = "0x50000"; break;
      case LINUX_V5_1: version_code = "0x50100"; break;
      case LINUX_V5_2: version_code = "0x50200"; break;
      default:
        gcc_unreachable ();
      }

    kernel_version_code = ACONCAT (("__BPF_KERNEL_VERSION_CODE__=",
                                    version_code, NULL));
    builtin_define (kernel_version_code);
  }
}

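/* A sanity check of the encoding, which mirrors the Linux KERNEL_VERSION
   macro: the version code is (major << 16) + (minor << 8), so for
   example kernel version 4.19, selected with -mkernel, yields
   (4 << 16) + (19 << 8) = 0x40000 + 0x1300 = 0x41300.  */
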
/* Return an RTX representing the place where a function returns or
   receives a value of data type RET_TYPE, a tree node representing a
   data type.  */

static rtx
bpf_function_value (const_tree ret_type,
                    const_tree fntype_or_decl,
                    bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  int unsignedp;

  mode = TYPE_MODE (ret_type);
  if (INTEGRAL_TYPE_P (ret_type))
    mode = promote_function_mode (ret_type, mode, &unsignedp,
                                  fntype_or_decl, 1);

  return gen_rtx_REG (mode, BPF_R0);
}

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE bpf_function_value

/* Return true if REGNO is the number of a hard register in which the
   values of a called function may come back.  */

static bool
bpf_function_value_regno_p (const unsigned int regno)
{
  return (regno == BPF_R0);
}

#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P bpf_function_value_regno_p

/* Compute the size of the function's stack frame, including the local
   area and the register-save area.  */

static void
bpf_compute_frame_layout (void)
{
  int stack_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
  int padding_locals, regno;

  /* Set the space used in the stack by local variables.  This is
     rounded up to respect the minimum stack alignment.  */
  cfun->machine->local_vars_size = get_frame_size ();

  padding_locals = cfun->machine->local_vars_size % stack_alignment;
  if (padding_locals)
    padding_locals = stack_alignment - padding_locals;

  cfun->machine->local_vars_size += padding_locals;

  if (TARGET_XBPF)
    {
      /* Set the space used in the stack by callee-saved used
         registers in the current function.  There is no need to round
         up, since the registers are all 8 bytes wide.  */
      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
        if ((df_regs_ever_live_p (regno)
             && !call_used_or_fixed_reg_p (regno))
            || (cfun->calls_alloca
                && regno == STACK_POINTER_REGNUM))
          cfun->machine->callee_saved_reg_size += 8;
    }

  /* Check that the total size of the frame doesn't exceed the limit
     imposed by eBPF.  */
  if ((cfun->machine->local_vars_size
       + cfun->machine->callee_saved_reg_size) > bpf_frame_limit)
    {
      static int stack_limit_exceeded = 0;

      if (!stack_limit_exceeded)
        error ("eBPF stack limit exceeded");
      stack_limit_exceeded = 1;
    }
}

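/* A worked example of the rounding above: with the eBPF STACK_BOUNDARY
   of 64 bits, stack_alignment is 8 bytes.  Locals occupying 13 bytes
   give padding_locals = 8 - (13 % 8) = 3, so local_vars_size is rounded
   up to 16 bytes.  */
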
#undef TARGET_COMPUTE_FRAME_LAYOUT
#define TARGET_COMPUTE_FRAME_LAYOUT bpf_compute_frame_layout

/* Expand to the instructions in a function prologue.  This function
   is called when expanding the 'prologue' pattern in bpf.md.  */

void
bpf_expand_prologue (void)
{
  HOST_WIDE_INT size;

  size = (cfun->machine->local_vars_size
          + cfun->machine->callee_saved_reg_size);

  /* The BPF "hardware" provides a fresh new set of registers for each
     called function, some of which are initialized to the values of
     the arguments passed in the first five registers.  In doing so,
     it saves the values of the registers of the caller, and restores
     them upon returning.  Therefore, there is no need to save the
     callee-saved registers here.  What is worse, the kernel
     implementation refuses to run programs in which registers are
     referred to before being initialized.  */
  if (TARGET_XBPF)
    {
      int regno;
      int fp_offset = -cfun->machine->local_vars_size;

      /* Save callee-saved hard registers.  The register-save-area
         starts right after the local variables.  */
      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
        {
          if ((df_regs_ever_live_p (regno)
               && !call_used_or_fixed_reg_p (regno))
              || (cfun->calls_alloca
                  && regno == STACK_POINTER_REGNUM))
            {
              rtx mem;

              if (!IN_RANGE (fp_offset, -1 - 0x7fff, 0x7fff))
                /* This has been already reported as an error in
                   bpf_compute_frame_layout.  */
                break;
              else
                {
                  mem = gen_frame_mem (DImode,
                                       plus_constant (DImode,
                                                      hard_frame_pointer_rtx,
                                                      fp_offset - 8));
                  emit_move_insn (mem, gen_rtx_REG (DImode, regno));
                  fp_offset -= 8;
                }
            }
        }
    }

  /* Set the stack pointer, if the function allocates space
     dynamically.  Note that the value of %sp should be directly
     derived from %fp, for the kernel verifier to track it as a stack
     accessor.  */
  if (cfun->calls_alloca)
    {
      emit_move_insn (stack_pointer_rtx,
                      hard_frame_pointer_rtx);

      if (size > 0)
        {
          emit_insn (gen_rtx_SET (stack_pointer_rtx,
                                  gen_rtx_PLUS (Pmode,
                                                stack_pointer_rtx,
                                                GEN_INT (-size))));
        }
    }
}

/* Expand to the instructions in a function epilogue.  This function
   is called when expanding the 'epilogue' pattern in bpf.md.  */

void
bpf_expand_epilogue (void)
{
  /* See note in bpf_expand_prologue for an explanation on why we are
     not restoring callee-saved registers in BPF.  */
  if (TARGET_XBPF)
    {
      int regno;
      int fp_offset = -cfun->machine->local_vars_size;

      /* Restore callee-saved hard registers from the stack.  */
      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
        {
          if ((df_regs_ever_live_p (regno)
               && !call_used_or_fixed_reg_p (regno))
              || (cfun->calls_alloca
                  && regno == STACK_POINTER_REGNUM))
            {
              rtx mem;

              if (!IN_RANGE (fp_offset, -1 - 0x7fff, 0x7fff))
                /* This has been already reported as an error in
                   bpf_compute_frame_layout.  */
                break;
              else
                {
                  mem = gen_frame_mem (DImode,
                                       plus_constant (DImode,
                                                      hard_frame_pointer_rtx,
                                                      fp_offset - 8));
                  emit_move_insn (gen_rtx_REG (DImode, regno), mem);
                  fp_offset -= 8;
                }
            }
        }
    }

  emit_jump_insn (gen_exit ());
}

/* Expand to the instructions for a conditional branch.  This function
   is called when expanding the 'cbranch<mode>4' pattern in bpf.md.  */

void
bpf_expand_cbranch (machine_mode mode, rtx *operands)
{
  /* If all jump instructions are available, nothing special to do here.  */
  if (bpf_has_jmpext)
    return;

  enum rtx_code code = GET_CODE (operands[0]);

  /* Without the conditional branch instructions jslt, jsle, jlt, jle, we
     need to convert conditional branches that would use them to an
     available operation instead, by swapping the comparison and its
     operands.  */
  if (code == LT || code == LE || code == LTU || code == LEU)
    {
      /* Swap the sense of the comparison.  Note that swap_condition,
         not reverse_condition, is what preserves the meaning of a
         comparison when its operands are interchanged: a < b is
         equivalent to b > a, not to b >= a.  */
      PUT_CODE (operands[0], swap_condition (code));

      /* Swap the operands, and ensure that the first is a register.  */
      if (!register_operand (operands[2], mode))
        operands[2] = force_reg (mode, operands[2]);

      rtx tmp = operands[1];
      operands[1] = operands[2];
      operands[2] = tmp;
    }
}

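/* For instance (an informal sketch): with -mcpu=v1, where jlt is not
   available, the branch `if (r1 < r2) goto L' is rewritten as
   `if (r2 > r1) goto L', which can be emitted with the always-available
   jgt instruction.  */
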
/* Return the initial difference between the specified pair of
   registers.  The registers that can figure in FROM and TO are
   specified by ELIMINABLE_REGS in bpf.h.

   This function is used in the definition of
   INITIAL_ELIMINATION_OFFSET in bpf.h.  */

HOST_WIDE_INT
bpf_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT ret;

  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    ret = (cfun->machine->local_vars_size
           + cfun->machine->callee_saved_reg_size);
  else if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    ret = 0;
  else
    gcc_unreachable ();

  return ret;
}

/* Return the number of consecutive hard registers, starting at
   register number REGNO, required to hold a value of mode MODE.  */

static unsigned int
bpf_hard_regno_nregs (unsigned int regno ATTRIBUTE_UNUSED,
                      enum machine_mode mode)
{
  return CEIL (GET_MODE_SIZE (mode), UNITS_PER_WORD);
}

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS bpf_hard_regno_nregs

/* Return true if it is permissible to store a value of mode MODE in
   hard register number REGNO, or in several registers starting with
   that one.  */

static bool
bpf_hard_regno_mode_ok (unsigned int regno ATTRIBUTE_UNUSED,
                        enum machine_mode mode)
{
  switch (mode)
    {
    case E_SImode:
    case E_DImode:
    case E_HImode:
    case E_QImode:
    case E_TImode:
    case E_SFmode:
    case E_DFmode:
      return true;
    default:
      return false;
    }
}

#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK bpf_hard_regno_mode_ok

/* Return true if a function must have and use a frame pointer.  */

static bool
bpf_frame_pointer_required (void)
{
  /* We do not have a stack pointer, so we absolutely depend on the
     frame-pointer in order to access the stack... and fishes walk and
     pigs fly glglgl */
  return true;
}

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED bpf_frame_pointer_required

/* Return `true' if the given RTX X is a valid base for an indirect
   memory access.  STRICT has the same meaning as in
   bpf_legitimate_address_p.  */

static inline bool
bpf_address_base_p (rtx x, bool strict)
{
  return (GET_CODE (x) == REG
          && (REGNO (x) < 11
              || (!strict && REGNO (x) >= FIRST_PSEUDO_REGISTER)));
}

/* Return true if X (a RTX) is a legitimate memory address on the
   target machine for a memory operand of mode MODE.  */

static bool
bpf_legitimate_address_p (machine_mode mode,
                          rtx x,
                          bool strict)
{
  switch (GET_CODE (x))
    {
    case CONST_INT:
      return (mode == FUNCTION_MODE);

    case REG:
      return bpf_address_base_p (x, strict);

    case PLUS:
      {
        /* Accept (PLUS ADDR_BASE CONST_INT), provided CONST_INT fits
           in a signed 16-bit.

           Note that LABEL_REF and SYMBOL_REF are not allowed in
           REG+IMM addresses, because it is almost certain they will
           overload the offset field.  */

        rtx x0 = XEXP (x, 0);
        rtx x1 = XEXP (x, 1);

        if (bpf_address_base_p (x0, strict) && GET_CODE (x1) == CONST_INT)
          return IN_RANGE (INTVAL (x1), -1 - 0x7fff, 0x7fff);

        break;
      }
    default:
      break;
    }

  return false;
}

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P bpf_legitimate_address_p

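/* In other words (an informal summary): apart from the CONST_INT case,
   which is only used for CALL instruction addresses, the single
   addressing mode is register plus signed 16-bit immediate, matching
   the offset field of the eBPF load/store instruction encoding.  An
   address like `(plus %r2 8)' is legitimate, while an offset of 0x8000,
   outside [-0x8000, 0x7fff], must first be added into a register.  */
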
/* Describe the relative costs of RTL expressions.  Return true when
   all subexpressions of X have been processed, and false when
   `rtx_cost' should recurse.  */

static bool
bpf_rtx_costs (rtx x ATTRIBUTE_UNUSED,
               enum machine_mode mode ATTRIBUTE_UNUSED,
               int outer_code ATTRIBUTE_UNUSED,
               int opno ATTRIBUTE_UNUSED,
               int *total ATTRIBUTE_UNUSED,
               bool speed ATTRIBUTE_UNUSED)
{
  /* To be written.  */
  return false;
}

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS bpf_rtx_costs

/* Return true if an argument at the position indicated by CUM should
   be passed by reference.  If the hook returns true, a copy of that
   argument is made in memory and a pointer to the argument is passed
   instead of the argument itself.  */

static bool
bpf_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
                       const function_arg_info &arg)
{
  unsigned num_bytes = arg.type_size_in_bytes ();

  /* Pass aggregates and values bigger than 5 words by reference.
     Everything else is passed by copy.  */
  return (arg.aggregate_type_p () || (num_bytes > 8 * 5));
}

#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE bpf_pass_by_reference

/* Return a RTX indicating whether a function argument is passed in a
   register and if so, which register.  */

static rtx
bpf_function_arg (cumulative_args_t ca, const function_arg_info &arg)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (ca);

  if (*cum < 5)
    return gen_rtx_REG (arg.mode, *cum + 1);
  else
    /* An error will be emitted for this in
       bpf_function_arg_advance.  */
    return NULL_RTX;
}

#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG bpf_function_arg

/* Update the summarizer variable pointed to by CA to advance past an
   argument in the argument list.  */

static void
bpf_function_arg_advance (cumulative_args_t ca,
                          const function_arg_info &arg)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (ca);
  unsigned num_bytes = arg.type_size_in_bytes ();
  unsigned num_words = CEIL (num_bytes, UNITS_PER_WORD);

  if (*cum <= 5 && *cum + num_words > 5)
    error ("too many function arguments for eBPF");

  *cum += num_words;
}

#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE bpf_function_arg_advance

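/* For instance (an informal example): for `int f (long a, long b)', a
   is passed in %r1 and b in %r2.  Arguments are counted in 8-byte
   words, so a two-word argument such as an __int128 arriving when four
   words are already in use would need words 5 and 6, triggering the
   "too many function arguments" error above.  */
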
/* Output the assembly code for a constructor.  Since eBPF doesn't
   support indirect calls, constructors are not supported.  */

static void
bpf_output_constructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  tree decl = SYMBOL_REF_DECL (symbol);

  if (decl)
    sorry_at (DECL_SOURCE_LOCATION (decl),
              "no constructors");
  else
    sorry ("no constructors");
}

#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR bpf_output_constructor

/* Output the assembly code for a destructor.  Since eBPF doesn't
   support indirect calls, destructors are not supported.  */

static void
bpf_output_destructor (rtx symbol, int priority ATTRIBUTE_UNUSED)
{
  tree decl = SYMBOL_REF_DECL (symbol);

  if (decl)
    sorry_at (DECL_SOURCE_LOCATION (decl),
              "no destructors");
  else
    sorry ("no destructors");
}

#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR bpf_output_destructor

/* Return the appropriate instruction to CALL to a function.  TARGET
   is an RTX denoting the address of the called function.

   The main purposes of this function are:
   - To reject indirect CALL instructions, which are not supported by
     eBPF.
   - To recognize calls to kernel helper functions and emit the
     corresponding CALL N instruction.

   This function is called from the expansion of the 'call' pattern in
   bpf.md.  */

const char *
bpf_output_call (rtx target)
{
  rtx xops[1];

  switch (GET_CODE (target))
    {
    case CONST_INT:
      output_asm_insn ("call\t%0", &target);
      break;
    case SYMBOL_REF:
      {
        tree decl = SYMBOL_REF_DECL (target);
        tree attr;

        if (decl
            && (attr = lookup_attribute ("kernel_helper",
                                         DECL_ATTRIBUTES (decl))))
          {
            tree attr_args = TREE_VALUE (attr);

            xops[0] = GEN_INT (TREE_INT_CST_LOW (TREE_VALUE (attr_args)));
            output_asm_insn ("call\t%0", xops);
          }
        else
          output_asm_insn ("call\t%0", &target);

        break;
      }
    default:
      if (TARGET_XBPF)
        output_asm_insn ("call\t%0", &target);
      else
        {
          error ("indirect call in function, which is not supported by eBPF");
          output_asm_insn ("call 0", NULL);
        }
      break;
    }

  return "";
}

/* Print an instruction operand.  This function is called in the macro
   PRINT_OPERAND defined in bpf.h.  */

void
bpf_print_operand (FILE *file, rtx op, int code ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case REG:
      fprintf (file, "%s", reg_names[REGNO (op)]);
      break;
    case MEM:
      output_address (GET_MODE (op), XEXP (op, 0));
      break;
    case CONST_DOUBLE:
      if (CONST_DOUBLE_HIGH (op))
        fprintf (file, HOST_WIDE_INT_PRINT_DOUBLE_HEX,
                 CONST_DOUBLE_HIGH (op), CONST_DOUBLE_LOW (op));
      else if (CONST_DOUBLE_LOW (op) < 0)
        fprintf (file, HOST_WIDE_INT_PRINT_HEX, CONST_DOUBLE_LOW (op));
      else
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (op));
      break;
    default:
      output_addr_const (file, op);
    }
}

/* Print an operand which is an address.  This function should handle
   any legitimate address, as accepted by bpf_legitimate_address_p, and
   also addresses that are valid in CALL instructions.

   This function is called in the PRINT_OPERAND_ADDRESS macro defined
   in bpf.h.  */

void
bpf_print_operand_address (FILE *file, rtx addr)
{
  switch (GET_CODE (addr))
    {
    case REG:
      fprintf (file, "[%s+0]", reg_names[REGNO (addr)]);
      break;
    case PLUS:
      {
        rtx op0 = XEXP (addr, 0);
        rtx op1 = XEXP (addr, 1);

        if (GET_CODE (op0) == REG && GET_CODE (op1) == CONST_INT)
          {
            fprintf (file, "[%s+", reg_names[REGNO (op0)]);
            output_addr_const (file, op1);
            fputs ("]", file);
          }
        else
          fatal_insn ("invalid address in operand", addr);
        break;
      }
    case MEM:
      /* Fallthrough.  */
    case LABEL_REF:
      fatal_insn ("unsupported operand", addr);
      break;
    default:
      output_addr_const (file, addr);
      break;
    }
}

/* Add a BPF builtin function with NAME, CODE and TYPE.  Return
   the function decl or NULL_TREE if the builtin was not added.  */

static tree
def_builtin (const char *name, enum bpf_builtins code, tree type)
{
  tree t
    = add_builtin_function (name, type, code, BUILT_IN_MD, NULL, NULL_TREE);

  bpf_builtins[code] = t;
  return t;
}

/* Define machine-specific built-in functions.  */

static void
bpf_init_builtins (void)
{
  tree ullt = long_long_unsigned_type_node;

  /* Built-ins for BPF_LD_ABS and BPF_LD_IND instructions.  */

  def_builtin ("__builtin_bpf_load_byte", BPF_BUILTIN_LOAD_BYTE,
               build_function_type_list (ullt, ullt, 0));
  def_builtin ("__builtin_bpf_load_half", BPF_BUILTIN_LOAD_HALF,
               build_function_type_list (ullt, ullt, 0));
  def_builtin ("__builtin_bpf_load_word", BPF_BUILTIN_LOAD_WORD,
               build_function_type_list (ullt, ullt, 0));
  def_builtin ("__builtin_preserve_access_index",
               BPF_BUILTIN_PRESERVE_ACCESS_INDEX,
               build_function_type_list (ptr_type_node, ptr_type_node, 0));
  def_builtin ("__builtin_preserve_field_info",
               BPF_BUILTIN_PRESERVE_FIELD_INFO,
               build_function_type_list (unsigned_type_node, ptr_type_node,
                                         unsigned_type_node, 0));
}

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS bpf_init_builtins

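/* For instance (an illustrative use in a socket filter):

     unsigned long long w = __builtin_bpf_load_word (0);

   expands to a BPF_LD_ABS instruction loading the 32-bit word at offset
   0 of the packet into %r0; see bpf_expand_builtin below.  */
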
static tree bpf_core_compute (tree, vec<unsigned int> *);
static int bpf_core_get_index (const tree);
static bool is_attr_preserve_access (tree);

/* BPF Compile Once - Run Everywhere (CO-RE) support.  Construct a CO-RE
   relocation record for EXPR of kind KIND to be emitted in the .BTF.ext
   section.  Does nothing if we are not targeting BPF CO-RE, or if the
   constructed relocation would be a no-op.  */

static void
maybe_make_core_relo (tree expr, enum btf_core_reloc_kind kind)
{
  /* If we are not targeting BPF CO-RE, do not make a relocation.  We
     might not be generating any debug info at all.  */
  if (!TARGET_BPF_CORE)
    return;

  auto_vec<unsigned int, 16> accessors;
  tree container = bpf_core_compute (expr, &accessors);

  /* Any valid use of the builtin must have at least one access.  Otherwise,
     there is nothing to record and nothing to do.  This is primarily a
     guard against optimizations leading to unexpected expressions in the
     argument of the builtin.  For example, if the builtin is used to read
     a field of a structure which can be statically determined to hold a
     constant value, the argument to the builtin will be optimized to that
     constant.  This is OK, and means the builtin call is superfluous.
     e.g.
       struct S foo;
       foo.a = 5;
       int x = __preserve_access_index (foo.a);
       ... do stuff with x
     'foo.a' in the builtin argument will be optimized to '5' with -O1+.
     This sequence does not warrant recording a CO-RE relocation.  */

  if (accessors.length () < 1)
    return;
  accessors.reverse ();

  rtx_code_label *label = gen_label_rtx ();
  LABEL_PRESERVE_P (label) = 1;
  emit_label (label);

  /* Determine what output section this relocation will apply to.
     If this function is associated with a section, use that.  Otherwise,
     fall back on '.text'.  */
  const char *section_name;
  if (current_function_decl && DECL_SECTION_NAME (current_function_decl))
    section_name = DECL_SECTION_NAME (current_function_decl);
  else
    section_name = ".text";

  /* Add the CO-RE relocation information to the BTF container.  */
  bpf_core_reloc_add (TREE_TYPE (container), section_name, &accessors, label,
                      kind);
}

/* Expand a call to __builtin_preserve_field_info by evaluating the requested
   information about SRC according to KIND, and return a tree holding
   the result.  */

static tree
bpf_core_field_info (tree src, enum btf_core_reloc_kind kind)
{
  unsigned int result;
  poly_int64 bitsize, bitpos;
  tree var_off = NULL_TREE;
  machine_mode mode;
  int unsignedp, reversep, volatilep;
  location_t loc = EXPR_LOCATION (src);

  get_inner_reference (src, &bitsize, &bitpos, &var_off, &mode, &unsignedp,
                       &reversep, &volatilep);

  /* Note: Use DECL_BIT_FIELD_TYPE rather than DECL_BIT_FIELD here, because it
     remembers whether the field in question was originally declared as a
     bitfield, regardless of how it has been optimized.  */
  bool bitfieldp = (TREE_CODE (src) == COMPONENT_REF
                    && DECL_BIT_FIELD_TYPE (TREE_OPERAND (src, 1)));

  unsigned int align = TYPE_ALIGN (TREE_TYPE (src));
  if (TREE_CODE (src) == COMPONENT_REF)
    {
      tree field = TREE_OPERAND (src, 1);
      if (DECL_BIT_FIELD_TYPE (field))
        align = TYPE_ALIGN (DECL_BIT_FIELD_TYPE (field));
      else
        align = TYPE_ALIGN (TREE_TYPE (field));
    }

  unsigned int start_bitpos = bitpos & ~(align - 1);
  unsigned int end_bitpos = start_bitpos + align;

  switch (kind)
    {
    case BPF_RELO_FIELD_BYTE_OFFSET:
      {
        if (var_off != NULL_TREE)
          {
            error_at (loc, "unsupported variable field offset");
            return error_mark_node;
          }

        if (bitfieldp)
          result = start_bitpos / 8;
        else
          result = bitpos / 8;
      }
      break;

    case BPF_RELO_FIELD_BYTE_SIZE:
      {
        if (mode == BLKmode && bitsize == -1)
          {
            error_at (loc, "unsupported variable size field access");
            return error_mark_node;
          }

        if (bitfieldp)
          {
            /* To match LLVM behavior, byte size of bitfields is recorded as
               the full size of the base type.  A 3-bit bitfield of type int
               is therefore recorded as having a byte size of 4 bytes.  */
            result = end_bitpos - start_bitpos;
            if (result & (result - 1))
              {
                error_at (loc, "unsupported field expression");
                return error_mark_node;
              }
            result = result / 8;
          }
        else
          result = bitsize / 8;
      }
      break;

    case BPF_RELO_FIELD_EXISTS:
      /* The field always exists at compile time.  */
      result = 1;
      break;

    case BPF_RELO_FIELD_SIGNED:
      result = !unsignedp;
      break;

    case BPF_RELO_FIELD_LSHIFT_U64:
    case BPF_RELO_FIELD_RSHIFT_U64:
      {
        if (mode == BLKmode && bitsize == -1)
          {
            error_at (loc, "unsupported variable size field access");
            return error_mark_node;
          }
        if (var_off != NULL_TREE)
          {
            error_at (loc, "unsupported variable field offset");
            return error_mark_node;
          }

        if (!bitfieldp)
          {
            if (bitsize > 64)
              {
                error_at (loc, "field size too large");
                return error_mark_node;
              }
            result = 64 - bitsize;
            break;
          }

        if (end_bitpos - start_bitpos > 64)
          {
            error_at (loc, "field size too large");
            return error_mark_node;
          }

        if (kind == BPF_RELO_FIELD_LSHIFT_U64)
          {
            if (TARGET_BIG_ENDIAN)
              result = bitpos + 64 - start_bitpos - align;
            else
              result = start_bitpos + 64 - bitpos - bitsize;
          }
        else /* RSHIFT_U64 */
          result = 64 - bitsize;
      }
      break;

    default:
      error ("invalid second argument to built-in function");
      return error_mark_node;
    }

  return build_int_cst (unsigned_type_node, result);
}

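/* A worked example (informal): given

     struct S { int a : 3; };

   a little-endian access to s.a has bitpos 0, bitsize 3 and align 32,
   so start_bitpos is 0.  FIELD_BYTE_OFFSET is 0; FIELD_BYTE_SIZE is
   32 / 8 = 4, the full size of the base type, matching LLVM;
   FIELD_LSHIFT_U64 is 0 + 64 - 0 - 3 = 61 and FIELD_RSHIFT_U64 is
   64 - 3 = 61, so loading the 4-byte word and shifting left then right
   by 61 bits extracts the field.  */
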
/* Expand a call to a BPF-specific built-in function that was set up
   with bpf_init_builtins.  */

static rtx
bpf_expand_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
                    rtx subtarget ATTRIBUTE_UNUSED,
                    machine_mode mode ATTRIBUTE_UNUSED,
                    int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  int code = DECL_MD_FUNCTION_CODE (fndecl);

  if (code == BPF_BUILTIN_LOAD_BYTE
      || code == BPF_BUILTIN_LOAD_HALF
      || code == BPF_BUILTIN_LOAD_WORD)
    {
      /* Expand an indirect load from the sk_buff in the context.
         There is just one argument to the builtin, which is the
         offset.

         We try first to expand a ldabs* instruction.  In case this
         fails, we try a ldind* instruction.  */

      enum insn_code abs_icode
        = (code == BPF_BUILTIN_LOAD_BYTE ? CODE_FOR_ldabsb
           : code == BPF_BUILTIN_LOAD_HALF ? CODE_FOR_ldabsh
           : CODE_FOR_ldabsw);

      enum insn_code ind_icode
        = (code == BPF_BUILTIN_LOAD_BYTE ? CODE_FOR_ldindb
           : code == BPF_BUILTIN_LOAD_HALF ? CODE_FOR_ldindh
           : CODE_FOR_ldindw);

      tree offset_arg = CALL_EXPR_ARG (exp, 0);
      struct expand_operand ops[2];

      create_input_operand (&ops[0], expand_normal (offset_arg),
                            TYPE_MODE (TREE_TYPE (offset_arg)));
      create_input_operand (&ops[1], const0_rtx, SImode);

      if (!maybe_expand_insn (abs_icode, 2, ops)
          && !maybe_expand_insn (ind_icode, 2, ops))
        {
          error ("invalid argument to built-in function");
          return gen_rtx_REG (ops[0].mode, BPF_R0);
        }

      /* The result of the load is in R0.  */
      return gen_rtx_REG (ops[0].mode, BPF_R0);
    }

  else if (code == -1)
    {
      /* A resolved overloaded __builtin_preserve_access_index.  */
      tree arg = CALL_EXPR_ARG (exp, 0);

      if (arg == NULL_TREE)
        return NULL_RTX;

      if (TREE_CODE (arg) == SSA_NAME)
        {
          gimple *def_stmt = SSA_NAME_DEF_STMT (arg);

          if (is_gimple_assign (def_stmt))
            arg = gimple_assign_rhs1 (def_stmt);
          else
            return expand_normal (arg);
        }

      /* Avoid double-recording information if the argument is an access to
         a struct/union marked __attribute__((preserve_access_index)).  This
         will be handled by the attribute handling pass.  */
      if (!is_attr_preserve_access (arg))
        maybe_make_core_relo (arg, BPF_RELO_FIELD_BYTE_OFFSET);

      return expand_normal (arg);
    }

  else if (code == -2)
    {
      /* A resolved overloaded __builtin_preserve_field_info.  */
      tree src = CALL_EXPR_ARG (exp, 0);
      tree kind_tree = CALL_EXPR_ARG (exp, 1);
      unsigned HOST_WIDE_INT kind_val;
      if (tree_fits_uhwi_p (kind_tree))
        kind_val = tree_to_uhwi (kind_tree);
      else
        error ("invalid argument to built-in function");

      enum btf_core_reloc_kind kind = (enum btf_core_reloc_kind) kind_val;

      if (TREE_CODE (src) == SSA_NAME)
        {
          gimple *def_stmt = SSA_NAME_DEF_STMT (src);
          if (is_gimple_assign (def_stmt))
            src = gimple_assign_rhs1 (def_stmt);
        }
      if (TREE_CODE (src) == ADDR_EXPR)
        src = TREE_OPERAND (src, 0);

      tree result = bpf_core_field_info (src, kind);

      if (result != error_mark_node)
        maybe_make_core_relo (src, kind);

      return expand_normal (result);
    }

  gcc_unreachable ();
}

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN bpf_expand_builtin

/* Initialize target-specific function library calls.  This is mainly
   used to call library-provided soft-fp operations, since eBPF
   doesn't support floating-point in "hardware".  */

static void
bpf_init_libfuncs (void)
{
  set_conv_libfunc (sext_optab, DFmode, SFmode,
                    "__bpf_extendsfdf2");
  set_conv_libfunc (trunc_optab, SFmode, DFmode,
                    "__bpf_truncdfsf2");
  set_conv_libfunc (sfix_optab, SImode, DFmode,
                    "__bpf_fix_truncdfsi");
  set_conv_libfunc (sfloat_optab, DFmode, SImode,
                    "__bpf_floatsidf");
  set_conv_libfunc (ufloat_optab, DFmode, SImode,
                    "__bpf_floatunsidf");
}

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS bpf_init_libfuncs

/* Define the mechanism that will be used for describing frame unwind
   information to the debugger.  In eBPF it is not possible to unwind
   frames.  */

static enum unwind_info_type
bpf_debug_unwind_info ()
{
  return UI_NONE;
}

#undef TARGET_DEBUG_UNWIND_INFO
#define TARGET_DEBUG_UNWIND_INFO bpf_debug_unwind_info

/* Output assembly directives to assemble data of various sizes and
   alignments.  */

#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\t.byte\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"

/* BPF Compile Once - Run Everywhere (CO-RE) support routines.

   BPF CO-RE is supported in two forms:
   - A target builtin, __builtin_preserve_access_index

     This builtin accepts a single argument.  Any access to an aggregate
     data structure (struct, union or array) within the argument will be
     recorded by the CO-RE machinery, resulting in a relocation record
     being placed in the .BTF.ext section of the output.

     It is implemented in bpf_resolve_overloaded_builtin () and
     bpf_expand_builtin (), using the supporting routines below.

   - An attribute, __attribute__((preserve_access_index))

     This attribute can be applied to struct and union types.  Any access
     to a type with this attribute will be recorded by the CO-RE
     machinery.

     The pass pass_bpf_core_attr, below, implements support for
     this attribute.  */

/* Traverse the subtree under NODE, which is expected to be some form of
   aggregate access the CO-RE machinery cares about (like a read of a member
   of a struct or union), collecting access indices for the components and
   storing them in the vector referenced by ACCESSORS.

   Return the ultimate (top-level) container of the aggregate access.  In
   general, this will be a VAR_DECL or some kind of REF.

   Note that the accessors are computed *in reverse order* of how the BPF
   CO-RE machinery defines them.  The vector needs to be reversed (or simply
   output in reverse order) for the .BTF.ext relocation information.  */

static tree
bpf_core_compute (tree node, vec<unsigned int> *accessors)
{
  if (TREE_CODE (node) == ADDR_EXPR)
    node = TREE_OPERAND (node, 0);

  else if (TREE_CODE (node) == INDIRECT_REF
           || TREE_CODE (node) == POINTER_PLUS_EXPR)
    {
      accessors->safe_push (0);
      return TREE_OPERAND (node, 0);
    }

  while (1)
    {
      switch (TREE_CODE (node))
        {
        case COMPONENT_REF:
          accessors->safe_push (bpf_core_get_index (TREE_OPERAND (node, 1)));
          break;

        case ARRAY_REF:
        case ARRAY_RANGE_REF:
          accessors->safe_push (bpf_core_get_index (node));
          break;

        case MEM_REF:
          accessors->safe_push (bpf_core_get_index (node));
          if (TREE_CODE (TREE_OPERAND (node, 0)) == ADDR_EXPR)
            node = TREE_OPERAND (TREE_OPERAND (node, 0), 0);
          goto done;

        default:
          goto done;
        }
      node = TREE_OPERAND (node, 0);
    }
 done:
  return node;
}

/* Compute the index of the NODE in its immediate container.
   NODE should be a FIELD_DECL (i.e. of struct or union), or an
   ARRAY_REF.  */

static int
bpf_core_get_index (const tree node)
{
  enum tree_code code = TREE_CODE (node);

  if (code == FIELD_DECL)
    {
      /* Lookup the index from the BTF information.  Some struct/union
         members may not be emitted in BTF; only the BTF container has
         enough information to compute the correct index.  */
      int idx = bpf_core_get_sou_member_index (ctf_get_tu_ctfc (), node);
      if (idx >= 0)
        return idx;
    }

  else if (code == ARRAY_REF || code == ARRAY_RANGE_REF || code == MEM_REF)
    {
      /* For array accesses, the index is operand 1.  */
      tree index = TREE_OPERAND (node, 1);

      /* If the indexing operand is a constant, extracting is trivial.  */
      if (TREE_CODE (index) == INTEGER_CST && tree_fits_shwi_p (index))
        return tree_to_shwi (index);
    }

  return -1;
}

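/* For instance (informal): for an access like

     ((struct S *) p)->arr[4].x

   bpf_core_compute pushes the BTF member index of 'x', then 4 for the
   array subscript, then the index of 'arr', then 0 for the pointer
   dereference.  Reversed by the caller, this becomes the .BTF.ext
   accessor string "0:<index of arr>:4:<index of x>".  */
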
/* Synthesize a new builtin function declaration with signature TYPE.
   Used by bpf_resolve_overloaded_builtin to resolve calls to
   __builtin_preserve_access_index.  */

static tree
bpf_core_newdecl (tree type, bool is_pai)
{
  tree rettype;
  char name[80];
  static unsigned long pai_count = 0;
  static unsigned long pfi_count = 0;

  if (is_pai)
    {
      rettype = build_function_type_list (type, type, NULL);
      int len = snprintf (name, sizeof (name), "%s", "__builtin_pai_");
      len = snprintf (name + len, sizeof (name) - len, "%lu", pai_count++);
    }
  else
    {
      rettype = build_function_type_list (unsigned_type_node, type,
                                          unsigned_type_node, NULL);
      int len = snprintf (name, sizeof (name), "%s", "__builtin_pfi_");
      len = snprintf (name + len, sizeof (name) - len, "%lu", pfi_count++);
    }

  return add_builtin_function_ext_scope (name, rettype, is_pai ? -1 : -2,
                                         BUILT_IN_MD, NULL, NULL_TREE);
}

/* Return whether EXPR could access some aggregate data structure that
   BPF CO-RE support needs to know about.  */

static bool
bpf_core_is_maybe_aggregate_access (tree expr)
{
  switch (TREE_CODE (expr))
    {
    case COMPONENT_REF:
    case BIT_FIELD_REF:
    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      return true;
    case ADDR_EXPR:
    case NOP_EXPR:
      return bpf_core_is_maybe_aggregate_access (TREE_OPERAND (expr, 0));
    default:
      return false;
    }
}

struct core_walk_data {
  location_t loc;
  tree arg;
};

/* Callback function used with walk_tree from
   bpf_resolve_overloaded_builtin.  */

static tree
bpf_core_walk (tree *tp, int *walk_subtrees, void *data)
{
  struct core_walk_data *dat = (struct core_walk_data *) data;
  bool is_pai = dat->arg == NULL_TREE;

  /* If this is a type, don't do anything.  */
  if (TYPE_P (*tp))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Build a new function call to a resolved builtin for the desired
     operation.  If this is a preserve_field_info call, pass along the
     argument to the resolved builtin call.  */
  if (bpf_core_is_maybe_aggregate_access (*tp))
    {
      tree newdecl = bpf_core_newdecl (TREE_TYPE (*tp), is_pai);
      tree newcall;
      if (is_pai)
        newcall = build_call_expr_loc (dat->loc, newdecl, 1, *tp);
      else
        newcall = build_call_expr_loc (dat->loc, newdecl, 2, *tp, dat->arg);

      *tp = newcall;
      *walk_subtrees = 0;
    }

  return NULL_TREE;
}

/* Implement target hook small_register_classes_for_mode_p.  */

static bool
bpf_small_register_classes_for_mode_p (machine_mode mode)
{
  if (TARGET_XBPF)
    return true;
  else
    /* Avoid putting function addresses in registers, as calling these
       is not supported in eBPF.  */
    return (mode != FUNCTION_MODE);
}

#undef TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P
#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P \
  bpf_small_register_classes_for_mode_p

/* Return whether EXPR is a valid first argument for a call to
   __builtin_preserve_field_info.  */

static bool
bpf_is_valid_preserve_field_info_arg (tree expr)
{
  switch (TREE_CODE (expr))
    {
    case COMPONENT_REF:
    case BIT_FIELD_REF:
    case ARRAY_REF:
    case ARRAY_RANGE_REF:
      return true;
    case NOP_EXPR:
      return bpf_is_valid_preserve_field_info_arg (TREE_OPERAND (expr, 0));
    case ADDR_EXPR:
      /* Do not accept ADDR_EXPRs like &foo.bar, but do accept accesses like
         foo.baz where baz is an array.  */
      return (TREE_CODE (TREE_TYPE (TREE_OPERAND (expr, 0))) == ARRAY_TYPE);
    default:
      return false;
    }
}

/* Implement TARGET_RESOLVE_OVERLOADED_BUILTIN (see gccint manual section
   Target Macros::Misc.).
   We use this for the __builtin_preserve_access_index builtin for CO-RE
   support.

   FNDECL is the declaration of the builtin, and ARGLIST is the list of
   arguments passed to it, and is really a vec<tree,_> *.

   In this case, the 'operation' implemented by the builtin is a no-op;
   the builtin is just a marker.  So, the result is simply the argument.  */

static tree
bpf_resolve_overloaded_builtin (location_t loc, tree fndecl, void *arglist)
{
  bool is_pai = DECL_MD_FUNCTION_CODE (fndecl)
    == BPF_BUILTIN_PRESERVE_ACCESS_INDEX;
  bool is_pfi = DECL_MD_FUNCTION_CODE (fndecl)
    == BPF_BUILTIN_PRESERVE_FIELD_INFO;

  if (!is_pai && !is_pfi)
    return NULL_TREE;

  /* We only expect one argument, but it may be an arbitrarily-complicated
     statement-expression.  */
  vec<tree, va_gc> *params = static_cast<vec<tree, va_gc> *> (arglist);
  unsigned n_params = params ? params->length () : 0;

  if ((is_pai && n_params != 1) || (is_pfi && n_params != 2))
    {
      error_at (loc, "wrong number of arguments");
      return error_mark_node;
    }

  tree param = (*params)[0];

  /* If not generating BPF CO-RE information, preserve_access_index does
     nothing, and simply "resolves to" the argument.  */
  if (!TARGET_BPF_CORE && is_pai)
    return param;

  if (is_pfi && !bpf_is_valid_preserve_field_info_arg (param))
    {
      error_at (EXPR_LOC_OR_LOC (param, loc),
                "argument is not a field access");
      return error_mark_node;
    }

  /* Do remove_c_maybe_const_expr for the arg.
     TODO: WHY do we have to do this here?  Why doesn't c-typeck take care
     of it before or after this hook?  */
  if (TREE_CODE (param) == C_MAYBE_CONST_EXPR)
    param = C_MAYBE_CONST_EXPR_EXPR (param);

  /* Construct a new function declaration with the correct type, and return
     a call to it.

     Calls with statement-expressions, for example:
       _(({ foo->a = 1; foo->u[2].b = 2; }))
     require special handling.

     We rearrange this into a new block scope in which each statement
     becomes a unique builtin call:
       {
         _ ({ foo->a = 1;});
         _ ({ foo->u[2].b = 2;});
       }

     This ensures that all the relevant information remains within the
     expression trees the builtin finally gets.  */

  struct core_walk_data data;
  data.loc = loc;
  data.arg = is_pai ? NULL_TREE : (*params)[1];

  walk_tree (&param, bpf_core_walk, (void *) &data, NULL);

  return param;
}

#undef TARGET_RESOLVE_OVERLOADED_BUILTIN
#define TARGET_RESOLVE_OVERLOADED_BUILTIN bpf_resolve_overloaded_builtin

/* Handling for __attribute__((preserve_access_index)) for BPF CO-RE support.

   This attribute marks a structure/union/array type as "preserve", so that
   every access to that type should be recorded and replayed by the BPF
   loader; this is just the same functionality as
   __builtin_preserve_access_index, but in the form of an attribute for an
   entire aggregate type.

   Note also that nested structs behave as though they all have the
   attribute.  For example:
     struct X { int a; };
     struct Y { struct X bar; } __attribute__((preserve_access_index));
     struct Y foo;
     foo.bar.a;
   will record the access all the way to 'a', even though struct X does
   not have the preserve_access_index attribute.

   This is to follow LLVM behavior.

   This pass finds all accesses to objects of types marked with the
   attribute, and wraps them in the same "low-level" builtins used by the
   builtin version.  All logic afterwards is therefore identical to the
   builtin version of preserve_access_index.  */

/* True iff tree T accesses any member of a struct/union/class which is
   marked with the PRESERVE_ACCESS_INDEX attribute.  */

static bool
is_attr_preserve_access (tree t)
{
  if (t == NULL_TREE)
    return false;

  poly_int64 bitsize, bitpos;
  tree var_off;
  machine_mode mode;
  int sign, reverse, vol;

  tree base = get_inner_reference (t, &bitsize, &bitpos, &var_off, &mode,
                                   &sign, &reverse, &vol);

  if (TREE_CODE (base) == MEM_REF)
    {
      return lookup_attribute ("preserve_access_index",
                               TYPE_ATTRIBUTES (TREE_TYPE (base)));
    }

  if (TREE_CODE (t) == COMPONENT_REF)
    {
      /* preserve_access_index propagates into nested structures,
         so check whether this is a component of another component
         which in turn is part of such a struct.  */

      const tree op = TREE_OPERAND (t, 0);

      if (TREE_CODE (op) == COMPONENT_REF)
        return is_attr_preserve_access (op);

      const tree container = DECL_CONTEXT (TREE_OPERAND (t, 1));

      return lookup_attribute ("preserve_access_index",
                               TYPE_ATTRIBUTES (container));
    }

  else if (TREE_CODE (t) == ADDR_EXPR)
    return is_attr_preserve_access (TREE_OPERAND (t, 0));

  return false;
}

/* The body of pass_bpf_core_attr.  Scan RTL for accesses to structs/unions
   marked with __attribute__((preserve_access_index)) and generate a CO-RE
   relocation for any such access.  */

static void
handle_attr_preserve (function *fn)
{
  basic_block bb;
  rtx_insn *insn;
  FOR_EACH_BB_FN (bb, fn)
    {
      FOR_BB_INSNS (bb, insn)
        {
          if (!NONJUMP_INSN_P (insn))
            continue;
          rtx pat = PATTERN (insn);
          if (GET_CODE (pat) != SET)
            continue;

          start_sequence ();

          for (int i = 0; i < 2; i++)
            {
              rtx mem = XEXP (pat, i);
              if (MEM_P (mem))
                {
                  tree expr = MEM_EXPR (mem);
                  if (!expr)
                    continue;

                  if (TREE_CODE (expr) == MEM_REF
                      && TREE_CODE (TREE_OPERAND (expr, 0)) == SSA_NAME)
                    {
                      gimple *def_stmt
                        = SSA_NAME_DEF_STMT (TREE_OPERAND (expr, 0));
                      if (def_stmt && is_gimple_assign (def_stmt))
                        expr = gimple_assign_rhs1 (def_stmt);
                    }

                  if (is_attr_preserve_access (expr))
                    maybe_make_core_relo (expr, BPF_RELO_FIELD_BYTE_OFFSET);
                }
            }
          rtx_insn *seq = get_insns ();
          end_sequence ();
          emit_insn_before (seq, insn);
        }
    }
}

/* This pass finds accesses to structures marked with the BPF target
   attribute __attribute__((preserve_access_index)).  For every such
   access, a CO-RE relocation record is generated, to be output in the
   .BTF.ext section.  */

namespace {

const pass_data pass_data_bpf_core_attr =
{
  RTL_PASS, /* type */
  "bpf_core_attr", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_bpf_core_attr : public rtl_opt_pass
{
public:
  pass_bpf_core_attr (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_bpf_core_attr, ctxt)
  {}

  virtual bool gate (function *) { return TARGET_BPF_CORE; }
  virtual unsigned int execute (function *);
};

unsigned int
pass_bpf_core_attr::execute (function *fn)
{
  handle_attr_preserve (fn);
  return 0;
}

} /* Anonymous namespace.  */

rtl_opt_pass *
make_pass_bpf_core_attr (gcc::context *ctxt)
{
  return new pass_bpf_core_attr (ctxt);
}

/* Finally, build the GCC target.  */

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-bpf.h"