__section_tail(JMP_MAP_ID, 0)
int cls_loop(struct __sk_buff *skb)
{
- char fmt[] = "cb: %u\n";
-
- trace_printk(fmt, sizeof(fmt), skb->cb[0]++);
+ printt("cb: %u\n", skb->cb[0]++);
tail_call(skb, &jmp_tc, 0);
skb->tc_classid = TC_H_MAKE(1, 42);
__section("aaa")
int cls_aaa(struct __sk_buff *skb)
{
- char fmt[] = "aaa\n";
-
- trace_printk(fmt, sizeof(fmt));
+ printt("aaa\n");
return TC_H_MAKE(1, 42);
}
__section("bbb")
int cls_bbb(struct __sk_buff *skb)
{
- char fmt[] = "bbb\n";
-
- trace_printk(fmt, sizeof(fmt));
+ printt("bbb\n");
return TC_H_MAKE(1, 43);
}
__section_cls_entry
int cls_entry(struct __sk_buff *skb)
{
- char fmt[] = "fallthrough\n";
-
tail_call(skb, &jmp_tc, 0);
- trace_printk(fmt, sizeof(fmt));
-
+ printt("fallthrough\n");
return BPF_H_DEFAULT;
}
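
Both the cyclic and the graft snippets above tail-call through a program array map named jmp_tc, whose declaration sits outside these hunks. For orientation only, such a map is declared in the iproute2 examples roughly as below; the sketch assumes struct bpf_elf_map and __section_maps from bpf_elf.h/bpf_api.h, and the exact id/max_elem values differ per example:

struct bpf_elf_map __section_maps jmp_tc = {
	.type		= BPF_MAP_TYPE_PROG_ARRAY,
	.id		= JMP_MAP_ID,	/* programs tagged __section_tail(JMP_MAP_ID, ...) land here */
	.size_key	= sizeof(uint32_t),
	.size_value	= sizeof(uint32_t),
	.max_elem	= 1,
};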
__u8 ip_proto;
};
-static inline int flow_ports_offset(__u8 ip_proto)
+static __inline__ int flow_ports_offset(__u8 ip_proto)
{
switch (ip_proto) {
case IPPROTO_TCP:
}
}
-static inline bool flow_is_frag(struct __sk_buff *skb, int nh_off)
+static __inline__ bool flow_is_frag(struct __sk_buff *skb, int nh_off)
{
return !!(load_half(skb, nh_off + offsetof(struct iphdr, frag_off)) &
(IP_MF | IP_OFFSET));
}
-static inline int flow_parse_ipv4(struct __sk_buff *skb, int nh_off,
- __u8 *ip_proto, struct flow_keys *flow)
+static __inline__ int flow_parse_ipv4(struct __sk_buff *skb, int nh_off,
+ __u8 *ip_proto, struct flow_keys *flow)
{
__u8 ip_ver_len;
return nh_off;
}
-static inline __u32 flow_addr_hash_ipv6(struct __sk_buff *skb, int off)
+static __inline__ __u32 flow_addr_hash_ipv6(struct __sk_buff *skb, int off)
{
__u32 w0 = load_word(skb, off);
__u32 w1 = load_word(skb, off + sizeof(w0));
return w0 ^ w1 ^ w2 ^ w3;
}
-static inline int flow_parse_ipv6(struct __sk_buff *skb, int nh_off,
- __u8 *ip_proto, struct flow_keys *flow)
+static __inline__ int flow_parse_ipv6(struct __sk_buff *skb, int nh_off,
+ __u8 *ip_proto, struct flow_keys *flow)
{
*ip_proto = load_byte(skb, nh_off + offsetof(struct ipv6hdr, nexthdr));
return nh_off + sizeof(struct ipv6hdr);
}
-static inline bool flow_dissector(struct __sk_buff *skb,
- struct flow_keys *flow)
+static __inline__ bool flow_dissector(struct __sk_buff *skb,
+ struct flow_keys *flow)
{
int poff, nh_off = BPF_LL_OFF + ETH_HLEN;
__be16 proto = skb->protocol;
return true;
}
-static inline void cls_update_proto_map(const struct __sk_buff *skb,
- const struct flow_keys *flow)
+static __inline__ void cls_update_proto_map(const struct __sk_buff *skb,
+ const struct flow_keys *flow)
{
uint8_t proto = flow->ip_proto;
struct count_tuple *ct, _ct;
map_update_elem(&map_proto, &proto, &_ct, BPF_ANY);
}
-static inline void cls_update_queue_map(const struct __sk_buff *skb)
+static __inline__ void cls_update_queue_map(const struct __sk_buff *skb)
{
uint32_t queue = skb->queue_mapping;
struct count_queue *cq, _cq;
return flow.ip_proto;
}
-static inline void act_update_drop_map(void)
+static __inline__ void act_update_drop_map(void)
{
uint32_t *count, cpu = get_smp_processor_id();
__section("ingress")
int imain(struct __sk_buff *skb)
{
- char fmt[] = "map val: %d\n";
int key = 0, *val;
val = map_lookup_elem(&map_sh, &key);
if (val)
- trace_printk(fmt, sizeof(fmt), *val);
+ printt("map val: %d\n", *val);
return BPF_H_DEFAULT;
}
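
imain() above only reads the shared counter. Its egress counterpart, which is not part of these hunks, increments the same map_sh slot; a minimal sketch of it, assuming the array map map_sh from the same example and the lock_xadd() wrapper declared further down in bpf_api.h:

__section("egress")
int emain(struct __sk_buff *skb)
{
	int key = 0, *val;

	val = map_lookup_elem(&map_sh, &key);
	if (val)
		/* Atomic add, so concurrent CPUs do not lose updates. */
		lock_xadd(val, 1);

	return BPF_H_DEFAULT;
}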
__section_tail(FOO, ENTRY_0)
int cls_case1(struct __sk_buff *skb)
{
- char fmt[] = "case1: map-val: %d from:%u\n";
int key = 0, *val;
val = map_lookup_elem(&map_sh, &key);
if (val)
- trace_printk(fmt, sizeof(fmt), *val, skb->cb[0]);
+ printt("case1: map-val: %d from:%u\n", *val, skb->cb[0]);
skb->cb[0] = ENTRY_0;
tail_call(skb, &jmp_ex, ENTRY_0);
__section_tail(FOO, ENTRY_1)
int cls_case2(struct __sk_buff *skb)
{
- char fmt[] = "case2: map-val: %d from:%u\n";
int key = 0, *val;
val = map_lookup_elem(&map_sh, &key);
if (val)
- trace_printk(fmt, sizeof(fmt), *val, skb->cb[0]);
+ printt("case2: map-val: %d from:%u\n", *val, skb->cb[0]);
skb->cb[0] = ENTRY_1;
tail_call(skb, &jmp_tc, ENTRY_0);
__section_tail(BAR, ENTRY_0)
int cls_exit(struct __sk_buff *skb)
{
- char fmt[] = "exit: map-val: %d from:%u\n";
int key = 0, *val;
val = map_lookup_elem(&map_sh, &key);
if (val)
- trace_printk(fmt, sizeof(fmt), *val, skb->cb[0]);
+ printt("exit: map-val: %d from:%u\n", *val, skb->cb[0]);
/* Termination point. */
return BPF_H_DEFAULT;
__section_cls_entry
int cls_entry(struct __sk_buff *skb)
{
- char fmt[] = "fallthrough\n";
int key = 0, *val;
/* For transferring state, we can use skb->cb[0] ... skb->cb[4]. */
tail_call(skb, &jmp_tc, skb->hash & (MAX_JMP_SIZE - 1));
}
- trace_printk(fmt, sizeof(fmt));
+ printt("fallthrough\n");
return BPF_H_DEFAULT;
}
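
The __section_tail(FOO, ENTRY_0) annotations used above are what let tc place each program into the right slot of the right program array. The macro lives in bpf_api.h outside these hunks; roughly, it only encodes the map id and the slot key into the ELF section name, e.g. "42/0":

/* Sketch of the definitions as found in bpf_api.h (not part of this diff). */
#ifndef __stringify
# define __stringify(X)		#X
#endif
#ifndef __section_tail
# define __section_tail(ID, KEY)	\
	__section(__stringify(ID) "/" __stringify(KEY))
#endif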
# define ntohl(X) __constant_ntohl((X))
#endif
+#ifndef __inline__
+# define __inline__ __attribute__((always_inline))
+#endif
+
/** Section helper macros. */
#ifndef __section
# define BPF_H_DEFAULT -1
#endif
-/** BPF helper functions for tc. */
+/** BPF helper functions for tc. Individual flags are in linux/bpf.h */
#ifndef BPF_FUNC
# define BPF_FUNC(NAME, ...) \
static uint64_t BPF_FUNC(ktime_get_ns);
/* Debugging */
+
+/* FIXME: __attribute__ ((format(printf, 1, 3))) not possible unless
+ * llvm bug https://llvm.org/bugs/show_bug.cgi?id=26243 gets resolved.
+ * It would require ____fmt to be made const, which generates a reloc
+ * entry (non-map).
+ */
static void BPF_FUNC(trace_printk, const char *fmt, int fmt_size, ...);
+#ifndef printt
+# define printt(fmt, ...) \
+ ({ \
+ char ____fmt[] = fmt; \
+ trace_printk(____fmt, sizeof(____fmt), ##__VA_ARGS__); \
+ })
+#endif
+
/* Random numbers */
static uint32_t BPF_FUNC(get_prandom_u32);
uint32_t flags);
/* Packet manipulation */
-#define BPF_PSEUDO_HDR 0x10
-#define BPF_HAS_PSEUDO_HDR(flags) ((flags) & BPF_PSEUDO_HDR)
-#define BPF_HDR_FIELD_SIZE(flags) ((flags) & 0x0f)
-
+static int BPF_FUNC(skb_load_bytes, struct __sk_buff *skb, uint32_t off,
+ void *to, uint32_t len);
static int BPF_FUNC(skb_store_bytes, struct __sk_buff *skb, uint32_t off,
- void *from, uint32_t len, uint32_t flags);
+ const void *from, uint32_t len, uint32_t flags);
+
static int BPF_FUNC(l3_csum_replace, struct __sk_buff *skb, uint32_t off,
uint32_t from, uint32_t to, uint32_t flags);
static int BPF_FUNC(l4_csum_replace, struct __sk_buff *skb, uint32_t off,
static int BPF_FUNC(skb_get_tunnel_key, struct __sk_buff *skb,
struct bpf_tunnel_key *to, uint32_t size, uint32_t flags);
static int BPF_FUNC(skb_set_tunnel_key, struct __sk_buff *skb,
- struct bpf_tunnel_key *from, uint32_t size, uint32_t flags);
+ const struct bpf_tunnel_key *from, uint32_t size,
+ uint32_t flags);
-/** LLVM built-ins */
+/** LLVM built-ins, mem*() routines work for constant size */
#ifndef lock_xadd
# define lock_xadd(ptr, val) ((void) __sync_fetch_and_add(ptr, val))
#endif
+#ifndef memset
+# define memset(s, c, n) __builtin_memset((s), (c), (n))
+#endif
+
+#ifndef memcpy
+# define memcpy(d, s, n) __builtin_memcpy((d), (s), (n))
+#endif
+
+#ifndef memmove
+# define memmove(d, s, n) __builtin_memmove((d), (s), (n))
+#endif
+
+/* FIXME: __builtin_memcmp() is not yet fully useable unless llvm bug
+ * https://llvm.org/bugs/show_bug.cgi?id=26218 gets resolved. Also
+ * this one would generate a reloc entry (non-map), otherwise.
+ */
+#if 0
+#ifndef memcmp
+# define memcmp(a, b, n) __builtin_memcmp((a), (b), (n))
+#endif
+#endif
+
unsigned long long load_byte(void *skb, unsigned long long off)
asm ("llvm.bpf.load.byte");