if (buf->len >= COMPRESS_THRESHOLD && (compctx->flags & COMP_F_ALLOW_COMPRESS))
{
- const size_t ps = PAYLOAD_SIZE(frame);
+ const size_t ps = frame->buf.payload_size;
int zlen_max = ps + COMP_EXTRA_BUFFER(ps);
int zlen;
- ASSERT(buf_init(work, FRAME_HEADROOM(frame)));
+ ASSERT(buf_init(work, frame->buf.headroom));
ASSERT(buf_safe(work, zlen_max));
if (buf->len > ps)
return;
}
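Note on the sizing above: a compressor may expand incompressible input, so
the work buffer is reserved for the payload size plus a worst-case growth
bound, and oversized packets skip compression entirely. A minimal sketch of
that rationale, with a hypothetical EXTRA() standing in for the real
COMP_EXTRA_BUFFER() bound from comp.h:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define EXTRA(len) ((len) / 64 + 16 + 3) /* hypothetical worst-case growth */

static bool
fits_compress_path(size_t buf_len, size_t payload_size, size_t work_capacity)
{
    /* packets already larger than the payload size bypass compression,
     * mirroring the early return above */
    if (buf_len > payload_size)
    {
        return false;
    }
    /* the work buffer must absorb worst-case expansion of an
     * incompressible payload */
    return work_capacity >= payload_size + EXTRA(payload_size);
}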
- ASSERT(buf_init(&work, FRAME_HEADROOM(frame)));
+ ASSERT(buf_init(&work, frame->buf.headroom));
/* do unframing/swap (assumes buf->len > 0) */
{
return;
}
- ASSERT(buf_init(&work, FRAME_HEADROOM(frame)));
+ ASSERT(buf_init(&work, frame->buf.headroom));
/* do unframing/swap (assumes buf->len > 0) */
uint8_t *head = BPTR(buf);
* work is a workspace buffer we are given of size BUF_SIZE.
* work may be used to return output data, or the input buffer
* may be modified and returned as output. If output data is
- * returned in work, the data should start after FRAME_HEADROOM bytes
+ * returned in work, the data should start after buf.headroom bytes
* of padding to leave room for downstream routines to prepend.
*
- * Up to a total of FRAME_HEADROOM bytes may be prepended to the input buf
+ * Up to a total of buf.headroom bytes may be prepended to the input buf
* by all routines (encryption, decryption, compression, and decompression).
*
* Note that the buf_prepend return will assert if we try to
- * make a header bigger than FRAME_HEADROOM. This should not
+ * make a header bigger than buf.headroom. This should not
* happen unless the frame parameters are wrong.
*/
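The headroom contract described above can be illustrated with a
self-contained sketch (a simplified model, not the real buffer.h API):
buf_init() reserves prepend capacity by starting the data region partway
into the backing store, and each prepend moves the start back, asserting
once the reserved bytes are exhausted.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct mini_buf {
    uint8_t store[2048]; /* backing allocation, cf. BUF_SIZE(f) */
    size_t offset;       /* current start of data within store */
    size_t len;          /* bytes of data currently held */
};

static void
mini_buf_init(struct mini_buf *b, size_t headroom)
{
    b->offset = headroom; /* leave prepend room for downstream routines */
    b->len = 0;
}

static uint8_t *
mini_buf_prepend(struct mini_buf *b, size_t size)
{
    /* fires if routines collectively prepend more than the reserved
     * buf.headroom bytes, i.e. the frame parameters are wrong */
    assert(b->offset >= size);
    b->offset -= size;
    b->len += size;
    return b->store + b->offset;
}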
ASSERT(ad_start >= buf->data && ad_start <= BPTR(buf));
- ASSERT(buf_init(&work, FRAME_HEADROOM(frame)));
+ ASSERT(buf_init(&work, frame->buf.headroom));
/* IV and Packet ID required for this mode */
ASSERT(packet_id_initialized(&opt->packet_id));
uint8_t iv_buf[OPENVPN_MAX_IV_LENGTH] = { 0 };
int outlen;
- /* initialize work buffer with FRAME_HEADROOM bytes of prepend capacity */
- ASSERT(buf_init(&work, FRAME_HEADROOM(frame)));
+ /* initialize work buffer with buf.headroom bytes of prepend capacity */
+ ASSERT(buf_init(&work, frame->buf.headroom));
/* read the IV from the packet */
if (buf->len < iv_size)
void *buf_p;
/* init work */
- ASSERT(buf_init(&work, FRAME_HEADROOM(frame)));
+ ASSERT(buf_init(&work, frame->buf.headroom));
/* init implicit IV */
{
ASSERT(buf_p);
memcpy(buf_p, BPTR(&src), BLEN(&src));
- /* initialize work buffer with FRAME_HEADROOM bytes of prepend capacity */
- ASSERT(buf_init(&encrypt_workspace, FRAME_HEADROOM(frame)));
+ /* initialize work buffer with buf.headroom bytes of prepend capacity */
+ ASSERT(buf_init(&encrypt_workspace, frame->buf.headroom));
/* encrypt */
openvpn_encrypt(&buf, encrypt_workspace, co);
#endif
}
- /* initialize work buffer with FRAME_HEADROOM bytes of prepend capacity */
- ASSERT(buf_init(&b->encrypt_buf, FRAME_HEADROOM(&c->c2.frame)));
+ /* initialize work buffer with buf.headroom bytes of prepend capacity */
+ ASSERT(buf_init(&b->encrypt_buf, c->c2.frame.buf.headroom));
if (c->c2.tls_multi)
{
perf_push(PERF_READ_IN_LINK);
c->c2.buf = c->c2.buffers->read_link_buf;
- ASSERT(buf_init(&c->c2.buf, FRAME_HEADROOM(&c->c2.frame)));
+ ASSERT(buf_init(&c->c2.buf, c->c2.frame.buf.headroom));
status = link_socket_read(c->c2.link_socket,
&c->c2.buf,
sockethandle_finalize(sh, &c->c1.tuntap->reads, &c->c2.buf, NULL);
}
#else /* ifdef _WIN32 */
- ASSERT(buf_init(&c->c2.buf, FRAME_HEADROOM(&c->c2.frame)));
+ ASSERT(buf_init(&c->c2.buf, c->c2.frame.buf.headroom));
ASSERT(buf_safe(&c->c2.buf, c->c2.frame.buf.payload_size));
c->c2.buf.len = read_tun(c->c1.tuntap, BPTR(&c->c2.buf), c->c2.frame.buf.payload_size);
#endif /* ifdef _WIN32 */
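The same three-step pattern (reserve headroom, guarantee payload space,
then read or write) recurs throughout this patch. A hypothetical helper,
not part of the patch, that captures it using the buffer.h and error.h
primitives already visible in these hunks:

#include "buffer.h"
#include "error.h"
#include "frame.h"

static inline void
prep_frame_buf(struct buffer *buf, const struct frame *frame)
{
    ASSERT(buf_init(buf, frame->buf.headroom));     /* prepend capacity */
    ASSERT(buf_safe(buf, frame->buf.payload_size)); /* payload capacity */
}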
frag->defined = true;
frag->max_frag_size = size;
frag->map = 0;
- ASSERT(buf_init(&frag->buf, FRAME_HEADROOM(frame)));
+ ASSERT(buf_init(&frag->buf, frame->buf.headroom));
}
/* copy the data to fragment buffer */
{
FRAG_ERR("too many fragments would be required to send datagram");
}
- ASSERT(buf_init(&f->outgoing, FRAME_HEADROOM(frame)));
+ ASSERT(buf_init(&f->outgoing, frame->buf.headroom));
ASSERT(buf_copy(&f->outgoing, buf));
f->outgoing_seq_id = modulo_add(f->outgoing_seq_id, 1, N_SEQ_ID);
f->outgoing_frag_id = 0;
/* initialize return buffer */
*buf = f->outgoing_return;
- ASSERT(buf_init(buf, FRAME_HEADROOM(frame)));
+ ASSERT(buf_init(buf, frame->buf.headroom));
ASSERT(buf_copy_n(buf, &f->outgoing, size));
/* fragment flags differ based on whether or not we are sending the last fragment */
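The sequence-id update above relies on wrap-around arithmetic so that
outgoing_seq_id cycles through [0, N_SEQ_ID) rather than growing without
bound. A sketch of that behavior, assuming modulo_add() acts as
(x + y) mod m for the operand ranges used here (the real helper lives in
integer.h):

static inline int
modulo_add_sketch(int x, int y, int mod)
{
    int sum = (x + y) % mod;
    return sum < 0 ? sum + mod : sum;
}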
if (buf->len >= COMPRESS_THRESHOLD && lzo_compression_enabled(compctx))
{
- const size_t ps = PAYLOAD_SIZE(frame);
- ASSERT(buf_init(&work, FRAME_HEADROOM(frame)));
+ const size_t ps = frame->buf.payload_size;
+ ASSERT(buf_init(&work, frame->buf.headroom));
ASSERT(buf_safe(&work, ps + COMP_EXTRA_BUFFER(ps)));
if (buf->len > ps)
return;
}
- ASSERT(buf_init(&work, FRAME_HEADROOM(frame)));
+ ASSERT(buf_init(&work, frame->buf.headroom));
c = *BPTR(buf);
ASSERT(buf_advance(buf, 1));
{
/* allocate buffer for overlapped I/O */
*buf = alloc_buf(BUF_SIZE(frame));
- ASSERT(buf_init(buf, FRAME_HEADROOM(frame)));
+ ASSERT(buf_init(buf, frame->buf.headroom));
buf->len = frame->buf.payload_size;
ASSERT(buf_safe(buf, 0));
}
/* Forward declarations, to prevent includes */
struct options;
-/* Routines which read struct frame should use the macros below */
-
-/*
- * Overhead added to packet payload due to encapsulation
- */
-#define EXTRA_FRAME(f) ((f)->extra_frame)
-
-/*
- * Delta between tun payload size and final TCP/UDP datagram size
- * (not including extra_link additions)
- */
-#define TUN_LINK_DELTA(f) ((f)->extra_frame + (f)->extra_tun)
-
-/*
- * This is the maximum packet size that we need to be able to
- * read from or write to a tun or tap device. For example,
- * a tap device ifconfiged to an MTU of 1200 might actually want
- * to return a packet size of 1214 on a read().
- */
-#define PAYLOAD_SIZE(f) ((f)->buf.payload_size)
-
/*
* Control buffer headroom allocations to allow for efficient prepending.
*/
#define BUF_SIZE(f) ((f)->buf.headroom + (f)->buf.payload_size + (f)->buf.tailroom)
-#define FRAME_HEADROOM(f) ((f)->buf.headroom)
-
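A worked example of the BUF_SIZE() arithmetic, using hypothetical frame
parameters purely for illustration:

static inline void
buf_size_example(void)
{
    struct frame f = { 0 };
    f.buf.headroom = 136; /* hypothetical illustrative values */
    f.buf.payload_size = 1500;
    f.buf.tailroom = 100;
    ASSERT(BUF_SIZE(&f) == 1736); /* 136 + 1500 + 100 */
}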
/*
* Function prototypes.
*/
struct packet_flood_parms parm = get_packet_flood_parms(level);
int i;
- ASSERT(buf_init(&buf, FRAME_HEADROOM(&m->top.c2.frame)));
+ ASSERT(buf_init(&buf, m->top.c2.frame.buf.headroom));
parm.packet_size = min_int(parm.packet_size, m->top.c2.frame.buf.payload_size);
msg(D_GREMLIN, "GREMLIN_FLOOD_CLIENTS: flooding clients with %d packets of size %d",
bool doit = false;
c->c2.buf = c->c2.buffers->aux_buf;
- ASSERT(buf_init(&c->c2.buf, FRAME_HEADROOM(&c->c2.frame)));
+ ASSERT(buf_init(&c->c2.buf, c->c2.frame.buf.headroom));
ASSERT(buf_safe(&c->c2.buf, c->c2.frame.buf.payload_size));
ASSERT(buf_write(&c->c2.buf, occ_magic, OCC_STRING_SIZE));
check_ping_send_dowork(struct context *c)
{
c->c2.buf = c->c2.buffers->aux_buf;
- ASSERT(buf_init(&c->c2.buf, FRAME_HEADROOM(&c->c2.frame)));
+ ASSERT(buf_init(&c->c2.buf, c->c2.frame.buf.headroom));
ASSERT(buf_safe(&c->c2.buf, c->c2.frame.buf.payload_size));
ASSERT(buf_write(&c->c2.buf, ping_string, sizeof(ping_string)));
ks->plaintext_write_buf = alloc_buf(TLS_CHANNEL_BUF_SIZE);
ks->ack_write_buf = alloc_buf(BUF_SIZE(&session->opt->frame));
reliable_init(ks->send_reliable, BUF_SIZE(&session->opt->frame),
- FRAME_HEADROOM(&session->opt->frame), TLS_RELIABLE_N_SEND_BUFFERS,
+ session->opt->frame.buf.headroom, TLS_RELIABLE_N_SEND_BUFFERS,
ks->key_id ? false : session->opt->xmit_hold);
reliable_init(ks->rec_reliable, BUF_SIZE(&session->opt->frame),
- FRAME_HEADROOM(&session->opt->frame), TLS_RELIABLE_N_REC_BUFFERS,
+ session->opt->frame.buf.headroom, TLS_RELIABLE_N_REC_BUFFERS,
false);
reliable_set_timeout(ks->send_reliable, session->opt->packet_timeout);
if (!to_link->len && !reliable_ack_empty(ks->rec_ack))
{
struct buffer buf = ks->ack_write_buf;
- ASSERT(buf_init(&buf, FRAME_HEADROOM(&multi->opt.frame)));
+ ASSERT(buf_init(&buf, multi->opt.frame.buf.headroom));
write_control_auth(session, ks, &buf, to_link_addr, P_ACK_V1,
RELIABLE_ACK_SIZE, false);
*to_link = buf;