#include "memdbg.h"
+/* Calculates test - base while allowing for base or test wraparound.
+ * test is assumed to be ahead of base in the packet id sequence, even
+ * when its value has numerically wrapped around zero. */
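+/* For example, with base = 0xfffffffe and test = 0x00000002 (test has
+ * wrapped), unsigned arithmetic yields test - base == 4, the actual
+ * distance in the packet id sequence. */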
+static inline packet_id_type
+subtract_pid(const packet_id_type test, const packet_id_type base)
+{
+ return test - base;
+}
+
/*
* verify that test - base < extent while allowing for base or test wraparound
*/
const packet_id_type base,
const unsigned int extent)
{
- if (test >= base)
- {
- if (test - base < extent)
- {
- return true;
- }
- }
- else
- {
- if ((test+0x80000000u) - (base+0x80000000u) < extent)
- {
- return true;
- }
- }
-
- return false;
+ return subtract_pid(test, base) < extent;
}
/*
return NULL;
}
+int
+reliable_get_num_output_sequenced_available(struct reliable *rel)
+{
+ packet_id_type min_id = 0;
+ bool min_id_defined = false;
+
+ /* find minimum active packet_id */
+ for (int i = 0; i < rel->size; ++i)
+ {
+ const struct reliable_entry *e = &rel->array[i];
+ if (e->active)
+ {
+ if (!min_id_defined || reliable_pid_min(e->packet_id, min_id))
+ {
+ min_id_defined = true;
+ min_id = e->packet_id;
+ }
+ }
+ }
+
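+ /* rel->packet_id is the id that will be assigned to the next outgoing
+ * packet; the ids from min_id up to it are still held by active,
+ * unacknowledged entries and consume sequence slots. E.g. with
+ * size == 6, min_id == 10 and rel->packet_id == 14, four slots are
+ * taken and only two buffers remain usable without breaking the ack
+ * sequence. */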
+ int ret = rel->size;
+ if (min_id_defined)
+ {
+ ret -= subtract_pid(rel->packet_id, min_id);
+ }
+ return ret;
+}
+
/* grab a free buffer, fail if buffer clogged by unacknowledged low packet IDs */
struct buffer *
reliable_get_buf_output_sequenced(struct reliable *rel)
* be stored in one \c reliable_ack
* structure. */
-#define RELIABLE_CAPACITY 8 /**< The maximum number of packets that
+#define RELIABLE_CAPACITY 12 /**< The maximum number of packets that
* the reliability layer for one VPN
* tunnel in one direction can store. */
int size;
interval_t initial_timeout;
packet_id_type packet_id;
- int offset;
+ int offset; /**< Offset of the buffers in the reliable_entry array */
bool hold; /* don't xmit until reliable_schedule_now is called */
struct reliable_entry array[RELIABLE_CAPACITY];
};
return !ack->len;
}
+/**
+ * Returns the number of packets that need to be acked.
+ *
+ * @param ack The acknowledgment structure to check.
+ *
+ * @returns the number of outstanding acks
+ */
+static inline int
+reliable_ack_outstanding(struct reliable_ack *ack)
+{
+ return ack->len;
+}
+
/**
* Write a packet ID acknowledgment record to a buffer.
*
*/
struct buffer *reliable_get_buf_output_sequenced(struct reliable *rel);
+
+/**
+ * Counts the number of free buffers in output that can potentially be
+ * used for sending.
+ *
+ * @param rel The reliable structure in which to search for free
+ * entries.
+ *
+ * @return the number of buffers that are available for sending without
+ * breaking the ack sequence
+ */
+int
+reliable_get_num_output_sequenced_available(struct reliable *rel);
+
/**
* Mark the reliable entry associated with the given buffer as
* active outgoing.
* if --tls-auth is enabled.
*/
- /* calculate the maximum overhead that control channel frames may have */
+ /* calculate the maximum overhead that control channel frames can have */
int overhead = 0;
/* Socks */
/* Previous OpenVPN version calculated the maximum size and buffer of a
* control frame depending on the overhead of the data channel frame
* overhead and limited its maximum size to 1250. We always allocate the
- * 1250 buffer size since a lot of code blindly assumes a large buffer
- * (e.g. PUSH_BUNDLE_SIZE) and set frame->mtu_mtu as suggestion for the
+ * TLS_CHANNEL_BUF_SIZE buffer size since a lot of code blindly assumes
+ * a large buffer (e.g. PUSH_BUNDLE_SIZE), and our peer might also have
+ * a larger size configured while we still want to be able to receive
+ * its packets. frame->tun_mtu is set as a suggestion for the maximum packet
* size */
frame->buf.payload_size = 1250 + overhead;
frame->tun_mtu = min_int(data_channel_frame->tun_mtu, 1250);
}
+/**
+ * Calculate the maximum overhead that control channel frames can have.
+ * This includes the header, opcode and everything apart from the payload
+ * itself. This function is a bit pessimistic and might report a higher
+ * overhead than we actually have */
+static int
+calc_control_channel_frame_overhead(const struct tls_session *session)
+{
+ const struct key_state *ks = &session->key[KS_PRIMARY];
+ int overhead = 0;
+
+ /* opcode */
+ overhead += 1;
+
+ /* our own session id */
+ overhead += SID_SIZE;
+
+ /* ACK array and remote SESSION ID (part of the ACK array) */
+ overhead += ACK_SIZE(min_int(reliable_ack_outstanding(ks->rec_ack), CONTROL_SEND_ACK_MAX));
+
+ /* Message packet id */
+ overhead += sizeof(packet_id_type);
+
+ if (session->tls_wrap.mode == TLS_WRAP_CRYPT)
+ {
+ overhead += tls_crypt_buf_overhead();
+ }
+ else if (session->tls_wrap.mode == TLS_WRAP_AUTH)
+ {
+ overhead += hmac_ctx_size(session->tls_wrap.opt.key_ctx_bi.encrypt.hmac);
+ overhead += packet_id_size(true);
+ }
+
+ /* Add the typical UDP overhead for an IPv6 UDP packet. TCP+IPv6 has a
+ * larger overhead, but the risk of a TCP connection being dropped
+ * because we try to send too large a packet is basically zero */
+ overhead += datagram_overhead(session->untrusted_addr.dest.addr.sa.sa_family,
+ PROTO_UDP);
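+
+ /* As a rough illustration (assuming 4-byte packet ids, 8-byte session
+ * ids and no tls-auth/tls-crypt wrapping): opcode (1) + own session id
+ * (8) + ACK array holding four acks (1 + 8 + 16) + message packet id (4)
+ * + UDP/IPv6 headers (48) adds up to 86 bytes of overhead. */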
+
+ return overhead;
+}
+
void
init_ssl_lib(void)
{
{
ASSERT(buf_init(buf, 0));
- int status = key_state_read_plaintext(&ks->ks_ssl, buf, TLS_CHANNEL_BUF_SIZE);
+ int status = key_state_read_plaintext(&ks->ks_ssl, buf);
update_time();
if (status == -1)
return true;
}
+static bool
+write_outgoing_tls_ciphertext(struct tls_session *session, bool *state_change)
+{
+ struct key_state *ks = &session->key[KS_PRIMARY];
+
+ int rel_avail = reliable_get_num_output_sequenced_available(ks->send_reliable);
+ if (rel_avail == 0)
+ {
+ return true;
+ }
+
+ /* We need to determine how much space is actually available in the control
+ * channel frame */
+ int max_pkt_len = min_int(TLS_CHANNEL_BUF_SIZE, session->opt->frame.tun_mtu);
+
+ /* Subtract overhead */
+ max_pkt_len -= calc_control_channel_frame_overhead(session);
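+ /* max_pkt_len is now the number of ciphertext bytes that fit into a
+ * single control channel packet */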
+
+ /* calculate total available length for outgoing tls ciphertext */
+ int maxlen = max_pkt_len * rel_avail;
+
+ /* Is the first packet one that will have a WKC appended? */
+ if (control_packet_needs_wkc(ks))
+ {
+ maxlen -= buf_len(session->tls_wrap.tls_crypt_v2_wkc);
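+ /* the WKC is only appended to the first packet, so subtracting its
+ * length once from the total budget is sufficient */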
+ }
+
+ /* If we end up with a size that leaves no room for payload, ignore the
+ * constraints to still be able to send a packet. This might have gone
+ * negative if we have a large wrapped client key. */
+ if (maxlen < 16)
+ {
+ msg(D_TLS_ERRORS, "Warning: --max-packet-size (%d) setting too low. "
+ "Sending minimum sized packet.",
+ session->opt->frame.tun_mtu);
+ maxlen = 16;
+ /* We set the maximum length here to ensure a packet with a wrapped
+ * key can actually carry the 16 bytes of payload */
+ max_pkt_len = TLS_CHANNEL_BUF_SIZE;
+ }
+
+ /* Allocating this buffer on every call seems a bit wasteful */
+ struct gc_arena gc = gc_new();
+ struct buffer tmp = alloc_buf_gc(maxlen, &gc);
+
+ int status = key_state_read_ciphertext(&ks->ks_ssl, &tmp);
+
+ if (status == -1)
+ {
+ msg(D_TLS_ERRORS,
+ "TLS Error: Ciphertext -> reliable TCP/UDP transport read error");
+ gc_free(&gc);
+ return false;
+ }
+ if (status == 1)
+ {
+ /* Split the TLS ciphertext (TLS record) into multiple small packets
+ * that respect max_pkt_len */
+ while (tmp.len > 0)
+ {
+ int len = max_pkt_len;
+ int opcode = P_CONTROL_V1;
+ if (control_packet_needs_wkc(ks))
+ {
+ opcode = P_CONTROL_WKC_V1;
+ len = max_int(0, len - buf_len(session->tls_wrap.tls_crypt_v2_wkc));
+ }
+ /* do not send more than available */
+ len = min_int(len, tmp.len);
+
+ struct buffer *buf = reliable_get_buf_output_sequenced(ks->send_reliable);
+ /* we assert here since we checked for its availability before */
+ ASSERT(buf);
+ buf_copy_n(buf, &tmp, len);
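+ /* buf_copy_n consumes len bytes from tmp, so the loop terminates once
+ * the whole TLS record has been split into packets */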
+
+ reliable_mark_active_outgoing(ks->send_reliable, buf, opcode);
+ INCR_GENERATED;
+ *state_change = true;
+ }
+ dmsg(D_TLS_DEBUG, "Outgoing Ciphertext -> Reliable");
+ }
+
+ gc_free(&gc);
+ return true;
+}
+
static bool
tls_process_state(struct tls_multi *multi,
buf = reliable_get_buf_output_sequenced(ks->send_reliable);
if (buf)
{
- int status = key_state_read_ciphertext(&ks->ks_ssl, buf, multi->opt.frame.tun_mtu);
-
- if (status == -1)
+ if (!write_outgoing_tls_ciphertext(session, &state_change))
{
- msg(D_TLS_ERRORS,
- "TLS Error: Ciphertext -> reliable TCP/UDP transport read error");
goto error;
}
- if (status == 1)
- {
- int opcode = P_CONTROL_V1;
- if (control_packet_needs_wkc(ks))
- {
- opcode = P_CONTROL_WKC_V1;
- }
- reliable_mark_active_outgoing(ks->send_reliable, buf, opcode);
- INCR_GENERATED;
- state_change = true;
- dmsg(D_TLS_DEBUG, "Outgoing Ciphertext -> Reliable");
- }
}
}
* Read from an OpenSSL BIO in non-blocking mode.
*/
static int
-bio_read(BIO *bio, struct buffer *buf, int maxlen, const char *desc)
+bio_read(BIO *bio, struct buffer *buf, const char *desc)
{
int i;
int ret = 0;
else
{
int len = buf_forward_capacity(buf);
- if (maxlen < len)
- {
- len = maxlen;
- }
/*
* BIO_read brackets most of the serious RSA
}
int
-key_state_read_ciphertext(struct key_state_ssl *ks_ssl, struct buffer *buf,
- int maxlen)
+key_state_read_ciphertext(struct key_state_ssl *ks_ssl, struct buffer *buf)
{
int ret = 0;
perf_push(PERF_BIO_READ_CIPHERTEXT);
ASSERT(NULL != ks_ssl);
- ret = bio_read(ks_ssl->ct_out, buf, maxlen, "tls_read_ciphertext");
+ ret = bio_read(ks_ssl->ct_out, buf, "tls_read_ciphertext");
perf_pop();
return ret;
}
int
-key_state_read_plaintext(struct key_state_ssl *ks_ssl, struct buffer *buf,
- int maxlen)
+key_state_read_plaintext(struct key_state_ssl *ks_ssl, struct buffer *buf)
{
int ret = 0;
perf_push(PERF_BIO_READ_PLAINTEXT);
ASSERT(NULL != ks_ssl);
- ret = bio_read(ks_ssl->ssl_bio, buf, maxlen, "tls_read_plaintext");
+ ret = bio_read(ks_ssl->ssl_bio, buf, "tls_read_plaintext");
perf_pop();
return ret;