HT_GENERATE2(channel_idmap, channel_idmap_entry_s, node, channel_idmap_hash,
channel_idmap_eq, 0.5, tor_reallocarray_, tor_free_)
-static cell_queue_entry_t * cell_queue_entry_dup(cell_queue_entry_t *q);
-#if 0
-static int cell_queue_entry_is_padding(cell_queue_entry_t *q);
-#endif
-static cell_queue_entry_t *
-cell_queue_entry_new_fixed(cell_t *cell);
-static cell_queue_entry_t *
-cell_queue_entry_new_var(var_cell_t *var_cell);
-static int is_destroy_cell(channel_t *chan,
- const cell_queue_entry_t *q, circid_t *circid_out);
-
-static void channel_assert_counter_consistency(void);
-
/* Functions to maintain the digest map */
-static void channel_add_to_digest_map(channel_t *chan);
static void channel_remove_from_digest_map(channel_t *chan);
- static void channel_force_free(channel_t *chan);
- static void
- channel_free_list(smartlist_t *channels, int mark_for_close);
- static void
- channel_listener_free_list(smartlist_t *channels, int mark_for_close);
- static void channel_listener_force_free(channel_listener_t *chan_l);
-/*
- * Flush cells from just the outgoing queue without trying to get them
- * from circuits; used internall by channel_flush_some_cells().
- */
-static ssize_t
-channel_flush_some_cells_from_outgoing_queue(channel_t *chan,
- ssize_t num_cells);
+ static void channel_force_xfree(channel_t *chan);
-static void channel_free_list(smartlist_t *channels, int mark_for_close);
++static void channel_free_list(smartlist_t *channels,
++ int mark_for_close);
+ static void channel_listener_free_list(smartlist_t *channels,
- int mark_for_close);
++ int mark_for_close);
+ static void channel_listener_force_xfree(channel_listener_t *chan_l);
-static size_t channel_get_cell_queue_entry_size(channel_t *chan,
- cell_queue_entry_t *q);
-static void
-channel_write_cell_queue_entry(channel_t *chan, cell_queue_entry_t *q);
/***********************************
* Channel state utility functions *
 **********************************/
static void
- channel_force_free(channel_t *chan)
+ channel_force_xfree(channel_t *chan)
{
- cell_queue_entry_t *cell, *cell_tmp;
tor_assert(chan);
log_debug(LD_CHANNEL,
}
/**
- * Set the remote end metadata (identity_digest/nickname) of a channel
+ * Write the given packed cell to the channel.
*
- * Return 0 on success or -1 on error.
- * This function sets new remote end info on a channel; this is intended
- * for use by the lower layer.
- */
-
-void
-channel_set_remote_end(channel_t *chan,
- const char *identity_digest,
- const char *nickname)
-{
- int was_in_digest_map, should_be_in_digest_map, state_not_in_map;
-
- tor_assert(chan);
-
- log_debug(LD_CHANNEL,
- "Setting remote endpoint identity on channel %p with "
- "global ID " U64_FORMAT " to nickname %s, digest %s",
- chan, U64_PRINTF_ARG(chan->global_identifier),
- nickname ? nickname : "(null)",
- identity_digest ?
- hex_str(identity_digest, DIGEST_LEN) : "(null)");
-
- state_not_in_map = CHANNEL_CONDEMNED(chan);
-
- was_in_digest_map =
- !state_not_in_map &&
- chan->registered &&
- !tor_digest_is_zero(chan->identity_digest);
- should_be_in_digest_map =
- !state_not_in_map &&
- chan->registered &&
- (identity_digest &&
- !tor_digest_is_zero(identity_digest));
-
- if (was_in_digest_map)
- /* We should always remove it; we'll add it back if we're writing
- * in a new digest.
- */
- channel_remove_from_digest_map(chan);
-
- if (identity_digest) {
- memcpy(chan->identity_digest,
- identity_digest,
- sizeof(chan->identity_digest));
-
- } else {
- memset(chan->identity_digest, 0,
- sizeof(chan->identity_digest));
- }
-
- tor_free(chan->nickname);
- if (nickname)
- chan->nickname = tor_strdup(nickname);
-
- /* Put it in the digest map if we should */
- if (should_be_in_digest_map)
- channel_add_to_digest_map(chan);
-}
-
-/**
- * Duplicate a cell queue entry; this is a shallow copy intended for use
- * in channel_write_cell_queue_entry().
- */
-
-static cell_queue_entry_t *
-cell_queue_entry_dup(cell_queue_entry_t *q)
-{
- cell_queue_entry_t *rv = NULL;
-
- tor_assert(q);
-
- rv = tor_malloc(sizeof(*rv));
- memcpy(rv, q, sizeof(*rv));
-
- return rv;
-}
-
-/**
- * Free a cell_queue_entry_t; the handed_off parameter indicates whether
- * the contents were passed to the lower layer (it is responsible for
- * them) or not (we should free).
- */
-
-STATIC void
-cell_queue_entry_xfree(cell_queue_entry_t *q, int handed_off)
-{
- if (!q) return;
-
- if (!handed_off) {
- /*
- * If we handed it off, the recipient becomes responsible (or
- * with packed cells the channel_t subclass calls packed_cell
- * free after writing out its contents; see, e.g.,
- * channel_tls_write_packed_cell_method(). Otherwise, we have
- * to take care of it here if possible.
- */
- switch (q->type) {
- case CELL_QUEUE_FIXED:
- if (q->u.fixed.cell) {
- /*
- * There doesn't seem to be a cell_free() function anywhere in the
- * pre-channel code; just use tor_free()
- */
- tor_free(q->u.fixed.cell);
- }
- break;
- case CELL_QUEUE_PACKED:
- if (q->u.packed.packed_cell) {
- packed_cell_free(q->u.packed.packed_cell);
- }
- break;
- case CELL_QUEUE_VAR:
- if (q->u.var.var_cell) {
- /*
- * This one's in connection_or.c; it'd be nice to figure out the
- * whole flow of cells from one end to the other and factor the
- * cell memory management functions like this out of the specific
- * TLS lower layer.
- */
- var_cell_free(q->u.var.var_cell);
- }
- break;
- default:
- /*
- * Nothing we can do if we don't know the type; this will
- * have been warned about elsewhere.
- */
- break;
- }
- }
- tor_free(q);
-}
-
-#if 0
-/**
- * Check whether a cell queue entry is padding; this is a helper function
- * for channel_write_cell_queue_entry()
- */
-
-static int
-cell_queue_entry_is_padding(cell_queue_entry_t *q)
-{
- tor_assert(q);
-
- if (q->type == CELL_QUEUE_FIXED) {
- if (q->u.fixed.cell) {
- if (q->u.fixed.cell->command == CELL_PADDING ||
- q->u.fixed.cell->command == CELL_VPADDING) {
- return 1;
- }
- }
- } else if (q->type == CELL_QUEUE_VAR) {
- if (q->u.var.var_cell) {
- if (q->u.var.var_cell->command == CELL_PADDING ||
- q->u.var.var_cell->command == CELL_VPADDING) {
- return 1;
- }
- }
- }
-
- return 0;
-}
-#endif /* 0 */
-
-/**
- * Allocate a new cell queue entry for a fixed-size cell
- */
-
-static cell_queue_entry_t *
-cell_queue_entry_new_fixed(cell_t *cell)
-{
- cell_queue_entry_t *q = NULL;
-
- tor_assert(cell);
-
- q = tor_malloc(sizeof(*q));
- q->type = CELL_QUEUE_FIXED;
- q->u.fixed.cell = cell;
-
- return q;
-}
-
-/**
- * Allocate a new cell queue entry for a variable-size cell
- */
-
-static cell_queue_entry_t *
-cell_queue_entry_new_var(var_cell_t *var_cell)
-{
- cell_queue_entry_t *q = NULL;
-
- tor_assert(var_cell);
-
- q = tor_malloc(sizeof(*q));
- q->type = CELL_QUEUE_VAR;
- q->u.var.var_cell = var_cell;
-
- return q;
-}
-
-/**
- * Ask how big the cell contained in a cell_queue_entry_t is
- */
-
-static size_t
-channel_get_cell_queue_entry_size(channel_t *chan, cell_queue_entry_t *q)
-{
- size_t rv = 0;
-
- tor_assert(chan);
- tor_assert(q);
-
- switch (q->type) {
- case CELL_QUEUE_FIXED:
- rv = get_cell_network_size(chan->wide_circ_ids);
- break;
- case CELL_QUEUE_VAR:
- rv = get_var_cell_header_size(chan->wide_circ_ids) +
- (q->u.var.var_cell ? q->u.var.var_cell->payload_len : 0);
- break;
- case CELL_QUEUE_PACKED:
- rv = get_cell_network_size(chan->wide_circ_ids);
- break;
- default:
- tor_assert_nonfatal_unreached_once();
- }
-
- return rv;
-}
-
-/**
- * Write to a channel based on a cell_queue_entry_t
*
- * Given a cell_queue_entry_t filled out by the caller, try to send the cell
- * and queue it if we can't.
+ * Two errors are possible: either the channel is not open, or the lower
+ * layer (specialized channel) failed to write the cell. In both cases, it
+ * is the caller's responsibility to free the cell.
*/
-
-static void
-channel_write_cell_queue_entry(channel_t *chan, cell_queue_entry_t *q)
+static int
+write_packed_cell(channel_t *chan, packed_cell_t *cell)
{
- int result = 0, sent = 0;
- cell_queue_entry_t *tmp = NULL;
+ int ret = -1;
size_t cell_bytes;
tor_assert(chan);
return;
}
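
To make the ownership rule in the comment above concrete, here is a minimal
caller-side sketch. It assumes, as this patch suggests, that
channel_write_packed_cell() propagates write_packed_cell()'s return value
(0 on success, negative on failure); the wrapper example_send_packed_cell()
is hypothetical and only illustrates that the caller frees the cell on error.

/* Hypothetical caller: channel_write_packed_cell() and packed_cell_free()
 * are existing Tor functions; this wrapper and its return convention are
 * assumptions for illustration only. */
static int
example_send_packed_cell(channel_t *chan, packed_cell_t *cell)
{
  if (channel_write_packed_cell(chan, cell) < 0) {
    /* The channel was not open, or the lower layer refused the cell:
     * ownership stays with the caller, so free it here. */
    packed_cell_free(cell);
    return -1;
  }
  /* On success, the lower layer has taken ownership of the cell. */
  return 0;
}
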
+/* Test an outbound cell. The call stack is:
+ *  channel_flush_some_cells()
+ *    -> channel_flush_from_first_active_circuit()
+ *      -> channel_write_packed_cell()
+ *        -> write_packed_cell()
+ *          -> chan->write_packed_cell() function pointer.
+ *
+ * This test goes from a cell queued on a circuit up to the channel write
+ * handler that should put it on the connection outbuf. */
static void
-test_channel_flush(void *arg)
+test_channel_outbound_cell(void *arg)
{
- channel_t *ch = NULL;
- cell_t *cell = NULL;
- packed_cell_t *p_cell = NULL;
- var_cell_t *v_cell = NULL;
- int init_count;
-
- (void)arg;
+ int old_count;
+ channel_t *chan = NULL;
+ packed_cell_t *p_cell = NULL, *p_cell2 = NULL;
+ origin_circuit_t *circ = NULL;
+ cell_queue_t *queue;
- ch = new_fake_channel();
- tt_assert(ch);
+ (void) arg;
- /* Cache the original count */
- init_count = test_cells_written;
+  /* The channel will be freed, so mock this function to keep the scheduler
+   * from getting confused. */
+ MOCK(scheduler_release_channel, scheduler_release_channel_mock);
- /* Stop accepting so we can queue some */
- test_chan_accept_cells = 0;
+ /* Accept cells to lower layer */
+ test_chan_accept_cells = 1;
- /* Queue a regular cell */
- cell = tor_malloc_zero(sizeof(cell_t));
- make_fake_cell(cell);
- channel_write_cell(ch, cell);
- /* It should be queued, so assert that we didn't write it */
- tt_int_op(test_cells_written, OP_EQ, init_count);
-
- /* Queue a var cell */
- v_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
- make_fake_var_cell(v_cell);
- channel_write_var_cell(ch, v_cell);
- /* It should be queued, so assert that we didn't write it */
- tt_int_op(test_cells_written, OP_EQ, init_count);
-
- /* Try a packed cell now */
+  /* Set up a valid circuit to queue a cell. */
+ circ = origin_circuit_new();
+ tt_assert(circ);
+ /* Circuit needs an origin purpose to be considered origin. */
+ TO_CIRCUIT(circ)->purpose = CIRCUIT_PURPOSE_C_GENERAL;
+ TO_CIRCUIT(circ)->n_circ_id = 42;
+  /* This is the outbound test, so use the next channel queue. */
+ queue = &TO_CIRCUIT(circ)->n_chan_cells;
+  /* Set up packed cells to queue on the circuit. */
p_cell = packed_cell_new();
tt_assert(p_cell);
- channel_write_packed_cell(ch, p_cell);
- /* It should be queued, so assert that we didn't write it */
- tt_int_op(test_cells_written, OP_EQ, init_count);
-
- /* Now allow writes through again */
- test_chan_accept_cells = 1;
-
- /* ...and flush */
- channel_flush_cells(ch);
-
- /* All three should have gone through */
- tt_int_op(test_cells_written, OP_EQ, init_count + 3);
-
- done:
- tor_free(ch);
-
- return;
-}
-
-/**
- * Channel flush tests that require cmux mocking
- */
-
-static void
-test_channel_flushmux(void *arg)
-{
- channel_t *ch = NULL;
- int old_count, q_len_before, q_len_after;
- ssize_t result;
-
- (void)arg;
-
- /* Install mocks we need for this test */
- MOCK(channel_flush_from_first_active_circuit,
- chan_test_channel_flush_from_first_active_circuit_mock);
- MOCK(circuitmux_num_cells,
- chan_test_circuitmux_num_cells_mock);
-
- ch = new_fake_channel();
- tt_assert(ch);
- ch->cmux = circuitmux_alloc();
-
+ p_cell2 = packed_cell_new();
+ tt_assert(p_cell2);
+  /* Set up a channel to put the circuit on. */
+ chan = new_fake_channel();
+ tt_assert(chan);
+ chan->state = CHANNEL_STATE_OPENING;
+ channel_change_state_open(chan);
+ /* Outbound channel. */
+ channel_mark_outgoing(chan);
+  /* Try to register it so we can clean it up through the channel cleanup
+   * process. */
+ channel_register(chan);
+ tt_int_op(chan->registered, OP_EQ, 1);
+ /* Set EWMA policy so we can pick it when flushing. */
+ channel_set_cmux_policy_everywhere(&ewma_policy);
+ tt_ptr_op(circuitmux_get_policy(chan->cmux), OP_EQ, &ewma_policy);
+
+  /* Register the circuit in the channel's circuit ID map, which also
+   * attaches the circuit to the channel's cmux. */
+ circuit_set_n_circid_chan(TO_CIRCUIT(circ), 42, chan);
+ tt_int_op(channel_num_circuits(chan), OP_EQ, 1);
+ tt_assert(!TO_CIRCUIT(circ)->next_active_on_n_chan);
+ tt_assert(!TO_CIRCUIT(circ)->prev_active_on_n_chan);
+ /* Test the cmux state. */
+ tt_ptr_op(TO_CIRCUIT(circ)->n_mux, OP_EQ, chan->cmux);
+ tt_int_op(circuitmux_is_circuit_attached(chan->cmux, TO_CIRCUIT(circ)),
+ OP_EQ, 1);
+
+ /* Flush the channel without any cell on it. */
old_count = test_cells_written;
-
- test_target_cmux = ch->cmux;
- test_cmux_cells = 1;
-
- /* Enable cell acceptance */
- test_chan_accept_cells = 1;
-
- result = channel_flush_some_cells(ch, 1);
-
- tt_int_op(result, OP_EQ, 1);
+ ssize_t flushed = channel_flush_some_cells(chan, 1);
+ tt_i64_op(flushed, OP_EQ, 0);
+ tt_int_op(test_cells_written, OP_EQ, old_count);
+ tt_int_op(channel_more_to_flush(chan), OP_EQ, 0);
+ tt_int_op(circuitmux_num_active_circuits(chan->cmux), OP_EQ, 0);
+ tt_int_op(circuitmux_num_cells(chan->cmux), OP_EQ, 0);
+ tt_int_op(circuitmux_is_circuit_active(chan->cmux, TO_CIRCUIT(circ)),
+ OP_EQ, 0);
+ tt_u64_op(chan->n_cells_xmitted, OP_EQ, 0);
+ tt_u64_op(chan->n_bytes_xmitted, OP_EQ, 0);
+
+  /* Queue the cells onto the next-channel queue, which is the outbound
+   * direction. Then update the cmux so the circuit can be picked when
+   * flushing cells. */
+ cell_queue_append(queue, p_cell);
+ p_cell = NULL;
+ tt_int_op(queue->n, OP_EQ, 1);
+ cell_queue_append(queue, p_cell2);
+ p_cell2 = NULL;
+ tt_int_op(queue->n, OP_EQ, 2);
+
+ update_circuit_on_cmux(TO_CIRCUIT(circ), CELL_DIRECTION_OUT);
+ tt_int_op(circuitmux_num_active_circuits(chan->cmux), OP_EQ, 1);
+ tt_int_op(circuitmux_num_cells(chan->cmux), OP_EQ, 2);
+ tt_int_op(circuitmux_is_circuit_active(chan->cmux, TO_CIRCUIT(circ)),
+ OP_EQ, 1);
+
+  /* From this point on, we have queued cells on an active circuit attached
+   * to the channel's cmux. */
+
+ /* Flush the first cell. This is going to go down the call stack. */
+ old_count = test_cells_written;
+ flushed = channel_flush_some_cells(chan, 1);
+ tt_i64_op(flushed, OP_EQ, 1);
tt_int_op(test_cells_written, OP_EQ, old_count + 1);
- tt_int_op(test_cmux_cells, OP_EQ, 0);
-
- /* Now try it without accepting to force them into the queue */
- test_chan_accept_cells = 0;
- test_cmux_cells = 1;
- q_len_before = chan_cell_queue_len(&(ch->outgoing_queue));
-
- result = channel_flush_some_cells(ch, 1);
-
- /* We should not have actually flushed any */
- tt_int_op(result, OP_EQ, 0);
+ tt_int_op(circuitmux_num_cells(chan->cmux), OP_EQ, 1);
+ tt_int_op(channel_more_to_flush(chan), OP_EQ, 1);
+ /* Circuit should remain active because there is a second cell queued. */
+ tt_int_op(circuitmux_is_circuit_active(chan->cmux, TO_CIRCUIT(circ)),
+ OP_EQ, 1);
+ /* Should still be attached. */
+ tt_int_op(circuitmux_is_circuit_attached(chan->cmux, TO_CIRCUIT(circ)),
+ OP_EQ, 1);
+ tt_u64_op(chan->n_cells_xmitted, OP_EQ, 1);
+ tt_u64_op(chan->n_bytes_xmitted, OP_EQ, get_cell_network_size(0));
+
+ /* Flush second cell. This is going to go down the call stack. */
+ old_count = test_cells_written;
+ flushed = channel_flush_some_cells(chan, 1);
+ tt_i64_op(flushed, OP_EQ, 1);
tt_int_op(test_cells_written, OP_EQ, old_count + 1);
- /* But we should have gotten to the fake cellgen loop */
- tt_int_op(test_cmux_cells, OP_EQ, 0);
- /* ...and we should have a queued cell */
- q_len_after = chan_cell_queue_len(&(ch->outgoing_queue));
- tt_int_op(q_len_after, OP_EQ, q_len_before + 1);
-
- /* Now accept cells again and drain the queue */
- test_chan_accept_cells = 1;
- channel_flush_cells(ch);
- tt_int_op(test_cells_written, OP_EQ, old_count + 2);
- tt_int_op(chan_cell_queue_len(&(ch->outgoing_queue)), OP_EQ, 0);
-
- test_target_cmux = NULL;
- test_cmux_cells = 0;
+ tt_int_op(circuitmux_num_cells(chan->cmux), OP_EQ, 0);
+ tt_int_op(channel_more_to_flush(chan), OP_EQ, 0);
+  /* With no more cells queued, the circuit should become inactive. */
+ tt_int_op(circuitmux_is_circuit_active(chan->cmux, TO_CIRCUIT(circ)),
+ OP_EQ, 0);
+ /* Should still be attached. */
+ tt_int_op(circuitmux_is_circuit_attached(chan->cmux, TO_CIRCUIT(circ)),
+ OP_EQ, 1);
+ tt_u64_op(chan->n_cells_xmitted, OP_EQ, 2);
+ tt_u64_op(chan->n_bytes_xmitted, OP_EQ, get_cell_network_size(0) * 2);
done:
- if (ch)
- circuitmux_free(ch->cmux);
- tor_free(ch);
-
- UNMOCK(channel_flush_from_first_active_circuit);
- UNMOCK(circuitmux_num_cells);
-
- test_chan_accept_cells = 0;
-
- return;
+ if (circ) {
- circuit_free(TO_CIRCUIT(circ));
++ circuit_free_(TO_CIRCUIT(circ));
+ }
+ tor_free(p_cell);
+ channel_free_all();
+ UNMOCK(scheduler_release_channel);
}
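
As a rough illustration of the bottom of the call stack exercised by this
test, a fake channel's write method could look like the sketch below. The
test_chan_accept_cells flag and test_cells_written counter mirror the ones
the test asserts on; the name fake_write_packed_cell and the exact return
convention (negative on refusal, matching write_packed_cell() above) are
assumptions rather than the test suite's actual mock.

/* Illustrative lower-layer write method; not the real test mock. */
static int
fake_write_packed_cell(channel_t *chan, packed_cell_t *cell)
{
  (void) chan;
  if (!test_chan_accept_cells) {
    /* Refuse the cell: the channel layer keeps ownership of it. */
    return -1;
  }
  /* "Write" the cell by freeing it and bumping the counter the test
   * checks after each flush. */
  packed_cell_free(cell);
  ++test_cells_written;
  return 0;
}
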
+/* Test an inbound cell. The call stack is:
+ *  channel_process_cell()
+ *    -> chan->cell_handler()
+ *
+ * This test checks that an inbound cell can be processed down to the
+ * channel's cell handler. */
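
For context on this short inbound path, a handler of the kind
chan->cell_handler points to could be as simple as the sketch below. The
handler name and the test_cells_received counter are hypothetical; only the
handler shape (a void function taking a channel and a cell, as declared by
channel_cell_handler_fn_ptr in channel.h) is assumed. A test would install
such a handler via channel_set_cell_handlers() before handing a cell to
channel_process_cell().

/* Hypothetical inbound handler: records that a cell made it down to the
 * channel's cell handler. */
static int test_cells_received = 0;

static void
example_cell_handler(channel_t *chan, cell_t *cell)
{
  (void) chan;
  (void) cell;
  ++test_cells_received;
}
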
static void
-test_channel_incoming(void *arg)
+test_channel_inbound_cell(void *arg)
{
- channel_t *ch = NULL;
+ channel_t *chan = NULL;
cell_t *cell = NULL;
- var_cell_t *var_cell = NULL;
int old_count;
- (void)arg;
+ (void) arg;
- /* Mock these for duration of the test */
- MOCK(scheduler_channel_doesnt_want_writes,
- scheduler_channel_doesnt_want_writes_mock);
- MOCK(scheduler_release_channel,
- scheduler_release_channel_mock);
+  /* The channel will be freed, so mock this function to keep the scheduler
+   * from getting confused. */
+ MOCK(scheduler_release_channel, scheduler_release_channel_mock);
/* Accept cells to lower layer */
test_chan_accept_cells = 1;