return eb64_entry(&ar->node, struct quic_arng_node, first)->last;
}
+/* Increment the reference counter of <pkt> */
+static inline void quic_tx_packet_refinc(struct quic_tx_packet *pkt)
+{
+ HA_ATOMIC_ADD(&pkt->refcnt, 1);
+}
+
+/* Decrement the reference counter of <pkt> */
+static inline void quic_tx_packet_refdec(struct quic_tx_packet *pkt)
+{
+ if (!HA_ATOMIC_SUB_FETCH(&pkt->refcnt, 1)) {
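+ /* Last reference dropped: no frame may still be attached to this packet. */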
+ BUG_ON(!LIST_ISEMPTY(&pkt->frms));
+ pool_free(pool_head_quic_tx_packet, pkt);
+ }
+}
+
/* Discard <pktns> packet number space attached to <qc> QUIC connection.
 * Its loss information is reset. Deduct the outstanding bytes for this
 * packet number space from the outstanding bytes for the path of this
node = eb64_next(node);
list_for_each_entry_safe(frm, frmbak, &pkt->frms, list) {
LIST_DELETE(&frm->list);
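+ /* Release the reference this frame holds on its TX packet. */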
+ quic_tx_packet_refdec(frm->pkt);
pool_free(pool_head_quic_frame, frm);
}
eb64_delete(&pkt->pn_node);
- pool_free(pool_head_quic_tx_packet, pkt);
+ quic_tx_packet_refdec(pkt);
}
}
} while (refcnt && !HA_ATOMIC_CAS(&pkt->refcnt, &refcnt, refcnt - 1));
}
-/* Increment the reference counter of <pkt> */
-static inline void quic_tx_packet_refinc(struct quic_tx_packet *pkt)
-{
- HA_ATOMIC_ADD(&pkt->refcnt, 1);
-}
-
-/* Decrement the reference counter of <pkt> */
-static inline void quic_tx_packet_refdec(struct quic_tx_packet *pkt)
-{
- if (!HA_ATOMIC_SUB_FETCH(&pkt->refcnt, 1))
- pool_free(pool_head_quic_tx_packet, pkt);
-}
-
/* Delete all RX packets for <qel> QUIC encryption level */
static inline void qc_el_rx_pkts_del(struct quic_enc_level *qel)
{
frm_node = eb64_first(&qcs->tx.acked_frms);
while (frm_node) {
struct quic_stream *strm;
+ struct quic_frame *frm;
strm = eb64_entry(&frm_node->node, struct quic_stream, offset);
if (strm->offset.key > qcs->tx.ack_offset)
frm_node = eb64_next(frm_node);
eb64_delete(&strm->offset);
- /* TODO
- *
- * memleak: The quic_frame container of the quic_stream should
- * be liberated here, as in qc_treat_acked_tx_frm. However this
- * code seems to cause a bug which can lead to interrupted
- * transfers.
- *
- * struct quic_frame frm = container_of(strm, struct quic_frame, stream);
- * LIST_DELETE(&frm->list);
- * pool_free(pool_head_quic_frame, frm);
- */
+ frm = container_of(strm, struct quic_frame, stream);
+ LIST_DELETE(&frm->list);
+ quic_tx_packet_refdec(frm->pkt);
+ pool_free(pool_head_quic_frame, frm);
}
return ret;
}
LIST_DELETE(&frm->list);
+ quic_tx_packet_refdec(frm->pkt);
pool_free(pool_head_quic_frame, frm);
}
else {
break;
default:
LIST_DELETE(&frm->list);
+ quic_tx_packet_refdec(frm->pkt);
pool_free(pool_head_quic_frame, frm);
}
list_for_each_entry_safe(frm, frmbak, pkt_frm_list, list) {
LIST_DELETE(&frm->list);
+ quic_tx_packet_refdec(frm->pkt);
+ /* The frame is not freed here, only detached from its packet. */
+ frm->pkt = NULL;
TRACE_PROTO("to resend frame", QUIC_EV_CONN_PRSAFRM, qc, frm);
LIST_APPEND(&tmp, &frm->list);
}
LIST_SPLICE(pktns_frm_list, &tmp);
}
+/* Free <pkt> TX packet and its attached frames.
+ * It is the caller's responsibility to remove this packet from
+ * any data structure it was possibly attached to.
+ */
+static inline void free_quic_tx_packet(struct quic_tx_packet *pkt)
+{
+ struct quic_frame *frm, *frmbak;
+
+ if (!pkt)
+ return;
+
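+ /* Free any frames still attached to this packet. */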
+ list_for_each_entry_safe(frm, frmbak, &pkt->frms, list) {
+ LIST_DELETE(&frm->list);
+ pool_free(pool_head_quic_frame, frm);
+ }
+ pool_free(pool_head_quic_tx_packet, pkt);
+}
+
/* Free the TX packets of <pkts> list */
static inline void free_quic_tx_pkts(struct list *pkts)
{
list_for_each_entry_safe(pkt, tmp, pkts, list) {
LIST_DELETE(&pkt->list);
eb64_delete(&pkt->pn_node);
- quic_tx_packet_refdec(pkt);
+ free_quic_tx_packet(pkt);
}
}
list_for_each_entry_safe(frm, frmbak, &pkt->frms, list) {
TRACE_PROTO("to resend frame", QUIC_EV_CONN_PRSAFRM, qc, frm);
LIST_DELETE(&frm->list);
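+ /* Detach the frame from its packet before queuing it for resend. */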
+ frm->pkt = NULL;
LIST_APPEND(tmp, &frm->list);
}
tmpbuf.size = tmpbuf.data = dglen;
TRACE_PROTO("to send", QUIC_EV_CONN_SPPKTS, qc);
- for (pkt = first_pkt; pkt; pkt = pkt->next)
- quic_tx_packet_refinc(pkt);
- if(qc_snd_buf(qc, &tmpbuf, tmpbuf.data, 0) <= 0) {
- for (pkt = first_pkt; pkt; pkt = pkt->next)
- quic_tx_packet_refdec(pkt);
+ if (qc_snd_buf(qc, &tmpbuf, tmpbuf.data, 0) <= 0)
break;
- }
cb_del(cbuf, dglen + headlen);
qc->tx.bytes += tmpbuf.data;
qc_set_timer(qc);
TRACE_PROTO("sent pkt", QUIC_EV_CONN_SPPKTS, qc, pkt);
next_pkt = pkt->next;
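+ /* Take a reference for the packet now stored in the pktns TX packets tree. */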
+ quic_tx_packet_refinc(pkt);
eb64_insert(&pkt->pktns->tx.pkts, &pkt->pn_node);
- quic_tx_packet_refdec(pkt);
}
}
int64_t rx_largest_acked_pn;
int add_ping_frm;
struct list frm_list = LIST_HEAD_INIT(frm_list);
+ struct quic_frame *cf;
/* Length field value with CRYPTO frames if present. */
len_frms = 0;
/* Ack-eliciting frames */
if (!LIST_ISEMPTY(&frm_list)) {
- struct quic_frame *cf;
-
list_for_each_entry(cf, &frm_list, list) {
if (!qc_build_frm(&pos, end, cf, pkt, qc)) {
ssize_t room = end - pos;
qc, NULL, NULL, &room);
break;
}
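+ /* Attach the frame to the packet: the frame takes a reference on it. */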
+ quic_tx_packet_refinc(pkt);
+ cf->pkt = pkt;
}
}
LIST_INIT(&pkt->frms);
pkt->next = NULL;
pkt->largest_acked_pn = -1;
- pkt->refcnt = 1;
-}
-
-/* Free <pkt> TX packet which has not already attached to any tree. */
-static inline void free_quic_tx_packet(struct quic_tx_packet *pkt)
-{
- struct quic_frame *frm, *frmbak;
-
- if (!pkt)
- return;
-
- list_for_each_entry_safe(frm, frmbak, &pkt->frms, list) {
- LIST_DELETE(&frm->list);
- pool_free(pool_head_quic_frame, frm);
- }
- quic_tx_packet_refdec(pkt);
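+ /* The counter starts at 0: references are taken when frames are attached
+ * to this packet and when it is inserted into its packet number space tree.
+ */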
+ pkt->refcnt = 0;
}
/* Build a packet into <buf> packet buffer with <pkt_type> as packet
return pkt;
err:
+ /* TODO: what about the frames which have already been
+ * built for this packet?
+ */
free_quic_tx_packet(pkt);
TRACE_DEVEL("leaving in error", QUIC_EV_CONN_HPKT, qc);
return NULL;