int make_proxy_line_v1(char *buf, int buf_len, struct sockaddr_storage *src, struct sockaddr_storage *dst);
int make_proxy_line_v2(char *buf, int buf_len, struct server *srv, struct connection *remote);
-int conn_subscribe(struct connection *conn, int event_type, void *param);
-int conn_unsubscribe(struct connection *conn, int event_type, void *param);
+int conn_subscribe(struct connection *conn, void *xprt_ctx, int event_type, void *param);
+int conn_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, void *param);
/* receive a NetScaler Client IP insertion header over a connection */
int conn_recv_netscaler_cip(struct connection *conn, int flag);
int ret = 0;
if (!conn_xprt_ready(conn) && conn->xprt && conn->xprt->init)
- ret = conn->xprt->init(conn);
+ ret = conn->xprt->init(conn, &conn->xprt_ctx);
if (ret >= 0)
conn->flags |= CO_FL_XPRT_READY;
{
if ((conn->flags & (CO_FL_XPRT_READY|CO_FL_XPRT_TRACKED)) == CO_FL_XPRT_READY) {
if (conn->xprt->close)
- conn->xprt->close(conn);
+ conn->xprt->close(conn, conn->xprt_ctx);
conn->flags &= ~CO_FL_XPRT_READY;
}
}
/* clean data-layer shutdown */
if (c->xprt && c->xprt->shutw)
- c->xprt->shutw(c, 1);
+ c->xprt->shutw(c, c->xprt_ctx, 1);
}
static inline void conn_xprt_shutw_hard(struct connection *c)
/* unclean data-layer shutdown */
if (c->xprt && c->xprt->shutw)
- c->xprt->shutw(c, 0);
+ c->xprt->shutw(c, c->xprt_ctx, 0);
}
/* shut read */
{
if (!conn_xprt_ready(conn) || !conn->xprt->get_alpn)
return 0;
- return conn->xprt->get_alpn(conn, str, len);
+ return conn->xprt->get_alpn(conn, conn->xprt_ctx, str, len);
}
/* registers proto mux list <list>. Modifies the list element! */
* and the other ones are used to setup and release the transport layer.
*/
struct xprt_ops {
-	size_t (*rcv_buf)(struct connection *conn, struct buffer *buf, size_t count, int flags); /* recv callback */
-	size_t (*snd_buf)(struct connection *conn, const struct buffer *buf, size_t count, int flags); /* send callback */
-	int (*rcv_pipe)(struct connection *conn, struct pipe *pipe, unsigned int count); /* recv-to-pipe callback */
-	int (*snd_pipe)(struct connection *conn, struct pipe *pipe); /* send-to-pipe callback */
-	void (*shutr)(struct connection *, int); /* shutr function */
-	void (*shutw)(struct connection *, int); /* shutw function */
-	void (*close)(struct connection *); /* close the transport layer */
-	int (*init)(struct connection *conn); /* initialize the transport layer */
+	size_t (*rcv_buf)(struct connection *conn, void *xprt_ctx, struct buffer *buf, size_t count, int flags); /* recv callback */
+	size_t (*snd_buf)(struct connection *conn, void *xprt_ctx, const struct buffer *buf, size_t count, int flags); /* send callback */
+	int (*rcv_pipe)(struct connection *conn, void *xprt_ctx, struct pipe *pipe, unsigned int count); /* recv-to-pipe callback */
+	int (*snd_pipe)(struct connection *conn, void *xprt_ctx, struct pipe *pipe); /* send-to-pipe callback */
+	void (*shutr)(struct connection *conn, void *xprt_ctx, int drain); /* shutr function; <drain> is non-zero when draining */
+	void (*shutw)(struct connection *conn, void *xprt_ctx, int clean); /* shutw function; <clean> is non-zero for a clean shutdown */
+	void (*close)(struct connection *conn, void *xprt_ctx); /* close the transport layer */
+	int (*init)(struct connection *conn, void **ctx); /* initialize the transport layer and set <*ctx> */
	int (*prepare_bind_conf)(struct bind_conf *conf); /* prepare a whole bind_conf */
	void (*destroy_bind_conf)(struct bind_conf *conf); /* destroy a whole bind_conf */
	int (*prepare_srv)(struct server *srv); /* prepare a server context */
	void (*destroy_srv)(struct server *srv); /* destroy a server context */
-	int (*get_alpn)(const struct connection *conn, const char **str, int *len); /* get application layer name */
+	int (*get_alpn)(const struct connection *conn, void *xprt_ctx, const char **str, int *len); /* get application layer name */
	char name[8]; /* transport layer name, zero-terminated */
-	int (*subscribe)(struct connection *conn, int event_type, void *param); /* Subscribe to events, such as "being able to send" */
-	int (*unsubscribe)(struct connection *conn, int event_type, void *param); /* Unsubscribe to events */
+	int (*subscribe)(struct connection *conn, void *xprt_ctx, int event_type, void *param); /* Subscribe to events, such as "being able to send" */
+	int (*unsubscribe)(struct connection *conn, void *xprt_ctx, int event_type, void *param); /* Unsubscribe from events */
};
/* mux_ops describes the mux operations, which are to be performed at the
return ret;
}
-int conn_unsubscribe(struct connection *conn, int event_type, void *param)
+int conn_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, void *param)
{
struct wait_event *sw;
return 0;
}
-int conn_subscribe(struct connection *conn, int event_type, void *param)
+int conn_subscribe(struct connection *conn, void *xprt_ctx, int event_type, void *param)
{
struct wait_event *sw;
h1s_destroy(h1c->h1s);
if (conn && h1c->wait_event.events != 0)
- conn->xprt->unsubscribe(conn, h1c->wait_event.events,
+ conn->xprt->unsubscribe(conn, conn->xprt_ctx, h1c->wait_event.events,
&h1c->wait_event);
pool_free(pool_head_h1c, h1c);
}
*/
h1c->ibuf.head = sizeof(struct htx);
}
- ret = conn->xprt->rcv_buf(conn, &h1c->ibuf, max, 0);
+ ret = conn->xprt->rcv_buf(conn, conn->xprt_ctx, &h1c->ibuf, max, 0);
}
if (ret > 0) {
rcvd = 1;
goto end;
}
- conn->xprt->subscribe(conn, SUB_RETRY_RECV, &h1c->wait_event);
+ conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &h1c->wait_event);
end:
if (ret > 0 || (conn->flags & CO_FL_ERROR) || conn_xprt_read0_pending(conn))
if (h1c->flags & H1C_F_CS_WAIT_CONN) {
if (!(h1c->wait_event.events & SUB_RETRY_SEND))
- conn->xprt->subscribe(conn, SUB_RETRY_SEND, &h1c->wait_event);
+ conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_SEND, &h1c->wait_event);
return 0;
}
if (h1c->flags & H1C_F_OUT_FULL)
flags |= CO_SFL_MSG_MORE;
- ret = conn->xprt->snd_buf(conn, &h1c->obuf, b_data(&h1c->obuf), flags);
+ ret = conn->xprt->snd_buf(conn, conn->xprt_ctx, &h1c->obuf, b_data(&h1c->obuf), flags);
if (ret > 0) {
h1c->flags &= ~H1C_F_OUT_FULL;
b_del(&h1c->obuf, ret);
h1_shutw_conn(conn, CS_SHW_NORMAL);
}
else if (!(h1c->wait_event.events & SUB_RETRY_SEND))
- conn->xprt->subscribe(conn, SUB_RETRY_SEND, &h1c->wait_event);
+ conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_SEND, &h1c->wait_event);
return sent;
}
if (cs->flags & CS_FL_SHR)
return;
if (conn_xprt_ready(cs->conn) && cs->conn->xprt->shutr)
- cs->conn->xprt->shutr(cs->conn, (mode == CS_SHR_DRAIN));
+ cs->conn->xprt->shutr(cs->conn, cs->conn->xprt_ctx,
+ (mode == CS_SHR_DRAIN));
if ((cs->conn->flags & (CO_FL_SOCK_RD_SH|CO_FL_SOCK_WR_SH)) == (CO_FL_SOCK_RD_SH|CO_FL_SOCK_WR_SH))
h1c->flags = (h1c->flags & ~H1C_F_CS_SHUTW_NOW) | H1C_F_CS_SHUTDOWN;
}
if ((h1m->state != H1_MSG_DATA && h1m->state != H1_MSG_TUNNEL) ||
(h1m->state == H1_MSG_DATA && !h1m->curr_len)) {
h1s->flags &= ~(H1S_F_BUF_FLUSH|H1S_F_SPLICED_DATA);
- cs->conn->xprt->subscribe(cs->conn, SUB_RETRY_RECV, &h1s->h1c->wait_event);
+ cs->conn->xprt->subscribe(cs->conn, cs->conn->xprt_ctx, SUB_RETRY_RECV, &h1s->h1c->wait_event);
goto end;
}
h1s->flags |= H1S_F_SPLICED_DATA;
if (h1m->state == H1_MSG_DATA && count > h1m->curr_len)
count = h1m->curr_len;
- ret = cs->conn->xprt->rcv_pipe(cs->conn, pipe, count);
+ ret = cs->conn->xprt->rcv_pipe(cs->conn, cs->conn->xprt_ctx, pipe, count);
if (h1m->state == H1_MSG_DATA && ret > 0) {
h1m->curr_len -= ret;
if (!h1m->curr_len)
if (b_data(&h1s->h1c->obuf))
goto end;
- ret = cs->conn->xprt->snd_pipe(cs->conn, pipe);
+ ret = cs->conn->xprt->snd_pipe(cs->conn, cs->conn->xprt_ctx, pipe);
end:
if (pipe->data) {
if (!(h1s->h1c->wait_event.events & SUB_RETRY_SEND))
- cs->conn->xprt->subscribe(cs->conn, SUB_RETRY_SEND, &h1s->h1c->wait_event);
+ cs->conn->xprt->subscribe(cs->conn, cs->conn->xprt_ctx, SUB_RETRY_SEND, &h1s->h1c->wait_event);
}
return ret;
}
if (h2c->wait_event.task)
tasklet_free(h2c->wait_event.task);
if (h2c->wait_event.events != 0)
- conn->xprt->unsubscribe(conn, h2c->wait_event.events,
+ conn->xprt->unsubscribe(conn, conn->xprt_ctx, h2c->wait_event.events,
&h2c->wait_event);
pool_free(pool_head_h2c, h2c);
max = b_room(buf);
if (max)
- ret = conn->xprt->rcv_buf(conn, buf, max, 0);
+ ret = conn->xprt->rcv_buf(conn, conn->xprt_ctx, buf, max, 0);
else
ret = 0;
} while (ret > 0);
if (h2_recv_allowed(h2c) && (b_data(buf) < buf->size))
- conn->xprt->subscribe(conn, SUB_RETRY_RECV, &h2c->wait_event);
+ conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &h2c->wait_event);
if (!b_data(buf)) {
h2_release_buf(h2c, &h2c->dbuf);
flags |= CO_SFL_MSG_MORE;
if (b_data(&h2c->mbuf)) {
- int ret = conn->xprt->snd_buf(conn, &h2c->mbuf, b_data(&h2c->mbuf), flags);
+ int ret = conn->xprt->snd_buf(conn, conn->xprt_ctx, &h2c->mbuf, b_data(&h2c->mbuf), flags);
if (!ret)
break;
sent = 1;
return sent;
schedule:
if (!(h2c->wait_event.events & SUB_RETRY_SEND))
- conn->xprt->subscribe(conn, SUB_RETRY_SEND, &h2c->wait_event);
+ conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_SEND, &h2c->wait_event);
return sent;
}
h2c->flags |= H2_CF_GOAWAY_FAILED;
if (b_data(&h2c->mbuf) && !(h2c->flags & H2_CF_GOAWAY_FAILED) && conn_xprt_ready(h2c->conn)) {
- int ret = h2c->conn->xprt->snd_buf(h2c->conn, &h2c->mbuf, b_data(&h2c->mbuf), 0);
+ int ret = h2c->conn->xprt->snd_buf(h2c->conn, h2c->conn->xprt_ctx, &h2c->mbuf, b_data(&h2c->mbuf), 0);
if (ret > 0) {
b_del(&h2c->mbuf, ret);
b_realign_if_empty(&h2c->mbuf);
if (ctx->conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH))
mux_pt_destroy(ctx);
else
- ctx->conn->xprt->subscribe(ctx->conn, SUB_RETRY_RECV,
+ ctx->conn->xprt->subscribe(ctx->conn, ctx->conn->xprt_ctx, SUB_RETRY_RECV,
&ctx->wait_event);
return NULL;
struct conn_stream *cs;
struct mux_pt_ctx *ctx = conn->ctx;
-	conn->xprt->unsubscribe(conn, SUB_RETRY_RECV, &ctx->wait_event);
+	conn->xprt->unsubscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &ctx->wait_event);
cs = cs_new(conn);
if (!cs)
goto fail;
if (conn->owner != NULL &&
!(conn->flags & (CO_FL_ERROR | CO_FL_SOCK_RD_SH | CO_FL_SOCK_WR_SH))) {
ctx->cs = NULL;
- conn->xprt->subscribe(conn, SUB_RETRY_RECV, &ctx->wait_event);
+ conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &ctx->wait_event);
} else
/* There's no session attached to that connection, destroy it */
mux_pt_destroy(ctx);
return;
cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
if (conn_xprt_ready(cs->conn) && cs->conn->xprt->shutr)
- cs->conn->xprt->shutr(cs->conn, (mode == CS_SHR_DRAIN));
+ cs->conn->xprt->shutr(cs->conn, cs->conn->xprt_ctx,
+ (mode == CS_SHR_DRAIN));
if (cs->flags & CS_FL_SHW)
conn_full_close(cs->conn);
/* Maybe we've been put in the list of available idle connections,
if (cs->flags & CS_FL_SHW)
return;
if (conn_xprt_ready(cs->conn) && cs->conn->xprt->shutw)
- cs->conn->xprt->shutw(cs->conn, (mode == CS_SHW_NORMAL));
+ cs->conn->xprt->shutw(cs->conn, cs->conn->xprt_ctx,
+ (mode == CS_SHW_NORMAL));
if (!(cs->flags & CS_FL_SHR))
conn_sock_shutw(cs->conn, (mode == CS_SHW_NORMAL));
else
return 0;
}
b_realign_if_empty(buf);
- ret = cs->conn->xprt->rcv_buf(cs->conn, buf, count, flags);
+ ret = cs->conn->xprt->rcv_buf(cs->conn, cs->conn->xprt_ctx, buf, count, flags);
if (conn_xprt_read0_pending(cs->conn)) {
if (ret == 0)
cs->flags &= ~(CS_FL_RCV_MORE | CS_FL_WANT_ROOM);
if (cs->conn->flags & CO_FL_HANDSHAKE)
return 0;
- ret = cs->conn->xprt->snd_buf(cs->conn, buf, count, flags);
+ ret = cs->conn->xprt->snd_buf(cs->conn, cs->conn->xprt_ctx, buf, count, flags);
if (ret > 0)
b_del(buf, ret);
/* Called from the upper layer, to subscribe to events */
static int mux_pt_subscribe(struct conn_stream *cs, int event_type, void *param)
{
- return (cs->conn->xprt->subscribe(cs->conn, event_type, param));
+ return (cs->conn->xprt->subscribe(cs->conn, cs->conn->xprt_ctx, event_type, param));
}
static int mux_pt_unsubscribe(struct conn_stream *cs, int event_type, void *param)
{
- return (cs->conn->xprt->unsubscribe(cs->conn, event_type, param));
+ return (cs->conn->xprt->unsubscribe(cs->conn, cs->conn->xprt_ctx, event_type, param));
}
#if defined(CONFIG_HAP_LINUX_SPLICE)
{
int ret;
- ret = cs->conn->xprt->rcv_pipe(cs->conn, pipe, count);
+ ret = cs->conn->xprt->rcv_pipe(cs->conn, cs->conn->xprt_ctx, pipe, count);
if (conn_xprt_read0_pending(cs->conn))
cs->flags |= CS_FL_EOS;
if (cs->conn->flags & CO_FL_ERROR)
static int mux_pt_snd_pipe(struct conn_stream *cs, struct pipe *pipe)
{
- return (cs->conn->xprt->snd_pipe(cs->conn, pipe));
+ return (cs->conn->xprt->snd_pipe(cs->conn, cs->conn->xprt_ctx, pipe));
}
#endif
* connection flags are updated (error, read0, wait_room, wait_data).
* The caller must have previously allocated the pipe.
*/
-int raw_sock_to_pipe(struct connection *conn, struct pipe *pipe, unsigned int count)
+int raw_sock_to_pipe(struct connection *conn, void *xprt_ctx, struct pipe *pipe, unsigned int count)
{
#ifndef ASSUME_SPLICE_WORKS
static THREAD_LOCAL int splice_detects_close;
/* Send as many bytes as possible from the pipe to the connection's socket.
*/
-int raw_sock_from_pipe(struct connection *conn, struct pipe *pipe)
+int raw_sock_from_pipe(struct connection *conn, void *xprt_ctx, struct pipe *pipe)
{
int ret, done;
* errno is cleared before starting so that the caller knows that if it spots an
* error without errno, it's pending and can be retrieved via getsockopt(SO_ERROR).
*/
-static size_t raw_sock_to_buf(struct connection *conn, struct buffer *buf, size_t count, int flags)
+static size_t raw_sock_to_buf(struct connection *conn, void *xprt_ctx, struct buffer *buf, size_t count, int flags)
{
ssize_t ret;
size_t try, done = 0;
* is responsible for this. It's up to the caller to update the buffer's contents
* based on the return value.
*/
-static size_t raw_sock_from_buf(struct connection *conn, const struct buffer *buf, size_t count, int flags)
+static size_t raw_sock_from_buf(struct connection *conn, void *xprt_ctx, const struct buffer *buf, size_t count, int flags)
{
ssize_t ret;
size_t try, done;
return done;
}
+/* Transport-layer subscribe callback for raw sockets: forwards the event
+ * subscription, together with the transport context, to the generic
+ * conn_subscribe(). Returns its result.
+ */
+static int raw_sock_subscribe(struct connection *conn, void *xprt_ctx, int event_type, void *param)
+{
+	return conn_subscribe(conn, xprt_ctx, event_type, param);
+}
+
+/* Transport-layer unsubscribe callback for raw sockets: forwards the event
+ * unsubscription, together with the transport context, to the generic
+ * conn_unsubscribe(). Returns its result.
+ */
+static int raw_sock_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, void *param)
+{
+	return conn_unsubscribe(conn, xprt_ctx, event_type, param);
+}
/* transport-layer operations for RAW sockets */
static struct xprt_ops raw_sock = {
.snd_buf = raw_sock_from_buf,
.rcv_buf = raw_sock_to_buf,
- .subscribe = conn_subscribe,
- .unsubscribe = conn_unsubscribe,
+ .subscribe = raw_sock_subscribe,
+ .unsubscribe = raw_sock_unsubscribe,
#if defined(CONFIG_HAP_LINUX_SPLICE)
.rcv_pipe = raw_sock_to_pipe,
.snd_pipe = raw_sock_from_pipe,
* handshake flag on the connection. It is safe to call it multiple times.
* It returns 0 on success and -1 in error case.
*/
-static int ssl_sock_init(struct connection *conn)
+static int ssl_sock_init(struct connection *conn, void **xprt_ctx)
{
struct ssl_sock_ctx *ctx;
/* already initialized */
- if (conn->xprt_ctx)
+ if (*xprt_ctx)
return 0;
if (!conn_ctrl_ready(conn))
/* leave init state and start handshake */
conn->flags |= CO_FL_SSL_WAIT_HS | CO_FL_WAIT_L6_CONN;
- conn->xprt_ctx = ctx;
_HA_ATOMIC_ADD(&sslconns, 1);
_HA_ATOMIC_ADD(&totalsslconns, 1);
+ *xprt_ctx = ctx;
return 0;
}
else if (objt_listener(conn->target)) {
_HA_ATOMIC_ADD(&sslconns, 1);
_HA_ATOMIC_ADD(&totalsslconns, 1);
- conn->xprt_ctx = ctx;
+ *xprt_ctx = ctx;
return 0;
}
/* don't know how to handle such a target */
return 0;
}
-static int ssl_subscribe(struct connection *conn, int event_type, void *param)
+/* Transport-layer subscribe callback for SSL: forwards to the generic
+ * conn_subscribe(), passing the transport context along for consistency
+ * with the raw_sock wrappers.
+ */
+static int ssl_subscribe(struct connection *conn, void *xprt_ctx, int event_type, void *param)
{
-	return conn_subscribe(conn, event_type, param);
+	return conn_subscribe(conn, xprt_ctx, event_type, param);
}
-static int ssl_unsubscribe(struct connection *conn, int event_type, void *param)
+/* Transport-layer unsubscribe callback for SSL: forwards to the generic
+ * conn_unsubscribe(), passing the transport context along for consistency
+ * with the raw_sock wrappers.
+ */
+static int ssl_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, void *param)
{
-	return conn_unsubscribe(conn, event_type, param);
+	return conn_unsubscribe(conn, xprt_ctx, event_type, param);
}
/* Receive up to <count> bytes from connection <conn>'s socket and store them
* avoiding the call if inappropriate. The function does not call the
* connection's polling update function, so the caller is responsible for this.
*/
-static size_t ssl_sock_to_buf(struct connection *conn, struct buffer *buf, size_t count, int flags)
+static size_t ssl_sock_to_buf(struct connection *conn, void *xprt_ctx, struct buffer *buf, size_t count, int flags)
{
- struct ssl_sock_ctx *ctx = conn->xprt_ctx;
+ struct ssl_sock_ctx *ctx = xprt_ctx;
ssize_t ret;
size_t try, done = 0;
conn_refresh_polling_flags(conn);
- if (!conn->xprt_ctx)
+ if (!ctx)
goto out_error;
if (conn->flags & CO_FL_HANDSHAKE)
* caller to take care of this. It's up to the caller to update the buffer's
* contents based on the return value.
*/
-static size_t ssl_sock_from_buf(struct connection *conn, const struct buffer *buf, size_t count, int flags)
+static size_t ssl_sock_from_buf(struct connection *conn, void *xprt_ctx, const struct buffer *buf, size_t count, int flags)
{
- struct ssl_sock_ctx *ctx = conn->xprt_ctx;
+ struct ssl_sock_ctx *ctx = xprt_ctx;
ssize_t ret;
size_t try, done;
done = 0;
conn_refresh_polling_flags(conn);
- if (!conn->xprt_ctx)
+ if (!ctx)
goto out_error;
if (conn->flags & CO_FL_HANDSHAKE)
goto leave;
}
-static void ssl_sock_close(struct connection *conn) {
+static void ssl_sock_close(struct connection *conn, void *xprt_ctx) {
- struct ssl_sock_ctx *ctx = conn->xprt_ctx;
+ struct ssl_sock_ctx *ctx = xprt_ctx;
- if (conn->xprt_ctx) {
+ if (ctx) {
#if (OPENSSL_VERSION_NUMBER >= 0x1010000fL) && !defined(OPENSSL_NO_ASYNC)
if (global_ssl.async) {
OSSL_ASYNC_FD all_fd[32], afd;
fd_cant_recv(afd);
}
pool_free(ssl_sock_ctx_pool, ctx);
- conn->xprt_ctx = NULL;
_HA_ATOMIC_ADD(&jobs, 1);
return;
}
#endif
SSL_free(ctx->ssl);
pool_free(ssl_sock_ctx_pool, ctx);
- conn->xprt_ctx = NULL;
_HA_ATOMIC_SUB(&sslconns, 1);
}
}
/* This function tries to perform a clean shutdown on an SSL connection, and in
* any case, flags the connection as reusable if no handshake was in progress.
*/
-static void ssl_sock_shutw(struct connection *conn, int clean)
+static void ssl_sock_shutw(struct connection *conn, void *xprt_ctx, int clean)
{
- struct ssl_sock_ctx *ctx = conn->xprt_ctx;
+ struct ssl_sock_ctx *ctx = xprt_ctx;
if (conn->flags & CO_FL_HANDSHAKE)
return;
* freed by the caller. NPN is also checked if available since older versions
* of openssl (1.0.1) which are more common in field only support this one.
*/
-static int ssl_sock_get_alpn(const struct connection *conn, const char **str, int *len)
+static int ssl_sock_get_alpn(const struct connection *conn, void *xprt_ctx, const char **str, int *len)
{
#if defined(TLSEXT_TYPE_application_layer_protocol_negotiation) || \
defined(OPENSSL_NPN_NEGOTIATED) && !defined(OPENSSL_NO_NEXTPROTONEG)
-struct ssl_sock_ctx *ctx = conn->xprt_ctx;
-#endif
-
- if (!conn || !conn->xprt_ctx || conn->xprt != &ssl_sock)
+ struct ssl_sock_ctx *ctx = xprt_ctx;
+ if (!ctx)
return 0;
*str = NULL;
SSL_get0_next_proto_negotiated(ctx->ssl, (const unsigned char **)str, (unsigned *)len);
if (*str)
return 1;
+#endif
#endif
return 0;
}