* Determines the number of bytes available still to be read, and (if
* include_fin is 1) whether a FIN or reset has yet to be read.
*/
-static ossl_inline ossl_unused int ossl_quic_stream_recv_pending(const QUIC_STREAM *s,
- int include_fin)
+static ossl_inline ossl_unused size_t ossl_quic_stream_recv_pending(const QUIC_STREAM *s,
+ int include_fin)
{
size_t avail;
int fin = 0;
* function returns non-zero does not guarantee that
* ossl_quic_wire_encode_pkt_hdr() will succeed.
*/
-int ossl_quic_wire_get_encoded_pkt_hdr_len(size_t short_conn_id_len,
- const QUIC_PKT_HDR *hdr);
+size_t ossl_quic_wire_get_encoded_pkt_hdr_len(size_t short_conn_id_len,
+ const QUIC_PKT_HDR *hdr);
/*
* Packet Number Encoding
static int ssl_puts(BIO *bp, const char *str)
{
- int n, ret;
+ int ret;
+ size_t n = strlen(str);
- n = strlen(str);
- ret = BIO_write(bp, str, n);
+ if (n > INT_MAX)
+ return -1;
+ ret = BIO_write(bp, str, (int)n);
return ret;
}
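
The ssl_puts() hunk above is the simplest instance of the guard applied throughout this series: a size_t length is checked against INT_MAX before being narrowed to the int expected by BIO_write() and similar APIs, instead of being cast blindly. A minimal sketch of that pattern, using a hypothetical helper name:

    #include <limits.h>
    #include <stddef.h>

    /* Hypothetical helper: narrow a size_t length to int, or report failure. */
    static int narrow_len(size_t n)
    {
        if (n > INT_MAX)   /* value would not fit in a non-negative int */
            return -1;
        return (int)n;
    }
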
BIO_ADDR_free(tmpclient);
tmpclient = NULL;
- if (BIO_write(wbio, wbuf, wreclen) < (int)wreclen) {
+ if (BIO_write(wbio, wbuf, (int)wreclen) < (int)wreclen) {
if (BIO_should_retry(wbio)) {
/*
* Non-blocking IO...but we're stateless, so we're just
}
if (check_channel)
+ /* We only care about the boolean result here */
avail = ossl_quic_stream_recv_pending(ctx.xso->stream,
- /*include_fin=*/1)
- || ossl_quic_channel_has_pending(ctx.qc->ch)
- || ossl_quic_channel_is_term_any(ctx.qc->ch);
+ /*include_fin=*/1) > 0
+ || ossl_quic_channel_has_pending(ctx.qc->ch)
+ || ossl_quic_channel_is_term_any(ctx.qc->ch);
else
avail = ossl_quic_stream_recv_pending(ctx.xso->stream,
/*include_fin=*/0);
qctx_lock(&ctx);
- ret = ossl_quic_port_get_num_incoming_channels(ctx.ql->port);
+ ret = (int)ossl_quic_port_get_num_incoming_channels(ctx.ql->port);
qctx_unlock(&ctx);
return ret;
static int test_poll_event_is(QUIC_CONNECTION *qc, int is_uni)
{
return ossl_quic_stream_map_get_accept_queue_len(ossl_quic_channel_get_qsm(qc->ch),
- is_uni);
+ is_uni) > 0;
}
/* Do we have the OS (outgoing: stream) condition? */
size_t *ct_len)
{
int iv_len, len, ret = 0;
- size_t tag_len;
+ int tag_len;
unsigned char *iv = ciphertext, *data, *tag;
- if ((tag_len = EVP_CIPHER_CTX_get_tag_length(port->token_ctx)) == 0
+ if ((tag_len = EVP_CIPHER_CTX_get_tag_length(port->token_ctx)) <= 0
|| (iv_len = EVP_CIPHER_CTX_get_iv_length(port->token_ctx)) <= 0)
goto err;
if (!RAND_bytes_ex(port->engine->libctx, ciphertext, iv_len, 0)
|| !EVP_EncryptInit_ex(port->token_ctx, NULL, NULL, NULL, iv)
- || !EVP_EncryptUpdate(port->token_ctx, data, &len, plaintext, pt_len)
+ || !EVP_EncryptUpdate(port->token_ctx, data, &len, plaintext, (int)pt_len)
|| !EVP_EncryptFinal_ex(port->token_ctx, data + pt_len, &len)
|| !EVP_CIPHER_CTX_ctrl(port->token_ctx, EVP_CTRL_GCM_GET_TAG, tag_len, tag))
goto err;
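
In the token encrypt/decrypt hunks, tag_len becomes an int because EVP_CIPHER_CTX_get_tag_length() and EVP_CIPHER_CTX_get_iv_length() return int, and a non-positive result means there is no usable length to work with. A minimal sketch of that check, with a hypothetical helper name:

    #include <openssl/evp.h>

    /*
     * Hypothetical helper: fetch the AEAD tag and IV lengths for an
     * initialised cipher context. Returns 1 if both are usable.
     */
    static int get_aead_lengths(EVP_CIPHER_CTX *ctx, int *tag_len, int *iv_len)
    {
        *tag_len = EVP_CIPHER_CTX_get_tag_length(ctx);
        *iv_len = EVP_CIPHER_CTX_get_iv_length(ctx);

        /* A non-positive result indicates no usable tag or IV length. */
        return *tag_len > 0 && *iv_len > 0;
    }
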
size_t *pt_len)
{
int iv_len, len = 0, ret = 0;
- size_t tag_len;
+ int tag_len;
const unsigned char *iv = ciphertext, *data, *tag;
- if ((tag_len = EVP_CIPHER_CTX_get_tag_length(port->token_ctx)) == 0
+ if ((tag_len = EVP_CIPHER_CTX_get_tag_length(port->token_ctx)) <= 0
|| (iv_len = EVP_CIPHER_CTX_get_iv_length(port->token_ctx)) <= 0)
goto err;
/* Prevent decryption of a buffer that is not within reasonable bounds */
- if (ct_len < (iv_len + tag_len) || ct_len > ENCRYPTED_TOKEN_MAX_LEN)
+ if (ct_len < (size_t)(iv_len + tag_len) || ct_len > ENCRYPTED_TOKEN_MAX_LEN)
goto err;
*pt_len = ct_len - iv_len - tag_len;
if (!EVP_DecryptInit_ex(port->token_ctx, NULL, NULL, NULL, iv)
|| !EVP_DecryptUpdate(port->token_ctx, plaintext, &len, data,
- ct_len - iv_len - tag_len)
+ (int)(ct_len - iv_len - tag_len))
|| !EVP_CIPHER_CTX_ctrl(port->token_ctx, EVP_CTRL_GCM_SET_TAG, tag_len,
(void *)tag)
|| !EVP_DecryptFinal_ex(port->token_ctx, plaintext + len, &len))
return 0;
/* Feed AAD data. */
- if (EVP_CipherUpdate(cctx, NULL, &l, aad, aad_len) != 1)
+ if (EVP_CipherUpdate(cctx, NULL, &l, aad, (int)aad_len) != 1)
return 0;
/* Feed encrypted packet body. */
- if (EVP_CipherUpdate(cctx, dst, &l, src, src_len - el->tag_len) != 1)
+ if (EVP_CipherUpdate(cctx, dst, &l, src, (int)(src_len - el->tag_len)) != 1)
return 0;
#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
}
/* Feed AAD data. */
- if (EVP_CipherUpdate(cctx, NULL, &l, hdr, hdr_len) != 1) {
+ if (EVP_CipherUpdate(cctx, NULL, &l, hdr, (int)hdr_len) != 1) {
ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
return 0;
}
break;
if (EVP_CipherUpdate(cctx, txe_data(txe) + txe->data_len,
- &l, src, src_len) != 1) {
+ &l, src, (int)src_len) != 1) {
ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
return 0;
}
alpnlen = srv->args.alpnlen;
}
- if (SSL_select_next_proto((unsigned char **)out, outlen, alpn, alpnlen,
+ if (SSL_select_next_proto((unsigned char **)out, outlen, alpn,
+ (unsigned int)alpnlen,
in, inlen) != OPENSSL_NPN_NEGOTIATED)
return SSL_TLSEXT_ERR_ALERT_FATAL;
/* Assemble packet header. */
phdr->type = ossl_quic_enc_level_to_pkt_type(enc_level);
phdr->spin_bit = 0;
- phdr->pn_len = txp_determine_pn_len(txp);
+ phdr->pn_len = (unsigned int)txp_determine_pn_len(txp);
phdr->partial = 0;
phdr->fixed = 1;
phdr->reserved = 0;
cid->id_len = (unsigned char)len;
- if (RAND_bytes_ex(libctx, cid->id, len, len * 8) != 1) {
+ if (RAND_bytes_ex(libctx, cid->id, len, 0) != 1) {
ERR_raise(ERR_LIB_SSL, ERR_R_RAND_LIB);
cid->id_len = 0;
return 0;
return 1;
}
-int ossl_quic_wire_get_encoded_pkt_hdr_len(size_t short_conn_id_len,
- const QUIC_PKT_HDR *hdr)
+size_t ossl_quic_wire_get_encoded_pkt_hdr_len(size_t short_conn_id_len,
+ const QUIC_PKT_HDR *hdr)
{
size_t len = 0, enclen;
}
/* Feed packet header as AAD data. */
- if (EVP_CipherUpdate(cctx, NULL, &l, buf, hdr_enc_len) != 1) {
+ if (EVP_CipherUpdate(cctx, NULL, &l, buf, (int)hdr_enc_len) != 1) {
ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
goto err;
}
/* Feed packet body as AAD data. */
if (EVP_CipherUpdate(cctx, NULL, &l, hdr->data,
- hdr->len - QUIC_RETRY_INTEGRITY_TAG_LEN) != 1) {
+ (int)(hdr->len - QUIC_RETRY_INTEGRITY_TAG_LEN)) != 1) {
ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
goto err;
}
ERR_clear_last_mark();
OSSL_TRACE_BEGIN(TLS) {
BIO_printf(trc_out, "dec %zd\n", rr->length);
- BIO_dump_indent(trc_out, rr->data, rr->length, 4);
+ BIO_dump_indent(trc_out, rr->data, (int)rr->length, 4);
} OSSL_TRACE_END(TLS);
/* r->length is now the compressed data plus mac */
mode = EVP_CIPHER_get_mode(ciph);
if (EVP_CipherInit_ex(ciph_ctx, ciph, NULL, NULL, NULL, enc) <= 0
- || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_IVLEN, ivlen,
+ || EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_IVLEN, (int)ivlen,
NULL) <= 0
|| (mode == EVP_CIPH_CCM_MODE
- && EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_TAG, taglen,
+ && EVP_CIPHER_CTX_ctrl(ciph_ctx, EVP_CTRL_AEAD_SET_TAG, (int)taglen,
NULL) <= 0)
|| EVP_CipherInit_ex(ciph_ctx, NULL, NULL, key, NULL, enc) <= 0) {
ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
if (EVP_CipherInit_ex(enc_ctx, NULL, NULL, NULL, nonce, sending) <= 0
|| (!sending && EVP_CIPHER_CTX_ctrl(enc_ctx, EVP_CTRL_AEAD_SET_TAG,
- rl->taglen,
+ (int)rl->taglen,
rec->data + rec->length) <= 0)) {
RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
return 0;
}
if (sending) {
/* Add the tag */
- if (EVP_CIPHER_CTX_ctrl(enc_ctx, EVP_CTRL_AEAD_GET_TAG, rl->taglen,
+ if (EVP_CIPHER_CTX_ctrl(enc_ctx, EVP_CTRL_AEAD_GET_TAG, (int)rl->taglen,
rec->data + rec->length) <= 0) {
RLAYERfatal(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
return 0;
BIO_printf(trc_out, "seq:\n");
BIO_dump_indent(trc_out, seq, 8, 4);
BIO_printf(trc_out, "rec:\n");
- BIO_dump_indent(trc_out, rec->data, rec->length, 4);
+ BIO_dump_indent(trc_out, rec->data, (int)rec->length, 4);
} OSSL_TRACE_END(TLS);
if (!rl->isdtls && !tls_increment_sequence_ctr(rl)) {
OSSL_TRACE_BEGIN(TLS) {
BIO_printf(trc_out, "md:\n");
- BIO_dump_indent(trc_out, md, md_size, 4);
+ BIO_dump_indent(trc_out, md, (int)md_size, 4);
} OSSL_TRACE_END(TLS);
ret = 1;
end:
clear_sys_error();
if (bio != NULL) {
- ret = BIO_read(bio, pkt + len + left, max - left);
+ ret = BIO_read(bio, pkt + len + left, (int)(max - left));
if (ret > 0) {
bioread = ret;
ret = OSSL_RECORD_RETURN_SUCCESS;
}
/* If we are dealing with ciphertext we need to allow for the overhead */
- max_early_data += overhead;
+ max_early_data += (uint32_t)overhead;
if (rl->early_data_count + length > max_early_data) {
RLAYERfatal(rl, send ? SSL_AD_INTERNAL_ERROR : SSL_AD_UNEXPECTED_MESSAGE,
}
OSSL_TRACE_BEGIN(TLS) {
BIO_printf(trc_out, "dec %lu\n", (unsigned long)rr[0].length);
- BIO_dump_indent(trc_out, rr[0].data, rr[0].length, 4);
+ BIO_dump_indent(trc_out, rr[0].data, (int)rr[0].length, 4);
} OSSL_TRACE_END(TLS);
/* r->length is now the compressed data plus mac */
void tls_set_max_frag_len(OSSL_RECORD_LAYER *rl, size_t max_frag_len)
{
- rl->max_frag_len = max_frag_len;
+ rl->max_frag_len = (unsigned int)max_frag_len;
/*
* We don't need to adjust buffer sizes. Write buffer sizes are
* automatically checked anyway. We should only be changing the read buffer
}
wb = &rl->wbuf[0];
- mb_param.interleave = numtempl;
+ mb_param.interleave = (unsigned int)numtempl;
memcpy(aad, rl->sequence, 8);
aad[8] = templates[0].type;
aad[9] = (unsigned char)(templates[0].version >> 8);
*/
aux1 = rotated_mac[rotate_offset & ~32];
aux2 = rotated_mac[rotate_offset | 32];
- mask = constant_time_eq_8(rotate_offset & ~32, rotate_offset);
+ mask = constant_time_eq_8((unsigned int)(rotate_offset & ~32),
+ (unsigned int)rotate_offset);
aux3 = constant_time_select_8(mask, aux1, aux2);
rotate_offset++;
rotate_offset &= constant_time_lt_s(rotate_offset, mac_size);
for (i = 0; i < mac_size; i++) {
for (j = 0; j < mac_size; j++)
- out[j] |= rotated_mac[i] & constant_time_eq_8_s(j, rotate_offset);
+ out[j] |= rotated_mac[i] & constant_time_eq_8_s((unsigned int)j,
+ (unsigned int)rotate_offset);
rotate_offset++;
rotate_offset &= constant_time_lt_s(rotate_offset, mac_size);
}
/* If we are dealing with ciphertext we need to allow for the overhead */
- max_early_data += overhead;
+ max_early_data += (uint32_t)overhead;
if (s->early_data_count + length > max_early_data) {
SSLfatal(s, send ? SSL_AD_INTERNAL_ERROR : SSL_AD_UNEXPECTED_MESSAGE,
SSL_R_TOO_MUCH_EARLY_DATA);
return 0;
}
- s->early_data_count += length;
+ s->early_data_count += (uint32_t)length;
return 1;
}
return 0;
if (pctype)
*pctype = sc->s3.tmp.ctype;
- return sc->s3.tmp.ctype_len;
+ return (long)sc->s3.tmp.ctype_len;
}
case SSL_CTRL_SET_CLIENT_CERT_TYPES:
* that.
*/
if (s->psk_server_callback != NULL) {
- for (j = 0; j < s->ssl_pkey_num && !ssl_has_cert(s, j); j++);
+ for (j = 0; j < s->ssl_pkey_num && !ssl_has_cert(s, (int)j); j++);
if (j == s->ssl_pkey_num) {
/* There are no certificates */
prefer_sha256 = 1;
}
if ((max_length = ossl_calculate_comp_expansion(alg, len)) == 0
+ || max_length > INT_MAX
|| method == NULL
|| (comp_ctx = COMP_CTX_new(method)) == NULL
|| (comp_data = OPENSSL_zalloc(max_length)) == NULL)
goto err;
- comp_length = COMP_compress_block(comp_ctx, comp_data, max_length, data, len);
+ comp_length = COMP_compress_block(comp_ctx, comp_data, (int)max_length, data, (int)len);
if (comp_length <= 0)
goto err;
if (sk_SSL_CIPHER_find(srvrsk, c) < 0)
continue;
- n = OPENSSL_strnlen(c->name, size);
+ n = (int)OPENSSL_strnlen(c->name, size);
if (n >= size) {
if (p != buf)
--p;
return GET_MAX_FRAGMENT_LENGTH(sc->session);
/* return current SSL connection setting */
- return sc->max_send_fragment;
+ return (unsigned int)sc->max_send_fragment;
}
__owur unsigned int ssl_get_split_send_fragment(const SSL_CONNECTION *sc)
/* else limit |split_send_fragment| to current |max_send_fragment| */
if (sc->split_send_fragment > sc->max_send_fragment)
- return sc->max_send_fragment;
+ return (unsigned int)sc->max_send_fragment;
/* return current SSL connection setting */
- return sc->split_send_fragment;
+ return (unsigned int)sc->split_send_fragment;
}
int SSL_stateless(SSL *s)
break;
}
/* Check that PEM name starts with "BEGIN SERVERINFO FOR " */
- name_len = strlen(name);
+ name_len = (unsigned int)strlen(name);
if (name_len < sizeof(NAME_PREFIX1) - 1) {
ERR_raise(ERR_LIB_SSL, SSL_R_PEM_NAME_TOO_SHORT);
goto end;
int copy = 1;
ret = s->session_ctx->get_session_cb(SSL_CONNECTION_GET_USER_SSL(s),
- sess_id, sess_id_len, &copy);
+ sess_id, (int)sess_id_len, &copy);
if (ret != NULL) {
if (ret->not_resumable) {
SSLfatal(s, SSL_AD_ILLEGAL_PARAMETER, SSL_R_BAD_EXTENSION);
goto err;
}
- idx = thisex - raw_extensions;
+ idx = (unsigned int)(thisex - raw_extensions);
/*-
* Check that we requested this extension (if appropriate). Requests can
* be sent in the ClientHello and CertificateRequest. Unsolicited
if (s->ext.debug_cb)
s->ext.debug_cb(SSL_CONNECTION_GET_USER_SSL(s), !s->server,
thisex->type, PACKET_data(&thisex->data),
- PACKET_remaining(&thisex->data),
+ (int)PACKET_remaining(&thisex->data),
s->ext.debug_arg);
}
}
if (s->ext.session_ticket_cb != NULL &&
!s->ext.session_ticket_cb(ssl, PACKET_data(pkt),
- PACKET_remaining(pkt),
+ (int)PACKET_remaining(pkt),
s->ext.session_ticket_cb_arg)) {
SSLfatal(s, SSL_AD_HANDSHAKE_FAILURE, SSL_R_BAD_EXTENSION);
return 0;
}
if (sctx->ext.npn_select_cb(SSL_CONNECTION_GET_USER_SSL(s),
&selected, &selected_len,
- PACKET_data(pkt), PACKET_remaining(pkt),
+ PACKET_data(pkt), (unsigned int)PACKET_remaining(pkt),
sctx->ext.npn_select_cb_arg) != SSL_TLSEXT_ERR_OK
|| selected_len == 0) {
SSLfatal(s, SSL_AD_HANDSHAKE_FAILURE, SSL_R_BAD_EXTENSION);
{
if (s->ext.session_ticket_cb &&
!s->ext.session_ticket_cb(SSL_CONNECTION_GET_USER_SSL(s),
- PACKET_data(pkt), PACKET_remaining(pkt),
+ PACKET_data(pkt), (int)PACKET_remaining(pkt),
s->ext.session_ticket_cb_arg)) {
SSLfatal(s, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
return 0;
if (new_group_idx < group_idx) {
group_idx = new_group_idx;
*candidate_group_idx = current_group;
- *prio_group_idx = group_idx;
+ *prio_group_idx = (int)group_idx;
*selected_group = prio_groups[group_idx];
}
}
|| !WPACKET_reserve_bytes(pkt, max_length, NULL))
goto err;
- comp_len = COMP_compress_block(comp, WPACKET_get_curr(pkt), max_length,
- (unsigned char *)buf->data, length);
+ comp_len = COMP_compress_block(comp, WPACKET_get_curr(pkt), (int)max_length,
+ (unsigned char *)buf->data, (int)length);
if (comp_len <= 0)
goto err;
|| EVP_PKEY_get_id(pkey) == NID_id_GostR3410_2012_256))
|| (PACKET_remaining(pkt) == 128
&& EVP_PKEY_get_id(pkey) == NID_id_GostR3410_2012_512))) {
- len = PACKET_remaining(pkt);
+ len = (unsigned int)PACKET_remaining(pkt);
} else
#endif
if (!PACKET_get_net_2(pkt, &len)) {
default:
break;
}
- if (!ssl_has_cert(s, i))
+ if (!ssl_has_cert(s, (int)i))
continue;
if (i != SSL_PKEY_ECC)
return 1;
if (!BUF_MEM_grow(buf, expected_length)
|| !PACKET_buf_init(tmppkt, (unsigned char *)buf->data, expected_length)
- || COMP_expand_block(comp, (unsigned char *)buf->data, expected_length,
- (unsigned char*)PACKET_data(pkt), comp_length) != (int)expected_length) {
+ || COMP_expand_block(comp, (unsigned char *)buf->data, (int)expected_length,
+ (unsigned char*)PACKET_data(pkt),
+ (int)comp_length) != (int)expected_length) {
SSLfatal(sc, SSL_AD_BAD_CERTIFICATE, SSL_R_BAD_DECOMPRESSION);
goto err;
}
if (SSL_get_options(ssl) & SSL_OP_COOKIE_EXCHANGE) {
if (sctx->app_verify_cookie_cb != NULL) {
if (sctx->app_verify_cookie_cb(ussl, clienthello->dtls_cookie,
- clienthello->dtls_cookie_len) == 0) {
+ (unsigned int)clienthello->dtls_cookie_len) == 0) {
SSLfatal(s, SSL_AD_HANDSHAKE_FAILURE,
SSL_R_COOKIE_MISMATCH);
goto err;
ptr = PACKET_data(pkt);
/* Some implementations provide extra data in the opaqueBlob
* We have nothing to do with this blob so we just skip it */
- pKX = d2i_GOST_KX_MESSAGE(NULL, &ptr, PACKET_remaining(pkt));
+ pKX = d2i_GOST_KX_MESSAGE(NULL, &ptr, (long)PACKET_remaining(pkt));
if (pKX == NULL
|| pKX->kxBlob == NULL
|| ASN1_TYPE_get(pKX->kxBlob) != V_ASN1_SEQUENCE) {
BIO_printf(trc_out, "which = %04X, key:\n", which);
BIO_dump_indent(trc_out, key, EVP_CIPHER_get_key_length(c), 4);
BIO_printf(trc_out, "iv:\n");
- BIO_dump_indent(trc_out, iv, k, 4);
+ BIO_dump_indent(trc_out, iv, (int)k, 4);
} OSSL_TRACE_END(TLS);
return 1;
BIO_printf(trc_out, "master key\n");
BIO_dump_indent(trc_out,
s->session->master_key,
- s->session->master_key_length, 4);
+ (int)s->session->master_key_length, 4);
} OSSL_TRACE_END(TLS);
if (!tls1_generate_key_block(s, p, num)) {
OSSL_TRACE_BEGIN(TLS) {
BIO_printf(trc_out, "key block\n");
- BIO_dump_indent(trc_out, p, num, 4);
+ BIO_dump_indent(trc_out, p, (int)num, 4);
} OSSL_TRACE_END(TLS);
ret = 1;
}
OSSL_TRACE_BEGIN(TLS) {
BIO_printf(trc_out, "Handshake hashes:\n");
- BIO_dump(trc_out, (char *)hash, hashlen);
+ BIO_dump(trc_out, (char *)hash, (int)hashlen);
} OSSL_TRACE_END(TLS);
if (!tls1_PRF(s,
TLS_MD_EXTENDED_MASTER_SECRET_CONST,
OSSL_TRACE_BEGIN(TLS) {
BIO_printf(trc_out, "Premaster Secret:\n");
- BIO_dump_indent(trc_out, p, len, 4);
+ BIO_dump_indent(trc_out, p, (int)len, 4);
BIO_printf(trc_out, "Client Random:\n");
BIO_dump_indent(trc_out, s->s3.client_random, SSL3_RANDOM_SIZE, 4);
BIO_printf(trc_out, "Server Random:\n");
TLS_GROUP_IX *gix;
uint16_t id = 0;
int ret = 0;
- size_t ix;
+ int ix;
- if (grps == NULL || out == NULL)
+ if (grps == NULL || out == NULL || num > INT_MAX)
return 0;
if ((collect = sk_TLS_GROUP_IX_new(tls_group_ix_cmp)) == NULL)
return 0;
- for (ix = 0; ix < num; ++ix, ++grps) {
+ for (ix = 0; ix < (int)num; ++ix, ++grps) {
if (grps->mintls > 0 && max_proto_version > 0
&& grps->mintls > max_proto_version)
continue;
sk_TLS_GROUP_IX_sort(collect);
num = sk_TLS_GROUP_IX_num(collect);
- for (ix = 0; ix < num; ++ix) {
+ for (ix = 0; ix < (int)num; ++ix) {
gix = sk_TLS_GROUP_IX_value(collect, ix);
if (!all && gix->grp->group_id == id)
continue;
cache[cache_idx].hash = si.hash_name?OBJ_txt2nid(si.hash_name):NID_undef;
cache[cache_idx].hash_idx = ssl_get_md_idx(cache[cache_idx].hash);
cache[cache_idx].sig = OBJ_txt2nid(si.sigalg_name);
- cache[cache_idx].sig_idx = i + SSL_PKEY_NUM;
+ cache[cache_idx].sig_idx = (int)(i + SSL_PKEY_NUM);
cache[cache_idx].sigandhash = OBJ_txt2nid(si.sigalg_name);
cache[cache_idx].curve = NID_undef;
cache[cache_idx].mintls = TLS1_3_VERSION;
if (clu == NULL)
continue;
if (clu->amask & s->s3.tmp.new_cipher->algorithm_auth) {
- idx = i;
+ idx = (int)i;
break;
}
}
}
}
} else {
- idx = s->cert->key - s->cert->pkeys;
+ idx = (int)(s->cert->key - s->cert->pkeys);
}
}
if (idx < 0 || idx >= (int)OSSL_NELEM(tls_default_sigalg))
if (ssl_cert_lookup_by_pkey(pkey, &idx, SSL_CONNECTION_GET_CTX(s)) == NULL)
return 0;
- lu = tls1_get_legacy_sigalg(s, idx);
+ lu = tls1_get_legacy_sigalg(s, (int)idx);
if (lu == NULL)
return 0;
s->s3.tmp.peer_sigalg = lu;
size_t sent_sigslen = tls12_get_psigalgs(s, 1, &sent_sigs);
for (i = 0; i < s->ssl_pkey_num; i++) {
- const SIGALG_LOOKUP *lu = tls1_get_legacy_sigalg(s, i);
+ const SIGALG_LOOKUP *lu = tls1_get_legacy_sigalg(s, (int)i);
size_t j;
if (lu == NULL)
p = sdec;
sess = d2i_SSL_SESSION_ex(NULL, &p, slen, sctx->libctx, sctx->propq);
- slen -= p - sdec;
+ slen -= (int)(p - sdec);
OPENSSL_free(sdec);
if (sess) {
/* Some additional consistency checks */
if (ssl_cert_lookup_by_pkey(pk, &certidx,
SSL_CONNECTION_GET_CTX(s)) == NULL)
return 0;
- idx = certidx;
+ idx = (int)certidx;
pvalid = s->s3.tmp.valid_flags + idx;
if (c->cert_flags & SSL_CERT_FLAGS_CHECK_TLS_STRICT)
/* If ciphersuite doesn't require a cert nothing to do */
if (!(s->s3.tmp.new_cipher->algorithm_auth & SSL_aCERT))
return 1;
- if (!s->server && !ssl_has_cert(s, s->cert->key - s->cert->pkeys))
+ if (!s->server && !ssl_has_cert(s, (int)(s->cert->key - s->cert->pkeys)))
return 1;
if (SSL_USE_SIGALGS(s)) {
if ((sig_idx = tls12_get_cert_sigalg_idx(s, lu)) == -1)
continue;
} else {
- int cc_idx = s->cert->key - s->cert->pkeys;
+ int cc_idx = (int)(s->cert->key - s->cert->pkeys);
sig_idx = lu->sig_idx;
if (cc_idx != sig_idx)
if (plen + 1 > xlen)
return 0;
BIO_indent(bio, indent + 2, 80);
- BIO_write(bio, ext, plen);
+ BIO_write(bio, ext, (int)plen);
BIO_puts(bio, "\n");
ext += plen;
xlen -= plen + 1;
return ssl_trace_list(bio, indent + 2, ext + 1, xlen, 1, ssl_cert_type_tbl);
default:
- BIO_dump_indent(bio, (const char *)ext, extlen, indent + 2);
+ BIO_dump_indent(bio, (const char *)ext, (int)extlen, indent + 2);
}
return 1;
}
if (extslen < extlen + 4) {
BIO_printf(bio, "extensions, extype = %d, extlen = %d\n", extype,
(int)extlen);
- BIO_dump_indent(bio, (const char *)msg, extslen, indent + 2);
+ BIO_dump_indent(bio, (const char *)msg, (int)extslen, indent + 2);
return 0;
}
msg += 4;
BIO_indent(bio, indent, 80);
BIO_printf(bio, "ASN.1Cert, length=%d", (int)clen);
x = X509_new_ex(ctx->libctx, ctx->propq);
- if (x != NULL && d2i_X509(&x, &q, clen) == NULL) {
+ if (x != NULL && d2i_X509(&x, &q, (long)clen) == NULL) {
X509_free(x);
x = NULL;
}
BIO_indent(bio, indent, 80);
BIO_printf(bio, "raw_public_key, length=%d\n", (int)clen);
- pkey = d2i_PUBKEY_ex(NULL, &msg, clen, ssl->ctx->libctx, ssl->ctx->propq);
+ pkey = d2i_PUBKEY_ex(NULL, &msg, (long)clen, ssl->ctx->libctx, ssl->ctx->propq);
if (pkey == NULL)
return 0;
EVP_PKEY_print_public(bio, pkey, indent + 2, NULL);
else
BIO_printf(bio, "Compressed length=%d, Ratio=unknown\n", (int)clen);
- BIO_dump_indent(bio, (const char *)msg, clen, indent);
+ BIO_dump_indent(bio, (const char *)msg, (int)clen, indent);
#ifndef OPENSSL_NO_COMP_ALG
if (!ossl_comp_has_alg(alg))
}
if ((comp = COMP_CTX_new(method)) == NULL
- || COMP_expand_block(comp, ucdata, uclen, (unsigned char*)msg, clen) != (int)uclen)
+ || COMP_expand_block(comp, ucdata, (int)uclen,
+ (unsigned char*)msg, (int)clen) != (int)uclen)
goto err;
ret = ssl_print_certificates(bio, sc, server, indent, ucdata, uclen);
BIO_indent(bio, indent + 2, 80);
BIO_printf(bio, "DistinguishedName (len=%d): ", (int)dlen);
p = msg;
- nm = d2i_X509_NAME(NULL, &p, dlen);
+ nm = d2i_X509_NAME(NULL, &p, (long)dlen);
if (!nm) {
BIO_puts(bio, "<UNPARSEABLE DN>\n");
} else {
default:
BIO_indent(bio, indent + 2, 80);
BIO_puts(bio, "Unsupported, hex dump follows:\n");
- BIO_dump_indent(bio, (const char *)msg, msglen, indent + 4);
+ BIO_dump_indent(bio, (const char *)msg, (int)msglen, indent + 4);
}
return 1;
}
int ssl_hmac_old_init(SSL_HMAC *ctx, void *key, size_t len, char *md)
{
- return HMAC_Init_ex(ctx->old_ctx, key, len, EVP_get_digestbyname(md), NULL);
+ return HMAC_Init_ex(ctx->old_ctx, key, (int)len, EVP_get_digestbyname(md), NULL);
}
int ssl_hmac_old_update(SSL_HMAC *ctx, const unsigned char *data, size_t len)