#define GSS_IOV_BUFFER_TYPE_PADDING 9 /* Padding */
#define GSS_IOV_BUFFER_TYPE_STREAM 10 /* Complete wrap token */
#define GSS_IOV_BUFFER_TYPE_SIGN_ONLY 11 /* Sign only packet data */
+#define GSS_IOV_BUFFER_TYPE_MIC_TOKEN 12 /* MIC token destination */
#define GSS_IOV_BUFFER_FLAG_MASK 0xFFFF0000
#define GSS_IOV_BUFFER_FLAG_ALLOCATE 0x00010000 /* indicates GSS should allocate */
gss_iov_buffer_desc *, /* iov */
int); /* iov_count */
+/*
+ * Produce a GSSAPI MIC token for a sequence of buffers. All SIGN_ONLY and
+ * DATA buffers will be signed, in the order they appear. One MIC_TOKEN buffer
+ * must be included for the result. Suitable space should be provided for the
+ * MIC_TOKEN buffer by calling gss_get_mic_iov_length, or the ALLOCATE flag
+ * should be set on that buffer. If the ALLOCATE flag is used, use
+ * gss_release_iov_buffer to free the allocated buffer within the iov list when
+ * it is no longer needed.
+ */
+OM_uint32 KRB5_CALLCONV gss_get_mic_iov
+(
+ OM_uint32 *, /* minor_status */
+ gss_ctx_id_t, /* context_handle */
+ gss_qop_t, /* qop_req */
+ gss_iov_buffer_desc *, /* iov */
+ int); /* iov_count */
+
+/*
+ * Query the required length of the MIC_TOKEN buffer within the iov list.
+ * The length is placed in the MIC_TOKEN buffer's length field.
+ */
+OM_uint32 KRB5_CALLCONV gss_get_mic_iov_length
+(
+ OM_uint32 *, /* minor_status */
+ gss_ctx_id_t, /* context_handle */
+ gss_qop_t, /* qop_req */
+ gss_iov_buffer_desc *, /* iov */
+ int); /* iov_count */
+
+/*
+ * Verify the MIC_TOKEN buffer within the iov list against the SIGN_ONLY and
+ * DATA buffers in the order they appear. Return values are the same as for
+ * gss_verify_mic.
+ */
+OM_uint32 KRB5_CALLCONV gss_verify_mic_iov
+(
+ OM_uint32 *, /* minor_status */
+ gss_ctx_id_t, /* context_handle */
+ gss_qop_t *, /* qop_state */
+ gss_iov_buffer_desc *, /* iov */
+ int); /* iov_count */
+
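+/*
+ * Example (an illustrative sketch, not part of the API): sign one DATA buffer
+ * and one SIGN_ONLY buffer using a caller-provided MIC_TOKEN buffer.  The
+ * names ctx, msg, msglen, hdr, and hdrlen are placeholders for an established
+ * context handle and application data; error checking is omitted.
+ *
+ *     gss_iov_buffer_desc iov[3];
+ *     OM_uint32 major, minor;
+ *
+ *     iov[0].type = GSS_IOV_BUFFER_TYPE_DATA;
+ *     iov[0].buffer.value = msg;
+ *     iov[0].buffer.length = msglen;
+ *     iov[1].type = GSS_IOV_BUFFER_TYPE_SIGN_ONLY;
+ *     iov[1].buffer.value = hdr;
+ *     iov[1].buffer.length = hdrlen;
+ *     iov[2].type = GSS_IOV_BUFFER_TYPE_MIC_TOKEN;
+ *     major = gss_get_mic_iov_length(&minor, ctx, GSS_C_QOP_DEFAULT, iov, 3);
+ *     iov[2].buffer.value = malloc(iov[2].buffer.length);
+ *     major = gss_get_mic_iov(&minor, ctx, GSS_C_QOP_DEFAULT, iov, 3);
+ *
+ * The peer lays out the same DATA and SIGN_ONLY buffers, places the received
+ * MIC token in a MIC_TOKEN buffer, and calls gss_verify_mic_iov.
+ */
+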
/*
* Release buffers that have the ALLOCATED flag set.
*/
int iov_count,
OM_uint32 type);
+gss_iov_buffer_t kg_locate_header_iov(gss_iov_buffer_desc *iov, int iov_count,
+ int toktype);
+
void kg_iov_msglen(gss_iov_buffer_desc *iov,
int iov_count,
size_t *data_length,
krb5_key key,
krb5_keyusage sign_usage,
gss_iov_buffer_desc *iov,
- int iov_count);
+ int iov_count,
+ int toktype);
krb5_error_code kg_verify_checksum_iov_v3(krb5_context context,
krb5_cksumtype type,
krb5_keyusage sign_usage,
gss_iov_buffer_desc *iov,
int iov_count,
+ int toktype,
krb5_boolean *valid);
OM_uint32 kg_seal_iov (OM_uint32 *minor_status,
gss_qop_t qop_req,
int *conf_state,
gss_iov_buffer_desc *iov,
- int iov_count);
+ int iov_count,
+ int toktype);
krb5_cryptotype kg_translate_flag_iov(OM_uint32 type);
gss_buffer_t /* message_token */
);
+OM_uint32 KRB5_CALLCONV krb5_gss_get_mic_iov
+(OM_uint32 *, /* minor_status */
+ gss_ctx_id_t, /* context_handle */
+ gss_qop_t, /* qop_req */
+ gss_iov_buffer_desc *, /* iov */
+ int /* iov_count */
+);
+
+OM_uint32 KRB5_CALLCONV krb5_gss_get_mic_iov_length
+(OM_uint32 *, /* minor_status */
+ gss_ctx_id_t, /* context_handle */
+ gss_qop_t, /* qop_req */
+ gss_iov_buffer_desc *, /* iov */
+ int /* iov_count */
+);
+
OM_uint32 KRB5_CALLCONV krb5_gss_verify_mic
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_qop_t * /* qop_state */
);
+OM_uint32 KRB5_CALLCONV krb5_gss_verify_mic_iov
+(OM_uint32 *, /* minor_status */
+ gss_ctx_id_t, /* context_handle */
+ gss_qop_t *, /* qop_state */
+ gss_iov_buffer_desc *, /* iov */
+ int /* iov_count */
+);
+
OM_uint32 KRB5_CALLCONV krb5_gss_wrap
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
krb5_gss_acquire_cred_with_password,
krb5_gss_export_cred,
krb5_gss_import_cred,
+ NULL, /* import_sec_context_by_mech */
+ NULL, /* import_name_by_mech */
+ NULL, /* import_cred_by_mech */
+ krb5_gss_get_mic_iov,
+ krb5_gss_verify_mic_iov,
+ krb5_gss_get_mic_iov_length,
};
#ifdef _GSS_STATIC_LINK
unsigned char *ptr;
krb5_keyusage sign_usage = KG_USAGE_SIGN;
- assert(toktype == KG_TOK_WRAP_MSG);
-
md5cksum.length = cksum.length = 0;
md5cksum.contents = cksum.contents = NULL;
- header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
+ header = kg_locate_header_iov(iov, iov_count, toktype);
if (header == NULL)
return EINVAL;
padding = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
- if (padding == NULL && (ctx->gss_flags & GSS_C_DCE_STYLE) == 0)
+ if (padding == NULL && toktype == KG_TOK_WRAP_MSG &&
+ (ctx->gss_flags & GSS_C_DCE_STYLE) == 0)
return EINVAL;
trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
goto cleanup;
md5cksum.length = k5_trailerlen;
- if (k5_headerlen != 0) {
+ if (k5_headerlen != 0 && toktype == KG_TOK_WRAP_MSG) {
code = kg_make_confounder(context, ctx->enc->keyblock.enctype,
ptr + 14 + ctx->cksum_size);
if (code != 0)
gss_qop_t qop_req,
int *conf_state,
gss_iov_buffer_desc *iov,
- int iov_count)
+ int iov_count,
+ int toktype)
{
krb5_gss_ctx_id_rec *ctx;
gss_iov_buffer_t header, trailer, padding;
unsigned int k5_headerlen = 0, k5_trailerlen = 0, k5_padlen = 0;
krb5_error_code code;
krb5_context context;
- int dce_style;
+ int dce_or_mic;
if (qop_req != GSS_C_QOP_DEFAULT) {
*minor_status = (OM_uint32)G_UNKNOWN_QOP;
return GSS_S_NO_CONTEXT;
}
- header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
+ header = kg_locate_header_iov(iov, iov_count, toktype);
if (header == NULL) {
*minor_status = EINVAL;
return GSS_S_FAILURE;
INIT_IOV_DATA(trailer);
}
- dce_style = ((ctx->gss_flags & GSS_C_DCE_STYLE) != 0);
+ /* MIC tokens and DCE-style wrap tokens have similar length considerations:
+ * no padding, and the framing surrounds the header only, not the data. */
+ dce_or_mic = ((ctx->gss_flags & GSS_C_DCE_STYLE) != 0 ||
+ toktype == KG_TOK_MIC_MSG);
/* For CFX, EC is used instead of padding, and is placed in header or trailer */
padding = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
if (padding == NULL) {
- if (conf_req_flag && ctx->proto == 0 && !dce_style) {
+ if (conf_req_flag && ctx->proto == 0 && !dce_or_mic) {
*minor_status = EINVAL;
return GSS_S_FAILURE;
}
return GSS_S_FAILURE;
}
- if (k5_padlen == 0 && dce_style) {
+ if (k5_padlen == 0 && dce_or_mic) {
/* Windows rejects AEAD tokens with non-zero EC */
code = krb5_c_block_size(context, enctype, &ec);
if (code != 0) {
} else {
gss_trailerlen = k5_trailerlen; /* Kerb-Checksum */
}
- } else if (!dce_style) {
+ } else if (!dce_or_mic) {
k5_padlen = (ctx->sealalg == SEAL_ALG_MICROSOFT_RC4) ? 1 : 8;
if (k5_padlen == 1)
data_size = 14 /* Header */ + ctx->cksum_size + k5_headerlen;
- if (!dce_style)
+ if (!dce_or_mic)
data_size += data_length;
gss_headerlen = g_token_size(ctx->mech_used, data_size);
/* g_token_size() will include data_size as well as the overhead, so
* subtract data_length just to get the overhead (ie. token size) */
- if (!dce_style)
+ if (!dce_or_mic)
gss_headerlen -= data_length;
}
{
OM_uint32 major_status;
- major_status = kg_seal_iov_length(minor_status, context_handle, conf_req_flag,
- qop_req, conf_state, iov, iov_count);
+ major_status = kg_seal_iov_length(minor_status, context_handle,
+ conf_req_flag, qop_req, conf_state, iov,
+ iov_count, KG_TOK_WRAP_MSG);
return major_status;
}
-#if 0
-OM_uint32
+OM_uint32 KRB5_CALLCONV
krb5_gss_get_mic_iov(OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
gss_qop_t qop_req,
return major_status;
}
-OM_uint32
+OM_uint32 KRB5_CALLCONV
krb5_gss_get_mic_iov_length(OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
- int conf_req_flag,
gss_qop_t qop_req,
- int *conf_state,
gss_iov_buffer_desc *iov,
int iov_count)
{
OM_uint32 major_status;
- major_status = kg_seal_iov_length(minor_status, context_handle, conf_req_flag,
- qop_req, conf_state, iov, iov_count);
+ major_status = kg_seal_iov_length(minor_status, context_handle, FALSE,
+ qop_req, NULL, iov, iov_count,
+ KG_TOK_MIC_MSG);
return major_status;
}
-#endif
kg_iov_msglen(iov, iov_count, &data_length, &assoc_data_length);
- header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
+ header = kg_locate_header_iov(iov, iov_count, toktype);
if (header == NULL)
return EINVAL;
code = kg_make_checksum_iov_v3(context, cksumtype,
rrc, key, key_usage,
- iov, iov_count);
+ iov, iov_count, toktype);
if (code != 0)
goto cleanup;
if (qop_state != NULL)
*qop_state = GSS_C_QOP_DEFAULT;
- header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
+ header = kg_locate_header_iov(iov, iov_count, toktype);
assert(header != NULL);
padding = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
code = kg_verify_checksum_iov_v3(context, cksumtype, rrc,
key, key_usage,
- iov, iov_count, &valid);
+ iov, iov_count, toktype, &valid);
if (code != 0 || valid == FALSE) {
*minor_status = code;
return GSS_S_BAD_SIG;
goto defective;
seqnum = load_64_be(ptr + 8);
- code = kg_verify_checksum_iov_v3(context, cksumtype, 0,
+ /* For MIC tokens, the GSS header and checksum are in the same buffer.
+ * Fake up an RRC so that the checksum is expected in the header. */
+ rrc = (trailer != NULL) ? 0 : header->buffer.length - 16;
+ code = kg_verify_checksum_iov_v3(context, cksumtype, rrc,
key, key_usage,
- iov, iov_count, &valid);
+ iov, iov_count, toktype, &valid);
if (code != 0 || valid == FALSE) {
*minor_status = code;
return GSS_S_BAD_SIG;
size_t sumlen;
krb5_keyusage sign_usage = KG_USAGE_SIGN;
- assert(toktype == KG_TOK_WRAP_MSG);
-
md5cksum.length = cksum.length = 0;
md5cksum.contents = cksum.contents = NULL;
- header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
+ header = kg_locate_header_iov(iov, iov_count, toktype);
assert(header != NULL);
trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
unsigned int bodysize;
int toktype2;
- header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
+ header = kg_locate_header_iov(iov, iov_count, toktype);
if (header == NULL) {
*minor_status = EINVAL;
return GSS_S_FAILURE;
ptr = (unsigned char *)header->buffer.value;
input_length = header->buffer.length;
- if ((ctx->gss_flags & GSS_C_DCE_STYLE) == 0) {
+ if ((ctx->gss_flags & GSS_C_DCE_STYLE) == 0 &&
+ toktype == KG_TOK_WRAP_MSG) {
size_t data_length, assoc_data_length;
kg_iov_msglen(iov, iov_count, &data_length, &assoc_data_length);
return major_status;
}
-#if 0
-OM_uint32
+OM_uint32 KRB5_CALLCONV
krb5_gss_verify_mic_iov(OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
gss_qop_t *qop_state,
major_status = kg_unseal_iov(minor_status, context_handle,
NULL, qop_state,
- iov, iov_count, KG_TOK_WRAP_MSG);
+ iov, iov_count, KG_TOK_MIC_MSG);
return major_status;
}
-#endif
krb5_error_code code;
gss_iov_buffer_desc *header;
krb5_crypto_iov *kiov;
- size_t kiov_count;
int i = 0, j;
size_t conf_len = 0, token_header_len;
- header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
+ header = kg_locate_header_iov(iov, iov_count, toktype);
assert(header != NULL);
- kiov_count = 3 + iov_count;
- kiov = (krb5_crypto_iov *)xmalloc(kiov_count * sizeof(krb5_crypto_iov));
+ kiov = calloc(iov_count + 3, sizeof(krb5_crypto_iov));
if (kiov == NULL)
return ENOMEM;
i++;
}
- code = krb5_k_make_checksum_iov(context, type, seq, sign_usage, kiov, kiov_count);
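+ /* Pass the number of entries actually filled in, which may be less than
+ * iov_count + 3. */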
+ code = krb5_k_make_checksum_iov(context, type, seq, sign_usage, kiov, i);
if (code == 0) {
checksum->length = kiov[0].data.length;
checksum->contents = (unsigned char *)kiov[0].data.data;
krb5_keyusage sign_usage,
gss_iov_buffer_desc *iov,
int iov_count,
+ int toktype,
krb5_boolean verify,
krb5_boolean *valid)
{
if (code != 0)
return code;
- header = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
+ header = kg_locate_header_iov(iov, iov_count, toktype);
assert(header != NULL);
trailer = kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);
krb5_key key,
krb5_keyusage sign_usage,
gss_iov_buffer_desc *iov,
- int iov_count)
+ int iov_count,
+ int toktype)
{
return checksum_iov_v3(context, type, rrc, key,
- sign_usage, iov, iov_count, 0, NULL);
+ sign_usage, iov, iov_count, toktype, 0, NULL);
}
krb5_error_code
krb5_keyusage sign_usage,
gss_iov_buffer_desc *iov,
int iov_count,
+ int toktype,
krb5_boolean *valid)
{
return checksum_iov_v3(context, type, rrc, key,
- sign_usage, iov, iov_count, 1, valid);
+ sign_usage, iov, iov_count, toktype, 1, valid);
}
return p;
}
+/* Return the IOV where the GSSAPI token header should be placed (and possibly
+ * the checksum as well, depending on the token type). */
+gss_iov_buffer_t
+kg_locate_header_iov(gss_iov_buffer_desc *iov, int iov_count, int toktype)
+{
+ if (toktype == KG_TOK_MIC_MSG)
+ return kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_MIC_TOKEN);
+ else
+ return kg_locate_iov(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
+}
+
void
kg_iov_msglen(gss_iov_buffer_desc *iov, int iov_count, size_t *data_length_p,
size_t *assoc_data_length_p)
gss_export_name_composite
gss_export_sec_context
gss_get_mic
+gss_get_mic_iov
+gss_get_mic_iov_length
gss_get_name_attribute
gss_import_cred
gss_import_name
gss_userok
gss_verify
gss_verify_mic
+gss_verify_mic_iov
gss_wrap
gss_wrap_aead
gss_wrap_iov
return (GSS_S_BAD_MECH);
}
+
+OM_uint32 KRB5_CALLCONV
+gss_verify_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
+ gss_qop_t *qop_state, gss_iov_buffer_desc *iov,
+ int iov_count)
+{
+ OM_uint32 status;
+ gss_union_ctx_id_t ctx;
+ gss_mechanism mech;
+
+ status = val_unwrap_iov_args(minor_status, context_handle, NULL,
+ qop_state, iov, iov_count);
+ if (status != GSS_S_COMPLETE)
+ return status;
+
+ /* Select the appropriate underlying mechanism routine and call it. */
+ ctx = (gss_union_ctx_id_t)context_handle;
+ mech = gssint_get_mechanism(ctx->mech_type);
+ if (mech == NULL)
+ return GSS_S_BAD_MECH;
+ if (mech->gss_verify_mic_iov == NULL)
+ return GSS_S_UNAVAILABLE;
+ return mech->gss_verify_mic_iov(minor_status, ctx->internal_ctx_id,
+ qop_state, iov, iov_count);
+}
return (GSS_S_BAD_MECH);
}
+OM_uint32 KRB5_CALLCONV
+gss_get_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
+ gss_qop_t qop_req, gss_iov_buffer_desc *iov, int iov_count)
+{
+ OM_uint32 status;
+ gss_union_ctx_id_t ctx;
+ gss_mechanism mech;
+
+ status = val_wrap_iov_args(minor_status, context_handle, 0, qop_req, NULL,
+ iov, iov_count);
+ if (status != GSS_S_COMPLETE)
+ return status;
+
+ /* Select the appropriate underlying mechanism routine and call it. */
+ ctx = (gss_union_ctx_id_t)context_handle;
+ mech = gssint_get_mechanism(ctx->mech_type);
+ if (mech == NULL)
+ return GSS_S_BAD_MECH;
+ if (mech->gss_get_mic_iov == NULL)
+ return GSS_S_UNAVAILABLE;
+ return mech->gss_get_mic_iov(minor_status, ctx->internal_ctx_id, qop_req,
+ iov, iov_count);
+}
+
+OM_uint32 KRB5_CALLCONV
+gss_get_mic_iov_length(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
+ gss_qop_t qop_req, gss_iov_buffer_desc *iov,
+ int iov_count)
+{
+ OM_uint32 status;
+ gss_union_ctx_id_t ctx;
+ gss_mechanism mech;
+
+ status = val_wrap_iov_args(minor_status, context_handle, 0, qop_req, NULL,
+ iov, iov_count);
+ if (status != GSS_S_COMPLETE)
+ return status;
+
+ /* Select the appropriate underlying mechanism routine and call it. */
+ ctx = (gss_union_ctx_id_t)context_handle;
+ mech = gssint_get_mechanism(ctx->mech_type);
+ if (mech == NULL)
+ return GSS_S_BAD_MECH;
+ if (mech->gss_get_mic_iov_length == NULL)
+ return GSS_S_UNAVAILABLE;
+ return mech->gss_get_mic_iov_length(minor_status, ctx->internal_ctx_id,
+ qop_req, iov, iov_count);
+}
+
OM_uint32 KRB5_CALLCONV
gss_release_iov_buffer (minor_status,
iov,
gss_cred_id_t * /* cred_handle */
/* */);
+ /* get_mic_iov extensions, added in 1.12 */
+
+ OM_uint32 (KRB5_CALLCONV *gss_get_mic_iov)
+ (
+ OM_uint32 *, /* minor_status */
+ gss_ctx_id_t, /* context_handle */
+ gss_qop_t, /* qop_req */
+ gss_iov_buffer_desc *, /* iov */
+ int /* iov_count */
+ );
+
+ OM_uint32 (KRB5_CALLCONV *gss_verify_mic_iov)
+ (
+ OM_uint32 *, /* minor_status */
+ gss_ctx_id_t, /* context_handle */
+ gss_qop_t *, /* qop_state */
+ gss_iov_buffer_desc *, /* iov */
+ int /* iov_count */
+ );
+
+ OM_uint32 (KRB5_CALLCONV *gss_get_mic_iov_length)
+ (
+ OM_uint32 *, /* minor_status */
+ gss_ctx_id_t, /* context_handle */
+ gss_qop_t, /* qop_req */
+ gss_iov_buffer_desc *, /* iov */
+ int /* iov_count */
+ );
+
} *gss_mechanism;
/*
gss_cred_id_t *cred_handle
);
+OM_uint32 KRB5_CALLCONV
+spnego_gss_get_mic_iov(
+ OM_uint32 *minor_status,
+ gss_ctx_id_t context_handle,
+ gss_qop_t qop_req,
+ gss_iov_buffer_desc *iov,
+ int iov_count
+);
+
+OM_uint32 KRB5_CALLCONV
+spnego_gss_verify_mic_iov(
+ OM_uint32 *minor_status,
+ gss_ctx_id_t context_handle,
+ gss_qop_t *qop_state,
+ gss_iov_buffer_desc *iov,
+ int iov_count
+);
+
+OM_uint32 KRB5_CALLCONV
+spnego_gss_get_mic_iov_length(
+ OM_uint32 *minor_status,
+ gss_ctx_id_t context_handle,
+ gss_qop_t qop_req,
+ gss_iov_buffer_desc *iov,
+ int iov_count
+);
+
#ifdef __cplusplus
}
#endif
spnego_gss_acquire_cred_with_password,
spnego_gss_export_cred,
spnego_gss_import_cred,
+ NULL, /* gssspi_import_sec_context_by_mech */
+ NULL, /* gssspi_import_name_by_mech */
+ NULL, /* gssspi_import_cred_by_mech */
+ spnego_gss_get_mic_iov,
+ spnego_gss_verify_mic_iov,
+ spnego_gss_get_mic_iov_length
};
#ifdef _GSS_STATIC_LINK
return (ret);
}
+OM_uint32 KRB5_CALLCONV
+spnego_gss_get_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
+ gss_qop_t qop_req, gss_iov_buffer_desc *iov,
+ int iov_count)
+{
+ return gss_get_mic_iov(minor_status, context_handle, qop_req, iov,
+ iov_count);
+}
+
+OM_uint32 KRB5_CALLCONV
+spnego_gss_verify_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
+ gss_qop_t *qop_state, gss_iov_buffer_desc *iov,
+ int iov_count)
+{
+ return gss_verify_mic_iov(minor_status, context_handle, qop_state, iov,
+ iov_count);
+}
+
+OM_uint32 KRB5_CALLCONV
+spnego_gss_get_mic_iov_length(OM_uint32 *minor_status,
+ gss_ctx_id_t context_handle, gss_qop_t qop_req,
+ gss_iov_buffer_desc *iov, int iov_count)
+{
+ return gss_get_mic_iov_length(minor_status, context_handle, qop_req, iov,
+ iov_count);
+}
+
/*
* We will release everything but the ctx_handle so that it
* can be passed back to init/accept context. This routine should
gss_store_cred_into @141
gss_export_cred @142
gss_import_cred @143
+; Added in 1.12
+ gss_get_mic_iov @144
+ gss_get_mic_iov_length @145
+ gss_verify_mic_iov @146
(void)gss_release_iov_buffer(&minor, stiov, 3);
}
+/*
+ * Get a MIC for sign1, sign2, and sign3 using the caller-provided array iov,
+ * which must have space for four elements, and the caller-provided buffer
+ * data, which must be big enough for the MIC. If data is NULL, the library
+ * will be asked to allocate the MIC buffer. The MIC will be located in
+ * iov[3].buffer.
+ */
+static void
+mic(gss_ctx_id_t ctx, const char *sign1, const char *sign2, const char *sign3,
+ gss_iov_buffer_desc *iov, char *data)
+{
+ OM_uint32 minor, major;
+ krb5_boolean allocated;
+
+ /* Lay out iov array. */
+ iov[0].type = GSS_IOV_BUFFER_TYPE_DATA;
+ iov[0].buffer.value = (char *)sign1;
+ iov[0].buffer.length = strlen(sign1);
+ iov[1].type = GSS_IOV_BUFFER_TYPE_SIGN_ONLY;
+ iov[1].buffer.value = (char *)sign2;
+ iov[1].buffer.length = strlen(sign2);
+ iov[2].type = GSS_IOV_BUFFER_TYPE_SIGN_ONLY;
+ iov[2].buffer.value = (char *)sign3;
+ iov[2].buffer.length = strlen(sign3);
+ iov[3].type = GSS_IOV_BUFFER_TYPE_MIC_TOKEN;
+ if (data == NULL) {
+ /* Ask the library to allocate the MIC buffer. */
+ iov[3].type |= GSS_IOV_BUFFER_FLAG_ALLOCATE;
+ } else {
+ /* Get the MIC length and use the caller-provided buffer. */
+ major = gss_get_mic_iov_length(&minor, ctx, GSS_C_QOP_DEFAULT, iov, 4);
+ check_gsserr("gss_get_mic_iov_length", major, minor);
+ iov[3].buffer.value = data;
+ }
+ major = gss_get_mic_iov(&minor, ctx, GSS_C_QOP_DEFAULT, iov, 4);
+ check_gsserr("gss_get_mic_iov", major, minor);
+ allocated = (GSS_IOV_BUFFER_FLAGS(iov[3].type) &
+ GSS_IOV_BUFFER_FLAG_ALLOCATED) != 0;
+ if (allocated != (data == NULL))
+ errout("gss_get_mic_iov allocated");
+}
+
+static void
+test_mic(gss_ctx_id_t ctx1, gss_ctx_id_t ctx2)
+{
+ OM_uint32 major, minor;
+ gss_iov_buffer_desc iov[4];
+ gss_qop_t qop;
+ gss_buffer_desc concatbuf, micbuf;
+ const char *sign1 = "Data and sign-only ";
+ const char *sign2 = "buffers are treated ";
+ const char *sign3 = "equally by gss_get_mic_iov";
+ char concat[1024], data[1024];
+
+ (void)snprintf(concat, sizeof(concat), "%s%s%s", sign1, sign2, sign3);
+ concatbuf.value = concat;
+ concatbuf.length = strlen(concat);
+
+ /* MIC with a caller-provided buffer and verify with the IOV array. */
+ mic(ctx1, sign1, sign2, sign3, iov, data);
+ major = gss_verify_mic_iov(&minor, ctx2, &qop, iov, 4);
+ check_gsserr("gss_verify_mic_iov(mic1)", major, minor);
+ if (qop != GSS_C_QOP_DEFAULT)
+ errout("gss_verify_mic_iov(mic1) qop");
+
+ /* MIC with an allocated buffer and verify with gss_verify_mic. */
+ mic(ctx1, sign1, sign2, sign3, iov, NULL);
+ major = gss_verify_mic(&minor, ctx2, &concatbuf, &iov[3].buffer, &qop);
+ check_gsserr("gss_verify_mic(mic2)", major, minor);
+ if (qop != GSS_C_QOP_DEFAULT)
+ errout("gss_verify_mic(mic2) qop");
+ (void)gss_release_iov_buffer(&minor, iov, 4);
+
+ /* MIC with gss_get_mic and verify using the IOV array (which is still
+ * mostly set up from the last call to mic()). */
+ major = gss_get_mic(&minor, ctx1, GSS_C_QOP_DEFAULT, &concatbuf, &micbuf);
+ check_gsserr("gss_get_mic(mic3)", major, minor);
+ iov[3].buffer = micbuf;
+ major = gss_verify_mic_iov(&minor, ctx2, &qop, iov, 4);
+ check_gsserr("gss_verify_mic_iov(mic3)", major, minor);
+ if (qop != GSS_C_QOP_DEFAULT)
+ errout("gss_verify_mic_iov(mic3) qop");
+ (void)gss_release_buffer(&minor, &micbuf);
+}
+
/* Create a DCE-style token and make sure we can unwrap it. */
static void
test_dce(gss_ctx_id_t ctx1, gss_ctx_id_t ctx2, int conf)
test_aead(actx, ictx, 0);
test_aead(actx, ictx, 1);
+ /* Test MIC tokens. */
+ test_mic(ictx, actx);
+ test_mic(actx, ictx);
+
/* Test DCE wrapping with DCE-style contexts. */
(void)gss_delete_sec_context(&minor, &ictx, NULL);
(void)gss_delete_sec_context(&minor, &actx, NULL);