#include "k5-platform.h"
-/* not used in krb5.h (yet) */
-typedef UINT64_TYPE krb5_ui_8;
-typedef INT64_TYPE krb5_int64;
-
#define KRB5_KDB_MAX_LIFE (60*60*24) /* one day */
#define KRB5_KDB_MAX_RLIFE (60*60*24*7) /* one week */
/* [De]serialize 8-byte integer */
krb5_error_code KRB5_CALLCONV
-krb5_ser_pack_int64(krb5_int64, krb5_octet **, size_t *);
+krb5_ser_pack_int64(int64_t, krb5_octet **, size_t *);
krb5_error_code KRB5_CALLCONV
-krb5_ser_unpack_int64(krb5_int64 *, krb5_octet **, size_t *);
+krb5_ser_unpack_int64(int64_t *, krb5_octet **, size_t *);
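/*
 * Illustrative sketch, not part of the patch: how a caller drives the two
 * prototypes above.  Assumes an in-tree build where k5-int.h declares them;
 * roundtrip_example() is a hypothetical name.  Both calls advance *bufp and
 * shrink *remainp by eight bytes, and the wire encoding is big-endian (see
 * the implementations further down).
 */
#include "k5-int.h"
#include <errno.h>

static krb5_error_code
roundtrip_example(void)
{
    krb5_octet buf[sizeof(int64_t)], *bp = buf;
    size_t remain = sizeof(buf);
    int64_t in = -42, out = 0;
    krb5_error_code ret;

    ret = krb5_ser_pack_int64(in, &bp, &remain);      /* writes 8 bytes */
    if (ret)
        return ret;
    bp = buf;                                         /* rewind */
    remain = sizeof(buf);
    ret = krb5_ser_unpack_int64(&out, &bp, &remain);  /* reads them back */
    return ret ? ret : (out == in ? 0 : EINVAL);
}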
/* [De]serialize byte string */
krb5_error_code KRB5_CALLCONV
krb5_error_code (*mandatory_cksumtype)(krb5_context, krb5_enctype,
krb5_cksumtype *);
- krb5_error_code (KRB5_CALLCONV *ser_pack_int64)(krb5_int64, krb5_octet **,
+ krb5_error_code (KRB5_CALLCONV *ser_pack_int64)(int64_t, krb5_octet **,
size_t *);
- krb5_error_code (KRB5_CALLCONV *ser_unpack_int64)(krb5_int64 *,
- krb5_octet **, size_t *);
+ krb5_error_code (KRB5_CALLCONV *ser_unpack_int64)(int64_t *, krb5_octet **,
+ size_t *);
/* Used for KDB LDAP back end. */
krb5_error_code
#endif
-#define INT64_TYPE int64_t
-#define UINT64_TYPE uint64_t
-
#ifndef SIZE_MAX
# define SIZE_MAX ((size_t)((size_t)0 - 1))
#endif
#endif
}
static inline void
-store_64_be (UINT64_TYPE val, void *vp)
+store_64_be (uint64_t val, void *vp)
{
unsigned char *p = (unsigned char *) vp;
#if defined(__GNUC__) && defined(K5_BE) && !defined(__cplusplus)
| ((uint32_t) p[0] << 24));
#endif
}
-static inline UINT64_TYPE
+static inline uint64_t
load_64_be (const void *cvp)
{
const unsigned char *p = (const unsigned char *) cvp;
#elif defined(__GNUC__) && defined(K5_LE) && defined(SWAP64) && !defined(__cplusplus)
return GETSWAPPED(64,p);
#else
- return ((UINT64_TYPE)load_32_be(p) << 32) | load_32_be(p+4);
+ return ((uint64_t)load_32_be(p) << 32) | load_32_be(p+4);
#endif
}
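/*
 * Standalone illustration (editor's sketch, hypothetical demo_* names) of the
 * generic #else path above: a 64-bit big-endian load composed from two 32-bit
 * big-endian loads.  Compiles with only <stdint.h> and <assert.h>.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t demo_load_32_be(const unsigned char *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static uint64_t demo_load_64_be(const unsigned char *p)
{
    /* High word first, same shape as the fallback branch of load_64_be(). */
    return ((uint64_t)demo_load_32_be(p) << 32) | demo_load_32_be(p + 4);
}

int main(void)
{
    unsigned char buf[8] = { 0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF };

    assert(demo_load_64_be(buf) == UINT64_C(0x0123456789ABCDEF));
    return 0;
}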
static inline void
#endif
}
static inline void
-store_64_le (UINT64_TYPE val, void *vp)
+store_64_le (uint64_t val, void *vp)
{
unsigned char *p = (unsigned char *) vp;
#if defined(__GNUC__) && defined(K5_LE) && !defined(__cplusplus)
return (p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24));
#endif
}
-static inline UINT64_TYPE
+static inline uint64_t
load_64_le (const void *cvp)
{
const unsigned char *p = (const unsigned char *) cvp;
#elif defined(__GNUC__) && defined(K5_BE) && defined(SWAP64) && !defined(__cplusplus)
return GETSWAPPED(64,p);
#else
- return ((UINT64_TYPE)load_32_le(p+4) << 32) | load_32_le(p);
+ return ((uint64_t)load_32_le(p+4) << 32) | load_32_le(p);
#endif
}
memcpy(vp, &n, 4);
}
static inline void
-store_64_n (UINT64_TYPE val, void *vp)
+store_64_n (uint64_t val, void *vp)
{
- UINT64_TYPE n = val;
+ uint64_t n = val;
memcpy(vp, &n, 8);
}
static inline unsigned short
memcpy(&n, p, 4);
return n;
}
-static inline UINT64_TYPE
+static inline uint64_t
load_64_n (const void *p)
{
- UINT64_TYPE n;
+ uint64_t n;
memcpy(&n, p, 8);
return n;
}
#undef UINT32_TYPE
/* Assume for simplicity that these swaps are identical. */
-static inline UINT64_TYPE
-k5_htonll (UINT64_TYPE val)
+static inline uint64_t
+k5_htonll (uint64_t val)
{
#ifdef K5_BE
return val;
return load_64_be ((unsigned char *)&val);
#endif
}
-static inline UINT64_TYPE
-k5_ntohll (UINT64_TYPE val)
+static inline uint64_t
+k5_ntohll (uint64_t val)
{
return k5_htonll (val);
}
static void
inc_counter(struct fortuna_state *st)
{
- UINT64_TYPE val;
+ uint64_t val;
val = load_64_le(st->counter) + 1;
store_64_le(val, st->counter);
#include "k5-platform.h"
#include "k5-buf.h"
-typedef UINT64_TYPE gssint_uint64;
/** helper macros **/
OM_uint32 status_value,
gss_buffer_t status_string);
-gss_int32 g_order_init (void **queue, gssint_uint64 seqnum,
+gss_int32 g_order_init (void **queue, uint64_t seqnum,
int do_replay, int do_sequence, int wide);
-gss_int32 g_order_check (void **queue, gssint_uint64 seqnum);
+gss_int32 g_order_check (void **queue, uint64_t seqnum);
void g_order_free (void **queue);
int do_sequence;
int start;
int length;
- gssint_uint64 firstnum;
+ uint64_t firstnum;
/* Stored as deltas from firstnum. This way, the high bit won't
overflow unless we've actually gone through 2**n messages, or
gotten something *way* out of sequence. */
- gssint_uint64 elem[QUEUE_LENGTH];
+ uint64_t elem[QUEUE_LENGTH];
/* All ones for 64-bit sequence numbers; 32 ones for 32-bit
sequence numbers. */
- gssint_uint64 mask;
+ uint64_t mask;
} queue;
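/*
 * Editor's sketch of the "stored as deltas from firstnum" comment above;
 * relative_seq() is hypothetical, but the subtract-then-mask arithmetic
 * mirrors how the queue narrows 32-bit sequence numbers.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t
relative_seq(uint64_t firstnum, uint64_t mask, uint64_t seqnum)
{
    return (seqnum - firstnum) & mask;
}

int main(void)
{
    uint64_t narrow = 0xffffffffUL;

    /* A 32-bit wraparound still yields a small, in-order delta... */
    assert(relative_seq(0xfffffff0UL, narrow, 0x00000005UL) == 0x15);
    /* ...while a badly out-of-sequence number shows up as a huge delta. */
    assert(relative_seq(0x10, ~(uint64_t)0, 0x0) == UINT64_C(0xfffffffffffffff0));
    return 0;
}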
/* rep invariant:
#define QELEM(q,i) ((q)->elem[(i)%QSIZE(q)])
static void
-queue_insert(queue *q, int after, gssint_uint64 seqnum)
+queue_insert(queue *q, int after, uint64_t seqnum)
{
/* insert. this is not the fastest way, but it's easy, and it's
optimized for insert at end, which is the common case */
}
gss_int32
-g_order_init(void **vqueue, gssint_uint64 seqnum,
+g_order_init(void **vqueue, uint64_t seqnum,
int do_replay, int do_sequence, int wide_nums)
{
queue *q;
q->do_replay = do_replay;
q->do_sequence = do_sequence;
- q->mask = wide_nums ? ~(gssint_uint64)0 : 0xffffffffUL;
+ q->mask = wide_nums ? ~(uint64_t)0 : 0xffffffffUL;
q->start = 0;
q->length = 1;
q->firstnum = seqnum;
- q->elem[q->start] = ((gssint_uint64)0 - 1) & q->mask;
+ q->elem[q->start] = ((uint64_t)0 - 1) & q->mask;
*vqueue = (void *) q;
return(0);
}
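/*
 * Hedged usage sketch (editor's, not from the patch) of the queue API shown
 * above, assuming the in-tree gssapiP_generic.h declarations.  Because the
 * initializer above seeds the queue one before the given seqnum, the first
 * check of that same number should succeed; the exact failure codes are left
 * opaque here.
 */
#include "gssapiP_generic.h"

static int
order_demo(void)
{
    void *queue = NULL;
    gss_int32 maj;

    if (g_order_init(&queue, 100, 1 /* do_replay */, 1 /* do_sequence */,
                     1 /* wide */) != 0)
        return -1;

    maj = g_order_check(&queue, 100);   /* first expected number */
    maj = g_order_check(&queue, 101);   /* next in sequence */
    maj = g_order_check(&queue, 100);   /* replay: expect a nonzero code */
    (void)maj;

    g_order_free(&queue);
    return 0;
}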
gss_int32
-g_order_check(void **vqueue, gssint_uint64 seqnum)
+g_order_check(void **vqueue, uint64_t seqnum)
{
queue *q;
int i;
- gssint_uint64 expected;
+ uint64_t expected;
q = (queue *) (*vqueue);
/* XXX these used to be signed. the old spec is inspecific, and
the new spec specifies unsigned. I don't believe that the change
affects the wire encoding. */
- gssint_uint64 seq_send;
- gssint_uint64 seq_recv;
+ uint64_t seq_send;
+ uint64_t seq_recv;
void *seqstate;
krb5_context k5_context;
krb5_auth_context auth_context;
#define gss_krb5_nt_machine_uid_name gss_nt_machine_uid_name
#define gss_krb5_nt_string_uid_name gss_nt_string_uid_name
-typedef uint64_t gss_uint64;
-
typedef struct gss_krb5_lucid_key {
OM_uint32 type; /* key encryption type */
OM_uint32 length; /* length of key data */
MUST be at beginning of struct! */
OM_uint32 initiate; /* Are we the initiator? */
OM_uint32 endtime; /* expiration time of context */
- gss_uint64 send_seq; /* sender sequence number */
- gss_uint64 recv_seq; /* receive sequence number */
+ uint64_t send_seq; /* sender sequence number */
+ uint64_t recv_seq; /* receive sequence number */
OM_uint32 protocol; /* 0: rfc1964,
1: draft-ietf-krb-wg-gssapi-cfx-07 */
/*
make_seal_token_v1 (krb5_context context,
krb5_key enc,
krb5_key seq,
- gssint_uint64 *seqnum,
+ uint64_t *seqnum,
int direction,
gss_buffer_t text,
gss_buffer_t token,
{
krb5_context context = *contextptr;
krb5_data plain;
- gssint_uint64 seqnum;
+ uint64_t seqnum;
size_t ec, rrc;
int key_usage;
unsigned char acceptor_flag;
size_t rrc, ec;
size_t data_length, assoc_data_length;
krb5_key key;
- gssint_uint64 seqnum;
+ uint64_t seqnum;
krb5_boolean valid;
krb5_cksumtype cksumtype;
int conf_flag = 0;
return(GSS_S_BAD_SIG);
}
- retval = g_order_check(&(ctx->seqstate), (gssint_uint64)seqnum);
+ retval = g_order_check(&(ctx->seqstate), (uint64_t)seqnum);
/* success or ordering violation */
}
code = 0;
- retval = g_order_check(&ctx->seqstate, (gssint_uint64)seqnum);
+ retval = g_order_check(&ctx->seqstate, (uint64_t)seqnum);
cleanup:
krb5_free_checksum_contents(context, &md5cksum);
* krb5_int32 for endtime.
* krb5_int32 for renew_till.
* krb5_int32 for flags.
- * krb5_int64 for seq_send.
- * krb5_int64 for seq_recv.
+ * int64_t for seq_send.
+ * int64_t for seq_recv.
* ... for seqstate
* ... for auth_context
* ... for mech_used
kret = EINVAL;
if ((ctx = (krb5_gss_ctx_id_rec *) arg)) {
required = 21*sizeof(krb5_int32);
- required += 2*sizeof(krb5_int64);
+ required += 2*sizeof(int64_t);
required += sizeof(ctx->seed);
kret = 0;
&bp, &remain);
(void) krb5_ser_pack_int32((krb5_int32) ctx->krb_flags,
&bp, &remain);
- (void) (*kaccess.ser_pack_int64)((krb5_int64) ctx->seq_send,
+ (void) (*kaccess.ser_pack_int64)((int64_t) ctx->seq_send,
&bp, &remain);
- (void) (*kaccess.ser_pack_int64)((krb5_int64) ctx->seq_recv,
+ (void) (*kaccess.ser_pack_int64)((int64_t) ctx->seq_recv,
&bp, &remain);
/* Now dynamic data */
/* Get a context */
if ((remain >= (17*sizeof(krb5_int32)
- + 2*sizeof(krb5_int64)
+ + 2*sizeof(int64_t)
+ sizeof(ctx->seed))) &&
(ctx = (krb5_gss_ctx_id_rec *)
xmalloc(sizeof(krb5_gss_ctx_id_rec)))) {
ctx->krb_times.renew_till = (krb5_timestamp) ibuf;
(void) krb5_ser_unpack_int32(&ibuf, &bp, &remain);
ctx->krb_flags = (krb5_flags) ibuf;
- (void) (*kaccess.ser_unpack_int64)((krb5_int64 *)&ctx->seq_send,
+ (void) (*kaccess.ser_unpack_int64)((int64_t *)&ctx->seq_send,
&bp, &remain);
- kret = (*kaccess.ser_unpack_int64)((krb5_int64 *)&ctx->seq_recv,
+ kret = (*kaccess.ser_unpack_int64)((int64_t *)&ctx->seq_recv,
&bp, &remain);
if (kret) {
free(ctx);
/**** Functions for encoding primitive types ****/
asn1_error_code
-k5_asn1_encode_bool(asn1buf *buf, asn1_intmax val, size_t *len_out)
+k5_asn1_encode_bool(asn1buf *buf, intmax_t val, size_t *len_out)
{
asn1_octet bval = val ? 0xFF : 0x00;
}
asn1_error_code
-k5_asn1_encode_int(asn1buf *buf, asn1_intmax val, size_t *len_out)
+k5_asn1_encode_int(asn1buf *buf, intmax_t val, size_t *len_out)
{
asn1_error_code ret;
size_t len = 0;
}
asn1_error_code
-k5_asn1_encode_uint(asn1buf *buf, asn1_uintmax val, size_t *len_out)
+k5_asn1_encode_uint(asn1buf *buf, uintmax_t val, size_t *len_out)
{
asn1_error_code ret;
size_t len = 0;
- asn1_uintmax valcopy;
+ uintmax_t valcopy;
int digit;
valcopy = val;
/**** Functions for decoding primitive types ****/
asn1_error_code
-k5_asn1_decode_bool(const unsigned char *asn1, size_t len, asn1_intmax *val)
+k5_asn1_decode_bool(const unsigned char *asn1, size_t len, intmax_t *val)
{
if (len != 1)
return ASN1_BAD_LENGTH;
/* Decode asn1/len as the contents of a DER integer, placing the signed result
* in val. */
asn1_error_code
-k5_asn1_decode_int(const unsigned char *asn1, size_t len, asn1_intmax *val)
+k5_asn1_decode_int(const unsigned char *asn1, size_t len, intmax_t *val)
{
- asn1_intmax n;
+ intmax_t n;
size_t i;
if (len == 0)
return ASN1_BAD_LENGTH;
n = (asn1[0] & 0x80) ? -1 : 0;
/* Check length; allow extra octet if first octet is 0. */
- if (len > sizeof(asn1_intmax) + (asn1[0] == 0))
+ if (len > sizeof(intmax_t) + (asn1[0] == 0))
return ASN1_OVERFLOW;
for (i = 0; i < len; i++)
n = (n << 8) | asn1[i];
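/*
 * Standalone worked example (editor's sketch) of the sign-extension idiom in
 * k5_asn1_decode_int() above: seed the accumulator with -1 when the first
 * content octet has its top bit set, then shift in each octet.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static intmax_t
der_int_demo(const unsigned char *asn1, size_t len)
{
    intmax_t n = (asn1[0] & 0x80) ? -1 : 0;   /* sign-extend from octet 0 */
    size_t i;

    for (i = 0; i < len; i++)
        n = (n << 8) | asn1[i];   /* same loop shape (and same reliance on
                                   * signed-shift behavior) as above */
    return n;
}

int main(void)
{
    unsigned char pos[] = { 0x00, 0x80 };   /* leading 0x00 keeps 0x80 positive */
    unsigned char neg[] = { 0xFF, 0x7F };   /* two's complement for -129 */

    assert(der_int_demo(pos, sizeof(pos)) == 128);
    assert(der_int_demo(neg, sizeof(neg)) == -129);
    return 0;
}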
/* Decode asn1/len as the contents of a DER integer, placing the unsigned
* result in val. */
asn1_error_code
-k5_asn1_decode_uint(const unsigned char *asn1, size_t len, asn1_uintmax *val)
+k5_asn1_decode_uint(const unsigned char *asn1, size_t len, uintmax_t *val)
{
- asn1_uintmax n;
+ uintmax_t n;
size_t i;
if (len == 0)
return ASN1_BAD_LENGTH;
/* Check for negative values and check length. */
- if ((asn1[0] & 0x80) || len > sizeof(asn1_uintmax) + (asn1[0] == 0))
+ if ((asn1[0] & 0x80) || len > sizeof(uintmax_t) + (asn1[0] == 0))
return ASN1_OVERFLOW;
for (i = 0, n = 0; i < len; i++)
n = (n << 8) | asn1[i];
return encode_sequence_of(buf, len, val, type, len_out);
}
-static asn1_intmax
+static intmax_t
load_int(const void *val, size_t size)
{
switch (size) {
case 1: return *(signed char *)val;
case 2: return *(krb5_int16 *)val;
case 4: return *(krb5_int32 *)val;
- case 8: return *(INT64_TYPE *)val;
+ case 8: return *(int64_t *)val;
default: abort();
}
}
-static asn1_uintmax
+static uintmax_t
load_uint(const void *val, size_t size)
{
switch (size) {
case 1: return *(unsigned char *)val;
case 2: return *(krb5_ui_2 *)val;
case 4: return *(krb5_ui_4 *)val;
- case 8: return *(UINT64_TYPE *)val;
+ case 8: return *(uint64_t *)val;
default: abort();
}
}
{
const void *countptr = (const char *)val + counted->lenoff;
- assert(sizeof(size_t) <= sizeof(asn1_uintmax));
+ assert(sizeof(size_t) <= sizeof(uintmax_t));
if (counted->lensigned) {
- asn1_intmax xlen = load_int(countptr, counted->lensize);
- if (xlen < 0 || (asn1_uintmax)xlen > SIZE_MAX)
+ intmax_t xlen = load_int(countptr, counted->lensize);
+ if (xlen < 0 || (uintmax_t)xlen > SIZE_MAX)
return EINVAL;
*count_out = xlen;
} else {
- asn1_uintmax xlen = load_uint(countptr, counted->lensize);
+ uintmax_t xlen = load_uint(countptr, counted->lensize);
if ((size_t)xlen != xlen || xlen > SIZE_MAX)
return EINVAL;
*count_out = xlen;
}
static asn1_error_code
-store_int(asn1_intmax intval, size_t size, void *val)
+store_int(intmax_t intval, size_t size, void *val)
{
switch (size) {
case 1:
*(krb5_int32 *)val = intval;
return 0;
case 8:
- if ((INT64_TYPE)intval != intval)
+ if ((int64_t)intval != intval)
return ASN1_OVERFLOW;
- *(INT64_TYPE *)intval = intval;
+        *(int64_t *)val = intval;
return 0;
default:
abort();
}
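/*
 * The width check in store_int() above (and store_uint() below) uses a
 * cast-and-compare idiom; a tiny standalone sketch (editor's, hypothetical
 * fits_in_int32() name) of what it accepts and rejects for a 32-bit field.
 */
#include <assert.h>
#include <stdint.h>

static int
fits_in_int32(intmax_t v)
{
    return (int32_t)v == v;   /* same test shape as "(krb5_int32)intval != intval" */
}

int main(void)
{
    assert(fits_in_int32(INT32_MAX));
    assert(fits_in_int32(INT32_MIN));
    assert(!fits_in_int32((intmax_t)INT32_MAX + 1));   /* would be ASN1_OVERFLOW */
    return 0;
}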
static asn1_error_code
-store_uint(asn1_uintmax intval, size_t size, void *val)
+store_uint(uintmax_t intval, size_t size, void *val)
{
switch (size) {
case 1:
*(krb5_ui_4 *)val = intval;
return 0;
case 8:
- if ((UINT64_TYPE)intval != intval)
+ if ((uint64_t)intval != intval)
return ASN1_OVERFLOW;
- *(UINT64_TYPE *)val = intval;
+ *(uint64_t *)val = intval;
return 0;
default:
abort();
if (counted->lensigned) {
if (count == SIZE_MAX)
return store_int(-1, counted->lensize, countptr);
- else if ((asn1_intmax)count < 0)
+ else if ((intmax_t)count < 0)
return ASN1_OVERFLOW;
else
return store_int(count, counted->lensize, countptr);
return decode_atype(tp, asn1, len, tag->basetype, val);
}
case atype_bool: {
- asn1_intmax intval;
+ intmax_t intval;
ret = k5_asn1_decode_bool(asn1, len, &intval);
if (ret)
return ret;
return store_int(intval, a->size, val);
}
case atype_int: {
- asn1_intmax intval;
+ intmax_t intval;
ret = k5_asn1_decode_int(asn1, len, &intval);
if (ret)
return ret;
return store_int(intval, a->size, val);
}
case atype_uint: {
- asn1_uintmax intval;
+ uintmax_t intval;
ret = k5_asn1_decode_uint(asn1, len, &intval);
if (ret)
return ret;
}
case atype_int_immediate: {
const struct immediate_info *imm = a->tinfo;
- asn1_intmax intval;
+ intmax_t intval;
ret = k5_asn1_decode_int(asn1, len, &intval);
if (ret)
return ret;
/* These functions are referenced by encoder structures. They handle the
* encoding of primitive ASN.1 types. */
-asn1_error_code k5_asn1_encode_bool(asn1buf *buf, asn1_intmax val,
+asn1_error_code k5_asn1_encode_bool(asn1buf *buf, intmax_t val,
size_t *len_out);
-asn1_error_code k5_asn1_encode_int(asn1buf *buf, asn1_intmax val,
+asn1_error_code k5_asn1_encode_int(asn1buf *buf, intmax_t val,
size_t *len_out);
-asn1_error_code k5_asn1_encode_uint(asn1buf *buf, asn1_uintmax val,
+asn1_error_code k5_asn1_encode_uint(asn1buf *buf, uintmax_t val,
size_t *len_out);
asn1_error_code k5_asn1_encode_bytestring(asn1buf *buf,
unsigned char *const *val,
/* These functions are referenced by encoder structures. They handle the
* decoding of primitive ASN.1 types. */
asn1_error_code k5_asn1_decode_bool(const unsigned char *asn1, size_t len,
- asn1_intmax *val);
+ intmax_t *val);
asn1_error_code k5_asn1_decode_int(const unsigned char *asn1, size_t len,
- asn1_intmax *val);
+ intmax_t *val);
asn1_error_code k5_asn1_decode_uint(const unsigned char *asn1, size_t len,
- asn1_uintmax *val);
+ uintmax_t *val);
asn1_error_code k5_asn1_decode_generaltime(const unsigned char *asn1,
size_t len, time_t *time_out);
asn1_error_code k5_asn1_decode_bytestring(const unsigned char *asn1,
};
struct immediate_info {
- asn1_intmax val;
+ intmax_t val;
asn1_error_code err;
};
decode_seqno(const taginfo *t, const unsigned char *asn1, size_t len, void *p)
{
asn1_error_code ret;
- asn1_intmax val;
+ intmax_t val;
ret = k5_asn1_decode_int(asn1, len, &val);
if (ret)
return ret;
void *p)
{
asn1_error_code ret;
- asn1_intmax val;
+ intmax_t val;
ret = k5_asn1_decode_int(asn1, len, &val);
if (ret)
return ret;
typedef enum { UNIVERSAL = 0x00, APPLICATION = 0x40,
CONTEXT_SPECIFIC = 0x80, PRIVATE = 0xC0 } asn1_class;
-typedef INT64_TYPE asn1_intmax;
-typedef UINT64_TYPE asn1_uintmax;
-
typedef int asn1_tagnum;
#define ASN1_TAGNUM_CEILING INT_MAX
#define ASN1_TAGNUM_MAX (ASN1_TAGNUM_CEILING-1)
size_t remain;
krb5_ktfile_data *ktdata;
krb5_int32 file_is_open;
- krb5_int64 file_pos;
+ int64_t file_pos;
char *ktname;
const char *fnamep;
char *ktname = NULL;
krb5_ktfile_data *ktdata;
krb5_int32 file_is_open;
- krb5_int64 foff;
+ int64_t foff;
*argp = NULL;
bp = *buffer;
typedef struct _PAC_INFO_BUFFER {
krb5_ui_4 ulType;
krb5_ui_4 cbBufferSize;
- krb5_ui_8 Offset;
+ uint64_t Offset;
} PAC_INFO_BUFFER;
typedef struct _PACTYPE {
krb5_data *out_data);
krb5_error_code
-k5_seconds_since_1970_to_time(krb5_timestamp elapsedSeconds,
- krb5_ui_8 *ntTime);
+k5_seconds_since_1970_to_time(krb5_timestamp elapsedSeconds, uint64_t *ntTime);
#endif /* !KRB_AUTHDATA_H */
of using uint64_t, the possibility does exist that we're
wrong. */
{
- krb5_ui_8 i64;
+ uint64_t i64;
assert(sizeof(i64) == 8);
i64 = 0, i64--, i64 >>= 62;
assert(i64 == 3);
}
static krb5_error_code
-k5_time_to_seconds_since_1970(krb5_int64 ntTime,
- krb5_timestamp *elapsedSeconds)
+k5_time_to_seconds_since_1970(int64_t ntTime, krb5_timestamp *elapsedSeconds)
{
- krb5_ui_8 abstime;
+ uint64_t abstime;
ntTime /= 10000000;
}
krb5_error_code
-k5_seconds_since_1970_to_time(krb5_timestamp elapsedSeconds,
- krb5_ui_8 *ntTime)
+k5_seconds_since_1970_to_time(krb5_timestamp elapsedSeconds, uint64_t *ntTime)
{
*ntTime = elapsedSeconds;
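/*
 * For orientation only: a hedged, standalone sketch (editor's, hypothetical
 * names) of the general Unix-seconds / NT-time arithmetic.  NT time counts
 * 100 ns ticks since 1601-01-01, and 11644473600 seconds separate that epoch
 * from 1970-01-01; neither the constant name nor these helpers come from the
 * patch.
 */
#include <assert.h>
#include <stdint.h>

#define DEMO_NT_EPOCH_DELTA UINT64_C(11644473600)

static uint64_t
unix_seconds_to_nt_time(uint32_t secs)
{
    return ((uint64_t)secs + DEMO_NT_EPOCH_DELTA) * UINT64_C(10000000);
}

static uint32_t
nt_time_to_unix_seconds(uint64_t nt)
{
    return (uint32_t)(nt / UINT64_C(10000000) - DEMO_NT_EPOCH_DELTA);
}

int main(void)
{
    assert(nt_time_to_unix_seconds(unix_seconds_to_nt_time(0)) == 0);
    assert(nt_time_to_unix_seconds(unix_seconds_to_nt_time(1234567890u))
           == 1234567890u);
    return 0;
}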
unsigned char *p;
krb5_timestamp pac_authtime;
krb5_ui_2 pac_princname_length;
- krb5_int64 pac_nt_authtime;
+ int64_t pac_nt_authtime;
krb5_principal pac_principal;
ret = k5_pac_locate_buffer(context, pac, KRB5_PAC_CLIENT_INFO,
char *princ_name_utf8 = NULL;
unsigned char *princ_name_ucs2 = NULL, *p;
size_t princ_name_ucs2_len = 0;
- krb5_ui_8 nt_authtime;
+ uint64_t nt_authtime;
/* If we already have a CLIENT_INFO buffer, then just validate it */
if (k5_pac_locate_buffer(context, pac, KRB5_PAC_CLIENT_INFO,
* Update buffer pointer and remaining space.
*/
krb5_error_code KRB5_CALLCONV
-krb5_ser_pack_int64(krb5_int64 iarg, krb5_octet **bufp, size_t *remainp)
+krb5_ser_pack_int64(int64_t iarg, krb5_octet **bufp, size_t *remainp)
{
- if (*remainp >= sizeof(krb5_int64)) {
+ if (*remainp >= sizeof(int64_t)) {
store_64_be(iarg, (unsigned char *)*bufp);
- *bufp += sizeof(krb5_int64);
- *remainp -= sizeof(krb5_int64);
+ *bufp += sizeof(int64_t);
+ *remainp -= sizeof(int64_t);
return(0);
}
else
* krb5_ser_unpack_int64() - Unpack an 8-byte integer if it's there.
*/
krb5_error_code KRB5_CALLCONV
-krb5_ser_unpack_int64(krb5_int64 *intp, krb5_octet **bufp, size_t *remainp)
+krb5_ser_unpack_int64(int64_t *intp, krb5_octet **bufp, size_t *remainp)
{
- if (*remainp >= sizeof(krb5_int64)) {
+ if (*remainp >= sizeof(int64_t)) {
*intp = load_64_be((unsigned char *)*bufp);
- *bufp += sizeof(krb5_int64);
- *remainp -= sizeof(krb5_int64);
+ *bufp += sizeof(int64_t);
+ *remainp -= sizeof(int64_t);
return(0);
}
else
#define SSF_WRITE 0x02
#define SSF_EXCEPTION 0x04
-typedef krb5_int64 time_ms;
+typedef int64_t time_ms;
/* Since fd_set is large on some platforms (8K on AIX 5.2), this probably
* shouldn't be allocated in automatic storage. */
#endif
#include <errno.h>
-typedef int32_t prof_int32;
-
/* Create a vtable profile, possibly with a library handle. The new profile
* takes ownership of the handle refcount on success. */
static errcode_t
size_t required;
prf_file_t pfp;
- required = 3*sizeof(prof_int32);
+ required = 3*sizeof(int32_t);
for (pfp = profile->first_file; pfp; pfp = pfp->next) {
- required += sizeof(prof_int32);
+ required += sizeof(int32_t);
required += strlen(pfp->data->filespec);
}
*sizep += required;
return 0;
}
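/*
 * Worked example (editor's sketch, made-up filespecs) of the size arithmetic
 * in profile_ser_size() above: three fixed 32-bit words plus, per file, one
 * length word and the raw filespec bytes.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>

static size_t
profile_ser_size_demo(const char *const *files, size_t nfiles)
{
    size_t required = 3 * sizeof(int32_t), i;

    for (i = 0; i < nfiles; i++)
        required += sizeof(int32_t) + strlen(files[i]);
    return required;
}

int main(void)
{
    const char *files[] = { "/etc/krb5a", "/etc/krb5.cfg" };   /* 10 and 13 bytes */

    assert(profile_ser_size_demo(files, 2)
           == 3 * sizeof(int32_t)
              + (sizeof(int32_t) + 10) + (sizeof(int32_t) + 13));
    return 0;
}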
-static void pack_int32(prof_int32 oval, unsigned char **bufpp, size_t *remainp)
+static void pack_int32(int32_t oval, unsigned char **bufpp, size_t *remainp)
{
store_32_be(oval, *bufpp);
- *bufpp += sizeof(prof_int32);
- *remainp -= sizeof(prof_int32);
+ *bufpp += sizeof(int32_t);
+ *remainp -= sizeof(int32_t);
}
errcode_t profile_ser_externalize(const char *unused, profile_t profile,
unsigned char *bp;
size_t remain;
prf_file_t pfp;
- prof_int32 fcount, slen;
+ int32_t fcount, slen;
required = 0;
bp = *bufpp;
pack_int32(PROF_MAGIC_PROFILE, &bp, &remain);
pack_int32(fcount, &bp, &remain);
for (pfp = profile->first_file; pfp; pfp = pfp->next) {
- slen = (prof_int32) strlen(pfp->data->filespec);
+ slen = (int32_t) strlen(pfp->data->filespec);
pack_int32(slen, &bp, &remain);
if (slen) {
memcpy(bp, pfp->data->filespec, (size_t) slen);
return(retval);
}
-static int unpack_int32(prof_int32 *intp, unsigned char **bufpp,
+static int unpack_int32(int32_t *intp, unsigned char **bufpp,
size_t *remainp)
{
- if (*remainp >= sizeof(prof_int32)) {
+ if (*remainp >= sizeof(int32_t)) {
*intp = load_32_be(*bufpp);
- *bufpp += sizeof(prof_int32);
- *remainp -= sizeof(prof_int32);
+ *bufpp += sizeof(int32_t);
+ *remainp -= sizeof(int32_t);
return 0;
}
else
unsigned char *bp;
size_t remain;
int i;
- prof_int32 fcount, tmp;
+ int32_t fcount, tmp;
profile_filespec_t *flist = 0;
bp = *bufpp;
This is icky. I just hope it's adequate.
For next major release, fix this. */
- union { double d; void *p; UINT64_TYPE ll; k5_mutex_t m; } pad;
+ union { double d; void *p; uint64_t ll; k5_mutex_t m; } pad;
int refcount; /* prf_file_t references */
struct _prf_data_t *next;
on. */
union {
- UINT64_TYPE n64;
+ uint64_t n64;
uint32_t n32;
uint16_t n16;
unsigned char b[9];