def test_sha256_gil(self):
gil_minsize = hashlib_helper.find_gil_minsize(['_sha2', '_hashlib'])
+ data = b'1' + b'#' * gil_minsize + b'1'
+ expected = hashlib.sha256(data).hexdigest()
+
m = hashlib.sha256()
m.update(b'1')
m.update(b'#' * gil_minsize)
m.update(b'1')
- self.assertEqual(
- m.hexdigest(),
- '1cfceca95989f51f658e3f3ffe7f1cd43726c9e088c13ee10b46f57cef135b94'
- )
+ self.assertEqual(m.hexdigest(), expected)
- m = hashlib.sha256(b'1' + b'#' * gil_minsize + b'1')
- self.assertEqual(
- m.hexdigest(),
- '1cfceca95989f51f658e3f3ffe7f1cd43726c9e088c13ee10b46f57cef135b94'
- )
+ @threading_helper.reap_threads
+ @threading_helper.requires_working_threading()
+ def test_threaded_hashing_fast(self):
+ # Same as test_threaded_hashing_slow() but only tests some functions
+ # since otherwise test_hashlib.py becomes too slow during development.
+ for name in ['md5', 'sha1', 'sha256', 'sha3_256', 'blake2s']:
+ if constructor := getattr(hashlib, name, None):
+ with self.subTest(name):
+ self.do_test_threaded_hashing(constructor, is_shake=False)
+ if shake_128 := getattr(hashlib, 'shake_128', None):
+ self.do_test_threaded_hashing(shake_128, is_shake=True)
+ @requires_resource('cpu')
@threading_helper.reap_threads
@threading_helper.requires_working_threading()
- def test_threaded_hashing(self):
+ def test_threaded_hashing_slow(self):
+ for algorithm, constructors in self.constructors_to_test.items():
+ is_shake = algorithm in self.shakes
+ for constructor in constructors:
+ with self.subTest(constructor.__name__, is_shake=is_shake):
+ self.do_test_threaded_hashing(constructor, is_shake)
+
+ def do_test_threaded_hashing(self, constructor, is_shake):
# Updating the same hash object from several threads at once
# using data chunk sizes containing the same byte sequences.
#
# If the internal locks are working to prevent multiple
# updates on the same object from running at once, the resulting
# hash will be the same as doing it single threaded upfront.
- hasher = hashlib.sha1()
- num_threads = 5
- smallest_data = b'swineflu'
- data = smallest_data * 200000
- expected_hash = hashlib.sha1(data*num_threads).hexdigest()
-
- def hash_in_chunks(chunk_size):
- index = 0
- while index < len(data):
- hasher.update(data[index:index + chunk_size])
- index += chunk_size
+
+ # The data to hash has length s|M|q^N and the chunk size for the i-th
+ # thread is s|M|q^(N-i), where N is the number of threads, M is a fixed
+ # message of small length, and s >= 1 and q >= 2 are small integers.
+ smallest_size, num_threads, s, q = 8, 5, 2, 10
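+ # For instance, with the values above the data is s * |M| * q**N
+ # = 2 * 8 * 10**5 = 1,600,000 bytes long and the per-thread chunk
+ # sizes are 1,600,000, 160,000, 16,000, 1,600 and 160 bytes, so
+ # thread 0 hashes the whole data in a single update() call.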
+
+ smallest_data = os.urandom(smallest_size)
+ data = s * smallest_data * (q ** num_threads)
+
+ h1 = constructor(usedforsecurity=False)
+ h2 = constructor(data * num_threads, usedforsecurity=False)
+
+ def update(chunk_size):
+ for index in range(0, len(data), chunk_size):
+ h1.update(data[index:index + chunk_size])
threads = []
- for threadnum in range(num_threads):
- chunk_size = len(data) // (10 ** threadnum)
+ for thread_num in range(num_threads):
+ # chunk_size = len(data) // (q ** thread_num)
+ chunk_size = s * smallest_size * q ** (num_threads - thread_num)
self.assertGreater(chunk_size, 0)
- self.assertEqual(chunk_size % len(smallest_data), 0)
- thread = threading.Thread(target=hash_in_chunks,
- args=(chunk_size,))
+ self.assertEqual(chunk_size % smallest_size, 0)
+ thread = threading.Thread(target=update, args=(chunk_size,))
threads.append(thread)
for thread in threads:
    thread.start()

for thread in threads:
    thread.join()
- self.assertEqual(expected_hash, hasher.hexdigest())
+ if is_shake:
+ self.assertEqual(h1.hexdigest(16), h2.hexdigest(16))
+ else:
+ self.assertEqual(h1.hexdigest(), h2.hexdigest())
def test_get_fips_mode(self):
fips_mode = self.is_fips_mode
}
typedef struct {
- PyObject_HEAD
+ HASHLIB_OBJECT_HEAD
EVP_MD_CTX *ctx; /* OpenSSL message digest context */
- // Prevents undefined behavior via multiple threads entering the C API.
- bool use_mutex;
- PyMutex mutex; /* OpenSSL context lock */
} HASHobject;
#define HASHobject_CAST(op) ((HASHobject *)(op))
typedef struct {
- PyObject_HEAD
+ HASHLIB_OBJECT_HEAD
HMAC_CTX *ctx; /* OpenSSL hmac context */
- // Prevents undefined behavior via multiple threads entering the C API.
- bool use_mutex;
- PyMutex mutex; /* HMAC context lock */
} HMACobject;
#define HMACobject_CAST(op) ((HMACobject *)(op))
_hashlib_HASH_copy_locked(HASHobject *self, EVP_MD_CTX *new_ctx_p)
{
int result;
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
result = EVP_MD_CTX_copy(new_ctx_p, self->ctx);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
if (result == 0) {
notify_smart_ssl_error_occurred_in(Py_STRINGIFY(EVP_MD_CTX_copy));
return -1;
{
int result;
Py_buffer view;
-
GET_BUFFER_VIEW_OR_ERROUT(obj, &view);
-
- if (!self->use_mutex && view.len >= HASHLIB_GIL_MINSIZE) {
- self->use_mutex = true;
- }
- if (self->use_mutex) {
- Py_BEGIN_ALLOW_THREADS
- PyMutex_Lock(&self->mutex);
- result = _hashlib_HASH_hash(self, view.buf, view.len);
- PyMutex_Unlock(&self->mutex);
- Py_END_ALLOW_THREADS
- } else {
- result = _hashlib_HASH_hash(self, view.buf, view.len);
- }
-
+ HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
+ self, view.len,
+ result = _hashlib_HASH_hash(self, view.buf, view.len)
+ );
PyBuffer_Release(&view);
-
- if (result == -1)
- return NULL;
- Py_RETURN_NONE;
+ return result < 0 ? NULL : Py_None;
}
static PyMethodDef HASH_methods[] = {
}
if (view.buf && view.len) {
- if (view.len >= HASHLIB_GIL_MINSIZE) {
- /* We do not initialize self->lock here as this is the constructor
- * where it is not yet possible to have concurrent access. */
- Py_BEGIN_ALLOW_THREADS
- result = _hashlib_HASH_hash(self, view.buf, view.len);
- Py_END_ALLOW_THREADS
- } else {
- result = _hashlib_HASH_hash(self, view.buf, view.len);
- }
+ /* Do not use self->mutex here as this is the constructor
+ * where it is not yet possible to have concurrent access. */
+ HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
+ view.len,
+ result = _hashlib_HASH_hash(self, view.buf, view.len)
+ );
if (result == -1) {
assert(PyErr_Occurred());
Py_CLEAR(self);
locked_HMAC_CTX_copy(HMAC_CTX *new_ctx_p, HMACobject *self)
{
int result;
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
result = HMAC_CTX_copy(new_ctx_p, self->ctx);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
if (result == 0) {
notify_smart_ssl_error_occurred_in(Py_STRINGIFY(HMAC_CTX_copy));
return -1;
Py_buffer view = {0};
GET_BUFFER_VIEW_OR_ERROR(obj, &view, return 0);
-
- if (!self->use_mutex && view.len >= HASHLIB_GIL_MINSIZE) {
- self->use_mutex = true;
- }
- if (self->use_mutex) {
- Py_BEGIN_ALLOW_THREADS
- PyMutex_Lock(&self->mutex);
- r = HMAC_Update(self->ctx,
- (const unsigned char *)view.buf,
- (size_t)view.len);
- PyMutex_Unlock(&self->mutex);
- Py_END_ALLOW_THREADS
- } else {
- r = HMAC_Update(self->ctx,
- (const unsigned char *)view.buf,
- (size_t)view.len);
- }
-
+ HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
+ self, view.len,
+ r = HMAC_Update(
+ self->ctx, (const unsigned char *)view.buf, (size_t)view.len
+ )
+ );
PyBuffer_Release(&view);
if (r == 0) {
}
typedef struct {
- PyObject_HEAD
+ HASHLIB_OBJECT_HEAD
union {
Hacl_Hash_Blake2s_state_t *blake2s_state;
Hacl_Hash_Blake2b_state_t *blake2b_state;
#endif
};
blake2_impl impl;
- bool use_mutex;
- PyMutex mutex;
} Blake2Object;
#define _Blake2Object_CAST(op) ((Blake2Object *)(op))
} while (0)
static void
-update(Blake2Object *self, uint8_t *buf, Py_ssize_t len)
+blake2_update_unlocked(Blake2Object *self, uint8_t *buf, Py_ssize_t len)
{
switch (self->impl) {
// blake2b_256_state and blake2s_128_state must be if'd since
if (data != NULL) {
Py_buffer buf;
GET_BUFFER_VIEW_OR_ERROR(data, &buf, goto error);
- if (buf.len >= HASHLIB_GIL_MINSIZE) {
- Py_BEGIN_ALLOW_THREADS
- update(self, buf.buf, buf.len);
- Py_END_ALLOW_THREADS
- }
- else {
- update(self, buf.buf, buf.len);
- }
+ /* Do not use self->mutex here as this is the constructor
+ * where it is not yet possible to have concurrent access. */
+ HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
+ buf.len,
+ blake2_update_unlocked(self, buf.buf, buf.len)
+ );
PyBuffer_Release(&buf);
}
}
static int
-blake2_blake2b_copy_locked(Blake2Object *self, Blake2Object *cpy)
+blake2_blake2b_copy_unlocked(Blake2Object *self, Blake2Object *cpy)
{
assert(cpy != NULL);
#define BLAKE2_COPY(TYPE, STATE_ATTR) \
return NULL;
}
- ENTER_HASHLIB(self);
- rc = blake2_blake2b_copy_locked(self, cpy);
- LEAVE_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
+ rc = blake2_blake2b_copy_unlocked(self, cpy);
+ HASHLIB_RELEASE_LOCK(self);
if (rc < 0) {
Py_DECREF(cpy);
return NULL;
/*[clinic end generated code: output=99330230068e8c99 input=ffc4aa6a6a225d31]*/
{
Py_buffer buf;
-
GET_BUFFER_VIEW_OR_ERROUT(data, &buf);
-
- if (!self->use_mutex && buf.len >= HASHLIB_GIL_MINSIZE) {
- self->use_mutex = true;
- }
- if (self->use_mutex) {
- Py_BEGIN_ALLOW_THREADS
- PyMutex_Lock(&self->mutex);
- update(self, buf.buf, buf.len);
- PyMutex_Unlock(&self->mutex);
- Py_END_ALLOW_THREADS
- }
- else {
- update(self, buf.buf, buf.len);
- }
-
+ HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
+ self, buf.len,
+ blake2_update_unlocked(self, buf.buf, buf.len)
+ );
PyBuffer_Release(&buf);
-
Py_RETURN_NONE;
}
/*[clinic end generated code: output=31ab8ad477f4a2f7 input=7d21659e9c5fff02]*/
{
uint8_t digest_length = 0, digest[HACL_HASH_BLAKE2B_OUT_BYTES];
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
digest_length = blake2_blake2b_compute_digest(self, digest);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
return PyBytes_FromStringAndSize((const char *)digest, digest_length);
}
/*[clinic end generated code: output=5ef54b138db6610a input=76930f6946351f56]*/
{
uint8_t digest_length = 0, digest[HACL_HASH_BLAKE2B_OUT_BYTES];
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
digest_length = blake2_blake2b_compute_digest(self, digest);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
return _Py_strhex((const char *)digest, digest_length);
}
/*
* Helper code to synchronize access to the hash object when the GIL is
- * released around a CPU consuming hashlib operation. All code paths that
- * access a mutable part of obj must be enclosed in an ENTER_HASHLIB /
- * LEAVE_HASHLIB block or explicitly acquire and release the lock inside
- * a PY_BEGIN / END_ALLOW_THREADS block if they wish to release the GIL for
- * an operation.
+ * released around a CPU-consuming hashlib operation.
*
- * These only drop the GIL if the lock acquisition itself is likely to
- * block. Thus the non-blocking acquire gating the GIL release for a
- * blocking lock acquisition. The intent of these macros is to surround
- * the assumed always "fast" operations that you aren't releasing the
- * GIL around. Otherwise use code similar to what you see in hash
- * function update() methods.
+ * Code accessing a mutable part of the hash object must be enclosed in
+ * a HASHLIB_{ACQUIRE,RELEASE}_LOCK block, or explicitly acquire and release
+ * the mutex inside a Py_BEGIN_ALLOW_THREADS -- Py_END_ALLOW_THREADS block
+ * if it needs to release the GIL around an operation.
*/
-#include "pythread.h"
-#define ENTER_HASHLIB(obj) \
- if ((obj)->use_mutex) { \
- PyMutex_Lock(&(obj)->mutex); \
- }
-#define LEAVE_HASHLIB(obj) \
- if ((obj)->use_mutex) { \
- PyMutex_Unlock(&(obj)->mutex); \
- }
+#define HASHLIB_OBJECT_HEAD \
+ PyObject_HEAD \
+ /* Guard against race conditions during incremental update(). */ \
+ PyMutex mutex;
-#ifdef Py_GIL_DISABLED
-#define HASHLIB_INIT_MUTEX(obj) \
- do { \
- (obj)->mutex = (PyMutex){0}; \
- (obj)->use_mutex = true; \
+#define HASHLIB_INIT_MUTEX(OBJ) \
+ do { \
+ (OBJ)->mutex = (PyMutex){0}; \
} while (0)
-#else
-#define HASHLIB_INIT_MUTEX(obj) \
- do { \
- (obj)->mutex = (PyMutex){0}; \
- (obj)->use_mutex = false; \
+
+#define HASHLIB_ACQUIRE_LOCK(OBJ) PyMutex_Lock(&(OBJ)->mutex)
+#define HASHLIB_RELEASE_LOCK(OBJ) PyMutex_Unlock(&(OBJ)->mutex)
+
+/*
+ * Message length above which the GIL is to be released
+ * when performing hashing operations.
+ */
+#define HASHLIB_GIL_MINSIZE 2048
+
+// Macros for executing code while conditionally holding the GIL.
+//
+// These only drop the GIL when the amount of data to process exceeds
+// HASHLIB_GIL_MINSIZE, so the cost of releasing and re-acquiring the GIL
+// (and of a possibly blocking lock acquisition) is only paid when the
+// hashing work is long enough to be worth it; smaller, assumed "fast"
+// operations keep the GIL.
+
+/*
+ * Execute a suite of C statements 'STATEMENTS'.
+ *
+ * The GIL is held if 'SIZE' does not exceed HASHLIB_GIL_MINSIZE.
+ */
+#define HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(SIZE, STATEMENTS) \
+ do { \
+ if ((SIZE) > HASHLIB_GIL_MINSIZE) { \
+ Py_BEGIN_ALLOW_THREADS \
+ STATEMENTS; \
+ Py_END_ALLOW_THREADS \
+ } \
+ else { \
+ STATEMENTS; \
+ } \
} while (0)
-#endif
-/* TODO(gpshead): We should make this a module or class attribute
- * to allow the user to optimize based on the platform they're using. */
-#define HASHLIB_GIL_MINSIZE 2048
+/*
+ * Lock 'OBJ' and execute a suite of C statements 'STATEMENTS'.
+ *
+ * The GIL is held if 'SIZE' does not exceed HASHLIB_GIL_MINSIZE.
+ */
+#define HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(OBJ, SIZE, STATEMENTS) \
+ do { \
+ if ((SIZE) > HASHLIB_GIL_MINSIZE) { \
+ Py_BEGIN_ALLOW_THREADS \
+ HASHLIB_ACQUIRE_LOCK(OBJ); \
+ STATEMENTS; \
+ HASHLIB_RELEASE_LOCK(OBJ); \
+ Py_END_ALLOW_THREADS \
+ } \
+ else { \
+ HASHLIB_ACQUIRE_LOCK(OBJ); \
+ STATEMENTS; \
+ HASHLIB_RELEASE_LOCK(OBJ); \
+ } \
+ } while (0)
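+
+/*
+ * Illustrative usage only ('update_impl' is a placeholder for a module's
+ * own update helper, not a real function): an update() method typically
+ * wraps its hash-specific call as
+ *
+ *     HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
+ *         self, buf.len,
+ *         update_impl(self->state, buf.buf, buf.len)
+ *     );
+ *
+ * whereas constructors use HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED, since
+ * no other thread can yet hold a reference to the object being built.
+ */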
static inline int
_Py_hashlib_data_argument(PyObject **res, PyObject *data, PyObject *string)
#define Py_CHECK_HACL_UINT32_T_LENGTH(LEN)
#endif
-/*
- * Call the HACL* HMAC-HASH update function on the given data.
- *
- * The magnitude of 'LEN' is not checked and thus 'LEN' must be
- * safely convertible to a uint32_t value.
- */
-#define Py_HMAC_HACL_UPDATE_CALL(HACL_STATE, BUF, LEN) \
- Hacl_Streaming_HMAC_update(HACL_STATE, BUF, (uint32_t)(LEN))
-
-/*
- * Call the HACL* HMAC-HASH update function on the given data.
- *
- * On DEBUG builds, the 'ERRACTION' statements are executed if
- * the update() call returned a non-successful HACL* exit code.
- *
- * The buffer 'BUF' and its length 'LEN' are left untouched.
- *
- * The formal signature of this macro is:
- *
- * (HACL_HMAC_state *, uint8_t *, uint32_t, (C statements))
- */
-#ifndef NDEBUG
-#define Py_HMAC_HACL_UPDATE_ONCE( \
- HACL_STATE, BUF, LEN, \
- ERRACTION \
-) \
- do { \
- Py_CHECK_HACL_UINT32_T_LENGTH(LEN); \
- hacl_errno_t code = Py_HMAC_HACL_UPDATE_CALL(HACL_STATE, BUF, LEN); \
- if (_hacl_convert_errno(code) < 0) { \
- ERRACTION; \
- } \
- } while (0)
-#else
-#define Py_HMAC_HACL_UPDATE_ONCE( \
- HACL_STATE, BUF, LEN, \
- _ERRACTION \
-) \
- do { \
- (void)Py_HMAC_HACL_UPDATE_CALL(HACL_STATE, BUF, (LEN)); \
- } while (0)
-#endif
-
-/*
- * Repetivively call the HACL* HMAC-HASH update function on the given
- * data until the buffer length 'LEN' is strictly less than UINT32_MAX.
- *
- * On builds with PY_SSIZE_T_MAX <= UINT32_MAX, this is a no-op.
- *
- * The buffer 'BUF' (resp. 'LEN') is advanced (resp. decremented)
- * by UINT32_MAX after each update. On DEBUG builds, each update()
- * call is verified and the 'ERRACTION' statements are executed if
- * a non-successful HACL* exit code is being returned.
- *
- * In particular, 'BUF' and 'LEN' must be variable names and not
- * expressions on their own.
- *
- * The formal signature of this macro is:
- *
- * (HACL_HMAC_state *, uint8_t *, C integer, (C statements))
- */
-#ifdef Py_HMAC_SSIZE_LARGER_THAN_UINT32
-#define Py_HMAC_HACL_UPDATE_LOOP( \
- HACL_STATE, BUF, LEN, \
- ERRACTION \
-) \
- do { \
- while ((Py_ssize_t)LEN > UINT32_MAX_AS_SSIZE_T) { \
- Py_HMAC_HACL_UPDATE_ONCE(HACL_STATE, BUF, UINT32_MAX, \
- ERRACTION); \
- BUF += UINT32_MAX; \
- LEN -= UINT32_MAX; \
- } \
- } while (0)
-#else
-#define Py_HMAC_HACL_UPDATE_LOOP( \
- HACL_STATE, BUF, LEN, \
- _ERRACTION \
-)
-#endif
-
-/*
- * Perform the HMAC-HASH update() operation in a streaming fashion.
- *
- * The formal signature of this macro is:
- *
- * (HACL_HMAC_state *, uint8_t *, C integer, (C statements))
- */
-#define Py_HMAC_HACL_UPDATE( \
- HACL_STATE, BUF, LEN, \
- ERRACTION \
-) \
- do { \
- Py_HMAC_HACL_UPDATE_LOOP(HACL_STATE, BUF, LEN, \
- ERRACTION); \
- Py_HMAC_HACL_UPDATE_ONCE(HACL_STATE, BUF, LEN, \
- ERRACTION); \
- } while (0)
-
/*
* HMAC underlying hash function static information.
*/
typedef Hacl_Streaming_HMAC_agile_state HACL_HMAC_state;
typedef struct HMACObject {
- PyObject_HEAD
-
- bool use_mutex;
- PyMutex mutex;
-
+ HASHLIB_OBJECT_HEAD
// Hash function information
PyObject *name; // rendered name (exact unicode object)
HMAC_Hash_Kind kind; // can be used for runtime dispatch (must be known)
}
}
+/*
+ * Call the HACL* HMAC-HASH update function on the given data.
+ *
+ * On DEBUG builds, the update() call is verified.
+ *
+ * Return 0 on success; otherwise, set an exception and return -1 on failure.
+ */
+static int
+_hacl_hmac_state_update_once(HACL_HMAC_state *state,
+ uint8_t *buf, uint32_t len)
+{
+#ifndef NDEBUG
+ hacl_errno_t code = Hacl_Streaming_HMAC_update(state, buf, len);
+ return _hacl_convert_errno(code);
+#else
+ (void)Hacl_Streaming_HMAC_update(state, buf, len);
+ return 0;
+#endif
+}
+
+/*
+ * Perform the HMAC-HASH update() operation in a streaming fashion.
+ *
+ * On DEBUG builds, each update() call is verified.
+ *
+ * Return 0 on success; otherwise, set an exception and return -1 on failure.
+ */
+static int
+_hacl_hmac_state_update(HACL_HMAC_state *state, uint8_t *buf, Py_ssize_t len)
+{
+ assert(len >= 0);
+#ifdef Py_HMAC_SSIZE_LARGER_THAN_UINT32
+ while (len > UINT32_MAX_AS_SSIZE_T) {
+ if (_hacl_hmac_state_update_once(state, buf, UINT32_MAX) < 0) {
+ assert(PyErr_Occurred());
+ return -1;
+ }
+ buf += UINT32_MAX;
+ len -= UINT32_MAX;
+ }
+#endif
+ Py_CHECK_HACL_UINT32_T_LENGTH(len);
+ return _hacl_hmac_state_update_once(state, buf, (uint32_t)len);
+}
+
/* Static information used to construct the hash table. */
static const py_hmac_hinfo py_hmac_static_hinfo[] = {
#define Py_HMAC_HINFO_HACL_API(HACL_HID) \
return self->state == NULL ? -1 : 0;
}
-/*
- * Feed initial data.
- *
- * This function MUST only be called by the HMAC object constructor
- * and after hmac_set_hinfo() and hmac_new_initial_state() have been
- * called, lest the behaviour is undefined.
- *
- * Return 0 on success; otherwise, set an exception and return -1 on failure.
- */
-static int
-hmac_feed_initial_data(HMACObject *self, uint8_t *msg, Py_ssize_t len)
-{
- assert(self->name != NULL);
- assert(self->state != NULL);
- if (len == 0) {
- // do nothing if the buffer is empty
- return 0;
- }
-
- if (len < HASHLIB_GIL_MINSIZE) {
- Py_HMAC_HACL_UPDATE(self->state, msg, len, return -1);
- return 0;
- }
-
- int res = 0;
- Py_BEGIN_ALLOW_THREADS
- Py_HMAC_HACL_UPDATE(self->state, msg, len, goto error);
- goto done;
-#ifndef NDEBUG
-error:
- res = -1;
-#else
- Py_UNREACHABLE();
-#endif
-done:
- Py_END_ALLOW_THREADS
- return res;
-}
-
/*[clinic input]
_hmac.new
if (msgobj != NULL && msgobj != Py_None) {
Py_buffer msg;
GET_BUFFER_VIEW_OR_ERROR(msgobj, &msg, goto error);
- rc = hmac_feed_initial_data(self, msg.buf, msg.len);
+ /* Do not use self->mutex here as this is the constructor
+ * where it is not yet possible to have concurrent access. */
+ HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
+ msg.len,
+ rc = _hacl_hmac_state_update(self->state, msg.buf, msg.len)
+ );
PyBuffer_Release(&msg);
#ifndef NDEBUG
if (rc < 0) {
return NULL;
}
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
/* copy hash information */
hmac_copy_hinfo(copy, self);
/* copy internal state */
int rc = hmac_copy_state(copy, self);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
if (rc < 0) {
Py_DECREF(copy);
return (PyObject *)copy;
}
-/*
- * Update the HMAC object with the given buffer.
- *
- * This unconditionally acquires the lock on the HMAC object.
- *
- * On DEBUG builds, each update() call is verified.
- *
- * Return 0 on success; otherwise, set an exception and return -1 on failure.
- */
-static int
-hmac_update_state_with_lock(HMACObject *self, uint8_t *buf, Py_ssize_t len)
-{
- int res = 0;
- Py_BEGIN_ALLOW_THREADS
- PyMutex_Lock(&self->mutex); // unconditionally acquire a lock
- Py_HMAC_HACL_UPDATE(self->state, buf, len, goto error);
- goto done;
-#ifndef NDEBUG
-error:
- res = -1;
-#else
- Py_UNREACHABLE();
-#endif
-done:
- PyMutex_Unlock(&self->mutex);
- Py_END_ALLOW_THREADS
- return res;
-}
-
-/*
- * Update the HMAC object with the given buffer.
- *
- * This conditionally acquires the lock on the HMAC object.
- *
- * On DEBUG builds, each update() call is verified.
- *
- * Return 0 on success; otherwise, set an exception and return -1 on failure.
- */
-static int
-hmac_update_state_cond_lock(HMACObject *self, uint8_t *buf, Py_ssize_t len)
-{
- ENTER_HASHLIB(self); // conditionally acquire a lock
- Py_HMAC_HACL_UPDATE(self->state, buf, len, goto error);
- LEAVE_HASHLIB(self);
- return 0;
-
-#ifndef NDEBUG
-error:
- LEAVE_HASHLIB(self);
- return -1;
-#else
- Py_UNREACHABLE();
-#endif
-}
-
-/*
- * Update the internal HMAC state with the given buffer.
- *
- * Return 0 on success; otherwise, set an exception and return -1 on failure.
- */
-static inline int
-hmac_update_state(HMACObject *self, uint8_t *buf, Py_ssize_t len)
-{
- assert(buf != 0);
- assert(len >= 0);
- return len == 0
- ? 0 /* nothing to do */
- : len < HASHLIB_GIL_MINSIZE
- ? hmac_update_state_cond_lock(self, buf, len)
- : hmac_update_state_with_lock(self, buf, len);
-}
-
/*[clinic input]
_hmac.HMAC.update
_hmac_HMAC_update_impl(HMACObject *self, PyObject *msgobj)
/*[clinic end generated code: output=962134ada5e55985 input=7c0ea830efb03367]*/
{
+ int rc = 0;
Py_buffer msg;
GET_BUFFER_VIEW_OR_ERROUT(msgobj, &msg);
- int rc = hmac_update_state(self, msg.buf, msg.len);
+ HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
+ self, msg.len,
+ rc = _hacl_hmac_state_update(self->state, msg.buf, msg.len)
+ );
PyBuffer_Release(&msg);
return rc < 0 ? NULL : Py_None;
}
* Note: this function may raise a MemoryError.
*/
static int
-hmac_digest_compute_cond_lock(HMACObject *self, uint8_t *digest)
+hmac_digest_compute_locked(HMACObject *self, uint8_t *digest)
{
assert(digest != NULL);
hacl_errno_t rc;
- ENTER_HASHLIB(self); // conditionally acquire a lock
+ HASHLIB_ACQUIRE_LOCK(self);
rc = Hacl_Streaming_HMAC_digest(self->state, digest, self->digest_size);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
assert(
rc == Hacl_Streaming_Types_Success ||
rc == Hacl_Streaming_Types_OutOfMemory
{
assert(self->digest_size <= Py_hmac_hash_max_digest_size);
uint8_t digest[Py_hmac_hash_max_digest_size];
- if (hmac_digest_compute_cond_lock(self, digest) < 0) {
+ if (hmac_digest_compute_locked(self, digest) < 0) {
return NULL;
}
return PyBytes_FromStringAndSize((const char *)digest, self->digest_size);
{
assert(self->digest_size <= Py_hmac_hash_max_digest_size);
uint8_t digest[Py_hmac_hash_max_digest_size];
- if (hmac_digest_compute_cond_lock(self, digest) < 0) {
+ if (hmac_digest_compute_locked(self, digest) < 0) {
return NULL;
}
return _Py_strhex((const char *)digest, self->digest_size);
#include "_hacl/Hacl_Hash_MD5.h"
-
typedef struct {
- PyObject_HEAD
- // Prevents undefined behavior via multiple threads entering the C API.
- bool use_mutex;
- PyMutex mutex;
+ HASHLIB_OBJECT_HEAD
Hacl_Hash_MD5_state_t *hash_state;
} MD5object;
return NULL;
}
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
newobj->hash_state = Hacl_Hash_MD5_copy(self->hash_state);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
if (newobj->hash_state == NULL) {
Py_DECREF(newobj);
return PyErr_NoMemory();
/*[clinic end generated code: output=eb691dc4190a07ec input=bc0c4397c2994be6]*/
{
uint8_t digest[MD5_DIGESTSIZE];
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
Hacl_Hash_MD5_digest(self->hash_state, digest);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
return PyBytes_FromStringAndSize((const char *)digest, MD5_DIGESTSIZE);
}
/*[clinic end generated code: output=17badced1f3ac932 input=b60b19de644798dd]*/
{
uint8_t digest[MD5_DIGESTSIZE];
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
Hacl_Hash_MD5_digest(self->hash_state, digest);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
return _Py_strhex((const char *)digest, MD5_DIGESTSIZE);
}
* take more than 1 billion years to overflow the maximum admissible length
* for MD5 (2^61 - 1).
*/
+ assert(len >= 0);
#if PY_SSIZE_T_MAX > UINT32_MAX
while (len > UINT32_MAX) {
(void)Hacl_Hash_MD5_update(state, buf, UINT32_MAX);
/*[clinic end generated code: output=b0fed9a7ce7ad253 input=6e1efcd9ecf17032]*/
{
Py_buffer buf;
-
GET_BUFFER_VIEW_OR_ERROUT(obj, &buf);
-
- if (!self->use_mutex && buf.len >= HASHLIB_GIL_MINSIZE) {
- self->use_mutex = true;
- }
- if (self->use_mutex) {
- Py_BEGIN_ALLOW_THREADS
- PyMutex_Lock(&self->mutex);
- update(self->hash_state, buf.buf, buf.len);
- PyMutex_Unlock(&self->mutex);
- Py_END_ALLOW_THREADS
- } else {
- update(self->hash_state, buf.buf, buf.len);
- }
-
+ HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
+ self, buf.len,
+ update(self->hash_state, buf.buf, buf.len)
+ );
PyBuffer_Release(&buf);
Py_RETURN_NONE;
}
}
if (string) {
- if (buf.len >= HASHLIB_GIL_MINSIZE) {
- /* We do not initialize self->lock here as this is the constructor
- * where it is not yet possible to have concurrent access. */
- Py_BEGIN_ALLOW_THREADS
- update(new->hash_state, buf.buf, buf.len);
- Py_END_ALLOW_THREADS
- }
- else {
- update(new->hash_state, buf.buf, buf.len);
- }
+ /* Do not use self->mutex here as this is the constructor
+ * where it is not yet possible to have concurrent access. */
+ HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
+ buf.len,
+ update(new->hash_state, buf.buf, buf.len)
+ );
PyBuffer_Release(&buf);
}
#include "_hacl/Hacl_Hash_SHA1.h"
typedef struct {
- PyObject_HEAD
- // Prevents undefined behavior via multiple threads entering the C API.
- bool use_mutex;
- PyMutex mutex;
- PyThread_type_lock lock;
+ HASHLIB_OBJECT_HEAD
Hacl_Hash_SHA1_state_t *hash_state;
} SHA1object;
return NULL;
}
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
newobj->hash_state = Hacl_Hash_SHA1_copy(self->hash_state);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
if (newobj->hash_state == NULL) {
Py_DECREF(newobj);
return PyErr_NoMemory();
/*[clinic end generated code: output=2f05302a7aa2b5cb input=13824b35407444bd]*/
{
unsigned char digest[SHA1_DIGESTSIZE];
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
Hacl_Hash_SHA1_digest(self->hash_state, digest);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
return PyBytes_FromStringAndSize((const char *)digest, SHA1_DIGESTSIZE);
}
/*[clinic end generated code: output=4161fd71e68c6659 input=97691055c0c74ab0]*/
{
unsigned char digest[SHA1_DIGESTSIZE];
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
Hacl_Hash_SHA1_digest(self->hash_state, digest);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
return _Py_strhex((const char *)digest, SHA1_DIGESTSIZE);
}
/*[clinic end generated code: output=cdc8e0e106dbec5f input=aad8e07812edbba3]*/
{
Py_buffer buf;
-
GET_BUFFER_VIEW_OR_ERROUT(obj, &buf);
-
- if (!self->use_mutex && buf.len >= HASHLIB_GIL_MINSIZE) {
- self->use_mutex = true;
- }
- if (self->use_mutex) {
- Py_BEGIN_ALLOW_THREADS
- PyMutex_Lock(&self->mutex);
- update(self->hash_state, buf.buf, buf.len);
- PyMutex_Unlock(&self->mutex);
- Py_END_ALLOW_THREADS
- } else {
- update(self->hash_state, buf.buf, buf.len);
- }
-
+ HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
+ self, buf.len,
+ update(self->hash_state, buf.buf, buf.len)
+ );
PyBuffer_Release(&buf);
Py_RETURN_NONE;
}
return PyErr_NoMemory();
}
if (string) {
- if (buf.len >= HASHLIB_GIL_MINSIZE) {
- /* We do not initialize self->lock here as this is the constructor
- * where it is not yet possible to have concurrent access. */
- Py_BEGIN_ALLOW_THREADS
- update(new->hash_state, buf.buf, buf.len);
- Py_END_ALLOW_THREADS
- }
- else {
- update(new->hash_state, buf.buf, buf.len);
- }
+ /* Do not use self->mutex here as this is the constructor
+ * where it is not yet possible to have concurrent access. */
+ HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
+ buf.len,
+ update(new->hash_state, buf.buf, buf.len)
+ );
PyBuffer_Release(&buf);
}
// TODO: Get rid of int digestsize in favor of Hacl state info?
typedef struct {
- PyObject_HEAD
+ HASHLIB_OBJECT_HEAD
int digestsize;
- // Prevents undefined behavior via multiple threads entering the C API.
- bool use_mutex;
- PyMutex mutex;
Hacl_Hash_SHA2_state_t_256 *state;
} SHA256object;
typedef struct {
- PyObject_HEAD
+ HASHLIB_OBJECT_HEAD
int digestsize;
- // Prevents undefined behavior via multiple threads entering the C API.
- bool use_mutex;
- PyMutex mutex;
Hacl_Hash_SHA2_state_t_512 *state;
} SHA512object;
}
}
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
rc = SHA256copy(self, newobj);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
if (rc < 0) {
Py_DECREF(newobj);
return NULL;
}
}
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
rc = SHA512copy(self, newobj);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
if (rc < 0) {
Py_DECREF(newobj);
return NULL;
{
uint8_t digest[SHA256_DIGESTSIZE];
assert(self->digestsize <= SHA256_DIGESTSIZE);
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
// HACL* performs copies under the hood so that self->state remains valid
// after this call.
Hacl_Hash_SHA2_digest_256(self->state, digest);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
return PyBytes_FromStringAndSize((const char *)digest, self->digestsize);
}
{
uint8_t digest[SHA512_DIGESTSIZE];
assert(self->digestsize <= SHA512_DIGESTSIZE);
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
// HACL* performs copies under the hood so that self->state remains valid
// after this call.
Hacl_Hash_SHA2_digest_512(self->state, digest);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
return PyBytes_FromStringAndSize((const char *)digest, self->digestsize);
}
{
uint8_t digest[SHA256_DIGESTSIZE];
assert(self->digestsize <= SHA256_DIGESTSIZE);
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
Hacl_Hash_SHA2_digest_256(self->state, digest);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
return _Py_strhex((const char *)digest, self->digestsize);
}
{
uint8_t digest[SHA512_DIGESTSIZE];
assert(self->digestsize <= SHA512_DIGESTSIZE);
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
Hacl_Hash_SHA2_digest_512(self->state, digest);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
return _Py_strhex((const char *)digest, self->digestsize);
}
/*[clinic end generated code: output=dc58a580cf8905a5 input=b2d449d5b30f0f5a]*/
{
Py_buffer buf;
-
GET_BUFFER_VIEW_OR_ERROUT(obj, &buf);
-
- if (!self->use_mutex && buf.len >= HASHLIB_GIL_MINSIZE) {
- self->use_mutex = true;
- }
- if (self->use_mutex) {
- Py_BEGIN_ALLOW_THREADS
- PyMutex_Lock(&self->mutex);
- update_256(self->state, buf.buf, buf.len);
- PyMutex_Unlock(&self->mutex);
- Py_END_ALLOW_THREADS
- } else {
- update_256(self->state, buf.buf, buf.len);
- }
-
+ HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
+ self, buf.len,
+ update_256(self->state, buf.buf, buf.len)
+ );
PyBuffer_Release(&buf);
Py_RETURN_NONE;
}
/*[clinic end generated code: output=9af211766c0b7365 input=ded2b46656566283]*/
{
Py_buffer buf;
-
GET_BUFFER_VIEW_OR_ERROUT(obj, &buf);
-
- if (!self->use_mutex && buf.len >= HASHLIB_GIL_MINSIZE) {
- self->use_mutex = true;
- }
- if (self->use_mutex) {
- Py_BEGIN_ALLOW_THREADS
- PyMutex_Lock(&self->mutex);
- update_512(self->state, buf.buf, buf.len);
- PyMutex_Unlock(&self->mutex);
- Py_END_ALLOW_THREADS
- } else {
- update_512(self->state, buf.buf, buf.len);
- }
-
+ HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
+ self, buf.len,
+ update_512(self->state, buf.buf, buf.len)
+ );
PyBuffer_Release(&buf);
Py_RETURN_NONE;
}
return PyErr_NoMemory();
}
if (string) {
- if (buf.len >= HASHLIB_GIL_MINSIZE) {
- /* We do not initialize self->lock here as this is the constructor
- * where it is not yet possible to have concurrent access. */
- Py_BEGIN_ALLOW_THREADS
- update_256(new->state, buf.buf, buf.len);
- Py_END_ALLOW_THREADS
- }
- else {
- update_256(new->state, buf.buf, buf.len);
- }
+ /* Do not use self->mutex here as this is the constructor
+ * where it is not yet possible to have concurrent access. */
+ HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
+ buf.len,
+ update_256(new->state, buf.buf, buf.len)
+ );
PyBuffer_Release(&buf);
}
return PyErr_NoMemory();
}
if (string) {
- if (buf.len >= HASHLIB_GIL_MINSIZE) {
- /* We do not initialize self->lock here as this is the constructor
- * where it is not yet possible to have concurrent access. */
- Py_BEGIN_ALLOW_THREADS
- update_256(new->state, buf.buf, buf.len);
- Py_END_ALLOW_THREADS
- }
- else {
- update_256(new->state, buf.buf, buf.len);
- }
+ /* Do not use self->mutex here as this is the constructor
+ * where it is not yet possible to have concurrent access. */
+ HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
+ buf.len,
+ update_256(new->state, buf.buf, buf.len)
+ );
PyBuffer_Release(&buf);
}
return PyErr_NoMemory();
}
if (string) {
- if (buf.len >= HASHLIB_GIL_MINSIZE) {
- /* We do not initialize self->lock here as this is the constructor
- * where it is not yet possible to have concurrent access. */
- Py_BEGIN_ALLOW_THREADS
- update_512(new->state, buf.buf, buf.len);
- Py_END_ALLOW_THREADS
- }
- else {
- update_512(new->state, buf.buf, buf.len);
- }
+ /* Do not use self->mutex here as this is the constructor
+ * where it is not yet possible to have concurrent access. */
+ HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
+ buf.len,
+ update_512(new->state, buf.buf, buf.len)
+ );
PyBuffer_Release(&buf);
}
return PyErr_NoMemory();
}
if (string) {
- if (buf.len >= HASHLIB_GIL_MINSIZE) {
- /* We do not initialize self->lock here as this is the constructor
- * where it is not yet possible to have concurrent access. */
- Py_BEGIN_ALLOW_THREADS
- update_512(new->state, buf.buf, buf.len);
- Py_END_ALLOW_THREADS
- }
- else {
- update_512(new->state, buf.buf, buf.len);
- }
+ /* Do not use self->mutex here as this is the constructor
+ * where it is not yet possible to have concurrent access. */
+ HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
+ buf.len,
+ update_512(new->state, buf.buf, buf.len)
+ );
PyBuffer_Release(&buf);
}
#include "_hacl/Hacl_Hash_SHA3.h"
typedef struct {
- PyObject_HEAD
- // Prevents undefined behavior via multiple threads entering the C API.
- bool use_mutex;
- PyMutex mutex;
+ HASHLIB_OBJECT_HEAD
Hacl_Hash_SHA3_state_t *hash_state;
} SHA3object;
if (data) {
GET_BUFFER_VIEW_OR_ERROR(data, &buf, goto error);
- if (buf.len >= HASHLIB_GIL_MINSIZE) {
- /* We do not initialize self->lock here as this is the constructor
- * where it is not yet possible to have concurrent access. */
- Py_BEGIN_ALLOW_THREADS
- sha3_update(self->hash_state, buf.buf, buf.len);
- Py_END_ALLOW_THREADS
- }
- else {
- sha3_update(self->hash_state, buf.buf, buf.len);
- }
+ /* Do not use self->mutex here as this is the constructor
+ * where it is not yet possible to have concurrent access. */
+ HASHLIB_EXTERNAL_INSTRUCTIONS_UNLOCKED(
+ buf.len,
+ sha3_update(self->hash_state, buf.buf, buf.len)
+ );
}
PyBuffer_Release(&buf);
if ((newobj = newSHA3object(Py_TYPE(self))) == NULL) {
return NULL;
}
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
newobj->hash_state = Hacl_Hash_SHA3_copy(self->hash_state);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
if (newobj->hash_state == NULL) {
Py_DECREF(newobj);
return PyErr_NoMemory();
unsigned char digest[SHA3_MAX_DIGESTSIZE];
// This function errors out if the algorithm is SHAKE. Here, we know this
// not to be the case, and therefore do not perform error checking.
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
(void)Hacl_Hash_SHA3_digest(self->hash_state, digest);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
return PyBytes_FromStringAndSize((const char *)digest,
Hacl_Hash_SHA3_hash_len(self->hash_state));
}
/*[clinic end generated code: output=75ad03257906918d input=2d91bb6e0d114ee3]*/
{
unsigned char digest[SHA3_MAX_DIGESTSIZE];
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
(void)Hacl_Hash_SHA3_digest(self->hash_state, digest);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
return _Py_strhex((const char *)digest,
Hacl_Hash_SHA3_hash_len(self->hash_state));
}
/*[clinic end generated code: output=390b7abf7c9795a5 input=a887f54dcc4ae227]*/
{
Py_buffer buf;
-
GET_BUFFER_VIEW_OR_ERROUT(data, &buf);
-
- if (!self->use_mutex && buf.len >= HASHLIB_GIL_MINSIZE) {
- self->use_mutex = true;
- }
- if (self->use_mutex) {
- Py_BEGIN_ALLOW_THREADS
- PyMutex_Lock(&self->mutex);
- sha3_update(self->hash_state, buf.buf, buf.len);
- PyMutex_Unlock(&self->mutex);
- Py_END_ALLOW_THREADS
- } else {
- sha3_update(self->hash_state, buf.buf, buf.len);
- }
-
+ HASHLIB_EXTERNAL_INSTRUCTIONS_LOCKED(
+ self, buf.len,
+ sha3_update(self->hash_state, buf.buf, buf.len)
+ );
PyBuffer_Release(&buf);
Py_RETURN_NONE;
}
CHECK_HACL_UINT32_T_LENGTH(length);
PyObject *digest = PyBytes_FromStringAndSize(NULL, length);
uint8_t *buffer = (uint8_t *)PyBytes_AS_STRING(digest);
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
(void)Hacl_Hash_SHA3_squeeze(self->hash_state, buffer, (uint32_t)length);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
return digest;
}
return PyErr_NoMemory();
}
- ENTER_HASHLIB(self);
+ HASHLIB_ACQUIRE_LOCK(self);
(void)Hacl_Hash_SHA3_squeeze(self->hash_state, buffer, (uint32_t)length);
- LEAVE_HASHLIB(self);
+ HASHLIB_RELEASE_LOCK(self);
PyObject *digest = _Py_strhex((const char *)buffer, length);
PyMem_Free(buffer);
return digest;