struct server_id exclusive;
size_t num_shared;
uint8_t *shared;
+ uint64_t unique_lock_epoch;
uint64_t unique_data_epoch;
size_t datalen;
uint8_t *data;
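
The struct now carries two independent change markers: the new unique_lock_epoch, which the hunks below bump whenever the lock state itself is rewritten, and the existing unique_data_epoch, which tracks rewrites of the payload. Judging by the call sites below, generate_unique_u64() is assumed to return a fresh 64-bit value guaranteed to differ from its argument; here is a minimal sketch of that assumed contract only (Samba's real helper draws on its own random source, rand() is purely illustrative):

#include <stdint.h>
#include <stdlib.h>

/* Sketch of the assumed contract, not Samba's implementation:
 * return a 64-bit value that differs from veto_value, so comparing
 * the stored epoch against a cached copy reliably detects a change. */
static uint64_t generate_unique_u64_sketch(uint64_t veto_value)
{
        uint64_t ret;

        do {
                ret = ((uint64_t)rand() << 32) | (uint32_t)rand();
        } while (ret == veto_value);

        return ret;
}
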
{
struct server_id exclusive;
size_t num_shared, shared_len;
+ uint64_t unique_lock_epoch;
uint64_t unique_data_epoch;
if (buflen < (SERVER_ID_BUF_LENGTH + /* exclusive */
+ sizeof(uint64_t) + /* unique_lock_epoch */
sizeof(uint64_t) + /* unique_data_epoch */
sizeof(uint32_t))) { /* num_shared */
struct g_lock ret = {
.exclusive.pid = 0,
+ .unique_lock_epoch = generate_unique_u64(0),
.unique_data_epoch = generate_unique_u64(0),
};
*lck = ret;
buf += SERVER_ID_BUF_LENGTH;
buflen -= SERVER_ID_BUF_LENGTH;
+ unique_lock_epoch = BVAL(buf, 0);
+ buf += sizeof(uint64_t);
+ buflen -= sizeof(uint64_t);
+
unique_data_epoch = BVAL(buf, 0);
buf += sizeof(uint64_t);
buflen -= sizeof(uint64_t);
.exclusive = exclusive,
.num_shared = num_shared,
.shared = buf,
+ .unique_lock_epoch = unique_lock_epoch,
.unique_data_epoch = unique_data_epoch,
.datalen = buflen-shared_len,
.data = buf+shared_len,
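
BVAL() is Samba's byte-order macro for reading a 64-bit little-endian integer at a byte offset; SBVAL(), used in g_lock_store() below, is the matching store. A self-contained sketch of the load semantics the parse above relies on:

#include <stddef.h>
#include <stdint.h>

/* Equivalent of BVAL(buf, ofs): load 8 bytes at ofs as a
 * little-endian uint64_t, independent of host endianness. */
static uint64_t bval_sketch(const uint8_t *buf, size_t ofs)
{
        uint64_t v = 0;
        size_t i;

        for (i = 0; i < 8; i++) {
                v |= (uint64_t)buf[ofs + i] << (8 * i);
        }
        return v;
}
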
size_t num_new_dbufs)
{
uint8_t exclusive[SERVER_ID_BUF_LENGTH];
- uint8_t seqnum_buf[sizeof(uint64_t)];
+ uint8_t seqnum_buf[sizeof(uint64_t)*2];
uint8_t sizebuf[sizeof(uint32_t)];
uint8_t new_shared_buf[SERVER_ID_BUF_LENGTH];
}
server_id_put(exclusive, lck->exclusive);
- SBVAL(seqnum_buf, 0, lck->unique_data_epoch);
+ SBVAL(seqnum_buf, 0, lck->unique_lock_epoch);
+ SBVAL(seqnum_buf, 8, lck->unique_data_epoch);
if (new_shared != NULL) {
if (lck->num_shared >= UINT32_MAX) {
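
Taking the parse and store hunks together (seqnum_buf doubles to 16 bytes and both epochs are written back to back), the record image both sides now agree on is the following; offsets follow the read order in g_lock_parse() above:

/*
 * g_lock record layout after this patch (integers little-endian):
 *
 *   offset 0                      exclusive holder, SERVER_ID_BUF_LENGTH bytes
 *   + SERVER_ID_BUF_LENGTH        unique_lock_epoch, 8 bytes
 *   + SERVER_ID_BUF_LENGTH + 8    unique_data_epoch, 8 bytes
 *   + SERVER_ID_BUF_LENGTH + 16   num_shared, 4 bytes
 *   ...                           num_shared shared-holder ids,
 *                                 SERVER_ID_BUF_LENGTH bytes each
 *   ...                           opaque user data, datalen bytes
 */
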
g_lock_cleanup_dead(&lck, state->dead_blocker);
+ lck.unique_lock_epoch = generate_unique_u64(lck.unique_lock_epoch);
+
if (lck.exclusive.pid != 0) {
bool self_exclusive = server_id_equal(&self, &lck.exclusive);

if (!self_exclusive) {
goto not_granted;
}
}
+ lck.unique_lock_epoch = generate_unique_u64(lck.unique_lock_epoch);
+
if (state->type == G_LOCK_WRITE) {
if (lck.num_shared != 0) {
DBG_DEBUG("num_shared=%zu\n", lck.num_shared);
}
}
+ lck.unique_lock_epoch = generate_unique_u64(lck.unique_lock_epoch);
+
state->status = g_lock_store(rec, &lck, NULL, NULL, 0);
}
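
The apparent point of the split: every path through g_lock_trylock() that may rewrite the lock state bumps unique_lock_epoch before g_lock_store(), while unique_data_epoch is left untouched, so a consumer interested only in data changes can ignore wakeups caused by pure lock traffic. A hypothetical illustration of that consumer-side pattern (the helper below is not Samba API; the struct is trimmed to the fields defined in this patch):

#include <stdbool.h>
#include <stdint.h>

struct g_lock {                 /* trimmed to the fields used here */
        uint64_t unique_lock_epoch;
        uint64_t unique_data_epoch;
};

/* Hypothetical check, not Samba API: wakeups that only bumped
 * unique_lock_epoch (lock churn) are filtered out. */
static bool g_lock_data_changed_sketch(const struct g_lock *lck,
                                       uint64_t *cached_data_epoch)
{
        if (lck->unique_data_epoch == *cached_data_epoch) {
                return false;   /* only the lock state moved */
        }
        *cached_data_epoch = lck->unique_data_epoch;
        return true;
}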