zone_count_t zone;
struct action_manager *manager = as_action_manager(completion);
- ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == get_acting_zone_thread_id(manager)),
- "%s() called on acting zones's thread", __func__);
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == get_acting_zone_thread_id(manager)),
+ "%s() called on acting zones's thread", __func__);
zone = manager->acting_zone++;
if (manager->acting_zone == manager->zones) {
{
struct action *current_action;
- ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == manager->initiator_thread_id),
- "action initiated from correct thread");
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == manager->initiator_thread_id),
+ "action initiated from correct thread");
if (!manager->current_action->in_use) {
current_action = manager->current_action;
} else if (!manager->current_action->next->in_use) {
{
thread_id_t thread_id = vdo_get_callback_thread_id();
- ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id),
- "%s() must only be called on cache thread %d, not thread %d",
- function_name, cache->zone->thread_id, thread_id);
+ VDO_ASSERT_LOG_ONLY((thread_id == cache->zone->thread_id),
+ "%s() must only be called on cache thread %d, not thread %d",
+ function_name, cache->zone->thread_id, thread_id);
}
/** assert_io_allowed() - Assert that a page cache may issue I/O. */
static inline void assert_io_allowed(struct vdo_page_cache *cache)
{
- ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state),
- "VDO page cache may issue I/O");
+ VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&cache->zone->state),
+ "VDO page cache may issue I/O");
}
/** report_cache_pressure() - Log and, if enabled, report cache pressure. */
BUILD_BUG_ON(ARRAY_SIZE(state_names) != PAGE_STATE_COUNT);
- result = ASSERT(state < ARRAY_SIZE(state_names),
- "Unknown page_state value %d", state);
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(state < ARRAY_SIZE(state_names),
+ "Unknown page_state value %d", state);
+ if (result != VDO_SUCCESS)
return "[UNKNOWN PAGE STATE]";
return state_names[state];
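
Alongside the rename, the adjacent return-value checks switch from UDS_SUCCESS to VDO_SUCCESS. That follows from the assumed shape of the new wrappers in permassert.h, sketched below; the helper names shown here (vdo_must_use(), vdo_assertion_failed(), STRINGIFY(), __VDO_ASSERT()) are recalled rather than quoted, so the exact upstream definitions may differ slightly. The key point is that a passing assertion evaluates to VDO_SUCCESS, which is why checks like "if (result != UDS_SUCCESS)" become "if (result != VDO_SUCCESS)" throughout this series.

	/* Sketch only: both wrappers evaluate to VDO_SUCCESS when the expression holds. */
	#define VDO_ASSERT_LOG_ONLY(expr, ...) __VDO_ASSERT(expr, __VA_ARGS__)

	/* Same, but the caller is expected to check the returned value. */
	#define VDO_ASSERT(expr, ...) vdo_must_use(__VDO_ASSERT(expr, __VA_ARGS__))

	#define __VDO_ASSERT(expr, ...)						\
		(likely(expr) ? VDO_SUCCESS					\
			      : vdo_assertion_failed(STRINGIFY(expr), __FILE__,	\
						     __LINE__, __VA_ARGS__))
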
struct vdo_page_cache *cache = info->cache;
/* Either the new or the old page number must be NO_PAGE. */
- int result = ASSERT((pbn == NO_PAGE) || (info->pbn == NO_PAGE),
- "Must free a page before reusing it.");
+ int result = VDO_ASSERT((pbn == NO_PAGE) || (info->pbn == NO_PAGE),
+ "Must free a page before reusing it.");
if (result != VDO_SUCCESS)
return result;
{
int result;
- result = ASSERT(info->busy == 0, "VDO Page must not be busy");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(info->busy == 0, "VDO Page must not be busy");
+ if (result != VDO_SUCCESS)
return result;
- result = ASSERT(!vdo_waitq_has_waiters(&info->waiting),
- "VDO Page must not have waiters");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(!vdo_waitq_has_waiters(&info->waiting),
+ "VDO Page must not have waiters");
+ if (result != VDO_SUCCESS)
return result;
result = set_info_pbn(info, NO_PAGE);
{
int result;
- result = ASSERT(completion->ready, "VDO Page completion not ready");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(completion->ready, "VDO Page completion not ready");
+ if (result != VDO_SUCCESS)
return result;
- result = ASSERT(completion->info != NULL,
- "VDO Page Completion must be complete");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(completion->info != NULL,
+ "VDO Page Completion must be complete");
+ if (result != VDO_SUCCESS)
return result;
- result = ASSERT(completion->info->pbn == completion->pbn,
- "VDO Page Completion pbn must be consistent");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(completion->info->pbn == completion->pbn,
+ "VDO Page Completion pbn must be consistent");
+ if (result != VDO_SUCCESS)
return result;
- result = ASSERT(is_valid(completion->info),
- "VDO Page Completion page must be valid");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(is_valid(completion->info),
+ "VDO Page Completion page must be valid");
+ if (result != VDO_SUCCESS)
return result;
if (writable) {
- result = ASSERT(completion->writable,
- "VDO Page Completion must be writable");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(completion->writable,
+ "VDO Page Completion must be writable");
+ if (result != VDO_SUCCESS)
return result;
}
if (result != VDO_SUCCESS)
return result;
- result = ASSERT((info->busy == 0), "Page is not busy before loading.");
+ result = VDO_ASSERT((info->busy == 0), "Page is not busy before loading.");
if (result != VDO_SUCCESS)
return result;
return;
}
- ASSERT_LOG_ONLY(!is_in_flight(info),
- "page selected for discard is not in flight");
+ VDO_ASSERT_LOG_ONLY(!is_in_flight(info),
+ "page selected for discard is not in flight");
cache->discard_count++;
info->write_status = WRITE_STATUS_DISCARD;
discard_info = page_completion->info;
}
- ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL),
- "Page being released after leaving all queues");
+ VDO_ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL),
+ "Page being released after leaving all queues");
page_completion->info = NULL;
cache = page_completion->cache;
struct page_info *info;
assert_on_cache_thread(cache, __func__);
- ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL),
- "New page completion was not already on a wait queue");
+ VDO_ASSERT_LOG_ONLY((page_completion->waiter.next_waiter == NULL),
+ "New page completion was not already on a wait queue");
*page_completion = (struct vdo_page_completion) {
.pbn = pbn,
}
/* Something horrible has gone wrong. */
- ASSERT_LOG_ONLY(false, "Info found in a usable state.");
+ VDO_ASSERT_LOG_ONLY(false, "Info found in a usable state.");
}
/* The page must be fetched. */
/* Make sure we don't throw away any dirty pages. */
for (info = cache->infos; info < cache->infos + cache->page_count; info++) {
- int result = ASSERT(!is_dirty(info), "cache must have no dirty pages");
+ int result = VDO_ASSERT(!is_dirty(info), "cache must have no dirty pages");
if (result != VDO_SUCCESS)
return result;
{
int result;
- result = ASSERT((in_cyclic_range(zone->oldest_generation, a, zone->generation, 1 << 8) &&
- in_cyclic_range(zone->oldest_generation, b, zone->generation, 1 << 8)),
- "generation(s) %u, %u are out of range [%u, %u]",
- a, b, zone->oldest_generation, zone->generation);
+ result = VDO_ASSERT((in_cyclic_range(zone->oldest_generation, a, zone->generation, 1 << 8) &&
+ in_cyclic_range(zone->oldest_generation, b, zone->generation, 1 << 8)),
+ "generation(s) %u, %u are out of range [%u, %u]",
+ a, b, zone->oldest_generation, zone->generation);
if (result != VDO_SUCCESS) {
enter_zone_read_only_mode(zone, result);
return true;
{
int result;
- result = ASSERT((zone->dirty_page_counts[generation] > 0),
- "dirty page count underflow for generation %u", generation);
+ result = VDO_ASSERT((zone->dirty_page_counts[generation] > 0),
+ "dirty page count underflow for generation %u", generation);
if (result != VDO_SUCCESS) {
enter_zone_read_only_mode(zone, result);
return;
page->generation = new_generation;
new_count = ++zone->dirty_page_counts[new_generation];
- result = ASSERT((new_count != 0), "dirty page count overflow for generation %u",
- new_generation);
+ result = VDO_ASSERT((new_count != 0), "dirty page count overflow for generation %u",
+ new_generation);
if (result != VDO_SUCCESS) {
enter_zone_read_only_mode(zone, result);
return;
struct tree_lock *lock_holder;
struct tree_lock *lock = &data_vio->tree_lock;
- ASSERT_LOG_ONLY(lock->locked,
- "release of unlocked block map page %s for key %llu in tree %u",
- what, (unsigned long long) lock->key, lock->root_index);
+ VDO_ASSERT_LOG_ONLY(lock->locked,
+ "release of unlocked block map page %s for key %llu in tree %u",
+ what, (unsigned long long) lock->key, lock->root_index);
zone = data_vio->logical.zone->block_map_zone;
lock_holder = vdo_int_map_remove(zone->loading_pages, lock->key);
- ASSERT_LOG_ONLY((lock_holder == lock),
- "block map page %s mismatch for key %llu in tree %u",
- what, (unsigned long long) lock->key, lock->root_index);
+ VDO_ASSERT_LOG_ONLY((lock_holder == lock),
+ "block map page %s mismatch for key %llu in tree %u",
+ what, (unsigned long long) lock->key, lock->root_index);
lock->locked = false;
}
list_del_init(&page->entry);
- result = ASSERT(!vdo_waiter_is_waiting(&page->waiter),
- "Newly expired page not already waiting to write");
+ result = VDO_ASSERT(!vdo_waiter_is_waiting(&page->waiter),
+ "Newly expired page not already waiting to write");
if (result != VDO_SUCCESS) {
enter_zone_read_only_mode(zone, result);
continue;
BUILD_BUG_ON(VDO_BLOCK_MAP_ENTRIES_PER_PAGE !=
((VDO_BLOCK_SIZE - sizeof(struct block_map_page)) /
sizeof(struct block_map_entry)));
- result = ASSERT(cache_size > 0, "block map cache size is specified");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(cache_size > 0, "block map cache size is specified");
+ if (result != VDO_SUCCESS)
return result;
result = vdo_allocate_extended(struct block_map,
for (z = 0; z < map->zone_count; z++) {
struct dirty_lists *dirty_lists = map->zones[z].dirty_lists;
- ASSERT_LOG_ONLY(dirty_lists->next_period == 0, "current period not set");
+ VDO_ASSERT_LOG_ONLY(dirty_lists->next_period == 0, "current period not set");
dirty_lists->oldest_period = map->current_era_point;
dirty_lists->next_period = map->current_era_point + 1;
dirty_lists->offset = map->current_era_point % dirty_lists->maximum_age;
{
struct block_map_zone *zone = container_of(state, struct block_map_zone, state);
- ASSERT_LOG_ONLY((zone->active_lookups == 0),
- "%s() called with no active lookups", __func__);
+ VDO_ASSERT_LOG_ONLY((zone->active_lookups == 0),
+ "%s() called with no active lookups", __func__);
if (!vdo_is_state_suspending(state)) {
while (zone->dirty_lists->oldest_period < zone->dirty_lists->next_period)
static inline void assert_incomplete(struct vdo_completion *completion)
{
- ASSERT_LOG_ONLY(!completion->complete, "completion is not complete");
+ VDO_ASSERT_LOG_ONLY(!completion->complete, "completion is not complete");
}
/**
struct vdo *vdo = completion->vdo;
thread_id_t thread_id = completion->callback_thread_id;
- if (ASSERT(thread_id < vdo->thread_config.thread_count,
- "thread_id %u (completion type %d) is less than thread count %u",
- thread_id, completion->type,
- vdo->thread_config.thread_count) != UDS_SUCCESS)
+ if (VDO_ASSERT(thread_id < vdo->thread_config.thread_count,
+ "thread_id %u (completion type %d) is less than thread count %u",
+ thread_id, completion->type,
+ vdo->thread_config.thread_count) != VDO_SUCCESS)
BUG();
completion->requeue = false;
static inline int vdo_assert_completion_type(struct vdo_completion *completion,
enum vdo_completion_type expected)
{
- return ASSERT(expected == completion->type,
- "completion type should be %u, not %u", expected,
- completion->type);
+ return VDO_ASSERT(expected == completion->type,
+ "completion type should be %u, not %u", expected,
+ completion->type);
}
static inline void vdo_set_completion_callback(struct vdo_completion *completion,
if (pool->limiter.busy > 0)
return false;
- ASSERT_LOG_ONLY((pool->discard_limiter.busy == 0),
- "no outstanding discard permits");
+ VDO_ASSERT_LOG_ONLY((pool->discard_limiter.busy == 0),
+ "no outstanding discard permits");
return (bio_list_empty(&pool->limiter.new_waiters) &&
bio_list_empty(&pool->discard_limiter.new_waiters));
if (bio == NULL)
return;
- ASSERT_LOG_ONLY((data_vio->remaining_discard <=
- (u32) (VDO_BLOCK_SIZE - data_vio->offset)),
- "data_vio to acknowledge is not an incomplete discard");
+ VDO_ASSERT_LOG_ONLY((data_vio->remaining_discard <=
+ (u32) (VDO_BLOCK_SIZE - data_vio->offset)),
+ "data_vio to acknowledge is not an incomplete discard");
data_vio->user_bio = NULL;
vdo_count_bios(&vdo->stats.bios_acknowledged, bio);
return;
}
- result = ASSERT(lock_holder->logical.locked, "logical block lock held");
+ result = VDO_ASSERT(lock_holder->logical.locked, "logical block lock held");
if (result != VDO_SUCCESS) {
continue_data_vio_with_error(data_vio, result);
return;
struct bio_list *waiters = &limiter->waiters;
data_vio_count_t available = limiter->limit - limiter->busy;
- ASSERT_LOG_ONLY((limiter->release_count <= limiter->busy),
- "Release count %u is not more than busy count %u",
- limiter->release_count, limiter->busy);
+ VDO_ASSERT_LOG_ONLY((limiter->release_count <= limiter->busy),
+ "Release count %u is not more than busy count %u",
+ limiter->release_count, limiter->busy);
get_waiters(limiter);
for (; (limiter->release_count > 0) && !bio_list_empty(waiters); limiter->release_count--)
if (result != VDO_SUCCESS)
return result;
- ASSERT_LOG_ONLY((discard_limit <= pool_size),
- "discard limit does not exceed pool size");
+ VDO_ASSERT_LOG_ONLY((discard_limit <= pool_size),
+ "discard limit does not exceed pool size");
initialize_limiter(&pool->discard_limiter, pool, assign_discard_permit,
discard_limit);
pool->discard_limiter.permitted_waiters = &pool->permitted_discards;
BUG_ON(atomic_read(&pool->processing));
spin_lock(&pool->lock);
- ASSERT_LOG_ONLY((pool->limiter.busy == 0),
- "data_vio pool must not have %u busy entries when being freed",
- pool->limiter.busy);
- ASSERT_LOG_ONLY((bio_list_empty(&pool->limiter.waiters) &&
- bio_list_empty(&pool->limiter.new_waiters)),
- "data_vio pool must not have threads waiting to read or write when being freed");
- ASSERT_LOG_ONLY((bio_list_empty(&pool->discard_limiter.waiters) &&
- bio_list_empty(&pool->discard_limiter.new_waiters)),
- "data_vio pool must not have threads waiting to discard when being freed");
+ VDO_ASSERT_LOG_ONLY((pool->limiter.busy == 0),
+ "data_vio pool must not have %u busy entries when being freed",
+ pool->limiter.busy);
+ VDO_ASSERT_LOG_ONLY((bio_list_empty(&pool->limiter.waiters) &&
+ bio_list_empty(&pool->limiter.new_waiters)),
+ "data_vio pool must not have threads waiting to read or write when being freed");
+ VDO_ASSERT_LOG_ONLY((bio_list_empty(&pool->discard_limiter.waiters) &&
+ bio_list_empty(&pool->discard_limiter.new_waiters)),
+ "data_vio pool must not have threads waiting to discard when being freed");
spin_unlock(&pool->lock);
list_for_each_entry_safe(data_vio, tmp, &pool->available, pool_entry) {
{
struct data_vio *data_vio;
- ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&pool->state),
- "data_vio_pool not quiescent on acquire");
+ VDO_ASSERT_LOG_ONLY(!vdo_is_state_quiescent(&pool->state),
+ "data_vio_pool not quiescent on acquire");
bio->bi_private = (void *) jiffies;
spin_lock(&pool->lock);
static void assert_on_vdo_cpu_thread(const struct vdo *vdo, const char *name)
{
- ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.cpu_thread),
- "%s called on cpu thread", name);
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.cpu_thread),
+ "%s called on cpu thread", name);
}
/**
/* The lock is not locked, so it had better not be registered in the lock map. */
struct data_vio *lock_holder = vdo_int_map_get(lock_map, lock->lbn);
- ASSERT_LOG_ONLY((data_vio != lock_holder),
- "no logical block lock held for block %llu",
- (unsigned long long) lock->lbn);
+ VDO_ASSERT_LOG_ONLY((data_vio != lock_holder),
+ "no logical block lock held for block %llu",
+ (unsigned long long) lock->lbn);
return;
}
/* Release the lock by removing the lock from the map. */
lock_holder = vdo_int_map_remove(lock_map, lock->lbn);
- ASSERT_LOG_ONLY((data_vio == lock_holder),
- "logical block lock mismatch for block %llu",
- (unsigned long long) lock->lbn);
+ VDO_ASSERT_LOG_ONLY((data_vio == lock_holder),
+ "logical block lock mismatch for block %llu",
+ (unsigned long long) lock->lbn);
lock->locked = false;
}
struct data_vio *lock_holder, *next_lock_holder;
int result;
- ASSERT_LOG_ONLY(lock->locked, "lbn_lock with waiters is not locked");
+ VDO_ASSERT_LOG_ONLY(lock->locked, "lbn_lock with waiters is not locked");
/* Another data_vio is waiting for the lock, transfer it in a single lock map operation. */
next_lock_holder =
return;
}
- ASSERT_LOG_ONLY((lock_holder == data_vio),
- "logical block lock mismatch for block %llu",
- (unsigned long long) lock->lbn);
+ VDO_ASSERT_LOG_ONLY((lock_holder == data_vio),
+ "logical block lock mismatch for block %llu",
+ (unsigned long long) lock->lbn);
lock->locked = false;
/*
{
struct vdo_completion *completion = &data_vio->vio.completion;
- ASSERT_LOG_ONLY(data_vio->allocation.lock == NULL,
- "complete data_vio has no allocation lock");
- ASSERT_LOG_ONLY(data_vio->hash_lock == NULL,
- "complete data_vio has no hash lock");
+ VDO_ASSERT_LOG_ONLY(data_vio->allocation.lock == NULL,
+ "complete data_vio has no allocation lock");
+ VDO_ASSERT_LOG_ONLY(data_vio->hash_lock == NULL,
+ "complete data_vio has no hash lock");
if ((data_vio->remaining_discard <= VDO_BLOCK_SIZE) ||
(completion->result != VDO_SUCCESS)) {
struct data_vio_pool *pool = completion->vdo->data_vio_pool;
{
struct allocation *allocation = &data_vio->allocation;
- ASSERT_LOG_ONLY((allocation->pbn == VDO_ZERO_BLOCK),
- "data_vio does not have an allocation");
+ VDO_ASSERT_LOG_ONLY((allocation->pbn == VDO_ZERO_BLOCK),
+ "data_vio does not have an allocation");
allocation->write_lock_type = write_lock_type;
allocation->zone = vdo_get_next_allocation_zone(data_vio->logical.zone);
allocation->first_allocation_zone = allocation->zone->zone_number;
*/
void launch_compress_data_vio(struct data_vio *data_vio)
{
- ASSERT_LOG_ONLY(!data_vio->is_duplicate, "compressing a non-duplicate block");
- ASSERT_LOG_ONLY(data_vio->hash_lock != NULL,
- "data_vio to compress has a hash_lock");
- ASSERT_LOG_ONLY(data_vio_has_allocation(data_vio),
- "data_vio to compress has an allocation");
+ VDO_ASSERT_LOG_ONLY(!data_vio->is_duplicate, "compressing a non-duplicate block");
+ VDO_ASSERT_LOG_ONLY(data_vio->hash_lock != NULL,
+ "data_vio to compress has a hash_lock");
+ VDO_ASSERT_LOG_ONLY(data_vio_has_allocation(data_vio),
+ "data_vio to compress has an allocation");
/*
* There are 4 reasons why a data_vio which has reached this point will not be eligible for
struct data_vio *data_vio = as_data_vio(completion);
assert_data_vio_on_cpu_thread(data_vio);
- ASSERT_LOG_ONLY(!data_vio->is_zero, "zero blocks should not be hashed");
+ VDO_ASSERT_LOG_ONLY(!data_vio->is_zero, "zero blocks should not be hashed");
murmurhash3_128(data_vio->vio.data, VDO_BLOCK_SIZE, 0x62ea60be,
&data_vio->record_name);
static void prepare_for_dedupe(struct data_vio *data_vio)
{
/* We don't care what thread we are on. */
- ASSERT_LOG_ONLY(!data_vio->is_zero, "must not prepare to dedupe zero blocks");
+ VDO_ASSERT_LOG_ONLY(!data_vio->is_zero, "must not prepare to dedupe zero blocks");
/*
* Before we can dedupe, we need to know the record name, so the first
struct data_vio *data_vio = as_data_vio(completion);
struct vdo *vdo = completion->vdo;
- ASSERT_LOG_ONLY((!vdo_uses_bio_ack_queue(vdo) ||
- (vdo_get_callback_thread_id() == vdo->thread_config.bio_ack_thread)),
- "%s() called on bio ack queue", __func__);
- ASSERT_LOG_ONLY(data_vio_has_flush_generation_lock(data_vio),
- "write VIO to be acknowledged has a flush generation lock");
+ VDO_ASSERT_LOG_ONLY((!vdo_uses_bio_ack_queue(vdo) ||
+ (vdo_get_callback_thread_id() == vdo->thread_config.bio_ack_thread)),
+ "%s() called on bio ack queue", __func__);
+ VDO_ASSERT_LOG_ONLY(data_vio_has_flush_generation_lock(data_vio),
+ "write VIO to be acknowledged has a flush generation lock");
acknowledge_data_vio(data_vio);
if (data_vio->new_mapped.pbn == VDO_ZERO_BLOCK) {
/* This is a zero write or discard */
static int assert_is_discard(struct data_vio *data_vio)
{
- int result = ASSERT(data_vio->is_discard,
- "data_vio with no block map page is a discard");
+ int result = VDO_ASSERT(data_vio->is_discard,
+ "data_vio with no block map page is a discard");
return ((result == VDO_SUCCESS) ? result : VDO_READ_ONLY);
}
static inline struct data_vio *vio_as_data_vio(struct vio *vio)
{
- ASSERT_LOG_ONLY((vio->type == VIO_TYPE_DATA), "vio is a data_vio");
+ VDO_ASSERT_LOG_ONLY((vio->type == VIO_TYPE_DATA), "vio is a data_vio");
return container_of(vio, struct data_vio, vio);
}
* It's odd to use the LBN, but converting the record name to hex is a bit clunky for an
* inline, and the LBN better than nothing as an identifier.
*/
- ASSERT_LOG_ONLY((expected == thread_id),
- "data_vio for logical block %llu on thread %u, should be on hash zone thread %u",
- (unsigned long long) data_vio->logical.lbn, thread_id, expected);
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "data_vio for logical block %llu on thread %u, should be on hash zone thread %u",
+ (unsigned long long) data_vio->logical.lbn, thread_id, expected);
}
static inline void set_data_vio_hash_zone_callback(struct data_vio *data_vio,
thread_id_t expected = data_vio->logical.zone->thread_id;
thread_id_t thread_id = vdo_get_callback_thread_id();
- ASSERT_LOG_ONLY((expected == thread_id),
- "data_vio for logical block %llu on thread %u, should be on thread %u",
- (unsigned long long) data_vio->logical.lbn, thread_id, expected);
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "data_vio for logical block %llu on thread %u, should be on thread %u",
+ (unsigned long long) data_vio->logical.lbn, thread_id, expected);
}
static inline void set_data_vio_logical_callback(struct data_vio *data_vio,
thread_id_t expected = data_vio->allocation.zone->thread_id;
thread_id_t thread_id = vdo_get_callback_thread_id();
- ASSERT_LOG_ONLY((expected == thread_id),
- "struct data_vio for allocated physical block %llu on thread %u, should be on thread %u",
- (unsigned long long) data_vio->allocation.pbn, thread_id,
- expected);
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "struct data_vio for allocated physical block %llu on thread %u, should be on thread %u",
+ (unsigned long long) data_vio->allocation.pbn, thread_id,
+ expected);
}
static inline void set_data_vio_allocated_zone_callback(struct data_vio *data_vio,
thread_id_t expected = data_vio->duplicate.zone->thread_id;
thread_id_t thread_id = vdo_get_callback_thread_id();
- ASSERT_LOG_ONLY((expected == thread_id),
- "data_vio for duplicate physical block %llu on thread %u, should be on thread %u",
- (unsigned long long) data_vio->duplicate.pbn, thread_id,
- expected);
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "data_vio for duplicate physical block %llu on thread %u, should be on thread %u",
+ (unsigned long long) data_vio->duplicate.pbn, thread_id,
+ expected);
}
static inline void set_data_vio_duplicate_zone_callback(struct data_vio *data_vio,
thread_id_t expected = data_vio->mapped.zone->thread_id;
thread_id_t thread_id = vdo_get_callback_thread_id();
- ASSERT_LOG_ONLY((expected == thread_id),
- "data_vio for mapped physical block %llu on thread %u, should be on thread %u",
- (unsigned long long) data_vio->mapped.pbn, thread_id, expected);
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "data_vio for mapped physical block %llu on thread %u, should be on thread %u",
+ (unsigned long long) data_vio->mapped.pbn, thread_id, expected);
}
static inline void set_data_vio_mapped_zone_callback(struct data_vio *data_vio,
thread_id_t expected = data_vio->new_mapped.zone->thread_id;
thread_id_t thread_id = vdo_get_callback_thread_id();
- ASSERT_LOG_ONLY((expected == thread_id),
- "data_vio for new_mapped physical block %llu on thread %u, should be on thread %u",
- (unsigned long long) data_vio->new_mapped.pbn, thread_id,
- expected);
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "data_vio for new_mapped physical block %llu on thread %u, should be on thread %u",
+ (unsigned long long) data_vio->new_mapped.pbn, thread_id,
+ expected);
}
static inline void set_data_vio_new_mapped_zone_callback(struct data_vio *data_vio,
thread_id_t journal_thread = vdo_from_data_vio(data_vio)->thread_config.journal_thread;
thread_id_t thread_id = vdo_get_callback_thread_id();
- ASSERT_LOG_ONLY((journal_thread == thread_id),
- "data_vio for logical block %llu on thread %u, should be on journal thread %u",
- (unsigned long long) data_vio->logical.lbn, thread_id,
- journal_thread);
+ VDO_ASSERT_LOG_ONLY((journal_thread == thread_id),
+ "data_vio for logical block %llu on thread %u, should be on journal thread %u",
+ (unsigned long long) data_vio->logical.lbn, thread_id,
+ journal_thread);
}
static inline void set_data_vio_journal_callback(struct data_vio *data_vio,
thread_id_t packer_thread = vdo_from_data_vio(data_vio)->thread_config.packer_thread;
thread_id_t thread_id = vdo_get_callback_thread_id();
- ASSERT_LOG_ONLY((packer_thread == thread_id),
- "data_vio for logical block %llu on thread %u, should be on packer thread %u",
- (unsigned long long) data_vio->logical.lbn, thread_id,
- packer_thread);
+ VDO_ASSERT_LOG_ONLY((packer_thread == thread_id),
+ "data_vio for logical block %llu on thread %u, should be on packer thread %u",
+ (unsigned long long) data_vio->logical.lbn, thread_id,
+ packer_thread);
}
static inline void set_data_vio_packer_callback(struct data_vio *data_vio,
thread_id_t cpu_thread = vdo_from_data_vio(data_vio)->thread_config.cpu_thread;
thread_id_t thread_id = vdo_get_callback_thread_id();
- ASSERT_LOG_ONLY((cpu_thread == thread_id),
- "data_vio for logical block %llu on thread %u, should be on cpu thread %u",
- (unsigned long long) data_vio->logical.lbn, thread_id,
- cpu_thread);
+ VDO_ASSERT_LOG_ONLY((cpu_thread == thread_id),
+ "data_vio for logical block %llu on thread %u, should be on cpu thread %u",
+ (unsigned long long) data_vio->logical.lbn, thread_id,
+ cpu_thread);
}
static inline void set_data_vio_cpu_callback(struct data_vio *data_vio,
static inline void assert_in_hash_zone(struct hash_zone *zone, const char *name)
{
- ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id),
- "%s called on hash zone thread", name);
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id),
+ "%s called on hash zone thread", name);
}
static inline bool change_context_state(struct dedupe_context *context, int old, int new)
{
/* Not safe to access the agent field except from the hash zone. */
assert_data_vio_in_hash_zone(data_vio);
- ASSERT_LOG_ONLY(data_vio == data_vio->hash_lock->agent,
- "%s must be for the hash lock agent", where);
+ VDO_ASSERT_LOG_ONLY(data_vio == data_vio->hash_lock->agent,
+ "%s must be for the hash lock agent", where);
}
/**
*/
static void set_duplicate_lock(struct hash_lock *hash_lock, struct pbn_lock *pbn_lock)
{
- ASSERT_LOG_ONLY((hash_lock->duplicate_lock == NULL),
- "hash lock must not already hold a duplicate lock");
-
+ VDO_ASSERT_LOG_ONLY((hash_lock->duplicate_lock == NULL),
+ "hash lock must not already hold a duplicate lock");
pbn_lock->holder_count += 1;
hash_lock->duplicate_lock = pbn_lock;
}
struct hash_lock *old_lock = data_vio->hash_lock;
if (old_lock != NULL) {
- ASSERT_LOG_ONLY(data_vio->hash_zone != NULL,
- "must have a hash zone when holding a hash lock");
- ASSERT_LOG_ONLY(!list_empty(&data_vio->hash_lock_entry),
- "must be on a hash lock ring when holding a hash lock");
- ASSERT_LOG_ONLY(old_lock->reference_count > 0,
- "hash lock reference must be counted");
+ VDO_ASSERT_LOG_ONLY(data_vio->hash_zone != NULL,
+ "must have a hash zone when holding a hash lock");
+ VDO_ASSERT_LOG_ONLY(!list_empty(&data_vio->hash_lock_entry),
+ "must be on a hash lock ring when holding a hash lock");
+ VDO_ASSERT_LOG_ONLY(old_lock->reference_count > 0,
+ "hash lock reference must be counted");
if ((old_lock->state != VDO_HASH_LOCK_BYPASSING) &&
(old_lock->state != VDO_HASH_LOCK_UNLOCKING)) {
* If the reference count goes to zero in a non-terminal state, we're most
* likely leaking this lock.
*/
- ASSERT_LOG_ONLY(old_lock->reference_count > 1,
- "hash locks should only become unreferenced in a terminal state, not state %s",
- get_hash_lock_state_name(old_lock->state));
+ VDO_ASSERT_LOG_ONLY(old_lock->reference_count > 1,
+ "hash locks should only become unreferenced in a terminal state, not state %s",
+ get_hash_lock_state_name(old_lock->state));
}
list_del_init(&data_vio->hash_lock_entry);
assert_hash_lock_agent(agent, __func__);
- ASSERT_LOG_ONLY(lock->duplicate_lock == NULL,
- "must have released the duplicate lock for the hash lock");
+ VDO_ASSERT_LOG_ONLY(lock->duplicate_lock == NULL,
+ "must have released the duplicate lock for the hash lock");
if (!lock->verified) {
/*
struct hash_lock *lock = agent->hash_lock;
assert_data_vio_in_duplicate_zone(agent);
- ASSERT_LOG_ONLY(lock->duplicate_lock != NULL,
- "must have a duplicate lock to release");
+ VDO_ASSERT_LOG_ONLY(lock->duplicate_lock != NULL,
+ "must have a duplicate lock to release");
vdo_release_physical_zone_pbn_lock(agent->duplicate.zone, agent->duplicate.pbn,
vdo_forget(lock->duplicate_lock));
{
lock->state = VDO_HASH_LOCK_UPDATING;
- ASSERT_LOG_ONLY(lock->verified, "new advice should have been verified");
- ASSERT_LOG_ONLY(lock->update_advice, "should only update advice if needed");
+ VDO_ASSERT_LOG_ONLY(lock->verified, "new advice should have been verified");
+ VDO_ASSERT_LOG_ONLY(lock->update_advice, "should only update advice if needed");
agent->last_async_operation = VIO_ASYNC_OP_UPDATE_DEDUPE_INDEX;
set_data_vio_hash_zone_callback(agent, finish_updating);
{
struct data_vio *agent = data_vio;
- ASSERT_LOG_ONLY(lock->agent == NULL, "shouldn't have an agent in DEDUPING");
- ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters),
- "shouldn't have any lock waiters in DEDUPING");
+ VDO_ASSERT_LOG_ONLY(lock->agent == NULL, "shouldn't have an agent in DEDUPING");
+ VDO_ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters),
+ "shouldn't have any lock waiters in DEDUPING");
/* Just release the lock reference if other data_vios are still deduping. */
if (lock->reference_count > 1) {
* Borrow and prepare a lock from the pool so we don't have to do two int_map accesses
* in the common case of no lock contention.
*/
- result = ASSERT(!list_empty(&zone->lock_pool),
- "never need to wait for a free hash lock");
+ result = VDO_ASSERT(!list_empty(&zone->lock_pool),
+ "never need to wait for a free hash lock");
if (result != VDO_SUCCESS)
return result;
if (replace_lock != NULL) {
/* On mismatch put the old lock back and return a severe error */
- ASSERT_LOG_ONLY(lock == replace_lock,
- "old lock must have been in the lock map");
+ VDO_ASSERT_LOG_ONLY(lock == replace_lock,
+ "old lock must have been in the lock map");
/* TODO: Check earlier and bail out? */
- ASSERT_LOG_ONLY(replace_lock->registered,
- "old lock must have been marked registered");
+ VDO_ASSERT_LOG_ONLY(replace_lock->registered,
+ "old lock must have been marked registered");
replace_lock->registered = false;
}
* deduplicate against it.
*/
if (lock->duplicate_lock == NULL) {
- ASSERT_LOG_ONLY(!vdo_is_state_compressed(agent->new_mapped.state),
- "compression must have shared a lock");
- ASSERT_LOG_ONLY(agent_is_done,
- "agent must have written the new duplicate");
+ VDO_ASSERT_LOG_ONLY(!vdo_is_state_compressed(agent->new_mapped.state),
+ "compression must have shared a lock");
+ VDO_ASSERT_LOG_ONLY(agent_is_done,
+ "agent must have written the new duplicate");
transfer_allocation_lock(agent);
}
- ASSERT_LOG_ONLY(vdo_is_pbn_read_lock(lock->duplicate_lock),
- "duplicate_lock must be a PBN read lock");
+ VDO_ASSERT_LOG_ONLY(vdo_is_pbn_read_lock(lock->duplicate_lock),
+ "duplicate_lock must be a PBN read lock");
/*
* This state is not like any of the other states. There is no designated agent--the agent
agent->scratch_block);
lock->state = VDO_HASH_LOCK_VERIFYING;
- ASSERT_LOG_ONLY(!lock->verified, "hash lock only verifies advice once");
+ VDO_ASSERT_LOG_ONLY(!lock->verified, "hash lock only verifies advice once");
agent->last_async_operation = VIO_ASYNC_OP_VERIFY_DUPLICATION;
result = vio_reset_bio(vio, buffer, verify_endio, REQ_OP_READ,
assert_hash_lock_agent(agent, __func__);
if (!agent->is_duplicate) {
- ASSERT_LOG_ONLY(lock->duplicate_lock == NULL,
- "must not hold duplicate_lock if not flagged as a duplicate");
+ VDO_ASSERT_LOG_ONLY(lock->duplicate_lock == NULL,
+ "must not hold duplicate_lock if not flagged as a duplicate");
/*
* LOCKING -> WRITING transition: The advice block is being modified or has no
* available references, so try to write or compress the data, remembering to
return;
}
- ASSERT_LOG_ONLY(lock->duplicate_lock != NULL,
- "must hold duplicate_lock if flagged as a duplicate");
+ VDO_ASSERT_LOG_ONLY(lock->duplicate_lock != NULL,
+ "must hold duplicate_lock if flagged as a duplicate");
if (!lock->verified) {
/*
*/
static void start_locking(struct hash_lock *lock, struct data_vio *agent)
{
- ASSERT_LOG_ONLY(lock->duplicate_lock == NULL,
- "must not acquire a duplicate lock when already holding it");
+ VDO_ASSERT_LOG_ONLY(lock->duplicate_lock == NULL,
+ "must not acquire a duplicate lock when already holding it");
lock->state = VDO_HASH_LOCK_LOCKING;
*/
static void report_bogus_lock_state(struct hash_lock *lock, struct data_vio *data_vio)
{
- ASSERT_LOG_ONLY(false, "hash lock must not be in unimplemented state %s",
- get_hash_lock_state_name(lock->state));
+ VDO_ASSERT_LOG_ONLY(false, "hash lock must not be in unimplemented state %s",
+ get_hash_lock_state_name(lock->state));
continue_data_vio_with_error(data_vio, VDO_LOCK_ERROR);
}
switch (lock->state) {
case VDO_HASH_LOCK_WRITING:
- ASSERT_LOG_ONLY(data_vio == lock->agent,
- "only the lock agent may continue the lock");
+ VDO_ASSERT_LOG_ONLY(data_vio == lock->agent,
+ "only the lock agent may continue the lock");
finish_writing(lock, data_vio);
break;
int result;
/* FIXME: BUG_ON() and/or enter read-only mode? */
- result = ASSERT(data_vio->hash_lock == NULL,
- "must not already hold a hash lock");
+ result = VDO_ASSERT(data_vio->hash_lock == NULL,
+ "must not already hold a hash lock");
if (result != VDO_SUCCESS)
return result;
- result = ASSERT(list_empty(&data_vio->hash_lock_entry),
- "must not already be a member of a hash lock ring");
+ result = VDO_ASSERT(list_empty(&data_vio->hash_lock_entry),
+ "must not already be a member of a hash lock ring");
if (result != VDO_SUCCESS)
return result;
- return ASSERT(data_vio->recovery_sequence_number == 0,
- "must not hold a recovery lock when getting a hash lock");
+ return VDO_ASSERT(data_vio->recovery_sequence_number == 0,
+ "must not hold a recovery lock when getting a hash lock");
}
/**
struct hash_lock *removed;
removed = vdo_int_map_remove(zone->hash_lock_map, lock_key);
- ASSERT_LOG_ONLY(lock == removed,
- "hash lock being released must have been mapped");
+ VDO_ASSERT_LOG_ONLY(lock == removed,
+ "hash lock being released must have been mapped");
} else {
- ASSERT_LOG_ONLY(lock != vdo_int_map_get(zone->hash_lock_map, lock_key),
- "unregistered hash lock must not be in the lock map");
- }
-
- ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters),
- "hash lock returned to zone must have no waiters");
- ASSERT_LOG_ONLY((lock->duplicate_lock == NULL),
- "hash lock returned to zone must not reference a PBN lock");
- ASSERT_LOG_ONLY((lock->state == VDO_HASH_LOCK_BYPASSING),
- "returned hash lock must not be in use with state %s",
- get_hash_lock_state_name(lock->state));
- ASSERT_LOG_ONLY(list_empty(&lock->pool_node),
- "hash lock returned to zone must not be in a pool ring");
- ASSERT_LOG_ONLY(list_empty(&lock->duplicate_ring),
- "hash lock returned to zone must not reference DataVIOs");
+ VDO_ASSERT_LOG_ONLY(lock != vdo_int_map_get(zone->hash_lock_map, lock_key),
+ "unregistered hash lock must not be in the lock map");
+ }
+
+ VDO_ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&lock->waiters),
+ "hash lock returned to zone must have no waiters");
+ VDO_ASSERT_LOG_ONLY((lock->duplicate_lock == NULL),
+ "hash lock returned to zone must not reference a PBN lock");
+ VDO_ASSERT_LOG_ONLY((lock->state == VDO_HASH_LOCK_BYPASSING),
+ "returned hash lock must not be in use with state %s",
+ get_hash_lock_state_name(lock->state));
+ VDO_ASSERT_LOG_ONLY(list_empty(&lock->pool_node),
+ "hash lock returned to zone must not be in a pool ring");
+ VDO_ASSERT_LOG_ONLY(list_empty(&lock->duplicate_ring),
+ "hash lock returned to zone must not reference DataVIOs");
return_hash_lock_to_pool(zone, lock);
}
struct allocation *allocation = &data_vio->allocation;
struct hash_lock *hash_lock = data_vio->hash_lock;
- ASSERT_LOG_ONLY(data_vio->new_mapped.pbn == allocation->pbn,
- "transferred lock must be for the block written");
+ VDO_ASSERT_LOG_ONLY(data_vio->new_mapped.pbn == allocation->pbn,
+ "transferred lock must be for the block written");
allocation->pbn = VDO_ZERO_BLOCK;
- ASSERT_LOG_ONLY(vdo_is_pbn_read_lock(allocation->lock),
- "must have downgraded the allocation lock before transfer");
+ VDO_ASSERT_LOG_ONLY(vdo_is_pbn_read_lock(allocation->lock),
+ "must have downgraded the allocation lock before transfer");
hash_lock->duplicate = data_vio->new_mapped;
data_vio->duplicate = data_vio->new_mapped;
{
bool claimed;
- ASSERT_LOG_ONLY(vdo_get_duplicate_lock(data_vio) == NULL,
- "a duplicate PBN lock should not exist when writing");
- ASSERT_LOG_ONLY(vdo_is_state_compressed(data_vio->new_mapped.state),
- "lock transfer must be for a compressed write");
+ VDO_ASSERT_LOG_ONLY(vdo_get_duplicate_lock(data_vio) == NULL,
+ "a duplicate PBN lock should not exist when writing");
+ VDO_ASSERT_LOG_ONLY(vdo_is_state_compressed(data_vio->new_mapped.state),
+ "lock transfer must be for a compressed write");
assert_data_vio_in_new_mapped_zone(data_vio);
/* First sharer downgrades the lock. */
* deduplicating against it before our incRef.
*/
claimed = vdo_claim_pbn_lock_increment(pbn_lock);
- ASSERT_LOG_ONLY(claimed, "impossible to fail to claim an initial increment");
+ VDO_ASSERT_LOG_ONLY(claimed, "impossible to fail to claim an initial increment");
}
static void dedupe_kobj_release(struct kobject *directory)
*/
if (!change_context_state(context, DEDUPE_CONTEXT_TIMED_OUT,
DEDUPE_CONTEXT_TIMED_OUT_COMPLETE)) {
- ASSERT_LOG_ONLY(false, "uds request was timed out (state %d)",
- atomic_read(&context->state));
+ VDO_ASSERT_LOG_ONLY(false, "uds request was timed out (state %d)",
+ atomic_read(&context->state));
}
uds_funnel_queue_put(context->zone->timed_out_complete, &context->queue_entry);
if (recycled > 0)
WRITE_ONCE(zone->active, zone->active - recycled);
- ASSERT_LOG_ONLY(READ_ONCE(zone->active) == 0, "all contexts inactive");
+ VDO_ASSERT_LOG_ONLY(READ_ONCE(zone->active) == 0, "all contexts inactive");
vdo_finish_draining(&zone->state);
}
struct vdo_work_queue *current_work_queue;
const struct admin_state_code *code = vdo_get_admin_state_code(&vdo->admin.state);
- ASSERT_LOG_ONLY(code->normal, "vdo should not receive bios while in state %s",
- code->name);
+ VDO_ASSERT_LOG_ONLY(code->normal, "vdo should not receive bios while in state %s",
+ code->name);
/* Count all incoming bios. */
vdo_count_bios(&vdo->stats.bios_in, bio);
/* Assert that we are operating on the correct thread for the current phase. */
static void assert_admin_phase_thread(struct vdo *vdo, const char *what)
{
- ASSERT_LOG_ONLY(vdo_get_callback_thread_id() == get_thread_id_for_phase(vdo),
- "%s on correct thread for %s", what,
- ADMIN_PHASE_NAMES[vdo->admin.phase]);
+ VDO_ASSERT_LOG_ONLY(vdo_get_callback_thread_id() == get_thread_id_for_phase(vdo),
+ "%s on correct thread for %s", what,
+ ADMIN_PHASE_NAMES[vdo->admin.phase]);
}
/**
{
mutex_lock(&instances_lock);
if (instance >= instances.bit_count) {
- ASSERT_LOG_ONLY(false,
- "instance number %u must be less than bit count %u",
- instance, instances.bit_count);
+ VDO_ASSERT_LOG_ONLY(false,
+ "instance number %u must be less than bit count %u",
+ instance, instances.bit_count);
} else if (test_bit(instance, instances.words) == 0) {
- ASSERT_LOG_ONLY(false, "instance number %u must be allocated", instance);
+ VDO_ASSERT_LOG_ONLY(false, "instance number %u must be allocated", instance);
} else {
__clear_bit(instance, instances.words);
instances.count -= 1;
if (instance >= instances.bit_count) {
/* Nothing free after next, so wrap around to instance zero. */
instance = find_first_zero_bit(instances.words, instances.bit_count);
- result = ASSERT(instance < instances.bit_count,
- "impossibly, no zero bit found");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(instance < instances.bit_count,
+ "impossibly, no zero bit found");
+ if (result != VDO_SUCCESS)
return result;
}
uds_log_info("Preparing to resize physical to %llu",
(unsigned long long) new_physical_blocks);
- ASSERT_LOG_ONLY((new_physical_blocks > current_physical_blocks),
- "New physical size is larger than current physical size");
+ VDO_ASSERT_LOG_ONLY((new_physical_blocks > current_physical_blocks),
+ "New physical size is larger than current physical size");
result = perform_admin_operation(vdo, PREPARE_GROW_PHYSICAL_PHASE_START,
check_may_grow_physical,
finish_operation_callback,
uds_log_info("Preparing to resize logical to %llu",
(unsigned long long) config->logical_blocks);
- ASSERT_LOG_ONLY((config->logical_blocks > logical_blocks),
- "New logical size is larger than current size");
+ VDO_ASSERT_LOG_ONLY((config->logical_blocks > logical_blocks),
+ "New logical size is larger than current size");
result = vdo_prepare_to_grow_block_map(vdo->block_map,
config->logical_blocks);
if (dm_registered)
dm_unregister_target(&vdo_target_bio);
- ASSERT_LOG_ONLY(instances.count == 0,
- "should have no instance numbers still in use, but have %u",
- instances.count);
+ VDO_ASSERT_LOG_ONLY(instances.count == 0,
+ "should have no instance numbers still in use, but have %u",
+ instances.count);
vdo_free(instances.words);
memset(&instances, 0, sizeof(struct instance_tracker));
decode_volume_geometry(block, &offset, geometry, header.version.major_version);
- result = ASSERT(header.size == offset + sizeof(u32),
- "should have decoded up to the geometry checksum");
+ result = VDO_ASSERT(header.size == offset + sizeof(u32),
+ "should have decoded up to the geometry checksum");
if (result != VDO_SUCCESS)
return result;
initial_offset = *offset;
decode_u64_le(buffer, offset, &flat_page_origin);
- result = ASSERT(flat_page_origin == VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN,
- "Flat page origin must be %u (recorded as %llu)",
- VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN,
- (unsigned long long) state->flat_page_origin);
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(flat_page_origin == VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN,
+ "Flat page origin must be %u (recorded as %llu)",
+ VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN,
+ (unsigned long long) state->flat_page_origin);
+ if (result != VDO_SUCCESS)
return result;
decode_u64_le(buffer, offset, &flat_page_count);
- result = ASSERT(flat_page_count == 0,
- "Flat page count must be 0 (recorded as %llu)",
- (unsigned long long) state->flat_page_count);
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(flat_page_count == 0,
+ "Flat page count must be 0 (recorded as %llu)",
+ (unsigned long long) state->flat_page_count);
+ if (result != VDO_SUCCESS)
return result;
decode_u64_le(buffer, offset, &root_origin);
decode_u64_le(buffer, offset, &root_count);
- result = ASSERT(VDO_BLOCK_MAP_HEADER_2_0.size == *offset - initial_offset,
- "decoded block map component size must match header size");
+ result = VDO_ASSERT(VDO_BLOCK_MAP_HEADER_2_0.size == *offset - initial_offset,
+ "decoded block map component size must match header size");
if (result != VDO_SUCCESS)
return result;
encode_u64_le(buffer, offset, state.root_origin);
encode_u64_le(buffer, offset, state.root_count);
- ASSERT_LOG_ONLY(VDO_BLOCK_MAP_HEADER_2_0.size == *offset - initial_offset,
- "encoded block map component size must match header size");
+ VDO_ASSERT_LOG_ONLY(VDO_BLOCK_MAP_HEADER_2_0.size == *offset - initial_offset,
+ "encoded block map component size must match header size");
}
/**
encode_u64_le(buffer, offset, state.logical_blocks_used);
encode_u64_le(buffer, offset, state.block_map_data_blocks);
- ASSERT_LOG_ONLY(VDO_RECOVERY_JOURNAL_HEADER_7_0.size == *offset - initial_offset,
- "encoded recovery journal component size must match header size");
+ VDO_ASSERT_LOG_ONLY(VDO_RECOVERY_JOURNAL_HEADER_7_0.size == *offset - initial_offset,
+ "encoded recovery journal component size must match header size");
}
/**
decode_u64_le(buffer, offset, &logical_blocks_used);
decode_u64_le(buffer, offset, &block_map_data_blocks);
- result = ASSERT(VDO_RECOVERY_JOURNAL_HEADER_7_0.size == *offset - initial_offset,
- "decoded recovery journal component size must match header size");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(VDO_RECOVERY_JOURNAL_HEADER_7_0.size == *offset - initial_offset,
+ "decoded recovery journal component size must match header size");
+ if (result != VDO_SUCCESS)
return result;
*state = (struct recovery_journal_state_7_0) {
encode_u64_le(buffer, offset, state.last_block);
buffer[(*offset)++] = state.zone_count;
- ASSERT_LOG_ONLY(VDO_SLAB_DEPOT_HEADER_2_0.size == *offset - initial_offset,
- "encoded block map component size must match header size");
+ VDO_ASSERT_LOG_ONLY(VDO_SLAB_DEPOT_HEADER_2_0.size == *offset - initial_offset,
+ "encoded block map component size must match header size");
}
/**
decode_u64_le(buffer, offset, &last_block);
zone_count = buffer[(*offset)++];
- result = ASSERT(VDO_SLAB_DEPOT_HEADER_2_0.size == *offset - initial_offset,
- "decoded slab depot component size must match header size");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(VDO_SLAB_DEPOT_HEADER_2_0.size == *offset - initial_offset,
+ "decoded slab depot component size must match header size");
+ if (result != VDO_SUCCESS)
return result;
*state = (struct slab_depot_state_2_0) {
struct partition *partition;
int result = vdo_get_partition(layout, id, &partition);
- ASSERT_LOG_ONLY(result == VDO_SUCCESS, "layout has expected partition: %u", id);
+ VDO_ASSERT_LOG_ONLY(result == VDO_SUCCESS, "layout has expected partition: %u", id);
return partition;
}
struct header header = VDO_LAYOUT_HEADER_3_0;
BUILD_BUG_ON(sizeof(enum partition_id) != sizeof(u8));
- ASSERT_LOG_ONLY(layout->num_partitions <= U8_MAX,
- "layout partition count must fit in a byte");
+ VDO_ASSERT_LOG_ONLY(layout->num_partitions <= U8_MAX,
+ "layout partition count must fit in a byte");
vdo_encode_header(buffer, offset, &header);
encode_u64_le(buffer, offset, layout->last_free);
buffer[(*offset)++] = layout->num_partitions;
- ASSERT_LOG_ONLY(sizeof(struct layout_3_0) == *offset - initial_offset,
- "encoded size of a layout header must match structure");
+ VDO_ASSERT_LOG_ONLY(sizeof(struct layout_3_0) == *offset - initial_offset,
+ "encoded size of a layout header must match structure");
for (partition = layout->head; partition != NULL; partition = partition->next) {
buffer[(*offset)++] = partition->id;
encode_u64_le(buffer, offset, partition->count);
}
- ASSERT_LOG_ONLY(header.size == *offset - initial_offset,
- "encoded size of a layout must match header size");
+ VDO_ASSERT_LOG_ONLY(header.size == *offset - initial_offset,
+ "encoded size of a layout must match header size");
}
static int decode_layout(u8 *buffer, size_t *offset, physical_block_number_t start,
.partition_count = partition_count,
};
- result = ASSERT(sizeof(struct layout_3_0) == *offset - initial_offset,
- "decoded size of a layout header must match structure");
+ result = VDO_ASSERT(sizeof(struct layout_3_0) == *offset - initial_offset,
+ "decoded size of a layout header must match structure");
if (result != VDO_SUCCESS)
return result;
struct slab_config slab_config;
int result;
- result = ASSERT(config->slab_size > 0, "slab size unspecified");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(config->slab_size > 0, "slab size unspecified");
+ if (result != VDO_SUCCESS)
return result;
- result = ASSERT(is_power_of_2(config->slab_size),
- "slab size must be a power of two");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(is_power_of_2(config->slab_size),
+ "slab size must be a power of two");
+ if (result != VDO_SUCCESS)
return result;
- result = ASSERT(config->slab_size <= (1 << MAX_VDO_SLAB_BITS),
- "slab size must be less than or equal to 2^%d",
- MAX_VDO_SLAB_BITS);
+ result = VDO_ASSERT(config->slab_size <= (1 << MAX_VDO_SLAB_BITS),
+ "slab size must be less than or equal to 2^%d",
+ MAX_VDO_SLAB_BITS);
if (result != VDO_SUCCESS)
return result;
- result = ASSERT(config->slab_journal_blocks >= MINIMUM_VDO_SLAB_JOURNAL_BLOCKS,
- "slab journal size meets minimum size");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(config->slab_journal_blocks >= MINIMUM_VDO_SLAB_JOURNAL_BLOCKS,
+ "slab journal size meets minimum size");
+ if (result != VDO_SUCCESS)
return result;
- result = ASSERT(config->slab_journal_blocks <= config->slab_size,
- "slab journal size is within expected bound");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(config->slab_journal_blocks <= config->slab_size,
+ "slab journal size is within expected bound");
+ if (result != VDO_SUCCESS)
return result;
result = vdo_configure_slab(config->slab_size, config->slab_journal_blocks,
if (result != VDO_SUCCESS)
return result;
- result = ASSERT((slab_config.data_blocks >= 1),
- "slab must be able to hold at least one block");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT((slab_config.data_blocks >= 1),
+ "slab must be able to hold at least one block");
+ if (result != VDO_SUCCESS)
return result;
- result = ASSERT(config->physical_blocks > 0, "physical blocks unspecified");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(config->physical_blocks > 0, "physical blocks unspecified");
+ if (result != VDO_SUCCESS)
return result;
- result = ASSERT(config->physical_blocks <= MAXIMUM_VDO_PHYSICAL_BLOCKS,
- "physical block count %llu exceeds maximum %llu",
- (unsigned long long) config->physical_blocks,
- (unsigned long long) MAXIMUM_VDO_PHYSICAL_BLOCKS);
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(config->physical_blocks <= MAXIMUM_VDO_PHYSICAL_BLOCKS,
+ "physical block count %llu exceeds maximum %llu",
+ (unsigned long long) config->physical_blocks,
+ (unsigned long long) MAXIMUM_VDO_PHYSICAL_BLOCKS);
+ if (result != VDO_SUCCESS)
return VDO_OUT_OF_RANGE;
if (physical_block_count != config->physical_blocks) {
}
if (logical_block_count > 0) {
- result = ASSERT((config->logical_blocks > 0),
- "logical blocks unspecified");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT((config->logical_blocks > 0),
+ "logical blocks unspecified");
+ if (result != VDO_SUCCESS)
return result;
if (logical_block_count != config->logical_blocks) {
}
}
- result = ASSERT(config->logical_blocks <= MAXIMUM_VDO_LOGICAL_BLOCKS,
- "logical blocks too large");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(config->logical_blocks <= MAXIMUM_VDO_LOGICAL_BLOCKS,
+ "logical blocks too large");
+ if (result != VDO_SUCCESS)
return result;
- result = ASSERT(config->recovery_journal_size > 0,
- "recovery journal size unspecified");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(config->recovery_journal_size > 0,
+ "recovery journal size unspecified");
+ if (result != VDO_SUCCESS)
return result;
- result = ASSERT(is_power_of_2(config->recovery_journal_size),
- "recovery journal size must be a power of two");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(is_power_of_2(config->recovery_journal_size),
+ "recovery journal size must be a power of two");
+ if (result != VDO_SUCCESS)
return result;
return result;
if (result != VDO_SUCCESS)
return result;
- ASSERT_LOG_ONLY(*offset == VDO_COMPONENT_DATA_OFFSET + VDO_COMPONENT_DATA_SIZE,
- "All decoded component data was used");
+ VDO_ASSERT_LOG_ONLY(*offset == VDO_COMPONENT_DATA_OFFSET + VDO_COMPONENT_DATA_SIZE,
+ "All decoded component data was used");
return VDO_SUCCESS;
}
encode_slab_depot_state_2_0(buffer, offset, states->slab_depot);
encode_block_map_state_2_0(buffer, offset, states->block_map);
- ASSERT_LOG_ONLY(*offset == VDO_COMPONENT_DATA_OFFSET + VDO_COMPONENT_DATA_SIZE,
- "All super block component data was encoded");
+ VDO_ASSERT_LOG_ONLY(*offset == VDO_COMPONENT_DATA_OFFSET + VDO_COMPONENT_DATA_SIZE,
+ "All super block component data was encoded");
}
/**
* Even though the buffer is a full block, to avoid the potential corruption from a torn
* write, the entire encoding must fit in the first sector.
*/
- ASSERT_LOG_ONLY(offset <= VDO_SECTOR_SIZE,
- "entire superblock must fit in one sector");
+ VDO_ASSERT_LOG_ONLY(offset <= VDO_SECTOR_SIZE,
+ "entire superblock must fit in one sector");
}
/**
checksum = vdo_crc32(buffer, offset);
decode_u32_le(buffer, &offset, &saved_checksum);
- result = ASSERT(offset == VDO_SUPER_BLOCK_FIXED_SIZE + VDO_COMPONENT_DATA_SIZE,
- "must have decoded entire superblock payload");
+ result = VDO_ASSERT(offset == VDO_SUPER_BLOCK_FIXED_SIZE + VDO_COMPONENT_DATA_SIZE,
+ "must have decoded entire superblock payload");
if (result != VDO_SUCCESS)
return result;
.infos = infos,
};
- result = ASSERT(first_error < next_free_error, "well-defined error block range");
- if (result != UDS_SUCCESS)
+ result = VDO_ASSERT(first_error < next_free_error,
+ "well-defined error block range");
+ if (result != VDO_SUCCESS)
return result;
if (registered_errors.count == registered_errors.allocated) {
*/
static inline void assert_on_flusher_thread(struct flusher *flusher, const char *caller)
{
- ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == flusher->thread_id),
- "%s() called from flusher thread", caller);
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == flusher->thread_id),
+ "%s() called from flusher thread", caller);
}
/**
int result;
assert_on_flusher_thread(flusher, __func__);
- result = ASSERT(vdo_is_state_normal(&flusher->state),
- "flusher is in normal operation");
+ result = VDO_ASSERT(vdo_is_state_normal(&flusher->state),
+ "flusher is in normal operation");
if (result != VDO_SUCCESS) {
vdo_enter_read_only_mode(flusher->vdo, result);
vdo_complete_flush(flush);
if (flush->flush_generation >= oldest_active_generation)
return;
- ASSERT_LOG_ONLY((flush->flush_generation ==
- flusher->first_unacknowledged_generation),
- "acknowledged next expected flush, %llu, was: %llu",
- (unsigned long long) flusher->first_unacknowledged_generation,
- (unsigned long long) flush->flush_generation);
+ VDO_ASSERT_LOG_ONLY((flush->flush_generation ==
+ flusher->first_unacknowledged_generation),
+ "acknowledged next expected flush, %llu, was: %llu",
+ (unsigned long long) flusher->first_unacknowledged_generation,
+ (unsigned long long) flush->flush_generation);
vdo_waitq_dequeue_waiter(&flusher->pending_flushes);
vdo_complete_flush(flush);
flusher->first_unacknowledged_generation++;
struct flusher *flusher = vdo->flusher;
const struct admin_state_code *code = vdo_get_admin_state_code(&flusher->state);
- ASSERT_LOG_ONLY(!code->quiescent, "Flushing not allowed in state %s",
- code->name);
+ VDO_ASSERT_LOG_ONLY(!code->quiescent, "Flushing not allowed in state %s",
+ code->name);
spin_lock(&flusher->lock);
static void enqueue_work_queue_completion(struct simple_work_queue *queue,
struct vdo_completion *completion)
{
- ASSERT_LOG_ONLY(completion->my_queue == NULL,
- "completion %px (fn %px) to enqueue (%px) is not already queued (%px)",
- completion, completion->callback, queue, completion->my_queue);
+ VDO_ASSERT_LOG_ONLY(completion->my_queue == NULL,
+ "completion %px (fn %px) to enqueue (%px) is not already queued (%px)",
+ completion, completion->callback, queue, completion->my_queue);
if (completion->priority == VDO_WORK_Q_DEFAULT_PRIORITY)
completion->priority = queue->common.type->default_priority;
- if (ASSERT(completion->priority <= queue->common.type->max_priority,
- "priority is in range for queue") != VDO_SUCCESS)
+ if (VDO_ASSERT(completion->priority <= queue->common.type->max_priority,
+ "priority is in range for queue") != VDO_SUCCESS)
completion->priority = 0;
completion->my_queue = &queue->common;
static void process_completion(struct simple_work_queue *queue,
struct vdo_completion *completion)
{
- if (ASSERT(completion->my_queue == &queue->common,
- "completion %px from queue %px marked as being in this queue (%px)",
- completion, queue, completion->my_queue) == UDS_SUCCESS)
+ if (VDO_ASSERT(completion->my_queue == &queue->common,
+ "completion %px from queue %px marked as being in this queue (%px)",
+ completion, queue, completion->my_queue) == VDO_SUCCESS)
completion->my_queue = NULL;
vdo_run_completion(completion);
struct task_struct *thread = NULL;
int result;
- ASSERT_LOG_ONLY((type->max_priority <= VDO_WORK_Q_MAX_PRIORITY),
- "queue priority count %u within limit %u", type->max_priority,
- VDO_WORK_Q_MAX_PRIORITY);
+ VDO_ASSERT_LOG_ONLY((type->max_priority <= VDO_WORK_Q_MAX_PRIORITY),
+ "queue priority count %u within limit %u", type->max_priority,
+ VDO_WORK_Q_MAX_PRIORITY);
result = vdo_allocate(1, struct simple_work_queue, "simple work queue", &queue);
if (result != VDO_SUCCESS)
*/
static void assert_in_bio_zone(struct vio *vio)
{
- ASSERT_LOG_ONLY(!in_interrupt(), "not in interrupt context");
+ VDO_ASSERT_LOG_ONLY(!in_interrupt(), "not in interrupt context");
assert_vio_in_bio_zone(vio);
}
mutex_unlock(&bio_queue_data->lock);
/* We don't care about failure of int_map_put in this case. */
- ASSERT_LOG_ONLY(result == VDO_SUCCESS, "bio map insertion succeeds");
+ VDO_ASSERT_LOG_ONLY(result == VDO_SUCCESS, "bio map insertion succeeds");
return merged;
}
const struct admin_state_code *code = vdo_get_admin_state(completion->vdo);
- ASSERT_LOG_ONLY(!code->quiescent, "I/O not allowed in state %s", code->name);
- ASSERT_LOG_ONLY(vio->bio->bi_next == NULL, "metadata bio has no next bio");
+ VDO_ASSERT_LOG_ONLY(!code->quiescent, "I/O not allowed in state %s", code->name);
+ VDO_ASSERT_LOG_ONLY(vio->bio->bi_next == NULL, "metadata bio has no next bio");
vdo_reset_completion(completion);
completion->error_handler = error_handler;
static inline void assert_on_zone_thread(struct logical_zone *zone, const char *what)
{
- ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id),
- "%s() called on correct thread", what);
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == zone->thread_id),
+ "%s() called on correct thread", what);
}
/**
sequence_number_t expected_generation)
{
assert_on_zone_thread(zone, __func__);
- ASSERT_LOG_ONLY((zone->flush_generation == expected_generation),
- "logical zone %u flush generation %llu should be %llu before increment",
- zone->zone_number, (unsigned long long) zone->flush_generation,
- (unsigned long long) expected_generation);
+ VDO_ASSERT_LOG_ONLY((zone->flush_generation == expected_generation),
+ "logical zone %u flush generation %llu should be %llu before increment",
+ zone->zone_number, (unsigned long long) zone->flush_generation,
+ (unsigned long long) expected_generation);
zone->flush_generation++;
zone->ios_in_flush_generation = 0;
struct logical_zone *zone = data_vio->logical.zone;
assert_on_zone_thread(zone, __func__);
- ASSERT_LOG_ONLY(vdo_is_state_normal(&zone->state), "vdo state is normal");
+ VDO_ASSERT_LOG_ONLY(vdo_is_state_normal(&zone->state), "vdo state is normal");
data_vio->flush_generation = zone->flush_generation;
list_add_tail(&data_vio->write_entry, &zone->write_vios);
return;
list_del_init(&data_vio->write_entry);
- ASSERT_LOG_ONLY((zone->oldest_active_generation <= data_vio->flush_generation),
- "data_vio releasing lock on generation %llu is not older than oldest active generation %llu",
- (unsigned long long) data_vio->flush_generation,
- (unsigned long long) zone->oldest_active_generation);
+ VDO_ASSERT_LOG_ONLY((zone->oldest_active_generation <= data_vio->flush_generation),
+ "data_vio releasing lock on generation %llu is not older than oldest active generation %llu",
+ (unsigned long long) data_vio->flush_generation,
+ (unsigned long long) zone->oldest_active_generation);
if (!update_oldest_active_generation(zone) || zone->notifying)
return;
void vdo_memory_exit(void)
{
- ASSERT_LOG_ONLY(memory_stats.kmalloc_bytes == 0,
- "kmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel",
- memory_stats.kmalloc_bytes, memory_stats.kmalloc_blocks);
- ASSERT_LOG_ONLY(memory_stats.vmalloc_bytes == 0,
- "vmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel",
- memory_stats.vmalloc_bytes, memory_stats.vmalloc_blocks);
+ VDO_ASSERT_LOG_ONLY(memory_stats.kmalloc_bytes == 0,
+ "kmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel",
+ memory_stats.kmalloc_bytes, memory_stats.kmalloc_blocks);
+ VDO_ASSERT_LOG_ONLY(memory_stats.vmalloc_bytes == 0,
+ "vmalloc memory used (%zd bytes in %zd blocks) is returned to the kernel",
+ memory_stats.vmalloc_bytes, memory_stats.vmalloc_blocks);
uds_log_debug("peak usage %zd bytes", memory_stats.peak_bytes);
}
*/
static inline void assert_on_packer_thread(struct packer *packer, const char *caller)
{
- ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == packer->thread_id),
- "%s() called from packer thread", caller);
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == packer->thread_id),
+ "%s() called from packer thread", caller);
}
/**
assert_on_packer_thread(packer, __func__);
- result = ASSERT((status.stage == DATA_VIO_COMPRESSING),
- "attempt to pack data_vio not ready for packing, stage: %u",
- status.stage);
+ result = VDO_ASSERT((status.stage == DATA_VIO_COMPRESSING),
+ "attempt to pack data_vio not ready for packing, stage: %u",
+ status.stage);
if (result != VDO_SUCCESS)
return;
lock_holder = vdo_forget(data_vio->compression.lock_holder);
bin = lock_holder->compression.bin;
- ASSERT_LOG_ONLY((bin != NULL), "data_vio in packer has a bin");
+ VDO_ASSERT_LOG_ONLY((bin != NULL), "data_vio in packer has a bin");
slot = lock_holder->compression.slot;
bin->slots_used--;
/* Utilities for asserting that certain conditions are met */
#define STRINGIFY(X) #X
-#define STRINGIFY_VALUE(X) STRINGIFY(X)
/*
 * A hack to apply the "warn if unused" attribute to an integral expression.
 * With optimization enabled, this function contributes no additional instructions, but
* the warn_unused_result attribute still applies to the code calling it.
*/
-static inline int __must_check uds_must_use(int value)
+static inline int __must_check vdo_must_use(int value)
{
return value;
}
/* Assert that an expression is true and return an error if it is not. */
-#define ASSERT(expr, ...) uds_must_use(__UDS_ASSERT(expr, __VA_ARGS__))
+#define VDO_ASSERT(expr, ...) vdo_must_use(__VDO_ASSERT(expr, __VA_ARGS__))
/* Log a message if the expression is not true. */
-#define ASSERT_LOG_ONLY(expr, ...) __UDS_ASSERT(expr, __VA_ARGS__)
+#define VDO_ASSERT_LOG_ONLY(expr, ...) __VDO_ASSERT(expr, __VA_ARGS__)
-#define __UDS_ASSERT(expr, ...) \
- (likely(expr) ? UDS_SUCCESS \
+/* For use by UDS */
+#define ASSERT(expr, ...) VDO_ASSERT(expr, __VA_ARGS__)
+#define ASSERT_LOG_ONLY(expr, ...) __VDO_ASSERT(expr, __VA_ARGS__)
+
+#define __VDO_ASSERT(expr, ...) \
+ (likely(expr) ? VDO_SUCCESS \
: uds_assertion_failed(STRINGIFY(expr), __FILE__, __LINE__, __VA_ARGS__))
/* Log an assertion failure message. */
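/*
 * Usage sketch (illustration only, not part of this patch): VDO_ASSERT() is
 * wrapped in vdo_must_use(), so its VDO_SUCCESS-or-error result must be
 * checked, while VDO_ASSERT_LOG_ONLY() merely logs on failure. The struct
 * and function below are hypothetical examples of the two calling patterns
 * this patch converts throughout the driver.
 */
struct example_table {
	bool initialized;
	unsigned int slot_count;
	bool *slots;
};

static int example_claim_slot(struct example_table *table, unsigned int slot)
{
	int result;

	/* Log-only: a violated invariant is logged but execution continues. */
	VDO_ASSERT_LOG_ONLY(table->initialized, "table must be initialized");

	/* Checked: the caller must test the result against VDO_SUCCESS. */
	result = VDO_ASSERT(slot < table->slot_count,
			    "slot %u is within bounds %u",
			    slot, table->slot_count);
	if (result != VDO_SUCCESS)
		return result;

	table->slots[slot] = true;
	return VDO_SUCCESS;
}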
*/
void vdo_downgrade_pbn_write_lock(struct pbn_lock *lock, bool compressed_write)
{
- ASSERT_LOG_ONLY(!vdo_is_pbn_read_lock(lock),
- "PBN lock must not already have been downgraded");
- ASSERT_LOG_ONLY(!has_lock_type(lock, VIO_BLOCK_MAP_WRITE_LOCK),
- "must not downgrade block map write locks");
- ASSERT_LOG_ONLY(lock->holder_count == 1,
- "PBN write lock should have one holder but has %u",
- lock->holder_count);
+ VDO_ASSERT_LOG_ONLY(!vdo_is_pbn_read_lock(lock),
+ "PBN lock must not already have been downgraded");
+ VDO_ASSERT_LOG_ONLY(!has_lock_type(lock, VIO_BLOCK_MAP_WRITE_LOCK),
+ "must not downgrade block map write locks");
+ VDO_ASSERT_LOG_ONLY(lock->holder_count == 1,
+ "PBN write lock should have one holder but has %u",
+ lock->holder_count);
/*
* data_vio write locks are downgraded in place--the writer retains the hold on the lock.
* If this was a compressed write, the holder has not yet journaled its own inc ref,
*/
void vdo_assign_pbn_lock_provisional_reference(struct pbn_lock *lock)
{
- ASSERT_LOG_ONLY(!lock->has_provisional_reference,
- "lock does not have a provisional reference");
+ VDO_ASSERT_LOG_ONLY(!lock->has_provisional_reference,
+ "lock does not have a provisional reference");
lock->has_provisional_reference = true;
}
INIT_LIST_HEAD(&idle->entry);
list_add_tail(&idle->entry, &pool->idle_list);
- ASSERT_LOG_ONLY(pool->borrowed > 0, "shouldn't return more than borrowed");
+ VDO_ASSERT_LOG_ONLY(pool->borrowed > 0, "shouldn't return more than borrowed");
pool->borrowed -= 1;
}
if (pool == NULL)
return;
- ASSERT_LOG_ONLY(pool->borrowed == 0,
- "All PBN locks must be returned to the pool before it is freed, but %zu locks are still on loan",
- pool->borrowed);
+ VDO_ASSERT_LOG_ONLY(pool->borrowed == 0,
+ "All PBN locks must be returned to the pool before it is freed, but %zu locks are still on loan",
+ pool->borrowed);
vdo_free(pool);
}
"no free PBN locks left to borrow");
pool->borrowed += 1;
- result = ASSERT(!list_empty(&pool->idle_list),
- "idle list should not be empty if pool not at capacity");
+ result = VDO_ASSERT(!list_empty(&pool->idle_list),
+ "idle list should not be empty if pool not at capacity");
if (result != VDO_SUCCESS)
return result;
result = borrow_pbn_lock_from_pool(zone->lock_pool, type, &new_lock);
if (result != VDO_SUCCESS) {
- ASSERT_LOG_ONLY(false, "must always be able to borrow a PBN lock");
+ VDO_ASSERT_LOG_ONLY(false, "must always be able to borrow a PBN lock");
return result;
}
if (lock != NULL) {
/* The lock is already held, so we don't need the borrowed one. */
return_pbn_lock_to_pool(zone->lock_pool, vdo_forget(new_lock));
- result = ASSERT(lock->holder_count > 0, "physical block %llu lock held",
- (unsigned long long) pbn);
+ result = VDO_ASSERT(lock->holder_count > 0, "physical block %llu lock held",
+ (unsigned long long) pbn);
if (result != VDO_SUCCESS)
return result;
*lock_ptr = lock;
int result;
struct pbn_lock *lock;
- ASSERT_LOG_ONLY(allocation->lock == NULL,
- "must not allocate a block while already holding a lock on one");
+ VDO_ASSERT_LOG_ONLY(allocation->lock == NULL,
+ "must not allocate a block while already holding a lock on one");
result = vdo_allocate_block(allocation->zone->allocator, &allocation->pbn);
if (result != VDO_SUCCESS)
if (lock == NULL)
return;
- ASSERT_LOG_ONLY(lock->holder_count > 0,
- "should not be releasing a lock that is not held");
+ VDO_ASSERT_LOG_ONLY(lock->holder_count > 0,
+ "should not be releasing a lock that is not held");
lock->holder_count -= 1;
if (lock->holder_count > 0) {
}
holder = vdo_int_map_remove(zone->pbn_operations, locked_pbn);
- ASSERT_LOG_ONLY((lock == holder), "physical block lock mismatch for block %llu",
- (unsigned long long) locked_pbn);
+ VDO_ASSERT_LOG_ONLY((lock == holder), "physical block lock mismatch for block %llu",
+ (unsigned long long) locked_pbn);
release_pbn_lock_provisional_reference(lock, locked_pbn, zone->allocator);
return_pbn_lock_to_pool(zone->lock_pool, lock);
void vdo_priority_table_enqueue(struct priority_table *table, unsigned int priority,
struct list_head *entry)
{
- ASSERT_LOG_ONLY((priority <= table->max_priority),
- "entry priority must be valid for the table");
+ VDO_ASSERT_LOG_ONLY((priority <= table->max_priority),
+ "entry priority must be valid for the table");
/* Append the entry to the queue in the specified bucket. */
list_move_tail(entry, &table->buckets[priority].queue);
/* Pairs with barrier in vdo_release_journal_entry_lock() */
smp_rmb();
- ASSERT_LOG_ONLY((decrements <= journal_value),
- "journal zone lock counter must not underflow");
+ VDO_ASSERT_LOG_ONLY((decrements <= journal_value),
+ "journal zone lock counter must not underflow");
return (journal_value != decrements);
}
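/*
 * Generic sketch of the "pairs with" barrier convention referenced above
 * (illustration only; the names are hypothetical and this is not the VDO
 * implementation). A writer publishes data and then sets a flag with a write
 * barrier in between; a reader that observes the flag issues a read barrier
 * before reading the data, so it cannot see the flag without also seeing the
 * data that was published ahead of it.
 */
static int example_data;
static int example_ready;

static void example_publish(int value)
{
	example_data = value;
	smp_wmb();	/* pairs with smp_rmb() in example_consume() */
	WRITE_ONCE(example_ready, 1);
}

static int example_consume(int *value_out)
{
	if (!READ_ONCE(example_ready))
		return -EAGAIN;

	smp_rmb();	/* pairs with smp_wmb() in example_publish() */
	*value_out = example_data;
	return 0;
}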
lock_number = vdo_get_recovery_journal_block_number(journal, sequence_number);
current_value = get_counter(journal, lock_number, zone_type, zone_id);
- ASSERT_LOG_ONLY((*current_value >= 1),
- "decrement of lock counter must not underflow");
+ VDO_ASSERT_LOG_ONLY((*current_value >= 1),
+ "decrement of lock counter must not underflow");
*current_value -= 1;
if (zone_type == VDO_ZONE_TYPE_JOURNAL) {
static void assert_on_journal_thread(struct recovery_journal *journal,
const char *function_name)
{
- ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == journal->thread_id),
- "%s() called on journal thread", function_name);
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == journal->thread_id),
+ "%s() called on journal thread", function_name);
}
/**
if (vdo_is_state_saving(&journal->state)) {
if (journal->active_block != NULL) {
- ASSERT_LOG_ONLY(((result == VDO_READ_ONLY) ||
- !is_block_dirty(journal->active_block)),
- "journal being saved has clean active block");
+ VDO_ASSERT_LOG_ONLY(((result == VDO_READ_ONLY) ||
+ !is_block_dirty(journal->active_block)),
+ "journal being saved has clean active block");
recycle_journal_block(journal->active_block);
}
- ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks),
- "all blocks in a journal being saved must be inactive");
+ VDO_ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks),
+ "all blocks in a journal being saved must be inactive");
}
vdo_finish_draining_with_result(&journal->state, result);
* requires opening before use.
*/
if (!vdo_is_state_quiescent(&journal->state)) {
- ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks),
- "journal being freed has no active tail blocks");
+ VDO_ASSERT_LOG_ONLY(list_empty(&journal->active_tail_blocks),
+ "journal being freed has no active tail blocks");
} else if (!vdo_is_state_saved(&journal->state) &&
!list_empty(&journal->active_tail_blocks)) {
uds_log_warning("journal being freed has uncommitted entries");
atomic_t *decrement_counter = get_decrement_counter(journal, lock_number);
journal_value = get_counter(journal, lock_number, VDO_ZONE_TYPE_JOURNAL, 0);
- ASSERT_LOG_ONLY((*journal_value == atomic_read(decrement_counter)),
- "count to be initialized not in use");
+ VDO_ASSERT_LOG_ONLY((*journal_value == atomic_read(decrement_counter)),
+ "count to be initialized not in use");
*journal_value = journal->entries_per_block + 1;
atomic_set(decrement_counter, 0);
}
int result = (is_read_only(journal) ? VDO_READ_ONLY : VDO_SUCCESS);
bool has_decrement;
- ASSERT_LOG_ONLY(vdo_before_journal_point(&journal->commit_point,
- &data_vio->recovery_journal_point),
- "DataVIOs released from recovery journal in order. Recovery journal point is (%llu, %u), but commit waiter point is (%llu, %u)",
- (unsigned long long) journal->commit_point.sequence_number,
- journal->commit_point.entry_count,
- (unsigned long long) data_vio->recovery_journal_point.sequence_number,
- data_vio->recovery_journal_point.entry_count);
+ VDO_ASSERT_LOG_ONLY(vdo_before_journal_point(&journal->commit_point,
+ &data_vio->recovery_journal_point),
+ "DataVIOs released from recovery journal in order. Recovery journal point is (%llu, %u), but commit waiter point is (%llu, %u)",
+ (unsigned long long) journal->commit_point.sequence_number,
+ journal->commit_point.entry_count,
+ (unsigned long long) data_vio->recovery_journal_point.sequence_number,
+ data_vio->recovery_journal_point.entry_count);
journal->commit_point = data_vio->recovery_journal_point;
data_vio->last_async_operation = VIO_ASYNC_OP_UPDATE_REFERENCE_COUNTS;
journal->last_write_acknowledged = block->sequence_number;
last_active_block = get_journal_block(&journal->active_tail_blocks);
- ASSERT_LOG_ONLY((block->sequence_number >= last_active_block->sequence_number),
- "completed journal write is still active");
+ VDO_ASSERT_LOG_ONLY((block->sequence_number >= last_active_block->sequence_number),
+ "completed journal write is still active");
notify_commit_waiters(journal);
return;
}
- ASSERT_LOG_ONLY(data_vio->recovery_sequence_number == 0,
- "journal lock not held for new entry");
+ VDO_ASSERT_LOG_ONLY(data_vio->recovery_sequence_number == 0,
+ "journal lock not held for new entry");
vdo_advance_journal_point(&journal->append_point, journal->entries_per_block);
vdo_waitq_enqueue_waiter(&journal->entry_waiters, &data_vio->waiter);
if (sequence_number == 0)
return;
- ASSERT_LOG_ONLY((zone_type != VDO_ZONE_TYPE_JOURNAL),
- "invalid lock count increment from journal zone");
+ VDO_ASSERT_LOG_ONLY((zone_type != VDO_ZONE_TYPE_JOURNAL),
+ "invalid lock count increment from journal zone");
lock_number = vdo_get_recovery_journal_block_number(journal, sequence_number);
current_value = get_counter(journal, lock_number, zone_type, zone_id);
- ASSERT_LOG_ONLY(*current_value < U16_MAX,
- "increment of lock counter must not overflow");
+ VDO_ASSERT_LOG_ONLY(*current_value < U16_MAX,
+ "increment of lock counter must not overflow");
if (*current_value == 0) {
/*
if (needs_sort) {
struct numbered_block_mapping *just_sorted_entry =
sort_next_heap_element(repair);
- ASSERT_LOG_ONLY(just_sorted_entry < current_entry,
- "heap is returning elements in an unexpected order");
+ VDO_ASSERT_LOG_ONLY(just_sorted_entry < current_entry,
+ "heap is returning elements in an unexpected order");
}
current_entry--;
repair->current_entry = &repair->entries[repair->block_map_entry_count - 1];
first_sorted_entry = sort_next_heap_element(repair);
- ASSERT_LOG_ONLY(first_sorted_entry == repair->current_entry,
- "heap is returning elements in an unexpected order");
+ VDO_ASSERT_LOG_ONLY(first_sorted_entry == repair->current_entry,
+ "heap is returning elements in an unexpected order");
/* Prevent any page from being processed until all pages have been launched. */
repair->launching = true;
repair->block_map_entry_count++;
}
- result = ASSERT((repair->block_map_entry_count <= repair->entry_count),
- "approximate entry count is an upper bound");
+ result = VDO_ASSERT((repair->block_map_entry_count <= repair->entry_count),
+ "approximate entry count is an upper bound");
if (result != VDO_SUCCESS)
vdo_enter_read_only_mode(vdo, result);
struct slab_journal *dirty_journal;
struct list_head *dirty_list = &journal->slab->allocator->dirty_slab_journals;
- ASSERT_LOG_ONLY(journal->recovery_lock == 0, "slab journal was clean");
+ VDO_ASSERT_LOG_ONLY(journal->recovery_lock == 0, "slab journal was clean");
journal->recovery_lock = lock;
list_for_each_entry_reverse(dirty_journal, dirty_list, dirty_entry) {
{
block_count_t hint;
- ASSERT_LOG_ONLY((free_blocks < (1 << 23)), "free blocks must be less than 2^23");
+ VDO_ASSERT_LOG_ONLY((free_blocks < (1 << 23)), "free blocks must be less than 2^23");
if (free_blocks == 0)
return 0;
return;
}
- ASSERT_LOG_ONLY((adjustment != 0), "adjustment must be non-zero");
+ VDO_ASSERT_LOG_ONLY((adjustment != 0), "adjustment must be non-zero");
lock = get_lock(journal, sequence_number);
if (adjustment < 0) {
- ASSERT_LOG_ONLY((-adjustment <= lock->count),
- "adjustment %d of lock count %u for slab journal block %llu must not underflow",
- adjustment, lock->count,
- (unsigned long long) sequence_number);
+ VDO_ASSERT_LOG_ONLY((-adjustment <= lock->count),
+ "adjustment %d of lock count %u for slab journal block %llu must not underflow",
+ adjustment, lock->count,
+ (unsigned long long) sequence_number);
}
lock->count += adjustment;
struct slab_journal *journal = &slab->journal;
sequence_number_t block;
- ASSERT_LOG_ONLY(journal->tail_header.entry_count == 0,
- "vdo_slab journal's active block empty before reopening");
+ VDO_ASSERT_LOG_ONLY(journal->tail_header.entry_count == 0,
+ "vdo_slab journal's active block empty before reopening");
journal->head = journal->tail;
initialize_journal_state(journal);
/* Ensure no locks are spuriously held on an empty journal. */
for (block = 1; block <= journal->size; block++) {
- ASSERT_LOG_ONLY((get_lock(journal, block)->count == 0),
- "Scrubbed journal's block %llu is not locked",
- (unsigned long long) block);
+ VDO_ASSERT_LOG_ONLY((get_lock(journal, block)->count == 0),
+ "Scrubbed journal's block %llu is not locked",
+ (unsigned long long) block);
}
add_entries(journal);
/* Copy the tail block into the vio. */
memcpy(pooled->vio.data, journal->block, VDO_BLOCK_SIZE);
- ASSERT_LOG_ONLY(unused_entries >= 0, "vdo_slab journal block is not overfull");
+ VDO_ASSERT_LOG_ONLY(unused_entries >= 0, "vdo_slab journal block is not overfull");
if (unused_entries > 0) {
/*
* Release the per-entry locks for any unused entries in the block we are about to
struct packed_slab_journal_block *block = journal->block;
int result;
- result = ASSERT(vdo_before_journal_point(&journal->tail_header.recovery_point,
- &recovery_point),
- "recovery journal point is monotonically increasing, recovery point: %llu.%u, block recovery point: %llu.%u",
- (unsigned long long) recovery_point.sequence_number,
- recovery_point.entry_count,
- (unsigned long long) journal->tail_header.recovery_point.sequence_number,
- journal->tail_header.recovery_point.entry_count);
+ result = VDO_ASSERT(vdo_before_journal_point(&journal->tail_header.recovery_point,
+ &recovery_point),
+ "recovery journal point is monotonically increasing, recovery point: %llu.%u, block recovery point: %llu.%u",
+ (unsigned long long) recovery_point.sequence_number,
+ recovery_point.entry_count,
+ (unsigned long long) journal->tail_header.recovery_point.sequence_number,
+ journal->tail_header.recovery_point.entry_count);
if (result != VDO_SUCCESS) {
vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo, result);
return;
}
if (operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING) {
- result = ASSERT((journal->tail_header.entry_count <
- journal->full_entries_per_block),
- "block has room for full entries");
+ result = VDO_ASSERT((journal->tail_header.entry_count <
+ journal->full_entries_per_block),
+ "block has room for full entries");
if (result != VDO_SUCCESS) {
vdo_enter_read_only_mode(journal->slab->allocator->depot->vdo,
result);
*/
static void prioritize_slab(struct vdo_slab *slab)
{
- ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
- "a slab must not already be on a ring when prioritizing");
+ VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
+ "a slab must not already be on a ring when prioritizing");
slab->priority = calculate_slab_priority(slab);
vdo_priority_table_enqueue(slab->allocator->prioritized_slabs,
slab->priority, &slab->allocq_entry);
* the last time it was clean. We must release the per-entry slab journal lock for
* the entry associated with the update we are now doing.
*/
- result = ASSERT(is_valid_journal_point(slab_journal_point),
- "Reference count adjustments need slab journal points.");
+ result = VDO_ASSERT(is_valid_journal_point(slab_journal_point),
+ "Reference count adjustments need slab journal points.");
if (result != VDO_SUCCESS)
return result;
* scrubbing thresholds, this should never happen.
*/
if (lock->count > 0) {
- ASSERT_LOG_ONLY((journal->head + journal->size) == journal->tail,
- "New block has locks, but journal is not full");
+ VDO_ASSERT_LOG_ONLY((journal->head + journal->size) == journal->tail,
+ "New block has locks, but journal is not full");
/*
* The blocking threshold must let the journal fill up if the new
* block has locks; if the blocking threshold is smaller than the
* journal size, the new block cannot possibly have locks already.
*/
- ASSERT_LOG_ONLY((journal->blocking_threshold >= journal->size),
- "New block can have locks already iff blocking threshold is at the end of the journal");
+ VDO_ASSERT_LOG_ONLY((journal->blocking_threshold >= journal->size),
+ "New block can have locks already iff blocking threshold is at the end of the journal");
WRITE_ONCE(journal->events->disk_full_count,
journal->events->disk_full_count + 1);
int result;
size_t index, bytes;
- result = ASSERT(slab->reference_blocks == NULL,
- "vdo_slab %u doesn't allocate refcounts twice",
- slab->slab_number);
+ result = VDO_ASSERT(slab->reference_blocks == NULL,
+ "vdo_slab %u doesn't allocate refcounts twice",
+ slab->slab_number);
if (result != VDO_SUCCESS)
return result;
* 1. This is impossible, due to the scrubbing threshold, on a real system, so
* don't bother reading the (bogus) data off disk.
*/
- ASSERT_LOG_ONLY(((journal->size < 16) ||
- (journal->scrubbing_threshold < (journal->size - 1))),
- "Scrubbing threshold protects against reads of unwritten slab journal blocks");
+ VDO_ASSERT_LOG_ONLY(((journal->size < 16) ||
+ (journal->scrubbing_threshold < (journal->size - 1))),
+ "Scrubbing threshold protects against reads of unwritten slab journal blocks");
vdo_finish_loading_with_result(&slab->state,
allocate_counters_if_clean(slab));
return;
{
struct slab_scrubber *scrubber = &slab->allocator->scrubber;
- ASSERT_LOG_ONLY((slab->status != VDO_SLAB_REBUILT),
- "slab to be scrubbed is unrecovered");
+ VDO_ASSERT_LOG_ONLY((slab->status != VDO_SLAB_REBUILT),
+ "slab to be scrubbed is unrecovered");
if (slab->status != VDO_SLAB_REQUIRES_SCRUBBING)
return;
block_count_t free_blocks;
int result;
- ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
+ VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
"a requeued slab must not already be on a ring");
if (vdo_is_read_only(allocator->depot->vdo))
return;
free_blocks = slab->free_blocks;
- result = ASSERT((free_blocks <= allocator->depot->slab_config.data_blocks),
- "rebuilt slab %u must have a valid free block count (has %llu, expected maximum %llu)",
- slab->slab_number, (unsigned long long) free_blocks,
- (unsigned long long) allocator->depot->slab_config.data_blocks);
+ result = VDO_ASSERT((free_blocks <= allocator->depot->slab_config.data_blocks),
+ "rebuilt slab %u must have a valid free block count (has %llu, expected maximum %llu)",
+ slab->slab_number, (unsigned long long) free_blocks,
+ (unsigned long long) allocator->depot->slab_config.data_blocks);
if (result != VDO_SUCCESS) {
vdo_enter_read_only_mode(allocator->depot->vdo, result);
return;
* At the end of rebuild, the reference counters should be accurate to the end of the
* journal we just applied.
*/
- result = ASSERT(!vdo_before_journal_point(&last_entry_applied,
- &ref_counts_point),
- "Refcounts are not more accurate than the slab journal");
+ result = VDO_ASSERT(!vdo_before_journal_point(&last_entry_applied,
+ &ref_counts_point),
+ "Refcounts are not more accurate than the slab journal");
if (result != VDO_SUCCESS) {
abort_scrubbing(scrubber, result);
return;
static inline void assert_on_allocator_thread(thread_id_t thread_id,
const char *function_name)
{
- ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == thread_id),
- "%s called on correct thread", function_name);
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == thread_id),
+ "%s called on correct thread", function_name);
}
static void register_slab_with_allocator(struct block_allocator *allocator,
if (!search_reference_blocks(slab, &free_index))
return VDO_NO_SPACE;
- ASSERT_LOG_ONLY((slab->counters[free_index] == EMPTY_REFERENCE_COUNT),
- "free block must have ref count of zero");
+ VDO_ASSERT_LOG_ONLY((slab->counters[free_index] == EMPTY_REFERENCE_COUNT),
+ "free block must have ref count of zero");
make_provisional_reference(slab, free_index);
adjust_free_block_count(slab, false);
sequence_number_t recovery_lock)
{
if (recovery_lock > journal->recovery_lock) {
- ASSERT_LOG_ONLY((recovery_lock < journal->recovery_lock),
- "slab journal recovery lock is not older than the recovery journal head");
+ VDO_ASSERT_LOG_ONLY((recovery_lock < journal->recovery_lock),
+ "slab journal recovery lock is not older than the recovery journal head");
return false;
}
return VDO_INCREMENT_TOO_SMALL;
/* Generate the depot configuration for the new block count. */
- ASSERT_LOG_ONLY(depot->first_block == partition->offset,
- "New slab depot partition doesn't change origin");
+ VDO_ASSERT_LOG_ONLY(depot->first_block == partition->offset,
+ "New slab depot partition doesn't change origin");
result = vdo_configure_slab_depot(partition, depot->slab_config,
depot->zone_count, &new_state);
if (result != VDO_SUCCESS)
*/
void vdo_use_new_slabs(struct slab_depot *depot, struct vdo_completion *parent)
{
- ASSERT_LOG_ONLY(depot->new_slabs != NULL, "Must have new slabs to use");
+ VDO_ASSERT_LOG_ONLY(depot->new_slabs != NULL, "Must have new slabs to use");
vdo_schedule_operation(depot->action_manager,
VDO_ADMIN_STATE_SUSPENDED_OPERATION,
NULL, register_new_slabs,
return;
case VDO_DRAIN_ALLOCATOR_STEP_FINISHED:
- ASSERT_LOG_ONLY(!is_vio_pool_busy(allocator->vio_pool),
- "vio pool not busy");
+ VDO_ASSERT_LOG_ONLY(!is_vio_pool_busy(allocator->vio_pool),
+ "vio pool not busy");
vdo_finish_draining_with_result(&allocator->state, completion->result);
return;
list_add_tail_rcu(&new_thread->links, &registry->links);
spin_unlock(&registry->lock);
- ASSERT_LOG_ONLY(!found_it, "new thread not already in registry");
+ VDO_ASSERT_LOG_ONLY(!found_it, "new thread not already in registry");
if (found_it) {
/* Ensure no RCU iterators see it before re-initializing. */
synchronize_rcu();
}
spin_unlock(&registry->lock);
- ASSERT_LOG_ONLY(found_it, "thread found in registry");
+ VDO_ASSERT_LOG_ONLY(found_it, "thread found in registry");
if (found_it) {
/* Ensure no RCU iterators see it before re-initializing. */
synchronize_rcu();
type = &default_queue_type;
if (thread->queue != NULL) {
- return ASSERT(vdo_work_queue_type_is(thread->queue, type),
- "already constructed vdo thread %u is of the correct type",
- thread_id);
+ return VDO_ASSERT(vdo_work_queue_type_is(thread->queue, type),
+ "already constructed vdo thread %u is of the correct type",
+ thread_id);
}
thread->vdo = vdo;
int result;
write_lock(&registry.lock);
- result = ASSERT(filter_vdos_locked(vdo_is_equal, vdo) == NULL,
- "VDO not already registered");
+ result = VDO_ASSERT(filter_vdos_locked(vdo_is_equal, vdo) == NULL,
+ "VDO not already registered");
if (result == VDO_SUCCESS) {
INIT_LIST_HEAD(&vdo->registration);
list_add_tail(&vdo->registration, &registry.links);
struct read_only_listener *read_only_listener;
int result;
- result = ASSERT(thread_id != vdo->thread_config.dedupe_thread,
- "read only listener not registered on dedupe thread");
+ result = VDO_ASSERT(thread_id != vdo->thread_config.dedupe_thread,
+ "read only listener not registered on dedupe thread");
if (result != VDO_SUCCESS)
return result;
*/
void vdo_assert_on_admin_thread(const struct vdo *vdo, const char *name)
{
- ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.admin_thread),
- "%s called on admin thread", name);
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.admin_thread),
+ "%s called on admin thread", name);
}
/**
void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logical_zone,
const char *name)
{
- ASSERT_LOG_ONLY((vdo_get_callback_thread_id() ==
- vdo->thread_config.logical_threads[logical_zone]),
- "%s called on logical thread", name);
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() ==
+ vdo->thread_config.logical_threads[logical_zone]),
+ "%s called on logical thread", name);
}
/**
void vdo_assert_on_physical_zone_thread(const struct vdo *vdo,
zone_count_t physical_zone, const char *name)
{
- ASSERT_LOG_ONLY((vdo_get_callback_thread_id() ==
- vdo->thread_config.physical_threads[physical_zone]),
- "%s called on physical thread", name);
+ VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() ==
+ vdo->thread_config.physical_threads[physical_zone]),
+ "%s called on physical thread", name);
}
/**
/* With the PBN already checked, we should always succeed in finding a slab. */
slab = vdo_get_slab(vdo->depot, pbn);
- result = ASSERT(slab != NULL, "vdo_get_slab must succeed on all valid PBNs");
+ result = VDO_ASSERT(slab != NULL, "vdo_get_slab must succeed on all valid PBNs");
if (result != VDO_SUCCESS)
return result;
struct bio *bio;
int result;
- result = ASSERT(block_count <= MAX_BLOCKS_PER_VIO,
- "block count %u does not exceed maximum %u", block_count,
- MAX_BLOCKS_PER_VIO);
+ result = VDO_ASSERT(block_count <= MAX_BLOCKS_PER_VIO,
+ "block count %u does not exceed maximum %u", block_count,
+ MAX_BLOCKS_PER_VIO);
if (result != VDO_SUCCESS)
return result;
- result = ASSERT(((vio_type != VIO_TYPE_UNINITIALIZED) && (vio_type != VIO_TYPE_DATA)),
- "%d is a metadata type", vio_type);
+ result = VDO_ASSERT(((vio_type != VIO_TYPE_UNINITIALIZED) && (vio_type != VIO_TYPE_DATA)),
+ "%d is a metadata type", vio_type);
if (result != VDO_SUCCESS)
return result;
return;
/* Remove all available vios from the object pool. */
- ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&pool->waiting),
- "VIO pool must not have any waiters when being freed");
- ASSERT_LOG_ONLY((pool->busy_count == 0),
- "VIO pool must not have %zu busy entries when being freed",
- pool->busy_count);
- ASSERT_LOG_ONLY(list_empty(&pool->busy),
- "VIO pool must not have busy entries when being freed");
+ VDO_ASSERT_LOG_ONLY(!vdo_waitq_has_waiters(&pool->waiting),
+ "VIO pool must not have any waiters when being freed");
+ VDO_ASSERT_LOG_ONLY((pool->busy_count == 0),
+ "VIO pool must not have %zu busy entries when being freed",
+ pool->busy_count);
+ VDO_ASSERT_LOG_ONLY(list_empty(&pool->busy),
+ "VIO pool must not have busy entries when being freed");
list_for_each_entry_safe(pooled, tmp, &pool->available, pool_entry) {
list_del(&pooled->pool_entry);
pool->size--;
}
- ASSERT_LOG_ONLY(pool->size == 0,
- "VIO pool must not have missing entries when being freed");
+ VDO_ASSERT_LOG_ONLY(pool->size == 0,
+ "VIO pool must not have missing entries when being freed");
vdo_free(vdo_forget(pool->buffer));
vdo_free(pool);
{
struct pooled_vio *pooled;
- ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()),
- "acquire from active vio_pool called from correct thread");
+ VDO_ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()),
+ "acquire from active vio_pool called from correct thread");
if (list_empty(&pool->available)) {
vdo_waitq_enqueue_waiter(&pool->waiting, waiter);
*/
void return_vio_to_pool(struct vio_pool *pool, struct pooled_vio *vio)
{
- ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()),
- "vio pool entry returned on same thread as it was acquired");
+ VDO_ASSERT_LOG_ONLY((pool->thread_id == vdo_get_callback_thread_id()),
+ "vio pool entry returned on same thread as it was acquired");
vio->vio.completion.error_handler = NULL;
vio->vio.completion.parent = NULL;
* shouldn't exist.
*/
default:
- ASSERT_LOG_ONLY(0, "Bio operation %d not a write, read, discard, or empty flush",
- bio_op(bio));
+ VDO_ASSERT_LOG_ONLY(0, "Bio operation %d not a write, read, discard, or empty flush",
+ bio_op(bio));
}
if ((bio->bi_opf & REQ_PREFLUSH) != 0)
thread_id_t expected = get_vio_bio_zone_thread_id(vio);
thread_id_t thread_id = vdo_get_callback_thread_id();
- ASSERT_LOG_ONLY((expected == thread_id),
- "vio I/O for physical block %llu on thread %u, should be on bio zone thread %u",
- (unsigned long long) pbn_from_vio_bio(vio->bio), thread_id,
- expected);
+ VDO_ASSERT_LOG_ONLY((expected == thread_id),
+ "vio I/O for physical block %llu on thread %u, should be on bio zone thread %u",
+ (unsigned long long) pbn_from_vio_bio(vio->bio), thread_id,
+ expected);
}
int vdo_create_bio(struct bio **bio_ptr);