return a;
}
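+/* Make sure the thread-local deferred-call linpool has @size bytes ready,
+ * so that upcoming defer_call()s do not need to request a new block */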
+static inline void
+defer_expect(size_t size)
+{
+ lp_prealloc(local_deferred.lp, size);
+}
+
#endif
defer_call(&luqi.dc, sizeof luqi);
}
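+/* Reserve deferred-call memory for @count expected unlock queue items */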
+static inline void lfuc_unlock_expected(uint count)
+{
+ defer_expect(count * sizeof (struct lfuc_unlock_queue_item));
+}
+
/**
* lfuc_finished - auxiliary routine for prune event
* @c: usecount structure
return c->data;
}
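+/**
+ * lp_prealloc - preallocate memory in a &linpool
+ * @m: linear memory pool
+ * @size: amount of memory to reserve
+ *
+ * This function makes sure that a subsequent allocation of up to @size bytes
+ * from @m will not need to request a new block. The requested @size must fit
+ * into a single block and the pool's domain must be locked by the caller.
+ */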
+void
+lp_prealloc(linpool *m, uint size)
+{
+ ASSERT_DIE(DG_IS_LOCKED(resource_parent(&m->r)->domain));
+
+ if (size > LP_DATA_SIZE)
+ bug("Requested block size is too big to prealloc");
+
+ byte *a = (byte *) BIRD_ALIGN((unsigned long) m->ptr, CPU_STRUCT_ALIGN);
+ byte *e = a + size;
+
+ if (e <= m->end)
+ return;
+
+ lp_alloc_slow(m, size);
+}
+
/**
* lp_allocu - allocate unaligned memory from a &linpool
* @m: linear memory pool
struct netindex *
net_find_index(netindex_hash *h, const net_addr *n)
{
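+ /* One deferred unlock is expected below */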
+ lfuc_unlock_expected(1);
+
RCU_ANCHOR(u);
struct netindex *ni = net_find_index_fragile(h, n);
return (ni && net_validate_index(h, ni)) ? net_lock_revive_unlock(h, ni) : NULL;
void lp_flush(linpool *); /* Free everything, but leave linpool */
lp_state *lp_save(linpool *m); /* Save state */
void lp_restore(linpool *m, lp_state *p); /* Restore state */
+void lp_prealloc(linpool *m, uint size); /* Make sure we will be able to allocate the memory without requesting new blocks */
static inline void lp_saved_cleanup(struct lp_state **lps)
{
extern _Atomic int pages_kept_locally;
extern _Atomic int pages_kept_cold;
extern _Atomic int pages_kept_cold_index;
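+/* How many times cold memory could not be used for an allocation */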
+extern _Atomic int cold_memory_failed_to_use;
void *alloc_page(void);
void free_page(void *);
void flush_local_pages(void);
struct size_args cold = get_size_args(atomic_load_explicit(&pages_kept_cold, memory_order_relaxed) * page_size);
cli_msg(-1018, "%-23s " SIZE_FORMAT, "Cold memory:", SIZE_ARGS(cold));
#endif
+ cli_msg(-1028, "Failed to use cold memory: %i times", atomic_load_explicit(&cold_memory_failed_to_use, memory_order_relaxed));
cli_msg(0, "");
}
* and check allowed directions */
uint max_up = 0, min_up = 0, max_down = 0, min_down = 0;
+ lfuc_unlock_expected(5); /* Reserve linpool memory for deferred unlocks in advance to avoid skipping cold memory */
+
RT_READ(tab, tr);
for (uint ap=0; ap<nsz; ap++)
return;
}
+ lfuc_unlock_expected(5); /* Reserve linpool memory for deferred unlocks in advance to avoid skipping cold memory */
+
RT_LOCKED(hook->table, tab)
{
u32 bs = atomic_load_explicit(&tab->routes_block_size, memory_order_acquire);
struct rt_export_feed *
rt_net_feed(rtable *t, const net_addr *a, const struct rt_pending_export *first)
{
+ lfuc_unlock_expected(5); /* Reserve linpool memory for deferred unlocks in advance to avoid skipping cold memory */
+
RT_READ(t, tr);
const struct netindex *ni = net_find_index(tr->t->netindex, a);
return ni ? rt_net_feed_internal(tr, ni->index, NULL, NULL, first) : NULL;
{
rte rt = {};
+ lfuc_unlock_expected(5); /* Reserve linpool memory for deferred unlocks in advance to avoid skipping cold memory */
+
RT_READ(t, tr);
struct netindex *i = net_find_index(t->netindex, a);
static struct empty_pages *empty_pages = NULL;
_Atomic int pages_kept_cold = 0;
_Atomic int pages_kept_cold_index = 0;
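+/* Incremented whenever an allocation has to skip cold memory */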
+_Atomic int cold_memory_failed_to_use = 0;
static struct free_page * _Atomic page_stack = NULL;
static _Thread_local struct free_page * local_page_stack = NULL;
{
/* We can't lock and we actually shouldn't alloc either when rcu is active
* but that's a quest for another day. */
+ atomic_fetch_add_explicit(&cold_memory_failed_to_use, 1, memory_order_relaxed);
}
else
{