This ensures they're cleaned ASAP and release any resources.
return fr_heap_entry_inserted(request->runnable);
}
+/** Update a request's priority
+ *
+ * Callback invoked (via unlang_request_func_t.prioritise) after
+ * request->async->priority has been changed.  If the request is currently
+ * sitting in the worker's runnable heap, it's extracted and re-inserted so
+ * the heap order reflects the new priority.
+ *
+ * @param[in] request	whose priority was changed.
+ * @param[in] uctx	the #fr_worker_t which owns the runnable heap.
+ */
+static void _worker_request_prioritise(request_t *request, void *uctx)
+{
+ fr_worker_t *worker = talloc_get_type_abort(uctx, fr_worker_t);
+
+ RDEBUG3("Request priority changed");
+
+ /* Extract the request from the runnable queue _if_ it's in the runnable queue */
+ if (fr_heap_extract(&worker->runnable, request) < 0) return;
+
+ /* Reinsert it to re-evaluate its new priority */
+ fr_heap_insert(&worker->runnable, request);
+}
+
/** Run a request
*
* Until it either yields, or is done.
.resume = _worker_request_resume,
.mark_runnable = _worker_request_runnable,
- .scheduled = _worker_request_scheduled
+ .scheduled = _worker_request_scheduled,
+ .prioritise = _worker_request_prioritise
},
worker);
if (!worker->intp){
intp->funcs.detach(request, intp->uctx);
}
+/** Change the priority of a request
+ *
+ * Records the new priority in request->async->priority, then notifies the
+ * owner of the interpreter (via its prioritise callback, if one was
+ * provided) so any runnable queue the request sits in can be re-ordered.
+ *
+ * @param[in] request	to re-prioritise.
+ * @param[in] priority	new priority.  Higher values appear to run sooner
+ *			(cancelled requests are given UINT32_MAX) — confirm
+ *			against the runnable heap's comparator.
+ */
+void unlang_interpret_request_prioritise(request_t *request, uint32_t priority)
+{
+ unlang_stack_t *stack = request->stack;
+ unlang_interpret_t *intp;
+
+ if (!fr_cond_assert(stack != NULL)) return;
+
+ intp = stack->intp;
+
+ /* Store the new priority where the owner's callback expects to find it */
+ request->async->priority = priority;
+
+ if (intp->funcs.prioritise) intp->funcs.prioritise(request, intp->uctx);
+}
+
/** Delivers a frame to one or more frames in the stack
*
* This is typically called via an "async" action, i.e. an action outside
*/
request->master_state = REQUEST_STOP_PROCESSING;
+ /*
+ * Give cancelled requests the highest priority
+ * to get them to release resources ASAP.
+ */
+ unlang_interpret_request_prioritise(request, UINT32_MAX);
+
/*
* If the request is yielded, mark it as runnable
*
*/
typedef bool (*unlang_request_scheduled_t)(request_t const *request, void *uctx);
+/** Re-prioritise the request in the runnable queue
+ *
+ * The new priority will be available in request->async->priority.
+ */
+typedef void (*unlang_request_prioritise_t)(request_t *request, void *uctx);
+
/** External functions provided by the owner of the interpret
*
* These functions allow the event loop to signal the caller when a given
///< added back to the runnable queue.
unlang_request_scheduled_t scheduled; //!< Function to check if a request is already
///< scheduled.
+ unlang_request_prioritise_t prioritise; //!< Function to re-prioritise a request in the
+ ///< runnable queue.
} unlang_request_func_t;
int unlang_interpret_push_section(request_t *request, CONF_SECTION *cs,
void unlang_interpret_request_done(request_t *request);
+void unlang_interpret_request_prioritise(request_t *request, uint32_t priority);
+
void unlang_interpret_mark_runnable(request_t *request);
bool unlang_interpret_is_resumable(request_t *request);
.yield = _request_yield,
.resume = _request_resume,
.mark_runnable = _request_runnable,
- .scheduled = _request_scheduled
+ .scheduled = _request_scheduled,
},
intps));