static int bpf_async_schedule_op(struct bpf_async_cb *cb, enum bpf_async_op op,
				 u64 nsec, u32 timer_mode)
{
-	WARN_ON_ONCE(!in_hardirq());
-
	struct bpf_async_cmd *cmd = kmalloc_nolock(sizeof(*cmd), 0, NUMA_NO_NODE);

	if (!cmd) {
	.arg2_type	= ARG_PTR_TO_FUNC,
};
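+/*
+ * hrtimer and workqueue operations cannot always be issued from the
+ * current context: in hardirq, or with IRQs disabled, they must be
+ * deferred via bpf_async_schedule_op() instead of performed in place.
+ */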
+static bool defer_timer_wq_op(void)
+{
+	return in_hardirq() || irqs_disabled();
+}
+
BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, async, u64, nsecs, u64, flags)
{
	struct bpf_hrtimer *t;
	if (!refcount_inc_not_zero(&t->cb.refcnt))
		return -ENOENT;
-	if (!in_hardirq()) {
+	if (!defer_timer_wq_op()) {
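+		/* Safe to arm the timer here; drop the reference taken above. */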
		hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
		bpf_async_refcount_put(&t->cb);
		return 0;
	bool inc = false;
	int ret = 0;

-	if (in_hardirq())
+	if (defer_timer_wq_op())
		return -EOPNOTSUPP;
	t = READ_ONCE(async->timer);
	 * refcnt. Either synchronously or asynchronously in irq_work.
	 */
-	if (!in_hardirq()) {
+	if (!defer_timer_wq_op()) {
		bpf_async_process_op(cb, BPF_ASYNC_CANCEL, 0, 0);
	} else {
		(void)bpf_async_schedule_op(cb, BPF_ASYNC_CANCEL, 0, 0);
	if (!refcount_inc_not_zero(&w->cb.refcnt))
		return -ENOENT;
-	if (!in_hardirq()) {
+	if (!defer_timer_wq_op()) {
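+		/* Safe to queue the work here; drop the reference taken above. */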
		schedule_work(&w->work);
		bpf_async_refcount_put(&w->cb);
		return 0;
	if (!refcount_inc_not_zero(&cb->refcnt))
		return -ENOENT;
-	if (!in_hardirq()) {
+	if (!defer_timer_wq_op()) {
		struct bpf_hrtimer *t = container_of(cb, struct bpf_hrtimer, cb);

		ret = hrtimer_try_to_cancel(&t->timer);