struct rfkill_event_ext ev;
};
+/* Max rfkill events that may sit queued, unread, for one listener (struct rfkill_data) */
+#define MAX_RFKILL_EVENT 1000
struct rfkill_data {
struct list_head list;
struct list_head events;
struct mutex mtx;
wait_queue_head_t read_wait;
+ /* Number of events currently queued on @events: incremented when an
+ * event is queued, decremented when one is read. Protected by @mtx
+ * and bounded by MAX_RFKILL_EVENT so a reader that never drains its
+ * queue cannot consume unbounded memory.
+ */
+ u32 event_count;
bool input_handler;
u8 max_size;
};
}
#endif /* CONFIG_RFKILL_LEDS */
-static void rfkill_fill_event(struct rfkill_event_ext *ev,
- struct rfkill *rfkill,
- enum rfkill_operation op)
+static int rfkill_fill_event(struct rfkill_int_event *int_ev,
+ struct rfkill *rfkill,
+ struct rfkill_data *data,
+ enum rfkill_operation op)
{
+ struct rfkill_event_ext *ev = &int_ev->ev;
unsigned long flags;
ev->idx = rfkill->idx;
RFKILL_BLOCK_SW_PREV));
ev->hard_block_reasons = rfkill->hard_block_reasons;
spin_unlock_irqrestore(&rfkill->lock, flags);
+
+ /* Queue the filled event on @data. Check-then-increment caps the
+ * queue at exactly MAX_RFKILL_EVENT entries (the previous
+ * post-increment test admitted MAX_RFKILL_EVENT + 1) and avoids the
+ * increment/undo dance on the failure path.
+ */
+ scoped_guard(mutex, &data->mtx) {
+ if (data->event_count >= MAX_RFKILL_EVENT)
+ return -ENOSPC;
+ data->event_count++;
+ list_add_tail(&int_ev->list, &data->events);
+ }
+ return 0;
}
static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op)
ev = kzalloc_obj(*ev);
if (!ev)
continue;
+ /* Non-zero return means this listener's queue is full (-ENOSPC):
+ * the event is dropped for that fd only — free it and move on to
+ * the next listener.
+ */
+ if (rfkill_fill_event(ev, rfkill, data, op)) {
+ kfree(ev);
+ continue;
+ }
wake_up_interruptible(&data->read_wait);
}
}
if (!ev)
goto free;
rfkill_sync(rfkill);
- rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
- mutex_lock(&data->mtx);
- list_add_tail(&ev->list, &data->events);
- mutex_unlock(&data->mtx);
+ /* NOTE(review): if the queue cap is hit while enumerating devices at
+ * open time, the ADD event for this rfkill device is silently dropped
+ * and the new fd never learns the device exists — confirm this is
+ * acceptable versus failing the open instead.
+ */
+ if (rfkill_fill_event(ev, rfkill, data, RFKILL_OP_ADD))
+ kfree(ev);
}
list_add(&data->list, &rfkill_fds);
mutex_unlock(&rfkill_global_mutex);
ret = -EFAULT;
list_del(&ev->list);
+ /* One event dequeued: drop the per-fd count. Pairs with the
+ * increment in rfkill_fill_event(); both run under data->mtx
+ * (released at out: below).
+ */
+ data->event_count--;
kfree(ev);
out:
mutex_unlock(&data->mtx);