refcount_t refcount;
/* The lock protects mode, size, area and t. */
spinlock_t lock;
- enum kcov_mode mode;
+ enum kcov_mode mode __guarded_by(&lock);
/* Size of arena (in long's). */
- unsigned int size;
+ unsigned int size __guarded_by(&lock);
/* Coverage buffer shared with user space. */
- void *area;
+ void *area __guarded_by(&lock);
/* Task for which we collect coverage, or NULL. */
- struct task_struct *t;
+ struct task_struct *t __guarded_by(&lock);
/* Collecting coverage from remote (background) threads. */
bool remote;
/* Size of remote area (in long's). */
unsigned int remote_size;
}
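+ /*
+  * __must_hold(&kcov->lock): callers enter and leave with kcov->lock
+  * held, so the body may touch the __guarded_by fields directly.
+  */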
static void kcov_reset(struct kcov *kcov)
+ __must_hold(&kcov->lock)
{
kcov->t = NULL;
kcov->mode = KCOV_MODE_INIT;
}
static void kcov_remote_reset(struct kcov *kcov)
+ __must_hold(&kcov->lock)
{
int bkt;
struct kcov_remote *remote;
}
static void kcov_disable(struct task_struct *t, struct kcov *kcov)
+ __must_hold(&kcov->lock)
{
kcov_task_reset(t);
if (kcov->remote)
static void kcov_put(struct kcov *kcov)
{
if (refcount_dec_and_test(&kcov->refcount)) {
- kcov_remote_reset(kcov);
- vfree(kcov->area);
+ /* Context-safety: no references left, object being destroyed. */
+ context_unsafe(
+ kcov_remote_reset(kcov);
+ vfree(kcov->area);
+ );
kfree(kcov);
}
}
unsigned long size, off;
struct page *page;
unsigned long flags;
+ void *area;
spin_lock_irqsave(&kcov->lock, flags);
size = kcov->size * sizeof(unsigned long);
res = -EINVAL;
goto exit;
}
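+ /*
+  * Snapshot area while holding the lock: kcov->area is
+  * __guarded_by(&lock), and the page-insert loop below runs after the
+  * unlock.
+  */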
+ area = kcov->area;
spin_unlock_irqrestore(&kcov->lock, flags);
vm_flags_set(vma, VM_DONTEXPAND);
for (off = 0; off < size; off += PAGE_SIZE) {
- page = vmalloc_to_page(kcov->area + off);
+ page = vmalloc_to_page(area + off);
res = vm_insert_page(vma, vma->vm_start + off, page);
if (res) {
pr_warn_once("kcov: vm_insert_page() failed\n");
kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
if (!kcov)
return -ENOMEM;
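+ /* Initialize the lock before the first writes to its __guarded_by fields. */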
+ spin_lock_init(&kcov->lock);
kcov->mode = KCOV_MODE_DISABLED;
kcov->sequence = 1;
refcount_set(&kcov->refcount, 1);
- spin_lock_init(&kcov->lock);
filep->private_data = kcov;
return nonseekable_open(inode, filep);
}
* vmalloc fault handling path is instrumented.
*/
static void kcov_fault_in_area(struct kcov *kcov)
+ __must_hold(&kcov->lock)
{
unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
unsigned long *area = kcov->area;
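+ /* The read of kcov->area above is allowed: __must_hold() guarantees the lock. */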
static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
unsigned long arg)
+ __must_hold(&kcov->lock)
{
struct task_struct *t;
unsigned long flags, unused;
}
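+ /* Callers hold the lock of this CPU's kcov_percpu_data. */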
static void kcov_remote_softirq_start(struct task_struct *t)
+ __must_hold(&kcov_percpu_data.lock)
{
struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
unsigned int mode;
}
static void kcov_remote_softirq_stop(struct task_struct *t)
+ __must_hold(&kcov_percpu_data.lock)
{
struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
/* Put in kcov_remote_stop(). */
kcov_get(kcov);
/*
- * Read kcov fields before unlock to prevent races with
- * KCOV_DISABLE / kcov_remote_reset().
+ * Read kcov fields before unlocking kcov_remote_lock to prevent races
+ * with KCOV_DISABLE and kcov_remote_reset(). We cannot take kcov->lock
+ * here: kcov_remote_lock is acquired _after_ kcov->lock elsewhere, so
+ * nesting them in the opposite order could deadlock.
*/
- mode = kcov->mode;
+ mode = context_unsafe(kcov->mode);
sequence = kcov->sequence;
if (in_task()) {
size = kcov->remote_size;