/*
* ep_remove variant for callers owning an additional reference to the ep
*/
-static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi)
+static void ep_remove(struct eventpoll *ep, struct epitem *epi)
{
struct file *file = epi->ffd.file;
/*
* Walks through the whole tree and try to free each "struct epitem".
- * Note that ep_remove_safe() will not remove the epitem in case of a
+ * Note that ep_remove() will not remove the epitem in case of a
* racing eventpoll_release_file(); the latter will do the removal.
* At this point we are sure no poll callbacks will be lingering around.
* Since we still own a reference to the eventpoll struct, the loop can't
for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = next) {
next = rb_next(rbp);
epi = rb_entry(rbp, struct epitem, rbn);
- ep_remove_safe(ep, epi);
+ ep_remove(ep, epi);
cond_resched();
}
mutex_unlock(&tep->mtx);
/*
- * ep_remove_safe() calls in the later error paths can't lead to
+ * ep_remove() calls in the later error paths can't lead to
* ep_free() as the ep file itself still holds an ep reference.
*/
ep_get(ep);
/* now check if we've created too many backpaths */
if (unlikely(full_check && reverse_path_check())) {
- ep_remove_safe(ep, epi);
+ ep_remove(ep, epi);
return -EINVAL;
}
if (epi->event.events & EPOLLWAKEUP) {
error = ep_create_wakeup_source(epi);
if (error) {
- ep_remove_safe(ep, epi);
+ ep_remove(ep, epi);
return error;
}
}
* high memory pressure.
*/
if (unlikely(!epq.epi)) {
- ep_remove_safe(ep, epi);
+ ep_remove(ep, epi);
return -ENOMEM;
}
* The eventpoll itself is still alive: the refcount
* can't go to zero here.
*/
- ep_remove_safe(ep, epi);
+ ep_remove(ep, epi);
error = 0;
} else {
error = -ENOENT;