static inline bool io_waitid_drop_issue_ref(struct io_kiocb *req)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
-	struct io_waitid_async *iwa = req->async_data;

	if (!atomic_sub_return(1, &iw->refs))
		return false;

+	io_waitid_remove_wq(req);
+
	/*
	 * Wakeup triggered, racing with us. It was prevented from
	 * completing because of that, queue up the tw to do that.
	 */
	req->io_task_work.func = io_waitid_cb;
	io_req_task_work_add(req);
-	remove_wait_queue(iw->head, &iwa->wo.child_wait);
	return true;
}
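The io_waitid_remove_wq() helper itself is not part of this hunk. Judging from its call sites and the iw->head = NULL additions below, a plausible sketch looks like the following; treat the body as an assumption, not the patch's actual definition:

/*
 * Sketch: prune the wait queue entry if the request is still armed,
 * and clear iw->head so a racing second removal becomes a no-op.
 */
static void io_waitid_remove_wq(struct io_kiocb *req)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

	if (iw->head) {
		struct io_waitid_async *iwa = req->async_data;

		remove_wait_queue(iw->head, &iwa->wo.child_wait);
		iw->head = NULL;
	}
}

Checking iw->head before touching the wait queue is what makes removal idempotent across the issue, wakeup, and cancel paths.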
The wakeup callback, io_waitid_wait(), already runs under the wait queue head's lock, so it prunes the entry with list_del_init() rather than remove_wait_queue(); it now clears iw->head at the same time:

	return 0;

	list_del_init(&wait->entry);
+	iw->head = NULL;

	/* cancel is in progress */
	if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
On the prep side, iw->head starts out NULL, so a request that never arms the wait queue sees removal as a no-op:

	iw->which = READ_ONCE(sqe->len);
	iw->upid = READ_ONCE(sqe->fd);
	iw->options = READ_ONCE(sqe->file_index);
+	iw->head = NULL;
	iw->infop = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	return 0;
}
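For context on the SQE field mapping prep relies on (len carries the idtype, fd the id, file_index the options, addr2 the siginfo pointer), here is a minimal userspace sketch, assuming Linux 6.7+ for IORING_OP_WAITID and liburing 2.5+ for io_uring_prep_waitid(); error handling is trimmed for brevity:

#include <liburing.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	static siginfo_t info;
	pid_t pid;

	io_uring_queue_init(8, &ring, 0);

	pid = fork();
	if (!pid)
		_exit(42);

	sqe = io_uring_get_sqe(&ring);
	/* maps to the prep hunk above: len=idtype, fd=id,
	 * file_index=options, addr2=&info */
	io_uring_prep_waitid(sqe, P_PID, pid, &info, WEXITED, 0);
	io_uring_submit(&ring);

	io_uring_wait_cqe(&ring, &cqe);
	printf("res=%d exit status=%d\n", cqe->res, info.si_status);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}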
In the issue handler, io_waitid(), the assignment of iw->head moves under the ring lock, before the request becomes visible on waitid_list:

	 * callback.
	 */
	io_ring_submit_lock(ctx, issue_flags);
+
+	/*
+	 * iw->head is valid under the ring lock, and as long as the request
+	 * is on the waitid_list where cancelations may find it.
+	 */
+	iw->head = &current->signal->wait_chldexit;
	hlist_add_head(&req->hash_node, &ctx->waitid_list);

	init_waitqueue_func_entry(&iwa->wo.child_wait, io_waitid_wait);
	iwa->wo.child_wait.private = req->tctx->task;
-	iw->head = &current->signal->wait_chldexit;
	add_wait_queue(iw->head, &iwa->wo.child_wait);

	ret = __do_wait(&iwa->wo);
After the __do_wait() restart handling, the direct remove_wait_queue() call is likewise replaced by the helper:

	}

	hlist_del_init(&req->hash_node);
-	remove_wait_queue(iw->head, &iwa->wo.child_wait);
+	io_waitid_remove_wq(req);
	ret = io_waitid_finish(req, ret);
	io_ring_submit_unlock(ctx, issue_flags);
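The new comment is the crux of the fix: any path that observes the request on waitid_list while holding the ring lock may now trust iw->head, which is either a valid wait queue head or NULL. As a purely hypothetical illustration (the function name and loop below are not from the patch), a cancel-all style walker could then tear requests down safely:

/*
 * Illustrative only: walk the waitid list under the ring lock. For each
 * request, claim ownership via the refs protocol first, as the
 * "cancel is in progress" check in io_waitid_wait() requires; then
 * iw->head is NULL (wakeup already pruned the entry) or still valid,
 * so io_waitid_remove_wq() is safe either way.
 */
static void io_waitid_remove_all(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;

	hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) {
		struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

		/* someone else owns completion of this request */
		if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
			continue;
		hlist_del_init(&req->hash_node);
		io_waitid_remove_wq(req);
	}
}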