cqe = &r->cqes[(cq_head & cq_mask)];
if (cqe->flags & IORING_CQE_F_32 || ctx->flags & IORING_SETUP_CQE32)
cqe32 = true;
- seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x",
+ seq_printf(m, "%5u: user_data:%llu, res:%d, flags:%x",
cq_head & cq_mask, cqe->user_data, cqe->res,
cqe->flags);
if (cqe32)
}
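
The hunk is cut off after the cqe32 test. For reference, that branch would print the two extra payload words a 32-byte CQE carries; a plausible continuation, assuming the big_cqe[] trailing array of struct io_uring_cqe (illustrative, not part of this patch):

	if (cqe32)
		/* 32-byte CQEs carry two extra 64-bit payload words */
		seq_printf(m, ", extra1:%llu, extra2:%llu",
			   cqe->big_cqe[0], cqe->big_cqe[1]);
	seq_printf(m, "\n");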
/*
- * Must be called from inline task_work so we now a flush will happen later,
+ * Must be called from inline task_work so we know a flush will happen later,
* and obviously with ctx->uring_lock held (tw always has that).
*/
void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
BUILD_BUG_ON(IO_CQ_WAKE_FORCE <= IORING_MAX_CQ_ENTRIES);
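
The BUILD_BUG_ON only holds if the force-wake sentinel sits above any legal CQ size, so it can never be mistaken for a real waiter count. A sketch of the relationship it guards, with the definitions assumed rather than quoted from the tree:

/* assumed definitions, for illustration only */
#define IO_CQ_WAKE_INIT		(-1U)
/* must compare strictly greater than IORING_MAX_CQ_ENTRIES */
#define IO_CQ_WAKE_FORCE	(IO_CQ_WAKE_INIT >> 1)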
/*
- * We don't know how many reuqests is there in the link and whether
- * they can even be queued lazily, fall back to non-lazy.
+ * We don't know how many requests there are in the link and whether
+ * they can even be queued lazily, so fall back to non-lazy.
*/
if (req->flags & IO_REQ_LINK_FLAGS)
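
The body guarded by this check is truncated; the fallback the comment describes amounts to clearing the lazy-wake hint before the task_work is queued, along these lines (flag name as in mainline, shown as a sketch):

	flags &= ~IOU_F_TWQ_LAZY_WAKE;	/* sketch: drop the lazy-wake hint */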
prev_nd = container_of(prev_uarg, struct io_notif_data, uarg);
prev_notif = cmd_to_io_kiocb(prev_nd);
- /* make sure all noifications can be finished in the same task_work */
+ /* make sure all notifications can be finished in the same task_work */
if (unlikely(notif->ctx != prev_notif->ctx ||
notif->tctx != prev_notif->tctx))
return -EEXIST;
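
On -EEXIST the caller gives up on linking and completes the notifications independently. The success path that follows is omitted from the hunk; a hedged sketch of the chaining the comment implies, with the head/next field names assumed:

	/* illustrative only: chain notif behind prev so a single
	 * task_work run can flush the whole group */
	nd->head = prev_nd->head;
	nd->next = prev_nd->next;
	prev_nd->next = nd;
	return 0;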
* This is really a bug in the core code that does this, any issue
* path should assume that a successful (or -EIOCBQUEUED) return can
* mean that the underlying data can be gone at any time. But that
- * should be fixed seperately, and then this check could be killed.
+ * should be fixed separately, and then this check could be killed.
*/
if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) {
req->flags &= ~REQ_F_NEED_CLEANUP;
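
This block is truncated after the flag clear; once REQ_F_NEED_CLEANUP is dropped, the request's iovec is typically handed back to a per-ring cache instead of being freed. A sketch of the completed block, assuming a recycle helper of the io_rw_recycle() shape:

	if (!(req->flags & (REQ_F_REISSUE | REQ_F_REFCOUNT))) {
		req->flags &= ~REQ_F_NEED_CLEANUP;
		io_rw_recycle(req, issue_flags);	/* assumed helper */
	}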
/*
- * Have to do this validation here, as this is in io_read() rw->len
- * might have chanaged due to buffer selection
+ * Have to do this validation here, as by the time we are in io_read()
+ * rw->len might have changed due to buffer selection
*/
return io_iov_buffer_select_prep(req);
}
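
For context, the deferred validation is simple: with buffer selection in play only a single iovec makes sense, since the selected buffer replaces the address and length at read time. A hedged sketch of the core check inside io_iov_buffer_select_prep() (details assumed):

	/* sketch: buffer selection substitutes the buffer later, so the
	 * request must describe exactly one iovec up front */
	if (rw->len != 1)
		return -EINVAL;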
iov_iter_restore(&io->iter, &io->iter_state);
} while (ret > 0);
done:
- /* it's faster to check here then delegate to kfree */
+ /* it's faster to check here than delegate to kfree */
return ret;
}
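
The pattern behind the fixed comment: kfree(NULL) is perfectly legal, but testing the pointer at the call site skips the function call altogether in the common case where no iovec was heap-allocated. A minimal illustration (variable name assumed):

	if (iov)	/* avoid the call entirely when nothing was allocated */
		kfree(iov);
	return ret;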