diff --git a/fs/aio.c b/fs/aio.c
index 9031a1fba8..114e4528a7 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -45,6 +45,7 @@ struct aioctx *aioctx_new(int events_capacity, pid_t pid) {
     memset(aioctx_events, 0, sizeof(struct aioctx_event) * events_capacity);
 
     lock_init(&aioctx->lock);
+    cond_init(&aioctx->cond);
     aioctx->refcount = 1;
     aioctx->events_capacity = events_capacity;
 
@@ -65,6 +66,7 @@ void aioctx_retain(struct aioctx *ctx) {
 
 static void _aioctx_decrement_ref(struct aioctx *ctx) {
     if (--ctx->refcount == 0) {
+        cond_destroy(&ctx->cond);
         free(ctx->events);
         free(ctx);
     } else {
@@ -143,6 +145,7 @@ void aioctx_complete_event(struct aioctx *ctx, unsigned int index, int64_t resul
         ctx->events[index].data.as_complete = data;
     }
 
+    notify_once(&ctx->cond);
     unlock(&ctx->lock);
 }
 
@@ -171,6 +174,16 @@ bool aioctx_consume_completed_event(struct aioctx *ctx, uint64_t *user_data, add
     return result;
 }
 
+int aioctx_wait_for_completion(struct aioctx *ctx, struct timespec *timeout) {
+    if (ctx == NULL) return _EINVAL;
+
+    lock(&ctx->lock);
+    int err = wait_for(&ctx->cond, &ctx->lock, timeout);
+    unlock(&ctx->lock);
+
+    return err;
+}
+
 void aioctx_lock(struct aioctx* ctx) {
     if (ctx == NULL) return;
 
diff --git a/fs/aio.h b/fs/aio.h
index b3be534587..1feaebf241 100644
--- a/fs/aio.h
+++ b/fs/aio.h
@@ -90,6 +90,7 @@ struct aioctx_event {
 struct aioctx {
     atomic_uint refcount;
     lock_t lock;
+    cond_t cond;
 
     // Indicates if this context is owned by a task.
     //
@@ -192,6 +193,9 @@ void aioctx_cancel_event(struct aioctx *ctx, unsigned int index);
 //
 // This accepts two result parameters, whose meaning is determined solely by
 // the event opcode.
+//
+// This also signals any threads waiting on the context that an event has been
+// completed.
 void aioctx_complete_event(struct aioctx *ctx, unsigned int index, int64_t result0, int64_t result1);
 
 // Consume a completed I/O event.
@@ -205,6 +209,19 @@ void aioctx_complete_event(struct aioctx *ctx, unsigned int index, int64_t resul
 // from the queue, and the passed-in parameters should not be used.
 bool aioctx_consume_completed_event(struct aioctx *ctx, uint64_t *user_data, addr_t *iocbp, struct aioctx_event_complete *completed_data);
 
+// Wait for an event to complete.
+//
+// This function blocks the current thread until an event completion is posted
+// to the context, or the timeout expires. When new events are completed, this
+// function will return 0. If the timeout expired, this function will return
+// _ETIMEDOUT. Any other error codes should be sent to client code.
+//
+// Please note that this function returning with 0 is not a guarantee that
+// `aioctx_consume_completed_event` will yield data. This function may
+// spuriously return 0 or some other thread may have claimed the event in
+// between this function returning and the other function being called.
+int aioctx_wait_for_completion(struct aioctx *ctx, struct timespec *timeout);
+
 void aioctx_lock(struct aioctx* ctx);
 void aioctx_unlock(struct aioctx* ctx);
 
diff --git a/kernel/aio.c b/kernel/aio.c
index e79e363042..5828b2e593 100644
--- a/kernel/aio.c
+++ b/kernel/aio.c
@@ -3,6 +3,7 @@
 #include "kernel/task.h"
 #include "kernel/aio.h"
 #include "kernel/fs.h"
+#include "kernel/time.h"
 #include "fs/aio.h"
 #include "fs/fd.h"
 
@@ -60,12 +61,24 @@ dword_t sys_io_destroy(dword_t ctx_id) {
     return 0;
 }
 
-dword_t sys_io_getevents(dword_t ctx_id, dword_t min_nr, dword_t nr, addr_t events, addr_t timeout) {
-    STRACE("io_getevents(0x%x, %d, %d, 0x%x, 0x%x)", ctx_id, min_nr, nr, events, timeout);
+dword_t sys_io_getevents(dword_t ctx_id, dword_t min_nr, dword_t nr, addr_t events, addr_t timeout_addr) {
+    STRACE("io_getevents(0x%x, %d, %d, 0x%x, 0x%x)", ctx_id, min_nr, nr, events, timeout_addr);
 
     struct aioctx *ctx = aioctx_table_get_and_retain(current->aioctx, ctx_id);
     if (ctx == NULL) return _EINVAL;
     if (events == 0) return _EFAULT;
+
+    struct timespec_ guest_timeout;
+    struct timespec host_timeout;
+    struct timespec *timeout = &host_timeout;
+
+    if (timeout_addr != 0) {
+        // Release the retained context on the error path to avoid leaking a ref.
+        if (user_get(timeout_addr, guest_timeout)) { aioctx_release(ctx); return _EFAULT; }
+        host_timeout.tv_sec = guest_timeout.sec;
+        host_timeout.tv_nsec = guest_timeout.nsec;
+    } else {
+        timeout = NULL;
+    }
 
     dword_t i = 0;
     for (i = 0; i < nr; i += 1) {
@@ -74,8 +87,13 @@ dword_t sys_io_getevents(dword_t ctx_id, dword_t min_nr, dword_t nr, addr_t even
         struct aioctx_event_complete cdata;
 
         if (!aioctx_consume_completed_event(ctx, &user_data, &iocbp, &cdata)) {
-            //TODO: Block until min_nr events recieved or timeout exceeded
-            break;
+            if (i >= min_nr) break;
+
+            int err = aioctx_wait_for_completion(ctx, timeout);
+
+            if (err == _ETIMEDOUT) break;
+            if (err < 0) { aioctx_release(ctx); return err; }
+            i -= 1; continue; // retry this output slot without advancing past it
         }
 
         uint64_t obj = (uint64_t)iocbp;