From 9829ea68e15c0671a9c277ae2d569a9d67f50ed3 Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Thu, 15 Apr 2021 17:26:44 +0100 Subject: [PATCH 01/27] Add packed vring structures and create split and packed processing routines. --- src/host_interface/virtio.c | 86 ++++++++++++++++++++++++++- src/host_interface/virtio_blkdev.c | 39 ++++++++++-- src/host_interface/virtio_console.c | 36 +++++++++-- src/host_interface/virtio_netdev.c | 37 ++++++++++-- src/include/host/virtio_dev.h | 14 ++++- src/include/shared/virtio_ring_buff.h | 23 +++++++ 6 files changed, 218 insertions(+), 17 deletions(-) diff --git a/src/host_interface/virtio.c b/src/host_interface/virtio.c index 90e767846..d73031a3d 100644 --- a/src/host_interface/virtio.c +++ b/src/host_interface/virtio.c @@ -10,6 +10,12 @@ #define min_len(a, b) (a < b ? a : b) +#ifdef PACKED_RING +bool packed_ring = true; +#else +bool packed_ring = false; +#endif + struct _virtio_req { struct virtio_req req; @@ -148,6 +154,9 @@ static inline void virtio_deliver_irq(struct virtio_dev* dev) * req: local virtio request buffer * len: length of the data processed */ +/**PACKED implementation + * Will need to update this for packed implementation + */ void virtio_req_complete(struct virtio_req* req, uint32_t len) { int send_irq = 0; @@ -230,7 +239,7 @@ void virtio_req_complete(struct virtio_req* req, uint32_t len) * dev: device structure pointer * qidx: queue index to be processed */ -static int virtio_process_one(struct virtio_dev* dev, int qidx) +static int virtio_process_one_split(struct virtio_dev* dev, int qidx) { struct virtq* q = &dev->queue[qidx]; uint16_t idx = q->last_avail_idx; @@ -256,6 +265,38 @@ static int virtio_process_one(struct virtio_dev* dev, int qidx) return dev->ops->enqueue(dev, qidx, req); } +static int virtio_process_one_packed(struct virtio_dev* dev, int qidx) +{ + struct virtq_packed* q = &dev->queue[qidx]; + + /**Continue from here*/ + + uint16_t idx = q->last_avail_idx; + + struct _virtio_req _req = { + 
.dev = dev, + .q = q, + .idx = idx, + }; + + struct virtio_req* req = &_req.req; + memset(req, 0, sizeof(struct virtio_req)); + /**MODIFY HERE: + * struct virtq_packed_desc desc + */ + struct virtq_desc* desc = vring_desc_at_avail_idx(q, idx); + do + { + add_dev_buf_from_vring_desc(req, desc); + if (q->max_merge_len && req->total_len > q->max_merge_len) + break; + desc = get_next_desc(q, desc, &idx); + } while (desc && req->buf_count < VIRTIO_REQ_MAX_BUFS); + + // Return result of enqueue operation + return dev->ops->enqueue(dev, qidx, req); +} + static inline void virtio_set_avail_event(struct virtq* q, uint16_t val) { *((uint16_t*)&q->used->ring[q->num]) = val; @@ -274,7 +315,14 @@ void virtio_set_queue_max_merge_len(struct virtio_dev* dev, int q, int len) */ void virtio_process_queue(struct virtio_dev* dev, uint32_t qidx) { - struct virtq* q = &dev->queue[qidx]; + packed_ring ? virtio_process_queue_packed(dev, qidx) : + virtio_process_queue_split(dev, qidx); +} + + +void virtio_process_queue_split(struct virtio_dev* dev, uint32_t qidx) +{ + struct virtq* q = &dev->split.queue[qidx]; if (!q->ready) return; @@ -285,7 +333,7 @@ void virtio_process_queue(struct virtio_dev* dev, uint32_t qidx) while (q->last_avail_idx != q->avail->idx) { /* Make sure following loads happens after loading q->avail->idx */ - if (virtio_process_one(dev, qidx) < 0) + if (virtio_process_one_split(dev, qidx) < 0) break; if (q->last_avail_idx == le16toh(q->avail->idx)) virtio_set_avail_event(q, q->avail->idx); @@ -294,3 +342,35 @@ void virtio_process_queue(struct virtio_dev* dev, uint32_t qidx) if (dev->ops->release_queue) dev->ops->release_queue(dev, qidx); } + +void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) +{ + struct virtq_packed* q = &dev->packed.queue[qidx]; + + if (!q->ready) + return; + + if (dev->ops->acquire_queue) + dev->ops->acquire_queue(dev, qidx); + + // Have some loop that keeps going until we hit a desc that's not available + while 
(packed_desc_is_avail(q)) + { + // Need to process desc here + // Possible make some process_one_packed + // Question is what else do I include in this statement + if (virtio_process_one_packed(dev, qidx) < 0) + break; + } + + if (dev->ops->release_queue) + dev->ops->release_queue(dev, qidx); +} + +int packed_desc_is_avail(struct virtq_packed* q) +{ + struct virtq_packed_desc* desc = q->desc[q->avail_desc_idx & (q->num -1)]; + uint16_t avail = desc->flags & KL_VRING_PACKED_DESC_F_AVAIL; + uint16_t used = desc->flags & KL_VRING_PACKED_DESC_F_USED; + return avail != used && avail == q->driver_wrap_counter; +} \ No newline at end of file diff --git a/src/host_interface/virtio_blkdev.c b/src/host_interface/virtio_blkdev.c index e62c15db2..a69fd4465 100644 --- a/src/host_interface/virtio_blkdev.c +++ b/src/host_interface/virtio_blkdev.c @@ -36,6 +36,9 @@ static inline sgxlkl_host_disk_state_t* get_disk_config(uint8_t blkdev_id) /* * Virtio callback functions for processing virtio requests */ +/** + * Packed implementation: double check if I need to change this later + */ static int blk_enqueue(struct virtio_dev* dev, int q, struct virtio_req* req) { struct virtio_blk_outhdr* h; @@ -112,7 +115,12 @@ int blk_device_init( void* vq_mem = NULL; struct virtio_blk_dev* host_blk_device = NULL; size_t bdev_size = sizeof(struct virtio_blk_dev); - size_t vq_size = HOST_BLK_DEV_NUM_QUEUES * sizeof(struct virtq); + size_t vq_size; + + if (!packed_ring) + vq_size = HOST_BLK_DEV_NUM_QUEUES * sizeof(struct virtq); + else + vq_sze = HOST_BLK_DEV_NUM_QUEUES * sizeof(struct virtq_packed); /*Allocate memory for block device*/ bdev_size = next_pow2(bdev_size); @@ -140,10 +148,29 @@ int blk_device_init( } /* Initialize block device */ - host_blk_device->dev.queue = vq_mem; - memset(host_blk_device->dev.queue, 0, vq_size); + if (!packed_ring) + { + host_blk_device->dev.split.queue = vq_mem; + memset(host_blk_device->dev.split.queue, 0, vq_size); + } + else + { + 
host_blk_device->dev.packed.queue = vq_mem; + memset(host_blk_device->dev.packed.queue, 0, vq_size); + } for (int i = 0; i < HOST_BLK_DEV_NUM_QUEUES; i++) - host_blk_device->dev.queue[i].num_max = HOST_BLK_DEV_QUEUE_DEPTH; + { + if (!packed_ring) + { + host_blk_device->dev.split.queue[i].num_max = HOST_BLK_DEV_QUEUE_DEPTH; + } + else + { + host_blk_device->dev.packed.queue[i].num_max = HOST_BLK_DEV_QUEUE_DEPTH; + host_blk_device->dev.packed.queue[i].device_wrap_counter = true; + host_blk_device->dev.packed.queue[i].driver_wrap_counter = true; + } + } host_blk_device->config.capacity = disk->size / 512; @@ -158,6 +185,10 @@ int blk_device_init( host_blk_device->dev.device_features |= BIT(VIRTIO_F_VERSION_1) | BIT(VIRTIO_RING_F_EVENT_IDX); + if (packed_ring) + host_blk_device->dev.device_features |= BIT(VIRTIO_F_RING_PACKED); + + if (enable_swiotlb) host_blk_device->dev.device_features |= BIT(VIRTIO_F_IOMMU_PLATFORM); diff --git a/src/host_interface/virtio_console.c b/src/host_interface/virtio_console.c index ccb15486d..4a6e1522d 100644 --- a/src/host_interface/virtio_console.c +++ b/src/host_interface/virtio_console.c @@ -228,7 +228,13 @@ int virtio_console_init(sgxlkl_host_state_t* host_state, host_dev_config_t* cfg) void* console_vq_mem = NULL; size_t host_console_size = next_pow2(sizeof(struct virtio_console_dev)); - size_t console_vq_size = NUM_QUEUES * sizeof(struct virtq); + size_t console_vq_size; + + if (!packed_ring) + console_vq_size = NUM_QUEUES * sizeof(struct virtq); + else + console_vq_size = NUM_QUEUES * sizeof(struct virtq_packed); + console_vq_size = next_pow2(console_vq_size); /* Console host device configuration */ @@ -268,12 +274,31 @@ int virtio_console_init(sgxlkl_host_state_t* host_state, host_dev_config_t* cfg) _console_dev->out_console_fd = STDOUT_FILENO; struct virtio_dev* dev = &_console_dev->dev; - dev->queue = console_vq_mem; - memset(dev->queue, 0, console_vq_size); + if (!packed_ring) + { + dev->split.queue = console_vq_mem; + 
memset(dev->split.queue, 0, console_vq_size); + } + else + { + dev->packed.queue = console_vq_mem; + memset(dev->packed.queue, 0, console_vq_size); + } /* assign the queue depth to each virt queue */ for (int i = 0; i < NUM_QUEUES; i++) - dev->queue[i].num_max = QUEUE_DEPTH; + { + if (!packed_ring) + { + dev->split.queue[i].num_max = QUEUE_DEPTH; + } + else + { + dev->packed.queue[i].num_max = QUEUE_DEPTH; + dev->packed.queue[i].device_wrap_counter = true; + dev->packed.queue[i].driver_wrap_counter = true; + } + } /* set console device feature */ dev->device_id = VIRTIO_ID_CONSOLE; @@ -286,6 +311,9 @@ int virtio_console_init(sgxlkl_host_state_t* host_state, host_dev_config_t* cfg) if (host_state->enclave_config.mode != SW_DEBUG_MODE) dev->device_features |= BIT(VIRTIO_F_IOMMU_PLATFORM); + if (packed_ring) + dev->device_features |= BIT(VIRTIO_F_RING_PACKED); + dev->ops = &host_console_ops; _console_dev->qlocks = init_queue_locks(NUM_QUEUES); diff --git a/src/host_interface/virtio_netdev.c b/src/host_interface/virtio_netdev.c index 467d7b766..81b3585fb 100644 --- a/src/host_interface/virtio_netdev.c +++ b/src/host_interface/virtio_netdev.c @@ -534,6 +534,7 @@ int netdev_init(sgxlkl_host_state_t* host_state) void* netdev_vq_mem = NULL; struct virtio_net_dev* net_dev = NULL; char mac[6]; + size_t netdev_vq_size; // Generate a completely random MAC address size_t b = 0; while (b < sizeof(mac)) { @@ -550,7 +551,11 @@ int netdev_init(sgxlkl_host_state_t* host_state) mac[0] &= 0xfe; size_t host_netdev_size = next_pow2(sizeof(struct virtio_net_dev)); - size_t netdev_vq_size = NUM_QUEUES * sizeof(struct virtq); + + if (!packed_ring) + netdev_vq_size = NUM_QUEUES * sizeof(struct virtq); + else + netdev_vq_size = NUM_QUEUES * sizeof(struct virtq_packed); netdev_vq_size = next_pow2(netdev_vq_size); if (!_netdev_id) @@ -589,12 +594,30 @@ int netdev_init(sgxlkl_host_state_t* host_state) return -1; } - net_dev->dev.queue = netdev_vq_mem; - memset(net_dev->dev.queue, 0, 
netdev_vq_size); - + if (!packed_ring) + { + net_dev->dev.split.queue = netdev_vq_mem; + memset(net_dev->dev.split.queue, 0, netdev_vq_size); + } + else + { + net_dev->dev.packed.queue = netdev_vq_mem; + memset(net_dev->dev.packed.queue, 0, netdev_vq_size); + } /* assign the queue depth to each virt queue */ for (int i = 0; i < NUM_QUEUES; i++) - net_dev->dev.queue[i].num_max = QUEUE_DEPTH; + { + if (!packed_ring) + { + net_dev->dev.split.queue[i].num_max = QUEUE_DEPTH; + } + else + { + net_dev->dev.packed.queue[i].num_max = QUEUE_DEPTH; + net_dev->dev.packed.queue[i].device_wrap_counter = 1; + net_dev->dev.packed.queue[i].driver_wrap_counter = 1; + } + } /* set net device feature */ net_dev->dev.device_id = VIRTIO_ID_NET; @@ -617,6 +640,10 @@ int netdev_init(sgxlkl_host_state_t* host_state) } net_dev->dev.device_features |= BIT(VIRTIO_NET_F_MAC); + + if (packed_ring) + net_dev->dev.device_features |= BIT(VIRTIO_F_RING_PACKED); + memcpy(net_dev->config.mac, mac, ETH_ALEN); net_dev->dev.config_data = &net_dev->config; diff --git a/src/include/host/virtio_dev.h b/src/include/host/virtio_dev.h index 673578d58..9a508269b 100644 --- a/src/include/host/virtio_dev.h +++ b/src/include/host/virtio_dev.h @@ -9,6 +9,9 @@ #define VIRTIO_F_VERSION_1 32 #define VIRTIO_RING_F_EVENT_IDX 29 #define VIRTIO_F_IOMMU_PLATFORM 33 +#define VIRTIO_F_RING_PACKED 34 + +extern bool packed_ring; struct virtio_dev; @@ -57,7 +60,16 @@ struct virtio_dev uint64_t driver_features; _Atomic(uint32_t) driver_features_sel; _Atomic(uint32_t) queue_sel; - struct virtq* queue; + + union { + struct { + struct virtq* queue; + }split; + + struct { + struct virtq_packed* queue; //Not sure if I should keep the name as virtq_packed or use a similar structure in virtio_ring.c + }packed; + }; uint32_t queue_notify; _Atomic(uint32_t) int_status; _Atomic(uint32_t) status; diff --git a/src/include/shared/virtio_ring_buff.h b/src/include/shared/virtio_ring_buff.h index dba1449e8..35b2c62a0 100644 --- 
a/src/include/shared/virtio_ring_buff.h +++ b/src/include/shared/virtio_ring_buff.h @@ -10,6 +10,9 @@ /* This means the buffer contains a list of buffer descriptors. */ #define LKL_VRING_DESC_F_INDIRECT 4 +#define LKL_VRING_PACKED_DESC_F_AVAIL 7 +#define LKL_VRING_PACKED_DESC_F_USED 15 + struct virtq_desc { /* Address (guest-physical). */ @@ -58,4 +61,24 @@ struct virtq uint16_t last_used_idx_signaled; }; +struct virtq_packed_desc +{ + uint64_t addr; + uint32_t len; + uint16_t id; + uint16_t flags; +}; + +struct virtq_packed +{ + uint32_t num_max; + _Atomic(uint32_t) ready; + _Atomic(uint32_t) num; + //Add supression flags where necessary + _Atomic(struct virtq_packed_desc*) desc; + bool device_wrap_counter; //Initialise to 1, flip when we change last descriptor as used + bool driver_wrap_counter; //Initialise to 1 and flip when when avail_desc_idx becomes greater than queue and we need to wrap around it + uint16_t avail_desc_idx; //We increment this for each avail event we process + uint16_t used_desc_idx; +}; #endif From d4cafa32e79f2bbcdc370fe3ac8c9cc074a4b7ca Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Fri, 16 Apr 2021 12:57:17 +0100 Subject: [PATCH 02/27] Create function to add use packed vring desc. 
--- src/host_interface/virtio.c | 298 ++++++++++++++++++++++---- src/host_interface/virtio_blkdev.c | 1 + src/include/host/virtio_dev.h | 2 +- src/include/shared/virtio_ring_buff.h | 2 + 4 files changed, 257 insertions(+), 46 deletions(-) diff --git a/src/host_interface/virtio.c b/src/host_interface/virtio.c index d73031a3d..e58ef5e52 100644 --- a/src/host_interface/virtio.c +++ b/src/host_interface/virtio.c @@ -18,12 +18,33 @@ bool packed_ring = false; struct _virtio_req { + union { + struct { + virtq* q; + }split; + struct { + virtq* q; + }packed; + }; struct virtio_req req; struct virtio_dev* dev; - struct virtq* q; + uint16_t idx; }; +/* + * packed_desc_is_avail: Check if the current descriptor + * the driver is expected to fill in is available + * q: pointer to a packed virtio queue + */ +static int packed_desc_is_avail(struct virtq_packed* q) +{ + struct virtq_packed_desc* desc = q->desc[q->avail_desc_idx & (q->num -1)]; + uint16_t avail = desc->flags & KL_VRING_PACKED_DESC_F_AVAIL; + uint16_t used = desc->flags & KL_VRING_PACKED_DESC_F_USED; + return avail != used && avail == q->driver_wrap_counter; +} + /* * vring_desc_at_avail_idx : get the pointer to vring descriptor * at given available index from virtio_queue @@ -39,30 +60,48 @@ static inline struct virtq_desc* vring_desc_at_avail_idx( } /* - * add_dev_buf_from_vring_desc: - * read data buffer address from vring descriptors into local buffers + * add_dev_buf_from_vring_desc_split: + * read data buffer address from split vring descriptors into local buffers + * req : local buffer + * vring_desc_split : virtio ring descriptor + */ +static void add_dev_buf_from_vring_desc_split( + struct virtio_req* req, + struct virtq_desc* vring_desc_split) +{ + struct iovec* buf = &req->buf[req->buf_count++]; + + buf->iov_base = (void*)(uintptr_t)(vring_desc_split->addr); + buf->iov_len = vring_desc_split->len; + + req->total_len += buf->iov_len; +} + +/* + * add_dev_buf_from_vring_desc_packed: + * read data buffer 
address from packed vring descriptors into local buffers * req : local buffer - * vring_desc : virtio ring descriptor + * vring_desc_packed : virtio ring descriptor */ -static void add_dev_buf_from_vring_desc( +static void add_dev_buf_from_vring_desc_packed( struct virtio_req* req, - struct virtq_desc* vring_desc) + struct virtq_packed_desc* vring_desc_packed) { struct iovec* buf = &req->buf[req->buf_count++]; - buf->iov_base = (void*)(uintptr_t)(vring_desc->addr); - buf->iov_len = vring_desc->len; + buf->iov_base = (void*)(uintptr_t)(vring_desc_packed->addr); + buf->iov_len = vring_desc_packed->len; req->total_len += buf->iov_len; } /* - * get_next_desc : get next vring rescriptor pointer + * get_next_desc : get next split vring descriptor pointer * q: Virtio queue * desc: current descriptor * idx : available ring index */ -static struct virtq_desc* get_next_desc( +static struct virtq_desc* get_next_desc_split( struct virtq* q, struct virtq_desc* desc, uint16_t* idx) @@ -82,6 +121,31 @@ static struct virtq_desc* get_next_desc( return &q->desc[desc->next & (q->num - 1)]; } +/* + * get_next_desc : get next packed vring descriptor pointer + * q: Virtio queue + * desc: current descriptor + * idx : available ring index + */ +static struct virtq_packed_desc* get_next_desc_packed( + struct virtq_packed* q, + struct virtq_packed_desc* desc, + uint16_t* idx) +{ + if (q->max_merge_len) + { + if (++(*idx) == q->num_max) + return NULL; + struct virtq_packed_desc* next_desc = q->desc[*idx & (q->num-1)]; + packed_desc_is_avail(next_desc) ? 
next_desc : NULL; + } + + if (!(desc->flags & LKL_VRING_DESC_F_NEXT)) + return NULL; + return &q->desc[++(*idx) & (q->num - 1)]; +} + + /* * virtio_add_used: update used ring at used index with used discriptor index * q : input parameter @@ -102,6 +166,21 @@ static inline void virtio_add_used( q->used->ring[used_idx].len = htole16(len); } +static inline void virtio_add_used_packed( + struct virtq_packed* q, + uint16_t used_idx, + uint32_t len, + uint16_t id) +{ + struct virtq_packed_desc* desc = q->desc[used_idx & (q->num -1)]; + desc->id = id; + desc->len = htole32(len); + desc->flags |= + q->device_wrap_counter << LKL_VRING_PACKED_DESC_F_AVAIL | + q->device_wrap_counter << LKL_VRING_PACKED_DESC_F_USED; + desc->flags = htole16(desc->flags); +} + /* * virtio_sync_used_idx: update used index * q: virtio queue @@ -154,16 +233,13 @@ static inline void virtio_deliver_irq(struct virtio_dev* dev) * req: local virtio request buffer * len: length of the data processed */ -/**PACKED implementation - * Will need to update this for packed implementation - */ -void virtio_req_complete(struct virtio_req* req, uint32_t len) +void virtio_req_complete_split(struct virtio_req* req, uint32_t len) { int send_irq = 0; struct _virtio_req* _req = container_of(req, struct _virtio_req, req); - struct virtq* q = _req->q; + struct virtq* q = _req->split.q; uint16_t avail_idx = _req->idx; - uint16_t used_idx = virtio_get_used_idx(_req->q); + uint16_t used_idx = virtio_get_used_idx(_req->split.q); /* * We've potentially used up multiple (non-chained) descriptors and have @@ -235,7 +311,136 @@ void virtio_req_complete(struct virtio_req* req, uint32_t len) } /* - * virtio_process_one: Process one queue at a time + * virtio_req_complete: handle finishing activities after processing request + * req: local virtio request buffer + * len: length of the data processed + */ +void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) +{ + /** + * Requirements for this: + * Marking an 
available desc as unavailable + * Setting a single used desc for a descriptor chain + * Ensuring the id of a used desc for a desc chain is the id of the last buffer in the chain + * avail_desc_idx and used_desc_idx to be incremented and wrapped around as appropriate + * changing the wrap counters when the above are wrapped around + * + * Question: + * The below assumes non chained descriptors? + * But if we have chained descriptors + */ + int send_irq = 0; + struct _virtio_req* _req = container_of(req, struct _virtio_req, req); + struct virtq_packed* q = _req->packed.q; + uint16_t avail_desc_idx = _req->idx; + uint16_t used_desc_idx = q->used_desc_idx; + + /* + * We've potentially used up multiple (non-chained) descriptors and have + * to create one "used" entry for each descriptor we've consumed. + */ + + //Since this is all chained, just go to the last one, mark it as used, + //place it at the beginning + //I think the point is that it's not necessary to mark them all as used + uint16_t used_len; + if (!q->max_merge_len) + used_len = len; + else + used_len = min_len(len, req->buf[req->buff_count-1].iov_len); + + struct virtq_packed_desc* desc = q->desc[(avail_desc_idx+(req->buff_count-1)) & (q->num -1)]; + virtio_add_used_packed(q, q->used_desc_idx, used_len, desc->id); + + //Need to increment used and avail index, then see if I need the rest + //of this code + /* + for (int i = 0; i < req->buf_count; i++) + { + + + //Need to make available index unavailable and specify used index as now used + + // Do I need to set avail_desc_idx and used_desc_idx back to 0 when necessary? + // Maybe not, because the driver handles it? + + //Should these be getting incremented? + //What about the thing where we move them forward by the number of descriptors? + //Based on how this will get called, unless max_merge_len is set, + //this will only be called for a chained descriptor not individual + //so this would mean we only need one used? And we get the id from buf_count-1? 
+ //Maybe my assumption is wrong that we only have one used per chain? + //the spec says that but that doesn't sound reasonable - e.g. vrtio_blkdev.c + + + len -= used_len; + if (!len) + break; + }*/ + + virtio_sync_used_idx(q, used_desc_idx); + q->last_avail_idx = avail_desc_idx; + + /* + * Triggers the irq whenever there is no available buffer. + */ + if (q->last_avail_idx == le16toh(q->avail->idx)) + send_irq = 1; + + /* + * There are two rings: q->avail and q->used for each of the rx and tx + * queues that are used to pass buffers between kernel driver and the + * virtio device implementation. + * + * Kernel maitains the first one and appends buffers to it. In rx queue, + * it's empty buffers kernel offers to store received packets. In tx + * queue, it's buffers containing packets to transmit. Kernel notifies + * the device by mmio write (see VIRTIO_MMIO_QUEUE_NOTIFY below). + * + * The virtio device (here in this file) maintains the + * q->used and appends buffer to it after consuming it from q->avail. + * + * The device needs to notify the driver by triggering irq here. The + * LKL_VIRTIO_RING_F_EVENT_IDX is enabled in this implementation so + * kernel can set virtio_get_used_event(q) to tell the device to "only + * trigger the irq when this item in q->used ring is populated." + * + * Because driver and device are run in two different threads. When + * driver sets virtio_get_used_event(q), q->used->idx may already be + * increased to a larger one. So we need to trigger the irq when + * virtio_get_used_event(q) < q->used->idx. + * + * To avoid unnessary irqs for each packet after + * virtio_get_used_event(q) < q->used->idx, last_used_idx_signaled is + * stored and irq is only triggered if + * last_used_idx_signaled <= virtio_get_used_event(q) < q->used->idx + * + * This is what lkl_vring_need_event() checks and it evens covers the + * case when those numbers wrap up. 
+ */ + if (send_irq || lkl_vring_need_event( + le16toh(virtio_get_used_event(q)), + virtio_get_used_idx(q), + q->last_used_idx_signaled)) + { + q->last_used_idx_signaled = virtio_get_used_idx(q); + virtio_deliver_irq(_req->dev); + } +} + +/* + * virtio_req_complete: handle finishing activities after processing request + * req: local virtio request buffer + * len: length of the data processed + */ +void virtio_req_complete(struct virtio_req* req, uint32_t len) +{ + packed_ring ? virtio_req_complete_packed(req, len) : + virtio_req_complete_split(req, len); +} + +/* + * virtio_process_one: Process one split queue at a time * dev: device structure pointer * qidx: queue index to be processed */ @@ -246,7 +451,7 @@ static int virtio_process_one_split(struct virtio_dev* dev, int qidx) struct _virtio_req _req = { .dev = dev, - .q = q, + .split.q = q, .idx = idx, }; @@ -255,42 +460,41 @@ static int virtio_process_one_split(struct virtio_dev* dev, int qidx) struct virtq_desc* desc = vring_desc_at_avail_idx(q, idx); do { - add_dev_buf_from_vring_desc(req, desc); + add_dev_buf_from_vring_desc_split(req, desc); if (q->max_merge_len && req->total_len > q->max_merge_len) break; - desc = get_next_desc(q, desc, &idx); + desc = get_next_desc_split(q, desc, &idx); } while (desc && req->buf_count < VIRTIO_REQ_MAX_BUFS); // Return result of enqueue operation return dev->ops->enqueue(dev, qidx, req); } +/* + * virtio_process_one: Process one packed queue at a time + * dev: device structure pointer + * qidx: queue index to be processed + */ static int virtio_process_one_packed(struct virtio_dev* dev, int qidx) { struct virtq_packed* q = &dev->queue[qidx]; - - /**Continue from here*/ - - uint16_t idx = q->last_avail_idx; + uint16_t idx = q->avail_desc_idx; struct _virtio_req _req = { .dev = dev, - .q = q, + .packed.q = q, .idx = idx, }; struct virtio_req* req = &_req.req; - memset(req, 0, sizeof(struct virtio_req)); - /**MODIFY HERE: - * struct virtq_packed_desc desc - */ - struct 
virtq_desc* desc = vring_desc_at_avail_idx(q, idx); + struct virtq_packed_desc* desc = q->desc[idx & (q->num - 1)]; do { - add_dev_buf_from_vring_desc(req, desc); + add_dev_buf_from_vring_desc_packed(req, desc); + // Do we need this if (q->max_merge_len && req->total_len > q->max_merge_len) break; - desc = get_next_desc(q, desc, &idx); + desc = get_next_desc_packed(q, desc, &idx); } while (desc && req->buf_count < VIRTIO_REQ_MAX_BUFS); // Return result of enqueue operation @@ -302,24 +506,18 @@ static inline void virtio_set_avail_event(struct virtq* q, uint16_t val) *((uint16_t*)&q->used->ring[q->num]) = val; } +/**Packed implementaiton: change this*/ void virtio_set_queue_max_merge_len(struct virtio_dev* dev, int q, int len) { dev->queue[q].max_merge_len = len; } /* - * virtio_process_queue : process all the requests in the specific queue + * virtio_process_queue : process all the requests in the specific split queue * dev: virtio device structure pointer * qidx: queue index to be processed * fd: disk file descriptor */ -void virtio_process_queue(struct virtio_dev* dev, uint32_t qidx) -{ - packed_ring ? 
virtio_process_queue_packed(dev, qidx) : - virtio_process_queue_split(dev, qidx); -} - - void virtio_process_queue_split(struct virtio_dev* dev, uint32_t qidx) { struct virtq* q = &dev->split.queue[qidx]; @@ -343,6 +541,12 @@ void virtio_process_queue_split(struct virtio_dev* dev, uint32_t qidx) dev->ops->release_queue(dev, qidx); } +/* + * virtio_process_queue : process all the requests in the specific packed queue + * dev: virtio device structure pointer + * qidx: queue index to be processed + * fd: disk file descriptor + */ void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) { struct virtq_packed* q = &dev->packed.queue[qidx]; @@ -367,10 +571,14 @@ void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) dev->ops->release_queue(dev, qidx); } -int packed_desc_is_avail(struct virtq_packed* q) +/* + * virtio_process_queue : process all the requests in the specific queue + * dev: virtio device structure pointer + * qidx: queue index to be processed + * fd: disk file descriptor + */ +void virtio_process_queue(struct virtio_dev* dev, uint32_t qidx) { - struct virtq_packed_desc* desc = q->desc[q->avail_desc_idx & (q->num -1)]; - uint16_t avail = desc->flags & KL_VRING_PACKED_DESC_F_AVAIL; - uint16_t used = desc->flags & KL_VRING_PACKED_DESC_F_USED; - return avail != used && avail == q->driver_wrap_counter; + packed_ring ? 
virtio_process_queue_packed(dev, qidx) : + virtio_process_queue_split(dev, qidx); } \ No newline at end of file diff --git a/src/host_interface/virtio_blkdev.c b/src/host_interface/virtio_blkdev.c index a69fd4465..c1cbeb4e9 100644 --- a/src/host_interface/virtio_blkdev.c +++ b/src/host_interface/virtio_blkdev.c @@ -52,6 +52,7 @@ static int blk_enqueue(struct virtio_dev* dev, int q, struct virtio_req* req) if (req->buf_count < 3) goto out; + h = req->buf[0].iov_base; t = req->buf[req->buf_count - 1].iov_base; diff --git a/src/include/host/virtio_dev.h b/src/include/host/virtio_dev.h index 9a508269b..3ea4a5052 100644 --- a/src/include/host/virtio_dev.h +++ b/src/include/host/virtio_dev.h @@ -82,7 +82,7 @@ struct virtio_dev uint32_t virtio_mmio_id; }; -void virtio_req_complete(struct virtio_req* req, uint32_t len); +void virtio_req_complete_split(struct virtio_req* req, uint32_t len); void virtio_process_queue(struct virtio_dev* dev, uint32_t qidx); void virtio_set_queue_max_merge_len(struct virtio_dev* dev, int q, int len); diff --git a/src/include/shared/virtio_ring_buff.h b/src/include/shared/virtio_ring_buff.h index 35b2c62a0..bfed24413 100644 --- a/src/include/shared/virtio_ring_buff.h +++ b/src/include/shared/virtio_ring_buff.h @@ -74,6 +74,8 @@ struct virtq_packed uint32_t num_max; _Atomic(uint32_t) ready; _Atomic(uint32_t) num; + uint32_t max_merge_len; + //Add supression flags where necessary _Atomic(struct virtq_packed_desc*) desc; bool device_wrap_counter; //Initialise to 1, flip when we change last descriptor as used From 6879c3da4a39cdb32f98d7aa5249154ea5a525ad Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Sun, 18 Apr 2021 17:55:28 +0100 Subject: [PATCH 03/27] Implement virtio_req_complete_packed and refactoring. 
--- src/host_interface/virtio.c | 120 +++++++++-------------------- src/host_interface/virtio_blkdev.c | 3 - src/host_interface/virtio_netdev.c | 7 +- 3 files changed, 42 insertions(+), 88 deletions(-) diff --git a/src/host_interface/virtio.c b/src/host_interface/virtio.c index e58ef5e52..366a32e5e 100644 --- a/src/host_interface/virtio.c +++ b/src/host_interface/virtio.c @@ -319,111 +319,59 @@ void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) { /** * Requirements for this: - * Marking an available desc as unavailable * Setting a single used desc for a descriptor chain * Ensuring the id of a used desc for a desc chain is the id of the last buffer in the chain * avail_desc_idx and used_desc_idx to be incremented and wrapped around as appropriate * changing the wrap counters when the above are wrapped around * - * Question: - * The below assumes non chained descriptors? - * But if we have chained descriptors + + This function only gets called either with chained descriptors, + or max_merge_len (which I assume would also be chained descriptors). + + I know this as for example it gets called from blk_enqueue, + whose request is chained, and the same with network device + (and I assume the same for console) */ int send_irq = 0; struct _virtio_req* _req = container_of(req, struct _virtio_req, req); struct virtq_packed* q = _req->packed.q; uint16_t avail_desc_idx = _req->idx; uint16_t used_desc_idx = q->used_desc_idx; - - /* - * We've potentially used up multiple (non-chained) descriptors and have - * to create one "used" entry for each descriptor we've consumed. 
- */ - - //Since this is all chained, just go to the last one, mark it as used, - //place it at the beginning - //I think the point is that it's not necessary to mark them all as used + uint16_t last_buffer_idx = avail_desc_idx+(req->buff_count-1); uint16_t used_len; if (!q->max_merge_len) used_len = len; else used_len = min_len(len, req->buf[req->buff_count-1].iov_len); - struct virtq_packed_desc* desc = q->desc[(avail_desc_idx+(req->buff_count-1)) & (q->num -1)]; - virtio_add_used_packed(q, q->used_desc_idx, used_len, desc->id); - - //Need to increment used and avail index, then see if I need the rest - //of this code - /* - for (int i = 0; i < req->buf_count; i++) - { + struct virtq_packed_desc* desc = q->desc[last_buffer_idx & (q->num -1)]; + virtio_add_used_packed(q, used_desc_idx, used_len, desc->id); + used_desc_idx += req->buff_count; + avail_desc_idx += req->buff_count; - //Need to make available index unavailable and specify used index as now used - - // Do I need to set avail_desc_idx and used_desc_idx back to 0 when necessary? - // Maybe not, because the driver handles it? - - //Should these be getting incremented? - //What about the thing where we move them forward by the number of descriptors? - //Based on how this will get called, unless max_merge_len is set, - //this will only be called for a chained descriptor not individual - //so this would mean we only need one used? And we get the id from buf_count-1? - //Maybe my assumption is wrong that we only have one used per chain? - //the spec says that but that doesn't sound reasonable - e.g. 
vrtio_blkdev.c - + if (used_desc_idx >= q->num) + { + used_desc_idx -= q->num; + q->device_wrap_counter = !q->device_wrap_counter; + } - len -= used_len; - if (!len) - break; - }*/ + if (avail_desc_idx >= q->num) + { + avail_desc_idx -= q->num; + q->driver_wrap_counter = !q->driver_wrap_counter; + send_irq = 1; + } - virtio_sync_used_idx(q, used_desc_idx); - q->last_avail_idx = avail_desc_idx; + // Don't think we need to synchronise used + q->used_desc_idx = used_desc_idx; + q->avail_desc_idx = avail_desc_idx; - /* - * Triggers the irq whenever there is no available buffer. - */ - if (q->last_avail_idx == le16toh(q->avail->idx)) - send_irq = 1; - /* - * There are two rings: q->avail and q->used for each of the rx and tx - * queues that are used to pass buffers between kernel driver and the - * virtio device implementation. - * - * Kernel maitains the first one and appends buffers to it. In rx queue, - * it's empty buffers kernel offers to store received packets. In tx - * queue, it's buffers containing packets to transmit. Kernel notifies - * the device by mmio write (see VIRTIO_MMIO_QUEUE_NOTIFY below). - * - * The virtio device (here in this file) maintains the - * q->used and appends buffer to it after consuming it from q->avail. - * - * The device needs to notify the driver by triggering irq here. The - * LKL_VIRTIO_RING_F_EVENT_IDX is enabled in this implementation so - * kernel can set virtio_get_used_event(q) to tell the device to "only - * trigger the irq when this item in q->used ring is populated." - * - * Because driver and device are run in two different threads. When - * driver sets virtio_get_used_event(q), q->used->idx may already be - * increased to a larger one. So we need to trigger the irq when - * virtio_get_used_event(q) < q->used->idx. 
- * - * To avoid unnessary irqs for each packet after - * virtio_get_used_event(q) < q->used->idx, last_used_idx_signaled is - * stored and irq is only triggered if - * last_used_idx_signaled <= virtio_get_used_event(q) < q->used->idx - * - * This is what lkl_vring_need_event() checks and it evens covers the - * case when those numbers wrap up. - */ - if (send_irq || lkl_vring_need_event( - le16toh(virtio_get_used_event(q)), - virtio_get_used_idx(q), - q->last_used_idx_signaled)) + /**TODO*/ + // Need to use event supression here - but in theory this should work + if (send_irq) { - q->last_used_idx_signaled = virtio_get_used_idx(q); virtio_deliver_irq(_req->dev); } } @@ -506,10 +454,14 @@ static inline void virtio_set_avail_event(struct virtq* q, uint16_t val) *((uint16_t*)&q->used->ring[q->num]) = val; } -/**Packed implementaiton: change this*/ -void virtio_set_queue_max_merge_len(struct virtio_dev* dev, int q, int len) +void virtio_set_queue_max_merge_len_split(struct virtio_dev* dev, int q, int len) +{ + dev->split.queue[q].max_merge_len = len; +} + +void virtio_set_queue_max_merge_len_packed(struct virtio_dev* dev, int q, int len) { - dev->queue[q].max_merge_len = len; + dev->packed.queue[q].max_merge_len = len; } /* diff --git a/src/host_interface/virtio_blkdev.c b/src/host_interface/virtio_blkdev.c index c1cbeb4e9..86d952762 100644 --- a/src/host_interface/virtio_blkdev.c +++ b/src/host_interface/virtio_blkdev.c @@ -36,9 +36,6 @@ static inline sgxlkl_host_disk_state_t* get_disk_config(uint8_t blkdev_id) /* * Virtio callback functions for processing virtio requests */ -/** - * Packed implementation: double check if I need to change this later - */ static int blk_enqueue(struct virtio_dev* dev, int q, struct virtio_req* req) { struct virtio_blk_outhdr* h; diff --git a/src/host_interface/virtio_netdev.c b/src/host_interface/virtio_netdev.c index 81b3585fb..e9e0e3a39 100644 --- a/src/host_interface/virtio_netdev.c +++ b/src/host_interface/virtio_netdev.c @@ 
-656,7 +656,12 @@ int netdev_init(sgxlkl_host_state_t* host_state) * there are available up to 64KB in total len. */ if (net_dev->dev.device_features & BIT(VIRTIO_NET_F_MRG_RXBUF)) - virtio_set_queue_max_merge_len(&net_dev->dev, RX_QUEUE_IDX, 65536); + { + if (!packed_ring) + virtio_set_queue_max_merge_len_split(&net_dev->dev, RX_QUEUE_IDX, 65536); + else + virtio_set_queue_max_merge_len_packed(&net_dev->dev, RX_QUEUE_IDX, 65536); + } /* Register the netdev fd */ register_net_device(net_dev, host_state->net_fd); From 81f7eb58705674beebf11eb48e3064c61067d3f1 Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Sun, 18 Apr 2021 20:59:21 +0100 Subject: [PATCH 04/27] Add packed ring in lkl/virtio.c. --- src/include/host/virtio_dev.h | 3 +-- src/include/lkl/virtio.h | 10 +++++++- src/lkl/virtio.c | 48 ++++++++++++++++++++++++++--------- 3 files changed, 46 insertions(+), 15 deletions(-) diff --git a/src/include/host/virtio_dev.h b/src/include/host/virtio_dev.h index 3ea4a5052..569e84ac6 100644 --- a/src/include/host/virtio_dev.h +++ b/src/include/host/virtio_dev.h @@ -60,14 +60,13 @@ struct virtio_dev uint64_t driver_features; _Atomic(uint32_t) driver_features_sel; _Atomic(uint32_t) queue_sel; - union { struct { struct virtq* queue; }split; struct { - struct virtq_packed* queue; //Not sure if I should keep the name as virtq_packed or use a similar structure in virtio_ring.c + struct virtq_packed* queue; }packed; }; uint32_t queue_notify; diff --git a/src/include/lkl/virtio.h b/src/include/lkl/virtio.h index 91f468c85..8f464f0f8 100644 --- a/src/include/lkl/virtio.h +++ b/src/include/lkl/virtio.h @@ -16,7 +16,15 @@ struct virtio_dev uint64_t driver_features; _Atomic(uint32_t) driver_features_sel; _Atomic(uint32_t) queue_sel; - struct virtq* queue; + union { + struct { + struct virtq* queue; + }split; + + struct { + struct virtq_packed* queue; + }packed; + }; uint32_t queue_notify; _Atomic(uint32_t) int_status; _Atomic(uint32_t) status; diff --git a/src/lkl/virtio.c 
b/src/lkl/virtio.c index fe9c99f3a..1b84f424d 100644 --- a/src/lkl/virtio.c +++ b/src/lkl/virtio.c @@ -26,6 +26,12 @@ #undef BIT #define BIT(x) (1ULL << x) +#ifdef PACKED_RING +bool packed_ring = true; +#else +bool packed_ring = false; +#endif + /* Used for notifying LKL for the list of virtio devices at bootup. * Currently block, network and console devices are passed */ char lkl_virtio_devs[4096]; @@ -123,10 +129,12 @@ static int virtio_read(void* data, int offset, void* res, int size) * host-write-once */ case VIRTIO_MMIO_QUEUE_NUM_MAX: - val = dev->queue[dev->queue_sel].num_max; + val = packed_ring ? dev->packed.queue[dev->queue_sel].num_max : + dev->split.queue[dev->queue_sel].num_max; break; case VIRTIO_MMIO_QUEUE_READY: - val = dev->queue[dev->queue_sel].ready; + val = packed_ring ? dev->packed.queue[dev->queue_sel].ready : + dev->split.queue[dev->queue_sel].ready; break; /* Security Review: dev->int_status is host-read-write */ case VIRTIO_MMIO_INTERRUPT_STATUS: @@ -258,8 +266,8 @@ static int virtio_write(void* data, int offset, void* res, int size) * structure). virtq desc and avail ring address handling is a special case. */ struct virtio_dev* dev = (struct virtio_dev*)data; - /* Security Review: dev->queue_sel should be host-read-only */ - struct virtq* q = &dev->queue[dev->queue_sel]; + struct virtq* split_q = packed_ring ? NULL : &dev->split.queue[dev->queue_sel]; + struct virtq_packed* packed_q = packed_ring ? &dev->packed.queue[dev->queue_sel] : NULL; uint32_t val; int ret = 0; @@ -307,12 +315,18 @@ static int virtio_write(void* data, int offset, void* res, int size) * host-read-only */ case VIRTIO_MMIO_QUEUE_NUM: - dev->queue[dev->queue_sel].num = val; + if (packed_ring) + dev->packed.queue[dev->queue_sel].num = val; + else + dev->split.queue[dev->queue_sel].num = val; break; /* Security Review: is dev->queue[dev->queue_sel].ready host-read-only? 
*/ case VIRTIO_MMIO_QUEUE_READY: - dev->queue[dev->queue_sel].ready = val; + if (packed_ring) + dev->packed.queue[dev->queue_sel].ready = val; + else + dev->split.queue[dev->queue_sel].ready = val; break; /* Security Review: guest virtio driver(s) writes to virtq desc ring and * avail ring in guest memory. In queue notify flow, we need to copy the @@ -346,10 +360,16 @@ static int virtio_write(void* data, int offset, void* res, int size) * be required. */ case VIRTIO_MMIO_QUEUE_DESC_LOW: - set_ptr_low((_Atomic(uint64_t)*)&q->desc, val); + if (packed_ring) + set_ptr_low((_Atomic(uint64_t)*)&packed_q->desc, val); + else + set_ptr_low((_Atomic(uint64_t)*)&split_q->desc, val); break; case VIRTIO_MMIO_QUEUE_DESC_HIGH: - set_ptr_high((_Atomic(uint64_t)*)&q->desc, val); + if (packed_ring) + set_ptr_high((_Atomic(uint64_t)*)&packed_q->desc, val); + else + set_ptr_high((_Atomic(uint64_t)*)&split_q->desc, val); break; /* Security Review: For Split Queue, q->avail link list content should be * host-read-only. The Split Queue implementaiton @@ -364,10 +384,12 @@ static int virtio_write(void* data, int offset, void* res, int size) * to it. */ case VIRTIO_MMIO_QUEUE_AVAIL_LOW: - set_ptr_low((_Atomic(uint64_t)*)&q->avail, val); + if (!packed_ring) + set_ptr_low((_Atomic(uint64_t)*)&split_q->avail, val); break; case VIRTIO_MMIO_QUEUE_AVAIL_HIGH: - set_ptr_high((_Atomic(uint64_t)*)&q->avail, val); + if (!packed_ring) + set_ptr_high((_Atomic(uint64_t)*)&split_q->avail, val); break; /* Security Review: For Split Queue, q->used link list content should be * guest-read-only. The Split Queue implementaiton in guest side virtio @@ -382,10 +404,12 @@ static int virtio_write(void* data, int offset, void* res, int size) * functionality. 
*/ case VIRTIO_MMIO_QUEUE_USED_LOW: - set_ptr_low((_Atomic(uint64_t)*)&q->used, val); + if (!packed_ring) + set_ptr_low((_Atomic(uint64_t)*)&split_q->used, val); break; case VIRTIO_MMIO_QUEUE_USED_HIGH: - set_ptr_high((_Atomic(uint64_t)*)&q->used, val); + if (!packed_ring) + set_ptr_high((_Atomic(uint64_t)*)&split_q->used, val); break; default: ret = -1; From 2aa85f513ea1c939aba337ae6f3cf0e980668dce Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Sun, 18 Apr 2021 21:05:44 +0000 Subject: [PATCH 05/27] Fix typos and refactor. --- src/host_interface/virtio.c | 57 +++++++++++++++++------------- src/host_interface/virtio_blkdev.c | 2 +- src/host_interface/virtio_netdev.c | 5 +-- src/include/host/virtio_dev.h | 2 +- src/lkl/virtio.c | 9 +++++ 5 files changed, 44 insertions(+), 31 deletions(-) diff --git a/src/host_interface/virtio.c b/src/host_interface/virtio.c index 366a32e5e..153e2d903 100644 --- a/src/host_interface/virtio.c +++ b/src/host_interface/virtio.c @@ -20,10 +20,10 @@ struct _virtio_req { union { struct { - virtq* q; + struct virtq* q; }split; struct { - virtq* q; + struct virtq_packed* q; }packed; }; struct virtio_req req; @@ -36,12 +36,12 @@ struct _virtio_req * packed_desc_is_avail: Check if the current descriptor * the driver is expected to fill in is available * q: pointer to a packed virtio queue + * desc: pointer to the vring descriptor */ -static int packed_desc_is_avail(struct virtq_packed* q) +static int packed_desc_is_avail(struct virtq_packed *q, struct virtq_packed_desc* desc) { - struct virtq_packed_desc* desc = q->desc[q->avail_desc_idx & (q->num -1)]; - uint16_t avail = desc->flags & KL_VRING_PACKED_DESC_F_AVAIL; - uint16_t used = desc->flags & KL_VRING_PACKED_DESC_F_USED; + uint16_t avail = desc->flags & LKL_VRING_PACKED_DESC_F_AVAIL; + uint16_t used = desc->flags & LKL_VRING_PACKED_DESC_F_USED; return avail != used && avail == q->driver_wrap_counter; } @@ -136,8 +136,8 @@ static struct virtq_packed_desc* get_next_desc_packed( { if 
(++(*idx) == q->num_max) return NULL; - struct virtq_packed_desc* next_desc = q->desc[*idx & (q->num-1)]; - packed_desc_is_avail(next_desc) ? next_desc : NULL; + struct virtq_packed_desc* next_desc = &q->desc[*idx & (q->num-1)]; + packed_desc_is_avail(q,next_desc) ? next_desc : NULL; } if (!(desc->flags & LKL_VRING_DESC_F_NEXT)) @@ -172,7 +172,7 @@ static inline void virtio_add_used_packed( uint32_t len, uint16_t id) { - struct virtq_packed_desc* desc = q->desc[used_idx & (q->num -1)]; + struct virtq_packed_desc* desc = &q->desc[used_idx & (q->num -1)]; desc->id = id; desc->len = htole32(len); desc->flags |= @@ -233,13 +233,13 @@ static inline void virtio_deliver_irq(struct virtio_dev* dev) * req: local virtio request buffer * len: length of the data processed */ -void virtio_req_complete_split(struct virtio_req* req, uint32_t len) +static void virtio_req_complete_split(struct virtio_req* req, uint32_t len) { int send_irq = 0; struct _virtio_req* _req = container_of(req, struct _virtio_req, req); struct virtq* q = _req->split.q; uint16_t avail_idx = _req->idx; - uint16_t used_idx = virtio_get_used_idx(_req->split.q); + uint16_t used_idx = virtio_get_used_idx(q); /* * We've potentially used up multiple (non-chained) descriptors and have @@ -315,7 +315,7 @@ void virtio_req_complete_split(struct virtio_req* req, uint32_t len) * req: local virtio request buffer * len: length of the data processed */ -void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) +static void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) { /** * Requirements for this: @@ -337,18 +337,19 @@ void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) struct virtq_packed* q = _req->packed.q; uint16_t avail_desc_idx = _req->idx; uint16_t used_desc_idx = q->used_desc_idx; - uint16_t last_buffer_idx = avail_desc_idx+(req->buff_count-1); + uint16_t last_buffer_idx = avail_desc_idx+(req->buf_count-1); uint16_t used_len; + if (!q->max_merge_len) used_len = 
len; else - used_len = min_len(len, req->buf[req->buff_count-1].iov_len); + used_len = min_len(len, req->buf[req->buf_count-1].iov_len); - struct virtq_packed_desc* desc = q->desc[last_buffer_idx & (q->num -1)]; + struct virtq_packed_desc* desc = &q->desc[last_buffer_idx & (q->num -1)]; virtio_add_used_packed(q, used_desc_idx, used_len, desc->id); - used_desc_idx += req->buff_count; - avail_desc_idx += req->buff_count; + used_desc_idx += req->buf_count; + avail_desc_idx += req->buf_count; if (used_desc_idx >= q->num) { @@ -394,7 +395,7 @@ void virtio_req_complete(struct virtio_req* req, uint32_t len) */ static int virtio_process_one_split(struct virtio_dev* dev, int qidx) { - struct virtq* q = &dev->queue[qidx]; + struct virtq* q = &dev->split.queue[qidx]; uint16_t idx = q->last_avail_idx; struct _virtio_req _req = { @@ -425,7 +426,7 @@ static int virtio_process_one_split(struct virtio_dev* dev, int qidx) */ static int virtio_process_one_packed(struct virtio_dev* dev, int qidx) { - struct virtq_packed* q = &dev->queue[qidx]; + struct virtq_packed* q = &dev->packed.queue[qidx]; uint16_t idx = q->avail_desc_idx; struct _virtio_req _req = { @@ -435,7 +436,7 @@ static int virtio_process_one_packed(struct virtio_dev* dev, int qidx) }; struct virtio_req* req = &_req.req; - struct virtq_packed_desc* desc = q->desc[idx & (q->num - 1)]; + struct virtq_packed_desc* desc = &q->desc[idx & (q->num - 1)]; do { add_dev_buf_from_vring_desc_packed(req, desc); @@ -454,23 +455,29 @@ static inline void virtio_set_avail_event(struct virtq* q, uint16_t val) *((uint16_t*)&q->used->ring[q->num]) = val; } -void virtio_set_queue_max_merge_len_split(struct virtio_dev* dev, int q, int len) +static void virtio_set_queue_max_merge_len_split(struct virtio_dev* dev, int q, int len) { dev->split.queue[q].max_merge_len = len; } -void virtio_set_queue_max_merge_len_packed(struct virtio_dev* dev, int q, int len) +static void virtio_set_queue_max_merge_len_packed(struct virtio_dev* dev, int q, int 
len) { dev->packed.queue[q].max_merge_len = len; } +void virtio_set_queue_max_merge_len(struct virtio_dev* dev, int q, int len) +{ + packed_ring ? virtio_set_queue_max_merge_len_packed(dev, q, len) : + virtio_set_queue_max_merge_len_split(dev, q, len); +} + /* * virtio_process_queue : process all the requests in the specific split queue * dev: virtio device structure pointer * qidx: queue index to be processed * fd: disk file descriptor */ -void virtio_process_queue_split(struct virtio_dev* dev, uint32_t qidx) +static void virtio_process_queue_split(struct virtio_dev* dev, uint32_t qidx) { struct virtq* q = &dev->split.queue[qidx]; @@ -499,7 +506,7 @@ void virtio_process_queue_split(struct virtio_dev* dev, uint32_t qidx) * qidx: queue index to be processed * fd: disk file descriptor */ -void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) +static void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) { struct virtq_packed* q = &dev->packed.queue[qidx]; @@ -510,7 +517,7 @@ void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) dev->ops->acquire_queue(dev, qidx); // Have some loop that keeps going until we hit a desc that's not available - while (packed_desc_is_avail(q)) + while (packed_desc_is_avail(q,&q->desc[q->avail_desc_idx & (q->num-1)])) { // Need to process desc here // Possible make some process_one_packed diff --git a/src/host_interface/virtio_blkdev.c b/src/host_interface/virtio_blkdev.c index 86d952762..be7da085a 100644 --- a/src/host_interface/virtio_blkdev.c +++ b/src/host_interface/virtio_blkdev.c @@ -118,7 +118,7 @@ int blk_device_init( if (!packed_ring) vq_size = HOST_BLK_DEV_NUM_QUEUES * sizeof(struct virtq); else - vq_sze = HOST_BLK_DEV_NUM_QUEUES * sizeof(struct virtq_packed); + vq_size = HOST_BLK_DEV_NUM_QUEUES * sizeof(struct virtq_packed); /*Allocate memory for block device*/ bdev_size = next_pow2(bdev_size); diff --git a/src/host_interface/virtio_netdev.c b/src/host_interface/virtio_netdev.c 
index e9e0e3a39..c82980081 100644 --- a/src/host_interface/virtio_netdev.c +++ b/src/host_interface/virtio_netdev.c @@ -657,10 +657,7 @@ int netdev_init(sgxlkl_host_state_t* host_state) */ if (net_dev->dev.device_features & BIT(VIRTIO_NET_F_MRG_RXBUF)) { - if (!packed_ring) - virtio_set_queue_max_merge_len_split(&net_dev->dev, RX_QUEUE_IDX, 65536); - else - virtio_set_queue_max_merge_len_packed(&net_dev->dev, RX_QUEUE_IDX, 65536); + virtio_set_queue_max_merge_len(&net_dev->dev, RX_QUEUE_IDX, 65536); } /* Register the netdev fd */ diff --git a/src/include/host/virtio_dev.h b/src/include/host/virtio_dev.h index 569e84ac6..39939a2bb 100644 --- a/src/include/host/virtio_dev.h +++ b/src/include/host/virtio_dev.h @@ -81,7 +81,7 @@ struct virtio_dev uint32_t virtio_mmio_id; }; -void virtio_req_complete_split(struct virtio_req* req, uint32_t len); +void virtio_req_complete(struct virtio_req* req, uint32_t len); void virtio_process_queue(struct virtio_dev* dev, uint32_t qidx); void virtio_set_queue_max_merge_len(struct virtio_dev* dev, int q, int len); diff --git a/src/lkl/virtio.c b/src/lkl/virtio.c index 1b84f424d..9adab9aa1 100644 --- a/src/lkl/virtio.c +++ b/src/lkl/virtio.c @@ -32,6 +32,10 @@ bool packed_ring = true; bool packed_ring = false; #endif +#ifdef DEBUG +#include +#endif + /* Used for notifying LKL for the list of virtio devices at bootup. * Currently block, network and console devices are passed */ char lkl_virtio_devs[4096]; @@ -93,6 +97,11 @@ static int virtio_read(void* data, int offset, void* res, int size) uint32_t val = 0; struct virtio_dev* dev = (struct virtio_dev*)data; + if (packed_ring) + { + oe_host_printf("This is a packed ring\n"); + } + if (offset >= VIRTIO_MMIO_CONFIG) { offset -= VIRTIO_MMIO_CONFIG; From 120847581461d33a9e44478b62fef95200090e25 Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Sun, 18 Apr 2021 22:11:55 +0000 Subject: [PATCH 06/27] Add PACKED_RING macro. 
--- config.mak | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/config.mak b/config.mak index ab9481853..9fffc6c86 100755 --- a/config.mak +++ b/config.mak @@ -3,6 +3,9 @@ # Create a DEBUG build of SGX-LKL DEBUG ?= false +# Use the packed ring implementation of virtio +PACKED_RING ?= false + # Turn on debug tracing for LKL LKL_DEBUG ?= false @@ -124,6 +127,10 @@ else CMAKE_BUILD_TYPE=Release endif +ifeq ($(PACKED_RING),true) + SGXLKL_CFLAGS_EXTRA += -DPACKED_RING +endif + # OpenEnclave OE_SUBMODULE := $(SGXLKL_ROOT)/openenclave OE_SDK_ROOT_DEFAULT := $(BUILD_DIR)/openenclave From 3e2d21354ccebb1ca0c4b29af9779fe6b42842f8 Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Tue, 20 Apr 2021 19:34:26 +0100 Subject: [PATCH 07/27] Fix bug in packed_desc_is_avail. --- src/host_interface/virtio.c | 4 ++-- src/include/shared/virtio_ring_buff.h | 8 ++++++++ 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/host_interface/virtio.c b/src/host_interface/virtio.c index 153e2d903..f3de16bcc 100644 --- a/src/host_interface/virtio.c +++ b/src/host_interface/virtio.c @@ -40,8 +40,8 @@ static int packed_desc_is_avail(struct virtq_packed *q, struct virtq_packed_desc */ static int packed_desc_is_avail(struct virtq_packed *q, struct virtq_packed_desc* desc) { - uint16_t avail = desc->flags & LKL_VRING_PACKED_DESC_F_AVAIL; - uint16_t used = desc->flags & LKL_VRING_PACKED_DESC_F_USED; + uint16_t avail = desc->flags & (1 << LKL_VRING_PACKED_DESC_F_AVAIL); + uint16_t used = desc->flags & (1 << LKL_VRING_PACKED_DESC_F_USED); return avail != used && avail == q->driver_wrap_counter; } diff --git a/src/include/shared/virtio_ring_buff.h b/src/include/shared/virtio_ring_buff.h index bfed24413..946656fa1 100644 --- a/src/include/shared/virtio_ring_buff.h +++ b/src/include/shared/virtio_ring_buff.h @@ -69,6 +69,12 @@ struct virtq_packed_desc uint16_t flags; }; +struct virtq_packed_desc_event +{ + uint16_t off_wrap; + uint16_t flags; +}; + struct virtq_packed { uint32_t num_max; @@ -78,6 +84,8 @@ struct
virtq_packed //Add supression flags where necessary _Atomic(struct virtq_packed_desc*) desc; + //struct virtq_packed_desc_event driver; + //struct virtq_packed_desc_event device; bool device_wrap_counter; //Initialise to 1, flip when we change last descriptor as used bool driver_wrap_counter; //Initialise to 1 and flip when when avail_desc_idx becomes greater than queue and we need to wrap around it uint16_t avail_desc_idx; //We increment this for each avail event we process From bcf19cfa3205e2e89784babcfac723b9bd956461 Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Tue, 20 Apr 2021 20:15:19 +0100 Subject: [PATCH 08/27] Add device and driver events into packed vring and initialise in virtio devices. --- src/host_interface/virtio.c | 2 ++ src/host_interface/virtio_blkdev.c | 28 +++++++++++++++++++++++++++ src/host_interface/virtio_console.c | 27 ++++++++++++++++++++++++++ src/host_interface/virtio_netdev.c | 27 ++++++++++++++++++++++++++ src/include/shared/virtio_ring_buff.h | 9 +++++++-- 5 files changed, 91 insertions(+), 2 deletions(-) diff --git a/src/host_interface/virtio.c b/src/host_interface/virtio.c index f3de16bcc..620b2501f 100644 --- a/src/host_interface/virtio.c +++ b/src/host_interface/virtio.c @@ -371,6 +371,7 @@ static void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) /**TODO*/ // Need to use event supression here - but in theory this should work + // Read from driver event if (send_irq) { virtio_deliver_irq(_req->dev); @@ -516,6 +517,7 @@ static void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) if (dev->ops->acquire_queue) dev->ops->acquire_queue(dev, qidx); + // TODO - might need to check driver and see if we need to process a specific descriptor // Have some loop that keeps going until we hit a desc that's not available while (packed_desc_is_avail(q,&q->desc[q->avail_desc_idx & (q->num-1)])) { diff --git a/src/host_interface/virtio_blkdev.c b/src/host_interface/virtio_blkdev.c index be7da085a..c07db7f62 
100644 --- a/src/host_interface/virtio_blkdev.c +++ b/src/host_interface/virtio_blkdev.c @@ -113,6 +113,7 @@ int blk_device_init( void* vq_mem = NULL; struct virtio_blk_dev* host_blk_device = NULL; size_t bdev_size = sizeof(struct virtio_blk_dev); + size_t event_size = sizeof(struct virtq_packed_desc_event); size_t vq_size; if (!packed_ring) @@ -167,6 +168,33 @@ int blk_device_init( host_blk_device->dev.packed.queue[i].num_max = HOST_BLK_DEV_QUEUE_DEPTH; host_blk_device->dev.packed.queue[i].device_wrap_counter = true; host_blk_device->dev.packed.queue[i].driver_wrap_counter = true; + host_blk_device->dev.packed.queue[i].driver = mmap( + 0, + event_size, + PROT_READ, + MAP_SHARED | MAP_ANONYMOUS, + -1, + 0 + ); + if (!host_blk_device->dev.packed.queue[i].driver) + { + sgxlkl_host_fail("%s: block device queue descriptor event allocation failed\n", __func__); + return -1; + } + host_blk_device->dev.packed.queue[i].device = mmap( + 0, + event_size, + PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, + -1, + 0 + ); + if (!host_blk_device->dev.packed.queue[i].device) + { + sgxlkl_host_fail("%s: block device queue descriptor event allocation failed\n", __func__); + return -1; + } + host_blk_device->dev.packed.queue[i].device->flags = LKL_VRING_PACKED_EVENT_FLAG_ENABLE; } } diff --git a/src/host_interface/virtio_console.c b/src/host_interface/virtio_console.c index 4a6e1522d..d54cc7023 100644 --- a/src/host_interface/virtio_console.c +++ b/src/host_interface/virtio_console.c @@ -297,6 +297,33 @@ int virtio_console_init(sgxlkl_host_state_t* host_state, host_dev_config_t* cfg) dev->packed.queue[i].num_max = QUEUE_DEPTH; dev->packed.queue[i].device_wrap_counter = true; dev->packed.queue[i].driver_wrap_counter = true; + dev->packed.queue[i].driver = mmap( + 0, + event_size, + PROT_READ, + MAP_SHARED | MAP_ANONYMOUS, + -1, + 0 + ); + if (!dev->packed.queue[i].driver) + { + sgxlkl_host_fail("%s: block device queue descriptor event allocation failed\n", __func__); + return -1; + } + 
dev->packed.queue[i].device = mmap( + 0, + event_size, + PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, + -1, + 0 + ); + if (!dev->packed.queue[i].device) + { + sgxlkl_host_fail("%s: block device queue descriptor event allocation failed\n", __func__); + return -1; + } + dev->packed.queue[i].device->flags = LKL_VRING_PACKED_EVENT_FLAG_ENABLE; } } diff --git a/src/host_interface/virtio_netdev.c b/src/host_interface/virtio_netdev.c index c82980081..175818309 100644 --- a/src/host_interface/virtio_netdev.c +++ b/src/host_interface/virtio_netdev.c @@ -616,6 +616,33 @@ int netdev_init(sgxlkl_host_state_t* host_state) net_dev->dev.packed.queue[i].num_max = QUEUE_DEPTH; net_dev->dev.packed.queue[i].device_wrap_counter = 1; net_dev->dev.packed.queue[i].driver_wrap_counter = 1; + net_dev->dev.packed.queue[i].driver = mmap( + 0, + event_size, + PROT_READ, + MAP_SHARED | MAP_ANONYMOUS, + -1, + 0 + ); + if (!net_dev->dev.packed.queue[i].driver) + { + sgxlkl_host_fail("%s: block device queue descriptor event allocation failed\n", __func__); + return -1; + } + net_dev->dev.packed.queue[i].device = mmap( + 0, + event_size, + PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, + -1, + 0 + ); + if (!net_dev->dev.packed.queue[i].device) + { + sgxlkl_host_fail("%s: block device queue descriptor event allocation failed\n", __func__); + return -1; + } + net_dev->dev.packed.queue[i].device->flags = LKL_VRING_PACKED_EVENT_FLAG_ENABLE; } } diff --git a/src/include/shared/virtio_ring_buff.h b/src/include/shared/virtio_ring_buff.h index 946656fa1..3b41e21a8 100644 --- a/src/include/shared/virtio_ring_buff.h +++ b/src/include/shared/virtio_ring_buff.h @@ -13,6 +13,11 @@ #define LKL_VRING_PACKED_DESC_F_AVAIL 7 #define LKL_VRING_PACKED_DESC_F_USED 15 +#define LKL_VRING_PACKED_EVENT_FLAG_ENABLE 0x0 +#define LKL_VRING_PACKED_EVENT_FLAG_DISABLE 0x1 +#define LKL_VRING_PACKED_EVENT_FLAG_DESC 0x2 + + struct virtq_desc { /* Address (guest-physical). 
*/ @@ -84,8 +89,8 @@ struct virtq_packed //Add supression flags where necessary _Atomic(struct virtq_packed_desc*) desc; - //struct virtq_packed_desc_event driver; - //struct virtq_packed_desc_event device; + struct virtq_packed_desc_event* driver; + struct virtq_packed_desc_event* device; bool device_wrap_counter; //Initialise to 1, flip when we change last descriptor as used bool driver_wrap_counter; //Initialise to 1 and flip when when avail_desc_idx becomes greater than queue and we need to wrap around it uint16_t avail_desc_idx; //We increment this for each avail event we process From 7cfaebc4290e1f918fef238436b21c5902524601 Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Tue, 20 Apr 2021 20:21:10 +0100 Subject: [PATCH 09/27] Round queue desc event size to next pow 2. --- src/host_interface/virtio_blkdev.c | 2 +- src/host_interface/virtio_console.c | 1 + src/host_interface/virtio_netdev.c | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/host_interface/virtio_blkdev.c b/src/host_interface/virtio_blkdev.c index c07db7f62..2778eab77 100644 --- a/src/host_interface/virtio_blkdev.c +++ b/src/host_interface/virtio_blkdev.c @@ -113,7 +113,7 @@ int blk_device_init( void* vq_mem = NULL; struct virtio_blk_dev* host_blk_device = NULL; size_t bdev_size = sizeof(struct virtio_blk_dev); - size_t event_size = sizeof(struct virtq_packed_desc_event); + size_t event_size = next_pow2(sizeof(struct virtq_packed_desc_event)); size_t vq_size; if (!packed_ring) diff --git a/src/host_interface/virtio_console.c b/src/host_interface/virtio_console.c index d54cc7023..a477fb1f6 100644 --- a/src/host_interface/virtio_console.c +++ b/src/host_interface/virtio_console.c @@ -228,6 +228,7 @@ int virtio_console_init(sgxlkl_host_state_t* host_state, host_dev_config_t* cfg) void* console_vq_mem = NULL; size_t host_console_size = next_pow2(sizeof(struct virtio_console_dev)); + size_t event_size = next_pow2(sizeof(struct virtq_packed_desc_event)); size_t console_vq_size; 
if (!packed_ring) diff --git a/src/host_interface/virtio_netdev.c b/src/host_interface/virtio_netdev.c index 175818309..15c1cb451 100644 --- a/src/host_interface/virtio_netdev.c +++ b/src/host_interface/virtio_netdev.c @@ -551,6 +551,7 @@ int netdev_init(sgxlkl_host_state_t* host_state) mac[0] &= 0xfe; size_t host_netdev_size = next_pow2(sizeof(struct virtio_net_dev)); + size_t event_size = next_pow2(sizeof(struct virtq_packed_desc_event)); if (!packed_ring) netdev_vq_size = NUM_QUEUES * sizeof(struct virtq); From 1dd6818dcd915b56ad348521533b49fb3d057f02 Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Tue, 20 Apr 2021 22:36:49 +0100 Subject: [PATCH 10/27] Refactor and fix bugs. --- src/host_interface/virtio.c | 19 ++++++++++++++++--- src/host_interface/virtio_blkdev.c | 4 ++-- src/host_interface/virtio_console.c | 4 ++-- src/include/shared/virtio_ring_buff.h | 4 ++-- 4 files changed, 22 insertions(+), 9 deletions(-) diff --git a/src/host_interface/virtio.c b/src/host_interface/virtio.c index 620b2501f..9f0b755b8 100644 --- a/src/host_interface/virtio.c +++ b/src/host_interface/virtio.c @@ -7,6 +7,7 @@ #include #include #include +#include <stdio.h> #define min_len(a, b) (a < b ? a : b) @@ -40,8 +41,9 @@ struct _virtio_req */ static int packed_desc_is_avail(struct virtq_packed *q, struct virtq_packed_desc* desc) { - uint16_t avail = desc->flags & (1 << LKL_VRING_PACKED_DESC_F_AVAIL); - uint16_t used = desc->flags & (1 << LKL_VRING_PACKED_DESC_F_USED); + bool avail = !!(desc->flags & (1 << LKL_VRING_PACKED_DESC_F_AVAIL)); + bool used = !!(desc->flags & (1 << LKL_VRING_PACKED_DESC_F_USED)); + printf("Flags is %d %d %d %d\n", desc->flags, avail, used, q->driver_wrap_counter); return avail != used && avail == q->driver_wrap_counter; } @@ -340,6 +342,8 @@ static void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) uint16_t last_buffer_idx = avail_desc_idx+(req->buf_count-1); uint16_t used_len; + printf("Request complete.
%d\n", q->num); + if (!q->max_merge_len) used_len = len; else @@ -361,6 +365,7 @@ static void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) { avail_desc_idx -= q->num; q->driver_wrap_counter = !q->driver_wrap_counter; + //printf("Changing wrapper\n"); send_irq = 1; } @@ -372,8 +377,10 @@ static void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) /**TODO*/ // Need to use event supression here - but in theory this should work // Read from driver event - if (send_irq) + if (send_irq && (q->driver->flags == LKL_VRING_PACKED_EVENT_FLAG_ENABLE || + q->driver->flags == LKL_VRING_PACKED_EVENT_FLAG_DESC)) { + //printf("Delivering irq\n"); virtio_deliver_irq(_req->dev); } } @@ -517,10 +524,15 @@ static void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) if (dev->ops->acquire_queue) dev->ops->acquire_queue(dev, qidx); + printf("Processing queue %d %d %d %d\n", q->avail_desc_idx, q->num, q->desc[q->avail_desc_idx].flags, q->driver->flags); + + q->device->flags = LKL_VRING_PACKED_EVENT_FLAG_DISABLE; + // TODO - might need to check driver and see if we need to process a specific descriptor // Have some loop that keeps going until we hit a desc that's not available while (packed_desc_is_avail(q,&q->desc[q->avail_desc_idx & (q->num-1)])) { + printf("Processing now\n"); // Need to process desc here // Possible make some process_one_packed // Question is what else do I include in this statement @@ -528,6 +540,7 @@ static void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) break; } + q->device->flags = LKL_VRING_PACKED_EVENT_FLAG_ENABLE; if (dev->ops->release_queue) dev->ops->release_queue(dev, qidx); } diff --git a/src/host_interface/virtio_blkdev.c b/src/host_interface/virtio_blkdev.c index 2778eab77..0a40a014a 100644 --- a/src/host_interface/virtio_blkdev.c +++ b/src/host_interface/virtio_blkdev.c @@ -166,8 +166,8 @@ int blk_device_init( else { host_blk_device->dev.packed.queue[i].num_max = 
HOST_BLK_DEV_QUEUE_DEPTH; - host_blk_device->dev.packed.queue[i].device_wrap_counter = true; - host_blk_device->dev.packed.queue[i].driver_wrap_counter = true; + host_blk_device->dev.packed.queue[i].device_wrap_counter = 1; + host_blk_device->dev.packed.queue[i].driver_wrap_counter = 1; host_blk_device->dev.packed.queue[i].driver = mmap( 0, event_size, diff --git a/src/host_interface/virtio_console.c b/src/host_interface/virtio_console.c index a477fb1f6..c695cac39 100644 --- a/src/host_interface/virtio_console.c +++ b/src/host_interface/virtio_console.c @@ -296,8 +296,8 @@ int virtio_console_init(sgxlkl_host_state_t* host_state, host_dev_config_t* cfg) else { dev->packed.queue[i].num_max = QUEUE_DEPTH; - dev->packed.queue[i].device_wrap_counter = true; - dev->packed.queue[i].driver_wrap_counter = true; + dev->packed.queue[i].device_wrap_counter = 1; + dev->packed.queue[i].driver_wrap_counter = 1; dev->packed.queue[i].driver = mmap( 0, event_size, diff --git a/src/include/shared/virtio_ring_buff.h b/src/include/shared/virtio_ring_buff.h index 3b41e21a8..81c09018c 100644 --- a/src/include/shared/virtio_ring_buff.h +++ b/src/include/shared/virtio_ring_buff.h @@ -89,8 +89,8 @@ struct virtq_packed //Add supression flags where necessary _Atomic(struct virtq_packed_desc*) desc; - struct virtq_packed_desc_event* driver; - struct virtq_packed_desc_event* device; + _Atomic(struct virtq_packed_desc_event*) driver; + _Atomic(struct virtq_packed_desc_event*) device; bool device_wrap_counter; //Initialise to 1, flip when we change last descriptor as used bool driver_wrap_counter; //Initialise to 1 and flip when when avail_desc_idx becomes greater than queue and we need to wrap around it uint16_t avail_desc_idx; //We increment this for each avail event we process From f5ac4524a3a4bda462face272006c8090aca041a Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Sun, 25 Apr 2021 13:01:14 +0000 Subject: [PATCH 11/27] Add unprocessed used desc in packed virtq. 
--- src/host_interface/virtio.c | 61 +++++++++++++++++++++++---- src/host_interface/virtio_blkdev.c | 1 + src/host_interface/virtio_console.c | 1 + src/host_interface/virtio_netdev.c | 1 + src/include/shared/virtio_ring_buff.h | 1 + 5 files changed, 57 insertions(+), 8 deletions(-) diff --git a/src/host_interface/virtio.c b/src/host_interface/virtio.c index 9f0b755b8..4524387d9 100644 --- a/src/host_interface/virtio.c +++ b/src/host_interface/virtio.c @@ -17,6 +17,10 @@ bool packed_ring = true; bool packed_ring = false; #endif +#ifdef DEBUG +static int counter = 0; +#endif + struct _virtio_req { union { @@ -43,7 +47,10 @@ static int packed_desc_is_avail(struct virtq_packed *q, struct virtq_packed_desc { bool avail = !!(desc->flags & (1 << LKL_VRING_PACKED_DESC_F_AVAIL)); bool used = !!(desc->flags & (1 << LKL_VRING_PACKED_DESC_F_USED)); - printf("Flags is %d %d %d %d\n", desc->flags, avail, used, q->driver_wrap_counter); +#ifdef DEBUG + if (counter < 5) + printf("Flags is %d %d %d %d\n", desc->flags, avail, used, q->driver_wrap_counter); +#endif return avail != used && avail == q->driver_wrap_counter; } @@ -342,7 +349,9 @@ static void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) uint16_t last_buffer_idx = avail_desc_idx+(req->buf_count-1); uint16_t used_len; - printf("Request complete. %d\n", q->num); +#ifdef DEBUG + printf("\nRequest complete. 
queue num: %d, flags: %d, driver flag: %d, req avail index: %d, ring avail index: %d, ring used index: %d, counter: %d\n", q->num, q->desc[avail_desc_idx].flags, q->driver->flags, avail_desc_idx, q->avail_desc_idx, used_desc_idx, counter); +#endif if (!q->max_merge_len) used_len = len; @@ -365,7 +374,9 @@ static void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) { avail_desc_idx -= q->num; q->driver_wrap_counter = !q->driver_wrap_counter; - //printf("Changing wrapper\n"); +#ifdef DEBUG + printf("Changing wrapper\n"); +#endif send_irq = 1; } @@ -380,9 +391,14 @@ static void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) if (send_irq && (q->driver->flags == LKL_VRING_PACKED_EVENT_FLAG_ENABLE || q->driver->flags == LKL_VRING_PACKED_EVENT_FLAG_DESC)) { - //printf("Delivering irq\n"); +#ifdef DEBUG + printf("Delivering irq\n"); +#endif virtio_deliver_irq(_req->dev); + q->unprocessed_used_desc = false; } + else + q->unprocessed_used_desc = true; } /* @@ -516,23 +532,42 @@ static void virtio_process_queue_split(struct virtio_dev* dev, uint32_t qidx) */ static void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) { +#ifdef DEBUG + counter++; +#endif struct virtq_packed* q = &dev->packed.queue[qidx]; + bool looped_once = false; if (!q->ready) return; if (dev->ops->acquire_queue) dev->ops->acquire_queue(dev, qidx); - - printf("Processing queue %d %d %d %d\n", q->avail_desc_idx, q->num, q->desc[q->avail_desc_idx].flags, q->driver->flags); - +#ifdef DEBUG + if (counter) + { + printf("The qidx: \n"); + printf( + "Processing queue %d %d %d %d\n", + q->avail_desc_idx, + q->num, + q->desc[q->avail_desc_idx].flags, + q->driver->flags); + printf("Rest: \n"); + + for (int i = 0; i < q->num; i++) + { + printf("desc num: %d Flag: %d\n",i, q->desc[i].flags); + } + } +#endif q->device->flags = LKL_VRING_PACKED_EVENT_FLAG_DISABLE; // TODO - might need to check driver and see if we need to process a specific descriptor // Have some 
loop that keeps going until we hit a desc that's not available while (packed_desc_is_avail(q,&q->desc[q->avail_desc_idx & (q->num-1)])) { - printf("Processing now\n"); + looped_once = true; // Need to process desc here // Possible make some process_one_packed // Question is what else do I include in this statement @@ -540,6 +575,16 @@ static void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) break; } + // Driver must be awaiting some used desc responses we've not yet sent + if (looped_once && q->unprocessed_used_desc) + { +#ifdef DEBUG + printf("Delivering irq special\n"); +#endif + virtio_deliver_irq(dev); + q->unprocessed_used_desc = true; + } + q->device->flags = LKL_VRING_PACKED_EVENT_FLAG_ENABLE; if (dev->ops->release_queue) dev->ops->release_queue(dev, qidx); diff --git a/src/host_interface/virtio_blkdev.c b/src/host_interface/virtio_blkdev.c index 0a40a014a..d9bea22cf 100644 --- a/src/host_interface/virtio_blkdev.c +++ b/src/host_interface/virtio_blkdev.c @@ -168,6 +168,7 @@ int blk_device_init( host_blk_device->dev.packed.queue[i].num_max = HOST_BLK_DEV_QUEUE_DEPTH; host_blk_device->dev.packed.queue[i].device_wrap_counter = 1; host_blk_device->dev.packed.queue[i].driver_wrap_counter = 1; + host_blk_device->dev.packed.queue[i].unprocessed_used_desc = false; host_blk_device->dev.packed.queue[i].driver = mmap( 0, event_size, diff --git a/src/host_interface/virtio_console.c b/src/host_interface/virtio_console.c index c695cac39..8ffd6c1fe 100644 --- a/src/host_interface/virtio_console.c +++ b/src/host_interface/virtio_console.c @@ -298,6 +298,7 @@ int virtio_console_init(sgxlkl_host_state_t* host_state, host_dev_config_t* cfg) dev->packed.queue[i].num_max = QUEUE_DEPTH; dev->packed.queue[i].device_wrap_counter = 1; dev->packed.queue[i].driver_wrap_counter = 1; + dev->packed.queue[i].unprocessed_used_desc = false; dev->packed.queue[i].driver = mmap( 0, event_size, diff --git a/src/host_interface/virtio_netdev.c 
b/src/host_interface/virtio_netdev.c index 15c1cb451..b50d6105f 100644 --- a/src/host_interface/virtio_netdev.c +++ b/src/host_interface/virtio_netdev.c @@ -617,6 +617,7 @@ int netdev_init(sgxlkl_host_state_t* host_state) net_dev->dev.packed.queue[i].num_max = QUEUE_DEPTH; net_dev->dev.packed.queue[i].device_wrap_counter = 1; net_dev->dev.packed.queue[i].driver_wrap_counter = 1; + net_dev->dev.packed.queue[i].unprocessed_used_desc = false; net_dev->dev.packed.queue[i].driver = mmap( 0, event_size, diff --git a/src/include/shared/virtio_ring_buff.h b/src/include/shared/virtio_ring_buff.h index 81c09018c..4693ce521 100644 --- a/src/include/shared/virtio_ring_buff.h +++ b/src/include/shared/virtio_ring_buff.h @@ -95,5 +95,6 @@ struct virtq_packed bool driver_wrap_counter; //Initialise to 1 and flip when when avail_desc_idx becomes greater than queue and we need to wrap around it uint16_t avail_desc_idx; //We increment this for each avail event we process uint16_t used_desc_idx; + bool unprocessed_used_desc; }; #endif From 8b81d1b054e11c5f7b7a8e7222cde808c935bb92 Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Sun, 25 Apr 2021 18:28:31 +0000 Subject: [PATCH 12/27] Fix bug for setting avail and used flags to 0. 
--- src/host_interface/virtio.c | 40 +++++++++++---------------- src/host_interface/virtio_blkdev.c | 1 - src/host_interface/virtio_console.c | 1 - src/host_interface/virtio_netdev.c | 1 - src/include/shared/virtio_ring_buff.h | 1 - 5 files changed, 16 insertions(+), 28 deletions(-) diff --git a/src/host_interface/virtio.c b/src/host_interface/virtio.c index 4524387d9..84ad508b2 100644 --- a/src/host_interface/virtio.c +++ b/src/host_interface/virtio.c @@ -184,9 +184,18 @@ static inline void virtio_add_used_packed( struct virtq_packed_desc* desc = &q->desc[used_idx & (q->num -1)]; desc->id = id; desc->len = htole32(len); - desc->flags |= - q->device_wrap_counter << LKL_VRING_PACKED_DESC_F_AVAIL | - q->device_wrap_counter << LKL_VRING_PACKED_DESC_F_USED; + if (q->device_wrap_counter == 1) + { + desc->flags |= 1 << LKL_VRING_PACKED_DESC_F_AVAIL | + 1 << LKL_VRING_PACKED_DESC_F_USED; + } + else + { + uint16_t avail_set_zero = 1 << LKL_VRING_PACKED_DESC_F_AVAIL; + uint16_t used_set_zero = 1 << LKL_VRING_PACKED_DESC_F_USED; + desc->flags &= + ~avail_set_zero & ~used_set_zero; + } desc->flags = htole16(desc->flags); } @@ -341,7 +350,6 @@ static void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) whose request is chained, and the same with network device (and I assume the same for console) */ - int send_irq = 0; struct _virtio_req* _req = container_of(req, struct _virtio_req, req); struct virtq_packed* q = _req->packed.q; uint16_t avail_desc_idx = _req->idx; @@ -350,7 +358,7 @@ static void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) uint16_t used_len; #ifdef DEBUG - printf("\nRequest complete. queue num: %d, flags: %d, driver flag: %d, req avail index: %d, ring avail index: %d, ring used index: %d, counter: %d\n", q->num, q->desc[avail_desc_idx].flags, q->driver->flags, avail_desc_idx, q->avail_desc_idx, used_desc_idx, counter); + printf("\nRequest complete. 
queue num: %d, flags: %d, driver flag: %d, req avail index: %d, ring avail index: %d, ring used index: %d, device wrap counter: %d, counter: %d\n", q->num, q->desc[avail_desc_idx].flags, q->driver->flags, avail_desc_idx, q->avail_desc_idx, used_desc_idx, q->device_wrap_counter, counter); #endif if (!q->max_merge_len) @@ -377,7 +385,6 @@ static void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) #ifdef DEBUG printf("Changing wrapper\n"); #endif - send_irq = 1; } // Don't think we need to synchronise used @@ -388,17 +395,14 @@ static void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) /**TODO*/ // Need to use event supression here - but in theory this should work // Read from driver event - if (send_irq && (q->driver->flags == LKL_VRING_PACKED_EVENT_FLAG_ENABLE || - q->driver->flags == LKL_VRING_PACKED_EVENT_FLAG_DESC)) + if (q->driver->flags == LKL_VRING_PACKED_EVENT_FLAG_ENABLE || + q->driver->flags == LKL_VRING_PACKED_EVENT_FLAG_DESC) { #ifdef DEBUG printf("Delivering irq\n"); #endif virtio_deliver_irq(_req->dev); - q->unprocessed_used_desc = false; } - else - q->unprocessed_used_desc = true; } /* @@ -536,7 +540,6 @@ static void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) counter++; #endif struct virtq_packed* q = &dev->packed.queue[qidx]; - bool looped_once = false; if (!q->ready) return; @@ -544,7 +547,7 @@ static void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) if (dev->ops->acquire_queue) dev->ops->acquire_queue(dev, qidx); #ifdef DEBUG - if (counter) + if (counter < 5) { printf("The qidx: \n"); printf( @@ -567,7 +570,6 @@ static void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) // Have some loop that keeps going until we hit a desc that's not available while (packed_desc_is_avail(q,&q->desc[q->avail_desc_idx & (q->num-1)])) { - looped_once = true; // Need to process desc here // Possible make some process_one_packed // Question is what else do I include in 
this statement @@ -575,16 +577,6 @@ static void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) break; } - // Driver must be awaiting some used desc responses we've not yet sent - if (looped_once && q->unprocessed_used_desc) - { -#ifdef DEBUG - printf("Delivering irq special\n"); -#endif - virtio_deliver_irq(dev); - q->unprocessed_used_desc = true; - } - q->device->flags = LKL_VRING_PACKED_EVENT_FLAG_ENABLE; if (dev->ops->release_queue) dev->ops->release_queue(dev, qidx); diff --git a/src/host_interface/virtio_blkdev.c b/src/host_interface/virtio_blkdev.c index d9bea22cf..0a40a014a 100644 --- a/src/host_interface/virtio_blkdev.c +++ b/src/host_interface/virtio_blkdev.c @@ -168,7 +168,6 @@ int blk_device_init( host_blk_device->dev.packed.queue[i].num_max = HOST_BLK_DEV_QUEUE_DEPTH; host_blk_device->dev.packed.queue[i].device_wrap_counter = 1; host_blk_device->dev.packed.queue[i].driver_wrap_counter = 1; - host_blk_device->dev.packed.queue[i].unprocessed_used_desc = false; host_blk_device->dev.packed.queue[i].driver = mmap( 0, event_size, diff --git a/src/host_interface/virtio_console.c b/src/host_interface/virtio_console.c index 8ffd6c1fe..c695cac39 100644 --- a/src/host_interface/virtio_console.c +++ b/src/host_interface/virtio_console.c @@ -298,7 +298,6 @@ int virtio_console_init(sgxlkl_host_state_t* host_state, host_dev_config_t* cfg) dev->packed.queue[i].num_max = QUEUE_DEPTH; dev->packed.queue[i].device_wrap_counter = 1; dev->packed.queue[i].driver_wrap_counter = 1; - dev->packed.queue[i].unprocessed_used_desc = false; dev->packed.queue[i].driver = mmap( 0, event_size, diff --git a/src/host_interface/virtio_netdev.c b/src/host_interface/virtio_netdev.c index b50d6105f..15c1cb451 100644 --- a/src/host_interface/virtio_netdev.c +++ b/src/host_interface/virtio_netdev.c @@ -617,7 +617,6 @@ int netdev_init(sgxlkl_host_state_t* host_state) net_dev->dev.packed.queue[i].num_max = QUEUE_DEPTH; net_dev->dev.packed.queue[i].device_wrap_counter = 1; 
net_dev->dev.packed.queue[i].driver_wrap_counter = 1; - net_dev->dev.packed.queue[i].unprocessed_used_desc = false; net_dev->dev.packed.queue[i].driver = mmap( 0, event_size, diff --git a/src/include/shared/virtio_ring_buff.h b/src/include/shared/virtio_ring_buff.h index 4693ce521..81c09018c 100644 --- a/src/include/shared/virtio_ring_buff.h +++ b/src/include/shared/virtio_ring_buff.h @@ -95,6 +95,5 @@ struct virtq_packed bool driver_wrap_counter; //Initialise to 1 and flip when when avail_desc_idx becomes greater than queue and we need to wrap around it uint16_t avail_desc_idx; //We increment this for each avail event we process uint16_t used_desc_idx; - bool unprocessed_used_desc; }; #endif From fb015f63d2fd3d6f7b2d87806b22cd21aa58f1af Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Mon, 26 Apr 2021 13:38:06 +0000 Subject: [PATCH 13/27] Cleaning up code. --- src/host_interface/virtio.c | 88 +++++++++------------------ src/host_interface/virtio_blkdev.c | 30 +-------- src/host_interface/virtio_console.c | 30 +-------- src/host_interface/virtio_netdev.c | 28 --------- src/include/shared/virtio_ring_buff.h | 31 ++++++++-- src/lkl/virtio.c | 21 ++++--- 6 files changed, 68 insertions(+), 160 deletions(-) diff --git a/src/host_interface/virtio.c b/src/host_interface/virtio.c index 84ad508b2..a01fdda40 100644 --- a/src/host_interface/virtio.c +++ b/src/host_interface/virtio.c @@ -17,10 +17,6 @@ bool packed_ring = true; bool packed_ring = false; #endif -#ifdef DEBUG -static int counter = 0; -#endif - struct _virtio_req { union { @@ -47,10 +43,6 @@ static int packed_desc_is_avail(struct virtq_packed *q, struct virtq_packed_desc { bool avail = !!(desc->flags & (1 << LKL_VRING_PACKED_DESC_F_AVAIL)); bool used = !!(desc->flags & (1 << LKL_VRING_PACKED_DESC_F_USED)); -#ifdef DEBUG - if (counter < 5) - printf("Flags is %d %d %d %d\n", desc->flags, avail, used, q->driver_wrap_counter); -#endif return avail != used && avail == q->driver_wrap_counter; } @@ -154,15 +146,14 @@ 
static struct virtq_packed_desc* get_next_desc_packed( return &q->desc[++(*idx) & (q->num - 1)]; } - /* - * virtio_add_used: update used ring at used index with used discriptor index + * virtio_add_used_split: update used ring at used index with used discriptor index * q : input parameter * used_idx : input parameter * avail_idx: input parameter * len : input parameter */ -static inline void virtio_add_used( +static inline void virtio_add_used_split( struct virtq* q, uint16_t used_idx, uint16_t avail_idx, @@ -181,6 +172,7 @@ static inline void virtio_add_used_packed( uint32_t len, uint16_t id) { + __sync_synchronize(); struct virtq_packed_desc* desc = &q->desc[used_idx & (q->num -1)]; desc->id = id; desc->len = htole32(len); @@ -272,7 +264,7 @@ static void virtio_req_complete_split(struct virtio_req* req, uint32_t len) else used_len = min_len(len, req->buf[i].iov_len); - virtio_add_used(q, used_idx++, avail_idx++, used_len); + virtio_add_used_split(q, used_idx++, avail_idx++, used_len); len -= used_len; if (!len) @@ -342,24 +334,17 @@ static void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) * avail_desc_idx and used_desc_idx to be incremented and wrapped around as appropriate * changing the wrap counters when the above are wrapped around * - - This function only gets called either with chained descriptors, - or max_merge_len (which I assume would also be chained descriptors). - - I know this as for example it gets called from blk_enqueue, - whose request is chained, and the same with network device - (and I assume the same for console) + * This function only gets called either with chained descriptors, + * or max_merge_len (which I assume would also be chained descriptors). 
*/ + int send_irq = 0; struct _virtio_req* _req = container_of(req, struct _virtio_req, req); struct virtq_packed* q = _req->packed.q; uint16_t avail_desc_idx = _req->idx; uint16_t used_desc_idx = q->used_desc_idx; + uint16_t prev_used_desc_idx = used_desc_idx; uint16_t last_buffer_idx = avail_desc_idx+(req->buf_count-1); - uint16_t used_len; - -#ifdef DEBUG - printf("\nRequest complete. queue num: %d, flags: %d, driver flag: %d, req avail index: %d, ring avail index: %d, ring used index: %d, device wrap counter: %d, counter: %d\n", q->num, q->desc[avail_desc_idx].flags, q->driver->flags, avail_desc_idx, q->avail_desc_idx, used_desc_idx, q->device_wrap_counter, counter); -#endif + uint16_t used_len, event_idx; if (!q->max_merge_len) used_len = len; @@ -382,27 +367,29 @@ static void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) { avail_desc_idx -= q->num; q->driver_wrap_counter = !q->driver_wrap_counter; -#ifdef DEBUG - printf("Changing wrapper\n"); -#endif } - // Don't think we need to synchronise used q->used_desc_idx = used_desc_idx; q->avail_desc_idx = avail_desc_idx; + if (q->driver->flags == LKL_VRING_PACKED_EVENT_FLAG_ENABLE) + send_irq = 1; - /**TODO*/ - // Need to use event supression here - but in theory this should work - // Read from driver event - if (q->driver->flags == LKL_VRING_PACKED_EVENT_FLAG_ENABLE || - q->driver->flags == LKL_VRING_PACKED_EVENT_FLAG_DESC) + else if (q->driver->flags == LKL_VRING_PACKED_EVENT_FLAG_DESC) { -#ifdef DEBUG - printf("Delivering irq\n"); -#endif - virtio_deliver_irq(_req->dev); + event_idx = q->driver->off_wrap & ~(1 << LKL_VRING_PACKED_EVENT_F_WRAP_CTR); + //Check if event_idx has been set as used used + // old_used event new_used + // new_used old_used event + // new_used event old_used (X) + // event old_used new_used (X) + if ((used_desc_idx > event_idx && event_idx >= prev_used_desc_idx) || + (used_desc_idx < prev_used_desc_idx && prev_used_desc_idx <= event_idx)) + send_irq = 1; } + + if 
(send_irq) + virtio_deliver_irq(_req->dev); } /* @@ -536,9 +523,6 @@ static void virtio_process_queue_split(struct virtio_dev* dev, uint32_t qidx) */ static void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) { -#ifdef DEBUG - counter++; -#endif struct virtq_packed* q = &dev->packed.queue[qidx]; if (!q->ready) @@ -546,28 +530,10 @@ static void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) if (dev->ops->acquire_queue) dev->ops->acquire_queue(dev, qidx); -#ifdef DEBUG - if (counter < 5) - { - printf("The qidx: \n"); - printf( - "Processing queue %d %d %d %d\n", - q->avail_desc_idx, - q->num, - q->desc[q->avail_desc_idx].flags, - q->driver->flags); - printf("Rest: \n"); - - for (int i = 0; i < q->num; i++) - { - printf("desc num: %d Flag: %d\n",i, q->desc[i].flags); - } - } -#endif + + __sync_synchronize(); q->device->flags = LKL_VRING_PACKED_EVENT_FLAG_DISABLE; - // TODO - might need to check driver and see if we need to process a specific descriptor - // Have some loop that keeps going until we hit a desc that's not available while (packed_desc_is_avail(q,&q->desc[q->avail_desc_idx & (q->num-1)])) { // Need to process desc here @@ -577,7 +543,9 @@ static void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx) break; } + __sync_synchronize(); q->device->flags = LKL_VRING_PACKED_EVENT_FLAG_ENABLE; + if (dev->ops->release_queue) dev->ops->release_queue(dev, qidx); } diff --git a/src/host_interface/virtio_blkdev.c b/src/host_interface/virtio_blkdev.c index 0a40a014a..9c3d970a1 100644 --- a/src/host_interface/virtio_blkdev.c +++ b/src/host_interface/virtio_blkdev.c @@ -19,6 +19,7 @@ extern sgxlkl_host_state_t sgxlkl_host_state; #if DEBUG && VIRTIO_TEST_HOOK +#include static uint64_t virtio_blk_req_cnt; #endif // DEBUG && VIRTIO_TEST_HOOK @@ -113,7 +114,6 @@ int blk_device_init( void* vq_mem = NULL; struct virtio_blk_dev* host_blk_device = NULL; size_t bdev_size = sizeof(struct virtio_blk_dev); - size_t event_size = 
next_pow2(sizeof(struct virtq_packed_desc_event)); size_t vq_size; if (!packed_ring) @@ -168,33 +168,6 @@ int blk_device_init( host_blk_device->dev.packed.queue[i].num_max = HOST_BLK_DEV_QUEUE_DEPTH; host_blk_device->dev.packed.queue[i].device_wrap_counter = 1; host_blk_device->dev.packed.queue[i].driver_wrap_counter = 1; - host_blk_device->dev.packed.queue[i].driver = mmap( - 0, - event_size, - PROT_READ, - MAP_SHARED | MAP_ANONYMOUS, - -1, - 0 - ); - if (!host_blk_device->dev.packed.queue[i].driver) - { - sgxlkl_host_fail("%s: block device queue descriptor event allocation failed\n", __func__); - return -1; - } - host_blk_device->dev.packed.queue[i].device = mmap( - 0, - event_size, - PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, - -1, - 0 - ); - if (!host_blk_device->dev.packed.queue[i].device) - { - sgxlkl_host_fail("%s: block device queue descriptor event allocation failed\n", __func__); - return -1; - } - host_blk_device->dev.packed.queue[i].device->flags = LKL_VRING_PACKED_EVENT_FLAG_ENABLE; } } @@ -214,7 +187,6 @@ int blk_device_init( if (packed_ring) host_blk_device->dev.device_features |= BIT(VIRTIO_F_RING_PACKED); - if (enable_swiotlb) host_blk_device->dev.device_features |= BIT(VIRTIO_F_IOMMU_PLATFORM); diff --git a/src/host_interface/virtio_console.c b/src/host_interface/virtio_console.c index c695cac39..20d4193d0 100644 --- a/src/host_interface/virtio_console.c +++ b/src/host_interface/virtio_console.c @@ -105,7 +105,9 @@ void* monitor_console_input(void* cons_dev) break; if (ret & DEV_CONSOLE_WRITE) + { virtio_process_queue(dev, RX_QUEUE_ID); + } } while (1); return NULL; } @@ -228,7 +230,6 @@ int virtio_console_init(sgxlkl_host_state_t* host_state, host_dev_config_t* cfg) void* console_vq_mem = NULL; size_t host_console_size = next_pow2(sizeof(struct virtio_console_dev)); - size_t event_size = next_pow2(sizeof(struct virtq_packed_desc_event)); size_t console_vq_size; if (!packed_ring) @@ -298,33 +299,6 @@ int virtio_console_init(sgxlkl_host_state_t* 
host_state, host_dev_config_t* cfg) dev->packed.queue[i].num_max = QUEUE_DEPTH; dev->packed.queue[i].device_wrap_counter = 1; dev->packed.queue[i].driver_wrap_counter = 1; - dev->packed.queue[i].driver = mmap( - 0, - event_size, - PROT_READ, - MAP_SHARED | MAP_ANONYMOUS, - -1, - 0 - ); - if (!dev->packed.queue[i].driver) - { - sgxlkl_host_fail("%s: block device queue descriptor event allocation failed\n", __func__); - return -1; - } - dev->packed.queue[i].device = mmap( - 0, - event_size, - PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, - -1, - 0 - ); - if (!dev->packed.queue[i].device) - { - sgxlkl_host_fail("%s: block device queue descriptor event allocation failed\n", __func__); - return -1; - } - dev->packed.queue[i].device->flags = LKL_VRING_PACKED_EVENT_FLAG_ENABLE; } } diff --git a/src/host_interface/virtio_netdev.c b/src/host_interface/virtio_netdev.c index 15c1cb451..c82980081 100644 --- a/src/host_interface/virtio_netdev.c +++ b/src/host_interface/virtio_netdev.c @@ -551,7 +551,6 @@ int netdev_init(sgxlkl_host_state_t* host_state) mac[0] &= 0xfe; size_t host_netdev_size = next_pow2(sizeof(struct virtio_net_dev)); - size_t event_size = next_pow2(sizeof(struct virtq_packed_desc_event)); if (!packed_ring) netdev_vq_size = NUM_QUEUES * sizeof(struct virtq); @@ -617,33 +616,6 @@ int netdev_init(sgxlkl_host_state_t* host_state) net_dev->dev.packed.queue[i].num_max = QUEUE_DEPTH; net_dev->dev.packed.queue[i].device_wrap_counter = 1; net_dev->dev.packed.queue[i].driver_wrap_counter = 1; - net_dev->dev.packed.queue[i].driver = mmap( - 0, - event_size, - PROT_READ, - MAP_SHARED | MAP_ANONYMOUS, - -1, - 0 - ); - if (!net_dev->dev.packed.queue[i].driver) - { - sgxlkl_host_fail("%s: block device queue descriptor event allocation failed\n", __func__); - return -1; - } - net_dev->dev.packed.queue[i].device = mmap( - 0, - event_size, - PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, - -1, - 0 - ); - if (!net_dev->dev.packed.queue[i].device) - { - sgxlkl_host_fail("%s: block device 
queue descriptor event allocation failed\n", __func__); - return -1; - } - net_dev->dev.packed.queue[i].device->flags = LKL_VRING_PACKED_EVENT_FLAG_ENABLE; } } diff --git a/src/include/shared/virtio_ring_buff.h b/src/include/shared/virtio_ring_buff.h index 81c09018c..17e922ef4 100644 --- a/src/include/shared/virtio_ring_buff.h +++ b/src/include/shared/virtio_ring_buff.h @@ -9,13 +9,27 @@ #define LKL_VRING_DESC_F_WRITE 2 /* This means the buffer contains a list of buffer descriptors. */ #define LKL_VRING_DESC_F_INDIRECT 4 - +/* + * Mark a descriptor as available or used in packed ring. + * Notice: they are defined as shifts instead of shifted values. + */ #define LKL_VRING_PACKED_DESC_F_AVAIL 7 #define LKL_VRING_PACKED_DESC_F_USED 15 - +/* Enable events in packed ring. */ #define LKL_VRING_PACKED_EVENT_FLAG_ENABLE 0x0 +/* Disable events in packed ring. */ #define LKL_VRING_PACKED_EVENT_FLAG_DISABLE 0x1 +/* + * Enable events for a specific descriptor in packed ring. + * (as specified by Descriptor Ring Change Event Offset/Wrap Counter). + * Only valid if VIRTIO_RING_F_EVENT_IDX has been negotiated. + */ #define LKL_VRING_PACKED_EVENT_FLAG_DESC 0x2 +/* + * Wrap counter bit shift in event suppression structure + * of packed ring. + */ +#define LKL_VRING_PACKED_EVENT_F_WRAP_CTR 15 struct virtq_desc @@ -68,15 +82,21 @@ struct virtq struct virtq_packed_desc { + /* Address (guest-physical). */ uint64_t addr; + /* Length. */ uint32_t len; + /* Buffer ID. */ uint16_t id; + /* The flags as indicated above. */ uint16_t flags; }; struct virtq_packed_desc_event { + /* Descriptor Ring Change Event Offset/Wrap Counter. */ uint16_t off_wrap; + /* Descriptor Ring Change Event Flags. 
*/ uint16_t flags; }; @@ -87,13 +107,12 @@ struct virtq_packed _Atomic(uint32_t) num; uint32_t max_merge_len; - //Add supression flags where necessary _Atomic(struct virtq_packed_desc*) desc; _Atomic(struct virtq_packed_desc_event*) driver; _Atomic(struct virtq_packed_desc_event*) device; - bool device_wrap_counter; //Initialise to 1, flip when we change last descriptor as used - bool driver_wrap_counter; //Initialise to 1 and flip when when avail_desc_idx becomes greater than queue and we need to wrap around it - uint16_t avail_desc_idx; //We increment this for each avail event we process + bool device_wrap_counter; + bool driver_wrap_counter; + uint16_t avail_desc_idx; uint16_t used_desc_idx; }; #endif diff --git a/src/lkl/virtio.c b/src/lkl/virtio.c index 9adab9aa1..f41e6527c 100644 --- a/src/lkl/virtio.c +++ b/src/lkl/virtio.c @@ -97,11 +97,6 @@ static int virtio_read(void* data, int offset, void* res, int size) uint32_t val = 0; struct virtio_dev* dev = (struct virtio_dev*)data; - if (packed_ring) - { - oe_host_printf("This is a packed ring\n"); - } - if (offset >= VIRTIO_MMIO_CONFIG) { offset -= VIRTIO_MMIO_CONFIG; @@ -393,11 +388,15 @@ static int virtio_write(void* data, int offset, void* res, int size) * to it. */ case VIRTIO_MMIO_QUEUE_AVAIL_LOW: - if (!packed_ring) + if (packed_ring) + set_ptr_low((_Atomic(uint64_t)*)&packed_q->driver, val); + else set_ptr_low((_Atomic(uint64_t)*)&split_q->avail, val); break; case VIRTIO_MMIO_QUEUE_AVAIL_HIGH: - if (!packed_ring) + if (packed_ring) + set_ptr_high((_Atomic(uint64_t)*)&packed_q->driver, val); + else set_ptr_high((_Atomic(uint64_t)*)&split_q->avail, val); break; /* Security Review: For Split Queue, q->used link list content should be @@ -413,11 +412,15 @@ static int virtio_write(void* data, int offset, void* res, int size) * functionality. 
*/ case VIRTIO_MMIO_QUEUE_USED_LOW: - if (!packed_ring) + if (packed_ring) + set_ptr_low((_Atomic(uint64_t)*)&packed_q->device, val); + else set_ptr_low((_Atomic(uint64_t)*)&split_q->used, val); break; case VIRTIO_MMIO_QUEUE_USED_HIGH: - if (!packed_ring) + if (packed_ring) + set_ptr_high((_Atomic(uint64_t)*)&packed_q->device, val); + else set_ptr_high((_Atomic(uint64_t)*)&split_q->used, val); break; default: From c39533ffdd76975eccf4446c929e047c3d11e45c Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Sun, 2 May 2021 16:27:39 +0100 Subject: [PATCH 14/27] Initialise shadow devs. --- src/include/lkl/virtio.h | 4 +++ src/lkl/virtio.c | 55 ++++++++++++++++++++++++++++++---------- src/lkl/virtio_blkdev.c | 51 ++++++++++++++++++++++++++++++------- src/lkl/virtio_console.c | 21 ++++++++++----- src/lkl/virtio_netdev.c | 35 ++++++++++++++----------- 5 files changed, 121 insertions(+), 45 deletions(-) diff --git a/src/include/lkl/virtio.h b/src/include/lkl/virtio.h index 8f464f0f8..67e12b217 100644 --- a/src/include/lkl/virtio.h +++ b/src/include/lkl/virtio.h @@ -52,4 +52,8 @@ int lkl_virtio_dev_setup( */ void lkl_virtio_deliver_irq(uint8_t dev_id); +/* + * Function to allocate memory for a shadow virtio dev + */ +struct virtio_dev* alloc_shadow_virtio_dev(); #endif /* _LKL_LIB_VIRTIO_H */ diff --git a/src/lkl/virtio.c b/src/lkl/virtio.c index f41e6527c..ca30e513a 100644 --- a/src/lkl/virtio.c +++ b/src/lkl/virtio.c @@ -52,6 +52,12 @@ static uint32_t lkl_num_virtio_boot_devs; typedef void (*lkl_virtio_dev_deliver_irq)(uint64_t dev_id); static lkl_virtio_dev_deliver_irq virtio_deliver_irq[DEVICE_COUNT]; +struct virtio_dev_handle +{ + struct virtio_dev *dev; //shadow structure in guest memory + struct virtio_dev *dev_host; +} + /* * virtio_read_device_features: Read Device Features * dev : pointer to device structure @@ -448,28 +454,26 @@ void lkl_virtio_deliver_irq(uint8_t dev_id) /* * Function to setup the virtio device setting */ +/** + * TODO Shadow structure + * Need 
to create and register dev handle here + * Should I create the shadow dev here along with the verification? + */ int lkl_virtio_dev_setup( struct virtio_dev* dev, + struct virtio_dev* dev_host, int mmio_size, void* deliver_irq_cb) { + struct virtio_dev_handle *dev_handle; int avail = 0, num_bytes = 0, ret = 0; - dev->irq = lkl_get_free_irq("virtio"); - dev->int_status = 0; - if (dev->irq < 0) + dev_host->irq = lkl_get_free_irq("virtio"); + dev_host->int_status = 0; + if (dev_host->irq < 0) return 1; - /* Security Review: dev-vendor_id might cause overflow in - * virtio_deliver_irq[DEVICE_COUNT] - */ - virtio_deliver_irq[dev->vendor_id] = deliver_irq_cb; - /* Security Review: pass handle instead of virtio_dev pointer to the rest of - * the system. - */ - /* Security Review: shadow dev->base used in guest side only. No - * copy-through to the host side structure - */ - dev->base = register_iomem(dev, mmio_size, &virtio_ops); + virtio_deliver_irq[dev_host->vendor_id] = deliver_irq_cb; + dev_host->base = register_iomem(dev_host, mmio_size, &virtio_ops); if (!lkl_is_running()) { @@ -509,3 +513,26 @@ int lkl_virtio_dev_setup( } return 0; } + +/* + * Function to allocate memory for a shadow virtio dev + */ +struct virtio_dev* alloc_shadow_virtio_dev() +{ + size_t dev_size = next_pow2(sizeof(struct virtio_dev)); + + struct virtio_dev* dev = mmap( + 0, + dev_size, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, + -1, + 0); + + if (!dev) + { + sgxlkl_error("Shadow device alloc failed\n"); + return NULL + } + return dev; +} \ No newline at end of file diff --git a/src/lkl/virtio_blkdev.c b/src/lkl/virtio_blkdev.c index c98ed0517..f68402ca1 100644 --- a/src/lkl/virtio_blkdev.c +++ b/src/lkl/virtio_blkdev.c @@ -8,15 +8,32 @@ #include "enclave/vio_enclave_event_channel.h" #include "lkl/virtio.h" +#define MAX_BLOCK_DEVS 32 + +static uint8_t registered_shadow_dev_idx = 0; + +static struct virtio_dev* registered_shadow_devs[MAX_BLOCK_DEVS]; + +/* + * Function to get shadow 
blkdev instance to use its attributes + */ +static inline struct virtio_dev* get_blkdev_instance(uint8_t blkdev_id) +{ + for (size_t i = 0; i < registered_shadow_dev_idx; i++) + if (registered_shadow_devs[i]->vendor_id == blkdev_id) + return registered_shadow_devs[i]; + SGXLKL_ASSERT(false); +} + /* * Function to trigger block dev irq to notify front end driver */ static void lkl_deliver_irq(uint8_t dev_id) { - struct virtio_dev* dev = - sgxlkl_enclave_state.shared_memory.virtio_blk_dev_mem[dev_id]; + struct virtio_dev* dev = get_blkdev_instance(dev_id); dev->int_status |= VIRTIO_MMIO_INT_VRING; + // TODO might need to update int_status in host as well lkl_trigger_irq(dev->irq); } @@ -29,20 +46,36 @@ int lkl_add_disks( const sgxlkl_enclave_mount_config_t* mounts, size_t num_mounts) { - struct virtio_dev* root_dev = + struct virtio_dev* root_dev = alloc_shadow_virtio_dev(); + + if (!root_dev) + return -1; + + struct virtio_dev* root_dev_host = sgxlkl_enclave_state.shared_memory.virtio_blk_dev_mem - [sgxlkl_enclave_state.disk_state[0].host_disk_index]; - int mmio_size = VIRTIO_MMIO_CONFIG + root_dev->config_len; - if (lkl_virtio_dev_setup(root_dev, mmio_size, lkl_deliver_irq) != 0) + [sgxlkl_enclave_state.disk_state[0].host_disk_index]; + + int mmio_size = VIRTIO_MMIO_CONFIG + root_dev_host->config_len; + + registered_shadow_devs[registered_shadow_dev_idx++] = root_dev; + + if (lkl_virtio_dev_setup(root_dev, root_dev_host, mmio_size, lkl_deliver_irq) != 0) return -1; for (size_t i = 0; i < num_mounts; ++i) { - struct virtio_dev* dev = + struct virtio_dev* dev = alloc_shadow_virtio_dev(); + + if (!dev) + return -1; + + struct virtio_dev* dev_host = sgxlkl_enclave_state.shared_memory.virtio_blk_dev_mem [sgxlkl_enclave_state.disk_state[i + 1].host_disk_index]; - int mmio_size = VIRTIO_MMIO_CONFIG + dev->config_len; - if (lkl_virtio_dev_setup(dev, mmio_size, lkl_deliver_irq) != 0) + int mmio_size = VIRTIO_MMIO_CONFIG + dev_host->config_len; + 
registered_shadow_devs[registered_shadow_dev_idx++] = root_dev; + + if (lkl_virtio_dev_setup(dev, dev_host, mmio_size, lkl_deliver_irq) != 0) return -1; } return 0; diff --git a/src/lkl/virtio_console.c b/src/lkl/virtio_console.c index 92c2971f1..1920b6fc2 100644 --- a/src/lkl/virtio_console.c +++ b/src/lkl/virtio_console.c @@ -9,29 +9,36 @@ #include "enclave/ticketlock.h" #include "lkl/virtio.h" +static struct virtio_dev* console; + /* * Function to generate an interrupt for LKL kernel to reap the virtQ data */ static void lkl_deliver_irq(uint64_t dev_id) { - struct virtio_dev* dev = - sgxlkl_enclave_state.shared_memory.virtio_console_mem; + //TODO may need to uncomment if int_status needs to be changed for host too + //struct virtio_dev* dev = + // sgxlkl_enclave_state.shared_memory.virtio_console_mem; - dev->int_status |= VIRTIO_MMIO_INT_VRING; + //dev->int_status |= VIRTIO_MMIO_INT_VRING; + console->int_status |= VIRTIO_MMIO_INT_VRING; - lkl_trigger_irq(dev->irq); + lkl_trigger_irq(console->irq); } /* * Function to add a new net device to LKL */ -int lkl_virtio_console_add(struct virtio_dev* console) +int lkl_virtio_console_add(struct virtio_dev* console_host) { int ret = -1; + console = alloc_shadow_virtio_dev(); - int mmio_size = VIRTIO_MMIO_CONFIG + console->config_len; + if (!console) + return -1; - ret = lkl_virtio_dev_setup(console, mmio_size, &lkl_deliver_irq); + int mmio_size = VIRTIO_MMIO_CONFIG + console_host->config_len; + ret = lkl_virtio_dev_setup(console,console_host, mmio_size, &lkl_deliver_irq); return ret; } diff --git a/src/lkl/virtio_netdev.c b/src/lkl/virtio_netdev.c index 45934e498..e6fa3e862 100644 --- a/src/lkl/virtio_netdev.c +++ b/src/lkl/virtio_netdev.c @@ -12,17 +12,19 @@ #define MAX_NET_DEVS 16 static uint8_t registered_dev_idx = 0; +static uint8_t registered_shadow_dev_idx = 0; -struct virtio_dev* registered_devs[MAX_NET_DEVS]; +struct virtio_dev* host_devs[MAX_NET_DEVS]; //TODO may be able to delete this +struct virtio_dev* 
registered_shadow_devs[MAX_NET_DEVS]; /* * Function to get netdev instance to use its attributes */ static inline struct virtio_dev* get_netdev_instance(uint8_t netdev_id) { - for (size_t i = 0; i < registered_dev_idx; i++) - if (registered_devs[i]->vendor_id == netdev_id) - return registered_devs[i]; + for (size_t i = 0; i < registered_shadow_dev_idx; i++) + if (registered_shadow_devs[i]->vendor_id == netdev_id) + return registered_shadow_devs[i]; SGXLKL_ASSERT(false); } @@ -38,11 +40,6 @@ static int dev_register(struct virtio_dev* dev) sgxlkl_info("Too many virtio_net devices!\n"); ret = -LKL_ENOMEM; } - else - { - /* registered_dev_idx is incremented by the caller */ - registered_devs[registered_dev_idx] = dev; - } return ret; } @@ -54,6 +51,7 @@ static void lkl_deliver_irq(uint64_t dev_id) struct virtio_dev* dev = get_netdev_instance(dev_id); dev->int_status |= VIRTIO_MMIO_INT_VRING; + // TODO might need to update int_status in host as well lkl_trigger_irq(dev->irq); } @@ -62,13 +60,19 @@ static void lkl_deliver_irq(uint64_t dev_id) * Function to add a new net device to LKL and register the cb to notify * frontend driver for the request completion. 
*/ -int lkl_virtio_netdev_add(struct virtio_dev* netdev) +int lkl_virtio_netdev_add(struct virtio_dev* netdev_host) { int ret = -1; - int mmio_size = VIRTIO_MMIO_CONFIG + netdev->config_len; + int mmio_size = VIRTIO_MMIO_CONFIG + netdev_host->config_len; + struct virtio_dev* netdev = alloc_shadow_virtio_dev(); + + if (!netdev) + return -1; + + registered_shadow_devs[registered_shadow_dev_idx] = netdev; + host_devs[registered_dev_idx] = netdev_host; - registered_devs[registered_dev_idx] = netdev; - if (lkl_virtio_dev_setup(netdev, mmio_size, &lkl_deliver_irq) != 0) + if (lkl_virtio_dev_setup(netdev, netdev_host, mmio_size, &lkl_deliver_irq) != 0) return -1; ret = dev_register(netdev); @@ -77,8 +81,9 @@ int lkl_virtio_netdev_add(struct virtio_dev* netdev) sgxlkl_fail("Failed to register netdev\n"); return -1; } + registered_dev_idx++; - return registered_dev_idx++; + return registered_shadow_dev_idx++; } /* @@ -87,7 +92,7 @@ int lkl_virtio_netdev_add(struct virtio_dev* netdev) void lkl_virtio_netdev_remove(void) { uint8_t netdev_id = 0; - for (netdev_id = 0; netdev_id < registered_dev_idx; netdev_id++) + for (netdev_id = 0; netdev_id < registered_shadow_dev_idx; netdev_id++) { sgxlkl_host_netdev_remove(netdev_id); int ret = lkl_netdev_get_ifindex(netdev_id); From 23be735dfd8c89c5cf16c78db2b9313579531aa4 Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Tue, 4 May 2021 20:20:33 +0100 Subject: [PATCH 15/27] Copy host dev into shadow dev. 
--- src/host_interface/virtio_blkdev.c | 1 + src/include/lkl/virtio.h | 4 +- src/lkl/virtio.c | 209 +++++++++++++++++++++++++++-- src/lkl/virtio_blkdev.c | 5 +- src/lkl/virtio_console.c | 2 +- src/lkl/virtio_netdev.c | 1 + 6 files changed, 205 insertions(+), 17 deletions(-) diff --git a/src/host_interface/virtio_blkdev.c b/src/host_interface/virtio_blkdev.c index 9c3d970a1..0d0e3144b 100644 --- a/src/host_interface/virtio_blkdev.c +++ b/src/host_interface/virtio_blkdev.c @@ -111,6 +111,7 @@ int blk_device_init( size_t disk_index, int enable_swiotlb) { + void* vq_mem = NULL; struct virtio_blk_dev* host_blk_device = NULL; size_t bdev_size = sizeof(struct virtio_blk_dev); diff --git a/src/include/lkl/virtio.h b/src/include/lkl/virtio.h index 67e12b217..65e5832d8 100644 --- a/src/include/lkl/virtio.h +++ b/src/include/lkl/virtio.h @@ -43,6 +43,7 @@ struct virtio_dev */ int lkl_virtio_dev_setup( struct virtio_dev* dev, + struct virtio_dev* dev_host, int mmio_size, void* virtio_req_complete); @@ -56,4 +57,5 @@ void lkl_virtio_deliver_irq(uint8_t dev_id); * Function to allocate memory for a shadow virtio dev */ struct virtio_dev* alloc_shadow_virtio_dev(); -#endif /* _LKL_LIB_VIRTIO_H */ + +#endif //_LKL_LIB_VIRTIO_H \ No newline at end of file diff --git a/src/lkl/virtio.c b/src/lkl/virtio.c index ca30e513a..2217dd129 100644 --- a/src/lkl/virtio.c +++ b/src/lkl/virtio.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -14,7 +15,7 @@ #include "enclave/vio_enclave_event_channel.h" #include #include - +#include #include "openenclave/corelibc/oestring.h" // from inttypes.h @@ -23,6 +24,14 @@ #define VIRTIO_DEV_MAGIC 0x74726976 #define VIRTIO_DEV_VERSION 2 +#define BLK_DEV_NUM_QUEUES 1 +#define NET_DEV_NUM_QUEUES 2 +#define CONSOLE_NUM_QUEUES 2 + +#define BLK_DEV_QUEUE_DEPTH 32 +#define CONSOLE_QUEUE_DEPTH 32 +#define NET_DEV_QUEUE_DEPTH 128 + #undef BIT #define BIT(x) (1ULL << x) @@ -56,7 +65,7 @@ struct virtio_dev_handle { struct virtio_dev 
*dev; //shadow structure in guest memory struct virtio_dev *dev_host; -} +}; /* * virtio_read_device_features: Read Device Features @@ -71,6 +80,11 @@ static inline uint32_t virtio_read_device_features(struct virtio_dev* dev) return (uint32_t)dev->device_features; } +static bool virtio_has_feature(struct virtio_dev* dev, unsigned int bit) +{ + return dev->device_features & BIT(bit); +} + /* * virtio_read: Process read requests from virtio_mmio * data: virtio_dev pointer @@ -241,6 +255,7 @@ static inline void set_ptr_high(_Atomic(uint64_t) * ptr, uint32_t val) } while (!atomic_compare_exchange_weak(ptr, &expected, desired)); } +// TODO - update host side desc and avail static void virtio_notify_host_device(struct virtio_dev* dev, uint32_t qidx) { uint8_t dev_id = (uint8_t)dev->vendor_id; @@ -451,14 +466,130 @@ void lkl_virtio_deliver_irq(uint8_t dev_id) virtio_deliver_irq[dev_id](dev_id); } +void* copy_queue(struct virtio_dev* dev) +{ + void* vq_mem = NULL; + int num_queues = 0; + size_t vq_size = 0, vq_desc_size = 0, vq_avail_size = 0; + struct virtq_packed* dest_packed = NULL; + struct virtq* dest_split = NULL; + + switch (dev->device_id) + { + case VIRTIO_ID_NET: + num_queues = NET_DEV_NUM_QUEUES; + if (!packed_ring) + { + vq_desc_size = next_pow2(NET_DEV_QUEUE_DEPTH * sizeof(struct virtq_desc)); + vq_avail_size = next_pow2(NET_DEV_QUEUE_DEPTH * sizeof(struct virtq_avail)); + } + break; + case VIRTIO_ID_CONSOLE: + num_queues = CONSOLE_NUM_QUEUES; + if (!packed_ring) + { + vq_desc_size = next_pow2(CONSOLE_QUEUE_DEPTH * sizeof(struct virtq_desc)); + vq_avail_size = next_pow2(CONSOLE_QUEUE_DEPTH * sizeof(struct virtq_avail)); + } + break; + case VIRTIO_ID_BLOCK: + num_queues = BLK_DEV_NUM_QUEUES; + if (!packed_ring) + { + vq_desc_size = next_pow2(BLK_DEV_QUEUE_DEPTH * sizeof(struct virtq_desc)); + vq_avail_size = next_pow2(BLK_DEV_QUEUE_DEPTH * sizeof(struct virtq_avail)); + } + break; + default: + sgxlkl_error("Unsupported device, device id: %d\n", dev->device_id); 
+ return NULL; + } + if (packed_ring) + { + vq_size = next_pow2(num_queues * sizeof(struct virtq_packed)); + } + else + { + vq_size = next_pow2(num_queues * sizeof(struct virtq)); + } + + vq_mem = mmap(0, + vq_size, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, + -1, + 0); + + if (!vq_mem) + { + sgxlkl_error("Queue mem alloc failed\n"); + return NULL; + } + + if (packed_ring) + dest_packed = vq_mem; + else + dest_split = vq_mem; + + for (int i = 0; i < num_queues; i++) + { + if (packed_ring) + { + dest_packed[i].num_max = dev->packed.queue[i].num_max; + } + + else + { + dest_split[i].num_max = dev->split.queue[i].num_max; + + // Need to copy avail and desc contents to host cannot have direct access + // In practice this won't be used as the guest side allocates to host memory + // But if we were to upstream changes to the linux kernel tree, this would be needed + dest_split[i].desc = mmap(0, + vq_desc_size, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, + -1, + 0); + if (!dest_split[i].desc) + { + sgxlkl_error("Desc mem alloc failed\n"); + return NULL; + } + + dest_split[i].avail = mmap(0, + vq_avail_size, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, + -1, + 0); + if (!dest_split[i].avail) + { + sgxlkl_error("Avail mem alloc failed\n"); + return NULL; + } + } + // TODO Don't think I need anything else here such as desc allocation, but double check later + + //dest[i].device_wrap_counter = queue[i].device_wrap_counter; + //dest[i].driver_wrap_counter = queue[i].driver_wrap_counter; + // + // Need to allocate memory for host side desc + // Because we'll be using the shadow dev for the desc + + // My proposal; just use host side desc directly: for reading and writing + // WHen host makes a notification, we can check its contents to ensure its valid + + //What's the point in keeping a copy of anything that's host-read-write? + //A: I suppose for the purpose of sanity checking. 
So checking desc length is valid + //but can't we just check the host contents directly when sanity checking? + } + return packed_ring ? (void *) dest_packed : (void *) dest_split; +} + /* * Function to setup the virtio device setting */ -/** - * TODO Shadow structure - * Need to create and register dev handle here - * Should I create the shadow dev here along with the verification? - */ int lkl_virtio_dev_setup( struct virtio_dev* dev, struct virtio_dev* dev_host, @@ -467,13 +598,67 @@ int lkl_virtio_dev_setup( { struct virtio_dev_handle *dev_handle; int avail = 0, num_bytes = 0, ret = 0; - dev_host->irq = lkl_get_free_irq("virtio"); + size_t dev_handle_size = next_pow2(sizeof(struct virtio_dev_handle)); + + dev_handle = mmap(0, + dev_handle_size, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, + -1, + 0); + + if (!dev_handle) + { + sgxlkl_error("Failed to allocate memory for dev handle\n"); + return -1; + } + + dev_handle->dev = dev; + dev_handle->dev_host = dev_host; + + dev->device_id = dev_host->device_id; + dev->vendor_id = dev_host->vendor_id; + dev->config_gen = dev_host->config_gen; + dev->device_features = dev_host->device_features; + dev->config_data = dev_host->config_data; + dev->config_len = dev_host->config_len; + dev->int_status = dev_host->int_status; + + if (packed_ring) + { + dev->packed.queue = copy_queue(dev_host); + if (!dev->packed.queue) + { + sgxlkl_error("Failed to copy packed virtqueue into shadow structure\n"); + return -1; + } + } + else + { + dev->split.queue = copy_queue(dev_host); + if (!dev->split.queue) + { + sgxlkl_error("Failed to copy split virtqueue into shadow structure\n"); + return -1; + } + } + + dev->irq = lkl_get_free_irq("virtio"); + dev_host->irq = dev->irq; dev_host->int_status = 0; - if (dev_host->irq < 0) + dev->int_status = 0; + + if (dev->irq < 0) return 1; - virtio_deliver_irq[dev_host->vendor_id] = deliver_irq_cb; - dev_host->base = register_iomem(dev_host, mmio_size, &virtio_ops); + if 
(!virtio_has_feature(dev, VIRTIO_F_RING_PACKED)) + { + sgxlkl_error("Device %d does not support virtio packed ring\n", dev->device_id); + return -1; + } + + virtio_deliver_irq[dev->vendor_id] = deliver_irq_cb; + dev->base = register_iomem(dev_handle, mmio_size, &virtio_ops); if (!lkl_is_running()) { @@ -532,7 +717,7 @@ struct virtio_dev* alloc_shadow_virtio_dev() if (!dev) { sgxlkl_error("Shadow device alloc failed\n"); - return NULL + return NULL; } return dev; } \ No newline at end of file diff --git a/src/lkl/virtio_blkdev.c b/src/lkl/virtio_blkdev.c index f68402ca1..cb4283fd3 100644 --- a/src/lkl/virtio_blkdev.c +++ b/src/lkl/virtio_blkdev.c @@ -47,7 +47,6 @@ int lkl_add_disks( size_t num_mounts) { struct virtio_dev* root_dev = alloc_shadow_virtio_dev(); - if (!root_dev) return -1; @@ -65,15 +64,15 @@ int lkl_add_disks( for (size_t i = 0; i < num_mounts; ++i) { struct virtio_dev* dev = alloc_shadow_virtio_dev(); - if (!dev) return -1; struct virtio_dev* dev_host = sgxlkl_enclave_state.shared_memory.virtio_blk_dev_mem [sgxlkl_enclave_state.disk_state[i + 1].host_disk_index]; + int mmio_size = VIRTIO_MMIO_CONFIG + dev_host->config_len; - registered_shadow_devs[registered_shadow_dev_idx++] = root_dev; + registered_shadow_devs[registered_shadow_dev_idx++] = dev; if (lkl_virtio_dev_setup(dev, dev_host, mmio_size, lkl_deliver_irq) != 0) return -1; diff --git a/src/lkl/virtio_console.c b/src/lkl/virtio_console.c index 1920b6fc2..117f9f7b4 100644 --- a/src/lkl/virtio_console.c +++ b/src/lkl/virtio_console.c @@ -38,7 +38,7 @@ int lkl_virtio_console_add(struct virtio_dev* console_host) return -1; int mmio_size = VIRTIO_MMIO_CONFIG + console_host->config_len; - ret = lkl_virtio_dev_setup(console,console_host, mmio_size, &lkl_deliver_irq); + ret = lkl_virtio_dev_setup(console, console_host, mmio_size, &lkl_deliver_irq); return ret; } diff --git a/src/lkl/virtio_netdev.c b/src/lkl/virtio_netdev.c index e6fa3e862..9ea2c301c 100644 --- a/src/lkl/virtio_netdev.c +++ 
b/src/lkl/virtio_netdev.c @@ -62,6 +62,7 @@ static void lkl_deliver_irq(uint64_t dev_id) */ int lkl_virtio_netdev_add(struct virtio_dev* netdev_host) { + //TODO might able to delete host dev stuff later int ret = -1; int mmio_size = VIRTIO_MMIO_CONFIG + netdev_host->config_len; struct virtio_dev* netdev = alloc_shadow_virtio_dev(); From 18e1bfc2bb20afe88c3d077c4830f8e1d09df7de Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Wed, 5 May 2021 01:08:08 +0100 Subject: [PATCH 16/27] Update virtio_read and virtio_write with shadow structure. --- src/lkl/virtio.c | 109 +++++++++++++++++++++++++++++++++++------------ 1 file changed, 82 insertions(+), 27 deletions(-) diff --git a/src/lkl/virtio.c b/src/lkl/virtio.c index 2217dd129..5c8dfb87c 100644 --- a/src/lkl/virtio.c +++ b/src/lkl/virtio.c @@ -115,7 +115,9 @@ static int virtio_read(void* data, int offset, void* res, int size) * shadow structure init routine copy the content from the host structure. */ uint32_t val = 0; - struct virtio_dev* dev = (struct virtio_dev*)data; + struct virtio_dev_handle* dev_handle = (struct virtio_dev_handle*)data; + struct virtio_dev* dev = dev_handle->dev; + struct virtio_dev* dev_host = dev_handle->dev_host; if (offset >= VIRTIO_MMIO_CONFIG) { @@ -162,11 +164,15 @@ static int virtio_read(void* data, int offset, void* res, int size) break; /* Security Review: dev->int_status is host-read-write */ case VIRTIO_MMIO_INTERRUPT_STATUS: - val = dev->int_status; + val = dev_host->int_status; + if (dev->int_status != val) + dev->int_status = val; break; /* Security Review: dev->status is host-read-write */ case VIRTIO_MMIO_STATUS: - val = dev->status; + val = dev_host->status; + if (dev->status != val) + dev->status = val; break; /* Security Review: dev->config_gen should be host-write-once */ case VIRTIO_MMIO_CONFIG_GENERATION: @@ -255,13 +261,37 @@ static inline void set_ptr_high(_Atomic(uint64_t) * ptr, uint32_t val) } while (!atomic_compare_exchange_weak(ptr, &expected, desired)); } -// TODO 
- update host side desc and avail static void virtio_notify_host_device(struct virtio_dev* dev, uint32_t qidx) { uint8_t dev_id = (uint8_t)dev->vendor_id; vio_enclave_notify_enclave_event (dev_id, qidx); } +static void copy_split_queue_contents(struct virtio_dev* dev, struct virtio_dev* dev_host, uint32_t qidx) +{ + //desc is based on num + //avail based on idx? + struct virtq* dev_q = &dev->split.queue[qidx]; + struct virtq* dev_host_q = &dev_host->split.queue[qidx]; + //Copy desc + + for (int i = 0; i < dev_q->num; i++) + { + dev_host_q->desc[i].addr = dev_q->desc[i].addr; + dev_host_q->desc[i].len = dev_q->desc[i].len; + dev_host_q->desc[i].flags = dev_q->desc[i].flags; + dev_host_q->desc[i].nexr = dev_q->desc[i].next; + } + + dev_host_q->avail->flags = dev_q->avail->flags; + dev_host_q->avail->idx = dev_q->avail->idx; + + for (int i = 0; i < dev_q->avail->idx; i++) + { + dev_host_q->avail->ring[i] = dev_q->avail->ring[i]; + } +} + /* * virtio_write : Handle all write requests to the device from driver * data : virtio_dev structure pointer @@ -290,9 +320,12 @@ static int virtio_write(void* data, int offset, void* res, int size) * perform copy-through write (write to shadow structure & to host * structure). virtq desc and avail ring address handling is a special case. */ - struct virtio_dev* dev = (struct virtio_dev*)data; + struct virtio_dev_handle* dev_handle = (struct virtio_dev_handle*)data; + struct virtio_dev* dev = dev_handle->dev; + struct virtio_dev* dev_host = dev_handle->dev_host; + struct virtq* split_q = packed_ring ? NULL : &dev->split.queue[dev->queue_sel]; - struct virtq_packed* packed_q = packed_ring ? &dev->packed.queue[dev->queue_sel] : NULL; + struct virtq_packed* packed_q = packed_ring ? 
&dev_host->packed.queue[dev->queue_sel] : NULL; uint32_t val; int ret = 0; @@ -321,52 +354,74 @@ static int virtio_write(void* data, int offset, void* res, int size) if (val > 1) return -LKL_EINVAL; dev->device_features_sel = val; + dev_host->device_features_sel = val; break; /* Security Review: dev->driver_features_sel should be host-read-only */ case VIRTIO_MMIO_DRIVER_FEATURES_SEL: if (val > 1) return -LKL_EINVAL; dev->driver_features_sel = val; + dev_host->driver_features_sel = val; break; /* Security Review: dev->driver_features should be host-read-only */ case VIRTIO_MMIO_DRIVER_FEATURES: virtio_write_driver_features(dev, val); + virtio_write_driver_features(dev_host, val); break; /* Security Review: dev->queue_sel should be host-read-only */ case VIRTIO_MMIO_QUEUE_SEL: dev->queue_sel = val; + dev_host->queue_sel = val; break; /* Security Review: dev->queue[dev->queue_sel].num should be * host-read-only */ case VIRTIO_MMIO_QUEUE_NUM: if (packed_ring) + { dev->packed.queue[dev->queue_sel].num = val; + dev_host->packed.queue[dev->queue_sel].num = val; + } else + { dev->split.queue[dev->queue_sel].num = val; + dev_host->split.queue[dev->queue_sel].num = val; + } break; /* Security Review: is dev->queue[dev->queue_sel].ready host-read-only? */ case VIRTIO_MMIO_QUEUE_READY: if (packed_ring) + { dev->packed.queue[dev->queue_sel].ready = val; + dev_host->packed.queue[dev->queue_sel].ready = val; + } else + { dev->split.queue[dev->queue_sel].ready = val; + dev_host->split.queue[dev->queue_sel].ready = val; + } break; /* Security Review: guest virtio driver(s) writes to virtq desc ring and * avail ring in guest memory. In queue notify flow, we need to copy the * update to desc ring and avail ring in host memory. 
*/ case VIRTIO_MMIO_QUEUE_NOTIFY: + if (!packed_ring) + { + copy_split_queue_contents(dev, dev_host, val); + } virtio_notify_host_device(dev, val); break; /* Security Review: dev->int_status is host-read-write */ case VIRTIO_MMIO_INTERRUPT_ACK: dev->int_status = 0; + dev_host->int_status = 0; break; /* Security Review: dev->status is host-read-write */ case VIRTIO_MMIO_STATUS: set_status(dev, val); + set_status(dev_host, val); break; /* Security Review: For Split Queue, q->desc link list * content should be host-read-only. The Split Queue implementaiton @@ -436,13 +491,13 @@ static int virtio_write(void* data, int offset, void* res, int size) if (packed_ring) set_ptr_low((_Atomic(uint64_t)*)&packed_q->device, val); else - set_ptr_low((_Atomic(uint64_t)*)&split_q->used, val); + set_ptr_low((_Atomic(uint64_t)*)&dev_host->split.queue[dev->queue_sel]->used, val); break; case VIRTIO_MMIO_QUEUE_USED_HIGH: if (packed_ring) set_ptr_high((_Atomic(uint64_t)*)&packed_q->device, val); else - set_ptr_high((_Atomic(uint64_t)*)&split_q->used, val); + set_ptr_high((_Atomic(uint64_t)*)&dev_host->split.queue[dev->queue_sel]->used, val); break; default: ret = -1; @@ -514,11 +569,11 @@ void* copy_queue(struct virtio_dev* dev) } vq_mem = mmap(0, - vq_size, - PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, - -1, - 0); + vq_size, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, + -1, + 0); if (!vq_mem) { @@ -545,25 +600,25 @@ void* copy_queue(struct virtio_dev* dev) // Need to copy avail and desc contents to host cannot have direct access // In practice this won't be used as the guest side allocates to host memory // But if we were to upstream changes to the linux kernel tree, this would be needed - dest_split[i].desc = mmap(0, - vq_desc_size, - PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, - -1, - 0); - if (!dest_split[i].desc) + dev->split.queue[i].desc = mmap(0, + vq_desc_size, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, + -1, + 0); + if 
(!dev->split.queue[i].desc) { sgxlkl_error("Desc mem alloc failed\n"); return NULL; } - dest_split[i].avail = mmap(0, - vq_avail_size, - PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, - -1, - 0); - if (!dest_split[i].avail) + dev->split.queue[i].avail = mmap(0, + vq_avail_size, + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, + -1, + 0); + if (!dev->split.queue[i].avail) { sgxlkl_error("Avail mem alloc failed\n"); return NULL; From 6022e1c2c6d4f75dbee4c881b99c40801aa79594 Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Thu, 6 May 2021 00:54:08 +0100 Subject: [PATCH 17/27] Add sanity checking for desc length. --- src/lkl/virtio.c | 73 ++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 68 insertions(+), 5 deletions(-) diff --git a/src/lkl/virtio.c b/src/lkl/virtio.c index 5c8dfb87c..f2bbea514 100644 --- a/src/lkl/virtio.c +++ b/src/lkl/virtio.c @@ -61,6 +61,8 @@ static uint32_t lkl_num_virtio_boot_devs; typedef void (*lkl_virtio_dev_deliver_irq)(uint64_t dev_id); static lkl_virtio_dev_deliver_irq virtio_deliver_irq[DEVICE_COUNT]; +static virtio_dev *dev_hosts[DEVICE_COUNT]; + struct virtio_dev_handle { struct virtio_dev *dev; //shadow structure in guest memory @@ -269,11 +271,8 @@ static void virtio_notify_host_device(struct virtio_dev* dev, uint32_t qidx) static void copy_split_queue_contents(struct virtio_dev* dev, struct virtio_dev* dev_host, uint32_t qidx) { - //desc is based on num - //avail based on idx? 
struct virtq* dev_q = &dev->split.queue[qidx]; struct virtq* dev_host_q = &dev_host->split.queue[qidx]; - //Copy desc for (int i = 0; i < dev_q->num; i++) { @@ -517,6 +516,55 @@ static const struct lkl_iomem_ops virtio_ops = { */ void lkl_virtio_deliver_irq(uint8_t dev_id) { + struct virtio_dev *dev_host = dev_hosts[dev_id]; + int num_queues = 0; + + switch(dev_host->device_id) + { + case VIRTIO_ID_NET: + num_queues = NET_DEV_NUM_QUEUES; + break; + case VIRTIO_ID_CONSOLE: + num_queues = CONSOLE_NUM_QUEUES; + break; + case VIRTIO_ID_BLOCK: + num_queues = BLK_DEV_NUM_QUEUES; + break; + } + + //Verify descriptor len doesn't exceed bounds + for (int i = 0; i < num_queues; i++) + { + if (packed_ring) + { + struct virtq_packed* packed_q = dev_host->packed.queue[i]; + for (int j = 0; j < packed_q->num; j++) + { + if (packed_q->desc[j].len > + sgxlkl_enclave_state.shared_memory.virtio_swiotlb_size) + { + sgxlkl_error("Virtio desc memory size larger than allocated bounce buffer\n"); + return; + } + } + } + + else + { + struct virtq* split_q = dev_host->split.queue[i]; + for (int j = 0; j < split_q->used->idx; j++) + { + if (split_q->used->ring[j].len > + sgxlkl_enclave_state.shared_memory.virtio_swiotlb_size) + { + sgxlkl_error("Virtio used memory size larger than allocated bounce buffer\n"); + return; + } + } + } + } + + // Get sgxlkl_enclave_state if (virtio_deliver_irq[dev_id]) virtio_deliver_irq[dev_id](dev_id); } @@ -654,7 +702,6 @@ int lkl_virtio_dev_setup( struct virtio_dev_handle *dev_handle; int avail = 0, num_bytes = 0, ret = 0; size_t dev_handle_size = next_pow2(sizeof(struct virtio_dev_handle)); - dev_handle = mmap(0, dev_handle_size, PROT_READ | PROT_WRITE, @@ -675,9 +722,24 @@ int lkl_virtio_dev_setup( dev->vendor_id = dev_host->vendor_id; dev->config_gen = dev_host->config_gen; dev->device_features = dev_host->device_features; - dev->config_data = dev_host->config_data; dev->config_len = dev_host->config_len; dev->int_status = dev_host->int_status; + if 
(dev->config_len != 0) + { + dev->config_data = mmap( + 0, + next_pow2(dev->config_len), + PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, + -1, + 0); + if (!dev->config_data) + { + sgxlkl_error("Failed to allocate memory for dev config data\n"); + return -1; + } + memcpy(dev->config_data, dev_host->config_data, dev->config_len); + } if (packed_ring) { @@ -713,6 +775,7 @@ int lkl_virtio_dev_setup( } virtio_deliver_irq[dev->vendor_id] = deliver_irq_cb; + dev_hosts[dev->vendor_id] = dev_host; dev->base = register_iomem(dev_handle, mmio_size, &virtio_ops); if (!lkl_is_running()) From 8594b83ba3ea67a5273022b5993a72326b294d5b Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Fri, 7 May 2021 18:55:27 +0100 Subject: [PATCH 18/27] Have consistency with pointer definition style. --- src/lkl/virtio.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/lkl/virtio.c b/src/lkl/virtio.c index f2bbea514..b4ed08117 100644 --- a/src/lkl/virtio.c +++ b/src/lkl/virtio.c @@ -61,12 +61,12 @@ static uint32_t lkl_num_virtio_boot_devs; typedef void (*lkl_virtio_dev_deliver_irq)(uint64_t dev_id); static lkl_virtio_dev_deliver_irq virtio_deliver_irq[DEVICE_COUNT]; -static virtio_dev *dev_hosts[DEVICE_COUNT]; +static virtio_dev* dev_hosts[DEVICE_COUNT]; struct virtio_dev_handle { - struct virtio_dev *dev; //shadow structure in guest memory - struct virtio_dev *dev_host; + struct virtio_dev* dev; //shadow structure in guest memory + struct virtio_dev* dev_host; }; /* @@ -699,7 +699,7 @@ int lkl_virtio_dev_setup( int mmio_size, void* deliver_irq_cb) { - struct virtio_dev_handle *dev_handle; + struct virtio_dev_handle* dev_handle; int avail = 0, num_bytes = 0, ret = 0; size_t dev_handle_size = next_pow2(sizeof(struct virtio_dev_handle)); dev_handle = mmap(0, From fb6ab3c6c11af0b007d3eae2a3deb12a76feda32 Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Sat, 8 May 2021 16:51:01 +0000 Subject: [PATCH 19/27] Replace mmap with enclave wrapper. 
--- src/lkl/virtio.c | 65 +++++++++++++++--------------------------------- 1 file changed, 20 insertions(+), 45 deletions(-) diff --git a/src/lkl/virtio.c b/src/lkl/virtio.c index b4ed08117..a546ea7d8 100644 --- a/src/lkl/virtio.c +++ b/src/lkl/virtio.c @@ -8,8 +8,10 @@ #include #include #include +#include #include #include +#include #include #include #include "enclave/vio_enclave_event_channel.h" @@ -61,7 +63,7 @@ static uint32_t lkl_num_virtio_boot_devs; typedef void (*lkl_virtio_dev_deliver_irq)(uint64_t dev_id); static lkl_virtio_dev_deliver_irq virtio_deliver_irq[DEVICE_COUNT]; -static virtio_dev* dev_hosts[DEVICE_COUNT]; +static struct virtio_dev* dev_hosts[DEVICE_COUNT]; struct virtio_dev_handle { @@ -120,7 +122,9 @@ static int virtio_read(void* data, int offset, void* res, int size) struct virtio_dev_handle* dev_handle = (struct virtio_dev_handle*)data; struct virtio_dev* dev = dev_handle->dev; struct virtio_dev* dev_host = dev_handle->dev_host; - +#ifdef DEBUG + oe_host_printf("Hi there reading, offset is: %x\n", offset); +#endif if (offset >= VIRTIO_MMIO_CONFIG) { offset -= VIRTIO_MMIO_CONFIG; @@ -279,7 +283,7 @@ static void copy_split_queue_contents(struct virtio_dev* dev, struct virtio_dev* dev_host_q->desc[i].addr = dev_q->desc[i].addr; dev_host_q->desc[i].len = dev_q->desc[i].len; dev_host_q->desc[i].flags = dev_q->desc[i].flags; - dev_host_q->desc[i].nexr = dev_q->desc[i].next; + dev_host_q->desc[i].next = dev_q->desc[i].next; } dev_host_q->avail->flags = dev_q->avail->flags; @@ -325,6 +329,9 @@ static int virtio_write(void* data, int offset, void* res, int size) struct virtq* split_q = packed_ring ? NULL : &dev->split.queue[dev->queue_sel]; struct virtq_packed* packed_q = packed_ring ? 
&dev_host->packed.queue[dev->queue_sel] : NULL; +#ifdef DEBUG + oe_host_printf("Hi there, packed queue is: %p, offset is: %x\n", packed_q, offset); +#endif uint32_t val; int ret = 0; @@ -490,13 +497,13 @@ static int virtio_write(void* data, int offset, void* res, int size) if (packed_ring) set_ptr_low((_Atomic(uint64_t)*)&packed_q->device, val); else - set_ptr_low((_Atomic(uint64_t)*)&dev_host->split.queue[dev->queue_sel]->used, val); + set_ptr_low((_Atomic(uint64_t)*)&dev_host->split.queue[dev->queue_sel].used, val); break; case VIRTIO_MMIO_QUEUE_USED_HIGH: if (packed_ring) set_ptr_high((_Atomic(uint64_t)*)&packed_q->device, val); else - set_ptr_high((_Atomic(uint64_t)*)&dev_host->split.queue[dev->queue_sel]->used, val); + set_ptr_high((_Atomic(uint64_t)*)&dev_host->split.queue[dev->queue_sel].used, val); break; default: ret = -1; @@ -537,7 +544,7 @@ void lkl_virtio_deliver_irq(uint8_t dev_id) { if (packed_ring) { - struct virtq_packed* packed_q = dev_host->packed.queue[i]; + struct virtq_packed* packed_q = &dev_host->packed.queue[i]; for (int j = 0; j < packed_q->num; j++) { if (packed_q->desc[j].len > @@ -551,7 +558,7 @@ void lkl_virtio_deliver_irq(uint8_t dev_id) else { - struct virtq* split_q = dev_host->split.queue[i]; + struct virtq* split_q = &dev_host->split.queue[i]; for (int j = 0; j < split_q->used->idx; j++) { if (split_q->used->ring[j].len > @@ -616,12 +623,7 @@ void* copy_queue(struct virtio_dev* dev) vq_size = next_pow2(num_queues * sizeof(struct virtq)); } - vq_mem = mmap(0, - vq_size, - PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, - -1, - 0); + vq_mem = sgxlkl_host_ops.mem_alloc(vq_size); if (!vq_mem) { @@ -648,24 +650,14 @@ void* copy_queue(struct virtio_dev* dev) // Need to copy avail and desc contents to host cannot have direct access // In practice this won't be used as the guest side allocates to host memory // But if we were to upstream changes to the linux kernel tree, this would be needed - dev->split.queue[i].desc = mmap(0, - 
vq_desc_size, - PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, - -1, - 0); + dev->split.queue[i].desc = sgxlkl_host_ops.mem_alloc(vq_desc_size); if (!dev->split.queue[i].desc) { sgxlkl_error("Desc mem alloc failed\n"); return NULL; } - dev->split.queue[i].avail = mmap(0, - vq_avail_size, - PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, - -1, - 0); + dev->split.queue[i].avail = sgxlkl_host_ops.mem_alloc(vq_avail_size); if (!dev->split.queue[i].avail) { sgxlkl_error("Avail mem alloc failed\n"); @@ -702,12 +694,7 @@ int lkl_virtio_dev_setup( struct virtio_dev_handle* dev_handle; int avail = 0, num_bytes = 0, ret = 0; size_t dev_handle_size = next_pow2(sizeof(struct virtio_dev_handle)); - dev_handle = mmap(0, - dev_handle_size, - PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, - -1, - 0); + dev_handle = sgxlkl_host_ops.mem_alloc(dev_handle_size); if (!dev_handle) { @@ -726,13 +713,7 @@ int lkl_virtio_dev_setup( dev->int_status = dev_host->int_status; if (dev->config_len != 0) { - dev->config_data = mmap( - 0, - next_pow2(dev->config_len), - PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, - -1, - 0); + dev->config_data = sgxlkl_host_ops.mem_alloc(next_pow2(dev->config_len)); if (!dev->config_data) { sgxlkl_error("Failed to allocate memory for dev config data\n"); @@ -824,13 +805,7 @@ struct virtio_dev* alloc_shadow_virtio_dev() { size_t dev_size = next_pow2(sizeof(struct virtio_dev)); - struct virtio_dev* dev = mmap( - 0, - dev_size, - PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_ANONYMOUS, - -1, - 0); + struct virtio_dev* dev = sgxlkl_host_ops.mem_alloc(dev_size); if (!dev) { From 50589acd0808f89fd6620b023f263b2040debffb Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Wed, 12 May 2021 00:44:22 +0100 Subject: [PATCH 20/27] Update dev_host int status on deliver irq. 
--- src/lkl/virtio_blkdev.c | 5 ++++- src/lkl/virtio_console.c | 7 +++---- src/lkl/virtio_netdev.c | 34 +++++++++++++++++----------------- 3 files changed, 24 insertions(+), 22 deletions(-) diff --git a/src/lkl/virtio_blkdev.c b/src/lkl/virtio_blkdev.c index cb4283fd3..54fb1ca94 100644 --- a/src/lkl/virtio_blkdev.c +++ b/src/lkl/virtio_blkdev.c @@ -30,10 +30,13 @@ static inline struct virtio_dev* get_blkdev_instance(uint8_t blkdev_id) */ static void lkl_deliver_irq(uint8_t dev_id) { + struct virtio_dev* dev_host = + sgxlkl_enclave_state.shared_memory.virtio_blk_dev_mem[dev_id]; + struct virtio_dev* dev = get_blkdev_instance(dev_id); + dev_host->int_status |= VIRTIO_MMIO_INT_VRING; dev->int_status |= VIRTIO_MMIO_INT_VRING; - // TODO might need to update int_status in host as well lkl_trigger_irq(dev->irq); } diff --git a/src/lkl/virtio_console.c b/src/lkl/virtio_console.c index 117f9f7b4..d595648d9 100644 --- a/src/lkl/virtio_console.c +++ b/src/lkl/virtio_console.c @@ -16,11 +16,10 @@ static struct virtio_dev* console; */ static void lkl_deliver_irq(uint64_t dev_id) { - //TODO may need to uncomment if int_status needs to be changed for host too - //struct virtio_dev* dev = - // sgxlkl_enclave_state.shared_memory.virtio_console_mem; + struct virtio_dev* dev_host = + sgxlkl_enclave_state.shared_memory.virtio_console_mem; - //dev->int_status |= VIRTIO_MMIO_INT_VRING; + dev_host->int_status |= VIRTIO_MMIO_INT_VRING; console->int_status |= VIRTIO_MMIO_INT_VRING; lkl_trigger_irq(console->irq); diff --git a/src/lkl/virtio_netdev.c b/src/lkl/virtio_netdev.c index 9ea2c301c..fd2cf19b4 100644 --- a/src/lkl/virtio_netdev.c +++ b/src/lkl/virtio_netdev.c @@ -11,20 +11,23 @@ #define MAX_NET_DEVS 16 -static uint8_t registered_dev_idx = 0; -static uint8_t registered_shadow_dev_idx = 0; +struct dev_handle +{ + struct virtio_dev* dev_host; + struct virtio_dev* dev; +}; -struct virtio_dev* host_devs[MAX_NET_DEVS]; //TODO may be able to delete this -struct virtio_dev* 
registered_shadow_devs[MAX_NET_DEVS]; +struct dev_handle devs[MAX_NET_DEVS]; +static uint8_t registered_dev_idx = 0; /* * Function to get netdev instance to use its attributes */ -static inline struct virtio_dev* get_netdev_instance(uint8_t netdev_id) +static inline struct dev_handle* get_netdev_instance(uint8_t netdev_id) { - for (size_t i = 0; i < registered_shadow_dev_idx; i++) - if (registered_shadow_devs[i]->vendor_id == netdev_id) - return registered_shadow_devs[i]; + for (size_t i = 0; i < registered_dev_idx; i++) + if (devs[i].shadow_dev->vendor_id == netdev_id) + return &devs[i]; SGXLKL_ASSERT(false); } @@ -48,10 +51,10 @@ static int dev_register(struct virtio_dev* dev) */ static void lkl_deliver_irq(uint64_t dev_id) { - struct virtio_dev* dev = get_netdev_instance(dev_id); + struct dev_handle* dev_pair = get_netdev_instance(dev_id); - dev->int_status |= VIRTIO_MMIO_INT_VRING; - // TODO might need to update int_status in host as well + dev_pair->dev->int_status |= VIRTIO_MMIO_INT_VRING; + dev_pair->dev_host->int_status |= VIRTIO_MMIO_INT_VRING; lkl_trigger_irq(dev->irq); } @@ -62,7 +65,6 @@ static void lkl_deliver_irq(uint64_t dev_id) */ int lkl_virtio_netdev_add(struct virtio_dev* netdev_host) { - //TODO might able to delete host dev stuff later int ret = -1; int mmio_size = VIRTIO_MMIO_CONFIG + netdev_host->config_len; struct virtio_dev* netdev = alloc_shadow_virtio_dev(); @@ -70,8 +72,7 @@ int lkl_virtio_netdev_add(struct virtio_dev* netdev_host) if (!netdev) return -1; - registered_shadow_devs[registered_shadow_dev_idx] = netdev; - host_devs[registered_dev_idx] = netdev_host; + devs[registered_dev_idx] = {netdev_host, netdev}; if (lkl_virtio_dev_setup(netdev, netdev_host, mmio_size, &lkl_deliver_irq) != 0) return -1; @@ -82,9 +83,8 @@ int lkl_virtio_netdev_add(struct virtio_dev* netdev_host) sgxlkl_fail("Failed to register netdev\n"); return -1; } - registered_dev_idx++; - return registered_shadow_dev_idx++; + return registered_dev_idx++; } /* @@ -93,7 
+93,7 @@ int lkl_virtio_netdev_add(struct virtio_dev* netdev_host) void lkl_virtio_netdev_remove(void) { uint8_t netdev_id = 0; - for (netdev_id = 0; netdev_id < registered_shadow_dev_idx; netdev_id++) + for (netdev_id = 0; netdev_id < registered_dev_idx; netdev_id++) { sgxlkl_host_netdev_remove(netdev_id); int ret = lkl_netdev_get_ifindex(netdev_id); From 845026f566b2bb9056f53f722b8396b8a0fd0995 Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Tue, 11 May 2021 23:49:49 +0000 Subject: [PATCH 21/27] Fix syntax errors. --- src/lkl/virtio_netdev.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/lkl/virtio_netdev.c b/src/lkl/virtio_netdev.c index fd2cf19b4..abef5cf63 100644 --- a/src/lkl/virtio_netdev.c +++ b/src/lkl/virtio_netdev.c @@ -26,7 +26,7 @@ static uint8_t registered_dev_idx = 0; static inline struct dev_handle* get_netdev_instance(uint8_t netdev_id) { for (size_t i = 0; i < registered_dev_idx; i++) - if (devs[i].shadow_dev->vendor_id == netdev_id) + if (devs[i].dev->vendor_id == netdev_id) return &devs[i]; SGXLKL_ASSERT(false); } @@ -56,7 +56,7 @@ static void lkl_deliver_irq(uint64_t dev_id) dev_pair->dev->int_status |= VIRTIO_MMIO_INT_VRING; dev_pair->dev_host->int_status |= VIRTIO_MMIO_INT_VRING; - lkl_trigger_irq(dev->irq); + lkl_trigger_irq(dev_pair->dev->irq); } /* @@ -72,7 +72,8 @@ int lkl_virtio_netdev_add(struct virtio_dev* netdev_host) if (!netdev) return -1; - devs[registered_dev_idx] = {netdev_host, netdev}; + devs[registered_dev_idx].dev_host = netdev_host; + devs[registered_dev_idx].dev = netdev; if (lkl_virtio_dev_setup(netdev, netdev_host, mmio_size, &lkl_deliver_irq) != 0) return -1; From a0225d64a3b244bf05398e70a6cb794769dccb15 Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Wed, 12 May 2021 15:39:23 +0000 Subject: [PATCH 22/27] Clean up lkl/virtio.c. 
--- src/lkl/virtio.c | 102 +++++++++-------------------------------------- 1 file changed, 18 insertions(+), 84 deletions(-) diff --git a/src/lkl/virtio.c b/src/lkl/virtio.c index a546ea7d8..999b5fa68 100644 --- a/src/lkl/virtio.c +++ b/src/lkl/virtio.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include "enclave/vio_enclave_event_channel.h" #include @@ -65,6 +66,10 @@ static lkl_virtio_dev_deliver_irq virtio_deliver_irq[DEVICE_COUNT]; static struct virtio_dev* dev_hosts[DEVICE_COUNT]; +/* + * Used for switching between the host and shadow dev structure based + * on the virtio_read/write request + */ struct virtio_dev_handle { struct virtio_dev* dev; //shadow structure in guest memory @@ -84,6 +89,12 @@ static inline uint32_t virtio_read_device_features(struct virtio_dev* dev) return (uint32_t)dev->device_features; } +/* + * virtio_has_feature: Return whether feature bit has been set on virtio device + * dev: pointer to device structure + * bit: feature bit + * return whether feature bit is set + */ static bool virtio_has_feature(struct virtio_dev* dev, unsigned int bit) { return dev->device_features & BIT(bit); @@ -122,9 +133,7 @@ static int virtio_read(void* data, int offset, void* res, int size) struct virtio_dev_handle* dev_handle = (struct virtio_dev_handle*)data; struct virtio_dev* dev = dev_handle->dev; struct virtio_dev* dev_host = dev_handle->dev_host; -#ifdef DEBUG - oe_host_printf("Hi there reading, offset is: %x\n", offset); -#endif + if (offset >= VIRTIO_MMIO_CONFIG) { offset -= VIRTIO_MMIO_CONFIG; @@ -273,28 +282,6 @@ static void virtio_notify_host_device(struct virtio_dev* dev, uint32_t qidx) vio_enclave_notify_enclave_event (dev_id, qidx); } -static void copy_split_queue_contents(struct virtio_dev* dev, struct virtio_dev* dev_host, uint32_t qidx) -{ - struct virtq* dev_q = &dev->split.queue[qidx]; - struct virtq* dev_host_q = &dev_host->split.queue[qidx]; - - for (int i = 0; i < dev_q->num; i++) - { - dev_host_q->desc[i].addr = 
dev_q->desc[i].addr; - dev_host_q->desc[i].len = dev_q->desc[i].len; - dev_host_q->desc[i].flags = dev_q->desc[i].flags; - dev_host_q->desc[i].next = dev_q->desc[i].next; - } - - dev_host_q->avail->flags = dev_q->avail->flags; - dev_host_q->avail->idx = dev_q->avail->idx; - - for (int i = 0; i < dev_q->avail->idx; i++) - { - dev_host_q->avail->ring[i] = dev_q->avail->ring[i]; - } -} - /* * virtio_write : Handle all write requests to the device from driver * data : virtio_dev structure pointer @@ -327,11 +314,8 @@ static int virtio_write(void* data, int offset, void* res, int size) struct virtio_dev* dev = dev_handle->dev; struct virtio_dev* dev_host = dev_handle->dev_host; - struct virtq* split_q = packed_ring ? NULL : &dev->split.queue[dev->queue_sel]; + struct virtq* split_q = packed_ring ? NULL : &dev_host->split.queue[dev->queue_sel]; struct virtq_packed* packed_q = packed_ring ? &dev_host->packed.queue[dev->queue_sel] : NULL; -#ifdef DEBUG - oe_host_printf("Hi there, packed queue is: %p, offset is: %x\n", packed_q, offset); -#endif uint32_t val; int ret = 0; @@ -413,10 +397,6 @@ static int virtio_write(void* data, int offset, void* res, int size) * update to desc ring and avail ring in host memory. 
*/ case VIRTIO_MMIO_QUEUE_NOTIFY: - if (!packed_ring) - { - copy_split_queue_contents(dev, dev_host, val); - } virtio_notify_host_device(dev, val); break; /* Security Review: dev->int_status is host-read-write */ @@ -497,13 +477,13 @@ static int virtio_write(void* data, int offset, void* res, int size) if (packed_ring) set_ptr_low((_Atomic(uint64_t)*)&packed_q->device, val); else - set_ptr_low((_Atomic(uint64_t)*)&dev_host->split.queue[dev->queue_sel].used, val); + set_ptr_low((_Atomic(uint64_t)*)&split_q->used, val); break; case VIRTIO_MMIO_QUEUE_USED_HIGH: if (packed_ring) set_ptr_high((_Atomic(uint64_t)*)&packed_q->device, val); else - set_ptr_high((_Atomic(uint64_t)*)&dev_host->split.queue[dev->queue_sel].used, val); + set_ptr_high((_Atomic(uint64_t)*)&split_q->used, val); break; default: ret = -1; @@ -576,11 +556,11 @@ void lkl_virtio_deliver_irq(uint8_t dev_id) virtio_deliver_irq[dev_id](dev_id); } -void* copy_queue(struct virtio_dev* dev) +static void* copy_queue(struct virtio_dev* dev) { void* vq_mem = NULL; int num_queues = 0; - size_t vq_size = 0, vq_desc_size = 0, vq_avail_size = 0; + size_t vq_size = 0; struct virtq_packed* dest_packed = NULL; struct virtq* dest_split = NULL; @@ -588,27 +568,12 @@ void* copy_queue(struct virtio_dev* dev) { case VIRTIO_ID_NET: num_queues = NET_DEV_NUM_QUEUES; - if (!packed_ring) - { - vq_desc_size = next_pow2(NET_DEV_QUEUE_DEPTH * sizeof(struct virtq_desc)); - vq_avail_size = next_pow2(NET_DEV_QUEUE_DEPTH * sizeof(struct virtq_avail)); - } break; case VIRTIO_ID_CONSOLE: num_queues = CONSOLE_NUM_QUEUES; - if (!packed_ring) - { - vq_desc_size = next_pow2(CONSOLE_QUEUE_DEPTH * sizeof(struct virtq_desc)); - vq_avail_size = next_pow2(CONSOLE_QUEUE_DEPTH * sizeof(struct virtq_avail)); - } break; case VIRTIO_ID_BLOCK: num_queues = BLK_DEV_NUM_QUEUES; - if (!packed_ring) - { - vq_desc_size = next_pow2(BLK_DEV_QUEUE_DEPTH * sizeof(struct virtq_desc)); - vq_avail_size = next_pow2(BLK_DEV_QUEUE_DEPTH * sizeof(struct virtq_avail)); 
- } break; default: sgxlkl_error("Unsupported device, device id: %d\n", dev->device_id); @@ -646,38 +611,7 @@ void* copy_queue(struct virtio_dev* dev) else { dest_split[i].num_max = dev->split.queue[i].num_max; - - // Need to copy avail and desc contents to host cannot have direct access - // In practice this won't be used as the guest side allocates to host memory - // But if we were to upstream changes to the linux kernel tree, this would be needed - dev->split.queue[i].desc = sgxlkl_host_ops.mem_alloc(vq_desc_size); - if (!dev->split.queue[i].desc) - { - sgxlkl_error("Desc mem alloc failed\n"); - return NULL; - } - - dev->split.queue[i].avail = sgxlkl_host_ops.mem_alloc(vq_avail_size); - if (!dev->split.queue[i].avail) - { - sgxlkl_error("Avail mem alloc failed\n"); - return NULL; - } } - // TODO Don't think I need anything else here such as desc allocation, but double check later - - //dest[i].device_wrap_counter = queue[i].device_wrap_counter; - //dest[i].driver_wrap_counter = queue[i].driver_wrap_counter; - // - // Need to allocate memory for host side desc - // Because we'll be using the shadow dev for the desc - - // My proposal; just use host side desc directly: for reading and writing - // WHen host makes a notification, we can check its contents to ensure its valid - - //What's the point in keeping a copy of anything that's host-read-write? - //A: I suppose for the purpose of sanity checking. So checking desc length is valid - //but can't we just check the host contents directly when sanity checking? } return packed_ring ? 
(void *) dest_packed : (void *) dest_split; } @@ -749,7 +683,7 @@ int lkl_virtio_dev_setup( if (dev->irq < 0) return 1; - if (!virtio_has_feature(dev, VIRTIO_F_RING_PACKED)) + if (packed_ring && !virtio_has_feature(dev, VIRTIO_F_RING_PACKED)) { sgxlkl_error("Device %d does not support virtio packed ring\n", dev->device_id); return -1; From 7b12fd1c421a42dd01ce8f9caf11ed8b91d26736 Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Thu, 13 May 2021 00:30:26 +0000 Subject: [PATCH 23/27] Add sanity check for number of devices. --- src/lkl/virtio.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/lkl/virtio.c b/src/lkl/virtio.c index 999b5fa68..9496e1b94 100644 --- a/src/lkl/virtio.c +++ b/src/lkl/virtio.c @@ -689,6 +689,12 @@ int lkl_virtio_dev_setup( return -1; } + if (dev->vendor_id >= DEVICE_COUNT) + { + sgxlkl_error("Too many devices. Only %d devices are supported\n", DEVICE_COUNT); + return -1; + } + virtio_deliver_irq[dev->vendor_id] = deliver_irq_cb; dev_hosts[dev->vendor_id] = dev_host; dev->base = register_iomem(dev_handle, mmio_size, &virtio_ops); From 03cc9d068ad5cb0e77824f9c74d8c814f7c242d6 Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Thu, 13 May 2021 13:57:21 +0000 Subject: [PATCH 24/27] Add check that virtqueue arrays are in shared memory. --- src/lkl/virtio.c | 92 ++++++++++++++++++++++++++++++++---------------- 1 file changed, 61 insertions(+), 31 deletions(-) diff --git a/src/lkl/virtio.c b/src/lkl/virtio.c index 9496e1b94..7f861743c 100644 --- a/src/lkl/virtio.c +++ b/src/lkl/virtio.c @@ -48,6 +48,7 @@ bool packed_ring = false; #include #endif + /* Used for notifying LKL for the list of virtio devices at bootup. 
* Currently block, network and console devices are passed */ char lkl_virtio_devs[4096]; @@ -497,6 +498,21 @@ static const struct lkl_iomem_ops virtio_ops = { .write = virtio_write, }; +static int device_num_queues(int device_id) +{ + switch(device_id) + { + case VIRTIO_ID_NET: + return NET_DEV_NUM_QUEUES; + case VIRTIO_ID_CONSOLE: + return CONSOLE_NUM_QUEUES; + case VIRTIO_ID_BLOCK: + return BLK_DEV_NUM_QUEUES; + default: + return 0; + } +} + /* * lkl_virtio_deliver_irq : Deliver the irq request to device task * dev_id : Device id for which irq needs to be delivered. @@ -504,20 +520,7 @@ static const struct lkl_iomem_ops virtio_ops = { void lkl_virtio_deliver_irq(uint8_t dev_id) { struct virtio_dev *dev_host = dev_hosts[dev_id]; - int num_queues = 0; - - switch(dev_host->device_id) - { - case VIRTIO_ID_NET: - num_queues = NET_DEV_NUM_QUEUES; - break; - case VIRTIO_ID_CONSOLE: - num_queues = CONSOLE_NUM_QUEUES; - break; - case VIRTIO_ID_BLOCK: - num_queues = BLK_DEV_NUM_QUEUES; - break; - } + int num_queues = device_num_queues(dev_host->device_id); //Verify descriptor len doesn't exceed bounds for (int i = 0; i < num_queues; i++) @@ -559,26 +562,11 @@ void lkl_virtio_deliver_irq(uint8_t dev_id) static void* copy_queue(struct virtio_dev* dev) { void* vq_mem = NULL; - int num_queues = 0; - size_t vq_size = 0; struct virtq_packed* dest_packed = NULL; struct virtq* dest_split = NULL; + size_t vq_size = 0; + int num_queues = device_num_queues(dev->device_id); - switch (dev->device_id) - { - case VIRTIO_ID_NET: - num_queues = NET_DEV_NUM_QUEUES; - break; - case VIRTIO_ID_CONSOLE: - num_queues = CONSOLE_NUM_QUEUES; - break; - case VIRTIO_ID_BLOCK: - num_queues = BLK_DEV_NUM_QUEUES; - break; - default: - sgxlkl_error("Unsupported device, device id: %d\n", dev->device_id); - return NULL; - } if (packed_ring) { vq_size = next_pow2(num_queues * sizeof(struct virtq_packed)); @@ -616,6 +604,35 @@ static void* copy_queue(struct virtio_dev* dev) return packed_ring ? 
(void *) dest_packed : (void *) dest_split; } +static bool virtqueues_in_shared_memory(struct virtio_dev* dev) +{ + int num_queues = device_num_queues(dev->device_id); + + for (int i = 0; i < num_queues; i++) + { + if (packed_ring) + { + if((oe_is_within_enclave(&dev->packed.queue[i], sizeof(struct virtq_packed)))) + return false; + } + + else + { + if((oe_is_within_enclave(&dev->split.queue[i], sizeof(struct virtq)))) + return false; + } + } + + return true; +} + +static bool supported_device(struct virtio_dev* dev) +{ + return dev->device_id == VIRTIO_ID_NET || + dev->device_id == VIRTIO_ID_CONSOLE || + dev->device_id == VIRTIO_ID_BLOCK; +} + /* * Function to setup the virtio device setting */ @@ -645,6 +662,13 @@ int lkl_virtio_dev_setup( dev->device_features = dev_host->device_features; dev->config_len = dev_host->config_len; dev->int_status = dev_host->int_status; + + if (!supported_device(dev)) + { + sgxlkl_error("Unsupported device, device id: %d\n", dev->device_id); + return -1; + } + if (dev->config_len != 0) { dev->config_data = sgxlkl_host_ops.mem_alloc(next_pow2(dev->config_len)); @@ -675,6 +699,12 @@ int lkl_virtio_dev_setup( } } + if (!virtqueues_in_shared_memory(dev_host)) + { + sgxlkl_error("Virtqueue arrays not in shared memory\n"); + return -1; + } + dev->irq = lkl_get_free_irq("virtio"); dev_host->irq = dev->irq; dev_host->int_status = 0; From 24366a4f969e44e073f4c2818a0c96893ae905b5 Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Mon, 17 May 2021 21:10:24 +0000 Subject: [PATCH 25/27] Removing desc len sanity check. 
--- src/lkl/virtio.c | 34 +--------------------------------- 1 file changed, 1 insertion(+), 33 deletions(-) diff --git a/src/lkl/virtio.c b/src/lkl/virtio.c index 7f861743c..ef1894365 100644 --- a/src/lkl/virtio.c +++ b/src/lkl/virtio.c @@ -521,39 +521,7 @@ void lkl_virtio_deliver_irq(uint8_t dev_id) { struct virtio_dev *dev_host = dev_hosts[dev_id]; int num_queues = device_num_queues(dev_host->device_id); - - //Verify descriptor len doesn't exceed bounds - for (int i = 0; i < num_queues; i++) - { - if (packed_ring) - { - struct virtq_packed* packed_q = &dev_host->packed.queue[i]; - for (int j = 0; j < packed_q->num; j++) - { - if (packed_q->desc[j].len > - sgxlkl_enclave_state.shared_memory.virtio_swiotlb_size) - { - sgxlkl_error("Virtio desc memory size larger than allocated bounce buffer\n"); - return; - } - } - } - - else - { - struct virtq* split_q = &dev_host->split.queue[i]; - for (int j = 0; j < split_q->used->idx; j++) - { - if (split_q->used->ring[j].len > - sgxlkl_enclave_state.shared_memory.virtio_swiotlb_size) - { - sgxlkl_error("Virtio used memory size larger than allocated bounce buffer\n"); - return; - } - } - } - } - + // Get sgxlkl_enclave_state if (virtio_deliver_irq[dev_id]) virtio_deliver_irq[dev_id](dev_id); From c5a50316748efd6d9599bf60cd78c120213e3a9f Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Tue, 1 Jun 2021 18:31:47 +0000 Subject: [PATCH 26/27] Remove unused variable. 
--- src/lkl/virtio.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/lkl/virtio.c b/src/lkl/virtio.c index ef1894365..1b57e4bab 100644 --- a/src/lkl/virtio.c +++ b/src/lkl/virtio.c @@ -519,9 +519,6 @@ static int device_num_queues(int device_id) */ void lkl_virtio_deliver_irq(uint8_t dev_id) { - struct virtio_dev *dev_host = dev_hosts[dev_id]; - int num_queues = device_num_queues(dev_host->device_id); - // Get sgxlkl_enclave_state if (virtio_deliver_irq[dev_id]) virtio_deliver_irq[dev_id](dev_id); From 740e64cfd2039b53df6360b581d062f7b8649fe7 Mon Sep 17 00:00:00 2001 From: Kenny Macheka Date: Sat, 12 Jun 2021 18:06:20 +0100 Subject: [PATCH 27/27] Add extra condition for sending IRQ for packed ring. --- src/host_interface/virtio.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/host_interface/virtio.c b/src/host_interface/virtio.c index a01fdda40..8435f791f 100644 --- a/src/host_interface/virtio.c +++ b/src/host_interface/virtio.c @@ -384,7 +384,8 @@ static void virtio_req_complete_packed(struct virtio_req* req, uint32_t len) // new_used event old_used (X) // event old_used new_used (X) if ((used_desc_idx > event_idx && event_idx >= prev_used_desc_idx) || - (used_desc_idx < prev_used_desc_idx && prev_used_desc_idx <= event_idx)) + (used_desc_idx < prev_used_desc_idx && prev_used_desc_idx <= event_idx) || + (used_desc_idx < prev_used_desc_idx && event_idx < used_desc_idx)) send_irq = 1; }