Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Kennymacheka/issue745 #848

Open
wants to merge 27 commits into
base: oe_port
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
27 commits
Select commit Hold shift + click to select a range
9829ea6
Add packed vring structures and create split and packed processing ro…
KennyMacheka Apr 15, 2021
d4cafa3
Create function to add use packed vring desc.
KennyMacheka Apr 16, 2021
6879c3d
Implement virtio_req_complete_packed and refactoring.
KennyMacheka Apr 18, 2021
81f7eb5
Add packed ring in lkl/virtio.c.
KennyMacheka Apr 18, 2021
2aa85f5
Fix typos and refactor.
KennyMacheka Apr 18, 2021
1208475
Add PACKED_RING macro.
KennyMacheka Apr 18, 2021
3e2d213
Fixing bug for packed_desc_is_avail.
KennyMacheka Apr 20, 2021
bcf19cf
Add device and driver events into packed vring and initialise in virt…
KennyMacheka Apr 20, 2021
7cfaebc
Round queue desc event size to next pow 2.
KennyMacheka Apr 20, 2021
1dd6818
Refactoring and bug fixing.
KennyMacheka Apr 20, 2021
f5ac452
Add unprocessed used desc in packed virtq.
KennyMacheka Apr 25, 2021
8b81d1b
Fix bug for setting avail and used flags to 0.
KennyMacheka Apr 25, 2021
fb015f6
Cleaning up code.
KennyMacheka Apr 26, 2021
c39533f
Initialise shadow devs.
KennyMacheka May 2, 2021
23be735
Copy host dev into shadow dev.
KennyMacheka May 4, 2021
18e1bfc
Update virtio_read and virtio_write with shadow structure.
KennyMacheka May 5, 2021
6022e1c
Add sanity checking for desc length.
KennyMacheka May 5, 2021
8594b83
Have consistency with pointer definition style.
KennyMacheka May 7, 2021
fb6ab3c
Replace mmap with enclave wrapper.
KennyMacheka May 8, 2021
50589ac
Update dev_host int status on deliver irq.
KennyMacheka May 11, 2021
845026f
Fix syntax errors.
KennyMacheka May 11, 2021
a0225d6
Clean up lkl/virtio.c.
KennyMacheka May 12, 2021
7b12fd1
Add sanity check for number of devices.
KennyMacheka May 13, 2021
03cc9d0
Add check that virtqueue arrays are in shared memory.
KennyMacheka May 13, 2021
24366a4
Removing desc len sanity check.
KennyMacheka May 17, 2021
c5a5031
Remove unused variable.
KennyMacheka Jun 1, 2021
740e64c
Add extra condition for sending IRQ for packed ring.
KennyMacheka Jun 12, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Cleaning up code.
  • Loading branch information
KennyMacheka committed May 13, 2021
commit fb015f63d2fd3d6f7b2d87806b22cd21aa58f1af
88 changes: 28 additions & 60 deletions src/host_interface/virtio.c
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,6 @@ bool packed_ring = true;
bool packed_ring = false;
#endif

#ifdef DEBUG
static int counter = 0;
#endif

struct _virtio_req
{
union {
Expand All @@ -47,10 +43,6 @@ static int packed_desc_is_avail(struct virtq_packed *q, struct virtq_packed_desc
{
bool avail = !!(desc->flags & (1 << LKL_VRING_PACKED_DESC_F_AVAIL));
bool used = !!(desc->flags & (1 << LKL_VRING_PACKED_DESC_F_USED));
#ifdef DEBUG
if (counter < 5)
printf("Flags is %d %d %d %d\n", desc->flags, avail, used, q->driver_wrap_counter);
#endif
return avail != used && avail == q->driver_wrap_counter;
}

Expand Down Expand Up @@ -154,15 +146,14 @@ static struct virtq_packed_desc* get_next_desc_packed(
return &q->desc[++(*idx) & (q->num - 1)];
}


/*
* virtio_add_used: update used ring at used index with used descriptor index
* virtio_add_used_split: update used ring at used index with used descriptor index
* q : input parameter
* used_idx : input parameter
* avail_idx: input parameter
* len : input parameter
*/
static inline void virtio_add_used(
static inline void virtio_add_used_split(
struct virtq* q,
uint16_t used_idx,
uint16_t avail_idx,
Expand All @@ -181,6 +172,7 @@ static inline void virtio_add_used_packed(
uint32_t len,
uint16_t id)
{
__sync_synchronize();
struct virtq_packed_desc* desc = &q->desc[used_idx & (q->num -1)];
desc->id = id;
desc->len = htole32(len);
Expand Down Expand Up @@ -272,7 +264,7 @@ static void virtio_req_complete_split(struct virtio_req* req, uint32_t len)
else
used_len = min_len(len, req->buf[i].iov_len);

virtio_add_used(q, used_idx++, avail_idx++, used_len);
virtio_add_used_split(q, used_idx++, avail_idx++, used_len);

len -= used_len;
if (!len)
Expand Down Expand Up @@ -342,24 +334,17 @@ static void virtio_req_complete_packed(struct virtio_req* req, uint32_t len)
* avail_desc_idx and used_desc_idx to be incremented and wrapped around as appropriate
* changing the wrap counters when the above are wrapped around
*

This function only gets called either with chained descriptors,
or max_merge_len (which I assume would also be chained descriptors).

I know this as for example it gets called from blk_enqueue,
whose request is chained, and the same with network device
(and I assume the same for console)
* This function only gets called either with chained descriptors,
* or max_merge_len (which I assume would also be chained descriptors).
*/
int send_irq = 0;
struct _virtio_req* _req = container_of(req, struct _virtio_req, req);
struct virtq_packed* q = _req->packed.q;
uint16_t avail_desc_idx = _req->idx;
uint16_t used_desc_idx = q->used_desc_idx;
uint16_t prev_used_desc_idx = used_desc_idx;
uint16_t last_buffer_idx = avail_desc_idx+(req->buf_count-1);
uint16_t used_len;

#ifdef DEBUG
printf("\nRequest complete. queue num: %d, flags: %d, driver flag: %d, req avail index: %d, ring avail index: %d, ring used index: %d, device wrap counter: %d, counter: %d\n", q->num, q->desc[avail_desc_idx].flags, q->driver->flags, avail_desc_idx, q->avail_desc_idx, used_desc_idx, q->device_wrap_counter, counter);
#endif
uint16_t used_len, event_idx;

if (!q->max_merge_len)
used_len = len;
Expand All @@ -382,27 +367,29 @@ static void virtio_req_complete_packed(struct virtio_req* req, uint32_t len)
{
avail_desc_idx -= q->num;
q->driver_wrap_counter = !q->driver_wrap_counter;
#ifdef DEBUG
printf("Changing wrapper\n");
#endif
}

// Don't think we need to synchronise used
q->used_desc_idx = used_desc_idx;
q->avail_desc_idx = avail_desc_idx;

if (q->driver->flags == LKL_VRING_PACKED_EVENT_FLAG_ENABLE)
send_irq = 1;

/**TODO*/
// Need to use event suppression here - but in theory this should work
// Read from driver event
if (q->driver->flags == LKL_VRING_PACKED_EVENT_FLAG_ENABLE ||
q->driver->flags == LKL_VRING_PACKED_EVENT_FLAG_DESC)
else if (q->driver->flags == LKL_VRING_PACKED_EVENT_FLAG_DESC)
{
#ifdef DEBUG
printf("Delivering irq\n");
#endif
virtio_deliver_irq(_req->dev);
event_idx = q->driver->off_wrap & ~(1 << LKL_VRING_PACKED_EVENT_F_WRAP_CTR);
// Check if event_idx has been set as used
// old_used event new_used
// new_used old_used event
// new_used event old_used (X)
// event old_used new_used (X)
if ((used_desc_idx > event_idx && event_idx >= prev_used_desc_idx) ||
(used_desc_idx < prev_used_desc_idx && prev_used_desc_idx <= event_idx))
send_irq = 1;
}

if (send_irq)
virtio_deliver_irq(_req->dev);
}

/*
Expand Down Expand Up @@ -536,38 +523,17 @@ static void virtio_process_queue_split(struct virtio_dev* dev, uint32_t qidx)
*/
static void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx)
{
#ifdef DEBUG
counter++;
#endif
struct virtq_packed* q = &dev->packed.queue[qidx];

if (!q->ready)
return;

if (dev->ops->acquire_queue)
dev->ops->acquire_queue(dev, qidx);
#ifdef DEBUG
if (counter < 5)
{
printf("The qidx: \n");
printf(
"Processing queue %d %d %d %d\n",
q->avail_desc_idx,
q->num,
q->desc[q->avail_desc_idx].flags,
q->driver->flags);
printf("Rest: \n");

for (int i = 0; i < q->num; i++)
{
printf("desc num: %d Flag: %d\n",i, q->desc[i].flags);
}
}
#endif

__sync_synchronize();
q->device->flags = LKL_VRING_PACKED_EVENT_FLAG_DISABLE;

// TODO - might need to check driver and see if we need to process a specific descriptor
// Have some loop that keeps going until we hit a desc that's not available
while (packed_desc_is_avail(q,&q->desc[q->avail_desc_idx & (q->num-1)]))
{
// Need to process desc here
Expand All @@ -577,7 +543,9 @@ static void virtio_process_queue_packed(struct virtio_dev* dev, uint32_t qidx)
break;
}

__sync_synchronize();
q->device->flags = LKL_VRING_PACKED_EVENT_FLAG_ENABLE;

if (dev->ops->release_queue)
dev->ops->release_queue(dev, qidx);
}
Expand Down
30 changes: 1 addition & 29 deletions src/host_interface/virtio_blkdev.c
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
extern sgxlkl_host_state_t sgxlkl_host_state;

#if DEBUG && VIRTIO_TEST_HOOK
#include <stdio.h>
static uint64_t virtio_blk_req_cnt;
#endif // DEBUG && VIRTIO_TEST_HOOK

Expand Down Expand Up @@ -113,7 +114,6 @@ int blk_device_init(
void* vq_mem = NULL;
struct virtio_blk_dev* host_blk_device = NULL;
size_t bdev_size = sizeof(struct virtio_blk_dev);
size_t event_size = next_pow2(sizeof(struct virtq_packed_desc_event));
size_t vq_size;

if (!packed_ring)
Expand Down Expand Up @@ -168,33 +168,6 @@ int blk_device_init(
host_blk_device->dev.packed.queue[i].num_max = HOST_BLK_DEV_QUEUE_DEPTH;
host_blk_device->dev.packed.queue[i].device_wrap_counter = 1;
host_blk_device->dev.packed.queue[i].driver_wrap_counter = 1;
host_blk_device->dev.packed.queue[i].driver = mmap(
0,
event_size,
PROT_READ,
MAP_SHARED | MAP_ANONYMOUS,
-1,
0
);
if (!host_blk_device->dev.packed.queue[i].driver)
{
sgxlkl_host_fail("%s: block device queue descriptor event allocation failed\n", __func__);
return -1;
}
host_blk_device->dev.packed.queue[i].device = mmap(
0,
event_size,
PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS,
-1,
0
);
if (!host_blk_device->dev.packed.queue[i].device)
{
sgxlkl_host_fail("%s: block device queue descriptor event allocation failed\n", __func__);
return -1;
}
host_blk_device->dev.packed.queue[i].device->flags = LKL_VRING_PACKED_EVENT_FLAG_ENABLE;
}
}

Expand All @@ -214,7 +187,6 @@ int blk_device_init(
if (packed_ring)
host_blk_device->dev.device_features |= BIT(VIRTIO_F_RING_PACKED);


if (enable_swiotlb)
host_blk_device->dev.device_features |= BIT(VIRTIO_F_IOMMU_PLATFORM);

Expand Down
30 changes: 2 additions & 28 deletions src/host_interface/virtio_console.c
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,9 @@ void* monitor_console_input(void* cons_dev)
break;

if (ret & DEV_CONSOLE_WRITE)
{
virtio_process_queue(dev, RX_QUEUE_ID);
}
} while (1);
return NULL;
}
Expand Down Expand Up @@ -228,7 +230,6 @@ int virtio_console_init(sgxlkl_host_state_t* host_state, host_dev_config_t* cfg)
void* console_vq_mem = NULL;

size_t host_console_size = next_pow2(sizeof(struct virtio_console_dev));
size_t event_size = next_pow2(sizeof(struct virtq_packed_desc_event));
size_t console_vq_size;

if (!packed_ring)
Expand Down Expand Up @@ -298,33 +299,6 @@ int virtio_console_init(sgxlkl_host_state_t* host_state, host_dev_config_t* cfg)
dev->packed.queue[i].num_max = QUEUE_DEPTH;
dev->packed.queue[i].device_wrap_counter = 1;
dev->packed.queue[i].driver_wrap_counter = 1;
dev->packed.queue[i].driver = mmap(
0,
event_size,
PROT_READ,
MAP_SHARED | MAP_ANONYMOUS,
-1,
0
);
if (!dev->packed.queue[i].driver)
{
sgxlkl_host_fail("%s: block device queue descriptor event allocation failed\n", __func__);
return -1;
}
dev->packed.queue[i].device = mmap(
0,
event_size,
PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS,
-1,
0
);
if (!dev->packed.queue[i].device)
{
sgxlkl_host_fail("%s: block device queue descriptor event allocation failed\n", __func__);
return -1;
}
dev->packed.queue[i].device->flags = LKL_VRING_PACKED_EVENT_FLAG_ENABLE;
}
}

Expand Down
28 changes: 0 additions & 28 deletions src/host_interface/virtio_netdev.c
Original file line number Diff line number Diff line change
Expand Up @@ -551,7 +551,6 @@ int netdev_init(sgxlkl_host_state_t* host_state)
mac[0] &= 0xfe;

size_t host_netdev_size = next_pow2(sizeof(struct virtio_net_dev));
size_t event_size = next_pow2(sizeof(struct virtq_packed_desc_event));

if (!packed_ring)
netdev_vq_size = NUM_QUEUES * sizeof(struct virtq);
Expand Down Expand Up @@ -617,33 +616,6 @@ int netdev_init(sgxlkl_host_state_t* host_state)
net_dev->dev.packed.queue[i].num_max = QUEUE_DEPTH;
net_dev->dev.packed.queue[i].device_wrap_counter = 1;
net_dev->dev.packed.queue[i].driver_wrap_counter = 1;
net_dev->dev.packed.queue[i].driver = mmap(
0,
event_size,
PROT_READ,
MAP_SHARED | MAP_ANONYMOUS,
-1,
0
);
if (!net_dev->dev.packed.queue[i].driver)
{
sgxlkl_host_fail("%s: block device queue descriptor event allocation failed\n", __func__);
return -1;
}
net_dev->dev.packed.queue[i].device = mmap(
0,
event_size,
PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS,
-1,
0
);
if (!net_dev->dev.packed.queue[i].device)
{
sgxlkl_host_fail("%s: block device queue descriptor event allocation failed\n", __func__);
return -1;
}
net_dev->dev.packed.queue[i].device->flags = LKL_VRING_PACKED_EVENT_FLAG_ENABLE;
}
}

Expand Down
Loading