KVM: Use eoi to track RTC interrupt delivery status
The current interrupt coalescing logic, which is only used by the RTC,
conflicts with posted interrupts.
This patch introduces a new mechanism that uses EOI to track interrupt
delivery: when an RTC interrupt is delivered, pending_eoi is set to the
number of vcpus that received it and is decremented each time one of
those vcpus writes EOI. No subsequent RTC interrupt can be delivered
until all vcpus have written their EOI.
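
As an illustration of the scheme (not part of this patch), a minimal
user-space sketch follows; NR_VCPUS, dest_map and the rtc_deliver()/
rtc_eoi() helpers are invented for the example, and delivery is
simplified to a broadcast to every vcpu:

/*
 * Illustrative user-space model of the pending_eoi counting scheme
 * described above; not the kernel code.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_VCPUS 4

static int pending_eoi;                 /* EOIs still outstanding */
static bool dest_map[NR_VCPUS];         /* which vcpus got the irq */

/* Try to inject an RTC interrupt; return false if it is coalesced. */
static bool rtc_deliver(void)
{
	if (pending_eoi > 0)
		return false;           /* previous irq not acked yet */

	for (int i = 0; i < NR_VCPUS; i++)
		dest_map[i] = true;
	pending_eoi = NR_VCPUS;         /* one EOI expected per vcpu */
	return true;
}

/* A vcpu writes EOI for the RTC vector. */
static void rtc_eoi(int vcpu_id)
{
	if (dest_map[vcpu_id]) {
		dest_map[vcpu_id] = false;
		--pending_eoi;
	}
	assert(pending_eoi >= 0);
}

int main(void)
{
	printf("first delivery: %d\n", rtc_deliver());  /* 1 */
	printf("while pending:  %d\n", rtc_deliver());  /* 0, coalesced */
	for (int i = 0; i < NR_VCPUS; i++)
		rtc_eoi(i);
	printf("after all EOIs: %d\n", rtc_deliver());  /* 1 again */
	return 0;
}

Once rtc_deliver() has handed the interrupt to the vcpus, every further
attempt is reported as coalesced until the counter drains back to zero.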

Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Yang Zhang authored and matosatti committed Apr 16, 2013
1 parent aa2fbe6 commit 2c2bf01
Showing 1 changed file with 35 additions and 1 deletion.
36 changes: 35 additions & 1 deletion virt/kvm/ioapic.c
@@ -147,6 +147,22 @@ static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
 		__rtc_irq_eoi_tracking_restore_one(vcpu);
 }
 
+static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
+{
+	if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map))
+		--ioapic->rtc_status.pending_eoi;
+
+	WARN_ON(ioapic->rtc_status.pending_eoi < 0);
+}
+
+static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
+{
+	if (ioapic->rtc_status.pending_eoi > 0)
+		return true; /* coalesced */
+
+	return false;
+}
+
 static int ioapic_service(struct kvm_ioapic *ioapic, unsigned int idx,
 		bool line_status)
 {
@@ -260,6 +276,7 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status)
 {
 	union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
 	struct kvm_lapic_irq irqe;
+	int ret;
 
 	ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
		     "vector=%x trig_mode=%x\n",
@@ -275,7 +292,15 @@ static int ioapic_deliver(struct kvm_ioapic *ioapic, int irq, bool line_status)
 	irqe.level = 1;
 	irqe.shorthand = 0;
 
-	return kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
+	if (irq == RTC_GSI && line_status) {
+		BUG_ON(ioapic->rtc_status.pending_eoi != 0);
+		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
+				ioapic->rtc_status.dest_map);
+		ioapic->rtc_status.pending_eoi = ret;
+	} else
+		ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
+
+	return ret;
 }
 
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
@@ -299,13 +324,20 @@ int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
 		ret = 1;
 	} else {
 		int edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
+
+		if (irq == RTC_GSI && line_status &&
+			rtc_irq_check_coalesced(ioapic)) {
+			ret = 0; /* coalesced */
+			goto out;
+		}
 		ioapic->irr |= mask;
 		if ((edge && old_irr != ioapic->irr) ||
 		    (!edge && !entry.fields.remote_irr))
 			ret = ioapic_service(ioapic, irq, line_status);
 		else
 			ret = 0; /* report coalesced interrupt */
 	}
+out:
 	trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
 	spin_unlock(&ioapic->lock);
 
@@ -333,6 +365,8 @@ static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
 		if (ent->fields.vector != vector)
 			continue;
 
+		if (i == RTC_GSI)
+			rtc_irq_eoi(ioapic, vcpu);
 		/*
 		 * We are dropping lock while calling ack notifiers because ack
 		 * notifier callbacks for assigned devices call into IOAPIC
