}

static void ioapic_inj_irq(struct kvm_ioapic *ioapic,
-			   struct kvm_lapic *target,
+			   struct kvm_vcpu *vcpu,
			   u8 vector, u8 trig_mode, u8 delivery_mode)
{
	ioapic_debug("irq %d trig %d deliv %d\n", vector, trig_mode,
		     delivery_mode);

	ASSERT((delivery_mode == dest_Fixed) ||
	       (delivery_mode == dest_LowestPrio));

-	kvm_apic_set_irq(target, vector, trig_mode);
+	kvm_apic_set_irq(vcpu, vector, trig_mode);
}
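
/*
 * With ioapic_inj_irq() taking a struct kvm_vcpu, ioapic.c no longer
 * reaches into struct kvm_lapic; injection is routed through the vcpu.
 */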

static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
{
	u8 dest = ioapic->redirtbl[irq].fields.dest_id;
	u8 dest_mode = ioapic->redirtbl[irq].fields.dest_mode;
	u8 delivery_mode = ioapic->redirtbl[irq].fields.delivery_mode;
	u8 vector = ioapic->redirtbl[irq].fields.vector;
	u8 trig_mode = ioapic->redirtbl[irq].fields.trig_mode;
	u32 deliver_bitmask;
-	struct kvm_lapic *target;
	struct kvm_vcpu *vcpu;
	int vcpu_id;

	deliver_bitmask = ioapic_get_delivery_bitmask(ioapic, dest, dest_mode);
	if (!deliver_bitmask) {
		ioapic_debug("no target on destination\n");
		return;
	}

	switch (delivery_mode) {
	case dest_LowestPrio:
-		target =
-		    kvm_apic_round_robin(ioapic->kvm, vector, deliver_bitmask);
-		if (target != NULL)
-			ioapic_inj_irq(ioapic, target, vector,
+		vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
+				deliver_bitmask);
+		if (vcpu != NULL)
+			ioapic_inj_irq(ioapic, vcpu, vector,
				       trig_mode, delivery_mode);
		else
-			ioapic_debug("null round robin: "
+			ioapic_debug("null lowest prio vcpu: "
				     "mask=%x vector=%x delivery_mode=%x\n",
				     deliver_bitmask, vector, dest_LowestPrio);
		break;
	case dest_Fixed:
		for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
			if (!(deliver_bitmask & (1 << vcpu_id)))
				continue;
			deliver_bitmask &= ~(1 << vcpu_id);
			vcpu = ioapic->kvm->vcpus[vcpu_id];
			if (vcpu) {
-				target = vcpu->apic;
-				ioapic_inj_irq(ioapic, target, vector,
+				ioapic_inj_irq(ioapic, vcpu, vector,
					       trig_mode, delivery_mode);
			}
		}
		break;
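
/*
 * Both delivery paths above now resolve a struct kvm_vcpu first and hand
 * it to ioapic_inj_irq(); the lapic itself is only dereferenced inside
 * lapic.c.
 */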
u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
-struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
+
+struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
				       unsigned long bitmap);
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
void kvm_ioapic_update_eoi(struct kvm *kvm, int vector);
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
-int kvm_apic_set_irq(struct kvm_lapic *apic, u8 vec, u8 trig);
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig);
void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu);
int kvm_ioapic_init(struct kvm *kvm);
void kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
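
/*
 * Example caller of the reworked declarations (sketch only; the vector
 * and trigger values are illustrative):
 *
 *	struct kvm_vcpu *vcpu;
 *
 *	vcpu = kvm_get_lowest_prio_vcpu(kvm, vector, bitmap);
 *	if (vcpu)
 *		kvm_apic_set_irq(vcpu, vector, 0);
 */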
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);

-int kvm_apic_set_irq(struct kvm_lapic *apic, u8 vec, u8 trig)
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
{
+	struct kvm_lapic *apic = vcpu->apic;
+
	if (!apic_test_and_set_irr(vec, apic)) {
		/* a new pending irq is set in IRR */
		if (trig)
			apic_set_vector(vec, apic->regs + APIC_TMR);
		else
			apic_clear_vector(vec, apic->regs + APIC_TMR);
		kvm_vcpu_kick(apic->vcpu);
		return 1;
	}
	return 0;
}

	return result;
}

-struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
+static struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
				       unsigned long bitmap)
{
	int last;
	int next;
	struct kvm_lapic *apic = NULL;

	/* ... round-robin scan of kvm->vcpus elided ... */

	return apic;
}
+struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
+				       unsigned long bitmap)
+{
+	struct kvm_lapic *apic;
+
+	apic = kvm_apic_round_robin(kvm, vector, bitmap);
+	if (apic)
+		return apic->vcpu;
+	return NULL;
+}
+
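
/*
 * Design note: kvm_apic_round_robin() is now static to lapic.c; external
 * users go through kvm_get_lowest_prio_vcpu(), which maps the chosen apic
 * back to its owning vcpu via the apic->vcpu back-pointer.
 */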
static void apic_set_eoi(struct kvm_lapic *apic)
{
	int vector = apic_find_highest_isr(apic);

	unsigned int delivery_mode = icr_low & APIC_MODE_MASK;
	unsigned int vector = icr_low & APIC_VECTOR_MASK;
-	struct kvm_lapic *target;
+	struct kvm_vcpu *target;
	struct kvm_vcpu *vcpu;
	unsigned long lpr_map = 0;
	int i;
}

	if (delivery_mode == APIC_DM_LOWEST) {
-		target = kvm_apic_round_robin(vcpu->kvm, vector, lpr_map);
+		target = kvm_get_lowest_prio_vcpu(vcpu->kvm, vector, lpr_map);
		if (target != NULL)
-			__apic_accept_irq(target, delivery_mode,
+			__apic_accept_irq(target->apic, delivery_mode,
					  vector, level, trig_mode);
	}
}
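
For illustration, a minimal, self-contained userspace sketch of the wrapper
pattern this patch introduces; the names here (pick_apic, the vcpus array
form of get_lowest_prio_vcpu) are invented for the example and merely stand
in for kvm_apic_round_robin(), kvm_get_lowest_prio_vcpu() and kvm->vcpus:

#include <stdio.h>

struct vcpu;

struct lapic {
	struct vcpu *vcpu;	/* back-pointer, like kvm_lapic->vcpu */
};

struct vcpu {
	struct lapic *apic;
	int id;
};

/* Internal selector; stands in for the now-static kvm_apic_round_robin(). */
static struct lapic *pick_apic(struct vcpu **vcpus, int n,
			       unsigned long bitmap)
{
	int i;

	for (i = 0; i < n; i++)
		if (vcpus[i] && (bitmap & (1UL << i)))
			return vcpus[i]->apic;
	return NULL;
}

/* Public wrapper; stands in for kvm_get_lowest_prio_vcpu(). */
static struct vcpu *get_lowest_prio_vcpu(struct vcpu **vcpus, int n,
					 unsigned long bitmap)
{
	struct lapic *apic = pick_apic(vcpus, n, bitmap);

	return apic ? apic->vcpu : NULL;
}

int main(void)
{
	struct vcpu v0 = { 0 }, v1 = { 0 };
	struct lapic a0 = { &v0 }, a1 = { &v1 };
	struct vcpu *vcpus[2];
	struct vcpu *target;

	v0.apic = &a0; v0.id = 0;
	v1.apic = &a1; v1.id = 1;
	vcpus[0] = &v0;
	vcpus[1] = &v1;

	/* Only vcpu 1 is in the delivery bitmap. */
	target = get_lowest_prio_vcpu(vcpus, 2, 1UL << 1);
	printf("picked vcpu %d\n", target ? target->id : -1);
	return 0;
}

The point of the pattern: callers hold only the vcpu handle, the apic type
stays private to one file, and the selector resolves back to the owner
through a back-pointer rather than exposing the inner structure.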