static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
ioapic->rtc_status.pending_eoi = 0;
- bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_IDS);
+ bitmap_zero(ioapic->rtc_status.map, KVM_MAX_VCPU_IDS);
}
static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
bool new_val, old_val;
struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
- struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
+ struct rtc_status *status = &ioapic->rtc_status;
union kvm_ioapic_redirect_entry *e;
e = &ioapic->redirtbl[RTC_GSI];
return;
new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
- old_val = test_bit(vcpu->vcpu_id, dest_map->map);
+ old_val = test_bit(vcpu->vcpu_id, status->map);
if (new_val == old_val)
return;
if (new_val) {
- __set_bit(vcpu->vcpu_id, dest_map->map);
- dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
+ __set_bit(vcpu->vcpu_id, status->map);
+ status->vectors[vcpu->vcpu_id] = e->fields.vector;
ioapic->rtc_status.pending_eoi++;
} else {
- __clear_bit(vcpu->vcpu_id, dest_map->map);
+ __clear_bit(vcpu->vcpu_id, status->map);
ioapic->rtc_status.pending_eoi--;
rtc_status_pending_eoi_check_valid(ioapic);
}
static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu,
int vector)
{
- struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
+ struct rtc_status *status = &ioapic->rtc_status;
/* RTC special handling */
- if (test_bit(vcpu->vcpu_id, dest_map->map) &&
- (vector == dest_map->vectors[vcpu->vcpu_id]) &&
- (test_and_clear_bit(vcpu->vcpu_id,
- ioapic->rtc_status.dest_map.map))) {
+ if (test_bit(vcpu->vcpu_id, status->map) &&
+ (vector == status->vectors[vcpu->vcpu_id]) &&
+ (test_and_clear_bit(vcpu->vcpu_id, status->map))) {
--ioapic->rtc_status.pending_eoi;
rtc_status_pending_eoi_check_valid(ioapic);
}
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
{
struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
- struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
+ struct rtc_status *status = &ioapic->rtc_status;
union kvm_ioapic_redirect_entry *e;
int index;
spin_lock(&ioapic->lock);
/* Make sure we see any missing RTC EOI */
- if (test_bit(vcpu->vcpu_id, dest_map->map))
- __set_bit(dest_map->vectors[vcpu->vcpu_id],
+ if (test_bit(vcpu->vcpu_id, status->map))
+ __set_bit(status->vectors[vcpu->vcpu_id],
ioapic_handled_vectors);
for (index = 0; index < IOAPIC_NUM_PINS; index++) {
*/
BUG_ON(ioapic->rtc_status.pending_eoi != 0);
ret = __kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
- &ioapic->rtc_status.dest_map);
+ &ioapic->rtc_status);
ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
} else
ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe);
#define RTC_GSI 8
-struct dest_map {
+struct rtc_status {
+ int pending_eoi;
+
/* vcpu bitmap where IRQ has been sent */
DECLARE_BITMAP(map, KVM_MAX_VCPU_IDS);
u8 vectors[KVM_MAX_VCPU_IDS];
};
-
-struct rtc_status {
- int pending_eoi;
- struct dest_map dest_map;
-};
-
union kvm_ioapic_redirect_entry {
u64 bits;
struct {
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
int vector, int level, int trig_mode,
- struct dest_map *dest_map);
+ struct rtc_status *rtc_status);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
- struct dest_map *dest_map)
+ struct rtc_status *rtc_status)
{
struct kvm_lapic *apic = vcpu->arch.apic;
return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
- irq->level, irq->trig_mode, dest_map);
+ irq->level, irq->trig_mode, rtc_status);
}
static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
static bool __kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq, int *r,
- struct dest_map *dest_map)
+ struct rtc_status *rtc_status)
{
struct kvm_apic_map *map;
unsigned long bitmap;
*r = 0;
return true;
}
- *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
+ *r = kvm_apic_set_irq(src->vcpu, irq, rtc_status);
return true;
}
for_each_set_bit(i, &bitmap, 16) {
if (!dst[i])
continue;
- *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
+ *r += kvm_apic_set_irq(dst[i]->vcpu, irq, rtc_status);
}
}
int __kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq,
- struct dest_map *dest_map)
+ struct rtc_status *rtc_status)
{
int r = -1;
struct kvm_vcpu *vcpu, *lowest = NULL;
unsigned long i, dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)];
unsigned int dest_vcpus = 0;
- if (__kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
+ if (__kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, rtc_status))
return r;
if (irq->dest_mode == APIC_DEST_PHYSICAL &&
if (!kvm_lowest_prio_delivery(irq)) {
if (r < 0)
r = 0;
- r += kvm_apic_set_irq(vcpu, irq, dest_map);
+ r += kvm_apic_set_irq(vcpu, irq, rtc_status);
} else if (kvm_apic_sw_enabled(vcpu->arch.apic)) {
if (!vector_hashing_enabled) {
if (!lowest)
}
if (lowest)
- r = kvm_apic_set_irq(lowest, irq, dest_map);
+ r = kvm_apic_set_irq(lowest, irq, rtc_status);
return r;
}
*/
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
int vector, int level, int trig_mode,
- struct dest_map *dest_map)
+ struct rtc_status *rtc_status)
{
int result = 0;
struct kvm_vcpu *vcpu = apic->vcpu;
result = 1;
- if (dest_map) {
- __set_bit(vcpu->vcpu_id, dest_map->map);
- dest_map->vectors[vcpu->vcpu_id] = vector;
+ if (rtc_status) {
+ __set_bit(vcpu->vcpu_id, rtc_status->map);
+ rtc_status->vectors[vcpu->vcpu_id] = vector;
}
if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
int nr_lvt_entries;
};
-struct dest_map;
+struct rtc_status;
int kvm_create_lapic(struct kvm_vcpu *vcpu);
void kvm_free_lapic(struct kvm_vcpu *vcpu);
bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, unsigned long *pir, int *max_irr);
void kvm_apic_update_ppr(struct kvm_vcpu *vcpu);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
- struct dest_map *dest_map);
+ struct rtc_status *rtc_status);
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type);
void kvm_apic_update_apicv(struct kvm_vcpu *vcpu);
int kvm_alloc_apic_access_page(struct kvm *kvm);
struct kvm_lapic_irq *irq, int *r);
int __kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq,
- struct dest_map *dest_map);
+ struct rtc_status *rtc_status);
static inline int kvm_irq_delivery_to_apic(struct kvm *kvm,
struct kvm_lapic *src,