In order to find out whether a vcpu is likely to be the target of
VLPIs (and to further optimize the way we deal with those), let's
track the number of VLPIs that are mapped to a vcpu.

This is implemented with an atomic variable that is incremented when
a VLPI is mapped to a vcpu, decremented when it is unmapped, and
updated on both the source and destination vcpus when a VLPI is
moved between them.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Zenghui Yu <yuzenghui@huawei.com>
Reviewed-by: Christoffer Dall <christoffer.dall@arm.com>
Link: https://lore.kernel.org/r/20191107160412.30301-2-maz@kernel.org
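
The new counter gives later patches a cheap way to ask whether a vcpu
currently has any VLPIs mapped to it. As a minimal sketch of a possible
consumer (the helper below is hypothetical and not part of this patch):

    /*
     * Hypothetical helper, not introduced by this patch: a non-zero
     * count means at least one VLPI is currently mapped to this
     * vcpu's vPE, making it a likely target of direct injection.
     */
    static inline bool vcpu_has_vlpis(struct kvm_vcpu *vcpu)
    {
            return atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) != 0;
    }

Since the count is only a hint ("likely to be the target"), racy reads
are acceptable, which is why plain atomic_inc()/atomic_dec() on the
update side needs no extra locking.
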
 struct its_vpe {
        struct page             *vpt_page;
        struct its_vm           *its_vm;
+       /* per-vPE VLPI tracking: number of currently mapped VLPIs */
+       atomic_t                vlpi_count;
        /* Doorbell interrupt */
        int                     irq;
        irq_hw_number_t         vpe_db_lpi;
 
 
        INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
        raw_spin_lock_init(&vgic_cpu->ap_list_lock);
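+       /* No VLPIs are mapped to this vcpu yet */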
+       atomic_set(&vgic_cpu->vgic_v3.its_vpe.vlpi_count, 0);
 
        /*
         * Enable and configure all SGIs to be edge-triggered and
 
                if (ret)
                        return ret;
 
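+               /* Moving the VLPI: fix up both the old and new vPE counts */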
+               if (map.vpe)
+                       atomic_dec(&map.vpe->vlpi_count);
                map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+               atomic_inc(&map.vpe->vlpi_count);
 
                ret = its_map_vlpi(irq->host_irq, &map);
        }
 
 
        irq->hw         = true;
        irq->host_irq   = virq;
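+       /* One more VLPI now targets this vPE */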
+       atomic_inc(&map.vpe->vlpi_count);
 
 out:
        mutex_unlock(&its->its_lock);
 
        WARN_ON(!(irq->hw && irq->host_irq == virq));
        if (irq->hw) {
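+               /* The VLPI is about to be unmapped: drop the vPE's count */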
+               atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
                irq->hw = false;
                ret = its_unmap_vlpi(virq);
        }