if (!hyp_vm || hyp_vm->kvm.created_vcpus <= vcpu_idx)
goto unlock;
- hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
+ /* Pairs with smp_store_release() in register_hyp_vcpu(). */
+ hyp_vcpu = smp_load_acquire(&hyp_vm->vcpus[vcpu_idx]);
if (!hyp_vcpu)
goto unlock;
* the page-aligned size of 'struct pkvm_hyp_vcpu'.
* Return 0 on success, negative error code on failure.
*/
+/*
+ * register_hyp_vcpu - Publish @hyp_vcpu into @hyp_vm's vCPU table.
+ *
+ * Validates the vCPU's index against the number of vCPUs the host
+ * declared at VM creation and rejects a slot that is already occupied,
+ * then publishes the pointer with release semantics.
+ *
+ * NOTE(review): caller is expected to hold vm_table_lock — the visible
+ * caller (__pkvm_init_vcpu) unlocks it after this returns; confirm all
+ * callers do the same.
+ *
+ * Return: 0 on success, -EINVAL if the index is out of range or the
+ * slot is already populated.
+ */
+static int register_hyp_vcpu(struct pkvm_hyp_vm *hyp_vm,
+ struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+ unsigned int idx = hyp_vcpu->vcpu.vcpu_idx;
+
+ /* The index must fall within the range fixed at VM creation. */
+ if (idx >= hyp_vm->kvm.created_vcpus)
+ return -EINVAL;
+
+ /* Each slot is populated at most once; duplicates are an error. */
+ if (hyp_vm->vcpus[idx])
+ return -EINVAL;
+
+ /*
+ * Ensure the hyp_vcpu is initialised before publishing it to
+ * the vCPU-load path via 'hyp_vm->vcpus[]'.
+ */
+ smp_store_release(&hyp_vm->vcpus[idx], hyp_vcpu);
+ return 0;
+}
+
int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
unsigned long vcpu_hva)
{
struct pkvm_hyp_vcpu *hyp_vcpu;
struct pkvm_hyp_vm *hyp_vm;
- unsigned int idx;
int ret;
hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
if (ret)
goto unlock;
- idx = hyp_vcpu->vcpu.vcpu_idx;
- if (idx >= hyp_vm->kvm.created_vcpus) {
- ret = -EINVAL;
- goto unlock;
- }
-
- if (hyp_vm->vcpus[idx]) {
- ret = -EINVAL;
- goto unlock;
+ ret = register_hyp_vcpu(hyp_vm, hyp_vcpu);
+ if (ret) {
+ unpin_host_vcpu(host_vcpu);
+ unpin_host_sve_state(hyp_vcpu);
}
-
- hyp_vm->vcpus[idx] = hyp_vcpu;
unlock:
hyp_spin_unlock(&vm_table_lock);