ret = kvm_x86_call(pi_update_irte)(irqfd->kvm,
prod->irq, irqfd->gsi, 0);
if (ret)
- printk(KERN_INFO "irq bypass consumer (token %p) unregistration"
- " fails: %d\n", irqfd->consumer.token, ret);
+ printk(KERN_INFO "irq bypass consumer (eventfd %p) unregistration"
+ " fails: %d\n", irqfd->consumer.eventfd, ret);
spin_unlock_irq(&kvm->irqfds.lock);
if (ret)
goto out_put_eventfd_ctx;
- ctx->producer.token = trigger;
ctx->producer.irq = irq;
- ret = irq_bypass_register_producer(&ctx->producer);
+ ret = irq_bypass_register_producer(&ctx->producer, trigger);
if (unlikely(ret)) {
dev_info(&pdev->dev,
- "irq bypass producer (token %p) registration fails: %d\n",
- ctx->producer.token, ret);
-
- ctx->producer.token = NULL;
+ "irq bypass producer (eventfd %p) registration fails: %d\n",
+ trigger, ret);
}
ctx->trigger = trigger;
return;
vq->call_ctx.producer.irq = irq;
- ret = irq_bypass_register_producer(&vq->call_ctx.producer);
+ ret = irq_bypass_register_producer(&vq->call_ctx.producer, vq->call_ctx.ctx);
if (unlikely(ret))
- dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret = %d\n",
- qid, vq->call_ctx.producer.token, ret);
+ dev_info(&v->dev, "vq %u, irq bypass producer (eventfd %p) registration fails, ret = %d\n",
+ qid, vq->call_ctx.ctx, ret);
}
static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
if (ops->get_status(vdpa) &
VIRTIO_CONFIG_S_DRIVER_OK)
vhost_vdpa_unsetup_vq_irq(v, idx);
- vq->call_ctx.producer.token = NULL;
}
break;
}
cb.callback = vhost_vdpa_virtqueue_cb;
cb.private = vq;
cb.trigger = vq->call_ctx.ctx;
- vq->call_ctx.producer.token = vq->call_ctx.ctx;
if (ops->get_status(vdpa) &
VIRTIO_CONFIG_S_DRIVER_OK)
vhost_vdpa_setup_vq_irq(v, idx);
#include <linux/list.h>
+struct eventfd_ctx;
struct irq_bypass_consumer;
/*
* The IRQ bypass manager is a simple set of lists and callbacks that allows
* IRQ producers (ex. physical interrupt sources) to be matched to IRQ
* consumers (ex. virtualization hardware that allows IRQ bypass or offload)
- * via a shared token (ex. eventfd_ctx). Producers and consumers register
- * independently. When a token match is found, the optional @stop callback
- * will be called for each participant. The pair will then be connected via
- * the @add_* callbacks, and finally the optional @start callback will allow
- * any final coordination. When either participant is unregistered, the
- * process is repeated using the @del_* callbacks in place of the @add_*
- * callbacks. Match tokens must be unique per producer/consumer, 1:N pairings
- * are not supported.
+ * via a shared eventfd_ctx. Producers and consumers register independently.
+ * When a producer and consumer are paired, i.e. an eventfd match is found, the
+ * optional @stop callback will be called for each participant. The pair will
+ * then be connected via the @add_* callbacks, and finally the optional @start
+ * callback will allow any final coordination. When either participant is
+ * unregistered, the process is repeated using the @del_* callbacks in place of
+ * the @add_* callbacks. eventfds must be unique per producer/consumer; 1:N
+ * pairings are not supported.
*/
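As a minimal sketch of the pairing lifecycle described above, a consumer built
against the new eventfd-based API might look like the following. The my_consumer
type, the callback bodies, and all names here are hypothetical and purely
illustrative; they are not part of this patch.

#include <linux/eventfd.h>
#include <linux/irqbypass.h>
#include <linux/printk.h>

/*
 * Hypothetical consumer; assumed to live in zeroed (e.g. kzalloc()'d) memory,
 * since registration WARNs and returns -EINVAL if ->eventfd is already set.
 */
struct my_consumer {
	struct irq_bypass_consumer consumer;
	/* driver-private state would live here */
};

static int my_add_producer(struct irq_bypass_consumer *cons,
			   struct irq_bypass_producer *prod)
{
	/* Program posting/bypass for prod->irq; stubbed out in this sketch. */
	pr_info("bypass enabled for irq %d\n", prod->irq);
	return 0;
}

static void my_del_producer(struct irq_bypass_consumer *cons,
			    struct irq_bypass_producer *prod)
{
	/* Revert to the normal, non-bypass interrupt path. */
	pr_info("bypass disabled for irq %d\n", prod->irq);
}

static int my_register(struct my_consumer *mc, struct eventfd_ctx *eventfd)
{
	mc->consumer.add_producer = my_add_producer;
	mc->consumer.del_producer = my_del_producer;
	/* @stop and @start are optional and omitted here. */

	/*
	 * The eventfd is now passed at registration time instead of being
	 * stashed by the caller as an opaque @token.
	 */
	return irq_bypass_register_consumer(&mc->consumer, eventfd);
}

static void my_unregister(struct my_consumer *mc)
{
	/* Disconnects from a paired producer, if any, and clears ->eventfd. */
	irq_bypass_unregister_consumer(&mc->consumer);
}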
/**
* struct irq_bypass_producer - IRQ bypass producer definition
* @node: IRQ bypass manager private list management
- * @token: opaque token to match between producer and consumer (non-NULL)
+ * @eventfd: eventfd context used to match producers and consumers
* @irq: Linux IRQ number for the producer device
* @add_consumer: Connect the IRQ producer to an IRQ consumer (optional)
* @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional)
*/
struct irq_bypass_producer {
struct list_head node;
- void *token;
+ struct eventfd_ctx *eventfd;
int irq;
int (*add_consumer)(struct irq_bypass_producer *,
struct irq_bypass_consumer *);
/**
* struct irq_bypass_consumer - IRQ bypass consumer definition
* @node: IRQ bypass manager private list management
- * @token: opaque token to match between producer and consumer (non-NULL)
+ * @eventfd: eventfd context used to match producers and consumers
* @add_producer: Connect the IRQ consumer to an IRQ producer
* @del_producer: Disconnect the IRQ consumer from an IRQ producer
* @stop: Perform any quiesce operations necessary prior to add/del (optional)
*/
struct irq_bypass_consumer {
struct list_head node;
- void *token;
+ struct eventfd_ctx *eventfd;
int (*add_producer)(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
void (*del_producer)(struct irq_bypass_consumer *,
void (*start)(struct irq_bypass_consumer *);
};
-int irq_bypass_register_producer(struct irq_bypass_producer *);
-void irq_bypass_unregister_producer(struct irq_bypass_producer *);
-int irq_bypass_register_consumer(struct irq_bypass_consumer *);
-void irq_bypass_unregister_consumer(struct irq_bypass_consumer *);
+int irq_bypass_register_producer(struct irq_bypass_producer *producer,
+ struct eventfd_ctx *eventfd);
+void irq_bypass_unregister_producer(struct irq_bypass_producer *producer);
+int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer,
+ struct eventfd_ctx *eventfd);
+void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer);
#endif /* IRQBYPASS_H */
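For the producer side, a comparable sketch under the same assumptions
(hypothetical names, producer struct embedded in zeroed driver state) pairs a
Linux IRQ with the eventfd that a matching consumer is expected to register.
Real callers such as vfio-pci and vhost-vdpa treat a registration failure as
non-fatal and only log it, as the hunks earlier in this series show; the error
handling below is simplified for the sketch.

#include <linux/err.h>
#include <linux/eventfd.h>
#include <linux/irqbypass.h>
#include <linux/printk.h>

static int my_setup_bypass(struct irq_bypass_producer *producer,
			   int irq, int eventfd_fd)
{
	struct eventfd_ctx *trigger;
	int ret;

	trigger = eventfd_ctx_fdget(eventfd_fd);
	if (IS_ERR(trigger))
		return PTR_ERR(trigger);

	producer->irq = irq;

	/* Pairing with a matching consumer, if one exists, happens here. */
	ret = irq_bypass_register_producer(producer, trigger);
	if (ret) {
		pr_info("irq bypass producer (eventfd %p) registration fails: %d\n",
			trigger, ret);
		eventfd_ctx_put(trigger);
		return ret;
	}

	return 0;
}

static void my_teardown_bypass(struct irq_bypass_producer *producer,
			       struct eventfd_ctx *trigger)
{
	/* Disconnects from a paired consumer, if any, and clears ->eventfd. */
	irq_bypass_unregister_producer(producer);
	eventfd_ctx_put(trigger);
}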
#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
if (kvm_arch_has_irq_bypass()) {
- irqfd->consumer.token = (void *)irqfd->eventfd;
irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
irqfd->consumer.start = kvm_arch_irq_bypass_start;
- ret = irq_bypass_register_consumer(&irqfd->consumer);
+ ret = irq_bypass_register_consumer(&irqfd->consumer, irqfd->eventfd);
if (ret)
- pr_info("irq bypass consumer (token %p) registration fails: %d\n",
- irqfd->consumer.token, ret);
+ pr_info("irq bypass consumer (eventfd %p) registration fails: %d\n",
+ irqfd->eventfd, ret);
}
#endif
/**
* irq_bypass_register_producer - register IRQ bypass producer
* @producer: pointer to producer structure
+ * @eventfd: pointer to the eventfd context associated with the producer
*
* Add the provided IRQ producer to the list of producers and connect
- * with any matching token found on the IRQ consumers list.
+ * with any matching eventfd found on the IRQ consumers list.
*/
-int irq_bypass_register_producer(struct irq_bypass_producer *producer)
+int irq_bypass_register_producer(struct irq_bypass_producer *producer,
+ struct eventfd_ctx *eventfd)
{
struct irq_bypass_producer *tmp;
struct irq_bypass_consumer *consumer;
int ret;
- if (!producer->token)
+ if (WARN_ON_ONCE(producer->eventfd))
return -EINVAL;
mutex_lock(&lock);
list_for_each_entry(tmp, &producers, node) {
- if (tmp->token == producer->token) {
+ if (tmp->eventfd == eventfd) {
ret = -EBUSY;
goto out_err;
}
}
list_for_each_entry(consumer, &consumers, node) {
- if (consumer->token == producer->token) {
+ if (consumer->eventfd == eventfd) {
ret = __connect(producer, consumer);
if (ret)
goto out_err;
}
}
+ producer->eventfd = eventfd;
list_add(&producer->node, &producers);
mutex_unlock(&lock);
struct irq_bypass_producer *tmp;
struct irq_bypass_consumer *consumer;
- if (!producer->token)
+ if (!producer->eventfd)
return;
mutex_lock(&lock);
list_for_each_entry(tmp, &producers, node) {
- if (tmp->token != producer->token)
+ if (tmp->eventfd != producer->eventfd)
continue;
list_for_each_entry(consumer, &consumers, node) {
- if (consumer->token == producer->token) {
+ if (consumer->eventfd == producer->eventfd) {
__disconnect(producer, consumer);
break;
}
}
+ producer->eventfd = NULL;
list_del(&producer->node);
break;
}
+ WARN_ON_ONCE(producer->eventfd);
mutex_unlock(&lock);
}
EXPORT_SYMBOL_GPL(irq_bypass_unregister_producer);
/**
* irq_bypass_register_consumer - register IRQ bypass consumer
* @consumer: pointer to consumer structure
+ * @eventfd: pointer to the eventfd context associated with the consumer
*
* Add the provided IRQ consumer to the list of consumers and connect
- * with any matching token found on the IRQ producer list.
+ * with any matching eventfd found on the IRQ producer list.
*/
-int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
+int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer,
+ struct eventfd_ctx *eventfd)
{
struct irq_bypass_consumer *tmp;
struct irq_bypass_producer *producer;
int ret;
- if (!consumer->token ||
- !consumer->add_producer || !consumer->del_producer)
+ if (WARN_ON_ONCE(consumer->eventfd))
+ return -EINVAL;
+
+ if (!consumer->add_producer || !consumer->del_producer)
return -EINVAL;
mutex_lock(&lock);
list_for_each_entry(tmp, &consumers, node) {
- if (tmp->token == consumer->token || tmp == consumer) {
+ if (tmp->eventfd == eventfd) {
ret = -EBUSY;
goto out_err;
}
}
list_for_each_entry(producer, &producers, node) {
- if (producer->token == consumer->token) {
+ if (producer->eventfd == eventfd) {
ret = __connect(producer, consumer);
if (ret)
goto out_err;
}
}
+ consumer->eventfd = eventfd;
list_add(&consumer->node, &consumers);
mutex_unlock(&lock);
struct irq_bypass_consumer *tmp;
struct irq_bypass_producer *producer;
- if (!consumer->token)
+ if (!consumer->eventfd)
return;
mutex_lock(&lock);
continue;
list_for_each_entry(producer, &producers, node) {
- if (producer->token == consumer->token) {
+ if (producer->eventfd == consumer->eventfd) {
__disconnect(producer, consumer);
break;
}
}
+ consumer->eventfd = NULL;
list_del(&consumer->node);
break;
}
+ WARN_ON_ONCE(consumer->eventfd);
mutex_unlock(&lock);
}
EXPORT_SYMBOL_GPL(irq_bypass_unregister_consumer);