}
}
+int wave5_kfifo_alloc(struct vpu_instance *inst)
+{
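+ /* Per-instance FIFO holding up to 16 queued interrupt reasons, drained by the irq thread */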
+ return kfifo_alloc(&inst->irq_status, 16 * sizeof(int), GFP_KERNEL);
+}
+
void wave5_cleanup_instance(struct vpu_instance *inst, struct file *filp)
{
int i;
v4l2_fh_del(&inst->v4l2_fh, filp);
v4l2_fh_exit(&inst->v4l2_fh);
}
- list_del_init(&inst->list);
+ kfifo_free(&inst->irq_status);
ida_free(&inst->dev->inst_ida, inst->id);
kfree(inst->codec_info);
kfree(inst);
{
struct vpu_instance *inst = file_to_vpu_inst(filp);
int ret = 0;
+ unsigned long flags;
v4l2_m2m_ctx_release(inst->v4l2_fh.m2m_ctx);
+ /*
+ * To prevent a NULL pointer dereference, the existing irq handling was
+ * split into two parts:
+ * the irq handler queues the interrupt reason, and the irq thread
+ * calls wave5_vpu_dec_finish_decode to fetch the decoded frame.
+ * The list of instances must be protected across the whole decoding
+ * flow. In the irq handler only a spin lock can be taken, while the
+ * irq thread has to use a mutex, because wave5_vpu_dec_finish_decode
+ * already takes a mutex and therefore cannot run under a spin lock.
+ * So both the spin lock and the mutex are taken here to remove the
+ * instance from the list safely in the release function.
+ */
+ ret = mutex_lock_interruptible(&inst->dev->irq_lock);
+ if (ret)
+ return ret;
+ spin_lock_irqsave(&inst->dev->irq_spinlock, flags);
+ list_del_init(&inst->list);
+ spin_unlock_irqrestore(&inst->dev->irq_spinlock, flags);
+ mutex_unlock(&inst->dev->irq_lock);
if (inst->state != VPU_INST_STATE_NONE) {
u32 fail_res;
u32 seq_done;
u32 cmd_done;
u32 irq_reason;
- struct vpu_instance *inst;
+ u32 irq_subreason;
+ struct vpu_instance *inst, *tmp;
struct vpu_device *dev = dev_id;
+ int val;
+ unsigned long flags;
irq_reason = wave5_vdi_read_register(dev, W5_VPU_VINT_REASON);
seq_done = wave5_vdi_read_register(dev, W5_RET_SEQ_DONE_INSTANCE_INFO);
wave5_vdi_write_register(dev, W5_VPU_VINT_REASON_CLR, irq_reason);
wave5_vdi_write_register(dev, W5_VPU_VINT_CLEAR, 0x1);
- list_for_each_entry(inst, &dev->instances, list) {
+ spin_lock_irqsave(&dev->irq_spinlock, flags);
+ list_for_each_entry_safe(inst, tmp, &dev->instances, list) {
if (irq_reason & BIT(INT_WAVE5_INIT_SEQ) ||
irq_reason & BIT(INT_WAVE5_ENC_SET_PARAM)) {
irq_reason & BIT(INT_WAVE5_ENC_PIC)) {
if (cmd_done & BIT(inst->id)) {
cmd_done &= ~BIT(inst->id);
- wave5_vdi_write_register(dev, W5_RET_QUEUE_CMD_DONE_INST,
- cmd_done);
- inst->ops->finish_process(inst);
+ if (dev->irq >= 0) {
+ irq_subreason =
+ wave5_vdi_read_register(dev, W5_VPU_VINT_REASON);
+ if (!(irq_subreason & BIT(INT_WAVE5_DEC_PIC)))
+ wave5_vdi_write_register(dev,
+ W5_RET_QUEUE_CMD_DONE_INST,
+ cmd_done);
+ }
+ val = BIT(INT_WAVE5_DEC_PIC);
+ kfifo_in(&inst->irq_status, &val, sizeof(int));
}
}
+ }
+ spin_unlock_irqrestore(&dev->irq_spinlock, flags);
+
+ if (dev->irq < 0)
+ up(&dev->irq_sem);
+}
+
+static irqreturn_t wave5_vpu_irq(int irq, void *dev_id)
+{
+ struct vpu_device *dev = dev_id;
- wave5_vpu_clear_interrupt(inst, irq_reason);
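+ /* Top half: wave5_vpu_handle_irq queues the interrupt reasons, the threaded handler consumes them */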
+ if (wave5_vdi_read_register(dev, W5_VPU_VPU_INT_STS)) {
+ wave5_vpu_handle_irq(dev);
+ return IRQ_WAKE_THREAD;
}
+
+ return IRQ_HANDLED;
}
static irqreturn_t wave5_vpu_irq_thread(int irq, void *dev_id)
{
struct vpu_device *dev = dev_id;
+ struct vpu_instance *inst, *tmp;
+ int irq_status, ret;
- if (wave5_vdi_read_register(dev, W5_VPU_VPU_INT_STS))
- wave5_vpu_handle_irq(dev);
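+ /* Drain each instance's FIFO under the mutex and run finish_process for every queued reason */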
+ mutex_lock(&dev->irq_lock);
+ list_for_each_entry_safe(inst, tmp, &dev->instances, list) {
+ while (kfifo_len(&inst->irq_status)) {
+ ret = kfifo_out(&inst->irq_status, &irq_status, sizeof(int));
+ if (!ret)
+ break;
+
+ inst->ops->finish_process(inst);
+ }
+ }
+ mutex_unlock(&dev->irq_lock);
return IRQ_HANDLED;
}
return HRTIMER_RESTART;
}
+static int irq_thread(void *data)
+{
+ struct vpu_device *dev = (struct vpu_device *)data;
+ struct vpu_instance *inst, *tmp;
+ int irq_status, ret;
+
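+ /* Polling mode: wait until wave5_vpu_handle_irq signals the semaphore, then drain the FIFOs */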
+ while (!kthread_should_stop()) {
+ if (down_interruptible(&dev->irq_sem))
+ continue;
+
+ if (kthread_should_stop())
+ break;
+
+ mutex_lock(&dev->irq_lock);
+ list_for_each_entry_safe(inst, tmp, &dev->instances, list) {
+ while (kfifo_len(&inst->irq_status)) {
+ ret = kfifo_out(&inst->irq_status, &irq_status, sizeof(int));
+ if (!ret)
+ break;
+
+ inst->ops->finish_process(inst);
+ }
+ }
+ mutex_unlock(&dev->irq_lock);
+ }
+
+ return 0;
+}
+
static int wave5_vpu_load_firmware(struct device *dev, const char *fw_name,
u32 *revision)
{
mutex_init(&dev->dev_lock);
mutex_init(&dev->hw_lock);
+ mutex_init(&dev->irq_lock);
+ spin_lock_init(&dev->irq_spinlock);
dev_set_drvdata(&pdev->dev, dev);
dev->dev = &pdev->dev;
}
dev->product = wave5_vpu_get_product_id(dev);
+ INIT_LIST_HEAD(&dev->instances);
+
dev->irq = platform_get_irq(pdev, 0);
if (dev->irq < 0) {
dev_err(&pdev->dev, "failed to get irq resource, falling back to polling\n");
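+ /* Polling fallback: a semaphore-driven kthread replaces the threaded IRQ handler */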
+ sema_init(&dev->irq_sem, 1);
+ dev->irq_thread = kthread_run(irq_thread, dev, "irq thread");
hrtimer_setup(&dev->hrtimer, &wave5_vpu_timer_callback, CLOCK_MONOTONIC,
HRTIMER_MODE_REL_PINNED);
dev->worker = kthread_run_worker(0, "vpu_irq_thread");
dev->vpu_poll_interval = vpu_poll_interval;
kthread_init_work(&dev->work, wave5_vpu_irq_work_fn);
} else {
- ret = devm_request_threaded_irq(&pdev->dev, dev->irq, NULL,
+ ret = devm_request_threaded_irq(&pdev->dev, dev->irq, wave5_vpu_irq,
wave5_vpu_irq_thread, IRQF_ONESHOT, "vpu_irq", dev);
if (ret) {
dev_err(&pdev->dev, "Register interrupt handler, fail: %d\n", ret);
}
}
- INIT_LIST_HEAD(&dev->instances);
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret) {
dev_err(&pdev->dev, "v4l2_device_register, fail: %d\n", ret);
v4l2_device_unregister(&dev->v4l2_dev);
if (dev->irq < 0) {
+ if (dev->irq_thread) {
+ kthread_stop(dev->irq_thread);
+ up(&dev->irq_sem);
+ dev->irq_thread = NULL;
+ }
+
hrtimer_cancel(&dev->hrtimer);
kthread_cancel_work_sync(&dev->work);
kthread_destroy_worker(dev->worker);
mutex_destroy(&dev->dev_lock);
mutex_destroy(&dev->hw_lock);
+ mutex_destroy(&dev->irq_lock);
reset_control_assert(dev->resets);
clk_bulk_disable_unprepare(dev->num_clks, dev->clks);
wave5_vdi_release(&pdev->dev);