cmd_fence = (struct svga_fifo_cmd_fence *) fm;
cmd_fence->fence = *seqno;
vmw_cmd_commit_flush(dev_priv, bytes);
- vmw_update_seqno(dev_priv);
+ vmw_fences_update(dev_priv->fman);
out_err:
return ret;
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
}
- dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+ u32 seqno = vmw_fence_read(dev_priv);
+
+ atomic_set(&dev_priv->last_read_seqno, seqno);
+ atomic_set(&dev_priv->marker_seq, seqno);
return 0;
}
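
A plain atomic_set() is enough in this setup path: it runs before anything
else can race on the counter, so a relaxed store suffices; the ordered
atomic variants only matter once the updater and readers run concurrently.
A minimal sketch of that distinction, with hypothetical names:

    /* Hedged sketch, not driver code (needs <linux/atomic.h>,
     * <linux/types.h>): init may use a relaxed store because no reader
     * exists yet; steady-state updates publish with release semantics
     * instead (see __vmw_fences_update below). */
    struct demo_dev {
            atomic_t last_read_seqno;
    };

    static void demo_dev_init(struct demo_dev *d, u32 hw_seqno)
    {
            atomic_set(&d->last_read_seqno, hw_seqno); /* relaxed store */
    }
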
while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
;
- vmw->last_read_seqno = vmw_fence_read(vmw);
+ atomic_set(&vmw->last_read_seqno, vmw_fence_read(vmw));
vmw_write(vmw, SVGA_REG_CONFIG_DONE,
vmw->config_done_state);
int cmdbuf_waiters; /* Protected by waiter_lock */
int error_waiters; /* Protected by waiter_lock */
int fifo_queue_waiters; /* Protected by waiter_lock */
- uint32_t last_read_seqno;
+ atomic_t last_read_seqno;
struct vmw_fence_manager *fman;
uint32_t irq_mask; /* Updates protected by waiter_lock */
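
Switching the field from uint32_t to atomic_t buys an ordering discipline,
not just tear-free access: the updater publishes the counter with release
semantics after its fence bookkeeping, and readers snapshot it with acquire
semantics. A hedged sketch of that pairing, with hypothetical names:

    /* Hedged sketch, not driver code: a reader that observes seqno N via
     * the acquire load also observes everything the updater wrote before
     * the paired release store, e.g. signalled fence state. */
    static atomic_t demo_seqno = ATOMIC_INIT(0);

    static void demo_publish(u32 seqno)
    {
            /* ... update fence state for everything up to seqno ... */
            atomic_set_release(&demo_seqno, seqno);
    }

    static u32 demo_snapshot(void)
    {
            return (u32)atomic_read_acquire(&demo_seqno);
    }
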
uint32_t seqno,
bool interruptible,
unsigned long timeout);
-extern void vmw_update_seqno(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
fence_rep.handle = fence_handle;
fence_rep.seqno = fence->base.seqno;
- vmw_update_seqno(dev_priv);
- fence_rep.passed_seqno = dev_priv->last_read_seqno;
+ fence_rep.passed_seqno = vmw_fences_update(dev_priv->fman);
}
/*
wake_up_process(wait->task);
}
-static void __vmw_fences_update(struct vmw_fence_manager *fman);
+static u32 __vmw_fences_update(struct vmw_fence_manager *fman);
static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
return true;
}
-static void __vmw_fences_update(struct vmw_fence_manager *fman)
+static u32 __vmw_fences_update(struct vmw_fence_manager *fman)
{
struct vmw_fence_obj *fence, *next_fence;
struct list_head action_list;
if (!list_empty(&fman->cleanup_list))
(void) schedule_work(&fman->work);
+ atomic_set_release(&fman->dev_priv->last_read_seqno, seqno);
+ return seqno;
}
-void vmw_fences_update(struct vmw_fence_manager *fman)
+u32 vmw_fences_update(struct vmw_fence_manager *fman)
{
+ u32 seqno;
+
spin_lock(&fman->lock);
- __vmw_fences_update(fman);
+ seqno = __vmw_fences_update(fman);
spin_unlock(&fman->lock);
+ return seqno;
}
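
Returning the freshly computed seqno lets callers take the post-update
value in one call instead of re-reading dev_priv afterwards, which the
execbuf and ioctl hunks in this series rely on. A hedged usage sketch
with a hypothetical helper:

    /* Hedged sketch: the snapshot is taken under fman->lock inside
     * vmw_fences_update(), so the returned value never runs ahead of
     * the fence manager's bookkeeping. */
    static u32 demo_report_passed_seqno(struct vmw_private *dev_priv)
    {
            return vmw_fences_update(dev_priv->fman);
    }
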
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
(struct drm_vmw_fence_signaled_arg *) data;
struct ttm_base_object *base;
struct vmw_fence_obj *fence;
- struct vmw_fence_manager *fman;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
- fman = fman_from_fence(fence);
arg->signaled = vmw_fence_obj_signaled(fence);
arg->signaled_flags = arg->flags;
- spin_lock(&fman->lock);
- arg->passed_seqno = dev_priv->last_read_seqno;
- spin_unlock(&fman->lock);
+ arg->passed_seqno = atomic_read_acquire(&dev_priv->last_read_seqno);
ttm_base_object_unref(&base);
return fence;
}
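
Dropping the fman->lock pair around what is now a single word-sized load
is sound because the acquire load pairs with the atomic_set_release() in
__vmw_fences_update(), preserving the visibility the lock used to provide
for this field. A hedged sketch of the replacement pattern, with a
hypothetical helper:

    /* Hedged sketch: equivalent visibility, for this one field, to the
     * old spin_lock(&fman->lock); read; spin_unlock(&fman->lock). */
    static u32 demo_read_passed_seqno(const struct vmw_private *dev_priv)
    {
            return atomic_read_acquire(&dev_priv->last_read_seqno);
    }
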
-extern void vmw_fences_update(struct vmw_fence_manager *fman);
+u32 vmw_fences_update(struct vmw_fence_manager *fman);
extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence);
return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
-void vmw_update_seqno(struct vmw_private *dev_priv)
-{
- uint32_t seqno = vmw_fence_read(dev_priv);
-
- if (dev_priv->last_read_seqno != seqno) {
- dev_priv->last_read_seqno = seqno;
- vmw_fences_update(dev_priv->fman);
- }
-}
-
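
The open-coded test in vmw_seqno_passed() below relies on unsigned modular
arithmetic: last_read_seqno - seqno stays small whenever seqno is at or
behind the last read value, even across the 32-bit boundary. A hedged
sketch with a hypothetical helper, assuming VMW_FENCE_WRAP is the driver's
wrap window (1 << 24 in vmwgfx_drv.h):

    /* Hedged sketch: e.g. last_read = 5 just after a wrap and
     * seqno = 0xfffffff0 issued shortly before it; 5 - 0xfffffff0 is
     * 0x15 in u32 arithmetic, well inside the window, so the wrapped
     * seqno correctly counts as passed. */
    static bool demo_seqno_passed(u32 last_read, u32 seqno)
    {
            return last_read - seqno < VMW_FENCE_WRAP;
    }
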
bool vmw_seqno_passed(struct vmw_private *dev_priv,
uint32_t seqno)
{
bool ret;
+ u32 last_read_seqno = atomic_read_acquire(&dev_priv->last_read_seqno);

- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+ if (last_read_seqno - seqno < VMW_FENCE_WRAP)
return true;
- vmw_update_seqno(dev_priv);
- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+ last_read_seqno = vmw_fences_update(dev_priv->fman);
+ if (last_read_seqno - seqno < VMW_FENCE_WRAP)
return true;
if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))