void dma_fence_signal_timestamp_locked(struct dma_fence *fence,
ktime_t timestamp)
{
+ const struct dma_fence_ops *ops;
struct dma_fence_cb *cur, *tmp;
struct list_head cb_list;
&fence->flags)))
return;
+ /*
+ * When neither a release nor a wait operation is specified, set the ops
+ * pointer to NULL to allow the fence structure to become independent
+ * from who originally issued it.
+ */
+ ops = rcu_dereference_protected(fence->ops, true);
+ if (!ops->release && !ops->wait)
+ RCU_INIT_POINTER(fence->ops, NULL);
+
/* Stash the cb_list before replacing it with the timestamp */
list_replace(&fence->cb_list, &cb_list);
rcu_read_lock();
ops = rcu_dereference(fence->ops);
trace_dma_fence_wait_start(fence);
- if (ops->wait) {
+ if (ops && ops->wait) {
/*
* Implementing the wait ops is deprecated and not supported for
* issuers of fences who need their lifetime to be independent
}
ops = rcu_dereference(fence->ops);
- if (ops->release)
+ if (ops && ops->release)
ops->release(fence);
else
dma_fence_free(fence);
rcu_read_lock();
ops = rcu_dereference(fence->ops);
- if (!was_set && ops->enable_signaling) {
+ if (!was_set && ops && ops->enable_signaling) {
trace_dma_fence_enable_signal(fence);
if (!ops->enable_signaling(fence)) {
rcu_read_lock();
ops = rcu_dereference(fence->ops);
- if (ops->set_deadline && !dma_fence_is_signaled(fence))
+ if (ops && ops->set_deadline && !dma_fence_is_signaled(fence))
ops->set_deadline(fence, deadline);
rcu_read_unlock();
}
rcu_read_lock();
ops = rcu_dereference(fence->ops);
- if (ops->signaled && ops->signaled(fence)) {
+ if (ops && ops->signaled && ops->signaled(fence)) {
rcu_read_unlock();
dma_fence_signal_locked(fence);
return true;
rcu_read_lock();
ops = rcu_dereference(fence->ops);
- if (ops->signaled && ops->signaled(fence)) {
+ if (ops && ops->signaled && ops->signaled(fence)) {
rcu_read_unlock();
dma_fence_signal(fence);
return true;