To remove the file_priv NULL-ing dance needed to check whether the file
descriptor is still open, move the per-fd reset counter into struct
v3d_stats, which is heap-allocated and refcounted, and therefore
outlives the fd for as long as jobs reference it.
This change allows removing the last `queue_lock` usage that protected
`job->file_priv` and avoids possible NULL pointer dereferences caused by
lifetime mismatches.
Also, to simplify locking, replace both the global and per-fd
mutex-protected reset counters with atomics.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Reviewed-by: Iago Toral Quiroga <itoral@igalia.com>
Link: https://patch.msgid.link/20260306-v3d-reset-locking-improv-v3-5-49864fe00692@igalia.com
Co-developed-by: Maíra Canal <mcanal@igalia.com>
Signed-off-by: Maíra Canal <mcanal@igalia.com>
args->value = !!drm_gem_get_huge_mnt(dev);
return 0;
case DRM_V3D_PARAM_GLOBAL_RESET_COUNTER:
- mutex_lock(&v3d->reset_lock);
- args->value = v3d->reset_counter;
- mutex_unlock(&v3d->reset_lock);
+ args->value = atomic_read(&v3d->reset_counter);
return 0;
case DRM_V3D_PARAM_CONTEXT_RESET_COUNTER:
- mutex_lock(&v3d->reset_lock);
- args->value = v3d_priv->reset_counter;
- mutex_unlock(&v3d->reset_lock);
+ args->value = 0;
+ for (enum v3d_queue q = 0; q < V3D_MAX_QUEUES; q++)
+ args->value += atomic_read(&v3d_priv->stats[q]->reset_counter);
return 0;
default:
drm_dbg(dev, "Unknown parameter %d\n", args->param);
static void
v3d_postclose(struct drm_device *dev, struct drm_file *file)
{
- struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file->driver_priv;
- unsigned long irqflags;
enum v3d_queue q;
for (q = 0; q < V3D_MAX_QUEUES; q++) {
- struct v3d_queue_state *queue = &v3d->queue[q];
- struct v3d_job *job = queue->active_job;
-
drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
-
- if (job && job->base.entity == &v3d_priv->sched_entity[q]) {
- spin_lock_irqsave(&queue->queue_lock, irqflags);
- job->file_priv = NULL;
- spin_unlock_irqrestore(&queue->queue_lock, irqflags);
- }
-
v3d_stats_put(v3d_priv->stats[q]);
}
* job queues, even the write side never is.
*/
seqcount_t lock;
+
+ atomic_t reset_counter;
};
struct v3d_queue_state {
*/
struct v3d_perfmon *global_perfmon;
- /* Global reset counter. The counter must be incremented when
- * a GPU reset happens. It must be protected by @reset_lock.
- */
- unsigned int reset_counter;
+ /* Global reset counter incremented on each GPU reset. */
+ atomic_t reset_counter;
};
static inline struct v3d_dev *
/* Stores the GPU stats for a specific queue for this fd. */
struct v3d_stats *stats[V3D_MAX_QUEUES];
-
- /* Per-fd reset counter, must be incremented when a job submitted
- * by this fd causes a GPU reset. It must be protected by
- * &struct v3d_dev->reset_lock.
- */
- unsigned int reset_counter;
};
struct v3d_bo {
enum v3d_queue q)
{
struct v3d_job *job = to_v3d_job(sched_job);
- struct v3d_file_priv *v3d_priv = job->file_priv;
- unsigned long irqflags;
enum v3d_queue i;
mutex_lock(&v3d->reset_lock);
/* get the GPU back into the init state */
v3d_reset(v3d);
- v3d->reset_counter++;
- spin_lock_irqsave(&v3d->queue[q].queue_lock, irqflags);
- if (v3d_priv)
- v3d_priv->reset_counter++;
- spin_unlock_irqrestore(&v3d->queue[q].queue_lock, irqflags);
+ atomic_inc(&v3d->reset_counter);
+ atomic_inc(&job->client_stats->reset_counter);
for (i = 0; i < V3D_MAX_QUEUES; i++)
drm_sched_resubmit_jobs(&v3d->queue[i].sched);