drm/nouveau/svm: Avoid -Wflex-array-member-not-at-end warning
author    Gustavo A. R. Silva <gustavoars@kernel.org>
Wed, 2 Apr 2025 21:39:07 +0000 (15:39 -0600)
committer Danilo Krummrich <dakr@kernel.org>
Thu, 3 Apr 2025 16:02:18 +0000 (18:02 +0200)
-Wflex-array-member-not-at-end was introduced in GCC-14, and we are
getting ready to enable it, globally.

Use the `DEFINE_RAW_FLEX()` helper for an on-stack definition of
a flexible structure where the size of the flexible-array member
is known at compile time, and refactor the rest of the code
accordingly.
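
As a reference, below is a minimal userspace sketch of the pattern. The
macro is a simplified approximation of the kernel's DEFINE_RAW_FLEX()
from include/linux/overflow.h (not the real definition), and the struct
and field names are made up for illustration:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical struct, mirroring the shape of the real pfnmap payload. */
struct pfnmap_args {
	uint8_t  version;
	uint64_t addr;
	uint64_t phys[];	/* flexible-array member, last member */
};

/*
 * Simplified stand-in for DEFINE_RAW_FLEX(): reserve on-stack storage
 * large enough for TYPE plus COUNT trailing flexible-array elements, and
 * expose it through a correctly typed pointer NAME.
 */
#define DEFINE_RAW_FLEX(TYPE, NAME, MEMBER, COUNT)			\
	_Alignas(TYPE) unsigned char NAME##_buf[sizeof(TYPE) +		\
		sizeof(*((TYPE *)0)->MEMBER) * (COUNT)] = { 0 };	\
	TYPE *NAME = (TYPE *)NAME##_buf

int main(void)
{
	/* One trailing element, like phys[1] in the old wrapper struct. */
	DEFINE_RAW_FLEX(struct pfnmap_args, args, phys, 1);

	args->version = 0;
	args->phys[0] = 0;

	/*
	 * sizeof(args) is now only the size of a pointer; in the kernel,
	 * __struct_size(args) reports the full on-stack allocation, which
	 * is why the diff below replaces sizeof(args) with it.  Here we
	 * can read the backing buffer's size directly.
	 */
	printf("on-stack allocation: %zu bytes\n", sizeof(args_buf));
	return 0;
}

The old code obtained the same layout by nesting nouveau_pfnmap_args
(which ends in a flexible array) inside an on-stack wrapper followed by
u64 phys[1]; the helper keeps that layout without placing a flex-array
struct in the middle of another structure.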

So, with these changes, fix the following warning:

drivers/gpu/drm/nouveau/nouveau_svm.c:724:44: warning: structure containing a flexible array member is not at the end of another structure [-Wflex-array-member-not-at-end]
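
The shape that triggers the warning reduces to the following sketch
(hypothetical names; in the real code the nested struct is
nouveau_pfnmap_args, whose last member is a flexible array):

struct inner {
	int  version;
	long phys[];		/* flexible-array member */
};

struct old_wrapper {
	struct inner i;		/* flex-array struct not at the end: GCC 14 warns */
	long phys[1];		/* storage the inner flex array is meant to overlay */
};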

Signed-off-by: Gustavo A. R. Silva <gustavoars@kernel.org>
Signed-off-by: Danilo Krummrich <dakr@kernel.org>
Link: https://lore.kernel.org/r/Z-2uezeHt1aaHH6x@kspp
drivers/gpu/drm/nouveau/nouveau_svm.c

index 825c867eba7c5630c14905e4136af56278803963..50fae04d8a6902695585381912ec7ecfb221cb21 100644
@@ -721,10 +721,7 @@ nouveau_svm_fault(struct work_struct *work)
        struct nouveau_svm *svm = container_of(buffer, typeof(*svm), buffer[buffer->id]);
        struct nvif_object *device = &svm->drm->client.device.object;
        struct nouveau_svmm *svmm;
-       struct {
-               struct nouveau_pfnmap_args i;
-               u64 phys[1];
-       } args;
+       DEFINE_RAW_FLEX(struct nouveau_pfnmap_args, args, p.phys, 1);
        unsigned long hmm_flags;
        u64 inst, start, limit;
        int fi, fn;
@@ -773,11 +770,11 @@ nouveau_svm_fault(struct work_struct *work)
        mutex_unlock(&svm->mutex);
 
        /* Process list of faults. */
-       args.i.i.version = 0;
-       args.i.i.type = NVIF_IOCTL_V0_MTHD;
-       args.i.m.version = 0;
-       args.i.m.method = NVIF_VMM_V0_PFNMAP;
-       args.i.p.version = 0;
+       args->i.version = 0;
+       args->i.type = NVIF_IOCTL_V0_MTHD;
+       args->m.version = 0;
+       args->m.method = NVIF_VMM_V0_PFNMAP;
+       args->p.version = 0;
 
        for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
                struct svm_notifier notifier;
@@ -803,9 +800,9 @@ nouveau_svm_fault(struct work_struct *work)
                 * fault window, determining required pages and access
                 * permissions based on pending faults.
                 */
-               args.i.p.addr = start;
-               args.i.p.page = PAGE_SHIFT;
-               args.i.p.size = PAGE_SIZE;
+               args->p.addr = start;
+               args->p.page = PAGE_SHIFT;
+               args->p.size = PAGE_SIZE;
                /*
                 * Determine required permissions based on GPU fault
                 * access flags.
@@ -833,16 +830,16 @@ nouveau_svm_fault(struct work_struct *work)
 
                notifier.svmm = svmm;
                if (atomic)
-                       ret = nouveau_atomic_range_fault(svmm, svm->drm,
-                                                        &args.i, sizeof(args),
+                       ret = nouveau_atomic_range_fault(svmm, svm->drm, args,
+                                                        __struct_size(args),
                                                         &notifier);
                else
-                       ret = nouveau_range_fault(svmm, svm->drm, &args.i,
-                                                 sizeof(args), hmm_flags,
-                                                 &notifier);
+                       ret = nouveau_range_fault(svmm, svm->drm, args,
+                                                 __struct_size(args),
+                                                 hmm_flags, &notifier);
                mmput(mm);
 
-               limit = args.i.p.addr + args.i.p.size;
+               limit = args->p.addr + args->p.size;
                for (fn = fi; ++fn < buffer->fault_nr; ) {
                        /* It's okay to skip over duplicate addresses from the
                         * same SVMM as faults are ordered by access type such
@@ -856,14 +853,14 @@ nouveau_svm_fault(struct work_struct *work)
                        if (buffer->fault[fn]->svmm != svmm ||
                            buffer->fault[fn]->addr >= limit ||
                            (buffer->fault[fi]->access == FAULT_ACCESS_READ &&
-                            !(args.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
+                            !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
                            (buffer->fault[fi]->access != FAULT_ACCESS_READ &&
                             buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
-                            !(args.phys[0] & NVIF_VMM_PFNMAP_V0_W)) ||
+                            !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_W)) ||
                            (buffer->fault[fi]->access != FAULT_ACCESS_READ &&
                             buffer->fault[fi]->access != FAULT_ACCESS_WRITE &&
                             buffer->fault[fi]->access != FAULT_ACCESS_PREFETCH &&
-                            !(args.phys[0] & NVIF_VMM_PFNMAP_V0_A)))
+                            !(args->p.phys[0] & NVIF_VMM_PFNMAP_V0_A)))
                                break;
                }