firewire: core: use spin lock specific to topology map
author    Takashi Sakamoto <o-takashi@sakamocchi.jp>
          Mon, 15 Sep 2025 23:47:44 +0000 (08:47 +0900)
committer Takashi Sakamoto <o-takashi@sakamocchi.jp>
          Mon, 15 Sep 2025 23:52:18 +0000 (08:52 +0900)
At present, the read transaction to the topology map register is not
protected by any locking primitive. This is a potential problem: a
concurrent update can leave the reader with mixed contents of the
topology map.

This commit adds and uses a spin lock specific to the topology map.

Link: https://lore.kernel.org/r/20250915234747.915922-4-o-takashi@sakamocchi.jp
Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
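
For context, the locking pattern introduced here looks roughly like the sketch
below. The names are illustrative only and not the driver's actual code: the
writer runs from the bus-reset path and takes the plain spinlock guard, while
the reader sits on the transaction path and uses the irqsave variant because it
may be reached from IRQ context. The guard helpers come from linux/cleanup.h.

// Illustrative sketch with hypothetical names; not part of this patch.
#include <linux/cleanup.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_map {
	__be32 buffer[64];	// example size only
	spinlock_t lock;
};

// Writer: called from the bus-reset work item, never from IRQ context.
static void example_update(struct example_map *map, const __be32 *src, size_t count)
{
	scoped_guard(spinlock, &map->lock)
		memcpy(map->buffer, src, count * sizeof(*src));
}

// Reader: may run in IRQ context, so mask local interrupts while holding the lock.
static void example_read(struct example_map *map, __be32 *dst, size_t count)
{
	scoped_guard(spinlock_irqsave, &map->lock)
		memcpy(dst, map->buffer, count * sizeof(*dst));
}
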
drivers/firewire/core-topology.c
drivers/firewire/core-transaction.c
include/linux/firewire.h

diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c
index 17aaf14cab0b9e0ffb30ef446aa2c1a6b5973447..c62cf93f3f65e14e8f43448507a212cc739decc6 100644
--- a/drivers/firewire/core-topology.c
+++ b/drivers/firewire/core-topology.c
@@ -435,20 +435,22 @@ static void update_tree(struct fw_card *card, struct fw_node *root)
        }
 }
 
-static void update_topology_map(struct fw_card *card,
-                               u32 *self_ids, int self_id_count)
+static void update_topology_map(__be32 *buffer, size_t buffer_size, int root_node_id,
+                               const u32 *self_ids, int self_id_count)
 {
-       int node_count = (card->root_node->node_id & 0x3f) + 1;
-       __be32 *map = card->topology_map;
+       __be32 *map = buffer;
+       int node_count = (root_node_id & 0x3f) + 1;
+
+       memset(map, 0, buffer_size);
 
        *map++ = cpu_to_be32((self_id_count + 2) << 16);
-       *map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1);
+       *map++ = cpu_to_be32(be32_to_cpu(buffer[1]) + 1);
        *map++ = cpu_to_be32((node_count << 16) | self_id_count);
 
        while (self_id_count--)
                *map++ = cpu_to_be32p(self_ids++);
 
-       fw_compute_block_crc(card->topology_map);
+       fw_compute_block_crc(buffer);
 }
 
 void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
@@ -479,8 +481,6 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
 
                local_node = build_tree(card, self_ids, self_id_count, generation);
 
-               update_topology_map(card, self_ids, self_id_count);
-
                card->color++;
 
                if (local_node == NULL) {
@@ -493,5 +493,11 @@ void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
                        update_tree(card, local_node);
                }
        }
+
+       // Just used by transaction layer.
+       scoped_guard(spinlock, &card->topology_map.lock) {
+               update_topology_map(card->topology_map.buffer, sizeof(card->topology_map.buffer),
+                                   card->root_node->node_id, self_ids, self_id_count);
+       }
 }
 EXPORT_SYMBOL(fw_core_handle_bus_reset);
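
For readers unfamiliar with the guard helpers, the scoped_guard(spinlock, ...)
block added above is roughly equivalent to the open-coded form below (a sketch,
not part of the patch); the lock is released automatically when the scope is
left, including on early exit:

	// Roughly what the scoped_guard() block above expands to.
	spin_lock(&card->topology_map.lock);
	update_topology_map(card->topology_map.buffer, sizeof(card->topology_map.buffer),
			    card->root_node->node_id, self_ids, self_id_count);
	spin_unlock(&card->topology_map.lock);
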
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 623e1d9bd107f932d2c022d3ece5b6a1a69137b1..8edffafd21c1fc832da03dfb2d7ec11a5dbfecd2 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -1196,7 +1196,11 @@ static void handle_topology_map(struct fw_card *card, struct fw_request *request
        }
 
        start = (offset - topology_map_region.start) / 4;
-       memcpy(payload, &card->topology_map[start], length);
+
+       // NOTE: This can be without irqsave when we can guarantee that fw_send_request() for local
+       // destination never runs in any type of IRQ context.
+       scoped_guard(spinlock_irqsave, &card->topology_map.lock)
+               memcpy(payload, &card->topology_map.buffer[start], length);
 
        fw_send_response(card, request, RCODE_COMPLETE);
 }
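
The irqsave guard used in the read path corresponds roughly to the following
open-coded form (sketch only); local interrupts stay disabled while the lock is
held, which is why the NOTE above points out that the plain variant would
suffice once local requests are guaranteed never to run in IRQ context:

	// Roughly what the scoped_guard(spinlock_irqsave, ...) block above expands to.
	unsigned long flags;

	spin_lock_irqsave(&card->topology_map.lock, flags);
	memcpy(payload, &card->topology_map.buffer[start], length);
	spin_unlock_irqrestore(&card->topology_map.lock, flags);
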
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index f3260aacf730ea55c8e5645db79091bd82bb7cd7..aeb71c39e57ee287099e09b9e0bb934526e13427 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -129,7 +129,11 @@ struct fw_card {
 
        bool broadcast_channel_allocated;
        u32 broadcast_channel;
-       __be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
+
+       struct {
+               __be32 buffer[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
+               spinlock_t lock;
+       } topology_map;
 
        __be32 maint_utility_register;
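
Not shown in this diff: a spinlock_t embedded in a structure has to be
initialized before first use. Presumably that happens in the card
initialization path; a minimal sketch, assuming fw_card_initialize() in
drivers/firewire/core-card.c as the (hypothetical, not shown here) location:

	// Hypothetical placement; the actual initialization site is not part of this diff.
	spin_lock_init(&card->topology_map.lock);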