struct b43_dmaring *ring;
int i, err;
dma_addr_t dma_test;
+ size_t nr_slots;
- ring = kzalloc_obj(*ring);
+ if (for_tx)
+ nr_slots = B43_TXRING_SLOTS;
+ else
+ nr_slots = B43_RXRING_SLOTS;
+
+ ring = kzalloc_flex(*ring, meta, nr_slots);
if (!ring)
goto out;
- ring->nr_slots = B43_RXRING_SLOTS;
- if (for_tx)
- ring->nr_slots = B43_TXRING_SLOTS;
+ ring->nr_slots = nr_slots;
- ring->meta = kzalloc_objs(struct b43_dmadesc_meta, ring->nr_slots);
- if (!ring->meta)
- goto err_kfree_ring;
for (i = 0; i < ring->nr_slots; i++)
ring->meta[i].skb = B43_DMA_PTR_POISON;
err_kfree_txhdr_cache:
kfree(ring->txhdr_cache);
err_kfree_meta:
- kfree(ring->meta);
- err_kfree_ring:
kfree(ring);
ring = NULL;
goto out;
free_ringmemory(ring);
kfree(ring->txhdr_cache);
- kfree(ring->meta);
kfree(ring);
}
const struct b43_dma_ops *ops;
/* Kernel virtual base address of the ring memory. */
void *descbase;
- /* Meta data about all descriptors. */
- struct b43_dmadesc_meta *meta;
/* Cache of TX headers for each TX frame.
* This is to avoid an allocation on each TX.
* This is NULL for an RX ring.
/* Statistics: Total number of TX plus all retries. */
u64 nr_total_packet_tries;
#endif /* CONFIG_B43_DEBUG */
+ /* Meta data about all descriptors. */
+ struct b43_dmadesc_meta meta[] __counted_by(nr_slots);
};
static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset)