u64 iso_closure;
struct fw_iso_buffer buffer;
unsigned long vm_start;
- bool buffer_is_mapped;
struct list_head phy_receiver_link;
u64 phy_receiver_closure;
};
// The DMA mapping is performed here if the buffer has already been allocated by the
// mmap(2) system call; if not, it is deferred until that system call is made.
- if (!client->buffer_is_mapped) {
+ if (client->buffer.pages && !client->buffer.dma_addrs) {
ret = fw_iso_buffer_map_dma(&client->buffer, client->device->card,
iso_dma_direction(context));
if (ret < 0) {
return ret;
}
- client->buffer_is_mapped = true;
}
client->iso_closure = a->closure;
client->iso_context = context;
ret = fw_iso_buffer_map_dma(&client->buffer, client->device->card,
			    iso_dma_direction(client->iso_context));
if (ret < 0)
goto fail;
- client->buffer_is_mapped = true;
}
}
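// Illustrative sketch, not part of the patch: the removed buffer_is_mapped flag is equivalent
// to asking whether the dma_addrs array has been allocated, so both call sites above could
// share a single predicate. The helper name below is hypothetical.
static inline bool iso_buffer_is_mapped(const struct fw_iso_buffer *buffer)
{
	return buffer->dma_addrs != NULL;
}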
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
enum dma_data_direction direction)
{
- dma_addr_t address;
+ dma_addr_t *dma_addrs __free(kfree) = kcalloc(buffer->page_count, sizeof(dma_addrs[0]),
+ GFP_KERNEL);
int i;
- buffer->direction = direction;
+ if (!dma_addrs)
+ return -ENOMEM;
// Map each page for DMA individually; the resulting DMA addresses are not contiguous.
// Cache coherency for the pages is maintained by hand.
for (i = 0; i < buffer->page_count; i++) {
// Alternatively, dma_map_phys() with the physical address of each page could be used here.
- address = dma_map_page(card->device, buffer->pages[i],
- 0, PAGE_SIZE, direction);
- if (dma_mapping_error(card->device, address))
+ dma_addr_t dma_addr = dma_map_page(card->device, buffer->pages[i], 0, PAGE_SIZE,
+ direction);
+ if (dma_mapping_error(card->device, dma_addr))
break;
- set_page_private(buffer->pages[i], address);
+ dma_addrs[i] = dma_addr;
}
- buffer->page_count_mapped = i;
- if (i < buffer->page_count)
+ if (i < buffer->page_count) {
+ while (i-- > 0)
+ dma_unmap_page(card->device, dma_addrs[i], PAGE_SIZE, direction);
return -ENOMEM;
+ }
+
+ buffer->direction = direction;
+ buffer->dma_addrs = no_free_ptr(dma_addrs);
return 0;
}
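// Illustrative sketch, not part of the patch: because each page is mapped individually, cache
// coherency is maintained by hand with dma_sync_single_range_for_device()/..._for_cpu() against
// the per-page entries in dma_addrs. The helper below is hypothetical; it walks a byte range of
// the virtually contiguous buffer page by page before handing it to the device.
static void iso_buffer_sync_range_for_device(struct fw_iso_buffer *buffer, struct device *dev,
					     unsigned long offset, size_t length)
{
	while (length > 0) {
		int page = offset >> PAGE_SHIFT;
		unsigned long page_offset = offset & ~PAGE_MASK;
		size_t chunk = min_t(size_t, PAGE_SIZE - page_offset, length);

		dma_sync_single_range_for_device(dev, buffer->dma_addrs[page], page_offset,
						 chunk, buffer->direction);
		offset += chunk;
		length -= chunk;
	}
}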
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
struct fw_card *card)
{
- int i;
- dma_addr_t address;
-
- for (i = 0; i < buffer->page_count_mapped; i++) {
- address = page_private(buffer->pages[i]);
- dma_unmap_page(card->device, address,
- PAGE_SIZE, buffer->direction);
+ if (buffer->dma_addrs) {
+ for (int i = 0; i < buffer->page_count; ++i) {
+ dma_addr_t dma_addr = buffer->dma_addrs[i];
+ dma_unmap_page(card->device, dma_addr, PAGE_SIZE, buffer->direction);
+ }
+ kfree(buffer->dma_addrs);
+ buffer->dma_addrs = NULL;
}
if (buffer->pages) {
	for (int i = 0; i < buffer->page_count; ++i)
		__free_page(buffer->pages[i]);
	kfree(buffer->pages);
	buffer->pages = NULL;
}
buffer->page_count = 0;
- buffer->page_count_mapped = 0;
}
EXPORT_SYMBOL(fw_iso_buffer_destroy);
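// Illustrative lifecycle sketch, not part of the patch, assuming the existing core helpers
// fw_iso_buffer_alloc() and fw_iso_buffer_map_dma() keep their current signatures: the mapping
// state now lives entirely in buffer->dma_addrs, so teardown is the same single call whether or
// not the DMA mapping ever happened.
static int iso_buffer_lifecycle_example(struct fw_card *card, int page_count,
					enum dma_data_direction direction)
{
	struct fw_iso_buffer buffer = {};
	int ret;

	ret = fw_iso_buffer_alloc(&buffer, page_count);
	if (ret < 0)
		return ret;

	// The mapping step may be deferred, e.g. until mmap(2); dma_addrs stays NULL until then.
	ret = fw_iso_buffer_map_dma(&buffer, card, direction);

	// fw_iso_buffer_destroy() unmaps via dma_addrs when present and frees the pages either way.
	fw_iso_buffer_destroy(&buffer, card);
	return ret;
}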
/* Convert DMA address to offset into virtually contiguous buffer. */
size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
{
- size_t i;
- dma_addr_t address;
- ssize_t offset;
-
- for (i = 0; i < buffer->page_count; i++) {
- address = page_private(buffer->pages[i]);
- offset = (ssize_t)completed - (ssize_t)address;
+ for (int i = 0; i < buffer->page_count; i++) {
+ dma_addr_t dma_addr = buffer->dma_addrs[i];
+ ssize_t offset = (ssize_t)completed - (ssize_t)dma_addr;
if (offset > 0 && offset <= PAGE_SIZE)
return (i << PAGE_SHIFT) + offset;
	}

	return 0;
}
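// Illustrative sketch, not part of the patch: a multichannel completion callback along the lines
// of the cdev layer's handler converts the completed DMA address reported by the controller back
// into an offset within the user-visible buffer. The callback name and the use of the buffer as
// callback data are hypothetical.
static void iso_mc_callback_example(struct fw_iso_context *context,
				    dma_addr_t completed, void *data)
{
	struct fw_iso_buffer *buffer = data;
	size_t offset = fw_iso_buffer_lookup(buffer, completed);

	// An offset of 0 means the completed address did not fall inside the buffer.
	pr_debug("buffer-fill context completed up to offset %zu\n", offset);
}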
struct descriptor *d, *last, *pd;
struct fw_iso_packet *p;
__le32 *header;
- dma_addr_t d_bus, page_bus;
+ dma_addr_t d_bus;
u32 z, header_z, payload_z, irq;
u32 payload_index, payload_end_index, next_page_index;
int page, end_page, i, length, offset;
length = min(next_page_index, payload_end_index) - payload_index;
pd[i].req_count = cpu_to_le16(length);
- page_bus = page_private(buffer->pages[page]);
- pd[i].data_address = cpu_to_le32(page_bus + offset);
+ dma_addr_t dma_addr = buffer->dma_addrs[page];
+ pd[i].data_address = cpu_to_le32(dma_addr + offset);
dma_sync_single_range_for_device(ctx->context.ohci->card.device,
- page_bus, offset, length,
+ dma_addr, offset, length,
DMA_TO_DEVICE);
payload_index += length;
{
struct device *device = ctx->context.ohci->card.device;
struct descriptor *d, *pd;
- dma_addr_t d_bus, page_bus;
+ dma_addr_t d_bus;
u32 z, header_z, rest;
int i, j, length;
int page, offset, packet_count, header_size, payload_per_buffer;
pd->res_count = pd->req_count;
pd->transfer_status = 0;
- page_bus = page_private(buffer->pages[page]);
- pd->data_address = cpu_to_le32(page_bus + offset);
+ dma_addr_t dma_addr = buffer->dma_addrs[page];
+ pd->data_address = cpu_to_le32(dma_addr + offset);
- dma_sync_single_range_for_device(device, page_bus,
+ dma_sync_single_range_for_device(device, dma_addr,
offset, length,
DMA_FROM_DEVICE);
unsigned long payload)
{
struct descriptor *d;
- dma_addr_t d_bus, page_bus;
+ dma_addr_t d_bus;
int page, offset, rest, z, i, length;
page = payload >> PAGE_SHIFT;
d->res_count = d->req_count;
d->transfer_status = 0;
- page_bus = page_private(buffer->pages[page]);
- d->data_address = cpu_to_le32(page_bus + offset);
+ dma_addr_t dma_addr = buffer->dma_addrs[page];
+ d->data_address = cpu_to_le32(dma_addr + offset);
dma_sync_single_range_for_device(ctx->context.ohci->card.device,
- page_bus, offset, length,
+ dma_addr, offset, length,
DMA_FROM_DEVICE);
rest -= length;
struct fw_iso_buffer {
enum dma_data_direction direction;
struct page **pages;
+ dma_addr_t *dma_addrs;
int page_count;
- int page_count_mapped;
};
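// Illustrative sketch, not part of the patch: with the new dma_addrs array, the device-visible
// address for an arbitrary byte offset into the virtually contiguous buffer is the per-page DMA
// address plus the offset within that page, which is what the ohci.c hunks above compute where
// the old code read page_private(). The helper name is hypothetical.
static inline dma_addr_t iso_buffer_bus_address(const struct fw_iso_buffer *buffer,
						unsigned long offset)
{
	return buffer->dma_addrs[offset >> PAGE_SHIFT] + (offset & ~PAGE_MASK);
}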
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
		       int page_count, enum dma_data_direction direction);