srv_path->mrs_num++) {
struct rtrs_srv_mr *srv_mr = &srv_path->mrs[srv_path->mrs_num];
struct scatterlist *s;
- int nr, nr_sgt, chunks;
+ int nr, nr_sgt, chunks, ind;
sgt = &srv_mr->sgt;
chunks = chunks_per_mr * srv_path->mrs_num;
}
nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt,
NULL, max_chunk_size);
- if (nr != nr_sgt) {
+ if (nr < nr_sgt) {
err = nr < 0 ? nr : -EINVAL;
goto dereg_mr;
}
- /* Eventually dma addr for each chunk can be cached */
- for_each_sg(sgt->sgl, s, nr_sgt, i)
- srv_path->dma_addr[chunks + i] = sg_dma_address(s);
+
+ /*
+ * Cache the DMA address of every chunk by walking the sg
+ * entries. If the DMA layer (e.g. an IOMMU) merged physically
+ * contiguous chunks into a single sg entry, an inner loop must
+ * stride through that larger entry in max_chunk_size steps so
+ * that each chunk still gets its own cached address.
+ */
+ ind = chunks;
+ for_each_sg(sgt->sgl, s, nr_sgt, i) {
+ unsigned int dma_len = sg_dma_len(s);
+ u64 dma_addr = sg_dma_address(s);
+ u64 dma_addr_end = dma_addr + dma_len;
+
+ do {
+ srv_path->dma_addr[ind++] = dma_addr;
+ dma_addr += max_chunk_size;
+ } while (dma_addr < dma_addr_end);
+ }
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
srv_mr->mr = mr;
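
To see the address-caching logic in isolation, here is a minimal userspace sketch (plain C, not kernel code) of the do/while loop added above. It models a DMA layer that merged four physically contiguous chunks into two larger regions; the struct name "region" and all addresses and sizes are made up for illustration. It demonstrates that striding through each merged region in max_chunk_size steps still yields exactly one cached address per chunk, whereas the removed one-address-per-sg-entry loop would fill only two of the four slots for the same input.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for one DMA-mapped sg entry: start address plus length. */
struct region {
	uint64_t dma_addr;
	unsigned int dma_len;
};

int main(void)
{
	const uint64_t max_chunk_size = 0x1000;	/* one chunk = 4 KiB */
	/* Two merged regions covering 3 + 1 = 4 chunks. */
	const struct region regions[] = {
		{ .dma_addr = 0x10000, .dma_len = 3 * 0x1000 },
		{ .dma_addr = 0x40000, .dma_len = 1 * 0x1000 },
	};
	uint64_t dma_addr_cache[8];
	unsigned int i, ind = 0;

	for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
		uint64_t dma_addr = regions[i].dma_addr;
		uint64_t dma_addr_end = dma_addr + regions[i].dma_len;

		/* Same pattern as the patch: one entry per chunk. */
		do {
			dma_addr_cache[ind++] = dma_addr;
			dma_addr += max_chunk_size;
		} while (dma_addr < dma_addr_end);
	}

	/* Prints four addresses: 0x10000 0x11000 0x12000 0x40000. */
	for (i = 0; i < ind; i++)
		printf("chunk %u -> dma addr 0x%llx\n", i,
		       (unsigned long long)dma_addr_cache[i]);
	return 0;
}

With the old loop (one store per sg entry), the same merged input would leave two of the four chunk slots unpopulated, which is the mismatch the patch fixes.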