 struct phys_vec {
        phys_addr_t paddr;
-       u32 len;
+       size_t len;
 };
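
The widening matters once a single mapped range can exceed 4 GiB, which a u32
cannot represent. A standalone userspace sketch of the truncation this avoids,
assuming a 64-bit size_t (not part of the patch; the 5 GiB figure is an
arbitrary example):

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        /* 5 GiB of merged payload; arbitrary example value */
        size_t merged = 5ULL << 30;
        uint32_t truncated = (uint32_t)merged; /* keeps only merged % 2^32 */

        /* prints "5368709120 vs 1073741824" */
        printf("%zu vs %" PRIu32 "\n", merged, truncated);
        return 0;
}
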
 static bool blk_map_iter_next(struct request *req, struct blk_map_iter *iter,
                struct phys_vec *vec)
 {
        enum dma_data_direction dir = rq_dma_dir(req);
-       unsigned int mapped = 0;
+       size_t mapped = 0;
        unsigned int attrs = 0;
        int error;
 
        iter->addr = state->addr;
        blk_rq_map_iter_init(rq, &iter);
        while (blk_map_iter_next(rq, &iter, &vec)) {
                *last_sg = blk_next_sg(last_sg, sglist);
+
+               WARN_ON_ONCE(overflows_type(vec.len, unsigned int));
                sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len,
                                offset_in_page(vec.paddr));
                nsegs++;
        while (blk_map_iter_next(rq, &iter, &vec)) {
                sg = blk_next_sg(&sg, sglist);
+
+               WARN_ON_ONCE(overflows_type(vec.len, unsigned int));
                sg_set_page(sg, phys_to_page(vec.paddr), vec.len,
                                offset_in_page(vec.paddr));
                segments++;
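
Both loops hand vec.len to sg_set_page(), and struct scatterlist stores its
length as an unsigned int, so a size_t length above UINT_MAX would be silently
truncated there; the added WARN_ON_ONCE(overflows_type(...)) flags that case
first. A rough userspace stand-in for the check, assuming a 64-bit size_t
(len_overflows_uint is a hypothetical name; the kernel's macro lives in
include/linux/overflow.h):

#include <assert.h>
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>

/* Stand-in for overflows_type(len, unsigned int) */
static bool len_overflows_uint(size_t len)
{
        return len > UINT_MAX;
}

int main(void)
{
        assert(!len_overflows_uint(4096));                /* typical segment */
        assert(len_overflows_uint((size_t)UINT_MAX + 1)); /* would truncate */
        return 0;
}
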
        u8 flags;
        u8 nr_descriptors;
-       unsigned int total_len;
+       size_t total_len;
        struct dma_iova_state dma_state;
        void *descriptors[NVME_MAX_NR_DESCRIPTORS];
        struct nvme_dma_vec *dma_vecs;
        unsigned int nr_dma_vecs;
        dma_addr_t meta_dma;
-       unsigned int meta_total_len;
+       size_t meta_total_len;
        struct dma_iova_state meta_dma_state;
        struct nvme_sgl_desc *meta_descriptor;
 };
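
The nvme_iod byte counts get the same treatment: total_len and meta_total_len
cover a whole request's data and metadata, and an unsigned int would wrap once
a request passes 4 GiB. A sketch of that accumulation hazard with hypothetical
names (not the driver's code), assuming a 64-bit size_t:

#include <stddef.h>

struct segment {
        size_t len;
};

/* Hypothetical helper: total payload bytes for one request.
 * With an unsigned int accumulator this sum wraps past 4 GiB;
 * size_t keeps the full count on 64-bit builds. */
static size_t request_total_len(const struct segment *segs, int nr)
{
        size_t total = 0;

        for (int i = 0; i < nr; i++)
                total += segs[i].len;
        return total;
}
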