git.ipfire.org Git - thirdparty/linux.git/commitdiff
RDMA/irdma: Harden depth calculation functions
author: Shiraz Saleem <shiraz.saleem@intel.com>
Mon, 16 Mar 2026 18:39:47 +0000 (13:39 -0500)
committer: Leon Romanovsky <leonro@nvidia.com>
Wed, 18 Mar 2026 10:20:53 +0000 (06:20 -0400)
An issue was exposed where the OS can pass in U32_MAX for the SQ/RQ/SRQ size.
This can cause integer overflow and truncation of the SQ/RQ/SRQ depth,
returning success when it should have failed.

Harden the functions to do all depth calculations and boundary
checking in u64 sizes.

Fixes: 563e1feb5f6e ("RDMA/irdma: Add SRQ support")
Signed-off-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/irdma/uk.c

index ac3721a5747a1e1770794943218dc0a06372afb0..4718acf6c6fd00275bc39103916c0be7f13068ec 100644 (file)
@@ -1438,7 +1438,7 @@ exit:
  * irdma_round_up_wq - return round up qp wq depth
  * @wqdepth: wq depth in quanta to round up
  */
-static int irdma_round_up_wq(u32 wqdepth)
+static u64 irdma_round_up_wq(u64 wqdepth)
 {
        int scount = 1;
 
@@ -1491,15 +1491,16 @@ void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
 int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
                      u32 *sqdepth)
 {
-       u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+       u32 min_hw_quanta = (u32)uk_attrs->min_hw_wq_size << shift;
+       u64 hw_quanta =
+               irdma_round_up_wq(((u64)sq_size << shift) + IRDMA_SQ_RSVD);
 
-       *sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);
-
-       if (*sqdepth < min_size)
-               *sqdepth = min_size;
-       else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
+       if (hw_quanta < min_hw_quanta)
+               hw_quanta = min_hw_quanta;
+       else if (hw_quanta > uk_attrs->max_hw_wq_quanta)
                return -EINVAL;
 
+       *sqdepth = hw_quanta;
        return 0;
 }
 
@@ -1513,15 +1514,16 @@ int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift,
 int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
                      u32 *rqdepth)
 {
-       u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
-
-       *rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);
+       u32 min_hw_quanta = (u32)uk_attrs->min_hw_wq_size << shift;
+       u64 hw_quanta =
+               irdma_round_up_wq(((u64)rq_size << shift) + IRDMA_RQ_RSVD);
 
-       if (*rqdepth < min_size)
-               *rqdepth = min_size;
-       else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
+       if (hw_quanta < min_hw_quanta)
+               hw_quanta = min_hw_quanta;
+       else if (hw_quanta > uk_attrs->max_hw_rq_quanta)
                return -EINVAL;
 
+       *rqdepth = hw_quanta;
        return 0;
 }
 
@@ -1535,13 +1537,16 @@ int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift,
 int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift,
                       u32 *srqdepth)
 {
-       *srqdepth = irdma_round_up_wq((srq_size << shift) + IRDMA_RQ_RSVD);
+       u32 min_hw_quanta = (u32)uk_attrs->min_hw_wq_size << shift;
+       u64 hw_quanta =
+               irdma_round_up_wq(((u64)srq_size << shift) + IRDMA_RQ_RSVD);
 
-       if (*srqdepth < ((u32)uk_attrs->min_hw_wq_size << shift))
-               *srqdepth = uk_attrs->min_hw_wq_size << shift;
-       else if (*srqdepth > uk_attrs->max_hw_srq_quanta)
+       if (hw_quanta < min_hw_quanta)
+               hw_quanta = min_hw_quanta;
+       else if (hw_quanta > uk_attrs->max_hw_srq_quanta)
                return -EINVAL;
 
+       *srqdepth = hw_quanta;
        return 0;
 }