git.ipfire.org Git - thirdparty/linux.git/commitdiff
cifs: Use iterate_and_advance*() routines directly for hashing
author     David Howells <dhowells@redhat.com>
           Fri, 26 Jul 2024 19:03:07 +0000 (20:03 +0100)
committer  Christian Brauner <brauner@kernel.org>
           Thu, 12 Sep 2024 10:20:42 +0000 (12:20 +0200)
Replace the bespoke cifs routines that hash ITER_BVEC and ITER_KVEC iterators
with iterate_and_advance_kernel() - a variant of iterate_and_advance() that
only supports kernel-internal ITER_* types and not UBUF/IOVEC types.

The bespoke ITER_XARRAY handler is left in place because we don't really want
to be calling crypto_shash_update() under the RCU read lock for large amounts
of data; besides, ITER_XARRAY is going to be phased out.
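
Condensed from the patch below (nothing here beyond what the patch itself
adds), the conversion boils down to one iov_step_f callback that feeds each
mapped segment to crypto_shash_update(), plus a single call where the old
per-iterator-type switch used to be:

        if (iterate_and_advance_kernel(&tmp_iter, maxsize, shash, &err,
                                       cifs_shash_step) != maxsize)
                return err;
        return 0;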

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Steve French <sfrench@samba.org>
cc: Paulo Alcantara <pc@manguebit.com>
cc: Tom Talpey <tom@talpey.com>
cc: Enzo Matsumiya <ematsumiya@suse.de>
cc: linux-cifs@vger.kernel.org
Link: https://lore.kernel.org/r/20240814203850.2240469-24-dhowells@redhat.com/
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/smb/client/cifsencrypt.c
include/linux/iov_iter.h

index 6322f0f68a176b177c943b074fe414c4905bf9bb..991a1ab047e71787388e7080a7e2744635f5b29c 100644
 #include <linux/random.h>
 #include <linux/highmem.h>
 #include <linux/fips.h>
+#include <linux/iov_iter.h>
 #include "../common/arc4.h"
 #include <crypto/aead.h>
 
-/*
- * Hash data from a BVEC-type iterator.
- */
-static int cifs_shash_bvec(const struct iov_iter *iter, ssize_t maxsize,
-                          struct shash_desc *shash)
-{
-       const struct bio_vec *bv = iter->bvec;
-       unsigned long start = iter->iov_offset;
-       unsigned int i;
-       void *p;
-       int ret;
-
-       for (i = 0; i < iter->nr_segs; i++) {
-               size_t off, len;
-
-               len = bv[i].bv_len;
-               if (start >= len) {
-                       start -= len;
-                       continue;
-               }
-
-               len = min_t(size_t, maxsize, len - start);
-               off = bv[i].bv_offset + start;
-
-               p = kmap_local_page(bv[i].bv_page);
-               ret = crypto_shash_update(shash, p + off, len);
-               kunmap_local(p);
-               if (ret < 0)
-                       return ret;
-
-               maxsize -= len;
-               if (maxsize <= 0)
-                       break;
-               start = 0;
-       }
-
-       return 0;
-}
-
-/*
- * Hash data from a KVEC-type iterator.
- */
-static int cifs_shash_kvec(const struct iov_iter *iter, ssize_t maxsize,
-                          struct shash_desc *shash)
-{
-       const struct kvec *kv = iter->kvec;
-       unsigned long start = iter->iov_offset;
-       unsigned int i;
-       int ret;
-
-       for (i = 0; i < iter->nr_segs; i++) {
-               size_t len;
-
-               len = kv[i].iov_len;
-               if (start >= len) {
-                       start -= len;
-                       continue;
-               }
-
-               len = min_t(size_t, maxsize, len - start);
-               ret = crypto_shash_update(shash, kv[i].iov_base + start, len);
-               if (ret < 0)
-                       return ret;
-               maxsize -= len;
-
-               if (maxsize <= 0)
-                       break;
-               start = 0;
-       }
-
-       return 0;
-}
-
 /*
  * Hash data from an XARRAY-type iterator.
  */
@@ -145,27 +73,36 @@ static ssize_t cifs_shash_xarray(const struct iov_iter *iter, ssize_t maxsize,
        return 0;
 }
 
+static size_t cifs_shash_step(void *iter_base, size_t progress, size_t len,
+                             void *priv, void *priv2)
+{
+       struct shash_desc *shash = priv;
+       int ret, *pret = priv2;
+
+       ret = crypto_shash_update(shash, iter_base, len);
+       if (ret < 0) {
+               *pret = ret;
+               return len;
+       }
+       return 0;
+}
+
 /*
  * Pass the data from an iterator into a hash.
  */
 static int cifs_shash_iter(const struct iov_iter *iter, size_t maxsize,
                           struct shash_desc *shash)
 {
-       if (maxsize == 0)
-               return 0;
+       struct iov_iter tmp_iter = *iter;
+       int err = -EIO;
 
-       switch (iov_iter_type(iter)) {
-       case ITER_BVEC:
-               return cifs_shash_bvec(iter, maxsize, shash);
-       case ITER_KVEC:
-               return cifs_shash_kvec(iter, maxsize, shash);
-       case ITER_XARRAY:
+       if (iov_iter_type(iter) == ITER_XARRAY)
                return cifs_shash_xarray(iter, maxsize, shash);
-       default:
-               pr_err("cifs_shash_iter(%u) unsupported\n", iov_iter_type(iter));
-               WARN_ON_ONCE(1);
-               return -EIO;
-       }
+
+       if (iterate_and_advance_kernel(&tmp_iter, maxsize, shash, &err,
+                                      cifs_shash_step) != maxsize)
+               return err;
+       return 0;
 }
 
 int __cifs_calc_signature(struct smb_rqst *rqst,
index a223370a59a78e32af0ee5666c429213ef59b472..c4aa58032faf874ee5b29bd37f9e23c479741bef 100644
@@ -328,4 +328,51 @@ size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv,
        return iterate_and_advance2(iter, len, priv, NULL, ustep, step);
 }
 
+/**
+ * iterate_and_advance_kernel - Iterate over a kernel-internal iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @priv2: More data for the step functions.
+ * @step: Step function; given mapped kernel addresses.
+ *
+ * Iterate over the next part of an iterator, up to the specified length.  The
+ * buffer is presented in segments, which for kernel iteration are broken up by
+ * physical pages and mapped, with the mapped address being presented.
+ *
+ * [!] Note: This will only handle BVEC, KVEC, FOLIOQ, XARRAY and DISCARD-type
+ * iterators; it will not handle UBUF or IOVEC-type iterators.
+ *
+ * A step function, @step, must be provided.  It is always given mapped kernel
+ * addresses; unlike iterate_and_advance(), there is no user-address step and
+ * thus no need to handle faults or pinning.
+ *
+ * The step function is passed the address and length of the segment, @priv,
+ * @priv2 and the amount of data so far iterated over (which can, for example,
+ * be added to @priv to point to the right part of a second buffer).  The step
+ * function should return the amount of the segment it didn't process (ie. 0
+ * indicates complete processing).
+ *
+ * This function returns the amount of data processed (ie. 0 means nothing was
+ * processed and a return value of @len means it was processed to completion).
+ */
+static __always_inline
+size_t iterate_and_advance_kernel(struct iov_iter *iter, size_t len, void *priv,
+                                 void *priv2, iov_step_f step)
+{
+       if (unlikely(iter->count < len))
+               len = iter->count;
+       if (unlikely(!len))
+               return 0;
+       if (iov_iter_is_bvec(iter))
+               return iterate_bvec(iter, len, priv, priv2, step);
+       if (iov_iter_is_kvec(iter))
+               return iterate_kvec(iter, len, priv, priv2, step);
+       if (iov_iter_is_folioq(iter))
+               return iterate_folioq(iter, len, priv, priv2, step);
+       if (iov_iter_is_xarray(iter))
+               return iterate_xarray(iter, len, priv, priv2, step);
+       return iterate_discard(iter, len, priv, priv2, step);
+}
+
 #endif /* _LINUX_IOV_ITER_H */
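
For illustration only (not part of this commit; the names below are
hypothetical), a caller could drive iterate_and_advance_kernel() with a
trivial step function, for instance to flatten a kernel-internal iterator
into a linear buffer:

        #include <linux/iov_iter.h>
        #include <linux/string.h>

        /* Hypothetical sketch: copy up to @len bytes from a kernel-internal
         * iterator into a linear buffer via iterate_and_advance_kernel().
         */
        static size_t flatten_step(void *iter_base, size_t progress, size_t len,
                                   void *priv, void *priv2)
        {
                /* @progress is the number of bytes already copied and is used
                 * to offset into the destination buffer passed via @priv.
                 */
                memcpy(priv + progress, iter_base, len);
                return 0;       /* whole segment consumed */
        }

        static size_t flatten_iter(struct iov_iter *iter, void *buf, size_t len)
        {
                return iterate_and_advance_kernel(iter, len, buf, NULL,
                                                  flatten_step);
        }

The return value is the number of bytes processed and the iterator is advanced
by that amount, which is why cifs_shash_iter() above operates on a copy
(tmp_iter) rather than on the caller's iterator.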