git.ipfire.org Git - thirdparty/linux.git/commitdiff
cachefiles, netfs: Fix write to partial block at EOF
author: David Howells <dhowells@redhat.com>
Fri, 12 Jul 2024 11:44:30 +0000 (12:44 +0100)
committer: Christian Brauner <brauner@kernel.org>
Thu, 12 Sep 2024 10:20:41 +0000 (12:20 +0200)
Because it uses DIO writes, cachefiles is unable to make a write to the
backing file if that write is not aligned to and sized according to the
backing file's DIO block alignment.  This makes it tricky to handle a write
to the cache where the EOF on the network file is not correctly aligned.

To get around this, netfslib attempts to tell the driver it is calling how
much more data there is available beyond the EOF that it can use to pad the
write (netfslib preclears the part of the folio above the EOF).  However,
it tries to tell the cache what the maximum length is, but doesn't
calculate this correctly; and, in any case, cachefiles actually ignores the
value and just skips the block.

Fix this by:

 (1) Change the value passed to indicate the amount of extra data that can
     be added to the operation (now ->submit_extendable_to).  This is much
     simpler to calculate as it's just the end of the folio minus the top
     of the data within the folio - rather than having to account for data
     spread over multiple folios.

 (2) Make cachefiles add some of this data if the subrequest it is given
     ends at the network file's i_size if the extra data is sufficient to
     pad out to a whole block.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Link: https://lore.kernel.org/r/20240814203850.2240469-22-dhowells@redhat.com/
Signed-off-by: Christian Brauner <brauner@kernel.org>
fs/cachefiles/io.c
fs/netfs/read_pgpriv2.c
fs/netfs/write_issue.c
include/linux/netfs.h

index 5b82ba7785cd3dadea35031dc83bfab75f5e4182..6a821a959b59e621395a0be5cb48ec6621504b0d 100644 (file)
@@ -648,6 +648,7 @@ static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
        struct netfs_cache_resources *cres = &wreq->cache_resources;
        struct cachefiles_object *object = cachefiles_cres_object(cres);
        struct cachefiles_cache *cache = object->volume->cache;
+       struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];
        const struct cred *saved_cred;
        size_t off, pre, post, len = subreq->len;
        loff_t start = subreq->start;
@@ -661,6 +662,7 @@ static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
        if (off) {
                pre = CACHEFILES_DIO_BLOCK_SIZE - off;
                if (pre >= len) {
+                       fscache_count_dio_misfit();
                        netfs_write_subrequest_terminated(subreq, len, false);
                        return;
                }
@@ -671,10 +673,22 @@ static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
        }
 
        /* We also need to end on the cache granularity boundary */
+       if (start + len == wreq->i_size) {
+               size_t part = len % CACHEFILES_DIO_BLOCK_SIZE;
+               size_t need = CACHEFILES_DIO_BLOCK_SIZE - part;
+
+               if (part && stream->submit_extendable_to >= need) {
+                       len += need;
+                       subreq->len += need;
+                       subreq->io_iter.count += need;
+               }
+       }
+
        post = len & (CACHEFILES_DIO_BLOCK_SIZE - 1);
        if (post) {
                len -= post;
                if (len == 0) {
+                       fscache_count_dio_misfit();
                        netfs_write_subrequest_terminated(subreq, post, false);
                        return;
                }
index 9439461d535f03ba52f43994fec2b469a86d0bd5..ba5af89d37fae50bfe0768e9c878b170ce84d9ba 100644 (file)
@@ -97,7 +97,7 @@ static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio
        if (netfs_buffer_append_folio(wreq, folio, false) < 0)
                return -ENOMEM;
 
-       cache->submit_max_len = fsize;
+       cache->submit_extendable_to = fsize;
        cache->submit_off = 0;
        cache->submit_len = flen;
 
@@ -112,10 +112,10 @@ static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio
                wreq->io_iter.iov_offset = cache->submit_off;
 
                atomic64_set(&wreq->issued_to, fpos + cache->submit_off);
+               cache->submit_extendable_to = fsize - cache->submit_off;
                part = netfs_advance_write(wreq, cache, fpos + cache->submit_off,
                                           cache->submit_len, to_eof);
                cache->submit_off += part;
-               cache->submit_max_len -= part;
                if (part > cache->submit_len)
                        cache->submit_len = 0;
                else
index 975436d3dc3fc9f8e4cfa20213ffd802f494093f..f7d59f0bb8c21b1db1614543dd0ec2ac079fc934 100644 (file)
@@ -283,6 +283,7 @@ int netfs_advance_write(struct netfs_io_request *wreq,
        _debug("part %zx/%zx %zx/%zx", subreq->len, stream->sreq_max_len, part, len);
        subreq->len += part;
        subreq->nr_segs++;
+       stream->submit_extendable_to -= part;
 
        if (subreq->len >= stream->sreq_max_len ||
            subreq->nr_segs >= stream->sreq_max_segs ||
@@ -424,7 +425,6 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
         */
        for (int s = 0; s < NR_IO_STREAMS; s++) {
                stream = &wreq->io_streams[s];
-               stream->submit_max_len = fsize;
                stream->submit_off = foff;
                stream->submit_len = flen;
                if ((stream->source == NETFS_WRITE_TO_CACHE && streamw) ||
@@ -432,7 +432,6 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
                     fgroup == NETFS_FOLIO_COPY_TO_CACHE)) {
                        stream->submit_off = UINT_MAX;
                        stream->submit_len = 0;
-                       stream->submit_max_len = 0;
                }
        }
 
@@ -462,10 +461,10 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
                wreq->io_iter.iov_offset = stream->submit_off;
 
                atomic64_set(&wreq->issued_to, fpos + stream->submit_off);
+               stream->submit_extendable_to = fsize - stream->submit_off;
                part = netfs_advance_write(wreq, stream, fpos + stream->submit_off,
                                           stream->submit_len, to_eof);
                stream->submit_off += part;
-               stream->submit_max_len -= part;
                if (part > stream->submit_len)
                        stream->submit_len = 0;
                else
index c0f0c9c87d86e868a86001db303f5a31d67a7f85..5eaceef41e6cac4f54ab1404bc4fb125712e12d8 100644 (file)
@@ -135,7 +135,7 @@ struct netfs_io_stream {
        unsigned int            sreq_max_segs;  /* 0 or max number of segments in an iterator */
        unsigned int            submit_off;     /* Folio offset we're submitting from */
        unsigned int            submit_len;     /* Amount of data left to submit */
-       unsigned int            submit_max_len; /* Amount I/O can be rounded up to */
+       unsigned int            submit_extendable_to; /* Amount I/O can be rounded up to */
        void (*prepare_write)(struct netfs_io_subrequest *subreq);
        void (*issue_write)(struct netfs_io_subrequest *subreq);
        /* Collection tracking */