git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.10-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 23 Jun 2022 16:01:00 +0000 (18:01 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 23 Jun 2022 16:01:00 +0000 (18:01 +0200)
added patches:
s390-mm-use-non-quiescing-sske-for-kvm-switch-to-keyed-guest.patch
usb-gadget-u_ether-fix-regression-in-setting-fixed-mac-address.patch
zonefs-fix-zonefs_iomap_begin-for-reads.patch

queue-5.10/s390-mm-use-non-quiescing-sske-for-kvm-switch-to-keyed-guest.patch [new file with mode: 0644]
queue-5.10/series
queue-5.10/usb-gadget-u_ether-fix-regression-in-setting-fixed-mac-address.patch [new file with mode: 0644]
queue-5.10/zonefs-fix-zonefs_iomap_begin-for-reads.patch [new file with mode: 0644]

diff --git a/queue-5.10/s390-mm-use-non-quiescing-sske-for-kvm-switch-to-keyed-guest.patch b/queue-5.10/s390-mm-use-non-quiescing-sske-for-kvm-switch-to-keyed-guest.patch
new file mode 100644 (file)
index 0000000..b8479dc
--- /dev/null
@@ -0,0 +1,35 @@
+From 3ae11dbcfac906a8c3a480e98660a823130dc16a Mon Sep 17 00:00:00 2001
+From: Christian Borntraeger <borntraeger@linux.ibm.com>
+Date: Mon, 30 May 2022 11:27:06 +0200
+Subject: s390/mm: use non-quiescing sske for KVM switch to keyed guest
+
+From: Christian Borntraeger <borntraeger@linux.ibm.com>
+
+commit 3ae11dbcfac906a8c3a480e98660a823130dc16a upstream.
+
+The switch to a keyed guest does not require a classic sske, as the other
+guest CPUs are not accessing the key before the switch is complete.
+By using the NQ SSKE, things are faster, especially with multiple guests.
+
+Signed-off-by: Christian Borntraeger <borntraeger@linux.ibm.com>
+Suggested-by: Janis Schoetterl-Glausch <scgl@linux.ibm.com>
+Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Link: https://lore.kernel.org/r/20220530092706.11637-3-borntraeger@linux.ibm.com
+Signed-off-by: Christian Borntraeger <borntraeger@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/mm/pgtable.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -734,7 +734,7 @@ void ptep_zap_key(struct mm_struct *mm,
+       pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
+       ptev = pte_val(*ptep);
+       if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
+-              page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
++              page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
+       pgste_set_unlock(ptep, pgste);
+       preempt_enable();
+ }
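
For reference, the functional change in this patch is the last argument of
page_set_storage_key() flipping from 1 to 0. The sketch below paraphrases,
from memory, the helper in arch/s390/include/asm/page.h to show why that
selects the non-quiescing SSKE; __sske() and __sske_nq() are hypothetical
stand-ins for the real inline assembly, so check the details against your
tree:

/*
 * Sketch only, not part of the patch: the third argument selects between
 * the classic, quiescing SSKE and the faster non-quiescing (NQ) variant.
 * Passing 0, as ptep_zap_key() now does, selects the NQ form.
 */
static inline void page_set_storage_key(unsigned long addr,
                                        unsigned char skey, int mapped)
{
        if (!mapped)
                __sske_nq(addr, skey);  /* SSKE with the NQ control bit set */
        else
                __sske(addr, skey);     /* classic SSKE, quiesces all CPUs */
}
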
diff --git a/queue-5.10/series b/queue-5.10/series
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..4c6b59c4453684ecf17b67b32ec8fea1da697f47 100644 (file)
--- a/queue-5.10/series
@@ -0,0 +1,3 @@
+s390-mm-use-non-quiescing-sske-for-kvm-switch-to-keyed-guest.patch
+zonefs-fix-zonefs_iomap_begin-for-reads.patch
+usb-gadget-u_ether-fix-regression-in-setting-fixed-mac-address.patch
diff --git a/queue-5.10/usb-gadget-u_ether-fix-regression-in-setting-fixed-mac-address.patch b/queue-5.10/usb-gadget-u_ether-fix-regression-in-setting-fixed-mac-address.patch
new file mode 100644 (file)
index 0000000..f027828
--- /dev/null
@@ -0,0 +1,79 @@
+From b337af3a4d6147000b7ca6b3438bf5c820849b37 Mon Sep 17 00:00:00 2001
+From: Marian Postevca <posteuca@mutex.one>
+Date: Fri, 3 Jun 2022 18:34:59 +0300
+Subject: usb: gadget: u_ether: fix regression in setting fixed MAC address
+
+From: Marian Postevca <posteuca@mutex.one>
+
+commit b337af3a4d6147000b7ca6b3438bf5c820849b37 upstream.
+
+On systemd systems, setting a fixed MAC address through
+the "dev_addr" module argument fails systematically.
+When checking the MAC address after the interface is created,
+it always ends up with the same MAC address, but one that differs
+from the address supplied as the argument.
+
+This is partially caused by systemd, which by default will
+set an internally generated permanent MAC address for interfaces
+that are marked as having a randomly generated address.
+
+Commit 890d5b40908bfd1a ("usb: gadget: u_ether: fix race in
+setting MAC address in setup phase") did not take into account
+the fact that the interface must be marked as having a set
+MAC address when that address is supplied as a module argument.
+
+Fix this by marking the interface with NET_ADDR_SET when
+the "dev_addr" module argument is supplied.
+
+Fixes: 890d5b40908bfd1a ("usb: gadget: u_ether: fix race in setting MAC address in setup phase")
+Cc: stable@vger.kernel.org
+Signed-off-by: Marian Postevca <posteuca@mutex.one>
+Link: https://lore.kernel.org/r/20220603153459.32722-1-posteuca@mutex.one
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/gadget/function/u_ether.c |   11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -772,9 +772,13 @@ struct eth_dev *gether_setup_name(struct
+       dev->qmult = qmult;
+       snprintf(net->name, sizeof(net->name), "%s%%d", netname);
+-      if (get_ether_addr(dev_addr, net->dev_addr))
++      if (get_ether_addr(dev_addr, net->dev_addr)) {
++              net->addr_assign_type = NET_ADDR_RANDOM;
+               dev_warn(&g->dev,
+                       "using random %s ethernet address\n", "self");
++      } else {
++              net->addr_assign_type = NET_ADDR_SET;
++      }
+       if (get_ether_addr(host_addr, dev->host_mac))
+               dev_warn(&g->dev,
+                       "using random %s ethernet address\n", "host");
+@@ -831,6 +835,9 @@ struct net_device *gether_setup_name_def
+       INIT_LIST_HEAD(&dev->tx_reqs);
+       INIT_LIST_HEAD(&dev->rx_reqs);
++      /* by default we always have a random MAC address */
++      net->addr_assign_type = NET_ADDR_RANDOM;
++
+       skb_queue_head_init(&dev->rx_frames);
+       /* network device setup */
+@@ -868,7 +875,6 @@ int gether_register_netdev(struct net_de
+       g = dev->gadget;
+       memcpy(net->dev_addr, dev->dev_mac, ETH_ALEN);
+-      net->addr_assign_type = NET_ADDR_RANDOM;
+       status = register_netdev(net);
+       if (status < 0) {
+@@ -908,6 +914,7 @@ int gether_set_dev_addr(struct net_devic
+       if (get_ether_addr(dev_addr, new_addr))
+               return -EINVAL;
+       memcpy(dev->dev_mac, new_addr, ETH_ALEN);
++      net->addr_assign_type = NET_ADDR_SET;
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(gether_set_dev_addr);
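
Read without the diff markup, the first hunk above now does roughly the
following in gether_setup_name() (a paraphrased sketch of the fixed path,
not a verbatim copy of the file):

        /* was a MAC supplied via the "dev_addr" module argument? */
        if (get_ether_addr(dev_addr, net->dev_addr)) {
                /* no, or it did not parse: keep a randomly generated MAC */
                net->addr_assign_type = NET_ADDR_RANDOM;
                dev_warn(&g->dev,
                        "using random %s ethernet address\n", "self");
        } else {
                /* yes: mark the address as administratively set */
                net->addr_assign_type = NET_ADDR_SET;
        }

The remaining hunks give the configfs path the same treatment: the
net_device defaults to NET_ADDR_RANDOM when it is allocated,
gether_register_netdev() no longer overwrites the assignment type, and
gether_set_dev_addr() upgrades it to NET_ADDR_SET once an explicit address
is configured. With NET_ADDR_SET reported (userspace can read it from
/sys/class/net/<iface>/addr_assign_type), systemd's default MAC address
policy leaves the address alone instead of replacing what it takes to be a
random one.
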
diff --git a/queue-5.10/zonefs-fix-zonefs_iomap_begin-for-reads.patch b/queue-5.10/zonefs-fix-zonefs_iomap_begin-for-reads.patch
new file mode 100644 (file)
index 0000000..bca0042
--- /dev/null
@@ -0,0 +1,257 @@
+From c1c1204c0d0c1dccc1310b9277fb2bd8b663d8fe Mon Sep 17 00:00:00 2001
+From: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Date: Mon, 23 May 2022 16:29:10 +0900
+Subject: zonefs: fix zonefs_iomap_begin() for reads
+
+From: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+
+commit c1c1204c0d0c1dccc1310b9277fb2bd8b663d8fe upstream.
+
+If a readahead is issued to a sequential zone file with an offset
+exactly equal to the current file size, the iomap type is set to
+IOMAP_UNWRITTEN, which will prevent an IO, but the iomap length is
+calculated as 0. This causes a WARN_ON() in iomap_iter():
+
+[17309.548939] WARNING: CPU: 3 PID: 2137 at fs/iomap/iter.c:34 iomap_iter+0x9cf/0xe80
+[...]
+[17309.650907] RIP: 0010:iomap_iter+0x9cf/0xe80
+[...]
+[17309.754560] Call Trace:
+[17309.757078]  <TASK>
+[17309.759240]  ? lock_is_held_type+0xd8/0x130
+[17309.763531]  iomap_readahead+0x1a8/0x870
+[17309.767550]  ? iomap_read_folio+0x4c0/0x4c0
+[17309.771817]  ? lockdep_hardirqs_on_prepare+0x400/0x400
+[17309.778848]  ? lock_release+0x370/0x750
+[17309.784462]  ? folio_add_lru+0x217/0x3f0
+[17309.790220]  ? reacquire_held_locks+0x4e0/0x4e0
+[17309.796543]  read_pages+0x17d/0xb60
+[17309.801854]  ? folio_add_lru+0x238/0x3f0
+[17309.807573]  ? readahead_expand+0x5f0/0x5f0
+[17309.813554]  ? policy_node+0xb5/0x140
+[17309.819018]  page_cache_ra_unbounded+0x27d/0x450
+[17309.825439]  filemap_get_pages+0x500/0x1450
+[17309.831444]  ? filemap_add_folio+0x140/0x140
+[17309.837519]  ? lock_is_held_type+0xd8/0x130
+[17309.843509]  filemap_read+0x28c/0x9f0
+[17309.848953]  ? zonefs_file_read_iter+0x1ea/0x4d0 [zonefs]
+[17309.856162]  ? trace_contention_end+0xd6/0x130
+[17309.862416]  ? __mutex_lock+0x221/0x1480
+[17309.868151]  ? zonefs_file_read_iter+0x166/0x4d0 [zonefs]
+[17309.875364]  ? filemap_get_pages+0x1450/0x1450
+[17309.881647]  ? __mutex_unlock_slowpath+0x15e/0x620
+[17309.888248]  ? wait_for_completion_io_timeout+0x20/0x20
+[17309.895231]  ? lock_is_held_type+0xd8/0x130
+[17309.901115]  ? lock_is_held_type+0xd8/0x130
+[17309.906934]  zonefs_file_read_iter+0x356/0x4d0 [zonefs]
+[17309.913750]  new_sync_read+0x2d8/0x520
+[17309.919035]  ? __x64_sys_lseek+0x1d0/0x1d0
+
+Furthermore, this causes iomap_readahead() to loop forever as
+iomap_readahead_iter() always returns 0, making no progress.
+
+Fix this by treating reads after the file size as access to holes,
+setting the iomap type to IOMAP_HOLE, the iomap addr to IOMAP_NULL_ADDR
+and using the length argument as is for the iomap length. To simplify
+the code with this change, zonefs_iomap_begin() is split into the read
+variant, zonefs_read_iomap_begin() and zonefs_read_iomap_ops, and the
+write variant, zonefs_write_iomap_begin() and zonefs_write_iomap_ops.
+
+Reported-by: Jorgen Hansen <Jorgen.Hansen@wdc.com>
+Fixes: 8dcc1a9d90c1 ("fs: New zonefs file system")
+Signed-off-by: Damien Le Moal <damien.lemoal@opensource.wdc.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Reviewed-by: Jorgen Hansen <Jorgen.Hansen@wdc.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/zonefs/super.c |   92 ++++++++++++++++++++++++++++++++++++------------------
+ 1 file changed, 62 insertions(+), 30 deletions(-)
+
+--- a/fs/zonefs/super.c
++++ b/fs/zonefs/super.c
+@@ -68,15 +68,49 @@ static inline void zonefs_i_size_write(s
+               zi->i_flags &= ~ZONEFS_ZONE_OPEN;
+ }
+-static int zonefs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+-                            unsigned int flags, struct iomap *iomap,
+-                            struct iomap *srcmap)
++static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
++                                 loff_t length, unsigned int flags,
++                                 struct iomap *iomap, struct iomap *srcmap)
+ {
+       struct zonefs_inode_info *zi = ZONEFS_I(inode);
+       struct super_block *sb = inode->i_sb;
+       loff_t isize;
+-      /* All I/Os should always be within the file maximum size */
++      /*
++       * All blocks are always mapped below EOF. If reading past EOF,
++       * act as if there is a hole up to the file maximum size.
++       */
++      mutex_lock(&zi->i_truncate_mutex);
++      iomap->bdev = inode->i_sb->s_bdev;
++      iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
++      isize = i_size_read(inode);
++      if (iomap->offset >= isize) {
++              iomap->type = IOMAP_HOLE;
++              iomap->addr = IOMAP_NULL_ADDR;
++              iomap->length = length;
++      } else {
++              iomap->type = IOMAP_MAPPED;
++              iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
++              iomap->length = isize - iomap->offset;
++      }
++      mutex_unlock(&zi->i_truncate_mutex);
++
++      return 0;
++}
++
++static const struct iomap_ops zonefs_read_iomap_ops = {
++      .iomap_begin    = zonefs_read_iomap_begin,
++};
++
++static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
++                                  loff_t length, unsigned int flags,
++                                  struct iomap *iomap, struct iomap *srcmap)
++{
++      struct zonefs_inode_info *zi = ZONEFS_I(inode);
++      struct super_block *sb = inode->i_sb;
++      loff_t isize;
++
++      /* All write I/Os should always be within the file maximum size */
+       if (WARN_ON_ONCE(offset + length > zi->i_max_size))
+               return -EIO;
+@@ -86,7 +120,7 @@ static int zonefs_iomap_begin(struct ino
+        * operation.
+        */
+       if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
+-                       (flags & IOMAP_WRITE) && !(flags & IOMAP_DIRECT)))
++                       !(flags & IOMAP_DIRECT)))
+               return -EIO;
+       /*
+@@ -95,45 +129,42 @@ static int zonefs_iomap_begin(struct ino
+        * write pointer) and unwriten beyond.
+        */
+       mutex_lock(&zi->i_truncate_mutex);
++      iomap->bdev = inode->i_sb->s_bdev;
++      iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
++      iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
+       isize = i_size_read(inode);
+-      if (offset >= isize)
++      if (iomap->offset >= isize) {
+               iomap->type = IOMAP_UNWRITTEN;
+-      else
++              iomap->length = zi->i_max_size - iomap->offset;
++      } else {
+               iomap->type = IOMAP_MAPPED;
+-      if (flags & IOMAP_WRITE)
+-              length = zi->i_max_size - offset;
+-      else
+-              length = min(length, isize - offset);
++              iomap->length = isize - iomap->offset;
++      }
+       mutex_unlock(&zi->i_truncate_mutex);
+-      iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+-      iomap->length = ALIGN(offset + length, sb->s_blocksize) - iomap->offset;
+-      iomap->bdev = inode->i_sb->s_bdev;
+-      iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
+-
+       return 0;
+ }
+-static const struct iomap_ops zonefs_iomap_ops = {
+-      .iomap_begin    = zonefs_iomap_begin,
++static const struct iomap_ops zonefs_write_iomap_ops = {
++      .iomap_begin    = zonefs_write_iomap_begin,
+ };
+ static int zonefs_readpage(struct file *unused, struct page *page)
+ {
+-      return iomap_readpage(page, &zonefs_iomap_ops);
++      return iomap_readpage(page, &zonefs_read_iomap_ops);
+ }
+ static void zonefs_readahead(struct readahead_control *rac)
+ {
+-      iomap_readahead(rac, &zonefs_iomap_ops);
++      iomap_readahead(rac, &zonefs_read_iomap_ops);
+ }
+ /*
+  * Map blocks for page writeback. This is used only on conventional zone files,
+  * which implies that the page range can only be within the fixed inode size.
+  */
+-static int zonefs_map_blocks(struct iomap_writepage_ctx *wpc,
+-                           struct inode *inode, loff_t offset)
++static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
++                                 struct inode *inode, loff_t offset)
+ {
+       struct zonefs_inode_info *zi = ZONEFS_I(inode);
+@@ -147,12 +178,12 @@ static int zonefs_map_blocks(struct ioma
+           offset < wpc->iomap.offset + wpc->iomap.length)
+               return 0;
+-      return zonefs_iomap_begin(inode, offset, zi->i_max_size - offset,
+-                                IOMAP_WRITE, &wpc->iomap, NULL);
++      return zonefs_write_iomap_begin(inode, offset, zi->i_max_size - offset,
++                                      IOMAP_WRITE, &wpc->iomap, NULL);
+ }
+ static const struct iomap_writeback_ops zonefs_writeback_ops = {
+-      .map_blocks             = zonefs_map_blocks,
++      .map_blocks             = zonefs_write_map_blocks,
+ };
+ static int zonefs_writepage(struct page *page, struct writeback_control *wbc)
+@@ -182,7 +213,8 @@ static int zonefs_swap_activate(struct s
+               return -EINVAL;
+       }
+-      return iomap_swapfile_activate(sis, swap_file, span, &zonefs_iomap_ops);
++      return iomap_swapfile_activate(sis, swap_file, span,
++                                     &zonefs_read_iomap_ops);
+ }
+ static const struct address_space_operations zonefs_file_aops = {
+@@ -612,7 +644,7 @@ static vm_fault_t zonefs_filemap_page_mk
+       /* Serialize against truncates */
+       down_read(&zi->i_mmap_sem);
+-      ret = iomap_page_mkwrite(vmf, &zonefs_iomap_ops);
++      ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
+       up_read(&zi->i_mmap_sem);
+       sb_end_pagefault(inode->i_sb);
+@@ -869,7 +901,7 @@ static ssize_t zonefs_file_dio_write(str
+       if (append)
+               ret = zonefs_file_dio_append(iocb, from);
+       else
+-              ret = iomap_dio_rw(iocb, from, &zonefs_iomap_ops,
++              ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
+                                  &zonefs_write_dio_ops, sync);
+       if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
+           (ret > 0 || ret == -EIOCBQUEUED)) {
+@@ -911,7 +943,7 @@ static ssize_t zonefs_file_buffered_writ
+       if (ret <= 0)
+               goto inode_unlock;
+-      ret = iomap_file_buffered_write(iocb, from, &zonefs_iomap_ops);
++      ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
+       if (ret > 0)
+               iocb->ki_pos += ret;
+       else if (ret == -EIO)
+@@ -1004,7 +1036,7 @@ static ssize_t zonefs_file_read_iter(str
+                       goto inode_unlock;
+               }
+               file_accessed(iocb->ki_filp);
+-              ret = iomap_dio_rw(iocb, to, &zonefs_iomap_ops,
++              ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops,
+                                  &zonefs_read_dio_ops, is_sync_kiocb(iocb));
+       } else {
+               ret = generic_file_read_iter(iocb, to);
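
Reassembled from the added lines, the heart of the fix above, the new
read-side iomap_begin callback, looks roughly as follows (paraphrased from
the patch for readability, not an authoritative copy of fs/zonefs/super.c):

/*
 * Reads are always mapped below EOF; a read at or beyond i_size is treated
 * as a hole of the requested length, so iomap_readahead() always makes
 * forward progress instead of spinning on a zero-length IOMAP_UNWRITTEN
 * extent.
 */
static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
                                   loff_t length, unsigned int flags,
                                   struct iomap *iomap, struct iomap *srcmap)
{
        struct zonefs_inode_info *zi = ZONEFS_I(inode);
        struct super_block *sb = inode->i_sb;
        loff_t isize;

        mutex_lock(&zi->i_truncate_mutex);
        iomap->bdev = inode->i_sb->s_bdev;
        iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
        isize = i_size_read(inode);
        if (iomap->offset >= isize) {
                /* read past EOF: report a hole, never a zero-length extent */
                iomap->type = IOMAP_HOLE;
                iomap->addr = IOMAP_NULL_ADDR;
                iomap->length = length;
        } else {
                /* within EOF: map directly onto the zone's sectors */
                iomap->type = IOMAP_MAPPED;
                iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
                iomap->length = isize - iomap->offset;
        }
        mutex_unlock(&zi->i_truncate_mutex);

        return 0;
}

With this, iomap_readahead_iter() always sees a non-zero extent length for
reads past EOF, so the WARN_ON() in iomap_iter() and the endless readahead
loop described above can no longer trigger.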