From: Greg Kroah-Hartman
Date: Thu, 23 Jun 2022 16:00:51 +0000 (+0200)
Subject: 5.4-stable patches
X-Git-Tag: v4.9.320~19
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=777f916074a8bf40d5f3f5db90c5699cf780627b;p=thirdparty%2Fkernel%2Fstable-queue.git

5.4-stable patches

added patches:
	dm-remove-special-casing-of-bio-based-immutable-singleton-target-on-nvme.patch
	s390-mm-use-non-quiescing-sske-for-kvm-switch-to-keyed-guest.patch
	usb-gadget-u_ether-fix-regression-in-setting-fixed-mac-address.patch
---

diff --git a/queue-5.4/dm-remove-special-casing-of-bio-based-immutable-singleton-target-on-nvme.patch b/queue-5.4/dm-remove-special-casing-of-bio-based-immutable-singleton-target-on-nvme.patch
new file mode 100644
index 00000000000..03f1663ec82
--- /dev/null
+++ b/queue-5.4/dm-remove-special-casing-of-bio-based-immutable-singleton-target-on-nvme.patch
@@ -0,0 +1,254 @@
+From snitzer@kernel.org Thu Jun 23 17:47:22 2022
+From: Mike Snitzer
+Date: Tue, 21 Jun 2022 12:35:04 -0400
+Subject: dm: remove special-casing of bio-based immutable singleton target on NVMe
+To: Greg KH
+Cc: Guenter Roeck, Mike Snitzer, keescook@chromium.org, sarthakkukreti@google.com, stable@vger.kernel.org, Oleksandr Tymoshenko, dm-devel@redhat.com, regressions@lists.linux.dev
+Message-ID:
+Content-Disposition: inline
+
+From: Mike Snitzer
+
+Commit 9c37de297f6590937f95a28bec1b7ac68a38618f upstream.
+
+There is no benefit to DM special-casing NVMe. Remove all code used to
+establish DM_TYPE_NVME_BIO_BASED.
+
+Also, remove 3 'struct mapped_device *md' variables in __map_bio() which
+masked the same variable that is available within __map_bio()'s scope.
+
+Tested-by: Guenter Roeck
+Signed-off-by: Mike Snitzer
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/md/dm-table.c         |   32 +-----------------
+ drivers/md/dm.c               |   73 ++++--------------------------------------
+ include/linux/device-mapper.h |    1 
+ 3 files changed, 9 insertions(+), 97 deletions(-)
+
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -872,8 +872,7 @@ EXPORT_SYMBOL(dm_consume_args);
+ static bool __table_type_bio_based(enum dm_queue_mode table_type)
+ {
+ 	return (table_type == DM_TYPE_BIO_BASED ||
+-		table_type == DM_TYPE_DAX_BIO_BASED ||
+-		table_type == DM_TYPE_NVME_BIO_BASED);
++		table_type == DM_TYPE_DAX_BIO_BASED);
+ }
+ 
+ static bool __table_type_request_based(enum dm_queue_mode table_type)
+@@ -929,8 +928,6 @@ bool dm_table_supports_dax(struct dm_tab
+ 	return true;
+ }
+ 
+-static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
+-
+ static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
+ 				  sector_t start, sector_t len, void *data)
+ {
+@@ -960,7 +957,6 @@ static int dm_table_determine_type(struc
+ 			goto verify_bio_based;
+ 		}
+ 		BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
+-		BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
+ 		goto verify_rq_based;
+ 	}
+ 
+@@ -999,15 +995,6 @@ verify_bio_based:
+ 	if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
+ 	    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
+ 		t->type = DM_TYPE_DAX_BIO_BASED;
+-	} else {
+-		/* Check if upgrading to NVMe bio-based is valid or required */
+-		tgt = dm_table_get_immutable_target(t);
+-		if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
+-			t->type = DM_TYPE_NVME_BIO_BASED;
+-			goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
+-		} else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
+-			t->type = DM_TYPE_NVME_BIO_BASED;
+-		}
+ 	}
+ 	return 0;
+ }
+ 
+@@ -1024,8 +1011,7 @@ verify_rq_based:
+ 	 * (e.g. request completion process for partial completion.)
+ 	 */
+ 	if (t->num_targets > 1) {
+-		DMERR("%s DM doesn't support multiple targets",
+-		      t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
++		DMERR("request-based DM doesn't support multiple targets");
+ 		return -EINVAL;
+ 	}
+ 
+@@ -1714,20 +1700,6 @@ static int device_is_not_random(struct d
+ 	return q && !blk_queue_add_random(q);
+ }
+ 
+-static int device_is_partial_completion(struct dm_target *ti, struct dm_dev *dev,
+-					sector_t start, sector_t len, void *data)
+-{
+-	char b[BDEVNAME_SIZE];
+-
+-	/* For now, NVMe devices are the only devices of this class */
+-	return (strncmp(bdevname(dev->bdev, b), "nvme", 4) != 0);
+-}
+-
+-static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
+-{
+-	return !dm_table_any_dev_attr(t, device_is_partial_completion, NULL);
+-}
+-
+ static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
+ 					 sector_t start, sector_t len, void *data)
+ {
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1000,7 +1000,7 @@ static void clone_endio(struct bio *bio)
+ 	struct mapped_device *md = tio->io->md;
+ 	dm_endio_fn endio = tio->ti->type->end_io;
+ 
+-	if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
++	if (unlikely(error == BLK_STS_TARGET)) {
+ 		if (bio_op(bio) == REQ_OP_DISCARD &&
+ 		    !bio->bi_disk->queue->limits.max_discard_sectors)
+ 			disable_discard(md);
+@@ -1325,7 +1325,6 @@ static blk_qc_t __map_bio(struct dm_targ
+ 	sector = clone->bi_iter.bi_sector;
+ 
+ 	if (unlikely(swap_bios_limit(ti, clone))) {
+-		struct mapped_device *md = io->md;
+ 		int latch = get_swap_bios();
+ 		if (unlikely(latch != md->swap_bios))
+ 			__set_swap_bios_limit(md, latch);
+@@ -1340,24 +1339,17 @@ static blk_qc_t __map_bio(struct dm_targ
+ 		/* the bio has been remapped so dispatch it */
+ 		trace_block_bio_remap(clone->bi_disk->queue, clone,
+ 				      bio_dev(io->orig_bio), sector);
+-		if (md->type == DM_TYPE_NVME_BIO_BASED)
+-			ret = direct_make_request(clone);
+-		else
+-			ret = generic_make_request(clone);
++		ret = generic_make_request(clone);
+ 		break;
+ 	case DM_MAPIO_KILL:
+-		if (unlikely(swap_bios_limit(ti, clone))) {
+-			struct mapped_device *md = io->md;
++		if (unlikely(swap_bios_limit(ti, clone)))
+ 			up(&md->swap_bios_semaphore);
+-		}
+ 		free_tio(tio);
+ 		dec_pending(io, BLK_STS_IOERR);
+ 		break;
+ 	case DM_MAPIO_REQUEUE:
+-		if (unlikely(swap_bios_limit(ti, clone))) {
+-			struct mapped_device *md = io->md;
++		if (unlikely(swap_bios_limit(ti, clone)))
+ 			up(&md->swap_bios_semaphore);
+-		}
+ 		free_tio(tio);
+ 		dec_pending(io, BLK_STS_DM_REQUEUE);
+ 		break;
+@@ -1732,51 +1724,6 @@ static blk_qc_t __split_and_process_bio(
+ 	return ret;
+ }
+ 
+-/*
+- * Optimized variant of __split_and_process_bio that leverages the
+- * fact that targets that use it do _not_ have a need to split bios.
+- */
+-static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
+-			      struct bio *bio, struct dm_target *ti)
+-{
+-	struct clone_info ci;
+-	blk_qc_t ret = BLK_QC_T_NONE;
+-	int error = 0;
+-
+-	init_clone_info(&ci, md, map, bio);
+-
+-	if (bio->bi_opf & REQ_PREFLUSH) {
+-		struct bio flush_bio;
+-
+-		/*
+-		 * Use an on-stack bio for this, it's safe since we don't
+-		 * need to reference it after submit. It's just used as
+-		 * the basis for the clone(s).
+-		 */
+-		bio_init(&flush_bio, NULL, 0);
+-		flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+-		ci.bio = &flush_bio;
+-		ci.sector_count = 0;
+-		error = __send_empty_flush(&ci);
+-		bio_uninit(ci.bio);
+-		/* dec_pending submits any data associated with flush */
+-	} else {
+-		struct dm_target_io *tio;
+-
+-		ci.bio = bio;
+-		ci.sector_count = bio_sectors(bio);
+-		if (__process_abnormal_io(&ci, ti, &error))
+-			goto out;
+-
+-		tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
+-		ret = __clone_and_map_simple_bio(&ci, tio, NULL);
+-	}
+-out:
+-	/* drop the extra reference count */
+-	dec_pending(ci.io, errno_to_blk_status(error));
+-	return ret;
+-}
+-
+ static blk_qc_t dm_process_bio(struct mapped_device *md,
+ 			       struct dm_table *map, struct bio *bio)
+ {
+@@ -1807,8 +1754,6 @@ static blk_qc_t dm_process_bio(struct ma
+ 		/* regular IO is split by __split_and_process_bio */
+ 	}
+ 
+-	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
+-		return __process_bio(md, map, bio, ti);
+ 	return __split_and_process_bio(md, map, bio);
+ }
+ 
+@@ -2200,12 +2145,10 @@ static struct dm_table *__bind(struct ma
+ 	if (request_based)
+ 		dm_stop_queue(q);
+ 
+-	if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
++	if (request_based) {
+ 		/*
+-		 * Leverage the fact that request-based DM targets and
+-		 * NVMe bio based targets are immutable singletons
+-		 * - used to optimize both dm_request_fn and dm_mq_queue_rq;
+-		 * and __process_bio.
++		 * Leverage the fact that request-based DM targets are
++		 * immutable singletons - used to optimize dm_mq_queue_rq.
+ 		 */
+ 		md->immutable_target = dm_table_get_immutable_target(t);
+ 	}
+@@ -2334,7 +2277,6 @@ int dm_setup_md_queue(struct mapped_devi
+ 		break;
+ 	case DM_TYPE_BIO_BASED:
+ 	case DM_TYPE_DAX_BIO_BASED:
+-	case DM_TYPE_NVME_BIO_BASED:
+ 		dm_init_congested_fn(md);
+ 		break;
+ 	case DM_TYPE_NONE:
+@@ -3070,7 +3012,6 @@ struct dm_md_mempools *dm_alloc_md_mempo
+ 	switch (type) {
+ 	case DM_TYPE_BIO_BASED:
+ 	case DM_TYPE_DAX_BIO_BASED:
+-	case DM_TYPE_NVME_BIO_BASED:
+ 		pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
+ 		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
+ 		io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -28,7 +28,6 @@ enum dm_queue_mode {
+ 	DM_TYPE_BIO_BASED = 1,
+ 	DM_TYPE_REQUEST_BASED = 2,
+ 	DM_TYPE_DAX_BIO_BASED = 3,
+-	DM_TYPE_NVME_BIO_BASED = 4,
+ };
+ 
+ typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
diff --git a/queue-5.4/s390-mm-use-non-quiescing-sske-for-kvm-switch-to-keyed-guest.patch b/queue-5.4/s390-mm-use-non-quiescing-sske-for-kvm-switch-to-keyed-guest.patch
new file mode 100644
index 00000000000..da0710391dc
--- /dev/null
+++ b/queue-5.4/s390-mm-use-non-quiescing-sske-for-kvm-switch-to-keyed-guest.patch
@@ -0,0 +1,35 @@
+From 3ae11dbcfac906a8c3a480e98660a823130dc16a Mon Sep 17 00:00:00 2001
+From: Christian Borntraeger
+Date: Mon, 30 May 2022 11:27:06 +0200
+Subject: s390/mm: use non-quiescing sske for KVM switch to keyed guest
+
+From: Christian Borntraeger
+
+commit 3ae11dbcfac906a8c3a480e98660a823130dc16a upstream.
+
+The switch to a keyed guest does not require a classic sske as the other
+guest CPUs are not accessing the key before the switch is complete.
+By using the NQ SSKE things are faster especially with multiple guests.
+
+Signed-off-by: Christian Borntraeger
+Suggested-by: Janis Schoetterl-Glausch
+Reviewed-by: Claudio Imbrenda
+Link: https://lore.kernel.org/r/20220530092706.11637-3-borntraeger@linux.ibm.com
+Signed-off-by: Christian Borntraeger
+Signed-off-by: Heiko Carstens
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/s390/mm/pgtable.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -716,7 +716,7 @@ void ptep_zap_key(struct mm_struct *mm,
+ 	pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
+ 	ptev = pte_val(*ptep);
+ 	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
+-		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
++		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
+ 	pgste_set_unlock(ptep, pgste);
+ 	preempt_enable();
+ }
diff --git a/queue-5.4/series b/queue-5.4/series
index e69de29bb2d..d97dac12bea 100644
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -0,0 +1,3 @@
+s390-mm-use-non-quiescing-sske-for-kvm-switch-to-keyed-guest.patch
+dm-remove-special-casing-of-bio-based-immutable-singleton-target-on-nvme.patch
+usb-gadget-u_ether-fix-regression-in-setting-fixed-mac-address.patch
diff --git a/queue-5.4/usb-gadget-u_ether-fix-regression-in-setting-fixed-mac-address.patch b/queue-5.4/usb-gadget-u_ether-fix-regression-in-setting-fixed-mac-address.patch
new file mode 100644
index 00000000000..f0278283c74
--- /dev/null
+++ b/queue-5.4/usb-gadget-u_ether-fix-regression-in-setting-fixed-mac-address.patch
@@ -0,0 +1,79 @@
+From b337af3a4d6147000b7ca6b3438bf5c820849b37 Mon Sep 17 00:00:00 2001
+From: Marian Postevca
+Date: Fri, 3 Jun 2022 18:34:59 +0300
+Subject: usb: gadget: u_ether: fix regression in setting fixed MAC address
+
+From: Marian Postevca
+
+commit b337af3a4d6147000b7ca6b3438bf5c820849b37 upstream.
+
+In systemd systems setting a fixed MAC address through
+the "dev_addr" module argument fails systematically.
+When checking the MAC address after the interface is created
+it always has the same but different MAC address to the one
+supplied as argument.
+
+This is partially caused by systemd which by default will
+set an internally generated permanent MAC address for interfaces
+that are marked as having a randomly generated address.
+
+Commit 890d5b40908bfd1a ("usb: gadget: u_ether: fix race in
+setting MAC address in setup phase") didn't take into account
+the fact that the interface must be marked as having a set
+MAC address when it's set as module argument.
+
+Fixed by marking the interface with NET_ADDR_SET when
+the "dev_addr" module argument is supplied.
+
+Fixes: 890d5b40908bfd1a ("usb: gadget: u_ether: fix race in setting MAC address in setup phase")
+Cc: stable@vger.kernel.org
+Signed-off-by: Marian Postevca
+Link: https://lore.kernel.org/r/20220603153459.32722-1-posteuca@mutex.one
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/usb/gadget/function/u_ether.c |   11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/gadget/function/u_ether.c
++++ b/drivers/usb/gadget/function/u_ether.c
+@@ -772,9 +772,13 @@ struct eth_dev *gether_setup_name(struct
+ 	dev->qmult = qmult;
+ 	snprintf(net->name, sizeof(net->name), "%s%%d", netname);
+ 
+-	if (get_ether_addr(dev_addr, net->dev_addr))
++	if (get_ether_addr(dev_addr, net->dev_addr)) {
++		net->addr_assign_type = NET_ADDR_RANDOM;
+ 		dev_warn(&g->dev,
+ 			"using random %s ethernet address\n", "self");
++	} else {
++		net->addr_assign_type = NET_ADDR_SET;
++	}
+ 	if (get_ether_addr(host_addr, dev->host_mac))
+ 		dev_warn(&g->dev,
+ 			"using random %s ethernet address\n", "host");
+@@ -831,6 +835,9 @@ struct net_device *gether_setup_name_def
+ 	INIT_LIST_HEAD(&dev->tx_reqs);
+ 	INIT_LIST_HEAD(&dev->rx_reqs);
+ 
++	/* by default we always have a random MAC address */
++	net->addr_assign_type = NET_ADDR_RANDOM;
++
+ 	skb_queue_head_init(&dev->rx_frames);
+ 
+ 	/* network device setup */
+@@ -868,7 +875,6 @@ int gether_register_netdev(struct net_de
+ 	g = dev->gadget;
+ 
+ 	memcpy(net->dev_addr, dev->dev_mac, ETH_ALEN);
+-	net->addr_assign_type = NET_ADDR_RANDOM;
+ 
+ 	status = register_netdev(net);
+ 	if (status < 0) {
+@@ -908,6 +914,7 @@ int gether_set_dev_addr(struct net_devic
+ 	if (get_ether_addr(dev_addr, new_addr))
+ 		return -EINVAL;
+ 	memcpy(dev->dev_mac, new_addr, ETH_ALEN);
++	net->addr_assign_type = NET_ADDR_SET;
+ 	return 0;
+ }
+ EXPORT_SYMBOL_GPL(gether_set_dev_addr);