From: Greg Kroah-Hartman Date: Mon, 11 Dec 2023 12:57:23 +0000 (+0100) Subject: 6.1-stable patches X-Git-Tag: v4.14.333~37 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=17dd7fb7805cf7cc30b70f84c315960c0f7adfa7;p=thirdparty%2Fkernel%2Fstable-queue.git 6.1-stable patches added patches: arm-pl011-fix-dma-support.patch cifs-fix-flushing-invalidation-and-file-size-with-copy_file_range.patch cifs-fix-flushing-invalidation-and-file-size-with-ficlone.patch cifs-fix-non-availability-of-dedup-breaking-generic-304.patch devcoredump-send-uevent-once-devcd-is-ready.patch kvm-s390-mm-properly-reset-no-dat.patch kvm-svm-update-efer-software-model-on-cr0-trap-for-sev-es.patch mips-kernel-clear-fpu-states-when-setting-up-kernel-threads.patch mips-loongson64-enable-dma-noncoherent-support.patch mips-loongson64-handle-more-memory-types-passed-from-firmware.patch mips-loongson64-reserve-vgabios-memory-on-boot.patch parport-add-support-for-brainboxes-ix-uc-px-parallel-cards.patch revert-xhci-loosen-rpm-as-default-policy-to-cover-for-amd-xhc-1.1.patch serial-8250-8250_omap-clear-uart_has_rhr_it_dis-bit.patch serial-8250-8250_omap-do-not-start-rx-dma-on-thri-interrupt.patch serial-8250_dw-add-acpi-id-for-granite-rapids-d-uart.patch serial-8250_omap-add-earlycon-support-for-the-am654-uart-controller.patch serial-sc16is7xx-address-rx-timeout-interrupt-errata.patch smb-client-fix-potential-null-deref-in-parse_dfs_referrals.patch usb-gadget-core-adjust-uevent-timing-on-gadget-unbind.patch usb-gadget-f_hid-fix-report-descriptor-allocation.patch usb-typec-class-fix-typec_altmode_put_partner-to-put-plugs.patch x86-cpu-amd-check-vendor-in-the-amd-microcode-callback.patch --- diff --git a/queue-6.1/arm-pl011-fix-dma-support.patch b/queue-6.1/arm-pl011-fix-dma-support.patch new file mode 100644 index 00000000000..2e404b56a4e --- /dev/null +++ b/queue-6.1/arm-pl011-fix-dma-support.patch @@ -0,0 +1,335 @@ +From 58ac1b3799799069d53f5bf95c093f2fe8dd3cc5 Mon Sep 17 00:00:00 2001 +From: Arnd Bergmann +Date: Wed, 22 Nov 2023 18:15:03 +0100 +Subject: ARM: PL011: Fix DMA support + +From: Arnd Bergmann + +commit 58ac1b3799799069d53f5bf95c093f2fe8dd3cc5 upstream. + +Since there is no guarantee that the memory returned by +dma_alloc_coherent() is associated with a 'struct page', using the +architecture specific phys_to_page() is wrong, but using +virt_to_page() would be as well. + +Stop using sg lists altogether and just use the *_single() functions +instead. This also simplifies the code a bit since the scatterlists in +this driver always have only one entry anyway. 
+ +https://lore.kernel.org/lkml/86db0fe5-930d-4cbb-bd7d-03367da38951@app.fastmail.com/ + Use consistent names for dma buffers + +gc: Add a commit log from the initial thread: +https://lore.kernel.org/lkml/86db0fe5-930d-4cbb-bd7d-03367da38951@app.fastmail.com/ + Use consistent names for dma buffers + +Fixes: cb06ff102e2d7 ("ARM: PL011: Add support for Rx DMA buffer polling.") +Signed-off-by: Arnd Bergmann +Tested-by: Gregory CLEMENT +Signed-off-by: Gregory CLEMENT +Cc: stable +Link: https://lore.kernel.org/r/20231122171503.235649-1-gregory.clement@bootlin.com +Signed-off-by: Greg Kroah-Hartman +--- + drivers/tty/serial/amba-pl011.c | 112 +++++++++++++++++++--------------------- + 1 file changed, 54 insertions(+), 58 deletions(-) + +--- a/drivers/tty/serial/amba-pl011.c ++++ b/drivers/tty/serial/amba-pl011.c +@@ -218,17 +218,18 @@ static struct vendor_data vendor_st = { + + /* Deals with DMA transactions */ + +-struct pl011_sgbuf { +- struct scatterlist sg; +- char *buf; ++struct pl011_dmabuf { ++ dma_addr_t dma; ++ size_t len; ++ char *buf; + }; + + struct pl011_dmarx_data { + struct dma_chan *chan; + struct completion complete; + bool use_buf_b; +- struct pl011_sgbuf sgbuf_a; +- struct pl011_sgbuf sgbuf_b; ++ struct pl011_dmabuf dbuf_a; ++ struct pl011_dmabuf dbuf_b; + dma_cookie_t cookie; + bool running; + struct timer_list timer; +@@ -241,7 +242,8 @@ struct pl011_dmarx_data { + + struct pl011_dmatx_data { + struct dma_chan *chan; +- struct scatterlist sg; ++ dma_addr_t dma; ++ size_t len; + char *buf; + bool queued; + }; +@@ -365,32 +367,24 @@ static int pl011_fifo_to_tty(struct uart + + #define PL011_DMA_BUFFER_SIZE PAGE_SIZE + +-static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg, ++static int pl011_dmabuf_init(struct dma_chan *chan, struct pl011_dmabuf *db, + enum dma_data_direction dir) + { +- dma_addr_t dma_addr; +- +- sg->buf = dma_alloc_coherent(chan->device->dev, +- PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL); +- if (!sg->buf) ++ db->buf = dma_alloc_coherent(chan->device->dev, PL011_DMA_BUFFER_SIZE, ++ &db->dma, GFP_KERNEL); ++ if (!db->buf) + return -ENOMEM; +- +- sg_init_table(&sg->sg, 1); +- sg_set_page(&sg->sg, phys_to_page(dma_addr), +- PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr)); +- sg_dma_address(&sg->sg) = dma_addr; +- sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE; ++ db->len = PL011_DMA_BUFFER_SIZE; + + return 0; + } + +-static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg, ++static void pl011_dmabuf_free(struct dma_chan *chan, struct pl011_dmabuf *db, + enum dma_data_direction dir) + { +- if (sg->buf) { ++ if (db->buf) { + dma_free_coherent(chan->device->dev, +- PL011_DMA_BUFFER_SIZE, sg->buf, +- sg_dma_address(&sg->sg)); ++ PL011_DMA_BUFFER_SIZE, db->buf, db->dma); + } + } + +@@ -551,8 +545,8 @@ static void pl011_dma_tx_callback(void * + + spin_lock_irqsave(&uap->port.lock, flags); + if (uap->dmatx.queued) +- dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1, +- DMA_TO_DEVICE); ++ dma_unmap_single(dmatx->chan->device->dev, dmatx->dma, ++ dmatx->len, DMA_TO_DEVICE); + + dmacr = uap->dmacr; + uap->dmacr = dmacr & ~UART011_TXDMAE; +@@ -638,18 +632,19 @@ static int pl011_dma_tx_refill(struct ua + memcpy(&dmatx->buf[first], &xmit->buf[0], second); + } + +- dmatx->sg.length = count; +- +- if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) { ++ dmatx->len = count; ++ dmatx->dma = dma_map_single(dma_dev->dev, dmatx->buf, count, ++ DMA_TO_DEVICE); ++ if (dmatx->dma == DMA_MAPPING_ERROR) { + uap->dmatx.queued = 
false; + dev_dbg(uap->port.dev, "unable to map TX DMA\n"); + return -EBUSY; + } + +- desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV, ++ desc = dmaengine_prep_slave_single(chan, dmatx->dma, dmatx->len, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { +- dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE); ++ dma_unmap_single(dma_dev->dev, dmatx->dma, dmatx->len, DMA_TO_DEVICE); + uap->dmatx.queued = false; + /* + * If DMA cannot be used right now, we complete this +@@ -813,8 +808,8 @@ __acquires(&uap->port.lock) + dmaengine_terminate_async(uap->dmatx.chan); + + if (uap->dmatx.queued) { +- dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1, +- DMA_TO_DEVICE); ++ dma_unmap_single(uap->dmatx.chan->device->dev, uap->dmatx.dma, ++ uap->dmatx.len, DMA_TO_DEVICE); + uap->dmatx.queued = false; + uap->dmacr &= ~UART011_TXDMAE; + pl011_write(uap->dmacr, uap, REG_DMACR); +@@ -828,15 +823,15 @@ static int pl011_dma_rx_trigger_dma(stru + struct dma_chan *rxchan = uap->dmarx.chan; + struct pl011_dmarx_data *dmarx = &uap->dmarx; + struct dma_async_tx_descriptor *desc; +- struct pl011_sgbuf *sgbuf; ++ struct pl011_dmabuf *dbuf; + + if (!rxchan) + return -EIO; + + /* Start the RX DMA job */ +- sgbuf = uap->dmarx.use_buf_b ? +- &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; +- desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1, ++ dbuf = uap->dmarx.use_buf_b ? ++ &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a; ++ desc = dmaengine_prep_slave_single(rxchan, dbuf->dma, dbuf->len, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + /* +@@ -876,8 +871,8 @@ static void pl011_dma_rx_chars(struct ua + bool readfifo) + { + struct tty_port *port = &uap->port.state->port; +- struct pl011_sgbuf *sgbuf = use_buf_b ? +- &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; ++ struct pl011_dmabuf *dbuf = use_buf_b ? ++ &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a; + int dma_count = 0; + u32 fifotaken = 0; /* only used for vdbg() */ + +@@ -886,7 +881,7 @@ static void pl011_dma_rx_chars(struct ua + + if (uap->dmarx.poll_rate) { + /* The data can be taken by polling */ +- dmataken = sgbuf->sg.length - dmarx->last_residue; ++ dmataken = dbuf->len - dmarx->last_residue; + /* Recalculate the pending size */ + if (pending >= dmataken) + pending -= dmataken; +@@ -900,7 +895,7 @@ static void pl011_dma_rx_chars(struct ua + * Note that tty_insert_flip_buf() tries to take as many chars + * as it can. + */ +- dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken, ++ dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken, + pending); + + uap->port.icount.rx += dma_count; +@@ -911,7 +906,7 @@ static void pl011_dma_rx_chars(struct ua + + /* Reset the last_residue for Rx DMA poll */ + if (uap->dmarx.poll_rate) +- dmarx->last_residue = sgbuf->sg.length; ++ dmarx->last_residue = dbuf->len; + + /* + * Only continue with trying to read the FIFO if all DMA chars have +@@ -946,8 +941,8 @@ static void pl011_dma_rx_irq(struct uart + { + struct pl011_dmarx_data *dmarx = &uap->dmarx; + struct dma_chan *rxchan = dmarx->chan; +- struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ? +- &dmarx->sgbuf_b : &dmarx->sgbuf_a; ++ struct pl011_dmabuf *dbuf = dmarx->use_buf_b ? 
++ &dmarx->dbuf_b : &dmarx->dbuf_a; + size_t pending; + struct dma_tx_state state; + enum dma_status dmastat; +@@ -969,7 +964,7 @@ static void pl011_dma_rx_irq(struct uart + pl011_write(uap->dmacr, uap, REG_DMACR); + uap->dmarx.running = false; + +- pending = sgbuf->sg.length - state.residue; ++ pending = dbuf->len - state.residue; + BUG_ON(pending > PL011_DMA_BUFFER_SIZE); + /* Then we terminate the transfer - we now know our residue */ + dmaengine_terminate_all(rxchan); +@@ -996,8 +991,8 @@ static void pl011_dma_rx_callback(void * + struct pl011_dmarx_data *dmarx = &uap->dmarx; + struct dma_chan *rxchan = dmarx->chan; + bool lastbuf = dmarx->use_buf_b; +- struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ? +- &dmarx->sgbuf_b : &dmarx->sgbuf_a; ++ struct pl011_dmabuf *dbuf = dmarx->use_buf_b ? ++ &dmarx->dbuf_b : &dmarx->dbuf_a; + size_t pending; + struct dma_tx_state state; + int ret; +@@ -1015,7 +1010,7 @@ static void pl011_dma_rx_callback(void * + * the DMA irq handler. So we check the residue here. + */ + rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state); +- pending = sgbuf->sg.length - state.residue; ++ pending = dbuf->len - state.residue; + BUG_ON(pending > PL011_DMA_BUFFER_SIZE); + /* Then we terminate the transfer - we now know our residue */ + dmaengine_terminate_all(rxchan); +@@ -1067,16 +1062,16 @@ static void pl011_dma_rx_poll(struct tim + unsigned long flags; + unsigned int dmataken = 0; + unsigned int size = 0; +- struct pl011_sgbuf *sgbuf; ++ struct pl011_dmabuf *dbuf; + int dma_count; + struct dma_tx_state state; + +- sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a; ++ dbuf = dmarx->use_buf_b ? &uap->dmarx.dbuf_b : &uap->dmarx.dbuf_a; + rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state); + if (likely(state.residue < dmarx->last_residue)) { +- dmataken = sgbuf->sg.length - dmarx->last_residue; ++ dmataken = dbuf->len - dmarx->last_residue; + size = dmarx->last_residue - state.residue; +- dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken, ++ dma_count = tty_insert_flip_string(port, dbuf->buf + dmataken, + size); + if (dma_count == size) + dmarx->last_residue = state.residue; +@@ -1123,7 +1118,7 @@ static void pl011_dma_startup(struct uar + return; + } + +- sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE); ++ uap->dmatx.len = PL011_DMA_BUFFER_SIZE; + + /* The DMA buffer is now the FIFO the TTY subsystem can use */ + uap->port.fifosize = PL011_DMA_BUFFER_SIZE; +@@ -1133,7 +1128,7 @@ static void pl011_dma_startup(struct uar + goto skip_rx; + + /* Allocate and map DMA RX buffers */ +- ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a, ++ ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_a, + DMA_FROM_DEVICE); + if (ret) { + dev_err(uap->port.dev, "failed to init DMA %s: %d\n", +@@ -1141,12 +1136,12 @@ static void pl011_dma_startup(struct uar + goto skip_rx; + } + +- ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b, ++ ret = pl011_dmabuf_init(uap->dmarx.chan, &uap->dmarx.dbuf_b, + DMA_FROM_DEVICE); + if (ret) { + dev_err(uap->port.dev, "failed to init DMA %s: %d\n", + "RX buffer B", ret); +- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, ++ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, + DMA_FROM_DEVICE); + goto skip_rx; + } +@@ -1200,8 +1195,9 @@ static void pl011_dma_shutdown(struct ua + /* In theory, this should already be done by pl011_dma_flush_buffer */ + dmaengine_terminate_all(uap->dmatx.chan); + if (uap->dmatx.queued) { +- 
dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1, +- DMA_TO_DEVICE); ++ dma_unmap_single(uap->dmatx.chan->device->dev, ++ uap->dmatx.dma, uap->dmatx.len, ++ DMA_TO_DEVICE); + uap->dmatx.queued = false; + } + +@@ -1212,8 +1208,8 @@ static void pl011_dma_shutdown(struct ua + if (uap->using_rx_dma) { + dmaengine_terminate_all(uap->dmarx.chan); + /* Clean up the RX DMA */ +- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE); +- pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE); ++ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_a, DMA_FROM_DEVICE); ++ pl011_dmabuf_free(uap->dmarx.chan, &uap->dmarx.dbuf_b, DMA_FROM_DEVICE); + if (uap->dmarx.poll_rate) + del_timer_sync(&uap->dmarx.timer); + uap->using_rx_dma = false; diff --git a/queue-6.1/cifs-fix-flushing-invalidation-and-file-size-with-copy_file_range.patch b/queue-6.1/cifs-fix-flushing-invalidation-and-file-size-with-copy_file_range.patch new file mode 100644 index 00000000000..8a22269b9e3 --- /dev/null +++ b/queue-6.1/cifs-fix-flushing-invalidation-and-file-size-with-copy_file_range.patch @@ -0,0 +1,204 @@ +From 7b2404a886f8b91250c31855d287e632123e1746 Mon Sep 17 00:00:00 2001 +From: David Howells +Date: Fri, 1 Dec 2023 00:22:00 +0000 +Subject: cifs: Fix flushing, invalidation and file size with copy_file_range() + +From: David Howells + +commit 7b2404a886f8b91250c31855d287e632123e1746 upstream. + +Fix a number of issues in the cifs filesystem implementation of the +copy_file_range() syscall in cifs_file_copychunk_range(). + +Firstly, the invalidation of the destination range is handled incorrectly: +We shouldn't just invalidate the whole file as dirty data in the file may +get lost and we can't just call truncate_inode_pages_range() to invalidate +the destination range as that will erase parts of a partial folio at each +end whilst invalidating and discarding all the folios in the middle. We +need to force all the folios covering the range to be reloaded, but we +mustn't lose dirty data in them that's not in the destination range. + +Further, we shouldn't simply round out the range to PAGE_SIZE at each end +as cifs should move to support multipage folios. + +Secondly, there's an issue whereby a write may have extended the file +locally, but not have been written back yet. This can leaves the local +idea of the EOF at a later point than the server's EOF. If a copy request +is issued, this will fail on the server with STATUS_INVALID_VIEW_SIZE +(which gets translated to -EIO locally) if the copy source extends past the +server's EOF. + +Fix this by: + + (0) Flush the source region (already done). The flush does nothing and + the EOF isn't moved if the source region has no dirty data. + + (1) Move the EOF to the end of the source region if it isn't already at + least at this point. If we can't do this, for instance if the server + doesn't support it, just flush the entire source file. + + (2) Find the folio (if present) at each end of the range, flushing it and + increasing the region-to-be-invalidated to cover those in their + entirety. + + (3) Fully discard all the folios covering the range as we want them to be + reloaded. + + (4) Then perform the copy. + +Thirdly, set i_size after doing the copychunk_range operation as this value +may be used by various things internally. stat() hides the issue because +setting ->time to 0 causes cifs_getatr() to revalidate the attributes. + +These were causing the generic/075 xfstest to fail. 
+ +Fixes: 620d8745b35d ("Introduce cifs_copy_file_range()") +Cc: stable@vger.kernel.org +Signed-off-by: David Howells +cc: Paulo Alcantara +cc: Shyam Prasad N +cc: Rohith Surabattula +cc: Matthew Wilcox +cc: Jeff Layton +cc: linux-cifs@vger.kernel.org +cc: linux-mm@kvack.org +Signed-off-by: David Howells +Signed-off-by: Steve French +Signed-off-by: Greg Kroah-Hartman +--- + fs/smb/client/cifsfs.c | 102 +++++++++++++++++++++++++++++++++++++++++++++++-- + 1 file changed, 99 insertions(+), 3 deletions(-) + +--- a/fs/smb/client/cifsfs.c ++++ b/fs/smb/client/cifsfs.c +@@ -1191,6 +1191,72 @@ const struct inode_operations cifs_symli + .listxattr = cifs_listxattr, + }; + ++/* ++ * Advance the EOF marker to after the source range. ++ */ ++static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi, ++ struct cifs_tcon *src_tcon, ++ unsigned int xid, loff_t src_end) ++{ ++ struct cifsFileInfo *writeable_srcfile; ++ int rc = -EINVAL; ++ ++ writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY); ++ if (writeable_srcfile) { ++ if (src_tcon->ses->server->ops->set_file_size) ++ rc = src_tcon->ses->server->ops->set_file_size( ++ xid, src_tcon, writeable_srcfile, ++ src_inode->i_size, true /* no need to set sparse */); ++ else ++ rc = -ENOSYS; ++ cifsFileInfo_put(writeable_srcfile); ++ cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc); ++ } ++ ++ if (rc < 0) ++ goto set_failed; ++ ++ netfs_resize_file(&src_cifsi->netfs, src_end); ++ fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end); ++ return 0; ++ ++set_failed: ++ return filemap_write_and_wait(src_inode->i_mapping); ++} ++ ++/* ++ * Flush out either the folio that overlaps the beginning of a range in which ++ * pos resides or the folio that overlaps the end of a range unless that folio ++ * is entirely within the range we're going to invalidate. We extend the flush ++ * bounds to encompass the folio. ++ */ ++static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend, ++ bool first) ++{ ++ struct folio *folio; ++ unsigned long long fpos, fend; ++ pgoff_t index = pos / PAGE_SIZE; ++ size_t size; ++ int rc = 0; ++ ++ folio = filemap_get_folio(inode->i_mapping, index); ++ if (IS_ERR(folio)) ++ return 0; ++ ++ size = folio_size(folio); ++ fpos = folio_pos(folio); ++ fend = fpos + size - 1; ++ *_fstart = min_t(unsigned long long, *_fstart, fpos); ++ *_fend = max_t(unsigned long long, *_fend, fend); ++ if ((first && pos == fpos) || (!first && pos == fend)) ++ goto out; ++ ++ rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend); ++out: ++ folio_put(folio); ++ return rc; ++} ++ + static loff_t cifs_remap_file_range(struct file *src_file, loff_t off, + struct file *dst_file, loff_t destoff, loff_t len, + unsigned int remap_flags) +@@ -1260,10 +1326,12 @@ ssize_t cifs_file_copychunk_range(unsign + { + struct inode *src_inode = file_inode(src_file); + struct inode *target_inode = file_inode(dst_file); ++ struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode); + struct cifsFileInfo *smb_file_src; + struct cifsFileInfo *smb_file_target; + struct cifs_tcon *src_tcon; + struct cifs_tcon *target_tcon; ++ unsigned long long destend, fstart, fend; + ssize_t rc; + + cifs_dbg(FYI, "copychunk range\n"); +@@ -1303,13 +1371,41 @@ ssize_t cifs_file_copychunk_range(unsign + if (rc) + goto unlock; + +- /* should we flush first and last page first */ +- truncate_inode_pages(&target_inode->i_data, 0); ++ /* The server-side copy will fail if the source crosses the EOF marker. 
++ * Advance the EOF marker after the flush above to the end of the range ++ * if it's short of that. ++ */ ++ if (src_cifsi->server_eof < off + len) { ++ rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len); ++ if (rc < 0) ++ goto unlock; ++ } ++ ++ destend = destoff + len - 1; ++ ++ /* Flush the folios at either end of the destination range to prevent ++ * accidental loss of dirty data outside of the range. ++ */ ++ fstart = destoff; ++ fend = destend; ++ ++ rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true); ++ if (rc) ++ goto unlock; ++ rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false); ++ if (rc) ++ goto unlock; ++ ++ /* Discard all the folios that overlap the destination region. */ ++ truncate_inode_pages_range(&target_inode->i_data, fstart, fend); + + rc = file_modified(dst_file); +- if (!rc) ++ if (!rc) { + rc = target_tcon->ses->server->ops->copychunk_range(xid, + smb_file_src, smb_file_target, off, len, destoff); ++ if (rc > 0 && destoff + rc > i_size_read(target_inode)) ++ truncate_setsize(target_inode, destoff + rc); ++ } + + file_accessed(src_file); + diff --git a/queue-6.1/cifs-fix-flushing-invalidation-and-file-size-with-ficlone.patch b/queue-6.1/cifs-fix-flushing-invalidation-and-file-size-with-ficlone.patch new file mode 100644 index 00000000000..ba70234076f --- /dev/null +++ b/queue-6.1/cifs-fix-flushing-invalidation-and-file-size-with-ficlone.patch @@ -0,0 +1,179 @@ +From c54fc3a4f375663f2361a9cbb2955fb4ef912879 Mon Sep 17 00:00:00 2001 +From: David Howells +Date: Fri, 1 Dec 2023 00:22:01 +0000 +Subject: cifs: Fix flushing, invalidation and file size with FICLONE + +From: David Howells + +commit c54fc3a4f375663f2361a9cbb2955fb4ef912879 upstream. + +Fix a number of issues in the cifs filesystem implementation of the FICLONE +ioctl in cifs_remap_file_range(). This is analogous to the previously +fixed bug in cifs_file_copychunk_range() and can share the helper +functions. + +Firstly, the invalidation of the destination range is handled incorrectly: +We shouldn't just invalidate the whole file as dirty data in the file may +get lost and we can't just call truncate_inode_pages_range() to invalidate +the destination range as that will erase parts of a partial folio at each +end whilst invalidating and discarding all the folios in the middle. We +need to force all the folios covering the range to be reloaded, but we +mustn't lose dirty data in them that's not in the destination range. + +Further, we shouldn't simply round out the range to PAGE_SIZE at each end +as cifs should move to support multipage folios. + +Secondly, there's an issue whereby a write may have extended the file +locally, but not have been written back yet. This can leaves the local +idea of the EOF at a later point than the server's EOF. If a clone request +is issued, this will fail on the server with STATUS_INVALID_VIEW_SIZE +(which gets translated to -EIO locally) if the clone source extends past +the server's EOF. + +Fix this by: + + (0) Flush the source region (already done). The flush does nothing and + the EOF isn't moved if the source region has no dirty data. + + (1) Move the EOF to the end of the source region if it isn't already at + least at this point. If we can't do this, for instance if the server + doesn't support it, just flush the entire source file. + + (2) Find the folio (if present) at each end of the range, flushing it and + increasing the region-to-be-invalidated to cover those in their + entirety. 
+ + (3) Fully discard all the folios covering the range as we want them to be + reloaded. + + (4) Then perform the extent duplication. + +Thirdly, set i_size after doing the duplicate_extents operation as this +value may be used by various things internally. stat() hides the issue +because setting ->time to 0 causes cifs_getatr() to revalidate the +attributes. + +These were causing the cifs/001 xfstest to fail. + +Fixes: 04b38d601239 ("vfs: pull btrfs clone API to vfs layer") +Signed-off-by: David Howells +Cc: stable@vger.kernel.org +cc: Christoph Hellwig +cc: Paulo Alcantara +cc: Shyam Prasad N +cc: Rohith Surabattula +cc: Matthew Wilcox +cc: Jeff Layton +cc: linux-cifs@vger.kernel.org +cc: linux-mm@kvack.org +Signed-off-by: David Howells +Signed-off-by: Steve French +Signed-off-by: Greg Kroah-Hartman +--- + fs/smb/client/cifsfs.c | 68 +++++++++++++++++++++++++++++++++++++++++-------- + 1 file changed, 57 insertions(+), 11 deletions(-) + +--- a/fs/smb/client/cifsfs.c ++++ b/fs/smb/client/cifsfs.c +@@ -1263,9 +1263,12 @@ static loff_t cifs_remap_file_range(stru + { + struct inode *src_inode = file_inode(src_file); + struct inode *target_inode = file_inode(dst_file); ++ struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode); ++ struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode); + struct cifsFileInfo *smb_file_src = src_file->private_data; +- struct cifsFileInfo *smb_file_target; +- struct cifs_tcon *target_tcon; ++ struct cifsFileInfo *smb_file_target = dst_file->private_data; ++ struct cifs_tcon *target_tcon, *src_tcon; ++ unsigned long long destend, fstart, fend, new_size; + unsigned int xid; + int rc; + +@@ -1278,13 +1281,13 @@ static loff_t cifs_remap_file_range(stru + + xid = get_xid(); + +- if (!src_file->private_data || !dst_file->private_data) { ++ if (!smb_file_src || !smb_file_target) { + rc = -EBADF; + cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n"); + goto out; + } + +- smb_file_target = dst_file->private_data; ++ src_tcon = tlink_tcon(smb_file_src->tlink); + target_tcon = tlink_tcon(smb_file_target->tlink); + + /* +@@ -1297,20 +1300,63 @@ static loff_t cifs_remap_file_range(stru + if (len == 0) + len = src_inode->i_size - off; + +- cifs_dbg(FYI, "about to flush pages\n"); +- /* should we flush first and last page first */ +- truncate_inode_pages_range(&target_inode->i_data, destoff, +- PAGE_ALIGN(destoff + len)-1); ++ cifs_dbg(FYI, "clone range\n"); ++ ++ /* Flush the source buffer */ ++ rc = filemap_write_and_wait_range(src_inode->i_mapping, off, ++ off + len - 1); ++ if (rc) ++ goto unlock; ++ ++ /* The server-side copy will fail if the source crosses the EOF marker. ++ * Advance the EOF marker after the flush above to the end of the range ++ * if it's short of that. ++ */ ++ if (src_cifsi->netfs.remote_i_size < off + len) { ++ rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len); ++ if (rc < 0) ++ goto unlock; ++ } ++ ++ new_size = destoff + len; ++ destend = destoff + len - 1; + +- if (target_tcon->ses->server->ops->duplicate_extents) ++ /* Flush the folios at either end of the destination range to prevent ++ * accidental loss of dirty data outside of the range. ++ */ ++ fstart = destoff; ++ fend = destend; ++ ++ rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true); ++ if (rc) ++ goto unlock; ++ rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false); ++ if (rc) ++ goto unlock; ++ ++ /* Discard all the folios that overlap the destination region. 
*/ ++ cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend); ++ truncate_inode_pages_range(&target_inode->i_data, fstart, fend); ++ ++ fscache_invalidate(cifs_inode_cookie(target_inode), NULL, ++ i_size_read(target_inode), 0); ++ ++ rc = -EOPNOTSUPP; ++ if (target_tcon->ses->server->ops->duplicate_extents) { + rc = target_tcon->ses->server->ops->duplicate_extents(xid, + smb_file_src, smb_file_target, off, len, destoff); +- else +- rc = -EOPNOTSUPP; ++ if (rc == 0 && new_size > i_size_read(target_inode)) { ++ truncate_setsize(target_inode, new_size); ++ netfs_resize_file(&target_cifsi->netfs, new_size); ++ fscache_resize_cookie(cifs_inode_cookie(target_inode), ++ new_size); ++ } ++ } + + /* force revalidate of size and timestamps of target file now + that target is updated on the server */ + CIFS_I(target_inode)->time = 0; ++unlock: + /* although unlocking in the reverse order from locking is not + strictly necessary here it is a little cleaner to be consistent */ + unlock_two_nondirectories(src_inode, target_inode); diff --git a/queue-6.1/cifs-fix-non-availability-of-dedup-breaking-generic-304.patch b/queue-6.1/cifs-fix-non-availability-of-dedup-breaking-generic-304.patch new file mode 100644 index 00000000000..10b20b49065 --- /dev/null +++ b/queue-6.1/cifs-fix-non-availability-of-dedup-breaking-generic-304.patch @@ -0,0 +1,51 @@ +From 691a41d8da4b34fe72f09393505f55f28a8f34ec Mon Sep 17 00:00:00 2001 +From: David Howells +Date: Mon, 4 Dec 2023 14:01:59 +0000 +Subject: cifs: Fix non-availability of dedup breaking generic/304 + +From: David Howells + +commit 691a41d8da4b34fe72f09393505f55f28a8f34ec upstream. + +Deduplication isn't supported on cifs, but cifs doesn't reject it, instead +treating it as extent duplication/cloning. This can cause generic/304 to go +silly and run for hours on end. + +Fix cifs to indicate EOPNOTSUPP if REMAP_FILE_DEDUP is set in +->remap_file_range(). + +Note that it's unclear whether or not commit b073a08016a1 is meant to cause +cifs to return an error if REMAP_FILE_DEDUP. 
+ +Fixes: b073a08016a1 ("cifs: fix that return -EINVAL when do dedupe operation") +Cc: stable@vger.kernel.org +Suggested-by: Dave Chinner +cc: Xiaoli Feng +cc: Shyam Prasad N +cc: Rohith Surabattula +cc: Jeff Layton +cc: Darrick Wong +cc: fstests@vger.kernel.org +cc: linux-cifs@vger.kernel.org +cc: linux-fsdevel@vger.kernel.org +Link: https://lore.kernel.org/r/3876191.1701555260@warthog.procyon.org.uk/ +Signed-off-by: David Howells +Signed-off-by: Steve French +Signed-off-by: Greg Kroah-Hartman +--- + fs/smb/client/cifsfs.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/fs/smb/client/cifsfs.c ++++ b/fs/smb/client/cifsfs.c +@@ -1203,7 +1203,9 @@ static loff_t cifs_remap_file_range(stru + unsigned int xid; + int rc; + +- if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY)) ++ if (remap_flags & REMAP_FILE_DEDUP) ++ return -EOPNOTSUPP; ++ if (remap_flags & ~REMAP_FILE_ADVISORY) + return -EINVAL; + + cifs_dbg(FYI, "clone range\n"); diff --git a/queue-6.1/devcoredump-send-uevent-once-devcd-is-ready.patch b/queue-6.1/devcoredump-send-uevent-once-devcd-is-ready.patch new file mode 100644 index 00000000000..9a5ae5c7279 --- /dev/null +++ b/queue-6.1/devcoredump-send-uevent-once-devcd-is-ready.patch @@ -0,0 +1,53 @@ +From af54d778a03853801d681c98c0c2a6c316ef9ca7 Mon Sep 17 00:00:00 2001 +From: Mukesh Ojha +Date: Fri, 17 Nov 2023 20:19:32 +0530 +Subject: devcoredump: Send uevent once devcd is ready + +From: Mukesh Ojha + +commit af54d778a03853801d681c98c0c2a6c316ef9ca7 upstream. + +dev_coredumpm() creates a devcoredump device and adds it +to the core kernel framework which eventually end up +sending uevent to the user space and later creates a +symbolic link to the failed device. An application +running in userspace may be interested in this symbolic +link to get the name of the failed device. + +In a issue scenario, once uevent sent to the user space +it start reading '/sys/class/devcoredump/devcdX/failing_device' +to get the actual name of the device which might not been +created and it is in its path of creation. + +To fix this, suppress sending uevent till the failing device +symbolic link gets created and send uevent once symbolic +link is created successfully. 
+ +Fixes: 833c95456a70 ("device coredump: add new device coredump class") +Signed-off-by: Mukesh Ojha +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/1700232572-25823-1-git-send-email-quic_mojha@quicinc.com +Signed-off-by: Greg Kroah-Hartman +--- + drivers/base/devcoredump.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/drivers/base/devcoredump.c ++++ b/drivers/base/devcoredump.c +@@ -363,6 +363,7 @@ void dev_coredumpm(struct device *dev, s + devcd->devcd_dev.class = &devcd_class; + + mutex_lock(&devcd->mutex); ++ dev_set_uevent_suppress(&devcd->devcd_dev, true); + if (device_add(&devcd->devcd_dev)) + goto put_device; + +@@ -377,6 +378,8 @@ void dev_coredumpm(struct device *dev, s + "devcoredump")) + dev_warn(dev, "devcoredump create_link failed\n"); + ++ dev_set_uevent_suppress(&devcd->devcd_dev, false); ++ kobject_uevent(&devcd->devcd_dev.kobj, KOBJ_ADD); + INIT_DELAYED_WORK(&devcd->del_wk, devcd_del); + schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT); + mutex_unlock(&devcd->mutex); diff --git a/queue-6.1/kvm-s390-mm-properly-reset-no-dat.patch b/queue-6.1/kvm-s390-mm-properly-reset-no-dat.patch new file mode 100644 index 00000000000..79b938456a3 --- /dev/null +++ b/queue-6.1/kvm-s390-mm-properly-reset-no-dat.patch @@ -0,0 +1,33 @@ +From 27072b8e18a73ffeffb1c140939023915a35134b Mon Sep 17 00:00:00 2001 +From: Claudio Imbrenda +Date: Thu, 9 Nov 2023 13:36:24 +0100 +Subject: KVM: s390/mm: Properly reset no-dat + +From: Claudio Imbrenda + +commit 27072b8e18a73ffeffb1c140939023915a35134b upstream. + +When the CMMA state needs to be reset, the no-dat bit also needs to be +reset. Failure to do so could cause issues in the guest, since the +guest expects the bit to be cleared after a reset. + +Cc: +Reviewed-by: Nico Boehr +Message-ID: <20231109123624.37314-1-imbrenda@linux.ibm.com> +Signed-off-by: Claudio Imbrenda +Signed-off-by: Greg Kroah-Hartman +--- + arch/s390/mm/pgtable.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/s390/mm/pgtable.c ++++ b/arch/s390/mm/pgtable.c +@@ -731,7 +731,7 @@ void ptep_zap_unused(struct mm_struct *m + pte_clear(mm, addr, ptep); + } + if (reset) +- pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK; ++ pgste_val(pgste) &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT); + pgste_set_unlock(ptep, pgste); + preempt_enable(); + } diff --git a/queue-6.1/kvm-svm-update-efer-software-model-on-cr0-trap-for-sev-es.patch b/queue-6.1/kvm-svm-update-efer-software-model-on-cr0-trap-for-sev-es.patch new file mode 100644 index 00000000000..966f05763ec --- /dev/null +++ b/queue-6.1/kvm-svm-update-efer-software-model-on-cr0-trap-for-sev-es.patch @@ -0,0 +1,74 @@ +From 4cdf351d3630a640ab6a05721ef055b9df62277f Mon Sep 17 00:00:00 2001 +From: Sean Christopherson +Date: Fri, 7 May 2021 09:59:46 -0700 +Subject: KVM: SVM: Update EFER software model on CR0 trap for SEV-ES + +From: Sean Christopherson + +commit 4cdf351d3630a640ab6a05721ef055b9df62277f upstream. + +In general, activating long mode involves setting the EFER_LME bit in +the EFER register and then enabling the X86_CR0_PG bit in the CR0 +register. At this point, the EFER_LMA bit will be set automatically by +hardware. + +In the case of SVM/SEV guests where writes to CR0 are intercepted, it's +necessary for the host to set EFER_LMA on behalf of the guest since +hardware does not see the actual CR0 write. 
+ +In the case of SEV-ES guests where writes to CR0 are trapped instead of +intercepted, the hardware *does* see/record the write to CR0 before +exiting and passing the value on to the host, so as part of enabling +SEV-ES support commit f1c6366e3043 ("KVM: SVM: Add required changes to +support intercepts under SEV-ES") dropped special handling of the +EFER_LMA bit with the understanding that it would be set automatically. + +However, since the guest never explicitly sets the EFER_LMA bit, the +host never becomes aware that it has been set. This becomes problematic +when userspace tries to get/set the EFER values via +KVM_GET_SREGS/KVM_SET_SREGS, since the EFER contents tracked by the host +will be missing the EFER_LMA bit, and when userspace attempts to pass +the EFER value back via KVM_SET_SREGS it will fail a sanity check that +asserts that EFER_LMA should always be set when X86_CR0_PG and EFER_LME +are set. + +Fix this by always inferring the value of EFER_LMA based on X86_CR0_PG +and EFER_LME, regardless of whether or not SEV-ES is enabled. + +Fixes: f1c6366e3043 ("KVM: SVM: Add required changes to support intercepts under SEV-ES") +Reported-by: Peter Gonda +Cc: stable@vger.kernel.org +Signed-off-by: Sean Christopherson +Message-Id: <20210507165947.2502412-2-seanjc@google.com> +[A two year old patch that was revived after we noticed the failure in + KVM_SET_SREGS and a similar patch was posted by Michael Roth. This is + Sean's patch, but with Michael's more complete commit message. - Paolo] +Signed-off-by: Paolo Bonzini +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kvm/svm/svm.c | 8 +++++--- + 1 file changed, 5 insertions(+), 3 deletions(-) + +--- a/arch/x86/kvm/svm/svm.c ++++ b/arch/x86/kvm/svm/svm.c +@@ -1786,15 +1786,17 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, + bool old_paging = is_paging(vcpu); + + #ifdef CONFIG_X86_64 +- if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) { ++ if (vcpu->arch.efer & EFER_LME) { + if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { + vcpu->arch.efer |= EFER_LMA; +- svm->vmcb->save.efer |= EFER_LMA | EFER_LME; ++ if (!vcpu->arch.guest_state_protected) ++ svm->vmcb->save.efer |= EFER_LMA | EFER_LME; + } + + if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) { + vcpu->arch.efer &= ~EFER_LMA; +- svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); ++ if (!vcpu->arch.guest_state_protected) ++ svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); + } + } + #endif diff --git a/queue-6.1/mips-kernel-clear-fpu-states-when-setting-up-kernel-threads.patch b/queue-6.1/mips-kernel-clear-fpu-states-when-setting-up-kernel-threads.patch new file mode 100644 index 00000000000..963ae82557d --- /dev/null +++ b/queue-6.1/mips-kernel-clear-fpu-states-when-setting-up-kernel-threads.patch @@ -0,0 +1,69 @@ +From a58a173444a68412bb08849bd81c679395f20ca0 Mon Sep 17 00:00:00 2001 +From: Thomas Bogendoerfer +Date: Thu, 30 Nov 2023 17:36:01 +0100 +Subject: MIPS: kernel: Clear FPU states when setting up kernel threads + +From: Thomas Bogendoerfer + +commit a58a173444a68412bb08849bd81c679395f20ca0 upstream. + +io_uring sets up the io worker kernel thread via a syscall out of an +user space prrocess. This process might have used FPU and since +copy_thread() didn't clear FPU states for kernel threads a BUG() +is triggered for using FPU inside kernel. Move code around +to always clear FPU state for user and kernel threads. 
+ +Cc: stable@vger.kernel.org +Reported-by: Aurelien Jarno +Closes: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1055021 +Suggested-by: Jiaxun Yang +Reviewed-by: Jiaxun Yang +Signed-off-by: Thomas Bogendoerfer +Signed-off-by: Greg Kroah-Hartman +--- + arch/mips/kernel/process.c | 25 +++++++++++++------------ + 1 file changed, 13 insertions(+), 12 deletions(-) + +--- a/arch/mips/kernel/process.c ++++ b/arch/mips/kernel/process.c +@@ -121,6 +121,19 @@ int copy_thread(struct task_struct *p, c + /* Put the stack after the struct pt_regs. */ + childksp = (unsigned long) childregs; + p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK; ++ ++ /* ++ * New tasks lose permission to use the fpu. This accelerates context ++ * switching for most programs since they don't use the fpu. ++ */ ++ clear_tsk_thread_flag(p, TIF_USEDFPU); ++ clear_tsk_thread_flag(p, TIF_USEDMSA); ++ clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE); ++ ++#ifdef CONFIG_MIPS_MT_FPAFF ++ clear_tsk_thread_flag(p, TIF_FPUBOUND); ++#endif /* CONFIG_MIPS_MT_FPAFF */ ++ + if (unlikely(args->fn)) { + /* kernel thread */ + unsigned long status = p->thread.cp0_status; +@@ -149,20 +162,8 @@ int copy_thread(struct task_struct *p, c + p->thread.reg29 = (unsigned long) childregs; + p->thread.reg31 = (unsigned long) ret_from_fork; + +- /* +- * New tasks lose permission to use the fpu. This accelerates context +- * switching for most programs since they don't use the fpu. +- */ + childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); + +- clear_tsk_thread_flag(p, TIF_USEDFPU); +- clear_tsk_thread_flag(p, TIF_USEDMSA); +- clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE); +- +-#ifdef CONFIG_MIPS_MT_FPAFF +- clear_tsk_thread_flag(p, TIF_FPUBOUND); +-#endif /* CONFIG_MIPS_MT_FPAFF */ +- + #ifdef CONFIG_MIPS_FP_SUPPORT + atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE); + #endif diff --git a/queue-6.1/mips-loongson64-enable-dma-noncoherent-support.patch b/queue-6.1/mips-loongson64-enable-dma-noncoherent-support.patch new file mode 100644 index 00000000000..fbe8b1a88a9 --- /dev/null +++ b/queue-6.1/mips-loongson64-enable-dma-noncoherent-support.patch @@ -0,0 +1,87 @@ +From edc0378eee00200a5bedf1bb9f00ad390e0d1bd4 Mon Sep 17 00:00:00 2001 +From: Jiaxun Yang +Date: Tue, 7 Nov 2023 11:15:19 +0000 +Subject: MIPS: Loongson64: Enable DMA noncoherent support + +From: Jiaxun Yang + +commit edc0378eee00200a5bedf1bb9f00ad390e0d1bd4 upstream. + +There are some Loongson64 systems come with broken coherent DMA +support, firmware will set a bit in boot_param and pass nocoherentio +in cmdline. + +However nonconherent support was missed out when spin off Loongson-2EF +form Loongson64, and that boot_param change never made itself into +upstream. + +Support DMA noncoherent properly to get those systems working. 
+ +Cc: stable@vger.kernel.org +Fixes: 71e2f4dd5a65 ("MIPS: Fork loongson2ef from loongson64") +Signed-off-by: Jiaxun Yang +Signed-off-by: Thomas Bogendoerfer +Signed-off-by: Greg Kroah-Hartman +--- + arch/mips/Kconfig | 2 ++ + arch/mips/include/asm/mach-loongson64/boot_param.h | 3 ++- + arch/mips/loongson64/env.c | 10 +++++++++- + 3 files changed, 13 insertions(+), 2 deletions(-) + +--- a/arch/mips/Kconfig ++++ b/arch/mips/Kconfig +@@ -483,6 +483,7 @@ config MACH_LOONGSON2EF + + config MACH_LOONGSON64 + bool "Loongson 64-bit family of machines" ++ select ARCH_DMA_DEFAULT_COHERENT + select ARCH_SPARSEMEM_ENABLE + select ARCH_MIGHT_HAVE_PC_PARPORT + select ARCH_MIGHT_HAVE_PC_SERIO +@@ -1304,6 +1305,7 @@ config CPU_LOONGSON64 + select CPU_SUPPORTS_MSA + select CPU_DIEI_BROKEN if !LOONGSON3_ENHANCEMENT + select CPU_MIPSR2_IRQ_VI ++ select DMA_NONCOHERENT + select WEAK_ORDERING + select WEAK_REORDERING_BEYOND_LLSC + select MIPS_ASID_BITS_VARIABLE +--- a/arch/mips/include/asm/mach-loongson64/boot_param.h ++++ b/arch/mips/include/asm/mach-loongson64/boot_param.h +@@ -121,7 +121,8 @@ struct irq_source_routing_table { + u64 pci_io_start_addr; + u64 pci_io_end_addr; + u64 pci_config_addr; +- u32 dma_mask_bits; ++ u16 dma_mask_bits; ++ u16 dma_noncoherent; + } __packed; + + struct interface_info { +--- a/arch/mips/loongson64/env.c ++++ b/arch/mips/loongson64/env.c +@@ -13,6 +13,8 @@ + * Copyright (C) 2009 Lemote Inc. + * Author: Wu Zhangjin, wuzhangjin@gmail.com + */ ++ ++#include + #include + #include + #include +@@ -147,8 +149,14 @@ void __init prom_lefi_init_env(void) + + loongson_sysconf.dma_mask_bits = eirq_source->dma_mask_bits; + if (loongson_sysconf.dma_mask_bits < 32 || +- loongson_sysconf.dma_mask_bits > 64) ++ loongson_sysconf.dma_mask_bits > 64) { + loongson_sysconf.dma_mask_bits = 32; ++ dma_default_coherent = true; ++ } else { ++ dma_default_coherent = !eirq_source->dma_noncoherent; ++ } ++ ++ pr_info("Firmware: Coherent DMA: %s\n", dma_default_coherent ? "on" : "off"); + + loongson_sysconf.restart_addr = boot_p->reset_system.ResetWarm; + loongson_sysconf.poweroff_addr = boot_p->reset_system.Shutdown; diff --git a/queue-6.1/mips-loongson64-handle-more-memory-types-passed-from-firmware.patch b/queue-6.1/mips-loongson64-handle-more-memory-types-passed-from-firmware.patch new file mode 100644 index 00000000000..dd327a963d0 --- /dev/null +++ b/queue-6.1/mips-loongson64-handle-more-memory-types-passed-from-firmware.patch @@ -0,0 +1,108 @@ +From c7206e7bd214ebb3ca6fa474a4423662327d9beb Mon Sep 17 00:00:00 2001 +From: Jiaxun Yang +Date: Tue, 7 Nov 2023 11:15:20 +0000 +Subject: MIPS: Loongson64: Handle more memory types passed from firmware + +From: Jiaxun Yang + +commit c7206e7bd214ebb3ca6fa474a4423662327d9beb upstream. + +There are many types of revsered memory passed from firmware +that should be reserved in memblock, and UMA memory passed +from firmware that should be added to system memory for system +to use. + +Also for memblock there is no need to align those space into page, +which actually cause problems. + +Handle them properly to prevent memory corruption on some systems. 
+ +Cc: stable@vger.kernel.org +Signed-off-by: Jiaxun Yang +Signed-off-by: Thomas Bogendoerfer +Signed-off-by: Greg Kroah-Hartman +--- + arch/mips/include/asm/mach-loongson64/boot_param.h | 6 ++- + arch/mips/loongson64/init.c | 42 +++++++++++++-------- + 2 files changed, 31 insertions(+), 17 deletions(-) + +--- a/arch/mips/include/asm/mach-loongson64/boot_param.h ++++ b/arch/mips/include/asm/mach-loongson64/boot_param.h +@@ -14,7 +14,11 @@ + #define ADAPTER_ROM 8 + #define ACPI_TABLE 9 + #define SMBIOS_TABLE 10 +-#define MAX_MEMORY_TYPE 11 ++#define UMA_VIDEO_RAM 11 ++#define VUMA_VIDEO_RAM 12 ++#define MAX_MEMORY_TYPE 13 ++ ++#define MEM_SIZE_IS_IN_BYTES (1 << 31) + + #define LOONGSON3_BOOT_MEM_MAP_MAX 128 + struct efi_memory_map_loongson { +--- a/arch/mips/loongson64/init.c ++++ b/arch/mips/loongson64/init.c +@@ -49,8 +49,7 @@ void virtual_early_config(void) + void __init szmem(unsigned int node) + { + u32 i, mem_type; +- static unsigned long num_physpages; +- u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size; ++ phys_addr_t node_id, mem_start, mem_size; + + /* Otherwise come from DTB */ + if (loongson_sysconf.fw_interface != LOONGSON_LEFI) +@@ -64,27 +63,38 @@ void __init szmem(unsigned int node) + + mem_type = loongson_memmap->map[i].mem_type; + mem_size = loongson_memmap->map[i].mem_size; +- mem_start = loongson_memmap->map[i].mem_start; ++ ++ /* Memory size comes in MB if MEM_SIZE_IS_IN_BYTES not set */ ++ if (mem_size & MEM_SIZE_IS_IN_BYTES) ++ mem_size &= ~MEM_SIZE_IS_IN_BYTES; ++ else ++ mem_size = mem_size << 20; ++ ++ mem_start = (node_id << 44) | loongson_memmap->map[i].mem_start; + + switch (mem_type) { + case SYSTEM_RAM_LOW: + case SYSTEM_RAM_HIGH: +- start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT; +- node_psize = (mem_size << 20) >> PAGE_SHIFT; +- end_pfn = start_pfn + node_psize; +- num_physpages += node_psize; +- pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n", +- (u32)node_id, mem_type, mem_start, mem_size); +- pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n", +- start_pfn, end_pfn, num_physpages); +- memblock_add_node(PFN_PHYS(start_pfn), +- PFN_PHYS(node_psize), node, ++ case UMA_VIDEO_RAM: ++ pr_info("Node %d, mem_type:%d\t[%pa], %pa bytes usable\n", ++ (u32)node_id, mem_type, &mem_start, &mem_size); ++ memblock_add_node(mem_start, mem_size, node, + MEMBLOCK_NONE); + break; + case SYSTEM_RAM_RESERVED: +- pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n", +- (u32)node_id, mem_type, mem_start, mem_size); +- memblock_reserve(((node_id << 44) + mem_start), mem_size << 20); ++ case VIDEO_ROM: ++ case ADAPTER_ROM: ++ case ACPI_TABLE: ++ case SMBIOS_TABLE: ++ pr_info("Node %d, mem_type:%d\t[%pa], %pa bytes reserved\n", ++ (u32)node_id, mem_type, &mem_start, &mem_size); ++ memblock_reserve(mem_start, mem_size); ++ break; ++ /* We should not reserve VUMA_VIDEO_RAM as it overlaps with MMIO */ ++ case VUMA_VIDEO_RAM: ++ default: ++ pr_info("Node %d, mem_type:%d\t[%pa], %pa bytes unhandled\n", ++ (u32)node_id, mem_type, &mem_start, &mem_size); + break; + } + } diff --git a/queue-6.1/mips-loongson64-reserve-vgabios-memory-on-boot.patch b/queue-6.1/mips-loongson64-reserve-vgabios-memory-on-boot.patch new file mode 100644 index 00000000000..8cb20dd598a --- /dev/null +++ b/queue-6.1/mips-loongson64-reserve-vgabios-memory-on-boot.patch @@ -0,0 +1,43 @@ +From 8f7aa77a463f47c9e00592d02747a9fcf2271543 Mon Sep 17 00:00:00 2001 +From: Jiaxun Yang +Date: Tue, 7 Nov 2023 11:15:18 +0000 +Subject: MIPS: 
Loongson64: Reserve vgabios memory on boot + +From: Jiaxun Yang + +commit 8f7aa77a463f47c9e00592d02747a9fcf2271543 upstream. + +vgabios is passed from firmware to kernel on Loongson64 systems. +Sane firmware will keep this pointer in reserved memory space +passed from the firmware but insane firmware keeps it in low +memory before kernel entry that is not reserved. + +Previously kernel won't try to allocate memory from low memory +before kernel entry on boot, but after converting to memblock +it will do that. + +Fix by resversing those memory on early boot. + +Cc: stable@vger.kernel.org +Fixes: a94e4f24ec83 ("MIPS: init: Drop boot_mem_map") +Signed-off-by: Jiaxun Yang +Signed-off-by: Thomas Bogendoerfer +Signed-off-by: Greg Kroah-Hartman +--- + arch/mips/loongson64/init.c | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/arch/mips/loongson64/init.c ++++ b/arch/mips/loongson64/init.c +@@ -88,6 +88,11 @@ void __init szmem(unsigned int node) + break; + } + } ++ ++ /* Reserve vgabios if it comes from firmware */ ++ if (loongson_sysconf.vgabios_addr) ++ memblock_reserve(virt_to_phys((void *)loongson_sysconf.vgabios_addr), ++ SZ_256K); + } + + #ifndef CONFIG_NUMA diff --git a/queue-6.1/parport-add-support-for-brainboxes-ix-uc-px-parallel-cards.patch b/queue-6.1/parport-add-support-for-brainboxes-ix-uc-px-parallel-cards.patch new file mode 100644 index 00000000000..6ff65efbb59 --- /dev/null +++ b/queue-6.1/parport-add-support-for-brainboxes-ix-uc-px-parallel-cards.patch @@ -0,0 +1,65 @@ +From 1a031f6edc460e9562098bdedc3918da07c30a6e Mon Sep 17 00:00:00 2001 +From: Cameron Williams +Date: Thu, 2 Nov 2023 21:10:40 +0000 +Subject: parport: Add support for Brainboxes IX/UC/PX parallel cards + +From: Cameron Williams + +commit 1a031f6edc460e9562098bdedc3918da07c30a6e upstream. 
+ +Adds support for Intashield IX-500/IX-550, UC-146/UC-157, PX-146/PX-157, +PX-203 and PX-475 (LPT port) + +Cc: stable@vger.kernel.org +Signed-off-by: Cameron Williams +Acked-by: Sudip Mukherjee +Link: https://lore.kernel.org/r/AS4PR02MB790389C130410BD864C8DCC9C4A6A@AS4PR02MB7903.eurprd02.prod.outlook.com +Signed-off-by: Greg Kroah-Hartman +--- + drivers/parport/parport_pc.c | 21 +++++++++++++++++++++ + 1 file changed, 21 insertions(+) + +--- a/drivers/parport/parport_pc.c ++++ b/drivers/parport/parport_pc.c +@@ -2614,6 +2614,8 @@ enum parport_pc_pci_cards { + netmos_9865, + quatech_sppxp100, + wch_ch382l, ++ brainboxes_uc146, ++ brainboxes_px203, + }; + + +@@ -2678,6 +2680,8 @@ static struct parport_pc_pci { + /* netmos_9865 */ { 1, { { 0, -1 }, } }, + /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, + /* wch_ch382l */ { 1, { { 2, -1 }, } }, ++ /* brainboxes_uc146 */ { 1, { { 3, -1 }, } }, ++ /* brainboxes_px203 */ { 1, { { 0, -1 }, } }, + }; + + static const struct pci_device_id parport_pc_pci_tbl[] = { +@@ -2771,6 +2775,23 @@ static const struct pci_device_id parpor + PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 }, + /* WCH CH382L PCI-E single parallel port card */ + { 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l }, ++ /* Brainboxes IX-500/550 */ ++ { PCI_VENDOR_ID_INTASHIELD, 0x402a, ++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport }, ++ /* Brainboxes UC-146/UC-157 */ ++ { PCI_VENDOR_ID_INTASHIELD, 0x0be1, ++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 }, ++ { PCI_VENDOR_ID_INTASHIELD, 0x0be2, ++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc146 }, ++ /* Brainboxes PX-146/PX-257 */ ++ { PCI_VENDOR_ID_INTASHIELD, 0x401c, ++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport }, ++ /* Brainboxes PX-203 */ ++ { PCI_VENDOR_ID_INTASHIELD, 0x4007, ++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_px203 }, ++ /* Brainboxes PX-475 */ ++ { PCI_VENDOR_ID_INTASHIELD, 0x401f, ++ PCI_ANY_ID, PCI_ANY_ID, 0, 0, oxsemi_pcie_pport }, + { 0, } /* terminate list */ + }; + MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl); diff --git a/queue-6.1/revert-xhci-loosen-rpm-as-default-policy-to-cover-for-amd-xhc-1.1.patch b/queue-6.1/revert-xhci-loosen-rpm-as-default-policy-to-cover-for-amd-xhc-1.1.patch new file mode 100644 index 00000000000..de0ad2ff487 --- /dev/null +++ b/queue-6.1/revert-xhci-loosen-rpm-as-default-policy-to-cover-for-amd-xhc-1.1.patch @@ -0,0 +1,44 @@ +From 24be0b3c40594a14b65141ced486ae327398faf8 Mon Sep 17 00:00:00 2001 +From: Mathias Nyman +Date: Tue, 5 Dec 2023 11:05:48 +0200 +Subject: Revert "xhci: Loosen RPM as default policy to cover for AMD xHC 1.1" + +From: Mathias Nyman + +commit 24be0b3c40594a14b65141ced486ae327398faf8 upstream. + +This reverts commit 4baf1218150985ee3ab0a27220456a1f027ea0ac. + +Enabling runtime pm as default for all AMD xHC 1.1 controllers caused +regression. An initial attempt to fix those was done in commit a5d6264b638e +("xhci: Enable RPM on controllers that support low-power states") but new +issues are still seen. + +Revert this to get those AMD xHC 1.1 systems working + +This patch went to stable an needs to be reverted from there as well. 
+ +Fixes: 4baf12181509 ("xhci: Loosen RPM as default policy to cover for AMD xHC 1.1") +Link: https://lore.kernel.org/linux-usb/55c50bf5-bffb-454e-906e-4408c591cb63@molgen.mpg.de +Cc: Mario Limonciello +Cc: Basavaraj Natikar +Cc: stable@vger.kernel.org +Signed-off-by: Mathias Nyman +Reviewed-by: Mario Limonciello +Link: https://lore.kernel.org/r/20231205090548.1377667-1-mathias.nyman@linux.intel.com +Signed-off-by: Greg Kroah-Hartman +--- + drivers/usb/host/xhci-pci.c | 2 -- + 1 file changed, 2 deletions(-) + +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -348,8 +348,6 @@ static void xhci_pci_quirks(struct devic + /* xHC spec requires PCI devices to support D3hot and D3cold */ + if (xhci->hci_version >= 0x120) + xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; +- else if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version >= 0x110) +- xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; + + if (xhci->quirks & XHCI_RESET_ON_RESUME) + xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, diff --git a/queue-6.1/serial-8250-8250_omap-clear-uart_has_rhr_it_dis-bit.patch b/queue-6.1/serial-8250-8250_omap-clear-uart_has_rhr_it_dis-bit.patch new file mode 100644 index 00000000000..6bbbd64abc8 --- /dev/null +++ b/queue-6.1/serial-8250-8250_omap-clear-uart_has_rhr_it_dis-bit.patch @@ -0,0 +1,44 @@ +From 8973ab7a2441b286218f4a5c4c33680e2f139996 Mon Sep 17 00:00:00 2001 +From: Ronald Wahl +Date: Tue, 31 Oct 2023 12:09:09 +0100 +Subject: serial: 8250: 8250_omap: Clear UART_HAS_RHR_IT_DIS bit + +From: Ronald Wahl + +commit 8973ab7a2441b286218f4a5c4c33680e2f139996 upstream. + +This fixes commit 439c7183e5b9 ("serial: 8250: 8250_omap: Disable RX +interrupt after DMA enable") which unfortunately set the +UART_HAS_RHR_IT_DIS bit in the UART_OMAP_IER2 register and never +cleared it. + +Cc: stable@vger.kernel.org +Fixes: 439c7183e5b9 ("serial: 8250: 8250_omap: Disable RX interrupt after DMA enable") +Signed-off-by: Ronald Wahl +Reviewed-by: Vignesh Raghavendra +Link: https://lore.kernel.org/r/20231031110909.11695-1-rwahl@gmx.de +Signed-off-by: Greg Kroah-Hartman +--- + drivers/tty/serial/8250/8250_omap.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/tty/serial/8250/8250_omap.c ++++ b/drivers/tty/serial/8250/8250_omap.c +@@ -825,7 +825,7 @@ static void __dma_rx_do_complete(struct + if (priv->habit & UART_HAS_RHR_IT_DIS) { + reg = serial_in(p, UART_OMAP_IER2); + reg &= ~UART_OMAP_IER2_RHR_IT_DIS; +- serial_out(p, UART_OMAP_IER2, UART_OMAP_IER2_RHR_IT_DIS); ++ serial_out(p, UART_OMAP_IER2, reg); + } + + dmaengine_tx_status(rxchan, cookie, &state); +@@ -967,7 +967,7 @@ static int omap_8250_rx_dma(struct uart_ + if (priv->habit & UART_HAS_RHR_IT_DIS) { + reg = serial_in(p, UART_OMAP_IER2); + reg |= UART_OMAP_IER2_RHR_IT_DIS; +- serial_out(p, UART_OMAP_IER2, UART_OMAP_IER2_RHR_IT_DIS); ++ serial_out(p, UART_OMAP_IER2, reg); + } + + dma_async_issue_pending(dma->rxchan); diff --git a/queue-6.1/serial-8250-8250_omap-do-not-start-rx-dma-on-thri-interrupt.patch b/queue-6.1/serial-8250-8250_omap-do-not-start-rx-dma-on-thri-interrupt.patch new file mode 100644 index 00000000000..7751545ac64 --- /dev/null +++ b/queue-6.1/serial-8250-8250_omap-do-not-start-rx-dma-on-thri-interrupt.patch @@ -0,0 +1,45 @@ +From c6bb057418876cdfdd29a6f7b8cef54539ee8811 Mon Sep 17 00:00:00 2001 +From: Ronald Wahl +Date: Wed, 1 Nov 2023 18:14:31 +0100 +Subject: serial: 8250: 8250_omap: Do not start RX DMA on THRI interrupt + +From: Ronald Wahl + +commit c6bb057418876cdfdd29a6f7b8cef54539ee8811 upstream. 
+ +Starting RX DMA on THRI interrupt is too early because TX may not have +finished yet. + +This change is inspired by commit 90b8596ac460 ("serial: 8250: Prevent +starting up DMA Rx on THRI interrupt") and fixes DMA issues I had with +an AM62 SoC that is using the 8250 OMAP variant. + +Cc: stable@vger.kernel.org +Fixes: c26389f998a8 ("serial: 8250: 8250_omap: Add DMA support for UARTs on K3 SoCs") +Signed-off-by: Ronald Wahl +Reviewed-by: Vignesh Raghavendra +Link: https://lore.kernel.org/r/20231101171431.16495-1-rwahl@gmx.de +Signed-off-by: Greg Kroah-Hartman +--- + drivers/tty/serial/8250/8250_omap.c | 10 ++++++---- + 1 file changed, 6 insertions(+), 4 deletions(-) + +--- a/drivers/tty/serial/8250/8250_omap.c ++++ b/drivers/tty/serial/8250/8250_omap.c +@@ -1186,10 +1186,12 @@ static int omap_8250_dma_handle_irq(stru + + status = serial_port_in(port, UART_LSR); + +- if (priv->habit & UART_HAS_EFR2) +- am654_8250_handle_rx_dma(up, iir, status); +- else +- status = omap_8250_handle_rx_dma(up, iir, status); ++ if ((iir & 0x3f) != UART_IIR_THRI) { ++ if (priv->habit & UART_HAS_EFR2) ++ am654_8250_handle_rx_dma(up, iir, status); ++ else ++ status = omap_8250_handle_rx_dma(up, iir, status); ++ } + + serial8250_modem_status(up); + if (status & UART_LSR_THRE && up->dma->tx_err) { diff --git a/queue-6.1/serial-8250_dw-add-acpi-id-for-granite-rapids-d-uart.patch b/queue-6.1/serial-8250_dw-add-acpi-id-for-granite-rapids-d-uart.patch new file mode 100644 index 00000000000..7b0a6cc1a0b --- /dev/null +++ b/queue-6.1/serial-8250_dw-add-acpi-id-for-granite-rapids-d-uart.patch @@ -0,0 +1,30 @@ +From e92fad024929c79460403acf946bc9c09ce5c3a9 Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Tue, 5 Dec 2023 21:55:24 +0200 +Subject: serial: 8250_dw: Add ACPI ID for Granite Rapids-D UART + +From: Andy Shevchenko + +commit e92fad024929c79460403acf946bc9c09ce5c3a9 upstream. + +Granite Rapids-D has an additional UART that is enumerated via ACPI. +Add ACPI ID for it. + +Signed-off-by: Andy Shevchenko +Cc: stable +Link: https://lore.kernel.org/r/20231205195524.2705965-1-andriy.shevchenko@linux.intel.com +Signed-off-by: Greg Kroah-Hartman +--- + drivers/tty/serial/8250/8250_dw.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/tty/serial/8250/8250_dw.c ++++ b/drivers/tty/serial/8250/8250_dw.c +@@ -795,6 +795,7 @@ static const struct acpi_device_id dw825 + { "INT33C5", (kernel_ulong_t)&dw8250_dw_apb }, + { "INT3434", (kernel_ulong_t)&dw8250_dw_apb }, + { "INT3435", (kernel_ulong_t)&dw8250_dw_apb }, ++ { "INTC10EE", (kernel_ulong_t)&dw8250_dw_apb }, + { }, + }; + MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match); diff --git a/queue-6.1/serial-8250_omap-add-earlycon-support-for-the-am654-uart-controller.patch b/queue-6.1/serial-8250_omap-add-earlycon-support-for-the-am654-uart-controller.patch new file mode 100644 index 00000000000..b7bdd88d204 --- /dev/null +++ b/queue-6.1/serial-8250_omap-add-earlycon-support-for-the-am654-uart-controller.patch @@ -0,0 +1,31 @@ +From 8e42c301ce64e0dcca547626eb486877d502d336 Mon Sep 17 00:00:00 2001 +From: Ronald Wahl +Date: Tue, 31 Oct 2023 14:12:42 +0100 +Subject: serial: 8250_omap: Add earlycon support for the AM654 UART controller + +From: Ronald Wahl + +commit 8e42c301ce64e0dcca547626eb486877d502d336 upstream. + +Currently there is no support for earlycon on the AM654 UART +controller. This commit adds it. 
+ +Signed-off-by: Ronald Wahl +Reviewed-by: Vignesh Raghavendra +Link: https://lore.kernel.org/r/20231031131242.15516-1-rwahl@gmx.de +Cc: stable +Signed-off-by: Greg Kroah-Hartman +--- + drivers/tty/serial/8250/8250_early.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/tty/serial/8250/8250_early.c ++++ b/drivers/tty/serial/8250/8250_early.c +@@ -197,6 +197,7 @@ static int __init early_omap8250_setup(s + OF_EARLYCON_DECLARE(omap8250, "ti,omap2-uart", early_omap8250_setup); + OF_EARLYCON_DECLARE(omap8250, "ti,omap3-uart", early_omap8250_setup); + OF_EARLYCON_DECLARE(omap8250, "ti,omap4-uart", early_omap8250_setup); ++OF_EARLYCON_DECLARE(omap8250, "ti,am654-uart", early_omap8250_setup); + + #endif + diff --git a/queue-6.1/serial-sc16is7xx-address-rx-timeout-interrupt-errata.patch b/queue-6.1/serial-sc16is7xx-address-rx-timeout-interrupt-errata.patch new file mode 100644 index 00000000000..f76c20deb3a --- /dev/null +++ b/queue-6.1/serial-sc16is7xx-address-rx-timeout-interrupt-errata.patch @@ -0,0 +1,68 @@ +From 08ce9a1b72e38cf44c300a44ac5858533eb3c860 Mon Sep 17 00:00:00 2001 +From: Daniel Mack +Date: Thu, 23 Nov 2023 08:28:18 +0100 +Subject: serial: sc16is7xx: address RX timeout interrupt errata + +From: Daniel Mack + +commit 08ce9a1b72e38cf44c300a44ac5858533eb3c860 upstream. + +This device has a silicon bug that makes it report a timeout interrupt +but no data in the FIFO. + +The datasheet states the following in the errata section 18.1.4: + + "If the host reads the receive FIFO at the same time as a + time-out interrupt condition happens, the host might read 0xCC + (time-out) in the Interrupt Indication Register (IIR), but bit 0 + of the Line Status Register (LSR) is not set (means there is no + data in the receive FIFO)." + +The errata description seems to indicate it concerns only polled mode of +operation when reading bit 0 of the LSR register. However, tests have +shown and NXP has confirmed that the RXLVL register also yields 0 when +the bug is triggered, and hence the IRQ driven implementation in this +driver is equally affected. + +This bug has hit us on production units and when it does, sc16is7xx_irq() +would spin forever because sc16is7xx_port_irq() keeps seeing an +interrupt in the IIR register that is not cleared because the driver +does not call into sc16is7xx_handle_rx() unless the RXLVL register +reports at least one byte in the FIFO. + +Fix this by always reading one byte from the FIFO when this condition +is detected in order to clear the interrupt. This approach was +confirmed to be correct by NXP through their support channels. + +Tested by: Hugo Villeneuve + +Signed-off-by: Daniel Mack +Co-Developed-by: Maxim Popov +Cc: stable@vger.kernel.org +Link: https://lore.kernel.org/r/20231123072818.1394539-1-daniel@zonque.org +Signed-off-by: Greg Kroah-Hartman +--- + drivers/tty/serial/sc16is7xx.c | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +--- a/drivers/tty/serial/sc16is7xx.c ++++ b/drivers/tty/serial/sc16is7xx.c +@@ -769,6 +769,18 @@ static bool sc16is7xx_port_irq(struct sc + case SC16IS7XX_IIR_RTOI_SRC: + case SC16IS7XX_IIR_XOFFI_SRC: + rxlen = sc16is7xx_port_read(port, SC16IS7XX_RXLVL_REG); ++ ++ /* ++ * There is a silicon bug that makes the chip report a ++ * time-out interrupt but no data in the FIFO. This is ++ * described in errata section 18.1.4. ++ * ++ * When this happens, read one byte from the FIFO to ++ * clear the interrupt. 
++ */ ++ if (iir == SC16IS7XX_IIR_RTOI_SRC && !rxlen) ++ rxlen = 1; ++ + if (rxlen) + sc16is7xx_handle_rx(port, rxlen, iir); + break; diff --git a/queue-6.1/series b/queue-6.1/series index 087a94f4c59..cd0ee8f20b1 100644 --- a/queue-6.1/series +++ b/queue-6.1/series @@ -163,3 +163,26 @@ drm-amdgpu-return-from-switch-early-for-eeprom-i2c-a.patch drm-amdgpu-simplify-amdgpu_ras_eeprom.c.patch drm-amdgpu-add-i2c-eeprom-support-on-smu-v13_0_6.patch drm-amdgpu-update-eeprom-i2c-address-for-smu-v13_0_0.patch +usb-gadget-f_hid-fix-report-descriptor-allocation.patch +serial-8250_dw-add-acpi-id-for-granite-rapids-d-uart.patch +parport-add-support-for-brainboxes-ix-uc-px-parallel-cards.patch +cifs-fix-non-availability-of-dedup-breaking-generic-304.patch +revert-xhci-loosen-rpm-as-default-policy-to-cover-for-amd-xhc-1.1.patch +smb-client-fix-potential-null-deref-in-parse_dfs_referrals.patch +usb-typec-class-fix-typec_altmode_put_partner-to-put-plugs.patch +arm-pl011-fix-dma-support.patch +serial-sc16is7xx-address-rx-timeout-interrupt-errata.patch +serial-8250-8250_omap-clear-uart_has_rhr_it_dis-bit.patch +serial-8250-8250_omap-do-not-start-rx-dma-on-thri-interrupt.patch +serial-8250_omap-add-earlycon-support-for-the-am654-uart-controller.patch +devcoredump-send-uevent-once-devcd-is-ready.patch +x86-cpu-amd-check-vendor-in-the-amd-microcode-callback.patch +usb-gadget-core-adjust-uevent-timing-on-gadget-unbind.patch +cifs-fix-flushing-invalidation-and-file-size-with-copy_file_range.patch +cifs-fix-flushing-invalidation-and-file-size-with-ficlone.patch +mips-kernel-clear-fpu-states-when-setting-up-kernel-threads.patch +kvm-s390-mm-properly-reset-no-dat.patch +kvm-svm-update-efer-software-model-on-cr0-trap-for-sev-es.patch +mips-loongson64-reserve-vgabios-memory-on-boot.patch +mips-loongson64-handle-more-memory-types-passed-from-firmware.patch +mips-loongson64-enable-dma-noncoherent-support.patch diff --git a/queue-6.1/smb-client-fix-potential-null-deref-in-parse_dfs_referrals.patch b/queue-6.1/smb-client-fix-potential-null-deref-in-parse_dfs_referrals.patch new file mode 100644 index 00000000000..41001b5ca0e --- /dev/null +++ b/queue-6.1/smb-client-fix-potential-null-deref-in-parse_dfs_referrals.patch @@ -0,0 +1,38 @@ +From 92414333eb375ed64f4ae92d34d579e826936480 Mon Sep 17 00:00:00 2001 +From: Paulo Alcantara +Date: Tue, 5 Dec 2023 21:49:29 -0300 +Subject: smb: client: fix potential NULL deref in parse_dfs_referrals() + +From: Paulo Alcantara + +commit 92414333eb375ed64f4ae92d34d579e826936480 upstream. + +If server returned no data for FSCTL_DFS_GET_REFERRALS, @dfs_rsp will +remain NULL and then parse_dfs_referrals() will dereference it. + +Fix this by returning -EIO when no output data is returned. + +Besides, we can't fix it in SMB2_ioctl() as some FSCTLs are allowed to +return no data as per MS-SMB2 2.2.32. 
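In other words, an ioctl that succeeds but returns an empty output buffer has to be promoted to an error before the parser runs. The short user-space sketch below models that guard; the fake transport, struct and return values are invented for illustration and are not the real SMB client code.

#include <stdio.h>
#include <errno.h>

struct referral_rsp {
    int num_referrals;
};

/* Fake transport: reports success but hands back no output buffer. */
static int fake_dfs_ioctl(struct referral_rsp **rsp)
{
    *rsp = NULL;
    return 0;
}

static int parse_dfs_referrals(const struct referral_rsp *rsp)
{
    return rsp->num_referrals;      /* would crash if rsp were NULL */
}

int main(void)
{
    struct referral_rsp *rsp;
    int rc = fake_dfs_ioctl(&rsp);

    if (!rc && !rsp)                /* the added guard: no data means -EIO */
        rc = -EIO;
    if (rc) {
        fprintf(stderr, "dfs referral failed: %d\n", rc);
        return 1;
    }
    return parse_dfs_referrals(rsp) > 0 ? 0 : 1;
}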
+ +Fixes: 9d49640a21bf ("CIFS: implement get_dfs_refer for SMB2+") +Cc: stable@vger.kernel.org +Reported-by: Robert Morris +Signed-off-by: Paulo Alcantara (SUSE) +Signed-off-by: Steve French +Signed-off-by: Greg Kroah-Hartman +--- + fs/smb/client/smb2ops.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/fs/smb/client/smb2ops.c ++++ b/fs/smb/client/smb2ops.c +@@ -2834,6 +2834,8 @@ smb2_get_dfs_refer(const unsigned int xi + usleep_range(512, 2048); + } while (++retry_count < 5); + ++ if (!rc && !dfs_rsp) ++ rc = -EIO; + if (rc) { + if (!is_retryable_error(rc) && rc != -ENOENT && rc != -EOPNOTSUPP) + cifs_tcon_dbg(VFS, "%s: ioctl error: rc=%d\n", __func__, rc); diff --git a/queue-6.1/usb-gadget-core-adjust-uevent-timing-on-gadget-unbind.patch b/queue-6.1/usb-gadget-core-adjust-uevent-timing-on-gadget-unbind.patch new file mode 100644 index 00000000000..07a27e7c43c --- /dev/null +++ b/queue-6.1/usb-gadget-core-adjust-uevent-timing-on-gadget-unbind.patch @@ -0,0 +1,45 @@ +From 73ea73affe8622bdf292de898da869d441da6a9d Mon Sep 17 00:00:00 2001 +From: Roy Luo +Date: Tue, 28 Nov 2023 22:17:56 +0000 +Subject: USB: gadget: core: adjust uevent timing on gadget unbind + +From: Roy Luo + +commit 73ea73affe8622bdf292de898da869d441da6a9d upstream. + +The KOBJ_CHANGE uevent is sent before gadget unbind is actually +executed, resulting in inaccurate uevent emitted at incorrect timing +(the uevent would have USB_UDC_DRIVER variable set while it would +soon be removed). +Move the KOBJ_CHANGE uevent to the end of the unbind function so that +uevent is sent only after the change has been made. + +Fixes: 2ccea03a8f7e ("usb: gadget: introduce UDC Class") +Cc: stable@vger.kernel.org +Signed-off-by: Roy Luo +Link: https://lore.kernel.org/r/20231128221756.2591158-1-royluo@google.com +Signed-off-by: Greg Kroah-Hartman +--- + drivers/usb/gadget/udc/core.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/usb/gadget/udc/core.c ++++ b/drivers/usb/gadget/udc/core.c +@@ -1608,8 +1608,6 @@ static void gadget_unbind_driver(struct + + dev_dbg(&udc->dev, "unbinding gadget driver [%s]\n", driver->function); + +- kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); +- + udc->allow_connect = false; + cancel_work_sync(&udc->vbus_work); + mutex_lock(&udc->connect_lock); +@@ -1629,6 +1627,8 @@ static void gadget_unbind_driver(struct + driver->is_bound = false; + udc->driver = NULL; + mutex_unlock(&udc_lock); ++ ++ kobject_uevent(&udc->dev.kobj, KOBJ_CHANGE); + } + + /* ------------------------------------------------------------------------- */ diff --git a/queue-6.1/usb-gadget-f_hid-fix-report-descriptor-allocation.patch b/queue-6.1/usb-gadget-f_hid-fix-report-descriptor-allocation.patch new file mode 100644 index 00000000000..eb38f7efc9b --- /dev/null +++ b/queue-6.1/usb-gadget-f_hid-fix-report-descriptor-allocation.patch @@ -0,0 +1,54 @@ +From 61890dc28f7d9e9aac8a9471302613824c22fae4 Mon Sep 17 00:00:00 2001 +From: Konstantin Aladyshev +Date: Wed, 6 Dec 2023 11:07:44 +0300 +Subject: usb: gadget: f_hid: fix report descriptor allocation + +From: Konstantin Aladyshev + +commit 61890dc28f7d9e9aac8a9471302613824c22fae4 upstream. + +The commit 89ff3dfac604 ("usb: gadget: f_hid: fix f_hidg lifetime vs +cdev") has introduced a bug that leads to hid device corruption after +the replug operation. +Reverse device managed memory allocation for the report descriptor +to fix the issue. + +Tested: +This change was tested on the AMD EthanolX CRB server with the BMC +based on the OpenBMC distribution. 
The BMC provides KVM functionality +via the USB gadget device: +- before: KVM page refresh results in a broken USB device, +- after: KVM page refresh works without any issues. + +Fixes: 89ff3dfac604 ("usb: gadget: f_hid: fix f_hidg lifetime vs cdev") +Cc: stable@vger.kernel.org +Signed-off-by: Konstantin Aladyshev +Link: https://lore.kernel.org/r/20231206080744.253-2-aladyshev22@gmail.com +Signed-off-by: Greg Kroah-Hartman +--- + drivers/usb/gadget/function/f_hid.c | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +--- a/drivers/usb/gadget/function/f_hid.c ++++ b/drivers/usb/gadget/function/f_hid.c +@@ -88,6 +88,7 @@ static void hidg_release(struct device * + { + struct f_hidg *hidg = container_of(dev, struct f_hidg, dev); + ++ kfree(hidg->report_desc); + kfree(hidg->set_report_buf); + kfree(hidg); + } +@@ -1287,9 +1288,9 @@ static struct usb_function *hidg_alloc(s + hidg->report_length = opts->report_length; + hidg->report_desc_length = opts->report_desc_length; + if (opts->report_desc) { +- hidg->report_desc = devm_kmemdup(&hidg->dev, opts->report_desc, +- opts->report_desc_length, +- GFP_KERNEL); ++ hidg->report_desc = kmemdup(opts->report_desc, ++ opts->report_desc_length, ++ GFP_KERNEL); + if (!hidg->report_desc) { + put_device(&hidg->dev); + --opts->refcnt; diff --git a/queue-6.1/usb-typec-class-fix-typec_altmode_put_partner-to-put-plugs.patch b/queue-6.1/usb-typec-class-fix-typec_altmode_put_partner-to-put-plugs.patch new file mode 100644 index 00000000000..2bf563a8888 --- /dev/null +++ b/queue-6.1/usb-typec-class-fix-typec_altmode_put_partner-to-put-plugs.patch @@ -0,0 +1,51 @@ +From b17b7fe6dd5c6ff74b38b0758ca799cdbb79e26e Mon Sep 17 00:00:00 2001 +From: RD Babiera +Date: Wed, 29 Nov 2023 19:23:50 +0000 +Subject: usb: typec: class: fix typec_altmode_put_partner to put plugs + +From: RD Babiera + +commit b17b7fe6dd5c6ff74b38b0758ca799cdbb79e26e upstream. + +When typec_altmode_put_partner is called by a plug altmode upon release, +the port altmode the plug belongs to will not remove its reference to the +plug. The check to see if the altmode being released evaluates against the +released altmode's partner instead of the calling altmode itself, so change +adev in typec_altmode_put_partner to properly refer to the altmode being +released. + +typec_altmode_set_partner is not run for port altmodes, so also add a check +in typec_altmode_release to prevent typec_altmode_put_partner() calls on +port altmode release. 
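The core of the fix is that the release path has to classify the altmode that is actually going away, not its peer, before deciding how to drop the reference. The small stand-alone model below illustrates that distinction; the enum, struct and helper are invented for the sketch and do not mirror the real typec class types.

#include <stdio.h>
#include <stdbool.h>

enum parent_kind { PORT, PARTNER, PLUG };

struct altmode {
    const char *name;
    enum parent_kind parent;
    struct altmode *partner;        /* peer altmode, may sit under a different parent */
};

static bool is_plug(const struct altmode *a)
{
    return a->parent == PLUG;
}

static void put_partner(struct altmode *releasing)
{
    struct altmode *peer = releasing->partner;

    if (!peer)
        return;
    /*
     * Before the fix the equivalent check looked at the peer instead of
     * the altmode actually being released, so a plug altmode was never
     * recognised as a plug here and its reference was left behind.
     */
    if (is_plug(releasing))
        printf("dropping the reference %s holds on this plug altmode\n", peer->name);
    else
        printf("dropping the reference %s holds on this partner altmode\n", peer->name);
}

int main(void)
{
    struct altmode port_alt = { "port-altmode", PORT, NULL };
    struct altmode plug_alt = { "plug-altmode", PLUG, &port_alt };

    put_partner(&plug_alt);         /* now takes the plug branch as intended */
    return 0;
}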
+ +Fixes: 8a37d87d72f0 ("usb: typec: Bus type for alternate modes") +Cc: stable@vger.kernel.org +Signed-off-by: RD Babiera +Reviewed-by: Heikki Krogerus +Link: https://lore.kernel.org/r/20231129192349.1773623-2-rdbabiera@google.com +Signed-off-by: Greg Kroah-Hartman +--- + drivers/usb/typec/class.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +--- a/drivers/usb/typec/class.c ++++ b/drivers/usb/typec/class.c +@@ -267,7 +267,7 @@ static void typec_altmode_put_partner(st + if (!partner) + return; + +- adev = &partner->adev; ++ adev = &altmode->adev; + + if (is_typec_plug(adev->dev.parent)) { + struct typec_plug *plug = to_typec_plug(adev->dev.parent); +@@ -497,7 +497,8 @@ static void typec_altmode_release(struct + { + struct altmode *alt = to_altmode(to_typec_altmode(dev)); + +- typec_altmode_put_partner(alt); ++ if (!is_typec_port(dev->parent)) ++ typec_altmode_put_partner(alt); + + altmode_id_remove(alt->adev.dev.parent, alt->id); + kfree(alt); diff --git a/queue-6.1/x86-cpu-amd-check-vendor-in-the-amd-microcode-callback.patch b/queue-6.1/x86-cpu-amd-check-vendor-in-the-amd-microcode-callback.patch new file mode 100644 index 00000000000..1ad8a0e97e7 --- /dev/null +++ b/queue-6.1/x86-cpu-amd-check-vendor-in-the-amd-microcode-callback.patch @@ -0,0 +1,52 @@ +From 9b8493dc43044376716d789d07699f17d538a7c4 Mon Sep 17 00:00:00 2001 +From: "Borislav Petkov (AMD)" +Date: Fri, 1 Dec 2023 19:37:27 +0100 +Subject: x86/CPU/AMD: Check vendor in the AMD microcode callback + +From: Borislav Petkov (AMD) + +commit 9b8493dc43044376716d789d07699f17d538a7c4 upstream. + +Commit in Fixes added an AMD-specific microcode callback. However, it +didn't check the CPU vendor the kernel runs on explicitly. + +The only reason the Zenbleed check in it didn't run on other x86 vendors +hardware was pure coincidental luck: + + if (!cpu_has_amd_erratum(c, amd_zenbleed)) + return; + +gives true on other vendors because they don't have those families and +models. + +However, with the removal of the cpu_has_amd_erratum() in + + 05f5f73936fa ("x86/CPU/AMD: Drop now unused CPU erratum checking function") + +that coincidental condition is gone, leading to the zenbleed check +getting executed on other vendors too. + +Add the explicit vendor check for the whole callback as it should've +been done in the first place. + +Fixes: 522b1d69219d ("x86/cpu/amd: Add a Zenbleed fix") +Cc: +Signed-off-by: Borislav Petkov (AMD) +Link: https://lore.kernel.org/r/20231201184226.16749-1-bp@alien8.de +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/kernel/cpu/amd.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -1291,6 +1291,9 @@ static void zenbleed_check_cpu(void *unu + + void amd_check_microcode(void) + { ++ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) ++ return; ++ + on_each_cpu(zenbleed_check_cpu, NULL, 1); + } +
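Since the callback above can be reached on non-AMD systems, the fix amounts to an early vendor guard at its entry point. The stand-alone model below illustrates that pattern; the vendor enum, struct fields and family range are invented for the sketch and do not match the kernel's definitions.

#include <stdio.h>

enum vendor { VENDOR_INTEL, VENDOR_AMD, VENDOR_OTHER };

struct cpuinfo {
    enum vendor vendor;
    unsigned int family;
};

static struct cpuinfo boot_cpu = { VENDOR_INTEL, 6 };

static void zenbleed_check(const struct cpuinfo *c)
{
    /* The family range is a placeholder, not the real list of affected parts. */
    if (c->family >= 0x17 && c->family <= 0x19)
        printf("applying Zenbleed workaround\n");
}

static void amd_check_microcode_model(void)
{
    if (boot_cpu.vendor != VENDOR_AMD)  /* the guard added by the patch */
        return;
    zenbleed_check(&boot_cpu);
}

int main(void)
{
    amd_check_microcode_model();        /* correctly a no-op on the Intel model above */
    return 0;
}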