--- /dev/null
+From b29bf7119d6bbfd04aabb8d82b060fe2a33ef890 Mon Sep 17 00:00:00 2001
+From: Richard Weinberger <richard@nod.at>
+Date: Tue, 3 Dec 2024 12:27:15 +0100
+Subject: jffs2: Fix rtime decompressor
+
+From: Richard Weinberger <richard@nod.at>
+
+commit b29bf7119d6bbfd04aabb8d82b060fe2a33ef890 upstream.
+
+The fix for a memory corruption contained an off-by-one error and
+caused the decompressor to fail in legitimate cases.
+
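+To illustrate the boundary condition (a minimal standalone sketch, not
+the kernel code): writing 'repeat' bytes starting at 'outpos' touches
+indices outpos .. outpos + repeat - 1, so the copy still fits when
+outpos + repeat == destlen. The old '>=' check rejected exactly this
+legitimate "fills the buffer completely" case; '>' rejects only true
+overruns:
+
+	#include <assert.h>
+	#include <stddef.h>
+
+	/* Would writing 'repeat' bytes at 'outpos' stay inside 'destlen'? */
+	static int copy_fits(size_t outpos, size_t repeat, size_t destlen)
+	{
+		return !(outpos + repeat > destlen);	/* the fixed check */
+	}
+
+	int main(void)
+	{
+		assert(copy_fits(90, 10, 100));		/* fills exactly: legit */
+		assert(!copy_fits(91, 10, 100));	/* one byte past: reject */
+		return 0;
+	}
+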
+Cc: Kinsey Moore <kinsey.moore@oarcorp.com>
+Cc: stable@vger.kernel.org
+Fixes: fe051552f5078 ("jffs2: Prevent rtime decompress memory corruption")
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/jffs2/compr_rtime.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/jffs2/compr_rtime.c
++++ b/fs/jffs2/compr_rtime.c
+@@ -95,7 +95,7 @@ static int jffs2_rtime_decompress(unsign
+
+ positions[value]=outpos;
+ if (repeat) {
+- if ((outpos + repeat) >= destlen) {
++ if ((outpos + repeat) > destlen) {
+ return 1;
+ }
+ if (backoffs + repeat >= outpos) {
--- /dev/null
+From fe051552f5078fa02d593847529a3884305a6ffe Mon Sep 17 00:00:00 2001
+From: Kinsey Moore <kinsey.moore@oarcorp.com>
+Date: Tue, 23 Jul 2024 15:58:05 -0500
+Subject: jffs2: Prevent rtime decompress memory corruption
+
+From: Kinsey Moore <kinsey.moore@oarcorp.com>
+
+commit fe051552f5078fa02d593847529a3884305a6ffe upstream.
+
+The rtime decompression routine does not fully check bounds throughout
+the decompression pass and can corrupt memory outside the decompression
+buffer if the compressed data is corrupted. This adds the required
+check to prevent this failure mode.
+
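+For context, the shape of the run-copy step with the new check (a
+condensed sketch of the code around the hunk below, not the full
+kernel function): each literal byte may be followed by a run copied
+from the previous occurrence of the same byte, and without the check a
+corrupted 'repeat' value lets that run walk past the end of the output
+buffer:
+
+	if (repeat) {
+		if ((outpos + repeat) >= destlen)	/* new: reject runs that */
+			return 1;			/* overrun the output    */
+		if (backoffs + repeat >= outpos) {
+			/* overlapping run: must copy byte by byte */
+			while (repeat) {
+				cpage_out[outpos++] = cpage_out[backoffs++];
+				repeat--;
+			}
+		} else {
+			memcpy(&cpage_out[outpos], &cpage_out[backoffs], repeat);
+			outpos += repeat;
+		}
+	}
+
+(The follow-up fix above relaxes the new check from '>=' to '>'.)
+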
+Cc: stable@vger.kernel.org
+Signed-off-by: Kinsey Moore <kinsey.moore@oarcorp.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/jffs2/compr_rtime.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/jffs2/compr_rtime.c
++++ b/fs/jffs2/compr_rtime.c
+@@ -95,6 +95,9 @@ static int jffs2_rtime_decompress(unsign
+
+ positions[value]=outpos;
+ if (repeat) {
++ if ((outpos + repeat) >= destlen) {
++ return 1;
++ }
+ if (backoffs + repeat >= outpos) {
+ while(repeat) {
+ cpage_out[outpos++] = cpage_out[backoffs++];
--- /dev/null
+From f3c7a1ede435e2e45177d7a490a85fb0a0ec96d1 Mon Sep 17 00:00:00 2001
+From: Zheng Yejian <zhengyejian@huaweicloud.com>
+Date: Tue, 22 Oct 2024 16:39:26 +0800
+Subject: mm/damon/vaddr: fix issue in damon_va_evenly_split_region()
+
+From: Zheng Yejian <zhengyejian@huaweicloud.com>
+
+commit f3c7a1ede435e2e45177d7a490a85fb0a0ec96d1 upstream.
+
+Patch series "mm/damon/vaddr: Fix issue in
+damon_va_evenly_split_region()". v2.
+
+According to the logic of damon_va_evenly_split_region(), the following
+split case currently does not meet the expectation:
+
+ Suppose DAMON_MIN_REGION=0x1000,
+ Case: Split [0x0, 0x3000) into 2 pieces, then the result would be
+ actually 3 regions:
+ [0x0, 0x1000), [0x1000, 0x2000), [0x2000, 0x3000)
+ but NOT the expected 2 regions:
+ [0x0, 0x1000), [0x1000, 0x3000) !!!
+
+The root cause is that when calculating the size of each split piece in
+damon_va_evenly_split_region():
+
+ `sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);`
+
+both the division and the ALIGN_DOWN may lose precision, so repeatedly
+splitting off one piece of size 'sz_piece' from the original 'start' to
+'end' can produce more pieces than expected!
+
+To fix it, count each piece as it is split off and make sure no more
+than 'nr_pieces' are produced. In addition, add the above case to
+damon_test_split_evenly().
+
+Also add an 'nr_pieces == 1' check in damon_va_evenly_split_region()
+for better code readability, and add a corresponding kunit testcase.
+
+
+This patch (of 2):
+
+According to the logic of damon_va_evenly_split_region(), the following
+split case currently does not meet the expectation:
+
+ Suppose DAMON_MIN_REGION=0x1000,
+ Case: Split [0x0, 0x3000) into 2 pieces, then the result would be
+ actually 3 regions:
+ [0x0, 0x1000), [0x1000, 0x2000), [0x2000, 0x3000)
+ but NOT the expected 2 regions:
+ [0x0, 0x1000), [0x1000, 0x3000) !!!
+
+The root cause is that when calculating the size of each split piece in
+damon_va_evenly_split_region():
+
+ `sz_piece = ALIGN_DOWN(sz_orig / nr_pieces, DAMON_MIN_REGION);`
+
+both the division and the ALIGN_DOWN may lose precision, so repeatedly
+splitting off one piece of size 'sz_piece' from the original 'start' to
+'end' can produce more pieces than expected!
+
+To fix it, count each piece as it is split off and make sure no more
+than 'nr_pieces' are produced. In addition, add the above case to
+damon_test_split_evenly().
+
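+Worked through in a minimal standalone sketch (not the kernel code),
+using the case above:
+
+	#include <stdio.h>
+
+	#define DAMON_MIN_REGION	0x1000UL
+	#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
+
+	int main(void)
+	{
+		unsigned long start = 0x0, end = 0x3000, nr_pieces = 2;
+		unsigned long sz_piece = ALIGN_DOWN((end - start) / nr_pieces,
+						    DAMON_MIN_REGION);
+		unsigned long s;
+		unsigned int i, nr_old = 1, nr_new = 1;	/* first piece exists */
+
+		/* 0x3000 / 2 = 0x1800, aligned down to 0x1000 */
+		printf("sz_piece = %#lx\n", sz_piece);
+
+		/* old loop: keeps splitting while a full piece still fits,
+		 * so it splits twice more -> 3 regions */
+		for (s = start + sz_piece; s + sz_piece <= end; s += sz_piece)
+			nr_old++;
+
+		/* new loop: splits exactly nr_pieces - 1 more times and lets
+		 * the last region absorb the remainder -> 2 regions */
+		for (s = start + sz_piece, i = 1; i < nr_pieces; s += sz_piece, i++)
+			nr_new++;
+
+		printf("old: %u regions, new: %u regions\n", nr_old, nr_new);
+		return 0;
+	}
+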
+After this patch, damon-operations test passed:
+
+ # ./tools/testing/kunit/kunit.py run damon-operations
+ [...]
+ ============== damon-operations (6 subtests) ===============
+ [PASSED] damon_test_three_regions_in_vmas
+ [PASSED] damon_test_apply_three_regions1
+ [PASSED] damon_test_apply_three_regions2
+ [PASSED] damon_test_apply_three_regions3
+ [PASSED] damon_test_apply_three_regions4
+ [PASSED] damon_test_split_evenly
+ ================ [PASSED] damon-operations =================
+
+Link: https://lkml.kernel.org/r/20241022083927.3592237-1-zhengyejian@huaweicloud.com
+Link: https://lkml.kernel.org/r/20241022083927.3592237-2-zhengyejian@huaweicloud.com
+Fixes: 3f49584b262c ("mm/damon: implement primitives for the virtual memory address spaces")
+Signed-off-by: Zheng Yejian <zhengyejian@huaweicloud.com>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Cc: Fernand Sieber <sieberf@amazon.com>
+Cc: Leonard Foerster <foersleo@amazon.de>
+Cc: Shakeel Butt <shakeel.butt@linux.dev>
+Cc: Ye Weihua <yeweihua4@huawei.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/damon/vaddr-test.h | 1 +
+ mm/damon/vaddr.c | 4 ++--
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/mm/damon/vaddr-test.h
++++ b/mm/damon/vaddr-test.h
+@@ -306,6 +306,7 @@ static void damon_test_split_evenly(stru
+ damon_test_split_evenly_fail(test, 0, 100, 0);
+ damon_test_split_evenly_succ(test, 0, 100, 10);
+ damon_test_split_evenly_succ(test, 5, 59, 5);
++ damon_test_split_evenly_succ(test, 0, 3, 2);
+ damon_test_split_evenly_fail(test, 5, 6, 2);
+
+ damon_destroy_ctx(c);
+--- a/mm/damon/vaddr.c
++++ b/mm/damon/vaddr.c
+@@ -69,6 +69,7 @@ static int damon_va_evenly_split_region(
+ unsigned long sz_orig, sz_piece, orig_end;
+ struct damon_region *n = NULL, *next;
+ unsigned long start;
++ unsigned int i;
+
+ if (!r || !nr_pieces)
+ return -EINVAL;
+@@ -82,8 +83,7 @@ static int damon_va_evenly_split_region(
+
+ r->ar.end = r->ar.start + sz_piece;
+ next = damon_next_region(r);
+- for (start = r->ar.end; start + sz_piece <= orig_end;
+- start += sz_piece) {
++ for (start = r->ar.end, i = 1; i < nr_pieces; start += sz_piece, i++) {
+ n = damon_new_region(start, start + sz_piece);
+ if (!n)
+ return -ENOMEM;
--- /dev/null
+From 044cd9750fe010170f5dc812e4824d98f5ea928c Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Fri, 10 Dec 2021 14:46:40 -0800
+Subject: mm/damon/vaddr-test: split a test function having >1024 bytes frame size
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 044cd9750fe010170f5dc812e4824d98f5ea928c upstream.
+
+On some configurations[1], the 'damon_test_split_evenly()' kunit test
+function has a frame size of more than 1024 bytes, so the build warning
+below is triggered:
+
+ CC mm/damon/vaddr.o
+ In file included from mm/damon/vaddr.c:672:
+ mm/damon/vaddr-test.h: In function 'damon_test_split_evenly':
+ mm/damon/vaddr-test.h:309:1: warning: the frame size of 1064 bytes is larger than 1024 bytes [-Wframe-larger-than=]
+ 309 | }
+ | ^
+
+This commit fixes the warning by splitting the common logic of the
+function out into separate helper functions.
+
+[1] https://lore.kernel.org/linux-mm/202111182146.OV3C4uGr-lkp@intel.com/
+
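+Schematically, the refactor relies on the fact that every function gets
+its own stack frame: with all the checks expanded inline in one test
+function, all of their stack temporaries count against a single frame,
+while moving a repeated check sequence into a helper means each call
+reuses one small frame instead. A minimal standalone sketch (not the
+kunit code; the struct is a stand-in for assertion-macro temporaries):
+
+	#include <stdio.h>
+
+	struct assertion { char buf[200]; };	/* stand-in temporary */
+
+	static void one_check(void)
+	{
+		struct assertion a = {{0}};	/* one frame, ~200 bytes */
+		printf("%p\n", (void *)&a);
+	}
+
+	/* before: three temporaries live in one big frame */
+	static void checks_inlined(void)
+	{
+		struct assertion a = {{0}}, b = {{0}}, c = {{0}};
+		printf("%p %p %p\n", (void *)&a, (void *)&b, (void *)&c);
+	}
+
+	/* after: each call needs only one temporary at a time */
+	static void checks_split(void)
+	{
+		one_check();
+		one_check();
+		one_check();
+	}
+
+	int main(void)
+	{
+		checks_inlined();
+		checks_split();
+		return 0;
+	}
+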
+Link: https://lkml.kernel.org/r/20211201150440.1088-6-sj@kernel.org
+Fixes: 17ccae8bb5c9 ("mm/damon: add kunit tests")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Reported-by: kernel test robot <lkp@intel.com>
+Cc: Brendan Higgins <brendanhiggins@google.com>
+Cc: Shuah Khan <shuah@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/vaddr-test.h | 77 +++++++++++++++++++++++++-------------------------
+ 1 file changed, 40 insertions(+), 37 deletions(-)
+
+--- a/mm/damon/vaddr-test.h
++++ b/mm/damon/vaddr-test.h
+@@ -252,59 +252,62 @@ static void damon_test_apply_three_regio
+ new_three_regions, expected, ARRAY_SIZE(expected));
+ }
+
+-static void damon_test_split_evenly(struct kunit *test)
++static void damon_test_split_evenly_fail(struct kunit *test,
++ unsigned long start, unsigned long end, unsigned int nr_pieces)
+ {
+- struct damon_ctx *c = damon_new_ctx();
+- struct damon_target *t;
+- struct damon_region *r;
+- unsigned long i;
+-
+- KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
+- -EINVAL);
+-
+- t = damon_new_target(42);
+- r = damon_new_region(0, 100);
+- KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 0), -EINVAL);
++ struct damon_target *t = damon_new_target(42);
++ struct damon_region *r = damon_new_region(start, end);
+
+ damon_add_region(r, t);
+- KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 10), 0);
+- KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 10u);
++ KUNIT_EXPECT_EQ(test,
++ damon_va_evenly_split_region(t, r, nr_pieces), -EINVAL);
++ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
+
+- i = 0;
+ damon_for_each_region(r, t) {
+- KUNIT_EXPECT_EQ(test, r->ar.start, i++ * 10);
+- KUNIT_EXPECT_EQ(test, r->ar.end, i * 10);
++ KUNIT_EXPECT_EQ(test, r->ar.start, start);
++ KUNIT_EXPECT_EQ(test, r->ar.end, end);
+ }
++
+ damon_free_target(t);
++}
++
++static void damon_test_split_evenly_succ(struct kunit *test,
++ unsigned long start, unsigned long end, unsigned int nr_pieces)
++{
++ struct damon_target *t = damon_new_target(42);
++ struct damon_region *r = damon_new_region(start, end);
++ unsigned long expected_width = (end - start) / nr_pieces;
++ unsigned long i = 0;
+
+- t = damon_new_target(42);
+- r = damon_new_region(5, 59);
+ damon_add_region(r, t);
+- KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 5), 0);
+- KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
++ KUNIT_EXPECT_EQ(test,
++ damon_va_evenly_split_region(t, r, nr_pieces), 0);
++ KUNIT_EXPECT_EQ(test, damon_nr_regions(t), nr_pieces);
+
+- i = 0;
+ damon_for_each_region(r, t) {
+- if (i == 4)
++ if (i == nr_pieces - 1)
+ break;
+- KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i++);
+- KUNIT_EXPECT_EQ(test, r->ar.end, 5 + 10 * i);
++ KUNIT_EXPECT_EQ(test,
++ r->ar.start, start + i++ * expected_width);
++ KUNIT_EXPECT_EQ(test, r->ar.end, start + i * expected_width);
+ }
+- KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i);
+- KUNIT_EXPECT_EQ(test, r->ar.end, 59ul);
++ KUNIT_EXPECT_EQ(test, r->ar.start, start + i * expected_width);
++ KUNIT_EXPECT_EQ(test, r->ar.end, end);
+ damon_free_target(t);
++}
+
+- t = damon_new_target(42);
+- r = damon_new_region(5, 6);
+- damon_add_region(r, t);
+- KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 2), -EINVAL);
+- KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
++static void damon_test_split_evenly(struct kunit *test)
++{
++ struct damon_ctx *c = damon_new_ctx();
++
++ KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
++ -EINVAL);
++
++ damon_test_split_evenly_fail(test, 0, 100, 0);
++ damon_test_split_evenly_succ(test, 0, 100, 10);
++ damon_test_split_evenly_succ(test, 5, 59, 5);
++ damon_test_split_evenly_fail(test, 5, 6, 2);
+
+- damon_for_each_region(r, t) {
+- KUNIT_EXPECT_EQ(test, r->ar.start, 5ul);
+- KUNIT_EXPECT_EQ(test, r->ar.end, 6ul);
+- }
+- damon_free_target(t);
+ damon_destroy_ctx(c);
+ }
+
kvm-arm64-vgic-its-add-a-data-length-check-in-vgic_its_save_.patch
kvm-arm64-vgic-its-clear-dte-when-mapd-unmaps-a-device.patch
kvm-arm64-vgic-its-clear-ite-when-discard-frees-an-ite.patch
+jffs2-prevent-rtime-decompress-memory-corruption.patch
+jffs2-fix-rtime-decompressor.patch
+mm-damon-vaddr-test-split-a-test-function-having-1024-bytes-frame-size.patch
+mm-damon-vaddr-fix-issue-in-damon_va_evenly_split_region.patch
+xhci-dbc-fix-stall-transfer-event-handling.patch
--- /dev/null
+From 9044ad57b60b0556d42b6f8aa218a68865e810a4 Mon Sep 17 00:00:00 2001
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+Date: Thu, 5 Sep 2024 17:32:49 +0300
+Subject: xhci: dbc: Fix STALL transfer event handling
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+
+commit 9044ad57b60b0556d42b6f8aa218a68865e810a4 upstream.
+
+Don't flush all pending DbC data requests when an endpoint halts.
+
+An endpoint may halt and xHC DbC triggers a STALL error event if there's
+an issue with a bulk data transfer. The transfer should restart once xHC
+DbC receives a ClearFeature(ENDPOINT_HALT) request from the host.
+
+Once xHC DbC restarts, it will start from the TRB pointed to by the
+dequeue field in the endpoint context, which might be the same TRB we
+got the STALL event for. Turn that TRB into a no-op in this case to
+make sure xHC DbC doesn't reuse and retransmit the same TRB after we
+already handled it and gave its corresponding data request back.
+
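+The no-op conversion (trb_to_noop() in the diff below) clears the TRB's
+data fields and sets the No Op TRB type while keeping the cycle bit, so
+the ring's producer/consumer cycle state stays valid and the controller
+skips the TRB instead of re-executing it:
+
+	trb->generic.field[0] = 0;
+	trb->generic.field[1] = 0;
+	trb->generic.field[2] = 0;
+	trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);	/* keep cycle bit */
+	trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
+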
+Other STALL events might be completely bogus.
+Lukasz Bartosik discovered that xHC DbC might issue spurious STALL
+events if the host sends a ClearFeature(ENDPOINT_HALT) request to a
+non-halted endpoint, even without any active bulk transfer.
+
+Assume a STALL event is spurious if it reports 0 bytes transferred and
+the endpoint stopped on the stalled TRB.
+Don't give back the data request corresponding to that TRB in this case.
+
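+Condensed, the classification done in dbc_handle_xfer_event() (see the
+diff below) on a STALL transfer event is:
+
+	/* did the endpoint stop on the stalled TRB itself? */
+	if ((ep_ctx->deq & ~TRB_CYCLE) == event->trans_event.buffer) {
+		if (remain_length == req->length) {
+			/* 0 bytes moved: spurious stall; keep the request
+			 * pending and give it back only when a later event
+			 * matches another TRB on this endpoint */
+			req->status = -COMP_STALL_ERROR;
+			req->actual = 0;
+			return;
+		}
+		/* real transfer issue: no-op the TRB so hardware won't
+		 * re-execute it, then give the request back */
+		trb_to_noop(req->trb);
+	}
+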
+The halted status is per endpoint. Track it with a per-endpoint flag
+instead of the driver-invented, DbC-wide DS_STALLED state.
+DbC remains in the DbC-Configured state even if endpoints halt. There
+is no Stalled state in the DbC Port State Machine (xhci section 7.6.6).
+
+Reported-by: Łukasz Bartosik <ukaszb@chromium.org>
+Closes: https://lore.kernel.org/linux-usb/20240725074857.623299-1-ukaszb@chromium.org/
+Tested-by: Łukasz Bartosik <ukaszb@chromium.org>
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Link: https://lore.kernel.org/r/20240905143300.1959279-2-mathias.nyman@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/host/xhci-dbgcap.c | 135 ++++++++++++++++++++++++-----------------
+ drivers/usb/host/xhci-dbgcap.h | 2
+ 2 files changed, 83 insertions(+), 54 deletions(-)
+
+--- a/drivers/usb/host/xhci-dbgcap.c
++++ b/drivers/usb/host/xhci-dbgcap.c
+@@ -158,16 +158,18 @@ static void xhci_dbc_giveback(struct dbc
+ spin_lock(&dbc->lock);
+ }
+
+-static void xhci_dbc_flush_single_request(struct dbc_request *req)
++static void trb_to_noop(union xhci_trb *trb)
+ {
+- union xhci_trb *trb = req->trb;
+-
+ trb->generic.field[0] = 0;
+ trb->generic.field[1] = 0;
+ trb->generic.field[2] = 0;
+ trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+ trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(TRB_TR_NOOP));
++}
+
++static void xhci_dbc_flush_single_request(struct dbc_request *req)
++{
++ trb_to_noop(req->trb);
+ xhci_dbc_giveback(req, -ESHUTDOWN);
+ }
+
+@@ -637,7 +639,6 @@ static void xhci_dbc_stop(struct xhci_db
+ case DS_DISABLED:
+ return;
+ case DS_CONFIGURED:
+- case DS_STALLED:
+ if (dbc->driver->disconnect)
+ dbc->driver->disconnect(dbc);
+ break;
+@@ -658,6 +659,23 @@ static void xhci_dbc_stop(struct xhci_db
+ }
+
+ static void
++handle_ep_halt_changes(struct xhci_dbc *dbc, struct dbc_ep *dep, bool halted)
++{
++ if (halted) {
++ dev_info(dbc->dev, "DbC Endpoint halted\n");
++ dep->halted = 1;
++
++ } else if (dep->halted) {
++ dev_info(dbc->dev, "DbC Endpoint halt cleared\n");
++ dep->halted = 0;
++
++ if (!list_empty(&dep->list_pending))
++ writel(DBC_DOOR_BELL_TARGET(dep->direction),
++ &dbc->regs->doorbell);
++ }
++}
++
++static void
+ dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
+ {
+ u32 portsc;
+@@ -685,6 +703,7 @@ static void dbc_handle_xfer_event(struct
+ struct xhci_ring *ring;
+ int ep_id;
+ int status;
++ struct xhci_ep_ctx *ep_ctx;
+ u32 comp_code;
+ size_t remain_length;
+ struct dbc_request *req = NULL, *r;
+@@ -694,8 +713,30 @@ static void dbc_handle_xfer_event(struct
+ ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
+ dep = (ep_id == EPID_OUT) ?
+ get_out_ep(dbc) : get_in_ep(dbc);
++ ep_ctx = (ep_id == EPID_OUT) ?
++ dbc_bulkout_ctx(dbc) : dbc_bulkin_ctx(dbc);
+ ring = dep->ring;
+
++ /* Match the pending request: */
++ list_for_each_entry(r, &dep->list_pending, list_pending) {
++ if (r->trb_dma == event->trans_event.buffer) {
++ req = r;
++ break;
++ }
++ if (r->status == -COMP_STALL_ERROR) {
++ dev_warn(dbc->dev, "Give back stale stalled req\n");
++ ring->num_trbs_free++;
++ xhci_dbc_giveback(r, 0);
++ }
++ }
++
++ if (!req) {
++ dev_warn(dbc->dev, "no matched request\n");
++ return;
++ }
++
++ trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);
++
+ switch (comp_code) {
+ case COMP_SUCCESS:
+ remain_length = 0;
+@@ -706,31 +747,49 @@ static void dbc_handle_xfer_event(struct
+ case COMP_TRB_ERROR:
+ case COMP_BABBLE_DETECTED_ERROR:
+ case COMP_USB_TRANSACTION_ERROR:
+- case COMP_STALL_ERROR:
+ dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
+ status = -comp_code;
+ break;
++ case COMP_STALL_ERROR:
++ dev_warn(dbc->dev, "Stall error at bulk TRB %llx, remaining %zu, ep deq %llx\n",
++ event->trans_event.buffer, remain_length, ep_ctx->deq);
++ status = 0;
++ dep->halted = 1;
++
++ /*
++ * xHC DbC may trigger a STALL bulk xfer event when host sends a
++ * ClearFeature(ENDPOINT_HALT) request even if there wasn't an
++ * active bulk transfer.
++ *
++ * Don't give back this transfer request as hardware will later
++ * start processing TRBs starting from this 'STALLED' TRB,
++ * causing TRBs and requests to be out of sync.
++ *
++ * If STALL event shows some bytes were transferred then assume
++ * it's an actual transfer issue and give back the request.
++ * In this case mark the TRB as No-Op to avoid hw from using the
++ * TRB again.
++ */
++
++ if ((ep_ctx->deq & ~TRB_CYCLE) == event->trans_event.buffer) {
++ dev_dbg(dbc->dev, "Ep stopped on Stalled TRB\n");
++ if (remain_length == req->length) {
++ dev_dbg(dbc->dev, "Spurious stall event, keep req\n");
++ req->status = -COMP_STALL_ERROR;
++ req->actual = 0;
++ return;
++ }
++ dev_dbg(dbc->dev, "Give back stalled req, but turn TRB to No-op\n");
++ trb_to_noop(req->trb);
++ }
++ break;
++
+ default:
+ dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
+ status = -comp_code;
+ break;
+ }
+
+- /* Match the pending request: */
+- list_for_each_entry(r, &dep->list_pending, list_pending) {
+- if (r->trb_dma == event->trans_event.buffer) {
+- req = r;
+- break;
+- }
+- }
+-
+- if (!req) {
+- dev_warn(dbc->dev, "no matched request\n");
+- return;
+- }
+-
+- trace_xhci_dbc_handle_transfer(ring, &req->trb->generic);
+-
+ ring->num_trbs_free++;
+ req->actual = req->length - remain_length;
+ xhci_dbc_giveback(req, status);
+@@ -750,7 +809,6 @@ static void inc_evt_deq(struct xhci_ring
+ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
+ {
+ dma_addr_t deq;
+- struct dbc_ep *dep;
+ union xhci_trb *evt;
+ u32 ctrl, portsc;
+ bool update_erdp = false;
+@@ -802,43 +860,17 @@ static enum evtreturn xhci_dbc_do_handle
+ return EVT_DISC;
+ }
+
+- /* Handle endpoint stall event: */
++ /* Check and handle changes in endpoint halt status */
+ ctrl = readl(&dbc->regs->control);
+- if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
+- (ctrl & DBC_CTRL_HALT_OUT_TR)) {
+- dev_info(dbc->dev, "DbC Endpoint stall\n");
+- dbc->state = DS_STALLED;
+-
+- if (ctrl & DBC_CTRL_HALT_IN_TR) {
+- dep = get_in_ep(dbc);
+- xhci_dbc_flush_endpoint_requests(dep);
+- }
+-
+- if (ctrl & DBC_CTRL_HALT_OUT_TR) {
+- dep = get_out_ep(dbc);
+- xhci_dbc_flush_endpoint_requests(dep);
+- }
+-
+- return EVT_DONE;
+- }
++ handle_ep_halt_changes(dbc, get_in_ep(dbc), ctrl & DBC_CTRL_HALT_IN_TR);
++ handle_ep_halt_changes(dbc, get_out_ep(dbc), ctrl & DBC_CTRL_HALT_OUT_TR);
+
+ /* Clear DbC run change bit: */
+ if (ctrl & DBC_CTRL_DBC_RUN_CHANGE) {
+ writel(ctrl, &dbc->regs->control);
+ ctrl = readl(&dbc->regs->control);
+ }
+-
+ break;
+- case DS_STALLED:
+- ctrl = readl(&dbc->regs->control);
+- if (!(ctrl & DBC_CTRL_HALT_IN_TR) &&
+- !(ctrl & DBC_CTRL_HALT_OUT_TR) &&
+- (ctrl & DBC_CTRL_DBC_RUN)) {
+- dbc->state = DS_CONFIGURED;
+- break;
+- }
+-
+- return EVT_DONE;
+ default:
+ dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
+ break;
+@@ -941,9 +973,6 @@ static ssize_t dbc_show(struct device *d
+ case DS_CONFIGURED:
+ p = "configured";
+ break;
+- case DS_STALLED:
+- p = "stalled";
+- break;
+ default:
+ p = "unknown";
+ }
+--- a/drivers/usb/host/xhci-dbgcap.h
++++ b/drivers/usb/host/xhci-dbgcap.h
+@@ -81,7 +81,6 @@ enum dbc_state {
+ DS_ENABLED,
+ DS_CONNECTED,
+ DS_CONFIGURED,
+- DS_STALLED,
+ };
+
+ struct dbc_ep {
+@@ -89,6 +88,7 @@ struct dbc_ep {
+ struct list_head list_pending;
+ struct xhci_ring *ring;
+ unsigned int direction:1;
++ unsigned int halted:1;
+ };
+
+ #define DBC_QUEUE_SIZE 16