--- /dev/null
+From 92ca170e717d5c7a1890a0a9acee621f6d5af3d5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2020 11:28:45 +0200
+Subject: iio: adc: stm32-adc: fix device used to request dma
+
+From: Fabrice Gasnier <fabrice.gasnier@st.com>
+
+[ Upstream commit 52cd91c27f3908b88e8b25aed4a4d20660abcc45 ]
+
+The DMA channel request should use the device struct from the platform
+device struct. Currently it uses the iio device struct, but at probe time
+that device isn't registered yet (device_register() is only called later,
+from iio_device_register()). Since commit 71723a96b8b1 ("dmaengine: Create
+symlinks between DMA channels and slaves"), a warning message is printed
+because the sysfs links can't be created for an unregistered device:
+- Cannot create DMA slave symlink
+- Cannot create DMA dma:rx symlink
+
+Fix this by using the device struct from the platform device to request
+the DMA channel.
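+
+A minimal sketch of the intended call (assuming the usual
+"struct device *dev = &pdev->dev" local in probe; not the full driver
+code):
+
+    /* request against the platform device, which is already registered */
+    adc->dma_chan = dma_request_chan(dev, "rx");
+
+instead of dma_request_chan(&indio_dev->dev, "rx"), whose struct device
+is only registered later by iio_device_register().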
+
+Fixes: 2763ea0585c99 ("iio: adc: stm32: add optional dma support")
+
+Signed-off-by: Fabrice Gasnier <fabrice.gasnier@st.com>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/adc/stm32-adc.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
+index a2279cccb584..94fde39d9ff7 100644
+--- a/drivers/iio/adc/stm32-adc.c
++++ b/drivers/iio/adc/stm32-adc.c
+@@ -1757,18 +1757,18 @@ static int stm32_adc_chan_of_init(struct iio_dev *indio_dev)
+ return 0;
+ }
+
+-static int stm32_adc_dma_request(struct iio_dev *indio_dev)
++static int stm32_adc_dma_request(struct device *dev, struct iio_dev *indio_dev)
+ {
+ struct stm32_adc *adc = iio_priv(indio_dev);
+ struct dma_slave_config config;
+ int ret;
+
+- adc->dma_chan = dma_request_chan(&indio_dev->dev, "rx");
++ adc->dma_chan = dma_request_chan(dev, "rx");
+ if (IS_ERR(adc->dma_chan)) {
+ ret = PTR_ERR(adc->dma_chan);
+ if (ret != -ENODEV) {
+ if (ret != -EPROBE_DEFER)
+- dev_err(&indio_dev->dev,
++ dev_err(dev,
+ "DMA channel request failed with %d\n",
+ ret);
+ return ret;
+@@ -1874,7 +1874,7 @@ static int stm32_adc_probe(struct platform_device *pdev)
+ if (ret < 0)
+ return ret;
+
+- ret = stm32_adc_dma_request(indio_dev);
++ ret = stm32_adc_dma_request(dev, indio_dev);
+ if (ret < 0)
+ return ret;
+
+--
+2.25.1
+
--- /dev/null
+From d5488dc1feaf53e0874960f82391a72cbd96f246 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jan 2020 10:08:01 +0200
+Subject: iio: adc: stm32-adc: Use dma_request_chan() instead
+ dma_request_slave_channel()
+
+From: Peter Ujfalusi <peter.ujfalusi@ti.com>
+
+[ Upstream commit 735404b846dffcb320264f62b76e6f70012214dd ]
+
+dma_request_slave_channel() is a wrapper on top of dma_request_chan()
+that eats up the error code.
+
+By using dma_request_chan() directly, the driver can support deferred
+probing against DMA.
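+
+The error handling this enables looks roughly like the hunk below (a
+sketch only; "chan" and "ret" are illustrative locals, and DMA stays
+optional so -ENODEV falls back to interrupt mode):
+
+    chan = dma_request_chan(&indio_dev->dev, "rx");
+    if (IS_ERR(chan)) {
+        ret = PTR_ERR(chan);
+        if (ret != -ENODEV) {
+            if (ret != -EPROBE_DEFER)
+                dev_err(&indio_dev->dev, "DMA channel request failed\n");
+            return ret;
+        }
+        chan = NULL;    /* no DMA configured: stay in IRQ mode */
+    }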
+
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
+Acked-by: Fabrice Gasnier <fabrice.gasnier@st.com>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/adc/stm32-adc.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
+index 9f63ceb15865..a2279cccb584 100644
+--- a/drivers/iio/adc/stm32-adc.c
++++ b/drivers/iio/adc/stm32-adc.c
+@@ -1763,9 +1763,21 @@ static int stm32_adc_dma_request(struct iio_dev *indio_dev)
+ struct dma_slave_config config;
+ int ret;
+
+- adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx");
+- if (!adc->dma_chan)
++ adc->dma_chan = dma_request_chan(&indio_dev->dev, "rx");
++ if (IS_ERR(adc->dma_chan)) {
++ ret = PTR_ERR(adc->dma_chan);
++ if (ret != -ENODEV) {
++ if (ret != -EPROBE_DEFER)
++ dev_err(&indio_dev->dev,
++ "DMA channel request failed with %d\n",
++ ret);
++ return ret;
++ }
++
++ /* DMA is optional: fall back to IRQ mode */
++ adc->dma_chan = NULL;
+ return 0;
++ }
+
+ adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev,
+ STM32_DMA_BUFFER_SIZE,
+--
+2.25.1
+
--- /dev/null
+From e9e9dcf0c172af759916cef9cf4b51e86e3be82a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2020 11:28:46 +0200
+Subject: iio: adc: stm32-dfsdm: fix device used to request dma
+
+From: Fabrice Gasnier <fabrice.gasnier@st.com>
+
+[ Upstream commit b455d06e6fb3c035711e8aab1ca18082ccb15d87 ]
+
+The DMA channel request should use the device struct from the platform
+device struct. Currently it uses the iio device struct, but at probe time
+that device isn't registered yet (device_register() is only called later,
+from iio_device_register()). Since commit 71723a96b8b1 ("dmaengine: Create
+symlinks between DMA channels and slaves"), a warning message is printed
+because the sysfs links can't be created for an unregistered device:
+- Cannot create DMA slave symlink
+- Cannot create DMA dma:rx symlink
+
+Fix this by using the device struct from the platform device to request
+the DMA channel.
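+
+For the DFSDM driver the platform device's struct device additionally has
+to be threaded through the per-variant init hooks; a sketch of the shape
+of the change (assuming the usual "dev = &pdev->dev" local in probe):
+
+    ret = dev_data->init(dev, iio);
+
+with the init hooks then passing dev down to
+stm32_dfsdm_dma_request(dev, indio_dev) instead of using &indio_dev->dev.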
+
+Fixes: eca949800d2d ("IIO: ADC: add stm32 DFSDM support for PDM microphone")
+
+Signed-off-by: Fabrice Gasnier <fabrice.gasnier@st.com>
+Cc: <Stable@vger.kernel.org>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/adc/stm32-dfsdm-adc.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
+index 4a9337a3f9a3..c2948defa785 100644
+--- a/drivers/iio/adc/stm32-dfsdm-adc.c
++++ b/drivers/iio/adc/stm32-dfsdm-adc.c
+@@ -62,7 +62,7 @@ enum sd_converter_type {
+
+ struct stm32_dfsdm_dev_data {
+ int type;
+- int (*init)(struct iio_dev *indio_dev);
++ int (*init)(struct device *dev, struct iio_dev *indio_dev);
+ unsigned int num_channels;
+ const struct regmap_config *regmap_cfg;
+ };
+@@ -1359,11 +1359,12 @@ static void stm32_dfsdm_dma_release(struct iio_dev *indio_dev)
+ }
+ }
+
+-static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev)
++static int stm32_dfsdm_dma_request(struct device *dev,
++ struct iio_dev *indio_dev)
+ {
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+- adc->dma_chan = dma_request_chan(&indio_dev->dev, "rx");
++ adc->dma_chan = dma_request_chan(dev, "rx");
+ if (IS_ERR(adc->dma_chan)) {
+ int ret = PTR_ERR(adc->dma_chan);
+
+@@ -1419,7 +1420,7 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
+ &adc->dfsdm->ch_list[ch->channel]);
+ }
+
+-static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
++static int stm32_dfsdm_audio_init(struct device *dev, struct iio_dev *indio_dev)
+ {
+ struct iio_chan_spec *ch;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+@@ -1446,10 +1447,10 @@ static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
+ indio_dev->num_channels = 1;
+ indio_dev->channels = ch;
+
+- return stm32_dfsdm_dma_request(indio_dev);
++ return stm32_dfsdm_dma_request(dev, indio_dev);
+ }
+
+-static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
++static int stm32_dfsdm_adc_init(struct device *dev, struct iio_dev *indio_dev)
+ {
+ struct iio_chan_spec *ch;
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+@@ -1493,17 +1494,17 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
+ init_completion(&adc->completion);
+
+ /* Optionally request DMA */
+- ret = stm32_dfsdm_dma_request(indio_dev);
++ ret = stm32_dfsdm_dma_request(dev, indio_dev);
+ if (ret) {
+ if (ret != -ENODEV) {
+ if (ret != -EPROBE_DEFER)
+- dev_err(&indio_dev->dev,
++ dev_err(dev,
+ "DMA channel request failed with %d\n",
+ ret);
+ return ret;
+ }
+
+- dev_dbg(&indio_dev->dev, "No DMA support\n");
++ dev_dbg(dev, "No DMA support\n");
+ return 0;
+ }
+
+@@ -1616,7 +1617,7 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
+ adc->dfsdm->fl_list[adc->fl_id].sync_mode = val;
+
+ adc->dev_data = dev_data;
+- ret = dev_data->init(iio);
++ ret = dev_data->init(dev, iio);
+ if (ret < 0)
+ return ret;
+
+--
+2.25.1
+
--- /dev/null
+From 95b7314894c86abf48a770abe6698d1742fc84b5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Jan 2020 13:45:32 +0200
+Subject: iio: adc: stm32-dfsdm: Use dma_request_chan() instead
+ dma_request_slave_channel()
+
+From: Peter Ujfalusi <peter.ujfalusi@ti.com>
+
+[ Upstream commit a9ab624edd9186fbad734cfe5d606a6da3ca34db ]
+
+dma_request_slave_channel() is a wrapper on top of dma_request_chan()
+that eats up the error code.
+
+By using dma_request_chan() directly, the driver can support deferred
+probing against DMA.
+
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
+Acked-by: Olivier Moysan <olivier.moysan@st.com>
+Acked-by: Fabrice Gasnier <fabrice.gasnier@st.com>
+Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iio/adc/stm32-dfsdm-adc.c | 21 +++++++++++++++++----
+ 1 file changed, 17 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
+index 3ae0366a7b58..4a9337a3f9a3 100644
+--- a/drivers/iio/adc/stm32-dfsdm-adc.c
++++ b/drivers/iio/adc/stm32-dfsdm-adc.c
+@@ -1363,9 +1363,13 @@ static int stm32_dfsdm_dma_request(struct iio_dev *indio_dev)
+ {
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+
+- adc->dma_chan = dma_request_slave_channel(&indio_dev->dev, "rx");
+- if (!adc->dma_chan)
+- return -EINVAL;
++ adc->dma_chan = dma_request_chan(&indio_dev->dev, "rx");
++ if (IS_ERR(adc->dma_chan)) {
++ int ret = PTR_ERR(adc->dma_chan);
++
++ adc->dma_chan = NULL;
++ return ret;
++ }
+
+ adc->rx_buf = dma_alloc_coherent(adc->dma_chan->device->dev,
+ DFSDM_DMA_BUFFER_SIZE,
+@@ -1489,7 +1493,16 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
+ init_completion(&adc->completion);
+
+ /* Optionally request DMA */
+- if (stm32_dfsdm_dma_request(indio_dev)) {
++ ret = stm32_dfsdm_dma_request(indio_dev);
++ if (ret) {
++ if (ret != -ENODEV) {
++ if (ret != -EPROBE_DEFER)
++ dev_err(&indio_dev->dev,
++ "DMA channel request failed with %d\n",
++ ret);
++ return ret;
++ }
++
+ dev_dbg(&indio_dev->dev, "No DMA support\n");
+ return 0;
+ }
+--
+2.25.1
+
--- /dev/null
+From b540acc97c22e9bad68fdf7cfc98772fd2dede2f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Apr 2020 23:48:43 +0100
+Subject: rxrpc: Fix ack discard
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 441fdee1eaf050ef0040bde0d7af075c1c6a6d8b ]
+
+The Rx protocol has a "previousPacket" field in it that is not handled in
+the same way by all protocol implementations. Sometimes it contains the
+serial number of the last DATA packet received, sometimes the sequence
+number of the last DATA packet received and sometimes the highest sequence
+number so far received.
+
+AF_RXRPC is using this to weed out ACKs that are out of date (it's possible
+for ACK packets to get reordered on the wire), but this does not work with
+OpenAFS which will just stick the sequence number of the last packet seen
+into previousPacket.
+
+The issue being seen is that big AFS FS.StoreData RPCs (e.g. of ~256MiB)
+are timing out when partly sent. A trace was captured, with an additional
+tracepoint to show ACKs being discarded in rxrpc_input_ack(). Here's an
+excerpt showing the problem.
+
+ 52873.203230: rxrpc_tx_data: c=000004ae DATA ed1a3584:00000002 0002449c q=00024499 fl=09
+
+A DATA packet with sequence number 00024499 has been transmitted (the "q="
+field).
+
+ ...
+ 52873.243296: rxrpc_rx_ack: c=000004ae 00012a2b DLY r=00024499 f=00024497 p=00024496 n=0
+ 52873.243376: rxrpc_rx_ack: c=000004ae 00012a2c IDL r=0002449b f=00024499 p=00024498 n=0
+ 52873.243383: rxrpc_rx_ack: c=000004ae 00012a2d OOS r=0002449d f=00024499 p=0002449a n=2
+
+The Out-Of-Sequence ACK indicates that the server didn't see DATA sequence
+number 00024499, but did see seq 0002449a: previousPacket, shown as "p=",
+skipped over the missing number, while firstPacket, "f=", shows that the
+bottom of the window is still set at 00024499.
+
+ 52873.252663: rxrpc_retransmit: c=000004ae q=24499 a=02 xp=14581537
+ 52873.252664: rxrpc_tx_data: c=000004ae DATA ed1a3584:00000002 000244bc q=00024499 fl=0b *RETRANS*
+
+The packet has been retransmitted. Retransmission recurs until the peer
+says it got the packet.
+
+ 52873.271013: rxrpc_rx_ack: c=000004ae 00012a31 OOS r=000244a1 f=00024499 p=0002449e n=6
+
+More OOS ACKs indicate that the other packets that are already in the
+transmission pipeline are being received. The specific-ACK list is up to 6
+ACKs and NAKs.
+
+ ...
+ 52873.284792: rxrpc_rx_ack: c=000004ae 00012a49 OOS r=000244b9 f=00024499 p=000244b6 n=30
+ 52873.284802: rxrpc_retransmit: c=000004ae q=24499 a=0a xp=63505500
+ 52873.284804: rxrpc_tx_data: c=000004ae DATA ed1a3584:00000002 000244c2 q=00024499 fl=0b *RETRANS*
+ 52873.287468: rxrpc_rx_ack: c=000004ae 00012a4a OOS r=000244ba f=00024499 p=000244b7 n=31
+ 52873.287478: rxrpc_rx_ack: c=000004ae 00012a4b OOS r=000244bb f=00024499 p=000244b8 n=32
+
+At this point, the server's receive window is full (n=32) with presumably 1
+NAK'd packet and 31 ACK'd packets. We can't transmit any more packets.
+
+ 52873.287488: rxrpc_retransmit: c=000004ae q=24499 a=0a xp=61327980
+ 52873.287489: rxrpc_tx_data: c=000004ae DATA ed1a3584:00000002 000244c3 q=00024499 fl=0b *RETRANS*
+ 52873.293850: rxrpc_rx_ack: c=000004ae 00012a4c DLY r=000244bc f=000244a0 p=00024499 n=25
+
+And now we've received an ACK indicating that a DATA retransmission was
+received. 7 packets have been processed (the occupied part of the window
+moved, as indicated by f= and n=).
+
+ 52873.293853: rxrpc_rx_discard_ack: c=000004ae r=00012a4c 000244a0<00024499 00024499<000244b8
+
+However, the DLY ACK gets discarded because its previousPacket has gone
+backwards (from p=000244b8, in the ACK at 52873.287478 to p=00024499 in the
+ACK at 52873.293850).
+
+We then end up in a continuous cycle of retransmit/discard. kafs fails to
+update its window because it's discarding the ACKs and can't transmit an
+extra packet that would clear the issue because the window is full.
+OpenAFS doesn't change the previousPacket value in the ACKs because no new
+DATA packets are received with a different previousPacket number.
+
+Fix this by altering the discard check to only discard an ACK based on
+previousPacket if there was no advance in the firstPacket. This allows us
+to transmit a new packet which will cause previousPacket to advance in the
+next ACK.
+
+The check, however, needs to allow for the possibility that previousPacket
+may actually have had the serial number placed in it instead - in which
+case it will go outside the window and we should ignore it.
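+
+Applied to the trace above, the DLY ACK at 52873.293850 is now kept:
+
+    first_pkt = 000244a0, base (ackr_first_seq) = 00024499
+    after(first_pkt, base) -> true, so the ACK is accepted
+
+even though previousPacket regressed from 000244b8 to 00024499.  Only if
+firstPacket had not advanced would the previousPacket comparison (and the
+base + tx_winsize sanity limit for serial numbers) be consulted.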
+
+Fixes: 1a2391c30c0b ("rxrpc: Fix detection of out of order acks")
+Reported-by: Dave Botsch <botsch@cnf.cornell.edu>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rxrpc/input.c | 30 ++++++++++++++++++++++++++----
+ 1 file changed, 26 insertions(+), 4 deletions(-)
+
+diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
+index 2f22f082a66c..3be4177baf70 100644
+--- a/net/rxrpc/input.c
++++ b/net/rxrpc/input.c
+@@ -802,6 +802,30 @@ static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
+ }
+ }
+
++/*
++ * Return true if the ACK is valid - ie. it doesn't appear to have regressed
++ * with respect to the ack state conveyed by preceding ACKs.
++ */
++static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
++ rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt)
++{
++ rxrpc_seq_t base = READ_ONCE(call->ackr_first_seq);
++
++ if (after(first_pkt, base))
++ return true; /* The window advanced */
++
++ if (before(first_pkt, base))
++ return false; /* firstPacket regressed */
++
++ if (after_eq(prev_pkt, call->ackr_prev_seq))
++ return true; /* previousPacket hasn't regressed. */
++
++ /* Some rx implementations put a serial number in previousPacket. */
++ if (after_eq(prev_pkt, base + call->tx_winsize))
++ return false;
++ return true;
++}
++
+ /*
+ * Process an ACK packet.
+ *
+@@ -865,8 +889,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ }
+
+ /* Discard any out-of-order or duplicate ACKs (outside lock). */
+- if (before(first_soft_ack, call->ackr_first_seq) ||
+- before(prev_pkt, call->ackr_prev_seq)) {
++ if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
+ trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
+ first_soft_ack, call->ackr_first_seq,
+ prev_pkt, call->ackr_prev_seq);
+@@ -882,8 +905,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+ spin_lock(&call->input_lock);
+
+ /* Discard any out-of-order or duplicate ACKs (inside lock). */
+- if (before(first_soft_ack, call->ackr_first_seq) ||
+- before(prev_pkt, call->ackr_prev_seq)) {
++ if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
+ trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
+ first_soft_ack, call->ackr_first_seq,
+ prev_pkt, call->ackr_prev_seq);
+--
+2.25.1
+
--- /dev/null
+From 5d485443a34cc9955ddd46b51cfc69ee84968c59 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Apr 2020 22:06:54 +0100
+Subject: rxrpc: Trace discarded ACKs
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit d1f129470e6cb79b8b97fecd12689f6eb49e27fe ]
+
+Add a tracepoint to track received ACKs that are discarded due to being
+outside of the Tx window.
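+
+With this, a discarded ACK leaves a trace line showing the call, the ACK
+serial, and the firstPacket/previousPacket values alongside the previously
+recorded ones, e.g. (line borrowed from the capture analysed in the
+follow-up "rxrpc: Fix ack discard" patch):
+
+  rxrpc_rx_discard_ack: c=000004ae r=00012a4c 000244a0<00024499 00024499<000244b8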
+
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/trace/events/rxrpc.h | 35 +++++++++++++++++++++++++++++++++++
+ net/rxrpc/input.c | 12 ++++++++++--
+ 2 files changed, 45 insertions(+), 2 deletions(-)
+
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
+index ab75f261f04a..ba9efdc848f9 100644
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -1541,6 +1541,41 @@ TRACE_EVENT(rxrpc_notify_socket,
+ __entry->serial)
+ );
+
++TRACE_EVENT(rxrpc_rx_discard_ack,
++ TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial,
++ rxrpc_seq_t first_soft_ack, rxrpc_seq_t call_ackr_first,
++ rxrpc_seq_t prev_pkt, rxrpc_seq_t call_ackr_prev),
++
++ TP_ARGS(debug_id, serial, first_soft_ack, call_ackr_first,
++ prev_pkt, call_ackr_prev),
++
++ TP_STRUCT__entry(
++ __field(unsigned int, debug_id )
++ __field(rxrpc_serial_t, serial )
++ __field(rxrpc_seq_t, first_soft_ack)
++ __field(rxrpc_seq_t, call_ackr_first)
++ __field(rxrpc_seq_t, prev_pkt)
++ __field(rxrpc_seq_t, call_ackr_prev)
++ ),
++
++ TP_fast_assign(
++ __entry->debug_id = debug_id;
++ __entry->serial = serial;
++ __entry->first_soft_ack = first_soft_ack;
++ __entry->call_ackr_first = call_ackr_first;
++ __entry->prev_pkt = prev_pkt;
++ __entry->call_ackr_prev = call_ackr_prev;
++ ),
++
++ TP_printk("c=%08x r=%08x %08x<%08x %08x<%08x",
++ __entry->debug_id,
++ __entry->serial,
++ __entry->first_soft_ack,
++ __entry->call_ackr_first,
++ __entry->prev_pkt,
++ __entry->call_ackr_prev)
++ );
++
+ #endif /* _TRACE_RXRPC_H */
+
+ /* This part must be outside protection */
+diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
+index e438bfd3fdf5..2f22f082a66c 100644
+--- a/net/rxrpc/input.c
++++ b/net/rxrpc/input.c
+@@ -866,8 +866,12 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+
+ /* Discard any out-of-order or duplicate ACKs (outside lock). */
+ if (before(first_soft_ack, call->ackr_first_seq) ||
+- before(prev_pkt, call->ackr_prev_seq))
++ before(prev_pkt, call->ackr_prev_seq)) {
++ trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
++ first_soft_ack, call->ackr_first_seq,
++ prev_pkt, call->ackr_prev_seq);
+ return;
++ }
+
+ buf.info.rxMTU = 0;
+ ioffset = offset + nr_acks + 3;
+@@ -879,8 +883,12 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
+
+ /* Discard any out-of-order or duplicate ACKs (inside lock). */
+ if (before(first_soft_ack, call->ackr_first_seq) ||
+- before(prev_pkt, call->ackr_prev_seq))
++ before(prev_pkt, call->ackr_prev_seq)) {
++ trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
++ first_soft_ack, call->ackr_first_seq,
++ prev_pkt, call->ackr_prev_seq);
+ goto out;
++ }
+ call->acks_latest_ts = skb->tstamp;
+
+ call->ackr_first_seq = first_soft_ack;
+--
+2.25.1
+
--- /dev/null
+From 323a1a9d14cdca52c6528257d8c6345ca4cd6862 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 May 2020 09:52:22 -0400
+Subject: sched/fair: Fix enqueue_task_fair() warning some more
+
+From: Phil Auld <pauld@redhat.com>
+
+[ Upstream commit b34cb07dde7c2346dec73d053ce926aeaa087303 ]
+
+The recent patch fe61468b2cb ("sched/fair: Fix enqueue_task_fair warning")
+did not fully resolve the issues with the rq->tmp_alone_branch !=
+&rq->leaf_cfs_rq_list warning in enqueue_task_fair(). There is a case where
+the first for_each_sched_entity loop exits due to on_rq, having incompletely
+updated the list. In this case the second for_each_sched_entity loop can
+further modify se. The later code to fix up the list management fails to do
+what is needed because se does not point to the sched_entity which broke out
+of the first loop. The list is not fixed up because the throttled parent was
+already added back to the list by a task enqueue in a parallel child hierarchy.
+
+Address this by calling list_add_leaf_cfs_rq if there are throttled parents
+while doing the second for_each_sched_entity loop.
+
+Fixes: fe61468b2cb ("sched/fair: Fix enqueue_task_fair warning")
+Suggested-by: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Phil Auld <pauld@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
+Link: https://lkml.kernel.org/r/20200512135222.GC2201@lorien.usersys.redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 42cc3de24dcc..193b6ab74d7f 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5254,6 +5254,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ /* end evaluation on encountering a throttled cfs_rq */
+ if (cfs_rq_throttled(cfs_rq))
+ goto enqueue_throttle;
++
++ /*
++ * One parent has been throttled and cfs_rq removed from the
++ * list. Add it back to not break the leaf list.
++ */
++ if (throttled_hierarchy(cfs_rq))
++ list_add_leaf_cfs_rq(cfs_rq);
+ }
+
+ enqueue_throttle:
+--
+2.25.1
+
--- /dev/null
+From 3e0ac8ab87522fa993bab9042ba298dd435203eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Mar 2020 09:42:08 +0100
+Subject: sched/fair: Fix reordering of enqueue/dequeue_task_fair()
+
+From: Vincent Guittot <vincent.guittot@linaro.org>
+
+[ Upstream commit 5ab297bab984310267734dfbcc8104566658ebef ]
+
+Even when a cgroup is throttled, the group se of a child cgroup can still
+be enqueued and its gse->on_rq stays true. When a task is enqueued on such
+a child, we still have to update the load_avg and increase h_nr_running of
+the throttled cfs_rq. Nevertheless, the 1st for_each_sched_entity() loop is
+skipped because of gse->on_rq == true, and the 2nd loop because the cfs_rq
+is throttled, whereas in such a case we have to both update load_avg with
+the old h_nr_running and increase h_nr_running.
+
+The same sequence can happen during dequeue when se moves to its parent
+before breaking out of the 1st loop.
+
+Note that the update of load_avg will effectively happen only once in order
+to sync up to the throttled time. Next call for updating load_avg will stop
+early because the clock stays unchanged.
+
+Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Fixes: 6d4d22468dae ("sched/fair: Reorder enqueue/dequeue_task_fair path")
+Link: https://lkml.kernel.org/r/20200306084208.12583-1-vincent.guittot@linaro.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 0e042e847ed3..42cc3de24dcc 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5245,15 +5245,15 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+
+- /* end evaluation on encountering a throttled cfs_rq */
+- if (cfs_rq_throttled(cfs_rq))
+- goto enqueue_throttle;
+-
+ update_load_avg(cfs_rq, se, UPDATE_TG);
+ update_cfs_group(se);
+
+ cfs_rq->h_nr_running++;
+ cfs_rq->idle_h_nr_running += idle_h_nr_running;
++
++ /* end evaluation on encountering a throttled cfs_rq */
++ if (cfs_rq_throttled(cfs_rq))
++ goto enqueue_throttle;
+ }
+
+ enqueue_throttle:
+@@ -5341,15 +5341,16 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+
+- /* end evaluation on encountering a throttled cfs_rq */
+- if (cfs_rq_throttled(cfs_rq))
+- goto dequeue_throttle;
+-
+ update_load_avg(cfs_rq, se, UPDATE_TG);
+ update_cfs_group(se);
+
+ cfs_rq->h_nr_running--;
+ cfs_rq->idle_h_nr_running -= idle_h_nr_running;
++
++ /* end evaluation on encountering a throttled cfs_rq */
++ if (cfs_rq_throttled(cfs_rq))
++ goto dequeue_throttle;
++
+ }
+
+ dequeue_throttle:
+--
+2.25.1
+
--- /dev/null
+From 96f9d08b39400a0a44aea0507e75ca7c3ff05fa6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2020 09:52:14 +0000
+Subject: sched/fair: Reorder enqueue/dequeue_task_fair path
+
+From: Vincent Guittot <vincent.guittot@linaro.org>
+
+[ Upstream commit 6d4d22468dae3d8757af9f8b81b848a76ef4409d ]
+
+The walk through the cgroup hierarchy during the enqueue/dequeue of a task
+is split into 2 distinct parts for throttled cfs_rqs, without any added
+value but making the code less readable.
+
+Change the code ordering such that everything related to a cfs_rq
+(throttled or not) will be done in the same loop.
+
+In addition, the same ordering of steps is used when updating a cfs_rq:
+
+ - update_load_avg
+ - update_cfs_group
+ - update *h_nr_running
+
+This reordering enables the use of h_nr_running in the PELT algorithm.
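+
+For reference, the second enqueue loop after this change looks as follows
+(the first loop additionally calls enqueue_entity(); the dequeue side
+mirrors this with decrements):
+
+    for_each_sched_entity(se) {
+        cfs_rq = cfs_rq_of(se);
+
+        /* end evaluation on encountering a throttled cfs_rq */
+        if (cfs_rq_throttled(cfs_rq))
+            goto enqueue_throttle;
+
+        update_load_avg(cfs_rq, se, UPDATE_TG);
+        update_cfs_group(se);
+
+        cfs_rq->h_nr_running++;
+        cfs_rq->idle_h_nr_running += idle_h_nr_running;
+    }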
+
+No functional or performance changes are expected, and none have been
+noticed during tests.
+
+Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
+Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Reviewed-by: "Dietmar Eggemann <dietmar.eggemann@arm.com>"
+Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Juri Lelli <juri.lelli@redhat.com>
+Cc: Valentin Schneider <valentin.schneider@arm.com>
+Cc: Phil Auld <pauld@redhat.com>
+Cc: Hillf Danton <hdanton@sina.com>
+Link: https://lore.kernel.org/r/20200224095223.13361-5-mgorman@techsingularity.net
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 42 ++++++++++++++++++++----------------------
+ 1 file changed, 20 insertions(+), 22 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index eeaf34d65742..0e042e847ed3 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -5232,32 +5232,31 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ cfs_rq = cfs_rq_of(se);
+ enqueue_entity(cfs_rq, se, flags);
+
+- /*
+- * end evaluation on encountering a throttled cfs_rq
+- *
+- * note: in the case of encountering a throttled cfs_rq we will
+- * post the final h_nr_running increment below.
+- */
+- if (cfs_rq_throttled(cfs_rq))
+- break;
+ cfs_rq->h_nr_running++;
+ cfs_rq->idle_h_nr_running += idle_h_nr_running;
+
++ /* end evaluation on encountering a throttled cfs_rq */
++ if (cfs_rq_throttled(cfs_rq))
++ goto enqueue_throttle;
++
+ flags = ENQUEUE_WAKEUP;
+ }
+
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+- cfs_rq->h_nr_running++;
+- cfs_rq->idle_h_nr_running += idle_h_nr_running;
+
++ /* end evaluation on encountering a throttled cfs_rq */
+ if (cfs_rq_throttled(cfs_rq))
+- break;
++ goto enqueue_throttle;
+
+ update_load_avg(cfs_rq, se, UPDATE_TG);
+ update_cfs_group(se);
++
++ cfs_rq->h_nr_running++;
++ cfs_rq->idle_h_nr_running += idle_h_nr_running;
+ }
+
++enqueue_throttle:
+ if (!se) {
+ add_nr_running(rq, 1);
+ /*
+@@ -5317,17 +5316,13 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+ cfs_rq = cfs_rq_of(se);
+ dequeue_entity(cfs_rq, se, flags);
+
+- /*
+- * end evaluation on encountering a throttled cfs_rq
+- *
+- * note: in the case of encountering a throttled cfs_rq we will
+- * post the final h_nr_running decrement below.
+- */
+- if (cfs_rq_throttled(cfs_rq))
+- break;
+ cfs_rq->h_nr_running--;
+ cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+
++ /* end evaluation on encountering a throttled cfs_rq */
++ if (cfs_rq_throttled(cfs_rq))
++ goto dequeue_throttle;
++
+ /* Don't dequeue parent if it has other entities besides us */
+ if (cfs_rq->load.weight) {
+ /* Avoid re-evaluating load for this entity: */
+@@ -5345,16 +5340,19 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+- cfs_rq->h_nr_running--;
+- cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+
++ /* end evaluation on encountering a throttled cfs_rq */
+ if (cfs_rq_throttled(cfs_rq))
+- break;
++ goto dequeue_throttle;
+
+ update_load_avg(cfs_rq, se, UPDATE_TG);
+ update_cfs_group(se);
++
++ cfs_rq->h_nr_running--;
++ cfs_rq->idle_h_nr_running -= idle_h_nr_running;
+ }
+
++dequeue_throttle:
+ if (!se)
+ sub_nr_running(rq, 1);
+
+--
+2.25.1
+
flow_dissector-drop-bpf-flow-dissector-prog-ref-on-netns-cleanup.patch
x86-unwind-orc-fix-unwind_get_return_address_ptr-for-inactive-tasks.patch
y2038-sh-remove-timeval-timespec-usage-from-headers.patch
+iio-adc-stm32-adc-use-dma_request_chan-instead-dma_r.patch
+iio-adc-stm32-adc-fix-device-used-to-request-dma.patch
+iio-adc-stm32-dfsdm-use-dma_request_chan-instead-dma.patch
+iio-adc-stm32-dfsdm-fix-device-used-to-request-dma.patch
+rxrpc-trace-discarded-acks.patch
+rxrpc-fix-ack-discard.patch
+tpm-check-event-log-version-before-reading-final-eve.patch
+sched-fair-reorder-enqueue-dequeue_task_fair-path.patch
+sched-fair-fix-reordering-of-enqueue-dequeue_task_fa.patch
+sched-fair-fix-enqueue_task_fair-warning-some-more.patch
--- /dev/null
+From 1b581f0dd5ffb4c4880e551de23edf2ad558b0ee Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 May 2020 06:01:13 +0200
+Subject: tpm: check event log version before reading final events
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Loïc Yhuel <loic.yhuel@gmail.com>
+
+[ Upstream commit b4f1874c62168159fdb419ced4afc77c1b51c475 ]
+
+This fixes the boot issues since 5.3 on several Dell models when the TPM
+is enabled. Depending on the exact grub binary, booting the kernel would
+freeze early, or just report an error parsing the final events log.
+
+We get an event log in the SHA-1 format, which doesn't have a
+tcg_efi_specid_event_head in the first event, and there is a final events
+table which doesn't match the crypto agile format.
+__calc_tpm2_event_size reads bad "count" and "efispecid->num_algs", and
+either fails, or loops long enough for the machine to be appear frozen.
+
+So we now only parse the final events table, which per the spec is always
+supposed to be in the crypto agile format, when we get an event log in
+this format.
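+
+On the kernel side the gate amounts to the following (as in the
+drivers/firmware/efi/tpm.c hunk below; the EFI stub gets an equivalent
+version check before it looks up the final events config table):
+
+    if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
+        log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
+        pr_warn(FW_BUG "TPM Final Events table missing or invalid\n");
+        goto out;
+    }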
+
+Fixes: c46f3405692de ("tpm: Reserve the TPM final events table")
+Fixes: 166a2809d65b2 ("tpm: Don't duplicate events from the final event log in the TCG2 log")
+Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1779611
+Signed-off-by: Loïc Yhuel <loic.yhuel@gmail.com>
+Link: https://lore.kernel.org/r/20200512040113.277768-1-loic.yhuel@gmail.com
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
+Reviewed-by: Matthew Garrett <mjg59@google.com>
+[ardb: warn when final events table is missing or in the wrong format]
+Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/efi/libstub/tpm.c | 5 +++--
+ drivers/firmware/efi/tpm.c | 5 ++++-
+ 2 files changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
+index eb9af83e4d59..aeeb1b2d8ede 100644
+--- a/drivers/firmware/efi/libstub/tpm.c
++++ b/drivers/firmware/efi/libstub/tpm.c
+@@ -64,7 +64,7 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
+ efi_status_t status;
+ efi_physical_addr_t log_location = 0, log_last_entry = 0;
+ struct linux_efi_tpm_eventlog *log_tbl = NULL;
+- struct efi_tcg2_final_events_table *final_events_table;
++ struct efi_tcg2_final_events_table *final_events_table = NULL;
+ unsigned long first_entry_addr, last_entry_addr;
+ size_t log_size, last_entry_size;
+ efi_bool_t truncated;
+@@ -140,7 +140,8 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table_arg)
+ * Figure out whether any events have already been logged to the
+ * final events structure, and if so how much space they take up
+ */
+- final_events_table = get_efi_config_table(sys_table_arg,
++ if (version == EFI_TCG2_EVENT_LOG_FORMAT_TCG_2)
++ final_events_table = get_efi_config_table(sys_table_arg,
+ LINUX_EFI_TPM_FINAL_LOG_GUID);
+ if (final_events_table && final_events_table->nr_events) {
+ struct tcg_pcr_event2_head *header;
+diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c
+index 55b031d2c989..c1955d320fec 100644
+--- a/drivers/firmware/efi/tpm.c
++++ b/drivers/firmware/efi/tpm.c
+@@ -62,8 +62,11 @@ int __init efi_tpm_eventlog_init(void)
+ tbl_size = sizeof(*log_tbl) + log_tbl->size;
+ memblock_reserve(efi.tpm_log, tbl_size);
+
+- if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR)
++ if (efi.tpm_final_log == EFI_INVALID_TABLE_ADDR ||
++ log_tbl->version != EFI_TCG2_EVENT_LOG_FORMAT_TCG_2) {
++ pr_warn(FW_BUG "TPM Final Events table missing or invalid\n");
+ goto out;
++ }
+
+ final_tbl = early_memremap(efi.tpm_final_log, sizeof(*final_tbl));
+
+--
+2.25.1
+