--- /dev/null
+From cfc8f568aada98f9608a0a62511ca18d647613e2 Mon Sep 17 00:00:00 2001
+From: Oleksandr Suvorov <oleksandr.suvorov@toradex.com>
+Date: Fri, 19 Jul 2019 10:05:30 +0000
+Subject: ASoC: Define a set of DAPM pre/post-up events
+
+From: Oleksandr Suvorov <oleksandr.suvorov@toradex.com>
+
+commit cfc8f568aada98f9608a0a62511ca18d647613e2 upstream.
+
+Prepare to use the SND_SOC_DAPM_PRE_POST_PMU definition to
+reduce the size of the code that follows and make it more readable.
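+
+For example, the follow-up sgtl5000 change can then request both the
+pre- and post-power-up (and -down) callbacks with the combined flags:
+
+  SND_SOC_DAPM_PGA_E("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0,
+                     headphone_pga_event,
+                     SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+
+instead of spelling out the four SND_SOC_DAPM_{PRE,POST}_{PMU,PMD} flags
+individually.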
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Oleksandr Suvorov <oleksandr.suvorov@toradex.com>
+Reviewed-by: Marcel Ziswiler <marcel.ziswiler@toradex.com>
+Reviewed-by: Igor Opaniuk <igor.opaniuk@toradex.com>
+Reviewed-by: Fabio Estevam <festevam@gmail.com>
+Link: https://lore.kernel.org/r/20190719100524.23300-2-oleksandr.suvorov@toradex.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/sound/soc-dapm.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/include/sound/soc-dapm.h
++++ b/include/sound/soc-dapm.h
+@@ -353,6 +353,8 @@ struct device;
+ #define SND_SOC_DAPM_WILL_PMD 0x80 /* called at start of sequence */
+ #define SND_SOC_DAPM_PRE_POST_PMD \
+ (SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD)
++#define SND_SOC_DAPM_PRE_POST_PMU \
++ (SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU)
+
+ /* convenience event type detection */
+ #define SND_SOC_DAPM_EVENT_ON(e) \
--- /dev/null
+From b1f373a11d25fc9a5f7679c9b85799fe09b0dc4a Mon Sep 17 00:00:00 2001
+From: Oleksandr Suvorov <oleksandr.suvorov@toradex.com>
+Date: Fri, 19 Jul 2019 10:05:31 +0000
+Subject: ASoC: sgtl5000: Improve VAG power and mute control
+
+From: Oleksandr Suvorov <oleksandr.suvorov@toradex.com>
+
+commit b1f373a11d25fc9a5f7679c9b85799fe09b0dc4a upstream.
+
+VAG power control is improved to match the manual [1]. This patch fixes at
+least one bug: if a customer muxes Headphone to Line-In right after boot,
+the VAG power remains off, which leads to poor sound quality from Line-In.
+
+I.e. after boot:
+ - Connect sound source to Line-In jack;
+ - Connect headphone to HP jack;
+ - Run following commands:
+ $ amixer set 'Headphone' 80%
+ $ amixer set 'Headphone Mux' LINE_IN
+
+Change the VAG power on/off control according to the following algorithm:
+ - turn VAG power ON on the 1st incoming event.
+ - keep it ON while there is any active VAG consumer (ADC/DAC/HP/Line-In).
+ - turn VAG power OFF on the pre-down event of the last remaining
+   consumer.
+ - always delay after VAG power OFF to avoid a pop.
+ - delay after VAG power ON if the initiating consumer is Line-In; this
+   prevents a pop during Line-In muxing.
+
+According to the data sheet [1], to avoid any pops/clicks,
+the outputs should be muted during input/output
+routing changes.
+
+[1] https://www.nxp.com/docs/en/data-sheet/SGTL5000.pdf
+
+Cc: stable@vger.kernel.org
+Fixes: 9b34e6cc3bc2 ("ASoC: Add Freescale SGTL5000 codec support")
+Signed-off-by: Oleksandr Suvorov <oleksandr.suvorov@toradex.com>
+Reviewed-by: Marcel Ziswiler <marcel.ziswiler@toradex.com>
+Reviewed-by: Fabio Estevam <festevam@gmail.com>
+Reviewed-by: Cezary Rojewski <cezary.rojewski@intel.com>
+Link: https://lore.kernel.org/r/20190719100524.23300-3-oleksandr.suvorov@toradex.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/soc/codecs/sgtl5000.c | 224 ++++++++++++++++++++++++++++++++++++++------
+ 1 file changed, 194 insertions(+), 30 deletions(-)
+
+--- a/sound/soc/codecs/sgtl5000.c
++++ b/sound/soc/codecs/sgtl5000.c
+@@ -31,6 +31,13 @@
+ #define SGTL5000_DAP_REG_OFFSET 0x0100
+ #define SGTL5000_MAX_REG_OFFSET 0x013A
+
++/* Delay for the VAG ramp up */
++#define SGTL5000_VAG_POWERUP_DELAY 500 /* ms */
++/* Delay for the VAG ramp down */
++#define SGTL5000_VAG_POWERDOWN_DELAY 500 /* ms */
++
++#define SGTL5000_OUTPUTS_MUTE (SGTL5000_HP_MUTE | SGTL5000_LINE_OUT_MUTE)
++
+ /* default value of sgtl5000 registers */
+ static const struct reg_default sgtl5000_reg_defaults[] = {
+ { SGTL5000_CHIP_DIG_POWER, 0x0000 },
+@@ -116,6 +123,13 @@ enum {
+ I2S_LRCLK_STRENGTH_HIGH,
+ };
+
++enum {
++ HP_POWER_EVENT,
++ DAC_POWER_EVENT,
++ ADC_POWER_EVENT,
++ LAST_POWER_EVENT = ADC_POWER_EVENT
++};
++
+ /* sgtl5000 private structure in codec */
+ struct sgtl5000_priv {
+ int sysclk; /* sysclk rate */
+@@ -129,8 +143,109 @@ struct sgtl5000_priv {
+ u8 micbias_resistor;
+ u8 micbias_voltage;
+ u8 lrclk_strength;
++ u16 mute_state[LAST_POWER_EVENT + 1];
+ };
+
++static inline int hp_sel_input(struct snd_soc_component *component)
++{
++ return (snd_soc_component_read32(component, SGTL5000_CHIP_ANA_CTRL) &
++ SGTL5000_HP_SEL_MASK) >> SGTL5000_HP_SEL_SHIFT;
++}
++
++static inline u16 mute_output(struct snd_soc_component *component,
++ u16 mute_mask)
++{
++ u16 mute_reg = snd_soc_component_read32(component,
++ SGTL5000_CHIP_ANA_CTRL);
++
++ snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
++ mute_mask, mute_mask);
++ return mute_reg;
++}
++
++static inline void restore_output(struct snd_soc_component *component,
++ u16 mute_mask, u16 mute_reg)
++{
++ snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
++ mute_mask, mute_reg);
++}
++
++static void vag_power_on(struct snd_soc_component *component, u32 source)
++{
++ if (snd_soc_component_read32(component, SGTL5000_CHIP_ANA_POWER) &
++ SGTL5000_VAG_POWERUP)
++ return;
++
++ snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
++ SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
++
++ /* When VAG powering on to get local loop from Line-In, the sleep
++ * is required to avoid loud pop.
++ */
++ if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN &&
++ source == HP_POWER_EVENT)
++ msleep(SGTL5000_VAG_POWERUP_DELAY);
++}
++
++static int vag_power_consumers(struct snd_soc_component *component,
++ u16 ana_pwr_reg, u32 source)
++{
++ int consumers = 0;
++
++ /* count dac/adc consumers unconditional */
++ if (ana_pwr_reg & SGTL5000_DAC_POWERUP)
++ consumers++;
++ if (ana_pwr_reg & SGTL5000_ADC_POWERUP)
++ consumers++;
++
++ /*
++ * If the event comes from HP and Line-In is selected,
++ * current action is 'DAC to be powered down'.
++ * As HP_POWERUP is not set when HP muxed to line-in,
++ * we need to keep VAG power ON.
++ */
++ if (source == HP_POWER_EVENT) {
++ if (hp_sel_input(component) == SGTL5000_HP_SEL_LINE_IN)
++ consumers++;
++ } else {
++ if (ana_pwr_reg & SGTL5000_HP_POWERUP)
++ consumers++;
++ }
++
++ return consumers;
++}
++
++static void vag_power_off(struct snd_soc_component *component, u32 source)
++{
++ u16 ana_pwr = snd_soc_component_read32(component,
++ SGTL5000_CHIP_ANA_POWER);
++
++ if (!(ana_pwr & SGTL5000_VAG_POWERUP))
++ return;
++
++ /*
++ * This function calls when any of VAG power consumers is disappearing.
++ * Thus, if there is more than one consumer at the moment, as minimum
++ * one consumer will definitely stay after the end of the current
++ * event.
++ * Don't clear VAG_POWERUP if 2 or more consumers of VAG present:
++ * - LINE_IN (for HP events) / HP (for DAC/ADC events)
++ * - DAC
++ * - ADC
++ * (the current consumer is disappearing right now)
++ */
++ if (vag_power_consumers(component, ana_pwr, source) >= 2)
++ return;
++
++ snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
++ SGTL5000_VAG_POWERUP, 0);
++ /* In power down case, we need wait 400-1000 ms
++ * when VAG fully ramped down.
++ * As longer we wait, as smaller pop we've got.
++ */
++ msleep(SGTL5000_VAG_POWERDOWN_DELAY);
++}
++
+ /*
+ * mic_bias power on/off share the same register bits with
+ * output impedance of mic bias, when power on mic bias, we
+@@ -162,36 +277,46 @@ static int mic_bias_event(struct snd_soc
+ return 0;
+ }
+
+-/*
+- * As manual described, ADC/DAC only works when VAG powerup,
+- * So enabled VAG before ADC/DAC up.
+- * In power down case, we need wait 400ms when vag fully ramped down.
+- */
+-static int power_vag_event(struct snd_soc_dapm_widget *w,
+- struct snd_kcontrol *kcontrol, int event)
++static int vag_and_mute_control(struct snd_soc_component *component,
++ int event, int event_source)
+ {
+- struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+- const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
++ static const u16 mute_mask[] = {
++ /*
++ * Mask for HP_POWER_EVENT.
++ * Muxing Headphones have to be wrapped with mute/unmute
++ * headphones only.
++ */
++ SGTL5000_HP_MUTE,
++ /*
++ * Masks for DAC_POWER_EVENT/ADC_POWER_EVENT.
++ * Muxing DAC or ADC block have to wrapped with mute/unmute
++ * both headphones and line-out.
++ */
++ SGTL5000_OUTPUTS_MUTE,
++ SGTL5000_OUTPUTS_MUTE
++ };
++
++ struct sgtl5000_priv *sgtl5000 =
++ snd_soc_component_get_drvdata(component);
+
+ switch (event) {
++ case SND_SOC_DAPM_PRE_PMU:
++ sgtl5000->mute_state[event_source] =
++ mute_output(component, mute_mask[event_source]);
++ break;
+ case SND_SOC_DAPM_POST_PMU:
+- snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
+- SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
+- msleep(400);
++ vag_power_on(component, event_source);
++ restore_output(component, mute_mask[event_source],
++ sgtl5000->mute_state[event_source]);
+ break;
+-
+ case SND_SOC_DAPM_PRE_PMD:
+- /*
+- * Don't clear VAG_POWERUP, when both DAC and ADC are
+- * operational to prevent inadvertently starving the
+- * other one of them.
+- */
+- if ((snd_soc_component_read32(component, SGTL5000_CHIP_ANA_POWER) &
+- mask) != mask) {
+- snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_POWER,
+- SGTL5000_VAG_POWERUP, 0);
+- msleep(400);
+- }
++ sgtl5000->mute_state[event_source] =
++ mute_output(component, mute_mask[event_source]);
++ vag_power_off(component, event_source);
++ break;
++ case SND_SOC_DAPM_POST_PMD:
++ restore_output(component, mute_mask[event_source],
++ sgtl5000->mute_state[event_source]);
+ break;
+ default:
+ break;
+@@ -200,6 +325,41 @@ static int power_vag_event(struct snd_so
+ return 0;
+ }
+
++/*
++ * Mute Headphone when power it up/down.
++ * Control VAG power on HP power path.
++ */
++static int headphone_pga_event(struct snd_soc_dapm_widget *w,
++ struct snd_kcontrol *kcontrol, int event)
++{
++ struct snd_soc_component *component =
++ snd_soc_dapm_to_component(w->dapm);
++
++ return vag_and_mute_control(component, event, HP_POWER_EVENT);
++}
++
++/* As manual describes, ADC/DAC powering up/down requires
++ * to mute outputs to avoid pops.
++ * Control VAG power on ADC/DAC power path.
++ */
++static int adc_updown_depop(struct snd_soc_dapm_widget *w,
++ struct snd_kcontrol *kcontrol, int event)
++{
++ struct snd_soc_component *component =
++ snd_soc_dapm_to_component(w->dapm);
++
++ return vag_and_mute_control(component, event, ADC_POWER_EVENT);
++}
++
++static int dac_updown_depop(struct snd_soc_dapm_widget *w,
++ struct snd_kcontrol *kcontrol, int event)
++{
++ struct snd_soc_component *component =
++ snd_soc_dapm_to_component(w->dapm);
++
++ return vag_and_mute_control(component, event, DAC_POWER_EVENT);
++}
++
+ /* input sources for ADC */
+ static const char *adc_mux_text[] = {
+ "MIC_IN", "LINE_IN"
+@@ -272,7 +432,10 @@ static const struct snd_soc_dapm_widget
+ mic_bias_event,
+ SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
+
+- SND_SOC_DAPM_PGA("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0),
++ SND_SOC_DAPM_PGA_E("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0,
++ headphone_pga_event,
++ SND_SOC_DAPM_PRE_POST_PMU |
++ SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_PGA("LO", SGTL5000_CHIP_ANA_POWER, 0, 0, NULL, 0),
+
+ SND_SOC_DAPM_MUX("Capture Mux", SND_SOC_NOPM, 0, 0, &adc_mux),
+@@ -293,11 +456,12 @@ static const struct snd_soc_dapm_widget
+ 0, SGTL5000_CHIP_DIG_POWER,
+ 1, 0),
+
+- SND_SOC_DAPM_ADC("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0),
+- SND_SOC_DAPM_DAC("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0),
+-
+- SND_SOC_DAPM_PRE("VAG_POWER_PRE", power_vag_event),
+- SND_SOC_DAPM_POST("VAG_POWER_POST", power_vag_event),
++ SND_SOC_DAPM_ADC_E("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0,
++ adc_updown_depop, SND_SOC_DAPM_PRE_POST_PMU |
++ SND_SOC_DAPM_PRE_POST_PMD),
++ SND_SOC_DAPM_DAC_E("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0,
++ dac_updown_depop, SND_SOC_DAPM_PRE_POST_PMU |
++ SND_SOC_DAPM_PRE_POST_PMD),
+ };
+
+ /* routes for sgtl5000 */
--- /dev/null
+From d84ea2123f8d27144e3f4d58cd88c9c6ddc799de Mon Sep 17 00:00:00 2001
+From: Marc Kleine-Budde <mkl@pengutronix.de>
+Date: Tue, 13 Aug 2019 16:01:02 +0200
+Subject: can: mcp251x: mcp251x_hw_reset(): allow more time after a reset
+
+From: Marc Kleine-Budde <mkl@pengutronix.de>
+
+commit d84ea2123f8d27144e3f4d58cd88c9c6ddc799de upstream.
+
+Some boards take longer than 5ms to power up after a reset, so allow
+several retry attempts before giving up.
+
+Fixes: ff06d611a31c ("can: mcp251x: Improve mcp251x_hw_reset()")
+Cc: linux-stable <stable@vger.kernel.org>
+Tested-by: Sean Nyekjaer <sean@geanix.com>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/can/spi/mcp251x.c | 19 ++++++++++++++-----
+ 1 file changed, 14 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -626,7 +626,7 @@ static int mcp251x_setup(struct net_devi
+ static int mcp251x_hw_reset(struct spi_device *spi)
+ {
+ struct mcp251x_priv *priv = spi_get_drvdata(spi);
+- u8 reg;
++ unsigned long timeout;
+ int ret;
+
+ /* Wait for oscillator startup timer after power up */
+@@ -640,10 +640,19 @@ static int mcp251x_hw_reset(struct spi_d
+ /* Wait for oscillator startup timer after reset */
+ mdelay(MCP251X_OST_DELAY_MS);
+
+- reg = mcp251x_read_reg(spi, CANSTAT);
+- if ((reg & CANCTRL_REQOP_MASK) != CANCTRL_REQOP_CONF)
+- return -ENODEV;
+-
++ /* Wait for reset to finish */
++ timeout = jiffies + HZ;
++ while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
++ CANCTRL_REQOP_CONF) {
++ usleep_range(MCP251X_OST_DELAY_MS * 1000,
++ MCP251X_OST_DELAY_MS * 1000 * 2);
++
++ if (time_after(jiffies, timeout)) {
++ dev_err(&spi->dev,
++ "MCP251x didn't enter in conf mode after reset\n");
++ return -EBUSY;
++ }
++ }
+ return 0;
+ }
+
--- /dev/null
+From 48f89d2a2920166c35b1c0b69917dbb0390ebec7 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Horia=20Geant=C4=83?= <horia.geanta@nxp.com>
+Date: Tue, 30 Jul 2019 08:48:33 +0300
+Subject: crypto: caam - fix concurrency issue in givencrypt descriptor
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Horia Geantă <horia.geanta@nxp.com>
+
+commit 48f89d2a2920166c35b1c0b69917dbb0390ebec7 upstream.
+
+IV transfer from ofifo to class2 (set up at [29][30]) is not guaranteed
+to be scheduled before the data transfer from ofifo to external memory
+(set up at [38]):
+
+[29] 10FA0004 ld: ind-nfifo (len=4) imm
+[30] 81F00010 <nfifo_entry: ofifo->class2 type=msg len=16>
+[31] 14820004 ld: ccb2-datasz len=4 offs=0 imm
+[32] 00000010 data:0x00000010
+[33] 8210010D operation: cls1-op aes cbc init-final enc
+[34] A8080B04 math: (seqin + math0)->vseqout len=4
+[35] 28000010 seqfifold: skip len=16
+[36] A8080A04 math: (seqin + math0)->vseqin len=4
+[37] 2F1E0000 seqfifold: both msg1->2-last2-last1 len=vseqinsz
+[38] 69300000 seqfifostr: msg len=vseqoutsz
+[39] 5C20000C seqstr: ccb2 ctx len=12 offs=0
+
+If ofifo -> external memory transfer happens first, DECO will hang
+(issuing a Watchdog Timeout error, if WDOG is enabled) waiting for
+data availability in ofifo for the ofifo -> c2 ififo transfer.
+
+Make sure IV transfer happens first by waiting for all CAAM internal
+transfers to end before starting payload transfer.
+
+New descriptor with jump command inserted at [37]:
+
+[..]
+[36] A8080A04 math: (seqin + math0)->vseqin len=4
+[37] A1000401 jump: jsl1 all-match[!nfifopend] offset=[01] local->[38]
+[38] 2F1E0000 seqfifold: both msg1->2-last2-last1 len=vseqinsz
+[39] 69300000 seqfifostr: msg len=vseqoutsz
+[40] 5C20000C seqstr: ccb2 ctx len=12 offs=0
+
+[Note: the issue is present in the descriptor from the very beginning
+(cf. Fixes tag). However I've marked it v4.19+ since it's the oldest
+maintained kernel that the patch applies cleanly against.]
+
+Cc: <stable@vger.kernel.org> # v4.19+
+Fixes: 1acebad3d8db8 ("crypto: caam - faster aead implementation")
+Signed-off-by: Horia Geantă <horia.geanta@nxp.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamalg_desc.c | 9 +++++++++
+ drivers/crypto/caam/caamalg_desc.h | 2 +-
+ 2 files changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/crypto/caam/caamalg_desc.c
++++ b/drivers/crypto/caam/caamalg_desc.c
+@@ -509,6 +509,7 @@ void cnstr_shdsc_aead_givencap(u32 * con
+ const bool is_qi, int era)
+ {
+ u32 geniv, moveiv;
++ u32 *wait_cmd;
+
+ /* Note: Context registers are saved. */
+ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era);
+@@ -604,6 +605,14 @@ copy_iv:
+
+ /* Will read cryptlen */
+ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);
++
++ /*
++ * Wait for IV transfer (ofifo -> class2) to finish before starting
++ * ciphertext transfer (ofifo -> external memory).
++ */
++ wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP);
++ set_jump_tgt_here(desc, wait_cmd);
++
+ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF |
+ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH);
+ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF);
+--- a/drivers/crypto/caam/caamalg_desc.h
++++ b/drivers/crypto/caam/caamalg_desc.h
+@@ -12,7 +12,7 @@
+ #define DESC_AEAD_BASE (4 * CAAM_CMD_SZ)
+ #define DESC_AEAD_ENC_LEN (DESC_AEAD_BASE + 11 * CAAM_CMD_SZ)
+ #define DESC_AEAD_DEC_LEN (DESC_AEAD_BASE + 15 * CAAM_CMD_SZ)
+-#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 7 * CAAM_CMD_SZ)
++#define DESC_AEAD_GIVENC_LEN (DESC_AEAD_ENC_LEN + 8 * CAAM_CMD_SZ)
+ #define DESC_QI_AEAD_ENC_LEN (DESC_AEAD_ENC_LEN + 3 * CAAM_CMD_SZ)
+ #define DESC_QI_AEAD_DEC_LEN (DESC_AEAD_DEC_LEN + 3 * CAAM_CMD_SZ)
+ #define DESC_QI_AEAD_GIVENC_LEN (DESC_AEAD_GIVENC_LEN + 3 * CAAM_CMD_SZ)
--- /dev/null
+From c552ffb5c93d9d65aaf34f5f001c4e7e8484ced1 Mon Sep 17 00:00:00 2001
+From: Wei Yongjun <weiyongjun1@huawei.com>
+Date: Wed, 4 Sep 2019 14:18:09 +0000
+Subject: crypto: cavium/zip - Add missing single_release()
+
+From: Wei Yongjun <weiyongjun1@huawei.com>
+
+commit c552ffb5c93d9d65aaf34f5f001c4e7e8484ced1 upstream.
+
+When using single_open() for opening, single_release() should be
+used instead of seq_release(), otherwise there is a memory leak.
+
+Fixes: 09ae5d37e093 ("crypto: zip - Add Compression/Decompression statistics")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/cavium/zip/zip_main.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/crypto/cavium/zip/zip_main.c
++++ b/drivers/crypto/cavium/zip/zip_main.c
+@@ -593,6 +593,7 @@ static const struct file_operations zip_
+ .owner = THIS_MODULE,
+ .open = zip_stats_open,
+ .read = seq_read,
++ .release = single_release,
+ };
+
+ static int zip_clear_open(struct inode *inode, struct file *file)
+@@ -604,6 +605,7 @@ static const struct file_operations zip_
+ .owner = THIS_MODULE,
+ .open = zip_clear_open,
+ .read = seq_read,
++ .release = single_release,
+ };
+
+ static int zip_regs_open(struct inode *inode, struct file *file)
+@@ -615,6 +617,7 @@ static const struct file_operations zip_
+ .owner = THIS_MODULE,
+ .open = zip_regs_open,
+ .read = seq_read,
++ .release = single_release,
+ };
+
+ /* Root directory for thunderx_zip debugfs entry */
--- /dev/null
+From 76a95bd8f9e10cade9c4c8df93b5c20ff45dc0f5 Mon Sep 17 00:00:00 2001
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+Date: Tue, 2 Jul 2019 14:39:19 +0300
+Subject: crypto: ccree - account for TEE not ready to report
+
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+
+commit 76a95bd8f9e10cade9c4c8df93b5c20ff45dc0f5 upstream.
+
+When the ccree driver runs, it checks the state of the Trusted Execution
+Environment CryptoCell driver before proceeding. We did not account
+for cases where the TEE side is not ready or not available at all.
+Fix it by only considering the TEE error state after syncing with the
+TEE side driver.
+
+Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Fixes: ab8ec9658f5a ("crypto: ccree - add FIPS support")
+CC: stable@vger.kernel.org # v4.17+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccree/cc_fips.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/crypto/ccree/cc_fips.c
++++ b/drivers/crypto/ccree/cc_fips.c
+@@ -21,7 +21,13 @@ static bool cc_get_tee_fips_status(struc
+ u32 reg;
+
+ reg = cc_ioread(drvdata, CC_REG(GPR_HOST));
+- return (reg == (CC_FIPS_SYNC_TEE_STATUS | CC_FIPS_SYNC_MODULE_OK));
++ /* Did the TEE report status? */
++ if (reg & CC_FIPS_SYNC_TEE_STATUS)
++ /* Yes. Is it OK? */
++ return (reg & CC_FIPS_SYNC_MODULE_OK);
++
++ /* No. It's either not in use or will be reported later */
++ return true;
+ }
+
+ /*
--- /dev/null
+From 7a4be6c113c1f721818d1e3722a9015fe393295c Mon Sep 17 00:00:00 2001
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+Date: Mon, 29 Jul 2019 13:40:18 +0300
+Subject: crypto: ccree - use the full crypt length value
+
+From: Gilad Ben-Yossef <gilad@benyossef.com>
+
+commit 7a4be6c113c1f721818d1e3722a9015fe393295c upstream.
+
+In case of an AEAD decryption verification error, we were using the
+wrong value to zero out the plaintext buffer, leaving the end of
+the buffer with the false plaintext.
+
+Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
+Fixes: ff27e85a85bb ("crypto: ccree - add AEAD support")
+CC: stable@vger.kernel.org # v4.17+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/ccree/cc_aead.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/ccree/cc_aead.c
++++ b/drivers/crypto/ccree/cc_aead.c
+@@ -227,7 +227,7 @@ static void cc_aead_complete(struct devi
+ /* In case of payload authentication failure, MUST NOT
+ * revealed the decrypted message --> zero its memory.
+ */
+- cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
++ cc_zero_sgl(areq->dst, areq->cryptlen);
+ err = -EBADMSG;
+ }
+ } else { /*ENCRYPT*/
--- /dev/null
+From 1b82feb6c5e1996513d0fb0bbb475417088b4954 Mon Sep 17 00:00:00 2001
+From: Alexander Sverdlin <alexander.sverdlin@nokia.com>
+Date: Tue, 23 Jul 2019 07:24:01 +0000
+Subject: crypto: qat - Silence smp_processor_id() warning
+
+From: Alexander Sverdlin <alexander.sverdlin@nokia.com>
+
+commit 1b82feb6c5e1996513d0fb0bbb475417088b4954 upstream.
+
+It seems that smp_processor_id() is only used for best-effort
+load balancing; refer to qat_crypto_get_instance_node(). It's not feasible
+to disable preemption for the duration of the crypto requests. Therefore,
+just silence the warning. This commit is similar to e7a9b05ca4
+("crypto: cavium - Fix smp_processor_id() warnings").
+
+Silences the following splat:
+BUG: using smp_processor_id() in preemptible [00000000] code: cryptomgr_test/2904
+caller is qat_alg_ablkcipher_setkey+0x300/0x4a0 [intel_qat]
+CPU: 1 PID: 2904 Comm: cryptomgr_test Tainted: P O 4.14.69 #1
+...
+Call Trace:
+ dump_stack+0x5f/0x86
+ check_preemption_disabled+0xd3/0xe0
+ qat_alg_ablkcipher_setkey+0x300/0x4a0 [intel_qat]
+ skcipher_setkey_ablkcipher+0x2b/0x40
+ __test_skcipher+0x1f3/0xb20
+ ? cpumask_next_and+0x26/0x40
+ ? find_busiest_group+0x10e/0x9d0
+ ? preempt_count_add+0x49/0xa0
+ ? try_module_get+0x61/0xf0
+ ? crypto_mod_get+0x15/0x30
+ ? __kmalloc+0x1df/0x1f0
+ ? __crypto_alloc_tfm+0x116/0x180
+ ? crypto_skcipher_init_tfm+0xa6/0x180
+ ? crypto_create_tfm+0x4b/0xf0
+ test_skcipher+0x21/0xa0
+ alg_test_skcipher+0x3f/0xa0
+ alg_test.part.6+0x126/0x2a0
+ ? finish_task_switch+0x21b/0x260
+ ? __schedule+0x1e9/0x800
+ ? __wake_up_common+0x8d/0x140
+ cryptomgr_test+0x40/0x50
+ kthread+0xff/0x130
+ ? cryptomgr_notify+0x540/0x540
+ ? kthread_create_on_node+0x70/0x70
+ ret_from_fork+0x24/0x50
+
+Fixes: ed8ccaef52 ("crypto: qat - Add support for SRIOV")
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexander Sverdlin <alexander.sverdlin@nokia.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/qat/qat_common/adf_common_drv.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
++++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
+@@ -95,7 +95,7 @@ struct service_hndl {
+
+ static inline int get_current_node(void)
+ {
+- return topology_physical_package_id(smp_processor_id());
++ return topology_physical_package_id(raw_smp_processor_id());
+ }
+
+ int adf_service_register(struct service_hndl *service);
--- /dev/null
+From 0ba3c026e685573bd3534c17e27da7c505ac99c4 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Fri, 6 Sep 2019 13:13:06 +1000
+Subject: crypto: skcipher - Unmap pages after an external error
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 0ba3c026e685573bd3534c17e27da7c505ac99c4 upstream.
+
+skcipher_walk_done may be called with an error by internal or
+external callers. For those internal callers we shouldn't unmap
+pages but for external callers we must unmap any pages that are
+in use.
+
+This patch distinguishes between the two cases by checking whether
+walk->nbytes is zero or not. For internal callers, we now set
+walk->nbytes to zero prior to the call. For external callers,
+walk->nbytes has always been non-zero (as zero is used to indicate
+the termination of a walk).
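+
+For illustration, an external caller (a cipher implementation driving the
+walk) follows roughly the pattern below, so walk->nbytes is still non-zero
+when it passes an error into skcipher_walk_done() and the mapped pages get
+unmapped (do_one_chunk() is just a placeholder for the actual processing):
+
+	err = skcipher_walk_virt(&walk, req, false);
+	while (walk.nbytes) {
+		int ret = do_one_chunk(walk.src.virt.addr,
+				       walk.dst.virt.addr, walk.nbytes);
+		/* report bytes left unprocessed, or a negative error */
+		err = skcipher_walk_done(&walk, ret < 0 ? ret : 0);
+	}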
+
+Reported-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Fixes: 5cde0af2a982 ("[CRYPTO] cipher: Added block cipher type")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/skcipher.c | 42 +++++++++++++++++++++++-------------------
+ 1 file changed, 23 insertions(+), 19 deletions(-)
+
+--- a/crypto/skcipher.c
++++ b/crypto/skcipher.c
+@@ -95,7 +95,7 @@ static inline u8 *skcipher_get_spot(u8 *
+ return max(start, end_page);
+ }
+
+-static void skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
++static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
+ {
+ u8 *addr;
+
+@@ -103,19 +103,21 @@ static void skcipher_done_slow(struct sk
+ addr = skcipher_get_spot(addr, bsize);
+ scatterwalk_copychunks(addr, &walk->out, bsize,
+ (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
++ return 0;
+ }
+
+ int skcipher_walk_done(struct skcipher_walk *walk, int err)
+ {
+- unsigned int n; /* bytes processed */
+- bool more;
++ unsigned int n = walk->nbytes;
++ unsigned int nbytes = 0;
+
+- if (unlikely(err < 0))
++ if (!n)
+ goto finish;
+
+- n = walk->nbytes - err;
+- walk->total -= n;
+- more = (walk->total != 0);
++ if (likely(err >= 0)) {
++ n -= err;
++ nbytes = walk->total - n;
++ }
+
+ if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
+ SKCIPHER_WALK_SLOW |
+@@ -131,7 +133,7 @@ unmap_src:
+ memcpy(walk->dst.virt.addr, walk->page, n);
+ skcipher_unmap_dst(walk);
+ } else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
+- if (err) {
++ if (err > 0) {
+ /*
+ * Didn't process all bytes. Either the algorithm is
+ * broken, or this was the last step and it turned out
+@@ -139,27 +141,29 @@ unmap_src:
+ * the algorithm requires it.
+ */
+ err = -EINVAL;
+- goto finish;
+- }
+- skcipher_done_slow(walk, n);
+- goto already_advanced;
++ nbytes = 0;
++ } else
++ n = skcipher_done_slow(walk, n);
+ }
+
++ if (err > 0)
++ err = 0;
++
++ walk->total = nbytes;
++ walk->nbytes = 0;
++
+ scatterwalk_advance(&walk->in, n);
+ scatterwalk_advance(&walk->out, n);
+-already_advanced:
+- scatterwalk_done(&walk->in, 0, more);
+- scatterwalk_done(&walk->out, 1, more);
++ scatterwalk_done(&walk->in, 0, nbytes);
++ scatterwalk_done(&walk->out, 1, nbytes);
+
+- if (more) {
++ if (nbytes) {
+ crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
+ CRYPTO_TFM_REQ_MAY_SLEEP : 0);
+ return skcipher_walk_next(walk);
+ }
+- err = 0;
+-finish:
+- walk->nbytes = 0;
+
++finish:
+ /* Short-circuit for the common/fast path. */
+ if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
+ goto out;
--- /dev/null
+From jinpuwang@gmail.com Tue Oct 8 09:22:17 2019
+From: Jack Wang <jinpuwang@gmail.com>
+Date: Mon, 7 Oct 2019 14:36:53 +0200
+Subject: KVM: nVMX: handle page fault in vmread fix
+To: gregkh@linuxfoundation.org, sashal@kernel.org, stable@vger.kernel.org, pbonzini@redhat.com
+Cc: Jack Wang <jinpu.wang@cloud.ionos.com>
+Message-ID: <20191007123653.17961-1-jinpuwang@gmail.com>
+
+From: Jack Wang <jinpu.wang@cloud.ionos.com>
+
+During the backport of f7eea636c3d5 ("KVM: nVMX: handle page fault in
+vmread"), a mistake was made: the exception reference should be passed
+to kvm_write_guest_virt_system() instead of NULL; otherwise we get a
+NULL pointer dereference, e.g.
+
+kvm-unit-test triggered a NULL pointer deref below:
+[ 948.518437] kvm [24114]: vcpu0, guest rIP: 0x407ef9 kvm_set_msr_common: MSR_IA32_DEBUGCTLMSR 0x3, nop
+[ 949.106464] BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
+[ 949.106707] PGD 0 P4D 0
+[ 949.106872] Oops: 0002 [#1] SMP
+[ 949.107038] CPU: 2 PID: 24126 Comm: qemu-2.7 Not tainted 4.19.77-pserver #4.19.77-1+feature+daily+update+20191005.1625+a4168bb~deb9
+[ 949.107283] Hardware name: Dell Inc. Precision Tower 3620/09WH54, BIOS 2.7.3 01/31/2018
+[ 949.107549] RIP: 0010:kvm_write_guest_virt_system+0x12/0x40 [kvm]
+[ 949.107719] Code: c0 5d 41 5c 41 5d 41 5e 83 f8 03 41 0f 94 c0 41 c1 e0 02 e9 b0 ed ff ff 0f 1f 44 00 00 48 89 f0 c6 87 59 56 00 00 01 48 89 d6 <49> c7 00 00 00 00 00 89 ca 49 c7 40 08 00 00 00 00 49 c7 40 10 00
+[ 949.108044] RSP: 0018:ffffb31b0a953cb0 EFLAGS: 00010202
+[ 949.108216] RAX: 000000000046b4d8 RBX: ffff9e9f415b0000 RCX: 0000000000000008
+[ 949.108389] RDX: ffffb31b0a953cc0 RSI: ffffb31b0a953cc0 RDI: ffff9e9f415b0000
+[ 949.108562] RBP: 00000000d2e14928 R08: 0000000000000000 R09: 0000000000000000
+[ 949.108733] R10: 0000000000000000 R11: 0000000000000000 R12: ffffffffffffffc8
+[ 949.108907] R13: 0000000000000002 R14: ffff9e9f4f26f2e8 R15: 0000000000000000
+[ 949.109079] FS: 00007eff8694c700(0000) GS:ffff9e9f51a80000(0000) knlGS:0000000031415928
+[ 949.109318] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 949.109495] CR2: 0000000000000000 CR3: 00000003be53b002 CR4: 00000000003626e0
+[ 949.109671] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 949.109845] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 949.110017] Call Trace:
+[ 949.110186] handle_vmread+0x22b/0x2f0 [kvm_intel]
+[ 949.110356] ? vmexit_fill_RSB+0xc/0x30 [kvm_intel]
+[ 949.110549] kvm_arch_vcpu_ioctl_run+0xa98/0x1b30 [kvm]
+[ 949.110725] ? kvm_vcpu_ioctl+0x388/0x5d0 [kvm]
+[ 949.110901] kvm_vcpu_ioctl+0x388/0x5d0 [kvm]
+[ 949.111072] do_vfs_ioctl+0xa2/0x620
+
+Signed-off-by: Jack Wang <jinpu.wang@cloud.ionos.com>
+Acked-by: Paolo Bonzini <pbonzini@redhat.com>
+---
+ arch/x86/kvm/vmx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -8801,7 +8801,7 @@ static int handle_vmread(struct kvm_vcpu
+ /* _system ok, nested_vmx_check_permission has verified cpl=0 */
+ if (kvm_write_guest_virt_system(vcpu, gva, &field_value,
+ (is_long_mode(vcpu) ? 8 : 4),
+- NULL))
++ &e))
+ kvm_inject_page_fault(vcpu, &e);
+ }
+
--- /dev/null
+From d28eafc5a64045c78136162af9d4ba42f8230080 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Tue, 27 Aug 2019 11:31:37 +1000
+Subject: KVM: PPC: Book3S HV: Check for MMU ready on piggybacked virtual cores
+
+From: Paul Mackerras <paulus@ozlabs.org>
+
+commit d28eafc5a64045c78136162af9d4ba42f8230080 upstream.
+
+When we are running multiple vcores on the same physical core, they
+could be from different VMs and so it is possible that one of the
+VMs could have its arch.mmu_ready flag cleared (for example by a
+concurrent HPT resize) when we go to run it on a physical core.
+We currently check the arch.mmu_ready flag for the primary vcore
+but not the flags for the other vcores that will be run alongside
+it. This adds that check, and also a check when we select the
+secondary vcores from the preempted vcores list.
+
+Cc: stable@vger.kernel.org # v4.14+
+Fixes: 38c53af85306 ("KVM: PPC: Book3S HV: Fix exclusion between HPT resizing and other HPT updates")
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -2550,7 +2550,7 @@ static void collect_piggybacks(struct co
+ if (!spin_trylock(&pvc->lock))
+ continue;
+ prepare_threads(pvc);
+- if (!pvc->n_runnable) {
++ if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) {
+ list_del_init(&pvc->preempt_list);
+ if (pvc->runner == NULL) {
+ pvc->vcore_state = VCORE_INACTIVE;
+@@ -2571,15 +2571,20 @@ static void collect_piggybacks(struct co
+ spin_unlock(&lp->lock);
+ }
+
+-static bool recheck_signals(struct core_info *cip)
++static bool recheck_signals_and_mmu(struct core_info *cip)
+ {
+ int sub, i;
+ struct kvm_vcpu *vcpu;
++ struct kvmppc_vcore *vc;
+
+- for (sub = 0; sub < cip->n_subcores; ++sub)
+- for_each_runnable_thread(i, vcpu, cip->vc[sub])
++ for (sub = 0; sub < cip->n_subcores; ++sub) {
++ vc = cip->vc[sub];
++ if (!vc->kvm->arch.mmu_ready)
++ return true;
++ for_each_runnable_thread(i, vcpu, vc)
+ if (signal_pending(vcpu->arch.run_task))
+ return true;
++ }
+ return false;
+ }
+
+@@ -2800,7 +2805,7 @@ static noinline void kvmppc_run_core(str
+ local_irq_disable();
+ hard_irq_disable();
+ if (lazy_irq_pending() || need_resched() ||
+- recheck_signals(&core_info) || !vc->kvm->arch.mmu_ready) {
++ recheck_signals_and_mmu(&core_info)) {
+ local_irq_enable();
+ vc->vcore_state = VCORE_INACTIVE;
+ /* Unlock all except the primary vcore */
--- /dev/null
+From ff42df49e75f053a8a6b4c2533100cdcc23afe69 Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Tue, 27 Aug 2019 11:35:40 +1000
+Subject: KVM: PPC: Book3S HV: Don't lose pending doorbell request on migration on P9
+
+From: Paul Mackerras <paulus@ozlabs.org>
+
+commit ff42df49e75f053a8a6b4c2533100cdcc23afe69 upstream.
+
+On POWER9, when userspace reads the value of the DPDES register on a
+vCPU, it is possible for 0 to be returned although there is a doorbell
+interrupt pending for the vCPU. This can lead to a doorbell interrupt
+being lost across migration. If the guest kernel uses doorbell
+interrupts for IPIs, then it could malfunction because of the lost
+interrupt.
+
+This happens because a newly-generated doorbell interrupt is signalled
+by setting vcpu->arch.doorbell_request to 1; the DPDES value in
+vcpu->arch.vcore->dpdes is not updated, because it can only be updated
+when holding the vcpu mutex, in order to avoid races.
+
+To fix this, we OR in vcpu->arch.doorbell_request when reading the
+DPDES value.
+
+Cc: stable@vger.kernel.org # v4.13+
+Fixes: 579006944e0d ("KVM: PPC: Book3S HV: Virtualize doorbell facility on POWER9")
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Tested-by: Alexey Kardashevskiy <aik@ozlabs.ru>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -1407,7 +1407,14 @@ static int kvmppc_get_one_reg_hv(struct
+ *val = get_reg_val(id, vcpu->arch.pspb);
+ break;
+ case KVM_REG_PPC_DPDES:
+- *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
++ /*
++ * On POWER9, where we are emulating msgsndp etc.,
++ * we return 1 bit for each vcpu, which can come from
++ * either vcore->dpdes or doorbell_request.
++ * On POWER8, doorbell_request is 0.
++ */
++ *val = get_reg_val(id, vcpu->arch.vcore->dpdes |
++ vcpu->arch.doorbell_request);
+ break;
+ case KVM_REG_PPC_VTB:
+ *val = get_reg_val(id, vcpu->arch.vcore->vtb);
--- /dev/null
+From 959c5d5134786b4988b6fdd08e444aa67d1667ed Mon Sep 17 00:00:00 2001
+From: Paul Mackerras <paulus@ozlabs.org>
+Date: Tue, 13 Aug 2019 20:03:49 +1000
+Subject: KVM: PPC: Book3S HV: Fix race in re-enabling XIVE escalation interrupts
+
+From: Paul Mackerras <paulus@ozlabs.org>
+
+commit 959c5d5134786b4988b6fdd08e444aa67d1667ed upstream.
+
+Escalation interrupts are interrupts sent to the host by the XIVE
+hardware when it has an interrupt to deliver to a guest VCPU but that
+VCPU is not running anywhere in the system. Hence we disable the
+escalation interrupt for the VCPU being run when we enter the guest
+and re-enable it when the guest does an H_CEDE hypercall indicating
+it is idle.
+
+It is possible that an escalation interrupt gets generated just as we
+are entering the guest. In that case the escalation interrupt may be
+using a queue entry in one of the interrupt queues, and that queue
+entry may not have been processed when the guest exits with an H_CEDE.
+The existing entry code detects this situation and does not clear the
+vcpu->arch.xive_esc_on flag as an indication that there is a pending
+queue entry (if the queue entry gets processed, xive_esc_irq() will
+clear the flag). There is a comment in the code saying that if the
+flag is still set on H_CEDE, we have to abort the cede rather than
+re-enabling the escalation interrupt, lest we end up with two
+occurrences of the escalation interrupt in the interrupt queue.
+
+However, the exit code doesn't do that; it aborts the cede in the sense
+that vcpu->arch.ceded gets cleared, but it still enables the escalation
+interrupt by setting the source's PQ bits to 00. Instead we need to
+set the PQ bits to 10, indicating that an interrupt has been triggered.
+We also need to avoid setting vcpu->arch.xive_esc_on in this case
+(i.e. vcpu->arch.xive_esc_on seen to be set on H_CEDE) because
+xive_esc_irq() will run at some point and clear it, and if we race with
+that we may end up with an incorrect result (i.e. xive_esc_on set when
+the escalation interrupt has just been handled).
+
+It is extremely unlikely that having two queue entries would cause
+observable problems; theoretically it could cause queue overflow, but
+the CPU would have to have thousands of interrupts targeted to it for
+that to be possible. However, this fix will also make it possible to
+determine accurately whether there is an unhandled escalation
+interrupt in the queue, which will be needed by the following patch.
+
+Fixes: 9b9b13a6d153 ("KVM: PPC: Book3S HV: Keep XIVE escalation interrupt masked unless ceded")
+Cc: stable@vger.kernel.org # v4.16+
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20190813100349.GD9567@blackberry
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 36 ++++++++++++++++++++------------
+ 1 file changed, 23 insertions(+), 13 deletions(-)
+
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -2903,29 +2903,39 @@ kvm_cede_prodded:
+ kvm_cede_exit:
+ ld r9, HSTATE_KVM_VCPU(r13)
+ #ifdef CONFIG_KVM_XICS
+- /* Abort if we still have a pending escalation */
++ /* are we using XIVE with single escalation? */
++ ld r10, VCPU_XIVE_ESC_VADDR(r9)
++ cmpdi r10, 0
++ beq 3f
++ li r6, XIVE_ESB_SET_PQ_00
++ /*
++ * If we still have a pending escalation, abort the cede,
++ * and we must set PQ to 10 rather than 00 so that we don't
++ * potentially end up with two entries for the escalation
++ * interrupt in the XIVE interrupt queue. In that case
++ * we also don't want to set xive_esc_on to 1 here in
++ * case we race with xive_esc_irq().
++ */
+ lbz r5, VCPU_XIVE_ESC_ON(r9)
+ cmpwi r5, 0
+- beq 1f
++ beq 4f
+ li r0, 0
+ stb r0, VCPU_CEDED(r9)
+-1: /* Enable XIVE escalation */
+- li r5, XIVE_ESB_SET_PQ_00
++ li r6, XIVE_ESB_SET_PQ_10
++ b 5f
++4: li r0, 1
++ stb r0, VCPU_XIVE_ESC_ON(r9)
++ /* make sure store to xive_esc_on is seen before xive_esc_irq runs */
++ sync
++5: /* Enable XIVE escalation */
+ mfmsr r0
+ andi. r0, r0, MSR_DR /* in real mode? */
+ beq 1f
+- ld r10, VCPU_XIVE_ESC_VADDR(r9)
+- cmpdi r10, 0
+- beq 3f
+- ldx r0, r10, r5
++ ldx r0, r10, r6
+ b 2f
+ 1: ld r10, VCPU_XIVE_ESC_RADDR(r9)
+- cmpdi r10, 0
+- beq 3f
+- ldcix r0, r10, r5
++ ldcix r0, r10, r6
+ 2: sync
+- li r0, 1
+- stb r0, VCPU_XIVE_ESC_ON(r9)
+ #endif /* CONFIG_KVM_XICS */
+ 3: b guest_exit_cont
+
--- /dev/null
+From a13b03bbb4575b350b46090af4dfd30e735aaed1 Mon Sep 17 00:00:00 2001
+From: Thomas Huth <thuth@redhat.com>
+Date: Thu, 29 Aug 2019 14:25:17 +0200
+Subject: KVM: s390: Test for bad access register and size at the start of S390_MEM_OP
+
+From: Thomas Huth <thuth@redhat.com>
+
+commit a13b03bbb4575b350b46090af4dfd30e735aaed1 upstream.
+
+If the KVM_S390_MEM_OP ioctl is called with an access register >= 16,
+then there is certainly a bug in the calling userspace application.
+We check for wrong access registers, but only if the vCPU was already
+in the access register mode before (i.e. the SIE block has recorded
+it). The check is also buried somewhere deep in the calling chain (in
+the function ar_translation()), so this is somewhat hard to find.
+
+It's better to always report an error to the userspace in case this
+field is set wrong, and it's safer in the KVM code if we block wrong
+values here early instead of relying on a check somewhere deep down
+the calling chain, so let's add another check to kvm_s390_guest_mem_op()
+directly.
+
+We also should check that the "size" is non-zero here (thanks to Janosch
+Frank for the hint!). If we do not check the size, we could call vmalloc()
+with this 0 value, and this will cause a kernel warning.
+
+Signed-off-by: Thomas Huth <thuth@redhat.com>
+Link: https://lkml.kernel.org/r/20190829122517.31042-1-thuth@redhat.com
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Reviewed-by: Janosch Frank <frankja@linux.ibm.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kvm/kvm-s390.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/s390/kvm/kvm-s390.c
++++ b/arch/s390/kvm/kvm-s390.c
+@@ -3890,7 +3890,7 @@ static long kvm_s390_guest_mem_op(struct
+ const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
+ | KVM_S390_MEMOP_F_CHECK_ONLY;
+
+- if (mop->flags & ~supported_flags)
++ if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
+ return -EINVAL;
+
+ if (mop->size > MEM_OP_MAX_SIZE)
--- /dev/null
+From 3ca94192278ca8de169d78c085396c424be123b3 Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <wanpengli@tencent.com>
+Date: Wed, 18 Sep 2019 17:50:10 +0800
+Subject: KVM: X86: Fix userspace set invalid CR4
+
+From: Wanpeng Li <wanpengli@tencent.com>
+
+commit 3ca94192278ca8de169d78c085396c424be123b3 upstream.
+
+Reported by syzkaller:
+
+ WARNING: CPU: 0 PID: 6544 at /home/kernel/data/kvm/arch/x86/kvm//vmx/vmx.c:4689 handle_desc+0x37/0x40 [kvm_intel]
+ CPU: 0 PID: 6544 Comm: a.out Tainted: G OE 5.3.0-rc4+ #4
+ RIP: 0010:handle_desc+0x37/0x40 [kvm_intel]
+ Call Trace:
+ vmx_handle_exit+0xbe/0x6b0 [kvm_intel]
+ vcpu_enter_guest+0x4dc/0x18d0 [kvm]
+ kvm_arch_vcpu_ioctl_run+0x407/0x660 [kvm]
+ kvm_vcpu_ioctl+0x3ad/0x690 [kvm]
+ do_vfs_ioctl+0xa2/0x690
+ ksys_ioctl+0x6d/0x80
+ __x64_sys_ioctl+0x1a/0x20
+ do_syscall_64+0x74/0x720
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+When CR4.UMIP is set, the guest should have the UMIP cpuid flag. The
+current kvm set_sregs function doesn't have such a check when userspace
+inputs sregs values. SECONDARY_EXEC_DESC is enabled on writes to CR4.UMIP
+in vmx_set_cr4 even though the guest doesn't have the UMIP cpuid flag. The
+testcase triggers the handle_desc warning when executing the ltr
+instruction, since the guest's architectural CR4 doesn't set UMIP. This
+patch fixes it by adding a valid CR4 and CPUID combination check in
+__set_sregs.
+
+syzkaller source: https://syzkaller.appspot.com/x/repro.c?x=138efb99600000
+
+Reported-by: syzbot+0f1819555fbdce992df9@syzkaller.appspotmail.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
+Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kvm/x86.c | 38 +++++++++++++++++++++-----------------
+ 1 file changed, 21 insertions(+), 17 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -791,34 +791,42 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u
+ }
+ EXPORT_SYMBOL_GPL(kvm_set_xcr);
+
+-int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
++static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+ {
+- unsigned long old_cr4 = kvm_read_cr4(vcpu);
+- unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
+- X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
+-
+ if (cr4 & CR4_RESERVED_BITS)
+- return 1;
++ return -EINVAL;
+
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
+- return 1;
++ return -EINVAL;
+
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
+- return 1;
++ return -EINVAL;
+
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
+- return 1;
++ return -EINVAL;
+
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
+- return 1;
++ return -EINVAL;
+
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
+- return 1;
++ return -EINVAL;
+
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
+- return 1;
++ return -EINVAL;
+
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
++ return -EINVAL;
++
++ return 0;
++}
++
++int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
++{
++ unsigned long old_cr4 = kvm_read_cr4(vcpu);
++ unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
++ X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
++
++ if (kvm_valid_cr4(vcpu, cr4))
+ return 1;
+
+ if (is_long_mode(vcpu)) {
+@@ -8237,10 +8245,6 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
+
+ static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
+ {
+- if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
+- (sregs->cr4 & X86_CR4_OSXSAVE))
+- return -EINVAL;
+-
+ if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
+ /*
+ * When EFER.LME and CR0.PG are set, the processor is in
+@@ -8259,7 +8263,7 @@ static int kvm_valid_sregs(struct kvm_vc
+ return -EINVAL;
+ }
+
+- return 0;
++ return kvm_valid_cr4(vcpu, sregs->cr4);
+ }
+
+ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
--- /dev/null
+From d2f965549006acb865c4638f1f030ebcefdc71f6 Mon Sep 17 00:00:00 2001
+From: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Date: Wed, 29 May 2019 16:42:59 +0800
+Subject: MIPS: Treat Loongson Extensions as ASEs
+
+From: Jiaxun Yang <jiaxun.yang@flygoat.com>
+
+commit d2f965549006acb865c4638f1f030ebcefdc71f6 upstream.
+
+Recently, binutils split the Loongson-3 Extensions into four ASEs:
+MMI, CAM, EXT, EXT2. This patch does the same thing in the kernel and
+exposes them in cpuinfo so applications can probe supported ASEs at
+runtime.
+
+Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Cc: Huacai Chen <chenhc@lemote.com>
+Cc: Yunqiang Su <ysu@wavecomp.com>
+Cc: stable@vger.kernel.org # v4.14+
+Signed-off-by: Paul Burton <paul.burton@mips.com>
+Cc: linux-mips@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/include/asm/cpu-features.h | 16 ++++++++++++++++
+ arch/mips/include/asm/cpu.h | 4 ++++
+ arch/mips/kernel/cpu-probe.c | 6 ++++++
+ arch/mips/kernel/proc.c | 4 ++++
+ 4 files changed, 30 insertions(+)
+
+--- a/arch/mips/include/asm/cpu-features.h
++++ b/arch/mips/include/asm/cpu-features.h
+@@ -387,6 +387,22 @@
+ #define cpu_has_dsp3 __ase(MIPS_ASE_DSP3)
+ #endif
+
++#ifndef cpu_has_loongson_mmi
++#define cpu_has_loongson_mmi __ase(MIPS_ASE_LOONGSON_MMI)
++#endif
++
++#ifndef cpu_has_loongson_cam
++#define cpu_has_loongson_cam __ase(MIPS_ASE_LOONGSON_CAM)
++#endif
++
++#ifndef cpu_has_loongson_ext
++#define cpu_has_loongson_ext __ase(MIPS_ASE_LOONGSON_EXT)
++#endif
++
++#ifndef cpu_has_loongson_ext2
++#define cpu_has_loongson_ext2 __ase(MIPS_ASE_LOONGSON_EXT2)
++#endif
++
+ #ifndef cpu_has_mipsmt
+ #define cpu_has_mipsmt __isa_lt_and_ase(6, MIPS_ASE_MIPSMT)
+ #endif
+--- a/arch/mips/include/asm/cpu.h
++++ b/arch/mips/include/asm/cpu.h
+@@ -436,5 +436,9 @@ enum cpu_type_enum {
+ #define MIPS_ASE_MSA 0x00000100 /* MIPS SIMD Architecture */
+ #define MIPS_ASE_DSP3 0x00000200 /* Signal Processing ASE Rev 3*/
+ #define MIPS_ASE_MIPS16E2 0x00000400 /* MIPS16e2 */
++#define MIPS_ASE_LOONGSON_MMI 0x00000800 /* Loongson MultiMedia extensions Instructions */
++#define MIPS_ASE_LOONGSON_CAM 0x00001000 /* Loongson CAM */
++#define MIPS_ASE_LOONGSON_EXT 0x00002000 /* Loongson EXTensions */
++#define MIPS_ASE_LOONGSON_EXT2 0x00004000 /* Loongson EXTensions R2 */
+
+ #endif /* _ASM_CPU_H */
+--- a/arch/mips/kernel/cpu-probe.c
++++ b/arch/mips/kernel/cpu-probe.c
+@@ -1489,6 +1489,8 @@ static inline void cpu_probe_legacy(stru
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3a");
+ set_isa(c, MIPS_CPU_ISA_M64R1);
++ c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
++ MIPS_ASE_LOONGSON_EXT);
+ break;
+ case PRID_REV_LOONGSON3B_R1:
+ case PRID_REV_LOONGSON3B_R2:
+@@ -1496,6 +1498,8 @@ static inline void cpu_probe_legacy(stru
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3b");
+ set_isa(c, MIPS_CPU_ISA_M64R1);
++ c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
++ MIPS_ASE_LOONGSON_EXT);
+ break;
+ }
+
+@@ -1861,6 +1865,8 @@ static inline void cpu_probe_loongson(st
+ decode_configs(c);
+ c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
+ c->writecombine = _CACHE_UNCACHED_ACCELERATED;
++ c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
++ MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
+ break;
+ default:
+ panic("Unknown Loongson Processor ID!");
+--- a/arch/mips/kernel/proc.c
++++ b/arch/mips/kernel/proc.c
+@@ -124,6 +124,10 @@ static int show_cpuinfo(struct seq_file
+ if (cpu_has_eva) seq_printf(m, "%s", " eva");
+ if (cpu_has_htw) seq_printf(m, "%s", " htw");
+ if (cpu_has_xpa) seq_printf(m, "%s", " xpa");
++ if (cpu_has_loongson_mmi) seq_printf(m, "%s", " loongson-mmi");
++ if (cpu_has_loongson_cam) seq_printf(m, "%s", " loongson-cam");
++ if (cpu_has_loongson_ext) seq_printf(m, "%s", " loongson-ext");
++ if (cpu_has_loongson_ext2) seq_printf(m, "%s", " loongson-ext2");
+ seq_printf(m, "\n");
+
+ if (cpu_has_mmips) {
--- /dev/null
+From e9e006f5fcf2bab59149cb38a48a4817c1b538b4 Mon Sep 17 00:00:00 2001
+From: Mike Christie <mchristi@redhat.com>
+Date: Sun, 4 Aug 2019 14:10:06 -0500
+Subject: nbd: fix max number of supported devs
+
+From: Mike Christie <mchristi@redhat.com>
+
+commit e9e006f5fcf2bab59149cb38a48a4817c1b538b4 upstream.
+
+This fixes a bug added in 4.10 with commit:
+
+commit 9561a7ade0c205bc2ee035a2ac880478dcc1a024
+Author: Josef Bacik <jbacik@fb.com>
+Date: Tue Nov 22 14:04:40 2016 -0500
+
+ nbd: add multi-connection support
+
+that limited the number of devices to 256. Before the patch we could
+create 1000s of devices, but the patch switched us from using our
+own thread to using a work queue which has a default limit of 256
+active works.
+
+The problem is that our recv_work function sits in a loop until
+disconnection but only handles IO for one connection. The work is
+started when the connection is started/restarted, but if we end up
+creating 257 or more connections, the queue_work call just queues
+connection 257+'s recv_work, which then has to wait for one of
+connections 1-256 to be disconnected and its recv_work instance to
+complete.
+
+Instead of reverting back to kthreads, this has us allocate a
+workqueue_struct per device, so we can block in the work.
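+
+A per-device queue created along the lines of the sketch below (the actual
+hunk further down also passes WQ_MEM_RECLAIM | WQ_HIGHPRI) is not subject
+to an active-work limit shared with other devices, so each device's
+recv_work can block until its connections go away without starving other
+devices:
+
+	nbd->recv_workq = alloc_workqueue("knbd%d-recv", WQ_UNBOUND, 0,
+					  nbd->index);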
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Mike Christie <mchristi@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/nbd.c | 39 +++++++++++++++++++++++++--------------
+ 1 file changed, 25 insertions(+), 14 deletions(-)
+
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -106,6 +106,7 @@ struct nbd_device {
+ struct nbd_config *config;
+ struct mutex config_lock;
+ struct gendisk *disk;
++ struct workqueue_struct *recv_workq;
+
+ struct list_head list;
+ struct task_struct *task_recv;
+@@ -134,7 +135,6 @@ static struct dentry *nbd_dbg_dir;
+
+ static unsigned int nbds_max = 16;
+ static int max_part = 16;
+-static struct workqueue_struct *recv_workqueue;
+ static int part_shift;
+
+ static int nbd_dev_dbg_init(struct nbd_device *nbd);
+@@ -1025,7 +1025,7 @@ static int nbd_reconnect_socket(struct n
+ /* We take the tx_mutex in an error path in the recv_work, so we
+ * need to queue_work outside of the tx_mutex.
+ */
+- queue_work(recv_workqueue, &args->work);
++ queue_work(nbd->recv_workq, &args->work);
+
+ atomic_inc(&config->live_connections);
+ wake_up(&config->conn_wait);
+@@ -1126,6 +1126,10 @@ static void nbd_config_put(struct nbd_de
+ kfree(nbd->config);
+ nbd->config = NULL;
+
++ if (nbd->recv_workq)
++ destroy_workqueue(nbd->recv_workq);
++ nbd->recv_workq = NULL;
++
+ nbd->tag_set.timeout = 0;
+ nbd->disk->queue->limits.discard_granularity = 0;
+ nbd->disk->queue->limits.discard_alignment = 0;
+@@ -1154,6 +1158,14 @@ static int nbd_start_device(struct nbd_d
+ return -EINVAL;
+ }
+
++ nbd->recv_workq = alloc_workqueue("knbd%d-recv",
++ WQ_MEM_RECLAIM | WQ_HIGHPRI |
++ WQ_UNBOUND, 0, nbd->index);
++ if (!nbd->recv_workq) {
++ dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
++ return -ENOMEM;
++ }
++
+ blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
+ nbd->task_recv = current;
+
+@@ -1184,7 +1196,7 @@ static int nbd_start_device(struct nbd_d
+ INIT_WORK(&args->work, recv_work);
+ args->nbd = nbd;
+ args->index = i;
+- queue_work(recv_workqueue, &args->work);
++ queue_work(nbd->recv_workq, &args->work);
+ }
+ nbd_size_update(nbd);
+ return error;
+@@ -1204,8 +1216,10 @@ static int nbd_start_device_ioctl(struct
+ mutex_unlock(&nbd->config_lock);
+ ret = wait_event_interruptible(config->recv_wq,
+ atomic_read(&config->recv_threads) == 0);
+- if (ret)
++ if (ret) {
+ sock_shutdown(nbd);
++ flush_workqueue(nbd->recv_workq);
++ }
+ mutex_lock(&nbd->config_lock);
+ nbd_bdev_reset(bdev);
+ /* user requested, ignore socket errors */
+@@ -1835,6 +1849,12 @@ static void nbd_disconnect_and_put(struc
+ nbd_disconnect(nbd);
+ nbd_clear_sock(nbd);
+ mutex_unlock(&nbd->config_lock);
++ /*
++ * Make sure recv thread has finished, so it does not drop the last
++ * config ref and try to destroy the workqueue from inside the work
++ * queue.
++ */
++ flush_workqueue(nbd->recv_workq);
+ if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
+ &nbd->config->runtime_flags))
+ nbd_config_put(nbd);
+@@ -2215,20 +2235,12 @@ static int __init nbd_init(void)
+
+ if (nbds_max > 1UL << (MINORBITS - part_shift))
+ return -EINVAL;
+- recv_workqueue = alloc_workqueue("knbd-recv",
+- WQ_MEM_RECLAIM | WQ_HIGHPRI |
+- WQ_UNBOUND, 0);
+- if (!recv_workqueue)
+- return -ENOMEM;
+
+- if (register_blkdev(NBD_MAJOR, "nbd")) {
+- destroy_workqueue(recv_workqueue);
++ if (register_blkdev(NBD_MAJOR, "nbd"))
+ return -EIO;
+- }
+
+ if (genl_register_family(&nbd_genl_family)) {
+ unregister_blkdev(NBD_MAJOR, "nbd");
+- destroy_workqueue(recv_workqueue);
+ return -EINVAL;
+ }
+ nbd_dbg_init();
+@@ -2270,7 +2282,6 @@ static void __exit nbd_cleanup(void)
+
+ idr_destroy(&nbd_index_idr);
+ genl_unregister_family(&nbd_genl_family);
+- destroy_workqueue(recv_workqueue);
+ unregister_blkdev(NBD_MAJOR, "nbd");
+ }
+
--- /dev/null
+From 62bacb06b9f08965c4ef10e17875450490c948c0 Mon Sep 17 00:00:00 2001
+From: Dmitry Osipenko <digetx@gmail.com>
+Date: Thu, 2 May 2019 02:38:00 +0300
+Subject: PM / devfreq: tegra: Fix kHz to Hz conversion
+
+From: Dmitry Osipenko <digetx@gmail.com>
+
+commit 62bacb06b9f08965c4ef10e17875450490c948c0 upstream.
+
+The kHz to Hz conversion is done incorrectly in a few places in the
+code. This results in a wrong frequency being calculated, because the
+devfreq core uses OPP frequencies that are given in Hz to clamp the
+rate, while tegra-devfreq gives the core a value in kHz and then also
+expects to receive a value in kHz from the core. As a result the memory
+frequency is always set to a value close to ULONG_MAX because of the
+bug. Hence the EMC frequency is always capped to the maximum and the
+driver doesn't do anything useful. This patch was tested on Tegra30 and
+Tegra124 SoCs; EMC frequency scaling works properly now.
+
+Cc: <stable@vger.kernel.org> # 4.14+
+Tested-by: Steev Klimaszewski <steev@kali.org>
+Reviewed-by: Chanwoo Choi <cw00.choi@samsung.com>
+Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
+Acked-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: MyungJoo Ham <myungjoo.ham@samsung.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/devfreq/tegra-devfreq.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
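+A minimal, standalone sketch of the unit mix-up described above (all
+names and values below are illustrative, not taken from the driver):
+because the core already passes Hz, the old extra multiplication by
+KHZ requests a rate 1000x above every OPP, so the rate is effectively
+pinned at the maximum.
+
+  #include <stdio.h>
+
+  #define KHZ 1000UL
+
+  int main(void)
+  {
+          unsigned long opp_max_hz = 800000000UL; /* highest OPP, 800 MHz */
+          unsigned long freq_hz = 204000000UL;    /* from the core, in Hz */
+          unsigned long buggy = freq_hz * KHZ;    /* old code: Hz treated as kHz */
+
+          printf("requested %lu Hz, buggy rate %lu Hz -> clamped to %lu Hz\n",
+                 freq_hz, buggy,
+                 buggy > opp_max_hz ? opp_max_hz : buggy);
+          return 0;
+  }
+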
+--- a/drivers/devfreq/tegra-devfreq.c
++++ b/drivers/devfreq/tegra-devfreq.c
+@@ -486,11 +486,11 @@ static int tegra_devfreq_target(struct d
+ {
+ struct tegra_devfreq *tegra = dev_get_drvdata(dev);
+ struct dev_pm_opp *opp;
+- unsigned long rate = *freq * KHZ;
++ unsigned long rate;
+
+- opp = devfreq_recommended_opp(dev, &rate, flags);
++ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp)) {
+- dev_err(dev, "Failed to find opp for %lu KHz\n", *freq);
++ dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
+ return PTR_ERR(opp);
+ }
+ rate = dev_pm_opp_get_freq(opp);
+@@ -499,8 +499,6 @@ static int tegra_devfreq_target(struct d
+ clk_set_min_rate(tegra->emc_clock, rate);
+ clk_set_rate(tegra->emc_clock, 0);
+
+- *freq = rate;
+-
+ return 0;
+ }
+
+@@ -510,7 +508,7 @@ static int tegra_devfreq_get_dev_status(
+ struct tegra_devfreq *tegra = dev_get_drvdata(dev);
+ struct tegra_devfreq_device *actmon_dev;
+
+- stat->current_frequency = tegra->cur_freq;
++ stat->current_frequency = tegra->cur_freq * KHZ;
+
+ /* To be used by the tegra governor */
+ stat->private_data = tegra;
+@@ -565,7 +563,7 @@ static int tegra_governor_get_target(str
+ target_freq = max(target_freq, dev->target_freq);
+ }
+
+- *freq = target_freq;
++ *freq = target_freq * KHZ;
+
+ return 0;
+ }
--- /dev/null
+From fe55e770327363304c4111423e6f7ff3c650136d Mon Sep 17 00:00:00 2001
+From: Michael Nosthoff <committed@heine.so>
+Date: Fri, 16 Aug 2019 09:58:42 +0200
+Subject: power: supply: sbs-battery: only return health when battery present
+
+From: Michael Nosthoff <committed@heine.so>
+
+commit fe55e770327363304c4111423e6f7ff3c650136d upstream.
+
+When the battery is set to sbs-mode and no GPIO detection is enabled,
+"health" always returns a value even when the battery is not present,
+while all other fields return "not present". This leads to a scenario
+where the driver constantly switches between the "present" and "not
+present" states, which generates a lot of needless traffic on the I2C
+bus.
+
+This commit changes "health" to return an error when the battery is not
+responding, leading to a consistent "not present" state.
+
+Fixes: 76b16f4cdfb8 ("power: supply: sbs-battery: don't assume MANUFACTURER_DATA formats")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Michael Nosthoff <committed@heine.so>
+Reviewed-by: Brian Norris <briannorris@chromium.org>
+Tested-by: Brian Norris <briannorris@chromium.org>
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/power/supply/sbs-battery.c | 25 ++++++++++++++++---------
+ 1 file changed, 16 insertions(+), 9 deletions(-)
+
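+A simplified, standalone sketch of the intended behaviour (invented
+names, not the driver code): after this change a failed SBS status
+read makes both properties agree that the battery is absent, instead
+of only PRESENT saying so while HEALTH still reports a value.
+
+  #include <stdio.h>
+
+  enum prop { PROP_PRESENT, PROP_HEALTH };
+
+  /* status_ret mimics the return value of the dummy status read */
+  static int presence_or_health(int status_ret, enum prop p, int *val)
+  {
+          if (status_ret < 0) {             /* battery did not answer */
+                  if (p == PROP_PRESENT) {
+                          *val = 0;         /* "not present" */
+                          return 0;
+                  }
+                  return status_ret;        /* health: propagate the error */
+          }
+          *val = (p == PROP_PRESENT) ? 1 : 0; /* 0 stands in for HEALTH_UNKNOWN */
+          return 0;
+  }
+
+  int main(void)
+  {
+          int val = -1;
+          int ret;
+
+          ret = presence_or_health(-5, PROP_PRESENT, &val);
+          printf("present -> ret=%d val=%d\n", ret, val);
+
+          ret = presence_or_health(-5, PROP_HEALTH, &val);
+          printf("health  -> ret=%d (error, no stale value)\n", ret);
+          return 0;
+  }
+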
+--- a/drivers/power/supply/sbs-battery.c
++++ b/drivers/power/supply/sbs-battery.c
+@@ -323,17 +323,22 @@ static int sbs_get_battery_presence_and_
+ {
+ int ret;
+
+- if (psp == POWER_SUPPLY_PROP_PRESENT) {
+- /* Dummy command; if it succeeds, battery is present. */
+- ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
+- if (ret < 0)
+- val->intval = 0; /* battery disconnected */
+- else
+- val->intval = 1; /* battery present */
+- } else { /* POWER_SUPPLY_PROP_HEALTH */
++ /* Dummy command; if it succeeds, battery is present. */
++ ret = sbs_read_word_data(client, sbs_data[REG_STATUS].addr);
++
++ if (ret < 0) { /* battery not present*/
++ if (psp == POWER_SUPPLY_PROP_PRESENT) {
++ val->intval = 0;
++ return 0;
++ }
++ return ret;
++ }
++
++ if (psp == POWER_SUPPLY_PROP_PRESENT)
++ val->intval = 1; /* battery present */
++ else /* POWER_SUPPLY_PROP_HEALTH */
+ /* SBS spec doesn't have a general health command. */
+ val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+- }
+
+ return 0;
+ }
+@@ -635,6 +640,8 @@ static int sbs_get_property(struct power
+ else
+ ret = sbs_get_battery_presence_and_health(client, psp,
+ val);
++
++ /* this can only be true if no gpio is used */
+ if (psp == POWER_SUPPLY_PROP_PRESENT)
+ return 0;
+ break;
--- /dev/null
+From 99956a9e08251a1234434b492875b1eaff502a12 Mon Sep 17 00:00:00 2001
+From: Michael Nosthoff <committed@heine.so>
+Date: Fri, 16 Aug 2019 09:37:42 +0200
+Subject: power: supply: sbs-battery: use correct flags field
+
+From: Michael Nosthoff <committed@heine.so>
+
+commit 99956a9e08251a1234434b492875b1eaff502a12 upstream.
+
+The type flag is stored in the chip->flags field, not in the
+client->flags field. This currently leads to the TI-specific health
+function never being used, as client->flags doesn't use that bit, so
+the code always falls back to the generic one.
+
+Fixes: 76b16f4cdfb8 ("power: supply: sbs-battery: don't assume MANUFACTURER_DATA formats")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Michael Nosthoff <committed@heine.so>
+Reviewed-by: Brian Norris <briannorris@chromium.org>
+Reviewed-by: Enric Balletbo i Serra <enric.balletbo@collabora.com>
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/power/supply/sbs-battery.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
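+A small sketch of the mix-up (trimmed-down, invented types, not the
+real structures): the TI quirk bit lives in the driver's own flags
+word, filled from the device match data at probe time, while the
+i2c_client flags word carries unrelated I2C-core bits, so testing the
+quirk bit there never matches.
+
+  #include <stdio.h>
+
+  #define BIT(n)               (1U << (n))
+  #define SBS_FLAGS_TI_BQ20Z75 BIT(0)     /* driver quirk bit */
+
+  struct fake_i2c_client { unsigned short flags; }; /* I2C-core bits  */
+  struct fake_sbs_info   { unsigned int flags; };   /* driver's flags */
+
+  int main(void)
+  {
+          struct fake_i2c_client client = { .flags = 0 };
+          struct fake_sbs_info chip = { .flags = SBS_FLAGS_TI_BQ20Z75 };
+
+          printf("client->flags: %s\n", (client.flags & SBS_FLAGS_TI_BQ20Z75) ?
+                 "TI health path" : "generic health path");
+          printf("chip->flags:   %s\n", (chip.flags & SBS_FLAGS_TI_BQ20Z75) ?
+                 "TI health path" : "generic health path");
+          return 0;
+  }
+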
+--- a/drivers/power/supply/sbs-battery.c
++++ b/drivers/power/supply/sbs-battery.c
+@@ -629,7 +629,7 @@ static int sbs_get_property(struct power
+ switch (psp) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ case POWER_SUPPLY_PROP_HEALTH:
+- if (client->flags & SBS_FLAGS_TI_BQ20Z75)
++ if (chip->flags & SBS_FLAGS_TI_BQ20Z75)
+ ret = sbs_get_ti_battery_presence_and_health(client,
+ psp, val);
+ else
--- /dev/null
+From 677733e296b5c7a37c47da391fc70a43dc40bd67 Mon Sep 17 00:00:00 2001
+From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
+Date: Tue, 24 Sep 2019 09:22:51 +0530
+Subject: powerpc/book3s64/mm: Don't do tlbie fixup for some hardware revisions
+
+From: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+
+commit 677733e296b5c7a37c47da391fc70a43dc40bd67 upstream.
+
+The store ordering vs tlbie issue mentioned in commit
+a5d4b5891c2f ("powerpc/mm: Fixup tlbie vs store ordering issue on
+POWER9") is fixed for Nimbus 2.3 and Cumulus 1.3 revisions. We don't
+need to apply the fixup if we are running on them.
+
+We can only do this on PowerNV. On pseries guests with KVM we still
+don't support redoing the feature fixup after migration, so there we
+should keep enabling all the needed workarounds, because we can
+possibly migrate between DD 2.3 and DD 2.2.
+
+Fixes: a5d4b5891c2f ("powerpc/mm: Fixup tlbie vs store ordering issue on POWER9")
+Cc: stable@vger.kernel.org # v4.16+
+Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20190924035254.24612-1-aneesh.kumar@linux.ibm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/dt_cpu_ftrs.c | 30 ++++++++++++++++++++++++++++--
+ 1 file changed, 28 insertions(+), 2 deletions(-)
+
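+A standalone sketch of the revision check added below (the PVR field
+layout mirrors update_tlbie_feature_flag(); the sample PVR values are
+illustrative only): the low 12 bits of the PVR read as 0xMNN for
+DD M.NN, so a Nimbus part at 0x202 (DD 2.2) still needs the workaround
+while 0x203 (DD 2.3) does not.
+
+  #include <stdio.h>
+
+  /* needs-workaround decision for a POWER9 part, given its PVR */
+  static int needs_tlbie_fixup(unsigned long pvr)
+  {
+          if ((pvr & 0xe000) == 0)              /* Nimbus */
+                  return (pvr & 0xfff) < 0x203; /* fixed in DD 2.3 */
+          if ((pvr & 0xc000) == 0)              /* Cumulus */
+                  return (pvr & 0xfff) < 0x103; /* fixed in DD 1.3 */
+          return 1;                             /* unknown: stay safe */
+  }
+
+  int main(void)
+  {
+          printf("DD 2.2 (0x202): %s\n", needs_tlbie_fixup(0x202) ? "fixup" : "no fixup");
+          printf("DD 2.3 (0x203): %s\n", needs_tlbie_fixup(0x203) ? "fixup" : "no fixup");
+          return 0;
+  }
+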
+--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
++++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
+@@ -694,9 +694,35 @@ static bool __init cpufeatures_process_f
+ return true;
+ }
+
++/*
++ * Handle POWER9 broadcast tlbie invalidation issue using
++ * cpu feature flag.
++ */
++static __init void update_tlbie_feature_flag(unsigned long pvr)
++{
++ if (PVR_VER(pvr) == PVR_POWER9) {
++ /*
++ * Set the tlbie feature flag for anything below
++ * Nimbus DD 2.3 and Cumulus DD 1.3
++ */
++ if ((pvr & 0xe000) == 0) {
++ /* Nimbus */
++ if ((pvr & 0xfff) < 0x203)
++ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
++ } else if ((pvr & 0xc000) == 0) {
++ /* Cumulus */
++ if ((pvr & 0xfff) < 0x103)
++ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
++ } else {
++ WARN_ONCE(1, "Unknown PVR");
++ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
++ }
++ }
++}
++
+ static __init void cpufeatures_cpu_quirks(void)
+ {
+- int version = mfspr(SPRN_PVR);
++ unsigned long version = mfspr(SPRN_PVR);
+
+ /*
+ * Not all quirks can be derived from the cpufeatures device tree.
+@@ -715,10 +741,10 @@ static __init void cpufeatures_cpu_quirk
+
+ if ((version & 0xffff0000) == 0x004e0000) {
+ cur_cpu_spec->cpu_features &= ~(CPU_FTR_DAWR);
+- cur_cpu_spec->cpu_features |= CPU_FTR_P9_TLBIE_BUG;
+ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TIDR;
+ }
+
++ update_tlbie_feature_flag(version);
+ /*
+ * PKEY was not in the initial base or feature node
+ * specification, but it should become optional in the next
--- /dev/null
+From 99ead78afd1128bfcebe7f88f3b102fb2da09aee Mon Sep 17 00:00:00 2001
+From: Balbir Singh <bsingharora@gmail.com>
+Date: Tue, 20 Aug 2019 13:43:47 +0530
+Subject: powerpc/mce: Fix MCE handling for huge pages
+
+From: Balbir Singh <bsingharora@gmail.com>
+
+commit 99ead78afd1128bfcebe7f88f3b102fb2da09aee upstream.
+
+The current code would fail on huge page addresses, since the shift
+would be incorrect. Use the correct page shift value returned by
+__find_linux_pte() to get the correct physical address. The code is now
+more generic and can handle both regular and compound pages.
+
+Fixes: ba41e1e1ccb9 ("powerpc/mce: Hookup derror (load/store) UE errors")
+Signed-off-by: Balbir Singh <bsingharora@gmail.com>
+[arbab@linux.ibm.com: Fixup pseries_do_memory_failure()]
+Signed-off-by: Reza Arbab <arbab@linux.ibm.com>
+Tested-by: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+Signed-off-by: Santosh Sivaraj <santosh@fossix.org>
+Cc: stable@vger.kernel.org # v4.15+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20190820081352.8641-3-santosh@fossix.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/mce_power.c | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
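+A worked, standalone example of the pfn math in the hunk below
+(assuming 64K base pages, PAGE_SHIFT = 16, and a 16M huge page,
+shift = 24; the addresses are made up): the base-page-aligned offset
+of the faulting address within the huge page is OR-ed into the frame
+address taken from the PTE, instead of always returning the pfn of the
+huge page's first base page.
+
+  #include <stdio.h>
+
+  #define PAGE_SHIFT 16
+  #define PAGE_SIZE  (1UL << PAGE_SHIFT)
+
+  int main(void)
+  {
+          unsigned int shift = 24;               /* 16M huge page          */
+          unsigned long pte_phys = 0x40000000UL; /* frame address from PTE */
+          unsigned long addr = 0x7fff12345678UL; /* faulting address       */
+
+          unsigned long rpnmask = (1UL << shift) - PAGE_SIZE;
+          unsigned long phys = pte_phys | (addr & rpnmask);
+
+          printf("pfn without the fix: 0x%lx\n", pte_phys >> PAGE_SHIFT);
+          printf("pfn with the fix:    0x%lx\n", phys >> PAGE_SHIFT);
+          return 0;
+  }
+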
+--- a/arch/powerpc/kernel/mce_power.c
++++ b/arch/powerpc/kernel/mce_power.c
+@@ -39,6 +39,7 @@
+ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
+ {
+ pte_t *ptep;
++ unsigned int shift;
+ unsigned long flags;
+ struct mm_struct *mm;
+
+@@ -48,13 +49,18 @@ static unsigned long addr_to_pfn(struct
+ mm = &init_mm;
+
+ local_irq_save(flags);
+- if (mm == current->mm)
+- ptep = find_current_mm_pte(mm->pgd, addr, NULL, NULL);
+- else
+- ptep = find_init_mm_pte(addr, NULL);
++ ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
+ local_irq_restore(flags);
++
+ if (!ptep || pte_special(*ptep))
+ return ULONG_MAX;
++
++ if (shift > PAGE_SHIFT) {
++ unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
++
++ return pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
++ }
++
+ return pte_pfn(*ptep);
+ }
+
+@@ -339,7 +345,7 @@ static const struct mce_derror_table mce
+ MCE_INITIATOR_CPU, MCE_SEV_ERROR_SYNC, },
+ { 0, false, 0, 0, 0, 0 } };
+
+-static int mce_find_instr_ea_and_pfn(struct pt_regs *regs, uint64_t *addr,
++static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
+ uint64_t *phys_addr)
+ {
+ /*
+@@ -530,7 +536,8 @@ static int mce_handle_derror(struct pt_r
+ * kernel/exception-64s.h
+ */
+ if (get_paca()->in_mce < MAX_MCE_DEPTH)
+- mce_find_instr_ea_and_pfn(regs, addr, phys_addr);
++ mce_find_instr_ea_and_phys(regs, addr,
++ phys_addr);
+ }
+ found = 1;
+ }
--- /dev/null
+From b5bda6263cad9a927e1a4edb7493d542da0c1410 Mon Sep 17 00:00:00 2001
+From: Santosh Sivaraj <santosh@fossix.org>
+Date: Tue, 20 Aug 2019 13:43:46 +0530
+Subject: powerpc/mce: Schedule work from irq_work
+
+From: Santosh Sivaraj <santosh@fossix.org>
+
+commit b5bda6263cad9a927e1a4edb7493d542da0c1410 upstream.
+
+schedule_work() cannot be called from MCE exception context, as an MCE
+can interrupt even code running with interrupts disabled.
+
+Fixes: 733e4a4c4467 ("powerpc/mce: hookup memory_failure for UE errors")
+Cc: stable@vger.kernel.org # v4.15+
+Reviewed-by: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
+Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
+Acked-by: Balbir Singh <bsingharora@gmail.com>
+Signed-off-by: Santosh Sivaraj <santosh@fossix.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20190820081352.8641-2-santosh@fossix.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/mce.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
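+The general shape of the change, as a kernel-style sketch (the names
+are placeholders, not the mce.c symbols): instead of calling
+schedule_work() directly from the machine check handler, the handler
+queues an NMI-safe irq_work whose callback then schedules the regular
+work item.
+
+  #include <linux/irq_work.h>
+  #include <linux/workqueue.h>
+
+  static void example_process_events(struct work_struct *work)
+  {
+          /* handle the queued UE events in process context */
+  }
+  static DECLARE_WORK(example_work, example_process_events);
+
+  static void example_irq_work_fn(struct irq_work *work)
+  {
+          /* runs later in plain interrupt context, not in the MCE
+           * handler, so calling schedule_work() here is safe */
+          schedule_work(&example_work);
+  }
+  static struct irq_work example_irq_work = {
+          .func = example_irq_work_fn,
+  };
+
+  /* called from the machine check handler (NMI-like context) */
+  static void example_queue_event(void)
+  {
+          irq_work_queue(&example_irq_work);
+  }
+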
+--- a/arch/powerpc/kernel/mce.c
++++ b/arch/powerpc/kernel/mce.c
+@@ -45,6 +45,7 @@ static DEFINE_PER_CPU(struct machine_che
+ mce_ue_event_queue);
+
+ static void machine_check_process_queued_event(struct irq_work *work);
++static void machine_check_ue_irq_work(struct irq_work *work);
+ void machine_check_ue_event(struct machine_check_event *evt);
+ static void machine_process_ue_event(struct work_struct *work);
+
+@@ -52,6 +53,10 @@ static struct irq_work mce_event_process
+ .func = machine_check_process_queued_event,
+ };
+
++static struct irq_work mce_ue_event_irq_work = {
++ .func = machine_check_ue_irq_work,
++};
++
+ DECLARE_WORK(mce_ue_event_work, machine_process_ue_event);
+
+ static void mce_set_error_info(struct machine_check_event *mce,
+@@ -208,6 +213,10 @@ void release_mce_event(void)
+ get_mce_event(NULL, true);
+ }
+
++static void machine_check_ue_irq_work(struct irq_work *work)
++{
++ schedule_work(&mce_ue_event_work);
++}
+
+ /*
+ * Queue up the MCE event which then can be handled later.
+@@ -225,7 +234,7 @@ void machine_check_ue_event(struct machi
+ memcpy(this_cpu_ptr(&mce_ue_event_queue[index]), evt, sizeof(*evt));
+
+ /* Queue work to process this event later. */
+- schedule_work(&mce_ue_event_work);
++ irq_work_queue(&mce_ue_event_irq_work);
+ }
+
+ /*
--- /dev/null
+From 56090a3902c80c296e822d11acdb6a101b322c52 Mon Sep 17 00:00:00 2001
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+Date: Thu, 18 Jul 2019 15:11:36 +1000
+Subject: powerpc/powernv/ioda: Fix race in TCE level allocation
+
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+
+commit 56090a3902c80c296e822d11acdb6a101b322c52 upstream.
+
+pnv_tce() returns a pointer to a TCE entry, and originally the TCE
+table would be pre-allocated. For the default case of a 2GB window the
+table needs only a single level and that is fine. However, if more
+levels are requested, it is possible to get a race when two threads
+want a pointer to a TCE entry from the same page of TCEs.
+
+This adds a cmpxchg to handle the race. Note that once a TCE is
+non-zero, it cannot become zero again.
+
+Fixes: a68bd1267b72 ("powerpc/powernv/ioda: Allocate indirect TCE levels on demand")
+CC: stable@vger.kernel.org # v4.19+
+Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20190718051139.74787-2-aik@ozlabs.ru
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/powernv/pci-ioda-tce.c | 18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
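+A standalone sketch of the lock-free pattern the hunk below applies,
+written with C11 atomics instead of the kernel's cmpxchg() (all names
+invented): each caller speculatively allocates a new level, tries to
+publish it with a compare-and-swap, and the loser frees its copy and
+uses the published one.
+
+  #include <stdatomic.h>
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  static _Atomic(void *) slot;    /* one entry of an upper TCE level */
+
+  static void *get_or_install_level(void)
+  {
+          void *cur = atomic_load(&slot);
+          void *expected = NULL;
+          void *mine;
+
+          if (cur)
+                  return cur;     /* already populated */
+
+          mine = calloc(1, 4096); /* speculatively allocate a level */
+          if (!mine)
+                  return NULL;
+
+          if (atomic_compare_exchange_strong(&slot, &expected, mine))
+                  return mine;    /* we won: ours is now published */
+
+          free(mine);             /* we lost: drop ours ...        */
+          return expected;        /* ... and use the winner's copy */
+  }
+
+  int main(void)
+  {
+          void *a = get_or_install_level();
+          void *b = get_or_install_level();
+
+          printf("same level both times: %s\n", a == b ? "yes" : "no");
+          return 0;
+  }
+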
+--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+@@ -49,6 +49,9 @@ static __be64 *pnv_alloc_tce_level(int n
+ return addr;
+ }
+
++static void pnv_pci_ioda2_table_do_free_pages(__be64 *addr,
++ unsigned long size, unsigned int levels);
++
+ static __be64 *pnv_tce(struct iommu_table *tbl, bool user, long idx, bool alloc)
+ {
+ __be64 *tmp = user ? tbl->it_userspace : (__be64 *) tbl->it_base;
+@@ -58,9 +61,9 @@ static __be64 *pnv_tce(struct iommu_tabl
+
+ while (level) {
+ int n = (idx & mask) >> (level * shift);
+- unsigned long tce;
++ unsigned long oldtce, tce = be64_to_cpu(READ_ONCE(tmp[n]));
+
+- if (tmp[n] == 0) {
++ if (!tce) {
+ __be64 *tmp2;
+
+ if (!alloc)
+@@ -71,10 +74,15 @@ static __be64 *pnv_tce(struct iommu_tabl
+ if (!tmp2)
+ return NULL;
+
+- tmp[n] = cpu_to_be64(__pa(tmp2) |
+- TCE_PCI_READ | TCE_PCI_WRITE);
++ tce = __pa(tmp2) | TCE_PCI_READ | TCE_PCI_WRITE;
++ oldtce = be64_to_cpu(cmpxchg(&tmp[n], 0,
++ cpu_to_be64(tce)));
++ if (oldtce) {
++ pnv_pci_ioda2_table_do_free_pages(tmp2,
++ ilog2(tbl->it_level_size) + 3, 1);
++ tce = oldtce;
++ }
+ }
+- tce = be64_to_cpu(tmp[n]);
+
+ tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
+ idx &= ~mask;
--- /dev/null
+From e7de4f7b64c23e503a8c42af98d56f2a7462bd6d Mon Sep 17 00:00:00 2001
+From: Andrew Donnellan <ajd@linux.ibm.com>
+Date: Fri, 3 May 2019 17:52:53 +1000
+Subject: powerpc/powernv: Restrict OPAL symbol map to only be readable by root
+
+From: Andrew Donnellan <ajd@linux.ibm.com>
+
+commit e7de4f7b64c23e503a8c42af98d56f2a7462bd6d upstream.
+
+Currently the OPAL symbol map is globally readable, which seems bad as
+it contains physical addresses.
+
+Restrict it to root.
+
+Fixes: c8742f85125d ("powerpc/powernv: Expose OPAL firmware symbol map")
+Cc: stable@vger.kernel.org # v3.19+
+Suggested-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20190503075253.22798-1-ajd@linux.ibm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/platforms/powernv/opal.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/arch/powerpc/platforms/powernv/opal.c
++++ b/arch/powerpc/platforms/powernv/opal.c
+@@ -680,7 +680,10 @@ static ssize_t symbol_map_read(struct fi
+ bin_attr->size);
+ }
+
+-static BIN_ATTR_RO(symbol_map, 0);
++static struct bin_attribute symbol_map_attr = {
++ .attr = {.name = "symbol_map", .mode = 0400},
++ .read = symbol_map_read
++};
+
+ static void opal_export_symmap(void)
+ {
+@@ -697,10 +700,10 @@ static void opal_export_symmap(void)
+ return;
+
+ /* Setup attributes */
+- bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0]));
+- bin_attr_symbol_map.size = be64_to_cpu(syms[1]);
++ symbol_map_attr.private = __va(be64_to_cpu(syms[0]));
++ symbol_map_attr.size = be64_to_cpu(syms[1]);
+
+- rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map);
++ rc = sysfs_create_bin_file(opal_kobj, &symbol_map_attr);
+ if (rc)
+ pr_warn("Error %d creating OPAL symbols file\n", rc);
+ }
--- /dev/null
+From ea298e6ee8b34b3ed4366be7eb799d0650ebe555 Mon Sep 17 00:00:00 2001
+From: Vasily Gorbik <gor@linux.ibm.com>
+Date: Tue, 17 Sep 2019 20:04:04 +0200
+Subject: s390/cio: avoid calling strlen on null pointer
+
+From: Vasily Gorbik <gor@linux.ibm.com>
+
+commit ea298e6ee8b34b3ed4366be7eb799d0650ebe555 upstream.
+
+Fix the following kasan finding:
+BUG: KASAN: global-out-of-bounds in ccwgroup_create_dev+0x850/0x1140
+Read of size 1 at addr 0000000000000000 by task systemd-udevd.r/561
+
+CPU: 30 PID: 561 Comm: systemd-udevd.r Tainted: G B
+Hardware name: IBM 3906 M04 704 (LPAR)
+Call Trace:
+([<0000000231b3db7e>] show_stack+0x14e/0x1a8)
+ [<0000000233826410>] dump_stack+0x1d0/0x218
+ [<000000023216fac4>] print_address_description+0x64/0x380
+ [<000000023216f5a8>] __kasan_report+0x138/0x168
+ [<00000002331b8378>] ccwgroup_create_dev+0x850/0x1140
+ [<00000002332b618a>] group_store+0x3a/0x50
+ [<00000002323ac706>] kernfs_fop_write+0x246/0x3b8
+ [<00000002321d409a>] vfs_write+0x132/0x450
+ [<00000002321d47da>] ksys_write+0x122/0x208
+ [<0000000233877102>] system_call+0x2a6/0x2c8
+
+Triggered by:
+openat(AT_FDCWD, "/sys/bus/ccwgroup/drivers/qeth/group",
+ O_WRONLY|O_CREAT|O_TRUNC|O_CLOEXEC, 0666) = 16
+write(16, "0.0.bd00,0.0.bd01,0.0.bd02", 26) = 26
+
+The problem is that __get_next_id in ccwgroup_create_dev might set the
+"buf" buffer pointer to NULL, so an explicit check for that is required.
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Sebastian Ott <sebott@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/cio/ccwgroup.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/s390/cio/ccwgroup.c
++++ b/drivers/s390/cio/ccwgroup.c
+@@ -372,7 +372,7 @@ int ccwgroup_create_dev(struct device *p
+ goto error;
+ }
+ /* Check for trailing stuff. */
+- if (i == num_devices && strlen(buf) > 0) {
++ if (i == num_devices && buf && strlen(buf) > 0) {
+ rc = -EINVAL;
+ goto error;
+ }
--- /dev/null
+From ab5758848039de9a4b249d46e4ab591197eebaf2 Mon Sep 17 00:00:00 2001
+From: Vasily Gorbik <gor@linux.ibm.com>
+Date: Thu, 19 Sep 2019 15:55:17 +0200
+Subject: s390/cio: exclude subchannels with no parent from pseudo check
+
+From: Vasily Gorbik <gor@linux.ibm.com>
+
+commit ab5758848039de9a4b249d46e4ab591197eebaf2 upstream.
+
+The ccw console is created early in start_kernel and used before the
+css is initialized or the ccw console subchannel is registered. Until
+then the console subchannel does not have a parent. For that reason,
+assume that subchannels with no parent are not pseudo subchannels. This
+fixes the following kasan finding:
+
+BUG: KASAN: global-out-of-bounds in sch_is_pseudo_sch+0x8e/0x98
+Read of size 8 at addr 00000000000005e8 by task swapper/0/0
+
+CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.3.0-rc8-07370-g6ac43dd12538 #2
+Hardware name: IBM 2964 NC9 702 (z/VM 6.4.0)
+Call Trace:
+([<000000000012cd76>] show_stack+0x14e/0x1e0)
+ [<0000000001f7fb44>] dump_stack+0x1a4/0x1f8
+ [<00000000007d7afc>] print_address_description+0x64/0x3c8
+ [<00000000007d75f6>] __kasan_report+0x14e/0x180
+ [<00000000018a2986>] sch_is_pseudo_sch+0x8e/0x98
+ [<000000000189b950>] cio_enable_subchannel+0x1d0/0x510
+ [<00000000018cac7c>] ccw_device_recognition+0x12c/0x188
+ [<0000000002ceb1a8>] ccw_device_enable_console+0x138/0x340
+ [<0000000002cf1cbe>] con3215_init+0x25e/0x300
+ [<0000000002c8770a>] console_init+0x68a/0x9b8
+ [<0000000002c6a3d6>] start_kernel+0x4fe/0x728
+ [<0000000000100070>] startup_continue+0x70/0xd0
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Sebastian Ott <sebott@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/cio/css.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/s390/cio/css.c
++++ b/drivers/s390/cio/css.c
+@@ -1213,6 +1213,8 @@ device_initcall(cio_settle_init);
+
+ int sch_is_pseudo_sch(struct subchannel *sch)
+ {
++ if (!sch->dev.parent)
++ return 0;
+ return sch == to_css(sch->dev.parent)->pseudo_subchannel;
+ }
+
--- /dev/null
+From 8769f610fe6d473e5e8e221709c3ac402037da6c Mon Sep 17 00:00:00 2001
+From: Vasily Gorbik <gor@linux.ibm.com>
+Date: Tue, 13 Aug 2019 20:11:08 +0200
+Subject: s390/process: avoid potential reading of freed stack
+
+From: Vasily Gorbik <gor@linux.ibm.com>
+
+commit 8769f610fe6d473e5e8e221709c3ac402037da6c upstream.
+
+With THREAD_INFO_IN_TASK (which is selected on s390) a task's stack
+usage is refcounted and should always be protected by get/put when
+touching another task's stack, to avoid race conditions with the task's
+destruction code.
+
+Fixes: d5c352cdd022 ("s390: move thread_info into task_struct")
+Cc: stable@vger.kernel.org # v4.10+
+Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/process.c | 22 ++++++++++++++++------
+ 1 file changed, 16 insertions(+), 6 deletions(-)
+
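+The general pattern of the fix, as a kernel-style sketch (the helper
+name is invented): every walk of another task's stack is bracketed by
+try_get_task_stack()/put_task_stack(), so with THREAD_INFO_IN_TASK the
+stack cannot be freed while it is being read.
+
+  #include <linux/sched/task_stack.h>
+
+  static unsigned long example_peek_stack(struct task_struct *p)
+  {
+          unsigned long ret = 0;
+
+          if (!try_get_task_stack(p))
+                  return 0;  /* task is exiting, its stack may be gone */
+
+          /* ... it is now safe to walk task_stack_page(p) ... */
+
+          put_task_stack(p);
+          return ret;
+  }
+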
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -183,20 +183,30 @@ unsigned long get_wchan(struct task_stru
+
+ if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
+ return 0;
++
++ if (!try_get_task_stack(p))
++ return 0;
++
+ low = task_stack_page(p);
+ high = (struct stack_frame *) task_pt_regs(p);
+ sf = (struct stack_frame *) p->thread.ksp;
+- if (sf <= low || sf > high)
+- return 0;
++ if (sf <= low || sf > high) {
++ return_address = 0;
++ goto out;
++ }
+ for (count = 0; count < 16; count++) {
+ sf = (struct stack_frame *) sf->back_chain;
+- if (sf <= low || sf > high)
+- return 0;
++ if (sf <= low || sf > high) {
++ return_address = 0;
++ goto out;
++ }
+ return_address = sf->gprs[8];
+ if (!in_sched_functions(return_address))
+- return return_address;
++ goto out;
+ }
+- return 0;
++out:
++ put_task_stack(p);
++ return return_address;
+ }
+
+ unsigned long arch_align_stack(unsigned long sp)
--- /dev/null
+From f3122a79a1b0a113d3aea748e0ec26f2cb2889de Mon Sep 17 00:00:00 2001
+From: Vasily Gorbik <gor@linux.ibm.com>
+Date: Tue, 17 Sep 2019 22:59:03 +0200
+Subject: s390/topology: avoid firing events before kobjs are created
+
+From: Vasily Gorbik <gor@linux.ibm.com>
+
+commit f3122a79a1b0a113d3aea748e0ec26f2cb2889de upstream.
+
+arch_update_cpu_topology is first called from:
+kernel_init_freeable->sched_init_smp->sched_init_domains
+
+even before the cpus have been registered in:
+kernel_init_freeable->do_one_initcall->s390_smp_init
+
+Do not trigger kobject_uevent change events until the cpu devices are
+actually created. This fixes the following kasan findings:
+
+BUG: KASAN: global-out-of-bounds in kobject_uevent_env+0xb40/0xee0
+Read of size 8 at addr 0000000000000020 by task swapper/0/1
+
+BUG: KASAN: global-out-of-bounds in kobject_uevent_env+0xb36/0xee0
+Read of size 8 at addr 0000000000000018 by task swapper/0/1
+
+CPU: 0 PID: 1 Comm: swapper/0 Tainted: G B
+Hardware name: IBM 3906 M04 704 (LPAR)
+Call Trace:
+([<0000000143c6db7e>] show_stack+0x14e/0x1a8)
+ [<0000000145956498>] dump_stack+0x1d0/0x218
+ [<000000014429fb4c>] print_address_description+0x64/0x380
+ [<000000014429f630>] __kasan_report+0x138/0x168
+ [<0000000145960b96>] kobject_uevent_env+0xb36/0xee0
+ [<0000000143c7c47c>] arch_update_cpu_topology+0x104/0x108
+ [<0000000143df9e22>] sched_init_domains+0x62/0xe8
+ [<000000014644c94a>] sched_init_smp+0x3a/0xc0
+ [<0000000146433a20>] kernel_init_freeable+0x558/0x958
+ [<000000014599002a>] kernel_init+0x22/0x160
+ [<00000001459a71d4>] ret_from_fork+0x28/0x30
+ [<00000001459a71dc>] kernel_thread_starter+0x0/0x10
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/topology.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/kernel/topology.c
++++ b/arch/s390/kernel/topology.c
+@@ -311,7 +311,8 @@ int arch_update_cpu_topology(void)
+ on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
+ for_each_online_cpu(cpu) {
+ dev = get_cpu_device(cpu);
+- kobject_uevent(&dev->kobj, KOBJ_CHANGE);
++ if (dev)
++ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+ }
+ return rc;
+ }
--- /dev/null
+s390-process-avoid-potential-reading-of-freed-stack.patch
+kvm-s390-test-for-bad-access-register-and-size-at-the-start-of-s390_mem_op.patch
+s390-topology-avoid-firing-events-before-kobjs-are-created.patch
+s390-cio-exclude-subchannels-with-no-parent-from-pseudo-check.patch
+kvm-ppc-book3s-hv-fix-race-in-re-enabling-xive-escalation-interrupts.patch
+kvm-ppc-book3s-hv-check-for-mmu-ready-on-piggybacked-virtual-cores.patch
+kvm-ppc-book3s-hv-don-t-lose-pending-doorbell-request-on-migration-on-p9.patch
+kvm-x86-fix-userspace-set-invalid-cr4.patch
+kvm-nvmx-handle-page-fault-in-vmread-fix.patch
+nbd-fix-max-number-of-supported-devs.patch
+pm-devfreq-tegra-fix-khz-to-hz-conversion.patch
+asoc-define-a-set-of-dapm-pre-post-up-events.patch
+asoc-sgtl5000-improve-vag-power-and-mute-control.patch
+powerpc-mce-fix-mce-handling-for-huge-pages.patch
+powerpc-mce-schedule-work-from-irq_work.patch
+powerpc-powernv-restrict-opal-symbol-map-to-only-be-readable-by-root.patch
+powerpc-powernv-ioda-fix-race-in-tce-level-allocation.patch
+powerpc-book3s64-mm-don-t-do-tlbie-fixup-for-some-hardware-revisions.patch
+can-mcp251x-mcp251x_hw_reset-allow-more-time-after-a-reset.patch
+tools-lib-traceevent-fix-robust-test-of-do_generate_dynamic_list_file.patch
+crypto-qat-silence-smp_processor_id-warning.patch
+crypto-skcipher-unmap-pages-after-an-external-error.patch
+crypto-cavium-zip-add-missing-single_release.patch
+crypto-caam-fix-concurrency-issue-in-givencrypt-descriptor.patch
+crypto-ccree-account-for-tee-not-ready-to-report.patch
+crypto-ccree-use-the-full-crypt-length-value.patch
+mips-treat-loongson-extensions-as-ases.patch
+power-supply-sbs-battery-use-correct-flags-field.patch
+power-supply-sbs-battery-only-return-health-when-battery-present.patch
+tracing-make-sure-variable-reference-alias-has-correct-var_ref_idx.patch
+usercopy-avoid-highmem-pfn-warning.patch
+timer-read-jiffies-once-when-forwarding-base-clk.patch
+s390-cio-avoid-calling-strlen-on-null-pointer.patch
--- /dev/null
+From e430d802d6a3aaf61bd3ed03d9404888a29b9bf9 Mon Sep 17 00:00:00 2001
+From: Li RongQing <lirongqing@baidu.com>
+Date: Thu, 19 Sep 2019 20:04:47 +0800
+Subject: timer: Read jiffies once when forwarding base clk
+
+From: Li RongQing <lirongqing@baidu.com>
+
+commit e430d802d6a3aaf61bd3ed03d9404888a29b9bf9 upstream.
+
+The "timer delayed for more than 3 seconds" warning was triggered
+during testing.
+
+ Workqueue: events_unbound sched_tick_remote
+ RIP: 0010:sched_tick_remote+0xee/0x100
+ ...
+ Call Trace:
+ process_one_work+0x18c/0x3a0
+ worker_thread+0x30/0x380
+ kthread+0x113/0x130
+ ret_from_fork+0x22/0x40
+
+The reason is that the code in collect_expired_timers() uses jiffies
+unprotected:
+
+ if (next_event > jiffies)
+ base->clk = jiffies;
+
+As the compiler is allowed to reload the value, base->clk can advance
+between the check and the store, and in the worst case advance farther
+than the next event. That causes the timer expiry to be delayed until
+the wheel pointer wraps around.
+
+Convert the code to use READ_ONCE().
+
+Fixes: 236968383cf5 ("timers: Optimize collect_expired_timers() for NOHZ")
+Signed-off-by: Li RongQing <lirongqing@baidu.com>
+Signed-off-by: Liang ZhiCheng <liangzhicheng@baidu.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/1568894687-14499-1-git-send-email-lirongqing@baidu.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/time/timer.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
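+A standalone sketch of the pattern (the READ_ONCE() stand-in and all
+names are illustrative): jiffies is read exactly once into a local,
+and that single snapshot is used for both the comparison and the
+assignment, so the stored clock can never be newer than the value that
+was checked.
+
+  #include <stdio.h>
+
+  /* userspace stand-in for the kernel's READ_ONCE() */
+  #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
+
+  static unsigned long fake_jiffies;  /* advanced elsewhere, e.g. by a tick */
+  static unsigned long base_clk;
+
+  static void forward_base_clk(unsigned long next_event)
+  {
+          unsigned long now = READ_ONCE(fake_jiffies);  /* one snapshot */
+
+          if ((long)(now - base_clk) > 2) {
+                  if (next_event > now)
+                          base_clk = now;  /* same value the check used */
+                  else
+                          base_clk = next_event;
+          }
+  }
+
+  int main(void)
+  {
+          fake_jiffies = 1000;
+          forward_base_clk(1010);
+          printf("base_clk forwarded to %lu\n", base_clk);
+          return 0;
+  }
+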
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1590,24 +1590,26 @@ void timer_clear_idle(void)
+ static int collect_expired_timers(struct timer_base *base,
+ struct hlist_head *heads)
+ {
++ unsigned long now = READ_ONCE(jiffies);
++
+ /*
+ * NOHZ optimization. After a long idle sleep we need to forward the
+ * base to current jiffies. Avoid a loop by searching the bitfield for
+ * the next expiring timer.
+ */
+- if ((long)(jiffies - base->clk) > 2) {
++ if ((long)(now - base->clk) > 2) {
+ unsigned long next = __next_timer_interrupt(base);
+
+ /*
+ * If the next timer is ahead of time forward to current
+ * jiffies, otherwise forward to the next expiry time:
+ */
+- if (time_after(next, jiffies)) {
++ if (time_after(next, now)) {
+ /*
+ * The call site will increment base->clk and then
+ * terminate the expiry loop immediately.
+ */
+- base->clk = jiffies;
++ base->clk = now;
+ return 0;
+ }
+ base->clk = next;
--- /dev/null
+From 82a2f88458d70704be843961e10b5cef9a6e95d3 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Mon, 5 Aug 2019 13:01:50 -0400
+Subject: tools lib traceevent: Fix "robust" test of do_generate_dynamic_list_file
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 82a2f88458d70704be843961e10b5cef9a6e95d3 upstream.
+
+The tools/lib/traceevent/Makefile had a test added to it to detect a
+failure of "nm" when making the dynamic list file (whatever that is).
+The problem is that the test sorts the values "U W w", and some
+versions of sort will place "w" ahead of "W" (even though it has a
+higher ASCII value) and break the test.
+
+Add 'tr "w" "W"' to merge the two and not worry about the ordering.
+
+Reported-by: Tzvetomir Stoyanov <tstoyanov@vmware.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: David Carrillo-Cisneros <davidcc@google.com>
+Cc: He Kuang <hekuang@huawei.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Michal Marek <mmarek@suse.com>
+Cc: Paul Turner <pjt@google.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Cc: Wang Nan <wangnan0@huawei.com>
+Cc: stable@vger.kernel.org
+Fixes: 6467753d61399 ("tools lib traceevent: Robustify do_generate_dynamic_list_file")
+Link: http://lkml.kernel.org/r/20190805130150.25acfeb1@gandalf.local.home
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/lib/traceevent/Makefile | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/tools/lib/traceevent/Makefile
++++ b/tools/lib/traceevent/Makefile
+@@ -259,8 +259,8 @@ endef
+
+ define do_generate_dynamic_list_file
+ symbol_type=`$(NM) -u -D $1 | awk 'NF>1 {print $$1}' | \
+- xargs echo "U W w" | tr ' ' '\n' | sort -u | xargs echo`;\
+- if [ "$$symbol_type" = "U W w" ];then \
++ xargs echo "U w W" | tr 'w ' 'W\n' | sort -u | xargs echo`;\
++ if [ "$$symbol_type" = "U W" ];then \
+ (echo '{'; \
+ $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;\
+ echo '};'; \
--- /dev/null
+From 17f8607a1658a8e70415eef67909f990d13017b5 Mon Sep 17 00:00:00 2001
+From: Tom Zanussi <zanussi@kernel.org>
+Date: Sun, 1 Sep 2019 17:02:01 -0500
+Subject: tracing: Make sure variable reference alias has correct var_ref_idx
+
+From: Tom Zanussi <zanussi@kernel.org>
+
+commit 17f8607a1658a8e70415eef67909f990d13017b5 upstream.
+
+Original changelog from Steve Rostedt (except last sentence which
+explains the problem, and the Fixes: tag):
+
+I performed a three way histogram with the following commands:
+
+echo 'irq_lat u64 lat pid_t pid' > synthetic_events
+echo 'wake_lat u64 lat u64 irqlat pid_t pid' >> synthetic_events
+echo 'hist:keys=common_pid:irqts=common_timestamp.usecs if function == 0xffffffff81200580' > events/timer/hrtimer_start/trigger
+echo 'hist:keys=common_pid:lat=common_timestamp.usecs-$irqts:onmatch(timer.hrtimer_start).irq_lat($lat,pid) if common_flags & 1' > events/sched/sched_waking/trigger
+echo 'hist:keys=pid:wakets=common_timestamp.usecs,irqlat=lat' > events/synthetic/irq_lat/trigger
+echo 'hist:keys=next_pid:lat=common_timestamp.usecs-$wakets,irqlat=$irqlat:onmatch(synthetic.irq_lat).wake_lat($lat,$irqlat,next_pid)' > events/sched/sched_switch/trigger
+echo 1 > events/synthetic/wake_lat/enable
+
+Basically I wanted to see:
+
+ hrtimer_start (calling function tick_sched_timer)
+
+Note:
+
+ # grep tick_sched_timer /proc/kallsyms
+ffffffff81200580 t tick_sched_timer
+
+And save the time of that, and then record sched_waking if it is called
+in interrupt context and with the same pid as the hrtimer_start; it
+will record the latency between that and the waking event.
+
+I then look at when the task that is woken is scheduled in, and record
+the latency between the wakeup and the task running.
+
+At the end, the wake_lat synthetic event will show the wakeup to
+scheduled latency, as well as the irq latency from hrtimer_start to
+the wakeup. The problem is that I found this:
+
+ <idle>-0 [007] d... 190.485261: wake_lat: lat=27 irqlat=190485230 pid=698
+ <idle>-0 [005] d... 190.485283: wake_lat: lat=40 irqlat=190485239 pid=10
+ <idle>-0 [002] d... 190.488327: wake_lat: lat=56 irqlat=190488266 pid=335
+ <idle>-0 [005] d... 190.489330: wake_lat: lat=64 irqlat=190489262 pid=10
+ <idle>-0 [003] d... 190.490312: wake_lat: lat=43 irqlat=190490265 pid=77
+ <idle>-0 [005] d... 190.493322: wake_lat: lat=54 irqlat=190493262 pid=10
+ <idle>-0 [005] d... 190.497305: wake_lat: lat=35 irqlat=190497267 pid=10
+ <idle>-0 [005] d... 190.501319: wake_lat: lat=50 irqlat=190501264 pid=10
+
+The irqlat seemed quite large! Investigating this further, if I had
+enabled the irq_lat synthetic event, I noticed this:
+
+ <idle>-0 [002] d.s. 249.429308: irq_lat: lat=164968 pid=335
+ <idle>-0 [002] d... 249.429369: wake_lat: lat=55 irqlat=249429308 pid=335
+
+Notice that the timestamp of the irq_lat "249.429308" is awfully
+similar to the reported irqlat variable. In fact, all instances were
+like this. It appeared that:
+
+ irqlat=$irqlat
+
+wasn't assigning the old $irqlat to the new irqlat variable, but
+instead was assigning the $irqts to it.
+
+The issue is that assigning the old $irqlat to the new irqlat variable
+creates a variable reference alias, but the alias creation code
+forgets to make sure the alias uses the same var_ref_idx to access the
+reference.
+
+Link: http://lkml.kernel.org/r/1567375321.5282.12.camel@kernel.org
+
+Cc: Linux Trace Devel <linux-trace-devel@vger.kernel.org>
+Cc: linux-rt-users <linux-rt-users@vger.kernel.org>
+Cc: stable@vger.kernel.org
+Fixes: 7e8b88a30b085 ("tracing: Add hist trigger support for variable reference aliases")
+Reported-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Tom Zanussi <zanussi@kernel.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_events_hist.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -2526,6 +2526,8 @@ static struct hist_field *create_alias(s
+ return NULL;
+ }
+
++ alias->var_ref_idx = var_ref->var_ref_idx;
++
+ return alias;
+ }
+
--- /dev/null
+From 314eed30ede02fa925990f535652254b5bad6b65 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Tue, 17 Sep 2019 11:00:25 -0700
+Subject: usercopy: Avoid HIGHMEM pfn warning
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 314eed30ede02fa925990f535652254b5bad6b65 upstream.
+
+When running on a system with >512MB RAM with a 32-bit kernel built with:
+
+ CONFIG_DEBUG_VIRTUAL=y
+ CONFIG_HIGHMEM=y
+ CONFIG_HARDENED_USERCOPY=y
+
+all execve()s will fail due to argv copying into kmap()ed pages, and
+during the usercopy check the eventual call to virt_to_page() will be
+looking for "bad" kmap (highmem) pointers due to CONFIG_DEBUG_VIRTUAL=y:
+
+ ------------[ cut here ]------------
+ kernel BUG at ../arch/x86/mm/physaddr.c:83!
+ invalid opcode: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC
+ CPU: 1 PID: 1 Comm: swapper/0 Not tainted 5.3.0-rc8 #6
+ Hardware name: Dell Inc. Inspiron 1318/0C236D, BIOS A04 01/15/2009
+ EIP: __phys_addr+0xaf/0x100
+ ...
+ Call Trace:
+ __check_object_size+0xaf/0x3c0
+ ? __might_sleep+0x80/0xa0
+ copy_strings+0x1c2/0x370
+ copy_strings_kernel+0x2b/0x40
+ __do_execve_file+0x4ca/0x810
+ ? kmem_cache_alloc+0x1c7/0x370
+ do_execve+0x1b/0x20
+ ...
+
+The check is from arch/x86/mm/physaddr.c:
+
+ VIRTUAL_BUG_ON((phys_addr >> PAGE_SHIFT) > max_low_pfn);
+
+Due to the kmap() in fs/exec.c:
+
+ kaddr = kmap(kmapped_page);
+ ...
+ if (copy_from_user(kaddr+offset, str, bytes_to_copy)) ...
+
+Now we can fetch the correct page to avoid the pfn check. In both cases,
+hardened usercopy will need to walk the page-span checker (if enabled)
+to do sanity checking.
+
+Reported-by: Randy Dunlap <rdunlap@infradead.org>
+Tested-by: Randy Dunlap <rdunlap@infradead.org>
+Fixes: f5509cc18daa ("mm: Hardened usercopy")
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Link: https://lore.kernel.org/r/201909171056.7F2FFD17@keescook
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/usercopy.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
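+A kernel-style sketch of the lookup change (an invented helper, not
+the usercopy code itself): kmap_to_page() resolves a pointer that may
+refer to a kmap()ed HIGHMEM page and falls back to the ordinary lowmem
+translation otherwise, so CONFIG_DEBUG_VIRTUAL never sees a "bad"
+highmem virtual address.
+
+  #include <linux/highmem.h>
+  #include <linux/mm.h>
+
+  /* highmem-aware replacement for virt_to_head_page(ptr) */
+  static struct page *example_object_head_page(const void *ptr)
+  {
+          /* the old virt_to_head_page(ptr) BUGs on kmap()ed addresses
+           * when CONFIG_DEBUG_VIRTUAL=y */
+          return compound_head(kmap_to_page((void *)ptr));
+  }
+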
+--- a/mm/usercopy.c
++++ b/mm/usercopy.c
+@@ -15,6 +15,7 @@
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+ #include <linux/mm.h>
++#include <linux/highmem.h>
+ #include <linux/slab.h>
+ #include <linux/sched.h>
+ #include <linux/sched/task.h>
+@@ -231,7 +232,12 @@ static inline void check_heap_object(con
+ if (!virt_addr_valid(ptr))
+ return;
+
+- page = virt_to_head_page(ptr);
++ /*
++ * When CONFIG_HIGHMEM=y, kmap_to_page() will give either the
++ * highmem page or fallback to virt_to_page(). The following
++ * is effectively a highmem-aware virt_to_head_page().
++ */
++ page = compound_head(kmap_to_page((void *)ptr));
+
+ if (PageSlab(page)) {
+ /* Check slab allocator for flags and size. */