--- /dev/null
+++ b/queue-4.14/media-em28xx-fix-misplaced-reset-of-dev-v4l-field_count.patch
+From ben@decadent.org.uk Fri Jan 18 16:59:45 2019
+From: Ben Hutchings <ben@decadent.org.uk>
+Date: Thu, 17 Jan 2019 00:25:58 +0000
+Subject: media: em28xx: Fix misplaced reset of dev->v4l2::field_count
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: stable@vger.kernel.org, Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Message-ID: <20190117002558.7r5s27nohy2bkmlh@decadent.org.uk>
+Content-Disposition: inline
+
+From: Ben Hutchings <ben@decadent.org.uk>
+
+The backport of commit afeaade90db4 ("media: em28xx: make
+v4l2-compliance happier by starting sequence on zero") added a
+reset of em28xx_v4l2::field_count in em28xx_enable_analog_tuner(),
+but it should be done in em28xx_start_analog_streaming().
+
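+For context, this counter feeds the buffer sequence numbers that
+v4l2-compliance checks, so it has to return to zero whenever a new
+stream starts. A minimal sketch of the pattern (illustrative only,
+not the driver code; vb2_get_drv_priv() is the standard videobuf2
+accessor):
+
+  static int start_streaming(struct vb2_queue *vq, unsigned int count)
+  {
+          struct em28xx *dev = vb2_get_drv_priv(vq);
+
+          dev->v4l2->field_count = 0;  /* sequence numbering restarts at 0 */
+          /* ...resource checks and USB streaming setup follow... */
+          return 0;
+  }
+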
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Cc: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/usb/em28xx/em28xx-video.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/media/usb/em28xx/em28xx-video.c
++++ b/drivers/media/usb/em28xx/em28xx-video.c
+@@ -900,8 +900,6 @@ static int em28xx_enable_analog_tuner(st
+ if (!mdev || !v4l2->decoder)
+ return 0;
+
+- dev->v4l2->field_count = 0;
+-
+ /*
+ * This will find the tuner that is connected into the decoder.
+ * Technically, this is not 100% correct, as the device may be
+@@ -1074,6 +1072,8 @@ int em28xx_start_analog_streaming(struct
+
+ em28xx_videodbg("%s\n", __func__);
+
++ dev->v4l2->field_count = 0;
++
+ /* Make sure streaming is not already in progress for this type
+ of filehandle (e.g. video, vbi) */
+ rc = res_get(dev, vq->type);
--- /dev/null
+++ b/queue-4.14/mmc-sdhci-msm-disable-cdr-function-on-tx.patch
+From a89e7bcb18081c611eb6cf50edd440fa4983a71a Mon Sep 17 00:00:00 2001
+From: Loic Poulain <loic.poulain@linaro.org>
+Date: Tue, 4 Dec 2018 13:25:32 +0100
+Subject: mmc: sdhci-msm: Disable CDR function on TX
+
+From: Loic Poulain <loic.poulain@linaro.org>
+
+commit a89e7bcb18081c611eb6cf50edd440fa4983a71a upstream.
+
+The Clock Data Recovery (CDR) circuit automatically adjusts the RX
+sampling point/phase for high-frequency cards (SDR104, HS200...).
+CDR is automatically enabled during DLL configuration. However,
+according to the APQ8016 reference manual, this function must be
+disabled during TX and during the tuning phase, in order to prevent
+interference with tuning and unexpected phase alteration during TX
+transfers.
+
+This patch enables/disables CDR according to the current transfer mode.
+
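+The intended rule, as a minimal sketch (illustrative only, not the
+driver code; the real logic lives in the write_w hook in the diff
+below):
+
+  #include <stdbool.h>
+
+  /* Desired CDR state for a given SDHCI command/transfer. */
+  static bool cdr_should_be_enabled(bool data_read, bool tuning_cmd)
+  {
+          /*
+           * CDR tracks the RX sampling phase, so it is wanted only for
+           * real read transfers; tuning commands probe the phase
+           * themselves, and TX must not have its phase altered.
+           */
+          return data_read && !tuning_cmd;
+  }
+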
+This fixes sporadic write transfer issues observed with some SDR104 and
+HS200 cards.
+
+Inspired by sdhci-msm downstream patch:
+https://chromium-review.googlesource.com/c/chromiumos/third_party/kernel/+/432516/
+
+Reported-by: Leonid Segal <leonid.s@variscite.com>
+Reported-by: Manabu Igusa <migusa@arrowjapan.com>
+Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Acked-by: Georgi Djakov <georgi.djakov@linaro.org>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+[georgi: backport to v4.14]
+Signed-off-by: Georgi Djakov <georgi.djakov@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci-msm.c | 51 ++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 50 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -138,6 +138,8 @@ struct sdhci_msm_host {
+ bool calibration_done;
+ u8 saved_tuning_phase;
+ bool use_cdclp533;
++ bool use_cdr;
++ u32 transfer_mode;
+ };
+
+ static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host,
+@@ -815,6 +817,23 @@ out:
+ return ret;
+ }
+
++static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
++{
++ u32 config, oldconfig = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
++
++ config = oldconfig;
++ if (enable) {
++ config |= CORE_CDR_EN;
++ config &= ~CORE_CDR_EXT_EN;
++ } else {
++ config &= ~CORE_CDR_EN;
++ config |= CORE_CDR_EXT_EN;
++ }
++
++ if (config != oldconfig)
++ writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
++}
++
+ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
+ {
+ struct sdhci_host *host = mmc_priv(mmc);
+@@ -832,8 +851,14 @@ static int sdhci_msm_execute_tuning(stru
+ if (host->clock <= CORE_FREQ_100MHZ ||
+ !(ios.timing == MMC_TIMING_MMC_HS400 ||
+ ios.timing == MMC_TIMING_MMC_HS200 ||
+- ios.timing == MMC_TIMING_UHS_SDR104))
++ ios.timing == MMC_TIMING_UHS_SDR104)) {
++ msm_host->use_cdr = false;
++ sdhci_msm_set_cdr(host, false);
+ return 0;
++ }
++
++ /* Clock-Data-Recovery used to dynamically adjust RX sampling point */
++ msm_host->use_cdr = true;
+
+ /*
+ * For HS400 tuning in HS200 timing requires:
+@@ -1092,6 +1117,29 @@ out:
+ __sdhci_msm_set_clock(host, clock);
+ }
+
++static void sdhci_msm_write_w(struct sdhci_host *host, u16 val, int reg)
++{
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
++
++ switch (reg) {
++ case SDHCI_TRANSFER_MODE:
++ msm_host->transfer_mode = val;
++ break;
++ case SDHCI_COMMAND:
++ if (!msm_host->use_cdr)
++ break;
++ if ((msm_host->transfer_mode & SDHCI_TRNS_READ) &&
++ (SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK_HS200) &&
++ (SDHCI_GET_CMD(val) != MMC_SEND_TUNING_BLOCK))
++ sdhci_msm_set_cdr(host, true);
++ else
++ sdhci_msm_set_cdr(host, false);
++ break;
++ }
++ writew(val, host->ioaddr + reg);
++}
++
+ static const struct of_device_id sdhci_msm_dt_match[] = {
+ { .compatible = "qcom,sdhci-msm-v4" },
+ {},
+@@ -1107,6 +1155,7 @@ static const struct sdhci_ops sdhci_msm_
+ .set_bus_width = sdhci_set_bus_width,
+ .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
+ .voltage_switch = sdhci_msm_voltage_switch,
++ .write_w = sdhci_msm_write_w,
+ };
+
+ static const struct sdhci_pltfm_data sdhci_msm_pdata = {
--- /dev/null
+++ b/queue-4.14/sched-fair-fix-bandwidth-timer-clock-drift-condition.patch
+From 512ac999d2755d2b7109e996a76b6fb8b888631d Mon Sep 17 00:00:00 2001
+From: Xunlei Pang <xlpang@linux.alibaba.com>
+Date: Wed, 20 Jun 2018 18:18:33 +0800
+Subject: sched/fair: Fix bandwidth timer clock drift condition
+
+From: Xunlei Pang <xlpang@linux.alibaba.com>
+
+commit 512ac999d2755d2b7109e996a76b6fb8b888631d upstream.
+
+I noticed that cgroup task groups constantly get throttled even
+if they have low CPU usage; this causes jitter in the response
+time of some of our business containers when CPU quotas are enabled.
+
+It's very simple to reproduce:
+
+ mkdir /sys/fs/cgroup/cpu/test
+ cd /sys/fs/cgroup/cpu/test
+ echo 100000 > cpu.cfs_quota_us
+ echo $$ > tasks
+
+then repeat:
+
+ cat cpu.stat | grep nr_throttled # nr_throttled will increase steadily
+
+After some analysis, we found that cfs_rq::runtime_remaining will
+be cleared by expire_cfs_rq_runtime() due to two equal but stale
+"cfs_{b|q}->runtime_expires" after the period timer is re-armed.
+
+The current condition used to detect clock drift in
+expire_cfs_rq_runtime() is wrong: the two runtime_expires are
+actually the same when clock drift happens, so this condition can
+never be hit. The original design was done correctly by this commit:
+
+  a9cf55b28610 ("sched: Expire invalid runtime")
+
+... but was later changed to the current implementation because of a
+locking bug in the original.
+
+This patch takes another approach: it adds a new field to each of
+struct cfs_rq and struct cfs_bandwidth to record the expiration
+update sequence, and uses it to detect clock drift (drift has
+occurred if the two sequence numbers are equal).
+
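+To illustrate the mechanism (a minimal userspace sketch, not the
+kernel code): the bandwidth pool bumps its sequence number once per
+period refill, each runqueue snapshots it together with the runtime,
+and comparing sequence numbers then distinguishes "same period, the
+local clock merely drifted" from "new period, runtime truly expired",
+even when the recomputed expiry timestamps happen to collide:
+
+  #include <stdio.h>
+
+  struct pool  { unsigned long long expires; int seq; };           /* cfs_bandwidth */
+  struct local { unsigned long long expires; int seq; long left; }; /* cfs_rq */
+
+  static void refill(struct pool *p, unsigned long long now,
+                     unsigned long long period)
+  {
+          p->expires = now + period;
+          p->seq++;                  /* one bump per period refill */
+  }
+
+  static void expire_local(struct local *l, const struct pool *p)
+  {
+          if (l->seq == p->seq)      /* same period: apparent expiry is drift */
+                  l->expires += 1;   /* extend deadline (TICK_NSEC in the kernel) */
+          else                       /* pool was refilled: truly expired */
+                  l->left = 0;
+  }
+
+  int main(void)
+  {
+          struct pool p = { .expires = 100, .seq = 0 };
+          struct local l = { .expires = 100, .seq = 0, .left = 5 };
+
+          expire_local(&l, &p);      /* sequences equal: runtime kept */
+          refill(&p, 100, 100);      /* a real refill bumps the sequence */
+          expire_local(&l, &p);      /* sequences differ: runtime cleared */
+          printf("expires=%llu left=%ld\n", l.expires, l.left);
+          return 0;
+  }
+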
+Signed-off-by: Xunlei Pang <xlpang@linux.alibaba.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+[alakeshh: backport: fixed merge conflicts:
+ - sched.h: fix the indentation and the order in which the variables
+   are declared to match the coding style of the existing 4.14 code.
+   Struct members of the same type were declared on separate lines in
+   the upstream patch; this has been changed back to declaring several
+   members of the same type on one line,
+   e.g. int a; int b; -> int a, b; ]
+Signed-off-by: Alakesh Haloi <alakeshh@amazon.com>
+Reviewed-by: Ben Segall <bsegall@google.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: <stable@vger.kernel.org> # 4.14.x
+Fixes: 51f2176d74ac ("sched/fair: Fix unlocked reads of some cfs_b->quota/period")
+Link: http://lkml.kernel.org/r/20180620101834.24455-1-xlpang@linux.alibaba.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/fair.c | 14 ++++++++------
+ kernel/sched/sched.h | 4 +++-
+ 2 files changed, 11 insertions(+), 7 deletions(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4087,6 +4087,7 @@ void __refill_cfs_bandwidth_runtime(stru
+ now = sched_clock_cpu(smp_processor_id());
+ cfs_b->runtime = cfs_b->quota;
+ cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
++ cfs_b->expires_seq++;
+ }
+
+ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
+@@ -4109,6 +4110,7 @@ static int assign_cfs_rq_runtime(struct
+ struct task_group *tg = cfs_rq->tg;
+ struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
+ u64 amount = 0, min_amount, expires;
++ int expires_seq;
+
+ /* note: this is a positive sum as runtime_remaining <= 0 */
+ min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
+@@ -4125,6 +4127,7 @@ static int assign_cfs_rq_runtime(struct
+ cfs_b->idle = 0;
+ }
+ }
++ expires_seq = cfs_b->expires_seq;
+ expires = cfs_b->runtime_expires;
+ raw_spin_unlock(&cfs_b->lock);
+
+@@ -4134,8 +4137,10 @@ static int assign_cfs_rq_runtime(struct
+ * spread between our sched_clock and the one on which runtime was
+ * issued.
+ */
+- if ((s64)(expires - cfs_rq->runtime_expires) > 0)
++ if (cfs_rq->expires_seq != expires_seq) {
++ cfs_rq->expires_seq = expires_seq;
+ cfs_rq->runtime_expires = expires;
++ }
+
+ return cfs_rq->runtime_remaining > 0;
+ }
+@@ -4161,12 +4166,9 @@ static void expire_cfs_rq_runtime(struct
+ * has not truly expired.
+ *
+ * Fortunately we can check determine whether this the case by checking
+- * whether the global deadline has advanced. It is valid to compare
+- * cfs_b->runtime_expires without any locks since we only care about
+- * exact equality, so a partial write will still work.
++ * whether the global deadline(cfs_b->expires_seq) has advanced.
+ */
+-
+- if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
++ if (cfs_rq->expires_seq == cfs_b->expires_seq) {
+ /* extend local deadline, drift is bounded above by 2 ticks */
+ cfs_rq->runtime_expires += TICK_NSEC;
+ } else {
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -281,8 +281,9 @@ struct cfs_bandwidth {
+ u64 quota, runtime;
+ s64 hierarchical_quota;
+ u64 runtime_expires;
++ int expires_seq;
+
+- int idle, period_active;
++ short idle, period_active;
+ struct hrtimer period_timer, slack_timer;
+ struct list_head throttled_cfs_rq;
+
+@@ -488,6 +489,7 @@ struct cfs_rq {
+
+ #ifdef CONFIG_CFS_BANDWIDTH
+ int runtime_enabled;
++ int expires_seq;
+ u64 runtime_expires;
+ s64 runtime_remaining;
+
--- a/queue-4.14/series
+++ b/queue-4.14/series
tty-simplify-tty-count-math-in-tty_reopen.patch
tty-don-t-hold-ldisc-lock-in-tty_reopen-if-ldisc-present.patch
can-gw-ensure-dlc-boundaries-after-can-frame-modification.patch
+mmc-sdhci-msm-disable-cdr-function-on-tx.patch
+media-em28xx-fix-misplaced-reset-of-dev-v4l-field_count.patch
+sched-fair-fix-bandwidth-timer-clock-drift-condition.patch