--- /dev/null
+From abb6627910a1e783c8e034b35b7c80e5e7f98f41 Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Wed, 12 Oct 2016 21:47:03 +0200
+Subject: cpufreq: conservative: Fix next frequency selection
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+commit abb6627910a1e783c8e034b35b7c80e5e7f98f41 upstream.
+
+Commit d352cf47d93e (cpufreq: conservative: Do not use transition
+notifications) overlooked the case when the "frequency step" used
+by the conservative governor is small relative to the distances
+between the available frequencies and broke the algorithm by
+using policy->cur instead of the previously requested frequency
+when computing the next one.
+
+As a result, the governor may not be able to go outside of a narrow
+range between two consecutive available frequencies.
+
+Fix the problem by making the governor save the previously requested
+frequency and select the next one relative to that value (unless it is
+out of range, in which case policy->cur will be used instead).
+
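+For illustration only (this sketch is not part of the patch; up_threshold,
+down_threshold and freq_step stand in for the governor tunables and the
+value returned by get_freq_target()), the resulting selection logic is
+roughly:
+
+	unsigned int requested_freq = dbs_info->requested_freq;
+
+	/* If the limits changed meanwhile, fall back to policy->cur. */
+	if (requested_freq > policy->max || requested_freq < policy->min)
+		requested_freq = policy->cur;
+
+	if (load > up_threshold) {			/* speed up */
+		requested_freq += freq_step;
+		if (requested_freq > policy->max)
+			requested_freq = policy->max;
+	} else if (load < down_threshold) {		/* slow down */
+		if (requested_freq > policy->min + freq_step)
+			requested_freq -= freq_step;
+		else
+			requested_freq = policy->min;
+	}
+
+	/* Remember the request instead of relying on policy->cur next time. */
+	dbs_info->requested_freq = requested_freq;
+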
+Fixes: d352cf47d93e (cpufreq: conservative: Do not use transition notifications)
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=177171
+Reported-and-tested-by: Aleksey Rybalkin <aleksey@rybalkin.org>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/cpufreq_conservative.c | 19 ++++++++++++++++---
+ 1 file changed, 16 insertions(+), 3 deletions(-)
+
+--- a/drivers/cpufreq/cpufreq_conservative.c
++++ b/drivers/cpufreq/cpufreq_conservative.c
+@@ -17,6 +17,7 @@
+ struct cs_policy_dbs_info {
+ struct policy_dbs_info policy_dbs;
+ unsigned int down_skip;
++ unsigned int requested_freq;
+ };
+
+ static inline struct cs_policy_dbs_info *to_dbs_info(struct policy_dbs_info *policy_dbs)
+@@ -61,6 +62,7 @@ static unsigned int cs_dbs_timer(struct
+ {
+ struct policy_dbs_info *policy_dbs = policy->governor_data;
+ struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy_dbs);
++ unsigned int requested_freq = dbs_info->requested_freq;
+ struct dbs_data *dbs_data = policy_dbs->dbs_data;
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ unsigned int load = dbs_update(policy);
+@@ -72,10 +74,16 @@ static unsigned int cs_dbs_timer(struct
+ if (cs_tuners->freq_step == 0)
+ goto out;
+
++ /*
++ * If requested_freq is out of range, it is likely that the limits
++ * changed in the meantime, so fall back to current frequency in that
++ * case.
++ */
++ if (requested_freq > policy->max || requested_freq < policy->min)
++ requested_freq = policy->cur;
++
+ /* Check for frequency increase */
+ if (load > dbs_data->up_threshold) {
+- unsigned int requested_freq = policy->cur;
+-
+ dbs_info->down_skip = 0;
+
+ /* if we are already at full speed then break out early */
+@@ -83,8 +91,11 @@ static unsigned int cs_dbs_timer(struct
+ goto out;
+
+ requested_freq += get_freq_target(cs_tuners, policy);
++ if (requested_freq > policy->max)
++ requested_freq = policy->max;
+
+ __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
++ dbs_info->requested_freq = requested_freq;
+ goto out;
+ }
+
+@@ -95,7 +106,7 @@ static unsigned int cs_dbs_timer(struct
+
+ /* Check for frequency decrease */
+ if (load < cs_tuners->down_threshold) {
+- unsigned int freq_target, requested_freq = policy->cur;
++ unsigned int freq_target;
+ /*
+ * if we cannot reduce the frequency anymore, break out early
+ */
+@@ -109,6 +120,7 @@ static unsigned int cs_dbs_timer(struct
+ requested_freq = policy->min;
+
+ __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
++ dbs_info->requested_freq = requested_freq;
+ }
+
+ out:
+@@ -287,6 +299,7 @@ static void cs_start(struct cpufreq_poli
+ struct cs_policy_dbs_info *dbs_info = to_dbs_info(policy->governor_data);
+
+ dbs_info->down_skip = 0;
++ dbs_info->requested_freq = policy->cur;
+ }
+
+ static struct dbs_governor cs_governor = {
--- /dev/null
+From c6fe46a79ecd79606bb96fada4515f6b23f87b62 Mon Sep 17 00:00:00 2001
+From: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Date: Tue, 18 Oct 2016 00:41:12 +0900
+Subject: cpufreq: fix overflow in cpufreq_table_find_index_dl()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+
+commit c6fe46a79ecd79606bb96fada4515f6b23f87b62 upstream.
+
+'best' is always less than or equal to 'pos', so `best - pos' returns
+a negative value which is then cast to `unsigned int' and passed to
+__cpufreq_driver_target()->acpi_cpufreq_target() for policy->freq_table
+selection. This results in
+
+ BUG: unable to handle kernel paging request at ffff881019b469f8
+ IP: [<ffffffffa00356c1>] acpi_cpufreq_target+0x4f/0x190 [acpi_cpufreq]
+ PGD 267f067
+ PUD 0
+
+ Oops: 0000 [#1] PREEMPT SMP
+ CPU: 6 PID: 70 Comm: kworker/6:1 Not tainted 4.9.0-rc1-next-20161017-dbg-dirty
+ Workqueue: events dbs_work_handler
+ task: ffff88041b808000 task.stack: ffff88041b810000
+ RIP: 0010:[<ffffffffa00356c1>] [<ffffffffa00356c1>] acpi_cpufreq_target+0x4f/0x190 [acpi_cpufreq]
+ RSP: 0018:ffff88041b813c60 EFLAGS: 00010282
+ RAX: ffff880419b46a00 RBX: ffff88041b848400 RCX: ffff880419b20f80
+ RDX: 00000000001dff38 RSI: 00000000ffffffff RDI: ffff88041b848400
+ RBP: ffff88041b813cb0 R08: 0000000000000006 R09: 0000000000000040
+ R10: ffffffff8207f9e0 R11: ffffffff8173595b R12: 0000000000000000
+ R13: ffff88041f1dff38 R14: 0000000000262900 R15: 0000000bfffffff4
+ FS: 0000000000000000(0000) GS:ffff88041f000000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: ffff881019b469f8 CR3: 000000041a2d3000 CR4: 00000000001406e0
+ Stack:
+ ffff88041b813cb0 ffffffff813347f9 ffff88041b813ca0 ffffffff81334663
+ ffff88041f1d4bc0 ffff88041b848400 0000000000000000 0000000000000000
+ 0000000000262900 0000000000000000 ffff88041b813d00 ffffffff813355dc
+ Call Trace:
+ [<ffffffff813347f9>] ? cpufreq_freq_transition_begin+0xf1/0xfc
+ [<ffffffff81334663>] ? get_cpu_idle_time+0x97/0xa6
+ [<ffffffff813355dc>] __cpufreq_driver_target+0x3b6/0x44e
+ [<ffffffff81336ca3>] cs_dbs_timer+0x11a/0x135
+ [<ffffffff81336fda>] dbs_work_handler+0x39/0x62
+ [<ffffffff81057823>] process_one_work+0x280/0x4a5
+ [<ffffffff81058719>] worker_thread+0x24f/0x397
+ [<ffffffff810584ca>] ? rescuer_thread+0x30b/0x30b
+ [<ffffffff81418380>] ? nl80211_get_key+0x29/0x36a
+ [<ffffffff8105d2b7>] kthread+0xfc/0x104
+ [<ffffffff8107ceea>] ? put_lock_stats.isra.9+0xe/0x20
+ [<ffffffff8105d1bb>] ? kthread_create_on_node+0x3f/0x3f
+ [<ffffffff814b2092>] ret_from_fork+0x22/0x30
+ Code: 56 4d 6b ff 0c 41 55 41 54 53 48 83 ec 28 48 8b 15 ad 1e 00 00 44 8b 41
+ 08 48 8b 87 c8 00 00 00 49 89 d5 4e 03 2c c5 80 b2 78 81 <46> 8b 74 38 04 45
+ 3b 75 00 75 11 31 c0 83 39 00 0f 84 1c 01 00
+ RIP [<ffffffffa00356c1>] acpi_cpufreq_target+0x4f/0x190 [acpi_cpufreq]
+ RSP <ffff88041b813c60>
+ CR2: ffff881019b469f8
+ ---[ end trace 16d9fc7a17897d37 ]---
+
+[ rjw: In some cases this bug may also cause incorrect frequencies to
+ be selected by cpufreq governors. ]
+
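+To illustrate the arithmetic (this is not kernel code, just a minimal
+example with made-up positions): with 'best' pointing at or before 'pos',
+the old expression returns an offset from 'pos' rather than an index into
+the table, and that offset is negative:
+
+	struct cpufreq_frequency_table table[8];
+	struct cpufreq_frequency_table *pos = &table[5], *best = &table[2];
+
+	int wrong = best - pos;		/* -3: offset from pos, not an index */
+	int right = best - table;	/*  2: the intended table index */
+	unsigned int idx = wrong;	/* wraps to 0xfffffffd when used as
+					   an unsigned frequency-table index */
+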
+Fixes: 899bb6642f2a (cpufreq: skip invalid entries when searching the frequency)
+Link: http://marc.info/?l=linux-kernel&m=147672030714331&w=2
+Reported-and-tested-by: Sedat Dilek <sedat.dilek@gmail.com>
+Reported-and-tested-by: Jörg Otte <jrg.otte@gmail.com>
+Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/cpufreq.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -677,10 +677,10 @@ static inline int cpufreq_table_find_ind
+ if (best == table - 1)
+ return pos - table;
+
+- return best - pos;
++ return best - table;
+ }
+
+- return best - pos;
++ return best - table;
+ }
+
+ /* Works only on sorted freq-tables */
--- /dev/null
+From f9f4872df6e1801572949f8a370c886122d4b6da Mon Sep 17 00:00:00 2001
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Date: Sat, 8 Oct 2016 12:42:38 -0700
+Subject: cpufreq: intel_pstate: Fix unsafe HWP MSR access
+
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+
+commit f9f4872df6e1801572949f8a370c886122d4b6da upstream.
+
+The MSR_PM_ENABLE MSR must be set to 0x01 before MSR_HWP_CAPABILITIES
+is read on a given CPU. If cpufreq init() is scheduled on a CPU which
+is not the same as policy->cpu, or migrates to a different CPU before
+the MSR_HWP_CAPABILITIES read, it is possible that MSR_PM_ENABLE was
+not set to 0x01 on that CPU. This will cause a GP fault. So, as in
+other places on this path, rdmsrl_on_cpu() should be used instead of
+rdmsrl().
+
+Moreover, the scope of MSR_HWP_CAPABILITIES is per thread, so it should
+be read on the same CPU for which the MSR_HWP_REQUEST MSR is being set.
+
+dmesg dump or warning:
+
+[ 22.014488] WARNING: CPU: 139 PID: 1 at arch/x86/mm/extable.c:50 ex_handler_rdmsr_unsafe+0x68/0x70
+[ 22.014492] unchecked MSR access error: RDMSR from 0x771
+[ 22.014493] Modules linked in:
+[ 22.014507] CPU: 139 PID: 1 Comm: swapper/0 Not tainted 4.7.5+ #1
+...
+...
+[ 22.014516] Call Trace:
+[ 22.014542] [<ffffffff813d7dd1>] dump_stack+0x63/0x82
+[ 22.014558] [<ffffffff8107bc8b>] __warn+0xcb/0xf0
+[ 22.014561] [<ffffffff8107bcff>] warn_slowpath_fmt+0x4f/0x60
+[ 22.014563] [<ffffffff810676f8>] ex_handler_rdmsr_unsafe+0x68/0x70
+[ 22.014564] [<ffffffff810677d9>] fixup_exception+0x39/0x50
+[ 22.014604] [<ffffffff8102e400>] do_general_protection+0x80/0x150
+[ 22.014610] [<ffffffff817f9ec8>] general_protection+0x28/0x30
+[ 22.014635] [<ffffffff81687940>] ? get_target_pstate_use_performance+0xb0/0xb0
+[ 22.014642] [<ffffffff810600c7>] ? native_read_msr+0x7/0x40
+[ 22.014657] [<ffffffff81688123>] intel_pstate_hwp_set+0x23/0x130
+[ 22.014660] [<ffffffff81688406>] intel_pstate_set_policy+0x1b6/0x340
+[ 22.014662] [<ffffffff816829bb>] cpufreq_set_policy+0xeb/0x2c0
+[ 22.014664] [<ffffffff81682f39>] cpufreq_init_policy+0x79/0xe0
+[ 22.014666] [<ffffffff81682cb0>] ? cpufreq_update_policy+0x120/0x120
+[ 22.014669] [<ffffffff816833a6>] cpufreq_online+0x406/0x820
+[ 22.014671] [<ffffffff8168381f>] cpufreq_add_dev+0x5f/0x90
+[ 22.014717] [<ffffffff81530ac8>] subsys_interface_register+0xb8/0x100
+[ 22.014719] [<ffffffff816821bc>] cpufreq_register_driver+0x14c/0x210
+[ 22.014749] [<ffffffff81fe1d90>] intel_pstate_init+0x39d/0x4d5
+[ 22.014751] [<ffffffff81fe13f2>] ? cpufreq_gov_dbs_init+0x12/0x12
+
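+The difference, shown only as an illustration (the actual change is in
+the diff below), is between:
+
+	/* unsafe: reads the MSR on whatever CPU runs this code path */
+	rdmsrl(MSR_HWP_CAPABILITIES, cap);
+
+	/* safe: reads the MSR on the intended CPU */
+	rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
+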
+Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/intel_pstate.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -556,12 +556,12 @@ static void intel_pstate_hwp_set(const s
+ int min, hw_min, max, hw_max, cpu, range, adj_range;
+ u64 value, cap;
+
+- rdmsrl(MSR_HWP_CAPABILITIES, cap);
+- hw_min = HWP_LOWEST_PERF(cap);
+- hw_max = HWP_HIGHEST_PERF(cap);
+- range = hw_max - hw_min;
+-
+ for_each_cpu(cpu, cpumask) {
++ rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
++ hw_min = HWP_LOWEST_PERF(cap);
++ hw_max = HWP_HIGHEST_PERF(cap);
++ range = hw_max - hw_min;
++
+ rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
+ adj_range = limits->min_perf_pct * range / 100;
+ min = hw_min + adj_range;
--- /dev/null
+From 899bb6642f2a2f2cd3f77abd6c5a14550e3b37e6 Mon Sep 17 00:00:00 2001
+From: Aaro Koskinen <aaro.koskinen@iki.fi>
+Date: Wed, 12 Oct 2016 08:45:05 +0530
+Subject: cpufreq: skip invalid entries when searching the frequency
+
+From: Aaro Koskinen <aaro.koskinen@iki.fi>
+
+commit 899bb6642f2a2f2cd3f77abd6c5a14550e3b37e6 upstream.
+
+Skip invalid entries when searching for a frequency. This fixes cpufreq
+at least on the loongson2 MIPS board.
+
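+As an illustration of the helper used below (usage sketch only, not part
+of the change), cpufreq_for_each_valid_entry() walks the table up to
+CPUFREQ_TABLE_END while skipping CPUFREQ_ENTRY_INVALID placeholders, so
+'pos - table' is always a usable index inside the loop:
+
+	struct cpufreq_frequency_table *pos;
+
+	cpufreq_for_each_valid_entry(pos, table) {
+		if (pos->frequency >= target_freq)
+			return pos - table;
+	}
+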
+Fixes: da0c6dc00c69 (cpufreq: Handle sorted frequency tables more efficiently)
+Signed-off-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/cpufreq.h | 104 ++++++++++++++++++++++++------------------------
+ 1 file changed, 52 insertions(+), 52 deletions(-)
+
+--- a/include/linux/cpufreq.h
++++ b/include/linux/cpufreq.h
+@@ -639,19 +639,19 @@ static inline int cpufreq_table_find_ind
+ unsigned int target_freq)
+ {
+ struct cpufreq_frequency_table *table = policy->freq_table;
++ struct cpufreq_frequency_table *pos, *best = table - 1;
+ unsigned int freq;
+- int i, best = -1;
+
+- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+- freq = table[i].frequency;
++ cpufreq_for_each_valid_entry(pos, table) {
++ freq = pos->frequency;
+
+ if (freq >= target_freq)
+- return i;
++ return pos - table;
+
+- best = i;
++ best = pos;
+ }
+
+- return best;
++ return best - table;
+ }
+
+ /* Find lowest freq at or above target in a table in descending order */
+@@ -659,28 +659,28 @@ static inline int cpufreq_table_find_ind
+ unsigned int target_freq)
+ {
+ struct cpufreq_frequency_table *table = policy->freq_table;
++ struct cpufreq_frequency_table *pos, *best = table - 1;
+ unsigned int freq;
+- int i, best = -1;
+
+- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+- freq = table[i].frequency;
++ cpufreq_for_each_valid_entry(pos, table) {
++ freq = pos->frequency;
+
+ if (freq == target_freq)
+- return i;
++ return pos - table;
+
+ if (freq > target_freq) {
+- best = i;
++ best = pos;
+ continue;
+ }
+
+ /* No freq found above target_freq */
+- if (best == -1)
+- return i;
++ if (best == table - 1)
++ return pos - table;
+
+- return best;
++ return best - pos;
+ }
+
+- return best;
++ return best - pos;
+ }
+
+ /* Works only on sorted freq-tables */
+@@ -700,28 +700,28 @@ static inline int cpufreq_table_find_ind
+ unsigned int target_freq)
+ {
+ struct cpufreq_frequency_table *table = policy->freq_table;
++ struct cpufreq_frequency_table *pos, *best = table - 1;
+ unsigned int freq;
+- int i, best = -1;
+
+- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+- freq = table[i].frequency;
++ cpufreq_for_each_valid_entry(pos, table) {
++ freq = pos->frequency;
+
+ if (freq == target_freq)
+- return i;
++ return pos - table;
+
+ if (freq < target_freq) {
+- best = i;
++ best = pos;
+ continue;
+ }
+
+ /* No freq found below target_freq */
+- if (best == -1)
+- return i;
++ if (best == table - 1)
++ return pos - table;
+
+- return best;
++ return best - table;
+ }
+
+- return best;
++ return best - table;
+ }
+
+ /* Find highest freq at or below target in a table in descending order */
+@@ -729,19 +729,19 @@ static inline int cpufreq_table_find_ind
+ unsigned int target_freq)
+ {
+ struct cpufreq_frequency_table *table = policy->freq_table;
++ struct cpufreq_frequency_table *pos, *best = table - 1;
+ unsigned int freq;
+- int i, best = -1;
+
+- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+- freq = table[i].frequency;
++ cpufreq_for_each_valid_entry(pos, table) {
++ freq = pos->frequency;
+
+ if (freq <= target_freq)
+- return i;
++ return pos - table;
+
+- best = i;
++ best = pos;
+ }
+
+- return best;
++ return best - table;
+ }
+
+ /* Works only on sorted freq-tables */
+@@ -761,32 +761,32 @@ static inline int cpufreq_table_find_ind
+ unsigned int target_freq)
+ {
+ struct cpufreq_frequency_table *table = policy->freq_table;
++ struct cpufreq_frequency_table *pos, *best = table - 1;
+ unsigned int freq;
+- int i, best = -1;
+
+- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+- freq = table[i].frequency;
++ cpufreq_for_each_valid_entry(pos, table) {
++ freq = pos->frequency;
+
+ if (freq == target_freq)
+- return i;
++ return pos - table;
+
+ if (freq < target_freq) {
+- best = i;
++ best = pos;
+ continue;
+ }
+
+ /* No freq found below target_freq */
+- if (best == -1)
+- return i;
++ if (best == table - 1)
++ return pos - table;
+
+ /* Choose the closest freq */
+- if (target_freq - table[best].frequency > freq - target_freq)
+- return i;
++ if (target_freq - best->frequency > freq - target_freq)
++ return pos - table;
+
+- return best;
++ return best - table;
+ }
+
+- return best;
++ return best - table;
+ }
+
+ /* Find closest freq to target in a table in descending order */
+@@ -794,32 +794,32 @@ static inline int cpufreq_table_find_ind
+ unsigned int target_freq)
+ {
+ struct cpufreq_frequency_table *table = policy->freq_table;
++ struct cpufreq_frequency_table *pos, *best = table - 1;
+ unsigned int freq;
+- int i, best = -1;
+
+- for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+- freq = table[i].frequency;
++ cpufreq_for_each_valid_entry(pos, table) {
++ freq = pos->frequency;
+
+ if (freq == target_freq)
+- return i;
++ return pos - table;
+
+ if (freq > target_freq) {
+- best = i;
++ best = pos;
+ continue;
+ }
+
+ /* No freq found above target_freq */
+- if (best == -1)
+- return i;
++ if (best == table - 1)
++ return pos - table;
+
+ /* Choose the closest freq */
+- if (table[best].frequency - target_freq > target_freq - freq)
+- return i;
++ if (best->frequency - target_freq > target_freq - freq)
++ return pos - table;
+
+- return best;
++ return best - table;
+ }
+
+- return best;
++ return best - table;
+ }
+
+ /* Works only on sorted freq-tables */
--- /dev/null
+From e01072d22d4e7f9ca966f848def22fe41eaef4de Mon Sep 17 00:00:00 2001
+From: Dave Gerlach <d-gerlach@ti.com>
+Date: Wed, 14 Sep 2016 15:41:37 -0500
+Subject: cpufreq: ti: Use generic platdev driver
+
+From: Dave Gerlach <d-gerlach@ti.com>
+
+commit e01072d22d4e7f9ca966f848def22fe41eaef4de upstream.
+
+Now that cpufreq-dt-platdev is used to create the cpufreq-dt platform
+device for all OMAP platforms, and the platform code that did it before
+has been removed, add "ti,am33xx" and "ti,dra7", which previously
+relied on the removed platform code for this, to the machine list in
+cpufreq-dt-platdev.
+
+Fixes: 7694ca6e1d6f (cpufreq: omap: Use generic platdev driver)
+Signed-off-by: Dave Gerlach <d-gerlach@ti.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/cpufreq-dt-platdev.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/cpufreq/cpufreq-dt-platdev.c
++++ b/drivers/cpufreq/cpufreq-dt-platdev.c
+@@ -68,6 +68,8 @@ static const struct of_device_id machine
+
+ { .compatible = "sigma,tango4" },
+
++ { .compatible = "ti,am33xx", },
++ { .compatible = "ti,dra7", },
+ { .compatible = "ti,omap2", },
+ { .compatible = "ti,omap3", },
+ { .compatible = "ti,omap4", },
--- /dev/null
+From f8850abb7ba68229838014b3409460e576751c6d Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Sun, 9 Oct 2016 11:12:34 +0200
+Subject: parisc: Fix kernel memory layout regarding position of __gp
+
+From: Helge Deller <deller@gmx.de>
+
+commit f8850abb7ba68229838014b3409460e576751c6d upstream.
+
+Architecturally we need to keep __gp below 0x1000000.
+
+But because of ftrace and tracepoint support, the RO_DATA_SECTION now gets much
+bigger than it was before. By moving the linkage tables before RO_DATA_SECTION
+we can avoid __gp being positioned at too high an address.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/kernel/vmlinux.lds.S | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/arch/parisc/kernel/vmlinux.lds.S
++++ b/arch/parisc/kernel/vmlinux.lds.S
+@@ -89,8 +89,9 @@ SECTIONS
+ /* Start of data section */
+ _sdata = .;
+
+- RO_DATA_SECTION(8)
+-
++ /* Architecturally we need to keep __gp below 0x1000000 and thus
++ * in front of RO_DATA_SECTION() which stores lots of tracepoint
++ * and ftrace symbols. */
+ #ifdef CONFIG_64BIT
+ . = ALIGN(16);
+ /* Linkage tables */
+@@ -105,6 +106,8 @@ SECTIONS
+ }
+ #endif
+
++ RO_DATA_SECTION(8)
++
+ /* unwind info */
+ .PARISC.unwind : {
+ __start___unwind = .;
--- /dev/null
+From 92420bd0d01f040bbf754e1d090be49ca6a1c8d6 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Sat, 24 Sep 2016 22:22:12 +0200
+Subject: parisc: Fix self-detected CPU stall warnings on Mako machines
+
+From: Helge Deller <deller@gmx.de>
+
+commit 92420bd0d01f040bbf754e1d090be49ca6a1c8d6 upstream.
+
+The config option HAVE_UNSTABLE_SCHED_CLOCK is set automatically when compiling
+for SMP. There is no need to clear the stable-clock flag via
+clear_sched_clock_stable() when starting secondary CPUs, and even worse,
+clearing it triggers wrong self-detected CPU stall warnings on 64bit Mako
+machines.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/kernel/time.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+--- a/arch/parisc/kernel/time.c
++++ b/arch/parisc/kernel/time.c
+@@ -226,12 +226,6 @@ void __init start_cpu_itimer(void)
+ unsigned int cpu = smp_processor_id();
+ unsigned long next_tick = mfctl(16) + clocktick;
+
+-#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT)
+- /* With multiple 64bit CPUs online, the cr16's are not syncronized. */
+- if (cpu != 0)
+- clear_sched_clock_stable();
+-#endif
+-
+ mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */
+
+ per_cpu(cpu_data, cpu).it_value = next_tick;
--- /dev/null
+From 65bf34f59594c11f13d371c5334a6a0a385cd7ae Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Sun, 9 Oct 2016 09:57:54 +0200
+Subject: parisc: Increase initial kernel mapping size
+
+From: Helge Deller <deller@gmx.de>
+
+commit 65bf34f59594c11f13d371c5334a6a0a385cd7ae upstream.
+
+Increase the initial kernel default page mapping size for 64-bit kernels to
+64 MB and for 32-bit kernels to 32 MB.
+
+Due to the additional support of ftrace, tracepoint and huge pages the kernel
+size can exceed the sizes we used up to now.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/include/asm/pgtable.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -83,10 +83,10 @@ static inline void purge_tlb_entries(str
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
+
+ /* This is the size of the initially mapped kernel memory */
+-#if defined(CONFIG_64BIT) || defined(CONFIG_SMP)
+-#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
++#if defined(CONFIG_64BIT)
++#define KERNEL_INITIAL_ORDER 26 /* 1<<26 = 64MB */
+ #else
+-#define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */
++#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
+ #endif
+ #define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
+
--- /dev/null
+From 690d097c00c88fa9d93d198591e184164b1d8c20 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Fri, 7 Oct 2016 18:19:55 +0200
+Subject: parisc: Increase KERNEL_INITIAL_SIZE for 32-bit SMP kernels
+
+From: Helge Deller <deller@gmx.de>
+
+commit 690d097c00c88fa9d93d198591e184164b1d8c20 upstream.
+
+Increase the initial kernel default page mapping size for SMP kernels to 32MB
+and add a runtime check which panics early if the kernel is bigger than the
+initial mapping size.
+
+This fixes boot crashes of 32bit SMP kernels. Due to the introduction of huge
+page support in kernel 4.4 and its required initial kernel layout in memory, a
+32bit SMP kernel usually got bigger (in layout, not size) than 16MB.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/parisc/include/asm/pgtable.h | 2 +-
+ arch/parisc/kernel/setup.c | 8 ++++++++
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+--- a/arch/parisc/include/asm/pgtable.h
++++ b/arch/parisc/include/asm/pgtable.h
+@@ -83,7 +83,7 @@ static inline void purge_tlb_entries(str
+ printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
+
+ /* This is the size of the initially mapped kernel memory */
+-#ifdef CONFIG_64BIT
++#if defined(CONFIG_64BIT) || defined(CONFIG_SMP)
+ #define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
+ #else
+ #define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */
+--- a/arch/parisc/kernel/setup.c
++++ b/arch/parisc/kernel/setup.c
+@@ -38,6 +38,7 @@
+ #include <linux/export.h>
+
+ #include <asm/processor.h>
++#include <asm/sections.h>
+ #include <asm/pdc.h>
+ #include <asm/led.h>
+ #include <asm/machdep.h> /* for pa7300lc_init() proto */
+@@ -140,6 +141,13 @@ void __init setup_arch(char **cmdline_p)
+ #endif
+ printk(KERN_CONT ".\n");
+
++ /*
++ * Check if initial kernel page mappings are sufficient.
++ * panic early if not, else we may access kernel functions
++ * and variables which can't be reached.
++ */
++ if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE)
++ panic("KERNEL_INITIAL_ORDER too small!");
+
+ pdc_console_init();
+
--- /dev/null
+From d5a9bf0b38d2ac85c9a693c7fb851f74fd2a2494 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 8 Sep 2016 13:48:06 +0200
+Subject: pstore/core: drop cmpxchg based updates
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+commit d5a9bf0b38d2ac85c9a693c7fb851f74fd2a2494 upstream.
+
+I have here an FPGA behind PCIe which exports SRAM which I use for
+pstore. Now it seems that the FPGA no longer supports cmpxchg based
+updates and writes back 0xff…ff and returns the same. This leads to a
+crash during the crash, rendering pstore useless.
+Since I doubt that there is much benefit from using cmpxchg() here, I
+am dropping this atomic access and using the spinlock based version.
+
+Cc: Anton Vorontsov <anton@enomsg.org>
+Cc: Colin Cross <ccross@android.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Rabin Vincent <rabinv@axis.com>
+Tested-by: Rabin Vincent <rabinv@axis.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Guenter Roeck <linux@roeck-us.net>
+[kees: remove "_locked" suffix since it's the only option now]
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/ram_core.c | 43 ++-----------------------------------------
+ 1 file changed, 2 insertions(+), 41 deletions(-)
+
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -47,43 +47,10 @@ static inline size_t buffer_start(struct
+ return atomic_read(&prz->buffer->start);
+ }
+
+-/* increase and wrap the start pointer, returning the old value */
+-static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
+-{
+- int old;
+- int new;
+-
+- do {
+- old = atomic_read(&prz->buffer->start);
+- new = old + a;
+- while (unlikely(new >= prz->buffer_size))
+- new -= prz->buffer_size;
+- } while (atomic_cmpxchg(&prz->buffer->start, old, new) != old);
+-
+- return old;
+-}
+-
+-/* increase the size counter until it hits the max size */
+-static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
+-{
+- size_t old;
+- size_t new;
+-
+- if (atomic_read(&prz->buffer->size) == prz->buffer_size)
+- return;
+-
+- do {
+- old = atomic_read(&prz->buffer->size);
+- new = old + a;
+- if (new > prz->buffer_size)
+- new = prz->buffer_size;
+- } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
+-}
+-
+ static DEFINE_RAW_SPINLOCK(buffer_lock);
+
+ /* increase and wrap the start pointer, returning the old value */
+-static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
++static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
+ {
+ int old;
+ int new;
+@@ -103,7 +70,7 @@ static size_t buffer_start_add_locked(st
+ }
+
+ /* increase the size counter until it hits the max size */
+-static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
++static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
+ {
+ size_t old;
+ size_t new;
+@@ -124,9 +91,6 @@ exit:
+ raw_spin_unlock_irqrestore(&buffer_lock, flags);
+ }
+
+-static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
+-static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
+-
+ static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
+ uint8_t *data, size_t len, uint8_t *ecc)
+ {
+@@ -426,9 +390,6 @@ static void *persistent_ram_iomap(phys_a
+ return NULL;
+ }
+
+- buffer_start_add = buffer_start_add_locked;
+- buffer_size_add = buffer_size_add_locked;
+-
+ if (memtype)
+ va = ioremap(start, size);
+ else
--- /dev/null
+From d771fdf94180de2bd811ac90cba75f0f346abf8d Mon Sep 17 00:00:00 2001
+From: Andrew Bresticker <abrestic@chromium.org>
+Date: Mon, 15 Feb 2016 09:19:49 +0100
+Subject: pstore/ram: Use memcpy_fromio() to save old buffer
+
+From: Andrew Bresticker <abrestic@chromium.org>
+
+commit d771fdf94180de2bd811ac90cba75f0f346abf8d upstream.
+
+The ramoops buffer may be mapped as either I/O memory or uncached
+memory. On ARM64, this results in a device-type (strongly-ordered)
+mapping. Since unaligned accesses to device-type memory will
+generate an alignment fault (regardless of whether or not strict
+alignment checking is enabled), it is not safe to use memcpy().
+memcpy_fromio() is guaranteed to only use aligned accesses, so use
+that instead.
+
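+For example (illustration only; phys_addr, buf and len are placeholders,
+not names from the patch), a copy out of such a mapping should go through
+the io helper rather than plain memcpy():
+
+	void __iomem *vaddr = ioremap(phys_addr, size);
+
+	memcpy_fromio(buf, vaddr, len);		/* uses only aligned accesses */
+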
+Signed-off-by: Andrew Bresticker <abrestic@chromium.org>
+Signed-off-by: Enric Balletbo Serra <enric.balletbo@collabora.com>
+Reviewed-by: Puneet Kumar <puneetster@chromium.org>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/ram_core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -286,8 +286,8 @@ void persistent_ram_save_old(struct pers
+ }
+
+ prz->old_log_size = size;
+- memcpy(prz->old_log, &buffer->data[start], size - start);
+- memcpy(prz->old_log + size - start, &buffer->data[0], start);
++ memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
++ memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
+ }
+
+ int notrace persistent_ram_write(struct persistent_ram_zone *prz,
--- /dev/null
+From 7e75678d23167c2527e655658a8ef36a36c8b4d9 Mon Sep 17 00:00:00 2001
+From: Furquan Shaikh <furquan@google.com>
+Date: Mon, 15 Feb 2016 09:19:48 +0100
+Subject: pstore/ram: Use memcpy_toio instead of memcpy
+
+From: Furquan Shaikh <furquan@google.com>
+
+commit 7e75678d23167c2527e655658a8ef36a36c8b4d9 upstream.
+
+persistent_ram_update uses vmap / iomap based on whether the buffer is in
+a memory region or a reserved region. However, both map it as non-cacheable
+memory. For armv8 specifically, non-cacheable mapping requests use a
+memory type that has to be accessed aligned to the request size. memcpy()
+doesn't guarantee that.
+
+Signed-off-by: Furquan Shaikh <furquan@google.com>
+Signed-off-by: Enric Balletbo Serra <enric.balletbo@collabora.com>
+Reviewed-by: Aaron Durbin <adurbin@chromium.org>
+Reviewed-by: Olof Johansson <olofj@chromium.org>
+Tested-by: Furquan Shaikh <furquan@chromium.org>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/ram_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -263,7 +263,7 @@ static void notrace persistent_ram_updat
+ const void *s, unsigned int start, unsigned int count)
+ {
+ struct persistent_ram_buffer *buffer = prz->buffer;
+- memcpy(buffer->data + start, s, count);
++ memcpy_toio(buffer->data + start, s, count);
+ persistent_ram_update_ecc(prz, start, count);
+ }
+
--- /dev/null
+From 4407de74df18ed405cc5998990004c813ccfdbde Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 8 Sep 2016 13:48:05 +0200
+Subject: pstore/ramoops: fixup driver removal
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+commit 4407de74df18ed405cc5998990004c813ccfdbde upstream.
+
+A basic rmmod ramoops segfaults. Let's see why.
+
+Commit 34f0ec82e0a9 ("pstore: Correct the max_dump_cnt clearing of
+ramoops") sets ->max_dump_cnt to zero before looping over ->przs, but we
+didn't use it before that either.
+
+And since commit ee1d267423a1 ("pstore: add pstore unregister") we free
+that memory on rmmod.
+
+But even then, we looped until a NULL pointer or an ERR value. I don't
+see where it is ensured that the last member is NULL. Let's try this
+instead: simple error recovery and free. Clean up in the error case
+where resources were allocated, and then rely on ->max_dump_cnt in the
+free path.
+
+Cc: Anton Vorontsov <anton@enomsg.org>
+Cc: Colin Cross <ccross@android.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Acked-by: Namhyung Kim <namhyung@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/ram.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -377,13 +377,14 @@ static void ramoops_free_przs(struct ram
+ {
+ int i;
+
+- cxt->max_dump_cnt = 0;
+ if (!cxt->przs)
+ return;
+
+- for (i = 0; !IS_ERR_OR_NULL(cxt->przs[i]); i++)
++ for (i = 0; i < cxt->max_dump_cnt; i++)
+ persistent_ram_free(cxt->przs[i]);
++
+ kfree(cxt->przs);
++ cxt->max_dump_cnt = 0;
+ }
+
+ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
+@@ -408,7 +409,7 @@ static int ramoops_init_przs(struct devi
+ GFP_KERNEL);
+ if (!cxt->przs) {
+ dev_err(dev, "failed to initialize a prz array for dumps\n");
+- goto fail_prz;
++ goto fail_mem;
+ }
+
+ for (i = 0; i < cxt->max_dump_cnt; i++) {
+@@ -419,6 +420,11 @@ static int ramoops_init_przs(struct devi
+ err = PTR_ERR(cxt->przs[i]);
+ dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
+ cxt->record_size, (unsigned long long)*paddr, err);
++
++ while (i > 0) {
++ i--;
++ persistent_ram_free(cxt->przs[i]);
++ }
+ goto fail_prz;
+ }
+ *paddr += cxt->record_size;
+@@ -426,7 +432,9 @@ static int ramoops_init_przs(struct devi
+
+ return 0;
+ fail_prz:
+- ramoops_free_przs(cxt);
++ kfree(cxt->przs);
++fail_mem:
++ cxt->max_dump_cnt = 0;
+ return err;
+ }
+
+@@ -659,7 +667,6 @@ static int ramoops_remove(struct platfor
+ struct ramoops_context *cxt = &oops_cxt;
+
+ pstore_unregister(&cxt->pstore);
+- cxt->max_dump_cnt = 0;
+
+ kfree(cxt->pstore.buf);
+ cxt->pstore.bufsize = 0;
pci-mark-atheros-ar9580-to-avoid-bus-reset.patch
pci-tegra-fix-argument-order-in-tegra_pcie_phy_disable.patch
platform-don-t-return-0-from-platform_get_irq-on-error.patch
+cpufreq-ti-use-generic-platdev-driver.patch
+cpufreq-conservative-fix-next-frequency-selection.patch
+cpufreq-skip-invalid-entries-when-searching-the-frequency.patch
+cpufreq-intel_pstate-fix-unsafe-hwp-msr-access.patch
+cpufreq-fix-overflow-in-cpufreq_table_find_index_dl.patch
+parisc-increase-kernel_initial_size-for-32-bit-smp-kernels.patch
+parisc-fix-self-detected-cpu-stall-warnings-on-mako-machines.patch
+parisc-fix-kernel-memory-layout-regarding-position-of-__gp.patch
+parisc-increase-initial-kernel-mapping-size.patch
+pstore-ramoops-fixup-driver-removal.patch
+pstore-core-drop-cmpxchg-based-updates.patch
+pstore-ram-use-memcpy_toio-instead-of-memcpy.patch
+pstore-ram-use-memcpy_fromio-to-save-old-buffer.patch