From: Greg Kroah-Hartman Date: Mon, 10 Nov 2014 04:19:46 +0000 (+0900) Subject: 3.17-stable patches X-Git-Tag: v3.10.60~47 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=768cb76222daffc599269efb6ada7bcb7f5bd9bd;p=thirdparty%2Fkernel%2Fstable-queue.git 3.17-stable patches added patches: acpi-ec-fix-regression-due-to-conflicting-firmware-behavior-between-samsung-and-acer.patch acpi-invoke-acpi_device_wakeup-with-correct-parameters.patch acpi-irq-x86-return-irq-instead-of-gsi-in-mp_register_gsi.patch fix-inode-leaks-on-d_splice_alias-failure-exits.patch freezer-do-not-freeze-tasks-killed-by-oom-killer.patch intel_pstate-correct-byt-vid-values.patch intel_pstate-don-t-lose-sysfs-settings-during-cpu-offline.patch intel_pstate-fix-byt-frequency-reporting.patch oom-pm-oom-killed-task-shouldn-t-escape-pm-suspend.patch rtc-disable-efi-rtc-for-x86.patch x86-acpi-do-not-translate-gsi-number-if-ioapic-is-disabled.patch --- diff --git a/queue-3.17/acpi-ec-fix-regression-due-to-conflicting-firmware-behavior-between-samsung-and-acer.patch b/queue-3.17/acpi-ec-fix-regression-due-to-conflicting-firmware-behavior-between-samsung-and-acer.patch new file mode 100644 index 00000000000..c09fd2a2a43 --- /dev/null +++ b/queue-3.17/acpi-ec-fix-regression-due-to-conflicting-firmware-behavior-between-samsung-and-acer.patch @@ -0,0 +1,94 @@ +From 79149001105f18bd2285ada109f9229ea24a7571 Mon Sep 17 00:00:00 2001 +From: Lv Zheng +Date: Wed, 29 Oct 2014 11:33:49 +0800 +Subject: ACPI / EC: Fix regression due to conflicting firmware behavior between Samsung and Acer. +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Lv Zheng + +commit 79149001105f18bd2285ada109f9229ea24a7571 upstream. + +It is reported that Samsung laptops that need to poll events are broken by +the following commit: + Commit 3afcf2ece453e1a8c2c6de19cdf06da3772a1b08 + Subject: ACPI / EC: Add support to disallow QR_EC to be issued when SCI_EVT isn't set + +The behaviors of the 2 vendor firmwares are conflict: + 1. Acer: OSPM shouldn't issue QR_EC unless SCI_EVT is set, firmware + automatically sets SCI_EVT as long as there is event queued up. + 2. Samsung: OSPM should issue QR_EC whatever SCI_EVT is set, firmware + returns 0 when there is no event queued up. + +This patch is a quick fix to distinguish the behaviors to make Acer +behavior only effective for Acer EC firmware so that the breakages on +Samsung EC firmware can be avoided. + +Fixes: 3afcf2ece453 (ACPI / EC: Add support to disallow QR_EC to be issued ...) +Link: https://bugzilla.kernel.org/show_bug.cgi?id=44161 +Reported-and-tested-by: Ortwin Glück +Signed-off-by: Lv Zheng +[ rjw : Subject ] +Signed-off-by: Rafael J. 
Wysocki +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/acpi/ec.c | 25 ++++++++++++++++++------- + 1 file changed, 18 insertions(+), 7 deletions(-) + +--- a/drivers/acpi/ec.c ++++ b/drivers/acpi/ec.c +@@ -126,6 +126,7 @@ static int EC_FLAGS_MSI; /* Out-of-spec + static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */ + static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ + static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ ++static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */ + + /* -------------------------------------------------------------------------- + Transaction Management +@@ -210,13 +211,8 @@ static bool advance_transaction(struct a + } + return wakeup; + } else { +- /* +- * There is firmware refusing to respond QR_EC when SCI_EVT +- * is not set, for which case, we complete the QR_EC +- * without issuing it to the firmware. +- * https://bugzilla.kernel.org/show_bug.cgi?id=86211 +- */ +- if (!(status & ACPI_EC_FLAG_SCI) && ++ if (EC_FLAGS_QUERY_HANDSHAKE && ++ !(status & ACPI_EC_FLAG_SCI) && + (t->command == ACPI_EC_COMMAND_QUERY)) { + t->flags |= ACPI_EC_COMMAND_POLL; + t->rdata[t->ri++] = 0x00; +@@ -981,6 +977,18 @@ static int ec_enlarge_storm_threshold(co + } + + /* ++ * Acer EC firmware refuses to respond QR_EC when SCI_EVT is not set, for ++ * which case, we complete the QR_EC without issuing it to the firmware. ++ * https://bugzilla.kernel.org/show_bug.cgi?id=86211 ++ */ ++static int ec_flag_query_handshake(const struct dmi_system_id *id) ++{ ++ pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n"); ++ EC_FLAGS_QUERY_HANDSHAKE = 1; ++ return 0; ++} ++ ++/* + * On some hardware it is necessary to clear events accumulated by the EC during + * sleep. These ECs stop reporting GPEs until they are manually polled, if too + * many events are accumulated. (e.g. Samsung Series 5/9 notebooks) +@@ -1054,6 +1062,9 @@ static struct dmi_system_id ec_dmi_table + { + ec_clear_on_resume, "Samsung hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL}, ++ { ++ ec_flag_query_handshake, "Acer hardware", { ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), }, NULL}, + {}, + }; + diff --git a/queue-3.17/acpi-invoke-acpi_device_wakeup-with-correct-parameters.patch b/queue-3.17/acpi-invoke-acpi_device_wakeup-with-correct-parameters.patch new file mode 100644 index 00000000000..a921877e0c5 --- /dev/null +++ b/queue-3.17/acpi-invoke-acpi_device_wakeup-with-correct-parameters.patch @@ -0,0 +1,31 @@ +From 67598a1d3140a66f57aa6bcb8d22c4c2b7e910f5 Mon Sep 17 00:00:00 2001 +From: Zhang Rui +Date: Thu, 23 Oct 2014 20:20:00 +0800 +Subject: ACPI: invoke acpi_device_wakeup() with correct parameters + +From: Zhang Rui + +commit 67598a1d3140a66f57aa6bcb8d22c4c2b7e910f5 upstream. + +Fix a bug that invokes acpi_device_wakeup() with wrong parameters. + +Fixes: f35cec255557 (ACPI / PM: Always enable wakeup GPEs when enabling device wakeup) +Signed-off-by: Zhang Rui +Signed-off-by: Rafael J. 
Wysocki +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/acpi/device_pm.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/acpi/device_pm.c ++++ b/drivers/acpi/device_pm.c +@@ -710,7 +710,7 @@ int acpi_pm_device_run_wake(struct devic + return -ENODEV; + } + +- return acpi_device_wakeup(adev, enable, ACPI_STATE_S0); ++ return acpi_device_wakeup(adev, ACPI_STATE_S0, enable); + } + EXPORT_SYMBOL(acpi_pm_device_run_wake); + #endif /* CONFIG_PM_RUNTIME */ diff --git a/queue-3.17/acpi-irq-x86-return-irq-instead-of-gsi-in-mp_register_gsi.patch b/queue-3.17/acpi-irq-x86-return-irq-instead-of-gsi-in-mp_register_gsi.patch new file mode 100644 index 00000000000..412b6a4e5d7 --- /dev/null +++ b/queue-3.17/acpi-irq-x86-return-irq-instead-of-gsi-in-mp_register_gsi.patch @@ -0,0 +1,60 @@ +From b77e8f435337baa1cd15852fb9db3f6d26cd8eb7 Mon Sep 17 00:00:00 2001 +From: Jiang Liu +Date: Mon, 27 Oct 2014 13:21:33 +0800 +Subject: ACPI, irq, x86: Return IRQ instead of GSI in mp_register_gsi() + +From: Jiang Liu + +commit b77e8f435337baa1cd15852fb9db3f6d26cd8eb7 upstream. + +Function mp_register_gsi() returns blindly the GSI number for the ACPI +SCI interrupt. That causes a regression when the GSI for ACPI SCI is +shared with other devices. + +The regression was caused by commit 84245af7297ced9e8fe "x86, irq, ACPI: +Change __acpi_register_gsi to return IRQ number instead of GSI" and +exposed on a SuperMicro system, which shares one GSI between ACPI SCI +and PCI device, with following failure: + +http://sourceforge.net/p/linux1394/mailman/linux1394-user/?viewmonth=201410 +[ 0.000000] ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low +level) +[ 2.699224] firewire_ohci 0000:06:00.0: failed to allocate interrupt +20 + +Return mp_map_gsi_to_irq(gsi, 0) instead of the GSI number. + +Reported-and-Tested-by: Daniel Robbins +Signed-off-by: Jiang Liu +Cc: Konrad Rzeszutek Wilk +Cc: Tony Luck +Cc: Joerg Roedel +Cc: Greg Kroah-Hartman +Cc: Benjamin Herrenschmidt +Cc: Rafael J. Wysocki +Cc: Bjorn Helgaas +Cc: Randy Dunlap +Cc: Yinghai Lu +Cc: Borislav Petkov +Cc: Len Brown +Cc: Pavel Machek +Link: http://lkml.kernel.org/r/1414387308-27148-4-git-send-email-jiang.liu@linux.intel.com +Signed-off-by: Thomas Gleixner +Signed-off-by: Ingo Molnar +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/kernel/acpi/boot.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/x86/kernel/acpi/boot.c ++++ b/arch/x86/kernel/acpi/boot.c +@@ -397,7 +397,7 @@ static int mp_register_gsi(struct device + + /* Don't set up the ACPI SCI because it's already set up */ + if (acpi_gbl_FADT.sci_interrupt == gsi) +- return gsi; ++ return mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC); + + trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1; + polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1; diff --git a/queue-3.17/fix-inode-leaks-on-d_splice_alias-failure-exits.patch b/queue-3.17/fix-inode-leaks-on-d_splice_alias-failure-exits.patch new file mode 100644 index 00000000000..ea28b360e2f --- /dev/null +++ b/queue-3.17/fix-inode-leaks-on-d_splice_alias-failure-exits.patch @@ -0,0 +1,42 @@ +From 51486b900ee92856b977eacfc5bfbe6565028070 Mon Sep 17 00:00:00 2001 +From: Al Viro +Date: Thu, 23 Oct 2014 13:26:21 -0400 +Subject: fix inode leaks on d_splice_alias() failure exits + +From: Al Viro + +commit 51486b900ee92856b977eacfc5bfbe6565028070 upstream. + +d_splice_alias() callers expect it to either stash the inode reference +into a new alias, or drop the inode reference. 
That makes it possible +to just return d_splice_alias() result from ->lookup() instance, without +any extra housekeeping required. + +Unfortunately, that should include the failure exits. If d_splice_alias() +returns an error, it leaves the dentry it has been given negative and +thus it *must* drop the inode reference. Easily fixed, but it goes way +back and will need backporting. + +Signed-off-by: Al Viro +Signed-off-by: Greg Kroah-Hartman + +--- + fs/dcache.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -2675,11 +2675,13 @@ struct dentry *d_splice_alias(struct ino + if (!IS_ROOT(new)) { + spin_unlock(&inode->i_lock); + dput(new); ++ iput(inode); + return ERR_PTR(-EIO); + } + if (d_ancestor(new, dentry)) { + spin_unlock(&inode->i_lock); + dput(new); ++ iput(inode); + return ERR_PTR(-EIO); + } + write_seqlock(&rename_lock); diff --git a/queue-3.17/freezer-do-not-freeze-tasks-killed-by-oom-killer.patch b/queue-3.17/freezer-do-not-freeze-tasks-killed-by-oom-killer.patch new file mode 100644 index 00000000000..45c3bcc32fd --- /dev/null +++ b/queue-3.17/freezer-do-not-freeze-tasks-killed-by-oom-killer.patch @@ -0,0 +1,54 @@ +From 51fae6da640edf9d266c94f36bc806c63c301991 Mon Sep 17 00:00:00 2001 +From: Cong Wang +Date: Tue, 21 Oct 2014 09:27:12 +0200 +Subject: freezer: Do not freeze tasks killed by OOM killer + +From: Cong Wang + +commit 51fae6da640edf9d266c94f36bc806c63c301991 upstream. + +Since f660daac474c6f (oom: thaw threads if oom killed thread is frozen +before deferring) OOM killer relies on being able to thaw a frozen task +to handle OOM situation but a3201227f803 (freezer: make freezing() test +freeze conditions in effect instead of TIF_FREEZE) has reorganized the +code and stopped clearing freeze flag in __thaw_task. This means that +the target task only wakes up and goes into the fridge again because the +freezing condition hasn't changed for it. This reintroduces the bug +fixed by f660daac474c6f. + +Fix the issue by checking for TIF_MEMDIE thread flag in +freezing_slow_path and exclude the task from freezing completely. If a +task was already frozen it would get woken by __thaw_task from OOM killer +and get out of freezer after rechecking freezing(). + +Changes since v1 +- put TIF_MEMDIE check into freezing_slowpath rather than in __refrigerator + as per Oleg +- return __thaw_task into oom_scan_process_thread because + oom_kill_process will not wake task in the fridge because it is + sleeping uninterruptible + +[mhocko@suse.cz: rewrote the changelog] +Fixes: a3201227f803 (freezer: make freezing() test freeze conditions in effect instead of TIF_FREEZE) +Signed-off-by: Cong Wang +Signed-off-by: Michal Hocko +Acked-by: Oleg Nesterov +Signed-off-by: Rafael J. 
Wysocki +Signed-off-by: Greg Kroah-Hartman + +--- + kernel/freezer.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/kernel/freezer.c ++++ b/kernel/freezer.c +@@ -42,6 +42,9 @@ bool freezing_slow_path(struct task_stru + if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK)) + return false; + ++ if (test_thread_flag(TIF_MEMDIE)) ++ return false; ++ + if (pm_nosig_freezing || cgroup_freezing(p)) + return true; + diff --git a/queue-3.17/intel_pstate-correct-byt-vid-values.patch b/queue-3.17/intel_pstate-correct-byt-vid-values.patch new file mode 100644 index 00000000000..4c2537a419b --- /dev/null +++ b/queue-3.17/intel_pstate-correct-byt-vid-values.patch @@ -0,0 +1,55 @@ +From d022a65ed2473fac4a600e3424503dc571160a3e Mon Sep 17 00:00:00 2001 +From: Dirk Brandewie +Date: Mon, 13 Oct 2014 08:37:44 -0700 +Subject: intel_pstate: Correct BYT VID values. + +From: Dirk Brandewie + +commit d022a65ed2473fac4a600e3424503dc571160a3e upstream. + +Using a VID value that is not high enough for the requested P state can +cause machine checks. Add a ceiling function to ensure calulated VIDs +with fractional values are set to the next highest integer VID value. + +The algorythm for calculating the non-trubo VID from the BIOS writers +guide is: + vid_ratio = (vid_max - vid_min) / (max_pstate - min_pstate) + vid = ceiling(vid_min + (req_pstate - min_pstate) * vid_ratio) + +Signed-off-by: Dirk Brandewie +Signed-off-by: Rafael J. Wysocki +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/cpufreq/intel_pstate.c | 13 ++++++++++++- + 1 file changed, 12 insertions(+), 1 deletion(-) + +--- a/drivers/cpufreq/intel_pstate.c ++++ b/drivers/cpufreq/intel_pstate.c +@@ -52,6 +52,17 @@ static inline int32_t div_fp(int32_t x, + return div_s64((int64_t)x << FRAC_BITS, y); + } + ++static inline int ceiling_fp(int32_t x) ++{ ++ int mask, ret; ++ ++ ret = fp_toint(x); ++ mask = (1 << FRAC_BITS) - 1; ++ if (x & mask) ++ ret += 1; ++ return ret; ++} ++ + struct sample { + int32_t core_pct_busy; + u64 aperf; +@@ -425,7 +436,7 @@ static void byt_set_pstate(struct cpudat + cpudata->vid.ratio); + + vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); +- vid = fp_toint(vid_fp); ++ vid = ceiling_fp(vid_fp); + + if (pstate > cpudata->pstate.max_pstate) + vid = cpudata->vid.turbo; diff --git a/queue-3.17/intel_pstate-don-t-lose-sysfs-settings-during-cpu-offline.patch b/queue-3.17/intel_pstate-don-t-lose-sysfs-settings-during-cpu-offline.patch new file mode 100644 index 00000000000..d1122bcc9c2 --- /dev/null +++ b/queue-3.17/intel_pstate-don-t-lose-sysfs-settings-during-cpu-offline.patch @@ -0,0 +1,43 @@ +From c034871712730a33e0267095f48b62eae958499c Mon Sep 17 00:00:00 2001 +From: Dirk Brandewie +Date: Mon, 13 Oct 2014 08:37:42 -0700 +Subject: intel_pstate: Don't lose sysfs settings during cpu offline + +From: Dirk Brandewie + +commit c034871712730a33e0267095f48b62eae958499c upstream. + +The user may have custom settings don't destroy them during suspend. + +Link: https://bugzilla.kernel.org/show_bug.cgi?id=80651 +Reported-by: Tobias Jakobi +Signed-off-by: Dirk Brandewie +Signed-off-by: Rafael J. 
Wysocki
+Signed-off-by: Greg Kroah-Hartman

+
+---
+ drivers/cpufreq/intel_pstate.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -702,7 +702,9 @@ static int intel_pstate_init_cpu(unsigne
+ {
+ struct cpudata *cpu;
+
+- all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
++ if (!all_cpu_data[cpunum])
++ all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
++ GFP_KERNEL);
+ if (!all_cpu_data[cpunum])
+ return -ENOMEM;
+
+@@ -783,8 +785,6 @@ static void intel_pstate_stop_cpu(struct
+
+ del_timer_sync(&all_cpu_data[cpu_num]->timer);
+ intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
+- kfree(all_cpu_data[cpu_num]);
+- all_cpu_data[cpu_num] = NULL;
+ }
+
+ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
diff --git a/queue-3.17/intel_pstate-fix-byt-frequency-reporting.patch b/queue-3.17/intel_pstate-fix-byt-frequency-reporting.patch
new file mode 100644
index 00000000000..75d79cd0b0b
--- /dev/null
+++ b/queue-3.17/intel_pstate-fix-byt-frequency-reporting.patch
@@ -0,0 +1,146 @@
+From b27580b05e6f5253228debc60b8ff4a786ff573a Mon Sep 17 00:00:00 2001
+From: Dirk Brandewie
+Date: Mon, 13 Oct 2014 08:37:43 -0700
+Subject: intel_pstate: Fix BYT frequency reporting
+
+From: Dirk Brandewie
+
+commit b27580b05e6f5253228debc60b8ff4a786ff573a upstream.
+
+BYT has a different conversion from P state to frequency than the core
+processors. This causes the min/max and current frequency to be
+misreported on some BYT SKUs. Tested on BYT N2820, Ivybridge and
+Haswell processors.
+
+Link: https://bugzilla.yoctoproject.org/show_bug.cgi?id=6663
+Signed-off-by: Dirk Brandewie
+Signed-off-by: Rafael J. Wysocki
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/cpufreq/intel_pstate.c | 42 ++++++++++++++++++++++++++++++++++++------
+ 1 file changed, 36 insertions(+), 6 deletions(-)
+
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -64,6 +64,7 @@ struct pstate_data {
+ int current_pstate;
+ int min_pstate;
+ int max_pstate;
++ int scaling;
+ int turbo_pstate;
+ };
+
+@@ -113,6 +114,7 @@ struct pstate_funcs {
+ int (*get_max)(void);
+ int (*get_min)(void);
+ int (*get_turbo)(void);
++ int (*get_scaling)(void);
+ void (*set)(struct cpudata*, int pstate);
+ void (*get_vid)(struct cpudata *);
+ };
+@@ -433,6 +435,22 @@ static void byt_set_pstate(struct cpudat
+ wrmsrl(MSR_IA32_PERF_CTL, val);
+ }
+
++#define BYT_BCLK_FREQS 5
++static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
++
++static int byt_get_scaling(void)
++{
++ u64 value;
++ int i;
++
++ rdmsrl(MSR_FSB_FREQ, value);
++ i = value & 0x3;
++
++ BUG_ON(i > BYT_BCLK_FREQS);
++
++ return byt_freq_table[i] * 100;
++}
++
+ static void byt_get_vid(struct cpudata *cpudata)
+ {
+ u64 value;
+@@ -478,6 +496,11 @@ static int core_get_turbo_pstate(void)
+ return ret;
+ }
+
++static inline int core_get_scaling(void)
++{
++ return 100000;
++}
++
+ static void core_set_pstate(struct cpudata *cpudata, int pstate)
+ {
+ u64 val;
+@@ -502,6 +525,7 @@ static struct cpu_defaults core_params =
+ .get_max = core_get_max_pstate,
+ .get_min = core_get_min_pstate,
+ .get_turbo = core_get_turbo_pstate,
++ .get_scaling = core_get_scaling,
+ .set = core_set_pstate,
+ },
+ };
+@@ -520,6 +544,7 @@ static struct cpu_defaults byt_params =
+ .get_min = byt_get_min_pstate,
+ .get_turbo = byt_get_turbo_pstate,
+ .set = byt_set_pstate,
++ .get_scaling = byt_get_scaling,
+ .get_vid = byt_get_vid,
+ },
+ };
+@@ -554,7 +579,7 @@ static void intel_pstate_set_pstate(stru
+ if (pstate == cpu->pstate.current_pstate)
+ return;
+
+- trace_cpu_frequency(pstate * 100000, cpu->cpu);
++ trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
+
+ cpu->pstate.current_pstate = pstate;
+
+@@ -566,6 +591,7 @@ static void intel_pstate_get_cpu_pstates
+ cpu->pstate.min_pstate = pstate_funcs.get_min();
+ cpu->pstate.max_pstate = pstate_funcs.get_max();
+ cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
++ cpu->pstate.scaling = pstate_funcs.get_scaling();
+
+ if (pstate_funcs.get_vid)
+ pstate_funcs.get_vid(cpu);
+@@ -581,7 +607,9 @@ static inline void intel_pstate_calc_bus
+ core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
+
+ sample->freq = fp_toint(
+- mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
++ mul_fp(int_tofp(
++ cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
++ core_pct));
+
+ sample->core_pct_busy = (int32_t)core_pct;
+ }
+@@ -803,12 +831,13 @@ static int intel_pstate_cpu_init(struct
+ else
+ policy->policy = CPUFREQ_POLICY_POWERSAVE;
+
+- policy->min = cpu->pstate.min_pstate * 100000;
+- policy->max = cpu->pstate.turbo_pstate * 100000;
++ policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
++ policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+
+ /* cpuinfo and default policy values */
+- policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
+- policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
++ policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
++ policy->cpuinfo.max_freq =
++ cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
+ cpumask_set_cpu(policy->cpu, policy->cpus);
+
+@@ -866,6 +895,7 @@ static void copy_cpu_funcs(struct pstate
+ pstate_funcs.get_max = funcs->get_max;
+ pstate_funcs.get_min = funcs->get_min;
+ pstate_funcs.get_turbo = funcs->get_turbo;
++ pstate_funcs.get_scaling = funcs->get_scaling;
+ pstate_funcs.set = funcs->set;
+ pstate_funcs.get_vid = funcs->get_vid;
+ }
diff --git a/queue-3.17/oom-pm-oom-killed-task-shouldn-t-escape-pm-suspend.patch b/queue-3.17/oom-pm-oom-killed-task-shouldn-t-escape-pm-suspend.patch
new file mode 100644
index 00000000000..88706bd3cc7
--- /dev/null
+++ b/queue-3.17/oom-pm-oom-killed-task-shouldn-t-escape-pm-suspend.patch
@@ -0,0 +1,171 @@
+From 5695be142e203167e3cb515ef86a88424f3524eb Mon Sep 17 00:00:00 2001
+From: Michal Hocko
+Date: Mon, 20 Oct 2014 18:12:32 +0200
+Subject: OOM, PM: OOM killed task shouldn't escape PM suspend
+
+From: Michal Hocko
+
+commit 5695be142e203167e3cb515ef86a88424f3524eb upstream.
+
+PM freezer relies on having all tasks frozen by the time devices are
+getting frozen so that no task will touch them while they are getting
+frozen. But OOM killer is allowed to kill an already frozen task in
+order to handle OOM situtation. In order to protect from late wake ups
+OOM killer is disabled after all tasks are frozen. This, however, still
+keeps a window open when a killed task didn't manage to die by the time
+freeze_processes finishes.
+
+Reduce the race window by checking all tasks after OOM killer has been
+disabled. This is still not race free completely unfortunately because
+oom_killer_disable cannot stop an already ongoing OOM killer so a task
+might still wake up from the fridge and get killed without
+freeze_processes noticing. Full synchronization of OOM and freezer is,
+however, too heavy weight for this highly unlikely case.
+ +Introduce and check oom_kills counter which gets incremented early when +the allocator enters __alloc_pages_may_oom path and only check all the +tasks if the counter changes during the freezing attempt. The counter +is updated so early to reduce the race window since allocator checked +oom_killer_disabled which is set by PM-freezing code. A false positive +will push the PM-freezer into a slow path but that is not a big deal. + +Changes since v1 +- push the re-check loop out of freeze_processes into + check_frozen_processes and invert the condition to make the code more + readable as per Rafael + +Fixes: f660daac474c6f (oom: thaw threads if oom killed thread is frozen before deferring) +Signed-off-by: Michal Hocko +Signed-off-by: Rafael J. Wysocki +Signed-off-by: Greg Kroah-Hartman + +--- + include/linux/oom.h | 3 +++ + kernel/power/process.c | 40 +++++++++++++++++++++++++++++++++++++++- + mm/oom_kill.c | 17 +++++++++++++++++ + mm/page_alloc.c | 8 ++++++++ + 4 files changed, 67 insertions(+), 1 deletion(-) + +--- a/include/linux/oom.h ++++ b/include/linux/oom.h +@@ -50,6 +50,9 @@ static inline bool oom_task_origin(const + extern unsigned long oom_badness(struct task_struct *p, + struct mem_cgroup *memcg, const nodemask_t *nodemask, + unsigned long totalpages); ++ ++extern int oom_kills_count(void); ++extern void note_oom_kill(void); + extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, + unsigned int points, unsigned long totalpages, + struct mem_cgroup *memcg, nodemask_t *nodemask, +--- a/kernel/power/process.c ++++ b/kernel/power/process.c +@@ -108,6 +108,28 @@ static int try_to_freeze_tasks(bool user + return todo ? -EBUSY : 0; + } + ++/* ++ * Returns true if all freezable tasks (except for current) are frozen already ++ */ ++static bool check_frozen_processes(void) ++{ ++ struct task_struct *g, *p; ++ bool ret = true; ++ ++ read_lock(&tasklist_lock); ++ for_each_process_thread(g, p) { ++ if (p != current && !freezer_should_skip(p) && ++ !frozen(p)) { ++ ret = false; ++ goto done; ++ } ++ } ++done: ++ read_unlock(&tasklist_lock); ++ ++ return ret; ++} ++ + /** + * freeze_processes - Signal user space processes to enter the refrigerator. + * The current thread will not be frozen. The same process that calls +@@ -118,6 +140,7 @@ static int try_to_freeze_tasks(bool user + int freeze_processes(void) + { + int error; ++ int oom_kills_saved; + + error = __usermodehelper_disable(UMH_FREEZING); + if (error) +@@ -131,12 +154,27 @@ int freeze_processes(void) + + printk("Freezing user space processes ... "); + pm_freezing = true; ++ oom_kills_saved = oom_kills_count(); + error = try_to_freeze_tasks(true); + if (!error) { +- printk("done."); + __usermodehelper_set_disable_depth(UMH_DISABLED); + oom_killer_disable(); ++ ++ /* ++ * There might have been an OOM kill while we were ++ * freezing tasks and the killed task might be still ++ * on the way out so we have to double check for race. ++ */ ++ if (oom_kills_count() != oom_kills_saved && ++ !check_frozen_processes()) { ++ __usermodehelper_set_disable_depth(UMH_ENABLED); ++ printk("OOM in progress."); ++ error = -EBUSY; ++ goto done; ++ } ++ printk("done."); + } ++done: + printk("\n"); + BUG_ON(in_atomic()); + +--- a/mm/oom_kill.c ++++ b/mm/oom_kill.c +@@ -404,6 +404,23 @@ static void dump_header(struct task_stru + dump_tasks(memcg, nodemask); + } + ++/* ++ * Number of OOM killer invocations (including memcg OOM killer). ++ * Primarily used by PM freezer to check for potential races with ++ * OOM killed frozen task. 
++ */ ++static atomic_t oom_kills = ATOMIC_INIT(0); ++ ++int oom_kills_count(void) ++{ ++ return atomic_read(&oom_kills); ++} ++ ++void note_oom_kill(void) ++{ ++ atomic_inc(&oom_kills); ++} ++ + #define K(x) ((x) << (PAGE_SHIFT-10)) + /* + * Must be called while holding a reference to p, which will be released upon +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -2253,6 +2253,14 @@ __alloc_pages_may_oom(gfp_t gfp_mask, un + } + + /* ++ * PM-freezer should be notified that there might be an OOM killer on ++ * its way to kill and wake somebody up. This is too early and we might ++ * end up not killing anything but false positives are acceptable. ++ * See freeze_processes. ++ */ ++ note_oom_kill(); ++ ++ /* + * Go through the zonelist yet one more time, keep very high watermark + * here, this is only to catch a parallel oom killing, we must fail if + * we're still under heavy pressure. diff --git a/queue-3.17/rtc-disable-efi-rtc-for-x86.patch b/queue-3.17/rtc-disable-efi-rtc-for-x86.patch new file mode 100644 index 00000000000..4943ec927e2 --- /dev/null +++ b/queue-3.17/rtc-disable-efi-rtc-for-x86.patch @@ -0,0 +1,44 @@ +From 7efe665903d0d963b0ebf4cab25cc3ae32c62600 Mon Sep 17 00:00:00 2001 +From: Matt Fleming +Date: Fri, 3 Oct 2014 13:06:33 +0100 +Subject: rtc: Disable EFI rtc for x86 + +From: Matt Fleming + +commit 7efe665903d0d963b0ebf4cab25cc3ae32c62600 upstream. + +commit da167ad7638759 ("rtc: ia64: allow other architectures to use EFI +RTC") inadvertently introduced a regression for x86 because we've been +careful not to enable the EFI rtc driver due to the generally buggy +implementations of the time-related EFI runtime services. + +In fact, since the above commit was merged we've seen reports of crashes +on 32-bit tablets, + + https://bugzilla.kernel.org/show_bug.cgi?id=84241#c21 + +Disable it explicitly for x86 so that we don't give users false hope +that this driver will work - it won't, and your machine is likely to +crash. + +Acked-by: Mark Salter +Cc: Dave Young +Cc: Alessandro Zummo +Signed-off-by: Matt Fleming +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/rtc/Kconfig | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/rtc/Kconfig ++++ b/drivers/rtc/Kconfig +@@ -806,7 +806,7 @@ config RTC_DRV_DA9063 + + config RTC_DRV_EFI + tristate "EFI RTC" +- depends on EFI ++ depends on EFI && !X86 + help + If you say yes here you will get support for the EFI + Real Time Clock. 
diff --git a/queue-3.17/series b/queue-3.17/series index bcb36fac0a4..149883249af 100644 --- a/queue-3.17/series +++ b/queue-3.17/series @@ -174,3 +174,14 @@ cpufreq-intel_pstate-reflect-current-no_turbo-state-correctly.patch cpufreq-intel_pstate-fix-setting-max_perf_pct-in-performance-policy.patch x86-platform-intel-iosf-add-braswell-pci-id.patch x86-add-cpu_detect_cache_sizes-to-init_intel-add-quark-legacy_cache.patch +rtc-disable-efi-rtc-for-x86.patch +intel_pstate-don-t-lose-sysfs-settings-during-cpu-offline.patch +intel_pstate-fix-byt-frequency-reporting.patch +intel_pstate-correct-byt-vid-values.patch +freezer-do-not-freeze-tasks-killed-by-oom-killer.patch +fix-inode-leaks-on-d_splice_alias-failure-exits.patch +acpi-invoke-acpi_device_wakeup-with-correct-parameters.patch +x86-acpi-do-not-translate-gsi-number-if-ioapic-is-disabled.patch +acpi-irq-x86-return-irq-instead-of-gsi-in-mp_register_gsi.patch +acpi-ec-fix-regression-due-to-conflicting-firmware-behavior-between-samsung-and-acer.patch +oom-pm-oom-killed-task-shouldn-t-escape-pm-suspend.patch diff --git a/queue-3.17/x86-acpi-do-not-translate-gsi-number-if-ioapic-is-disabled.patch b/queue-3.17/x86-acpi-do-not-translate-gsi-number-if-ioapic-is-disabled.patch new file mode 100644 index 00000000000..2bc85506d19 --- /dev/null +++ b/queue-3.17/x86-acpi-do-not-translate-gsi-number-if-ioapic-is-disabled.patch @@ -0,0 +1,57 @@ +From 961b6a7003acec4f9d70dabc1a253b783cb74272 Mon Sep 17 00:00:00 2001 +From: Jiang Liu +Date: Mon, 20 Oct 2014 22:45:27 +0800 +Subject: x86: ACPI: Do not translate GSI number if IOAPIC is disabled + +From: Jiang Liu + +commit 961b6a7003acec4f9d70dabc1a253b783cb74272 upstream. + +When IOAPIC is disabled, acpi_gsi_to_irq() should return gsi directly +instead of calling mp_map_gsi_to_irq() to translate gsi to IRQ by IOAPIC. +It fixes https://bugzilla.kernel.org/show_bug.cgi?id=84381. + +This regression was introduced with commit 6b9fb7082409 "x86, ACPI, +irq: Consolidate algorithm of mapping (ioapic, pin) to IRQ number" + +Reported-and-Tested-by: Thomas Richter +Signed-off-by: Jiang Liu +Cc: Tony Luck +Cc: Thomas Richter +Cc: rui.zhang@intel.com +Cc: Rafael J. Wysocki +Cc: Bjorn Helgaas +Link: http://lkml.kernel.org/r/1413816327-12850-1-git-send-email-jiang.liu@linux.intel.com +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/kernel/acpi/boot.c | 14 +++++++++----- + 1 file changed, 9 insertions(+), 5 deletions(-) + +--- a/arch/x86/kernel/acpi/boot.c ++++ b/arch/x86/kernel/acpi/boot.c +@@ -604,14 +604,18 @@ void __init acpi_pic_sci_set_trigger(uns + + int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp) + { +- int irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK); ++ int irq; + +- if (irq >= 0) { ++ if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { ++ *irqp = gsi; ++ } else { ++ irq = mp_map_gsi_to_irq(gsi, ++ IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK); ++ if (irq < 0) ++ return -1; + *irqp = irq; +- return 0; + } +- +- return -1; ++ return 0; + } + EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); +