git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
drop sched patches from 4.9 as they break the build
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 12 Oct 2018 14:43:30 +0000 (16:43 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Fri, 12 Oct 2018 14:43:30 +0000 (16:43 +0200)
queue-4.9/sched-cputime-convert-kcpustat-to-nsecs.patch [deleted file]
queue-4.9/sched-cputime-fix-ksoftirqd-cputime-accounting-regression.patch [deleted file]
queue-4.9/sched-cputime-increment-kcpustat-directly-on-irqtime-account.patch [deleted file]
queue-4.9/series

diff --git a/queue-4.9/sched-cputime-convert-kcpustat-to-nsecs.patch b/queue-4.9/sched-cputime-convert-kcpustat-to-nsecs.patch
deleted file mode 100644 (file)
index a5b9a1b..0000000
+++ /dev/null
@@ -1,368 +0,0 @@
-From 7fb1327ee9b92fca27662f9b9d60c7c3376d6c69 Mon Sep 17 00:00:00 2001
-From: Frederic Weisbecker <fweisbec@gmail.com>
-Date: Tue, 31 Jan 2017 04:09:19 +0100
-Subject: sched/cputime: Convert kcpustat to nsecs
-
-From: Frederic Weisbecker <fweisbec@gmail.com>
-
-commit 7fb1327ee9b92fca27662f9b9d60c7c3376d6c69 upstream.
-
-Kernel CPU stats are stored in cputime_t, which is an architecture-defined
-type and hence a bit opaque, requiring accessors and mutators for any
-operation.
-
-Converting them to nsecs simplifies the code and is one step toward
-the removal of cputime_t in the core code.
-
-Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-Cc: Paul Mackerras <paulus@samba.org>
-Cc: Michael Ellerman <mpe@ellerman.id.au>
-Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
-Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
-Cc: Tony Luck <tony.luck@intel.com>
-Cc: Fenghua Yu <fenghua.yu@intel.com>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Rik van Riel <riel@redhat.com>
-Cc: Stanislaw Gruszka <sgruszka@redhat.com>
-Cc: Wanpeng Li <wanpeng.li@hotmail.com>
-Link: http://lkml.kernel.org/r/1485832191-26889-4-git-send-email-fweisbec@gmail.com
-Signed-off-by: Ingo Molnar <mingo@kernel.org>
-[colona: minor conflict as 527b0a76f41d ("sched/cpuacct: Avoid %lld seq_printf
-warning") is missing from v4.9]
-Signed-off-by: Ivan Delalande <colona@arista.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- arch/s390/appldata/appldata_os.c   |   16 ++++----
- drivers/cpufreq/cpufreq.c          |    6 +--
- drivers/cpufreq/cpufreq_governor.c |    2 -
- drivers/cpufreq/cpufreq_stats.c    |    1 
- drivers/macintosh/rack-meter.c     |    2 -
- fs/proc/stat.c                     |   68 ++++++++++++++++++-------------------
- fs/proc/uptime.c                   |    7 +--
- kernel/sched/cpuacct.c             |    2 -
- kernel/sched/cputime.c             |   22 +++++------
- 9 files changed, 61 insertions(+), 65 deletions(-)
-
---- a/arch/s390/appldata/appldata_os.c
-+++ b/arch/s390/appldata/appldata_os.c
-@@ -113,21 +113,21 @@ static void appldata_get_os_data(void *d
-       j = 0;
-       for_each_online_cpu(i) {
-               os_data->os_cpu[j].per_cpu_user =
--                      cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
-+                      nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
-               os_data->os_cpu[j].per_cpu_nice =
--                      cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
-+                      nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
-               os_data->os_cpu[j].per_cpu_system =
--                      cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
-+                      nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
-               os_data->os_cpu[j].per_cpu_idle =
--                      cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
-+                      nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
-               os_data->os_cpu[j].per_cpu_irq =
--                      cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
-+                      nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
-               os_data->os_cpu[j].per_cpu_softirq =
--                      cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
-+                      nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
-               os_data->os_cpu[j].per_cpu_iowait =
--                      cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
-+                      nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
-               os_data->os_cpu[j].per_cpu_steal =
--                      cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
-+                      nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
-               os_data->os_cpu[j].cpu_id = i;
-               j++;
-       }
---- a/drivers/cpufreq/cpufreq.c
-+++ b/drivers/cpufreq/cpufreq.c
-@@ -132,7 +132,7 @@ static inline u64 get_cpu_idle_time_jiff
-       u64 cur_wall_time;
-       u64 busy_time;
--      cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
-+      cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
-       busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
-       busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
-@@ -143,9 +143,9 @@ static inline u64 get_cpu_idle_time_jiff
-       idle_time = cur_wall_time - busy_time;
-       if (wall)
--              *wall = cputime_to_usecs(cur_wall_time);
-+              *wall = div_u64(cur_wall_time, NSEC_PER_USEC);
--      return cputime_to_usecs(idle_time);
-+      return div_u64(idle_time, NSEC_PER_USEC);
- }
- u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
---- a/drivers/cpufreq/cpufreq_governor.c
-+++ b/drivers/cpufreq/cpufreq_governor.c
-@@ -152,7 +152,7 @@ unsigned int dbs_update(struct cpufreq_p
-               if (ignore_nice) {
-                       u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
--                      idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
-+                      idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
-                       j_cdbs->prev_cpu_nice = cur_nice;
-               }
---- a/drivers/cpufreq/cpufreq_stats.c
-+++ b/drivers/cpufreq/cpufreq_stats.c
-@@ -13,7 +13,6 @@
- #include <linux/cpufreq.h>
- #include <linux/module.h>
- #include <linux/slab.h>
--#include <linux/cputime.h>
- static DEFINE_SPINLOCK(cpufreq_stats_lock);
---- a/drivers/macintosh/rack-meter.c
-+++ b/drivers/macintosh/rack-meter.c
-@@ -91,7 +91,7 @@ static inline cputime64_t get_cpu_idle_t
-       if (rackmeter_ignore_nice)
-               retval += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
--      return retval;
-+      return nsecs_to_cputime64(retval);
- }
- static void rackmeter_setup_i2s(struct rackmeter *rm)
---- a/fs/proc/stat.c
-+++ b/fs/proc/stat.c
-@@ -21,23 +21,23 @@
- #ifdef arch_idle_time
--static cputime64_t get_idle_time(int cpu)
-+static u64 get_idle_time(int cpu)
- {
--      cputime64_t idle;
-+      u64 idle;
-       idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
-       if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
--              idle += arch_idle_time(cpu);
-+              idle += cputime_to_nsecs(arch_idle_time(cpu));
-       return idle;
- }
--static cputime64_t get_iowait_time(int cpu)
-+static u64 get_iowait_time(int cpu)
- {
--      cputime64_t iowait;
-+      u64 iowait;
-       iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
-       if (cpu_online(cpu) && nr_iowait_cpu(cpu))
--              iowait += arch_idle_time(cpu);
-+              iowait += cputime_to_nsecs(arch_idle_time(cpu));
-       return iowait;
- }
-@@ -45,32 +45,32 @@ static cputime64_t get_iowait_time(int c
- static u64 get_idle_time(int cpu)
- {
--      u64 idle, idle_time = -1ULL;
-+      u64 idle, idle_usecs = -1ULL;
-       if (cpu_online(cpu))
--              idle_time = get_cpu_idle_time_us(cpu, NULL);
-+              idle_usecs = get_cpu_idle_time_us(cpu, NULL);
--      if (idle_time == -1ULL)
-+      if (idle_usecs == -1ULL)
-               /* !NO_HZ or cpu offline so we can rely on cpustat.idle */
-               idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
-       else
--              idle = usecs_to_cputime64(idle_time);
-+              idle = idle_usecs * NSEC_PER_USEC;
-       return idle;
- }
- static u64 get_iowait_time(int cpu)
- {
--      u64 iowait, iowait_time = -1ULL;
-+      u64 iowait, iowait_usecs = -1ULL;
-       if (cpu_online(cpu))
--              iowait_time = get_cpu_iowait_time_us(cpu, NULL);
-+              iowait_usecs = get_cpu_iowait_time_us(cpu, NULL);
--      if (iowait_time == -1ULL)
-+      if (iowait_usecs == -1ULL)
-               /* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
-               iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
-       else
--              iowait = usecs_to_cputime64(iowait_time);
-+              iowait = iowait_usecs * NSEC_PER_USEC;
-       return iowait;
- }
-@@ -115,16 +115,16 @@ static int show_stat(struct seq_file *p,
-       }
-       sum += arch_irq_stat();
--      seq_put_decimal_ull(p, "cpu  ", cputime64_to_clock_t(user));
--      seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice));
--      seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system));
--      seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle));
--      seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait));
--      seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq));
--      seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq));
--      seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal));
--      seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest));
--      seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice));
-+      seq_put_decimal_ull(p, "cpu  ", nsec_to_clock_t(user));
-+      seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
-+      seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
-+      seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
-+      seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
-+      seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
-+      seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
-+      seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
-+      seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
-+      seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
-       seq_putc(p, '\n');
-       for_each_online_cpu(i) {
-@@ -140,16 +140,16 @@ static int show_stat(struct seq_file *p,
-               guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
-               guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
-               seq_printf(p, "cpu%d", i);
--              seq_put_decimal_ull(p, " ", cputime64_to_clock_t(user));
--              seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice));
--              seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system));
--              seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle));
--              seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait));
--              seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq));
--              seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq));
--              seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal));
--              seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest));
--              seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice));
-+              seq_put_decimal_ull(p, " ", nsec_to_clock_t(user));
-+              seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
-+              seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
-+              seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
-+              seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
-+              seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
-+              seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
-+              seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
-+              seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
-+              seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
-               seq_putc(p, '\n');
-       }
-       seq_put_decimal_ull(p, "intr ", (unsigned long long)sum);
---- a/fs/proc/uptime.c
-+++ b/fs/proc/uptime.c
-@@ -5,23 +5,20 @@
- #include <linux/seq_file.h>
- #include <linux/time.h>
- #include <linux/kernel_stat.h>
--#include <linux/cputime.h>
- static int uptime_proc_show(struct seq_file *m, void *v)
- {
-       struct timespec uptime;
-       struct timespec idle;
--      u64 idletime;
-       u64 nsec;
-       u32 rem;
-       int i;
--      idletime = 0;
-+      nsec = 0;
-       for_each_possible_cpu(i)
--              idletime += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
-+              nsec += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
-       get_monotonic_boottime(&uptime);
--      nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
-       idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
-       idle.tv_nsec = rem;
-       seq_printf(m, "%lu.%02lu %lu.%02lu\n",
---- a/kernel/sched/cpuacct.c
-+++ b/kernel/sched/cpuacct.c
-@@ -297,7 +297,7 @@ static int cpuacct_stats_show(struct seq
-       for (stat = 0; stat < CPUACCT_STAT_NSTATS; stat++) {
-               seq_printf(sf, "%s %lld\n",
-                          cpuacct_stat_desc[stat],
--                         cputime64_to_clock_t(val[stat]));
-+                         nsec_to_clock_t(val[stat]));
-       }
-       return 0;
---- a/kernel/sched/cputime.c
-+++ b/kernel/sched/cputime.c
-@@ -75,9 +75,9 @@ static cputime_t irqtime_account_update(
-       u64 *cpustat = kcpustat_this_cpu->cpustat;
-       cputime_t irq_cputime;
--      irq_cputime = nsecs_to_cputime64(irqtime) - cpustat[idx];
-+      irq_cputime = nsecs_to_cputime64(irqtime - cpustat[idx]);
-       irq_cputime = min(irq_cputime, maxtime);
--      cpustat[idx] += irq_cputime;
-+      cpustat[idx] += cputime_to_nsecs(irq_cputime);
-       return irq_cputime;
- }
-@@ -143,7 +143,7 @@ void account_user_time(struct task_struc
-       index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
-       /* Add user time to cpustat. */
--      task_group_account_field(p, index, (__force u64) cputime);
-+      task_group_account_field(p, index, cputime_to_nsecs(cputime));
-       /* Account for user time used */
-       acct_account_cputime(p);
-@@ -168,11 +168,11 @@ static void account_guest_time(struct ta
-       /* Add guest time to cpustat. */
-       if (task_nice(p) > 0) {
--              cpustat[CPUTIME_NICE] += (__force u64) cputime;
--              cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
-+              cpustat[CPUTIME_NICE] += cputime_to_nsecs(cputime);
-+              cpustat[CPUTIME_GUEST_NICE] += cputime_to_nsecs(cputime);
-       } else {
--              cpustat[CPUTIME_USER] += (__force u64) cputime;
--              cpustat[CPUTIME_GUEST] += (__force u64) cputime;
-+              cpustat[CPUTIME_USER] += cputime_to_nsecs(cputime);
-+              cpustat[CPUTIME_GUEST] += cputime_to_nsecs(cputime);
-       }
- }
-@@ -193,7 +193,7 @@ void __account_system_time(struct task_s
-       account_group_system_time(p, cputime);
-       /* Add system time to cpustat. */
--      task_group_account_field(p, index, (__force u64) cputime);
-+      task_group_account_field(p, index, cputime_to_nsecs(cputime));
-       /* Account for system time used */
-       acct_account_cputime(p);
-@@ -234,7 +234,7 @@ void account_steal_time(cputime_t cputim
- {
-       u64 *cpustat = kcpustat_this_cpu->cpustat;
--      cpustat[CPUTIME_STEAL] += (__force u64) cputime;
-+      cpustat[CPUTIME_STEAL] += cputime_to_nsecs(cputime);
- }
- /*
-@@ -247,9 +247,9 @@ void account_idle_time(cputime_t cputime
-       struct rq *rq = this_rq();
-       if (atomic_read(&rq->nr_iowait) > 0)
--              cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
-+              cpustat[CPUTIME_IOWAIT] += cputime_to_nsecs(cputime);
-       else
--              cpustat[CPUTIME_IDLE] += (__force u64) cputime;
-+              cpustat[CPUTIME_IDLE] += cputime_to_nsecs(cputime);
- }
- /*
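
The conversion the patch above performs is purely a change of units: kcpustat
fields hold nanoseconds and are translated to jiffies, clock ticks or
microseconds only at the reporting edge (nsecs_to_jiffies(), nsec_to_clock_t(),
div_u64(..., NSEC_PER_USEC)). A minimal userspace sketch of that arithmetic,
assuming HZ and USER_HZ of 100 and standalone stand-ins for the kernel helpers,
not the kernel code itself:

/*
 * Standalone sketch of the unit conversions used after the patch: kcpustat
 * values kept in nanoseconds, converted only when reported. HZ and USER_HZ
 * are assumed to be 100 here purely for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC  1000000000ULL
#define NSEC_PER_USEC 1000ULL
#define HZ            100ULL   /* assumed scheduler tick rate */
#define USER_HZ       100ULL   /* assumed userspace clock tick rate */

/* Rough equivalent of nsec_to_clock_t(): nanoseconds -> USER_HZ ticks. */
static uint64_t nsec_to_clock_t(uint64_t nsec)
{
	return nsec / (NSEC_PER_SEC / USER_HZ);
}

/* Rough equivalent of nsecs_to_jiffies(): nanoseconds -> scheduler ticks. */
static uint64_t nsecs_to_jiffies(uint64_t nsec)
{
	return nsec / (NSEC_PER_SEC / HZ);
}

int main(void)
{
	/* Pretend kcpustat holds 1.5 s of user time, stored in nanoseconds. */
	uint64_t user_nsec = 1500000000ULL;

	printf("user time: %llu clock_t, %llu jiffies, %llu usecs\n",
	       (unsigned long long)nsec_to_clock_t(user_nsec),
	       (unsigned long long)nsecs_to_jiffies(user_nsec),
	       (unsigned long long)(user_nsec / NSEC_PER_USEC));
	return 0;
}
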
diff --git a/queue-4.9/sched-cputime-fix-ksoftirqd-cputime-accounting-regression.patch b/queue-4.9/sched-cputime-fix-ksoftirqd-cputime-accounting-regression.patch
deleted file mode 100644 (file)
index 4a62195..0000000
+++ /dev/null
@@ -1,140 +0,0 @@
-From 25e2d8c1b9e327ed260edd13169cc22bc7a78bc6 Mon Sep 17 00:00:00 2001
-From: Frederic Weisbecker <fweisbec@gmail.com>
-Date: Tue, 25 Apr 2017 16:10:48 +0200
-Subject: sched/cputime: Fix ksoftirqd cputime accounting regression
-
-From: Frederic Weisbecker <fweisbec@gmail.com>
-
-commit 25e2d8c1b9e327ed260edd13169cc22bc7a78bc6 upstream.
-
-irq_time_read() returns the irqtime minus the ksoftirqd time. This
-is necessary because irq_time_read() is used to subtract the IRQ time
-from the sum_exec_runtime of a task. If we were to include the softirq
-time of ksoftirqd, this task would subtract its own CPU time every time
-it updates ksoftirqd->sum_exec_runtime, which would therefore never
-progress.
-
-But this behaviour got broken by:
-
-  a499a5a14db ("sched/cputime: Increment kcpustat directly on irqtime account")
-
-... which now includes ksoftirqd softirq time in the time returned by
-irq_time_read().
-
-This has resulted in wrong ksoftirqd cputime reported to userspace
-through /proc/stat and thus "top" not showing ksoftirqd when it should
-after intense networking load.
-
-ksoftirqd->stime happens to be correct but it gets scaled down by
-sum_exec_runtime through task_cputime_adjusted().
-
-To fix this, just account the strict IRQ time in a separate counter and
-use it to report the IRQ time.
-
-Reported-and-tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
-Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
-Reviewed-by: Rik van Riel <riel@redhat.com>
-Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Stanislaw Gruszka <sgruszka@redhat.com>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: Wanpeng Li <wanpeng.li@hotmail.com>
-Link: http://lkml.kernel.org/r/1493129448-5356-1-git-send-email-fweisbec@gmail.com
-Signed-off-by: Ingo Molnar <mingo@kernel.org>
-Signed-off-by: Ivan Delalande <colona@arista.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- kernel/sched/cputime.c |   27 ++++++++++++++++-----------
- kernel/sched/sched.h   |    9 +++++++--
- 2 files changed, 23 insertions(+), 13 deletions(-)
-
---- a/kernel/sched/cputime.c
-+++ b/kernel/sched/cputime.c
-@@ -37,6 +37,18 @@ void disable_sched_clock_irqtime(void)
-       sched_clock_irqtime = 0;
- }
-+static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
-+                                enum cpu_usage_stat idx)
-+{
-+      u64 *cpustat = kcpustat_this_cpu->cpustat;
-+
-+      u64_stats_update_begin(&irqtime->sync);
-+      cpustat[idx] += delta;
-+      irqtime->total += delta;
-+      irqtime->tick_delta += delta;
-+      u64_stats_update_end(&irqtime->sync);
-+}
-+
- /*
-  * Called before incrementing preempt_count on {soft,}irq_enter
-  * and before decrementing preempt_count on {soft,}irq_exit.
-@@ -44,7 +56,6 @@ void disable_sched_clock_irqtime(void)
- void irqtime_account_irq(struct task_struct *curr)
- {
-       struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
--      u64 *cpustat = kcpustat_this_cpu->cpustat;
-       s64 delta;
-       int cpu;
-@@ -55,22 +66,16 @@ void irqtime_account_irq(struct task_str
-       delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
-       irqtime->irq_start_time += delta;
--      u64_stats_update_begin(&irqtime->sync);
-       /*
-        * We do not account for softirq time from ksoftirqd here.
-        * We want to continue accounting softirq time to ksoftirqd thread
-        * in that case, so as not to confuse scheduler with a special task
-        * that do not consume any time, but still wants to run.
-        */
--      if (hardirq_count()) {
--              cpustat[CPUTIME_IRQ] += delta;
--              irqtime->tick_delta += delta;
--      } else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) {
--              cpustat[CPUTIME_SOFTIRQ] += delta;
--              irqtime->tick_delta += delta;
--      }
--
--      u64_stats_update_end(&irqtime->sync);
-+      if (hardirq_count())
-+              irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
-+      else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
-+              irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
- }
- EXPORT_SYMBOL_GPL(irqtime_account_irq);
---- a/kernel/sched/sched.h
-+++ b/kernel/sched/sched.h
-@@ -1743,6 +1743,7 @@ static inline void nohz_balance_exit_idl
- #ifdef CONFIG_IRQ_TIME_ACCOUNTING
- struct irqtime {
-+      u64                     total;
-       u64                     tick_delta;
-       u64                     irq_start_time;
-       struct u64_stats_sync   sync;
-@@ -1750,16 +1751,20 @@ struct irqtime {
- DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
-+/*
-+ * Returns the irqtime minus the softirq time computed by ksoftirqd.
-+ * Otherwise ksoftirqd's sum_exec_runtime would get its own runtime subtracted
-+ * and never move forward.
-+ */
- static inline u64 irq_time_read(int cpu)
- {
-       struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
--      u64 *cpustat = kcpustat_cpu(cpu).cpustat;
-       unsigned int seq;
-       u64 total;
-       do {
-               seq = __u64_stats_fetch_begin(&irqtime->sync);
--              total = cpustat[CPUTIME_SOFTIRQ] + cpustat[CPUTIME_IRQ];
-+              total = irqtime->total;
-       } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
-       return total;
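
Reduced to its essentials, the fix above keeps the time that must be subtracted
from task runtime in a private counter (irqtime.total) that is fed only for
hard IRQs and for softirqs not executed by ksoftirqd, while kcpustat is still
updated for /proc/stat. A simplified userspace sketch under those assumptions
(single CPU, no u64_stats seqcount), not the kernel implementation:

/*
 * Sketch of the post-fix accounting: irq_time_read() returns a counter that
 * never includes ksoftirqd's softirq time, so ksoftirqd does not subtract its
 * own runtime from itself.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum cpu_usage_stat { CPUTIME_IRQ, CPUTIME_SOFTIRQ, NR_STATS };

struct irqtime {
	uint64_t total;       /* strict IRQ time, excludes ksoftirqd softirq */
	uint64_t tick_delta;  /* kept for the tick path, not exercised here */
};

static uint64_t cpustat[NR_STATS];  /* stand-in for kcpustat */
static struct irqtime irqtime;

static void irqtime_account_delta(uint64_t delta, enum cpu_usage_stat idx)
{
	cpustat[idx] += delta;      /* reported through /proc/stat */
	irqtime.total += delta;     /* reported through irq_time_read() */
	irqtime.tick_delta += delta;
}

/* delta ran either in hard IRQ context or in softirq context. */
static void irqtime_account_irq(uint64_t delta, bool hardirq,
				bool curr_is_ksoftirqd)
{
	if (hardirq)
		irqtime_account_delta(delta, CPUTIME_IRQ);
	else if (!curr_is_ksoftirqd)
		irqtime_account_delta(delta, CPUTIME_SOFTIRQ);
	/* ksoftirqd's softirq time is charged to the task itself instead */
}

static uint64_t irq_time_read(void)
{
	return irqtime.total;  /* no longer cpustat[IRQ] + cpustat[SOFTIRQ] */
}

int main(void)
{
	irqtime_account_irq(1000, true, false);   /* hard IRQ */
	irqtime_account_irq(2000, false, false);  /* softirq, ordinary task */
	irqtime_account_irq(3000, false, true);   /* softirq run by ksoftirqd: skipped */

	printf("irq_time_read() = %llu\n", (unsigned long long)irq_time_read());
	return 0;
}
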
diff --git a/queue-4.9/sched-cputime-increment-kcpustat-directly-on-irqtime-account.patch b/queue-4.9/sched-cputime-increment-kcpustat-directly-on-irqtime-account.patch
deleted file mode 100644 (file)
index e91ec63..0000000
+++ /dev/null
@@ -1,167 +0,0 @@
-From a499a5a14dbd1d0315a96fc62a8798059325e9e6 Mon Sep 17 00:00:00 2001
-From: Frederic Weisbecker <fweisbec@gmail.com>
-Date: Tue, 31 Jan 2017 04:09:32 +0100
-Subject: sched/cputime: Increment kcpustat directly on irqtime account
-
-From: Frederic Weisbecker <fweisbec@gmail.com>
-
-commit a499a5a14dbd1d0315a96fc62a8798059325e9e6 upstream.
-
-The irqtime is accounted in nsecs and stored in
-cpu_irq_time.hardirq_time and cpu_irq_time.softirq_time. Once the
-accumulated amount reaches a new jiffy, this one gets accounted to the
-kcpustat.
-
-This was necessary when kcpustat was stored in cputime_t, which could at
-worst have jiffies granularity. But now kcpustat is stored in nsecs
-so this whole discretization game with temporary irqtime storage has
-become unnecessary.
-
-We can now directly account the irqtime to the kcpustat.
-
-Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
-Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-Cc: Fenghua Yu <fenghua.yu@intel.com>
-Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
-Cc: Linus Torvalds <torvalds@linux-foundation.org>
-Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
-Cc: Michael Ellerman <mpe@ellerman.id.au>
-Cc: Paul Mackerras <paulus@samba.org>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Rik van Riel <riel@redhat.com>
-Cc: Stanislaw Gruszka <sgruszka@redhat.com>
-Cc: Thomas Gleixner <tglx@linutronix.de>
-Cc: Tony Luck <tony.luck@intel.com>
-Cc: Wanpeng Li <wanpeng.li@hotmail.com>
-Link: http://lkml.kernel.org/r/1485832191-26889-17-git-send-email-fweisbec@gmail.com
-Signed-off-by: Ingo Molnar <mingo@kernel.org>
-Signed-off-by: Ivan Delalande <colona@arista.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- kernel/sched/cputime.c |   50 ++++++++++++++++---------------------------------
- kernel/sched/sched.h   |    7 +++---
- 2 files changed, 21 insertions(+), 36 deletions(-)
-
---- a/kernel/sched/cputime.c
-+++ b/kernel/sched/cputime.c
-@@ -44,6 +44,7 @@ void disable_sched_clock_irqtime(void)
- void irqtime_account_irq(struct task_struct *curr)
- {
-       struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
-+      u64 *cpustat = kcpustat_this_cpu->cpustat;
-       s64 delta;
-       int cpu;
-@@ -61,49 +62,35 @@ void irqtime_account_irq(struct task_str
-        * in that case, so as not to confuse scheduler with a special task
-        * that do not consume any time, but still wants to run.
-        */
--      if (hardirq_count())
--              irqtime->hardirq_time += delta;
--      else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
--              irqtime->softirq_time += delta;
-+      if (hardirq_count()) {
-+              cpustat[CPUTIME_IRQ] += delta;
-+              irqtime->tick_delta += delta;
-+      } else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) {
-+              cpustat[CPUTIME_SOFTIRQ] += delta;
-+              irqtime->tick_delta += delta;
-+      }
-       u64_stats_update_end(&irqtime->sync);
- }
- EXPORT_SYMBOL_GPL(irqtime_account_irq);
--static cputime_t irqtime_account_update(u64 irqtime, int idx, cputime_t maxtime)
-+static cputime_t irqtime_tick_accounted(cputime_t maxtime)
- {
--      u64 *cpustat = kcpustat_this_cpu->cpustat;
--      cputime_t irq_cputime;
--
--      irq_cputime = nsecs_to_cputime64(irqtime - cpustat[idx]);
--      irq_cputime = min(irq_cputime, maxtime);
--      cpustat[idx] += cputime_to_nsecs(irq_cputime);
--
--      return irq_cputime;
--}
-+      struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
-+      cputime_t delta;
--static cputime_t irqtime_account_hi_update(cputime_t maxtime)
--{
--      return irqtime_account_update(__this_cpu_read(cpu_irqtime.hardirq_time),
--                                    CPUTIME_IRQ, maxtime);
--}
-+      delta = nsecs_to_cputime(irqtime->tick_delta);
-+      delta = min(delta, maxtime);
-+      irqtime->tick_delta -= cputime_to_nsecs(delta);
--static cputime_t irqtime_account_si_update(cputime_t maxtime)
--{
--      return irqtime_account_update(__this_cpu_read(cpu_irqtime.softirq_time),
--                                    CPUTIME_SOFTIRQ, maxtime);
-+      return delta;
- }
- #else /* CONFIG_IRQ_TIME_ACCOUNTING */
- #define sched_clock_irqtime   (0)
--static cputime_t irqtime_account_hi_update(cputime_t dummy)
--{
--      return 0;
--}
--
--static cputime_t irqtime_account_si_update(cputime_t dummy)
-+static cputime_t irqtime_tick_accounted(cputime_t dummy)
- {
-       return 0;
- }
-@@ -290,10 +277,7 @@ static inline cputime_t account_other_ti
-       accounted = steal_account_process_time(max);
-       if (accounted < max)
--              accounted += irqtime_account_hi_update(max - accounted);
--
--      if (accounted < max)
--              accounted += irqtime_account_si_update(max - accounted);
-+              accounted += irqtime_tick_accounted(max - accounted);
-       return accounted;
- }
---- a/kernel/sched/sched.h
-+++ b/kernel/sched/sched.h
-@@ -4,6 +4,7 @@
- #include <linux/sched/rt.h>
- #include <linux/u64_stats_sync.h>
- #include <linux/sched/deadline.h>
-+#include <linux/kernel_stat.h>
- #include <linux/binfmts.h>
- #include <linux/mutex.h>
- #include <linux/spinlock.h>
-@@ -1742,8 +1743,7 @@ static inline void nohz_balance_exit_idl
- #ifdef CONFIG_IRQ_TIME_ACCOUNTING
- struct irqtime {
--      u64                     hardirq_time;
--      u64                     softirq_time;
-+      u64                     tick_delta;
-       u64                     irq_start_time;
-       struct u64_stats_sync   sync;
- };
-@@ -1753,12 +1753,13 @@ DECLARE_PER_CPU(struct irqtime, cpu_irqt
- static inline u64 irq_time_read(int cpu)
- {
-       struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
-+      u64 *cpustat = kcpustat_cpu(cpu).cpustat;
-       unsigned int seq;
-       u64 total;
-       do {
-               seq = __u64_stats_fetch_begin(&irqtime->sync);
--              total = irqtime->softirq_time + irqtime->hardirq_time;
-+              total = cpustat[CPUTIME_SOFTIRQ] + cpustat[CPUTIME_IRQ];
-       } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
-       return total;
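
The bookkeeping introduced above can be summarised as: add irqtime to kcpustat
immediately, in nanoseconds, and keep a tick_delta counter so the tick-based
task accounting can later consume the same time at jiffy granularity without
charging it twice. A small standalone sketch of that flow, assuming HZ of 100
(one jiffy = 10 ms) and simplified to a single global counter, illustrative
only:

/*
 * Sketch of the tick_delta bookkeeping: irqtime goes straight into kcpustat
 * in nanoseconds, while tick_delta remembers how much of it the tick path has
 * not yet consumed.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_JIFFY 10000000ULL  /* assumed: HZ == 100 */

static uint64_t cpustat_irq;  /* kcpustat[CPUTIME_IRQ], in nsecs */
static uint64_t tick_delta;   /* irqtime not yet seen by the tick path */

static void irqtime_account_irq(uint64_t delta_ns)
{
	cpustat_irq += delta_ns;  /* accounted directly, no jiffy rounding */
	tick_delta += delta_ns;
}

/* Rough equivalent of irqtime_tick_accounted(): hand at most max_jiffies of
 * the pending irqtime to the tick accounting, in whole jiffies. */
static uint64_t irqtime_tick_accounted(uint64_t max_jiffies)
{
	uint64_t jiffies = tick_delta / NSEC_PER_JIFFY;

	if (jiffies > max_jiffies)
		jiffies = max_jiffies;
	tick_delta -= jiffies * NSEC_PER_JIFFY;
	return jiffies;
}

int main(void)
{
	irqtime_account_irq(25000000ULL);  /* 25 ms of IRQ time */
	printf("tick consumes %llu jiffies\n",
	       (unsigned long long)irqtime_tick_accounted(1));  /* clamped to 1 */
	printf("remaining tick_delta = %llu ns\n",
	       (unsigned long long)tick_delta);
	return 0;
}
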
diff --git a/queue-4.9/series b/queue-4.9/series
index 535785f52b73b47cf732cb9da27ae7fd1cd957f0..4b44d527f19fa476e0f5717f53d4cc0638e837bb 100644 (file)
@@ -24,9 +24,6 @@ x86-mm-expand-static-page-table-for-fixmap-space.patch
 f2fs-fix-invalid-memory-access.patch
 ucma-fix-a-use-after-free-in-ucma_resolve_ip.patch
 ubifs-check-for-name-being-null-while-mounting.patch
-sched-cputime-convert-kcpustat-to-nsecs.patch
-sched-cputime-increment-kcpustat-directly-on-irqtime-account.patch
-sched-cputime-fix-ksoftirqd-cputime-accounting-regression.patch
 ath10k-fix-scan-crash-due-to-incorrect-length-calculation.patch
 ebtables-arpreply-add-the-standard-target-sanity-check.patch
 x86-fpu-remove-use_eager_fpu.patch