--- /dev/null
+From d4f4de5e5ef8efde85febb6876cd3c8ab1631999 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Sun, 15 Sep 2019 12:12:39 -0400
+Subject: Fix the locking in dcache_readdir() and friends
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit d4f4de5e5ef8efde85febb6876cd3c8ab1631999 upstream.
+
+There are two problems in dcache_readdir() - one is that lockless traversal
+of the list needs non-trivial cooperation of d_alloc() (at least a switch
+to list_add_rcu(), and probably more than just that) and another is that
+it assumes that no removal will happen without the directory locked exclusive.
+Said assumption had always been there, never had been stated explicitly and
+is violated by several places in the kernel (devpts and selinuxfs).
+
+ * replacement of next_positive() with different calling conventions:
+it returns struct list_head * instead of struct dentry *; the latter is
+passed in and out by reference, grabbing the result and dropping the original
+value.
+ * scan is under ->d_lock. If we run out of timeslice, cursor is moved
+after the last position we'd reached and we reschedule; then the scan continues
+from that place. To avoid livelocks between multiple lseek() (with cursors
+getting moved past each other, never reaching the real entries) we always
+skip the cursors, need_resched() or not.
+ * returned list_head * is either ->d_child of dentry we'd found or
+->d_subdirs of parent (if we got to the end of the list).
+ * dcache_readdir() and dcache_dir_lseek() switched to new helper.
+dcache_readdir() always holds a reference to dentry passed to dir_emit() now.
+Cursor is moved to just before the entry where dir_emit() has failed or into
+the very end of the list, if we'd run out.
+ * move_cursor() eliminated - it had sucky calling conventions and
+after fixing that it became simply list_move() (in lseek and scan_positives)
+or list_move_tail() (in readdir).
+
+ All operations with the list are under ->d_lock now, and we do not
+depend upon having all file removals done with parent locked exclusive
+anymore.
+
+Cc: stable@vger.kernel.org
+Reported-by: "zhengbin (A)" <zhengbin13@huawei.com>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
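+To make the new calling convention concrete, here is a minimal userspace
+sketch of the scan described above. It assumes a plain pthread mutex in
+place of ->d_lock and a boolean flag in place of DCACHE_DENTRY_CURSOR,
+parks the cursor and yields on every step rather than only when
+need_resched() fires, and leaves out the dget_dlock()/dput() reference
+handling that the real scan_positives() below performs.
+
+#include <pthread.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+/* circular doubly-linked list node, standing in for ->d_child/->d_subdirs */
+struct node {
+	struct node *prev, *next;
+	bool is_cursor;		/* placeholder owned by an open directory */
+	bool positive;		/* "dentry has an inode" in this analogue */
+};
+
+static void node_move_after(struct node *n, struct node *where)
+{
+	/* unlink n */
+	n->prev->next = n->next;
+	n->next->prev = n->prev;
+	/* relink n right after where */
+	n->next = where->next;
+	n->prev = where;
+	where->next->prev = n;
+	where->next = n;
+}
+
+/*
+ * Find the count-th positive entry after p, scanning under the list lock.
+ * Cursors are always skipped, so two concurrent seekers can never livelock
+ * by repeatedly stepping over each other's cursors; before yielding, the
+ * caller's cursor is parked at the current position and the scan resumes
+ * from it once the lock is reacquired.
+ */
+static struct node *scan_positives(pthread_mutex_t *lock, struct node *head,
+				   struct node *cursor, struct node *p,
+				   long count)
+{
+	struct node *found = NULL;
+
+	pthread_mutex_lock(lock);
+	while ((p = p->next) != head) {
+		if (p->is_cursor)
+			continue;
+		if (p->positive && !--count) {
+			found = p;
+			break;
+		}
+		/* stand-in for the need_resched()/cond_resched() dance */
+		node_move_after(cursor, p);
+		p = cursor;
+		pthread_mutex_unlock(lock);
+		sched_yield();
+		pthread_mutex_lock(lock);
+	}
+	pthread_mutex_unlock(lock);
+	return found;	/* NULL: fell off the end of the list */
+}
+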
+---
+ fs/libfs.c | 134 +++++++++++++++++++++++++++++++------------------------------
+ 1 file changed, 69 insertions(+), 65 deletions(-)
+
+--- a/fs/libfs.c
++++ b/fs/libfs.c
+@@ -86,58 +86,47 @@ int dcache_dir_close(struct inode *inode
+ EXPORT_SYMBOL(dcache_dir_close);
+
+ /* parent is locked at least shared */
+-static struct dentry *next_positive(struct dentry *parent,
+- struct list_head *from,
+- int count)
++/*
++ * Returns an element of siblings' list.
++ * We are looking for <count>th positive after <p>; if
++ * found, dentry is grabbed and passed to caller via *<res>.
++ * If no such element exists, the anchor of list is returned
++ * and *<res> is set to NULL.
++ */
++static struct list_head *scan_positives(struct dentry *cursor,
++ struct list_head *p,
++ loff_t count,
++ struct dentry **res)
+ {
+- unsigned *seq = &parent->d_inode->i_dir_seq, n;
+- struct dentry *res;
+- struct list_head *p;
+- bool skipped;
+- int i;
++ struct dentry *dentry = cursor->d_parent, *found = NULL;
+
+-retry:
+- i = count;
+- skipped = false;
+- n = smp_load_acquire(seq) & ~1;
+- res = NULL;
+- rcu_read_lock();
+- for (p = from->next; p != &parent->d_subdirs; p = p->next) {
++ spin_lock(&dentry->d_lock);
++ while ((p = p->next) != &dentry->d_subdirs) {
+ struct dentry *d = list_entry(p, struct dentry, d_child);
+- if (!simple_positive(d)) {
+- skipped = true;
+- } else if (!--i) {
+- res = d;
+- break;
++ // we must at least skip cursors, to avoid livelocks
++ if (d->d_flags & DCACHE_DENTRY_CURSOR)
++ continue;
++ if (simple_positive(d) && !--count) {
++ spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
++ if (simple_positive(d))
++ found = dget_dlock(d);
++ spin_unlock(&d->d_lock);
++ if (likely(found))
++ break;
++ count = 1;
++ }
++ if (need_resched()) {
++ list_move(&cursor->d_child, p);
++ p = &cursor->d_child;
++ spin_unlock(&dentry->d_lock);
++ cond_resched();
++ spin_lock(&dentry->d_lock);
+ }
+ }
+- rcu_read_unlock();
+- if (skipped) {
+- smp_rmb();
+- if (unlikely(*seq != n))
+- goto retry;
+- }
+- return res;
+-}
+-
+-static void move_cursor(struct dentry *cursor, struct list_head *after)
+-{
+- struct dentry *parent = cursor->d_parent;
+- unsigned n, *seq = &parent->d_inode->i_dir_seq;
+- spin_lock(&parent->d_lock);
+- for (;;) {
+- n = *seq;
+- if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
+- break;
+- cpu_relax();
+- }
+- __list_del(cursor->d_child.prev, cursor->d_child.next);
+- if (after)
+- list_add(&cursor->d_child, after);
+- else
+- list_add_tail(&cursor->d_child, &parent->d_subdirs);
+- smp_store_release(seq, n + 2);
+- spin_unlock(&parent->d_lock);
++ spin_unlock(&dentry->d_lock);
++ dput(*res);
++ *res = found;
++ return p;
+ }
+
+ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
+@@ -153,17 +142,28 @@ loff_t dcache_dir_lseek(struct file *fil
+ return -EINVAL;
+ }
+ if (offset != file->f_pos) {
++ struct dentry *cursor = file->private_data;
++ struct dentry *to = NULL;
++ struct list_head *p;
++
+ file->f_pos = offset;
+- if (file->f_pos >= 2) {
+- struct dentry *cursor = file->private_data;
+- struct dentry *to;
+- loff_t n = file->f_pos - 2;
+-
+- inode_lock_shared(dentry->d_inode);
+- to = next_positive(dentry, &dentry->d_subdirs, n);
+- move_cursor(cursor, to ? &to->d_child : NULL);
+- inode_unlock_shared(dentry->d_inode);
++ inode_lock_shared(dentry->d_inode);
++
++ if (file->f_pos > 2) {
++ p = scan_positives(cursor, &dentry->d_subdirs,
++ file->f_pos - 2, &to);
++ spin_lock(&dentry->d_lock);
++ list_move(&cursor->d_child, p);
++ spin_unlock(&dentry->d_lock);
++ } else {
++ spin_lock(&dentry->d_lock);
++ list_del_init(&cursor->d_child);
++ spin_unlock(&dentry->d_lock);
+ }
++
++ dput(to);
++
++ inode_unlock_shared(dentry->d_inode);
+ }
+ return offset;
+ }
+@@ -185,25 +185,29 @@ int dcache_readdir(struct file *file, st
+ {
+ struct dentry *dentry = file->f_path.dentry;
+ struct dentry *cursor = file->private_data;
+- struct list_head *p = &cursor->d_child;
+- struct dentry *next;
+- bool moved = false;
++ struct list_head *anchor = &dentry->d_subdirs;
++ struct dentry *next = NULL;
++ struct list_head *p;
+
+ if (!dir_emit_dots(file, ctx))
+ return 0;
+
+ if (ctx->pos == 2)
+- p = &dentry->d_subdirs;
+- while ((next = next_positive(dentry, p, 1)) != NULL) {
++ p = anchor;
++ else
++ p = &cursor->d_child;
++
++ while ((p = scan_positives(cursor, p, 1, &next)) != anchor) {
+ if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
+ d_inode(next)->i_ino, dt_type(d_inode(next))))
+ break;
+- moved = true;
+- p = &next->d_child;
+ ctx->pos++;
+ }
+- if (moved)
+- move_cursor(cursor, p);
++ spin_lock(&dentry->d_lock);
++ list_move_tail(&cursor->d_child, p);
++ spin_unlock(&dentry->d_lock);
++ dput(next);
++
+ return 0;
+ }
+ EXPORT_SYMBOL(dcache_readdir);
--- /dev/null
+From 9ef16693aff8137faa21d16ffe65bb9832d24d71 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Fri, 11 Oct 2019 17:56:57 -0400
+Subject: ftrace: Get a reference counter for the trace_array on filter files
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 9ef16693aff8137faa21d16ffe65bb9832d24d71 upstream.
+
+The ftrace set_ftrace_filter and set_ftrace_notrace files are specific for
+an instance now. They need to take a reference to the instance otherwise
+there could be a race between accessing the files and deleting the instance.
+
+It wasn't until the :mod: caching where these file operations started
+referencing the trace_array directly.
+
+Cc: stable@vger.kernel.org
+Fixes: 673feb9d76ab3 ("ftrace: Add :mod: caching infrastructure to trace_array")
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
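+The shape of the fix is a get-on-open, put-on-error-and-on-release pairing
+around the instance. A loose userspace sketch of that pairing follows; the
+instance_get()/instance_put() helpers are only stand-ins for
+trace_array_get()/trace_array_put(), not their implementation.
+
+#include <stdatomic.h>
+#include <stdlib.h>
+
+struct instance {
+	atomic_int refs;	/* stands in for the trace_array refcount */
+};
+
+/* like trace_array_get(): fails once the instance has started dying */
+static int instance_get(struct instance *tr)
+{
+	int old = atomic_load(&tr->refs);
+
+	do {
+		if (old == 0)
+			return -1;	/* already being torn down */
+	} while (!atomic_compare_exchange_weak(&tr->refs, &old, old + 1));
+	return 0;
+}
+
+static void instance_put(struct instance *tr)
+{
+	if (atomic_fetch_sub(&tr->refs, 1) == 1)
+		free(tr);		/* last reference gone */
+}
+
+struct filter_file {
+	struct instance *tr;
+	/* parser state, hash, ... */
+};
+
+/* take the reference first, and undo it on every failure path */
+static struct filter_file *filter_open(struct instance *tr)
+{
+	struct filter_file *iter;
+
+	if (tr && instance_get(tr) < 0)
+		return NULL;
+
+	iter = calloc(1, sizeof(*iter));
+	if (!iter) {
+		if (tr)
+			instance_put(tr);
+		return NULL;
+	}
+	iter->tr = tr;
+	return iter;
+}
+
+/* the reference is dropped only when the file goes away */
+static void filter_release(struct filter_file *iter)
+{
+	if (iter->tr)
+		instance_put(iter->tr);
+	free(iter);
+}
+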
+---
+ kernel/trace/ftrace.c | 27 ++++++++++++++++++---------
+ 1 file changed, 18 insertions(+), 9 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -3629,21 +3629,22 @@ ftrace_regex_open(struct ftrace_ops *ops
+ struct ftrace_hash *hash;
+ struct list_head *mod_head;
+ struct trace_array *tr = ops->private;
+- int ret = 0;
++ int ret = -ENOMEM;
+
+ ftrace_ops_init(ops);
+
+ if (unlikely(ftrace_disabled))
+ return -ENODEV;
+
++ if (tr && trace_array_get(tr) < 0)
++ return -ENODEV;
++
+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+ if (!iter)
+- return -ENOMEM;
++ goto out;
+
+- if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
+- kfree(iter);
+- return -ENOMEM;
+- }
++ if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
++ goto out;
+
+ iter->ops = ops;
+ iter->flags = flag;
+@@ -3673,13 +3674,13 @@ ftrace_regex_open(struct ftrace_ops *ops
+
+ if (!iter->hash) {
+ trace_parser_put(&iter->parser);
+- kfree(iter);
+- ret = -ENOMEM;
+ goto out_unlock;
+ }
+ } else
+ iter->hash = hash;
+
++ ret = 0;
++
+ if (file->f_mode & FMODE_READ) {
+ iter->pg = ftrace_pages_start;
+
+@@ -3691,7 +3692,6 @@ ftrace_regex_open(struct ftrace_ops *ops
+ /* Failed */
+ free_ftrace_hash(iter->hash);
+ trace_parser_put(&iter->parser);
+- kfree(iter);
+ }
+ } else
+ file->private_data = iter;
+@@ -3699,6 +3699,13 @@ ftrace_regex_open(struct ftrace_ops *ops
+ out_unlock:
+ mutex_unlock(&ops->func_hash->regex_lock);
+
++ out:
++ if (ret) {
++ kfree(iter);
++ if (tr)
++ trace_array_put(tr);
++ }
++
+ return ret;
+ }
+
+@@ -5098,6 +5105,8 @@ int ftrace_regex_release(struct inode *i
+
+ mutex_unlock(&iter->ops->func_hash->regex_lock);
+ free_ftrace_hash(iter->hash);
++ if (iter->tr)
++ trace_array_put(iter->tr);
+ kfree(iter);
+
+ return 0;
--- /dev/null
+From 30045f2174aab7fb4db7a9cf902d0aa6c75856a7 Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Tue, 1 Oct 2019 10:49:08 +0200
+Subject: media: stkwebcam: fix runtime PM after driver unbind
+
+From: Johan Hovold <johan@kernel.org>
+
+commit 30045f2174aab7fb4db7a9cf902d0aa6c75856a7 upstream.
+
+Since commit c2b71462d294 ("USB: core: Fix bug caused by duplicate
+interface PM usage counter") USB drivers must always balance their
+runtime PM gets and puts, including when the driver has already been
+unbound from the interface.
+
+Leaving the interface with a positive PM usage counter would prevent a
+later bound driver from suspending the device.
+
+Note that runtime PM has never actually been enabled for this driver
+since the support_autosuspend flag in its usb_driver struct is not set.
+
+Fixes: c2b71462d294 ("USB: core: Fix bug caused by duplicate interface PM usage counter")
+Cc: stable <stable@vger.kernel.org>
+Acked-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://lore.kernel.org/r/20191001084908.2003-5-johan@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
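+For illustration only, a sketch of the rule being enforced here, using a
+hypothetical mydev driver rather than stkwebcam's actual open/release
+paths:
+
+#include <linux/usb.h>
+
+struct mydev {
+	struct usb_interface *intf;
+};
+
+static int mydev_open(struct mydev *dev)
+{
+	int ret = usb_autopm_get_interface(dev->intf);
+
+	if (ret)
+		return ret;
+	/* ... device setup ... */
+	return 0;
+}
+
+static void mydev_release(struct mydev *dev)
+{
+	/*
+	 * Unconditional put: gating this on "device still present" leaves
+	 * the interface's PM usage counter elevated after a disconnect,
+	 * blocking autosuspend for whatever driver binds next.
+	 */
+	usb_autopm_put_interface(dev->intf);
+}
+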
+---
+ drivers/media/usb/stkwebcam/stk-webcam.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/media/usb/stkwebcam/stk-webcam.c
++++ b/drivers/media/usb/stkwebcam/stk-webcam.c
+@@ -640,8 +640,7 @@ static int v4l_stk_release(struct file *
+ dev->owner = NULL;
+ }
+
+- if (is_present(dev))
+- usb_autopm_put_interface(dev->interface);
++ usb_autopm_put_interface(dev->interface);
+ mutex_unlock(&dev->lock);
+ return v4l2_fh_release(fp);
+ }
btrfs-fix-incorrect-updating-of-log-root-tree.patch
nfs-fix-o_direct-accounting-of-number-of-bytes-read-written.patch
mips-disable-loongson-mmi-instructions-for-kernel-build.patch
+fix-the-locking-in-dcache_readdir-and-friends.patch
+media-stkwebcam-fix-runtime-pm-after-driver-unbind.patch
+tracing-hwlat-report-total-time-spent-in-all-nmis-during-the-sample.patch
+tracing-hwlat-don-t-ignore-outer-loop-duration-when-calculating-max_latency.patch
+ftrace-get-a-reference-counter-for-the-trace_array-on-filter-files.patch
+tracing-get-trace_array-reference-for-available_tracers-files.patch
+x86-asm-fix-mwaitx-c-state-hint-value.patch
--- /dev/null
+From 194c2c74f5532e62c218adeb8e2b683119503907 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
+Date: Fri, 11 Oct 2019 18:19:17 -0400
+Subject: tracing: Get trace_array reference for available_tracers files
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+commit 194c2c74f5532e62c218adeb8e2b683119503907 upstream.
+
+As instances may have different tracers available, we need to look at the
+trace_array descriptor that shows the list of the available tracers for the
+instance. But there's a race between opening the file and an admin
+deleting the instance. The trace_array_get() needs to be called before
+accessing the trace_array.
+
+Cc: stable@vger.kernel.org
+Fixes: 607e2ea167e56 ("tracing: Set up infrastructure to allow tracers for instances")
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
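+For readability, this is how the open/release pair reads with the patch
+applied, pieced together from the hunks below; the declarations at the top
+of show_traces_open() are not part of this diff and are assumed from
+context.
+
+static int show_traces_open(struct inode *inode, struct file *file)
+{
+	struct trace_array *tr = inode->i_private;
+	struct seq_file *m;
+	int ret;
+
+	if (tracing_disabled)
+		return -ENODEV;
+
+	if (trace_array_get(tr) < 0)
+		return -ENODEV;
+
+	ret = seq_open(file, &show_traces_seq_ops);
+	if (ret) {
+		trace_array_put(tr);
+		return ret;
+	}
+
+	m = file->private_data;
+	m->private = tr;
+
+	return 0;
+}
+
+static int show_traces_release(struct inode *inode, struct file *file)
+{
+	struct trace_array *tr = inode->i_private;
+
+	trace_array_put(tr);
+	return seq_release(inode, file);
+}
+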
+---
+ kernel/trace/trace.c | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4152,9 +4152,14 @@ static int show_traces_open(struct inode
+ if (tracing_disabled)
+ return -ENODEV;
+
++ if (trace_array_get(tr) < 0)
++ return -ENODEV;
++
+ ret = seq_open(file, &show_traces_seq_ops);
+- if (ret)
++ if (ret) {
++ trace_array_put(tr);
+ return ret;
++ }
+
+ m = file->private_data;
+ m->private = tr;
+@@ -4162,6 +4167,14 @@ static int show_traces_open(struct inode
+ return 0;
+ }
+
++static int show_traces_release(struct inode *inode, struct file *file)
++{
++ struct trace_array *tr = inode->i_private;
++
++ trace_array_put(tr);
++ return seq_release(inode, file);
++}
++
+ static ssize_t
+ tracing_write_stub(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+@@ -4192,8 +4205,8 @@ static const struct file_operations trac
+ static const struct file_operations show_traces_fops = {
+ .open = show_traces_open,
+ .read = seq_read,
+- .release = seq_release,
+ .llseek = seq_lseek,
++ .release = show_traces_release,
+ };
+
+ static ssize_t
--- /dev/null
+From fc64e4ad80d4b72efce116f87b3174f0b7196f8e Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat (VMware)" <srivatsa@csail.mit.edu>
+Date: Thu, 10 Oct 2019 11:51:01 -0700
+Subject: tracing/hwlat: Don't ignore outer-loop duration when calculating max_latency
+
+From: Srivatsa S. Bhat (VMware) <srivatsa@csail.mit.edu>
+
+commit fc64e4ad80d4b72efce116f87b3174f0b7196f8e upstream.
+
+max_latency is intended to record the maximum ever observed hardware
+latency, which may occur in either part of the loop (inner/outer). So
+we need to also consider the outer-loop sample when updating
+max_latency.
+
+Link: http://lkml.kernel.org/r/157073345463.17189.18124025522664682811.stgit@srivatsa-ubuntu
+
+Fixes: e7c15cd8a113 ("tracing: Added hardware latency tracer")
+Cc: stable@vger.kernel.org
+Signed-off-by: Srivatsa S. Bhat (VMware) <srivatsa@csail.mit.edu>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
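+A toy userspace model of one sampling window follows, only to show where
+the inner and outer samples come from; it uses clock_gettime() in place of
+the tracer's time_get() and is not the code of get_sample() itself.
+
+#include <stdint.h>
+#include <time.h>
+
+static uint64_t time_get_ns(void)
+{
+	struct timespec ts;
+
+	clock_gettime(CLOCK_MONOTONIC, &ts);
+	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
+}
+
+/*
+ * The "inner" latency is the gap between two back-to-back timestamps; the
+ * "outer" latency is the gap between the end of one loop pass and the
+ * start of the next. A hardware stall can land in either gap, so the
+ * recorded maximum has to consider both, which is what the one-line fix
+ * below adds.
+ */
+static uint64_t sample_window(uint64_t width_ns)
+{
+	uint64_t start = time_get_ns(), last_t2 = 0, max_latency = 0;
+
+	do {
+		uint64_t t1 = time_get_ns();
+		uint64_t t2 = time_get_ns();
+
+		if (last_t2) {
+			uint64_t outer_sample = t1 - last_t2;
+
+			if (outer_sample > max_latency)	/* the missing case */
+				max_latency = outer_sample;
+		}
+		last_t2 = t2;
+
+		if (t2 - t1 > max_latency)		/* inner sample */
+			max_latency = t2 - t1;
+	} while (time_get_ns() - start < width_ns);
+
+	return max_latency;
+}
+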
+---
+ kernel/trace/trace_hwlat.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -258,6 +258,8 @@ static int get_sample(void)
+ /* Keep a running maximum ever recorded hardware latency */
+ if (sample > tr->max_latency)
+ tr->max_latency = sample;
++ if (outer_sample > tr->max_latency)
++ tr->max_latency = outer_sample;
+ }
+
+ out:
--- /dev/null
+From 98dc19c11470ee6048aba723d77079ad2cda8a52 Mon Sep 17 00:00:00 2001
+From: "Srivatsa S. Bhat (VMware)" <srivatsa@csail.mit.edu>
+Date: Thu, 10 Oct 2019 11:50:46 -0700
+Subject: tracing/hwlat: Report total time spent in all NMIs during the sample
+
+From: Srivatsa S. Bhat (VMware) <srivatsa@csail.mit.edu>
+
+commit 98dc19c11470ee6048aba723d77079ad2cda8a52 upstream.
+
+nmi_total_ts is supposed to record the total time spent in *all* NMIs
+that occur on the given CPU during the (active portion of the)
+sampling window. However, the code seems to be overwriting this
+variable for each NMI, thereby only recording the time spent in the
+most recent NMI. Fix it by accumulating the duration instead.
+
+Link: http://lkml.kernel.org/r/157073343544.17189.13911783866738671133.stgit@srivatsa-ubuntu
+
+Fixes: 7b2c86250122 ("tracing: Add NMI tracing in hwlat detector")
+Cc: stable@vger.kernel.org
+Signed-off-by: Srivatsa S. Bhat (VMware) <srivatsa@csail.mit.edu>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
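+A tiny sketch of the enter/exit bookkeeping, with a worked case: two NMIs
+of 3us and 5us inside one window should be reported as 8us, which only the
+accumulating form gets right (the overwriting form would report 5us).
+
+#include <stdint.h>
+
+static uint64_t nmi_ts_start, nmi_total_ts;
+
+static void nmi_callback(int enter, uint64_t now)
+{
+	if (enter)
+		nmi_ts_start = now;
+	else
+		nmi_total_ts += now - nmi_ts_start;	/* was '=' before the fix */
+}
+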
+---
+ kernel/trace/trace_hwlat.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -152,7 +152,7 @@ void trace_hwlat_callback(bool enter)
+ if (enter)
+ nmi_ts_start = time_get();
+ else
+- nmi_total_ts = time_get() - nmi_ts_start;
++ nmi_total_ts += time_get() - nmi_ts_start;
+ }
+
+ if (enter)
--- /dev/null
+From 454de1e7d970d6bc567686052329e4814842867c Mon Sep 17 00:00:00 2001
+From: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
+Date: Mon, 7 Oct 2019 19:00:22 +0000
+Subject: x86/asm: Fix MWAITX C-state hint value
+
+From: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
+
+commit 454de1e7d970d6bc567686052329e4814842867c upstream.
+
+As per "AMD64 Architecture Programmer's Manual Volume 3: General-Purpose
+and System Instructions", MWAITX EAX[7:4]+1 specifies the optional hint
+of the optimized C-state. For C0 state, EAX[7:4] should be set to 0xf.
+
+Currently, a value of 0xf is set for EAX[3:0] instead of EAX[7:4]. Fix
+this by changing MWAITX_DISABLE_CSTATES from 0xf to 0xf0.
+
+This hasn't had any implications so far because setting reserved bits in
+EAX is simply ignored by the CPU.
+
+ [ bp: Fixup comment in delay_mwaitx() and massage. ]
+
+Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@amd.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: Frederic Weisbecker <frederic@kernel.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: "x86@kernel.org" <x86@kernel.org>
+Cc: Zhenzhong Duan <zhenzhong.duan@oracle.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/20191007190011.4859-1-Janakarajan.Natarajan@amd.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
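+A small standalone illustration of the bit layout (the MWAITX_HINT macro
+here is hypothetical, not the kernel's): the C-state hint lives in
+EAX[7:4], so 0xf in the low nibble only set reserved bits, while 0xf0 puts
+the intended 0xf into the hint field.
+
+#include <stdio.h>
+
+#define MWAITX_HINT_SHIFT	4
+#define MWAITX_HINT(h)		((unsigned int)(h) << MWAITX_HINT_SHIFT)
+
+int main(void)
+{
+	printf("old: %#x (hint field = %#x)\n", 0x0fu, (0x0fu >> 4) & 0xfu);
+	printf("new: %#x (hint field = %#x)\n", MWAITX_HINT(0xf),
+	       (MWAITX_HINT(0xf) >> 4) & 0xfu);
+	return 0;
+}
+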
+---
+ arch/x86/include/asm/mwait.h | 2 +-
+ arch/x86/lib/delay.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/mwait.h
++++ b/arch/x86/include/asm/mwait.h
+@@ -21,7 +21,7 @@
+ #define MWAIT_ECX_INTERRUPT_BREAK 0x1
+ #define MWAITX_ECX_TIMER_ENABLE BIT(1)
+ #define MWAITX_MAX_LOOPS ((u32)-1)
+-#define MWAITX_DISABLE_CSTATES 0xf
++#define MWAITX_DISABLE_CSTATES 0xf0
+
+ static inline void __monitor(const void *eax, unsigned long ecx,
+ unsigned long edx)
+--- a/arch/x86/lib/delay.c
++++ b/arch/x86/lib/delay.c
+@@ -113,8 +113,8 @@ static void delay_mwaitx(unsigned long _
+ __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
+
+ /*
+- * AMD, like Intel, supports the EAX hint and EAX=0xf
+- * means, do not enter any deep C-state and we use it
++ * AMD, like Intel's MWAIT version, supports the EAX hint and
++ * EAX=0xf0 means, do not enter any deep C-state and we use it
+ * here in delay() to minimize wakeup latency.
+ */
+ __mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);