--- /dev/null
+From 4b90a603a1b21d63cf743cc833680cb195a729f6 Mon Sep 17 00:00:00 2001
+From: Nick Bowler <nbowler@elliptictech.com>
+Date: Thu, 10 Nov 2011 09:01:27 +0000
+Subject: ah: Don't return NET_XMIT_DROP on input.
+
+From: Nick Bowler <nbowler@elliptictech.com>
+
+commit 4b90a603a1b21d63cf743cc833680cb195a729f6 upstream.
+
+When the ahash driver returns -EBUSY, AH4/6 input functions return
+NET_XMIT_DROP, presumably copied from the output code path. But
+returning transmit codes on input doesn't make a lot of sense.
+Since NET_XMIT_DROP is a positive int, this gets interpreted as
+the next header type (i.e., success). As that can only end badly,
+remove the check.
+
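+For reference, a condensed sketch of why a positive return is fatal on
+input (simplified from the xfrm input path; not the exact kernel code):
+
+ nexthdr = x->type->input(x, skb);  /* ah_input() / ah6_input() */
+ if (nexthdr == -EINPROGRESS)
+         return 0;                  /* async crypto still in flight */
+ if (nexthdr <= 0)
+         goto drop;                 /* real errors are negative */
+ /* NET_XMIT_DROP == 1 would sail through here and be used as the
+  * next header value, i.e. treated as a successful AH input. */
+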
+Signed-off-by: Nick Bowler <nbowler@elliptictech.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/ipv4/ah4.c | 2 --
+ net/ipv6/ah6.c | 2 --
+ 2 files changed, 4 deletions(-)
+
+--- a/net/ipv4/ah4.c
++++ b/net/ipv4/ah4.c
+@@ -369,8 +369,6 @@ static int ah_input(struct xfrm_state *x
+ if (err == -EINPROGRESS)
+ goto out;
+
+- if (err == -EBUSY)
+- err = NET_XMIT_DROP;
+ goto out_free;
+ }
+
+--- a/net/ipv6/ah6.c
++++ b/net/ipv6/ah6.c
+@@ -581,8 +581,6 @@ static int ah6_input(struct xfrm_state *
+ if (err == -EINPROGRESS)
+ goto out;
+
+- if (err == -EBUSY)
+- err = NET_XMIT_DROP;
+ goto out_free;
+ }
+
--- /dev/null
+From 84e31fdb7c797a7303e0cc295cb9bc8b73fb872d Mon Sep 17 00:00:00 2001
+From: Alexey Dobriyan <adobriyan@gmail.com>
+Date: Sat, 14 Jan 2012 21:27:37 +0300
+Subject: crypto: sha512 - make it work, undo percpu message schedule
+
+From: Alexey Dobriyan <adobriyan@gmail.com>
+
+commit 84e31fdb7c797a7303e0cc295cb9bc8b73fb872d upstream.
+
+commit f9e2bca6c22d75a289a349f869701214d63b5060,
+aka "crypto: sha512 - Move message schedule W[80] to static percpu area",
+created a global message schedule area.
+
+If sha512_update() is ever entered twice concurrently, the hash will be
+silently calculated incorrectly, because both invocations share the same
+per-CPU message schedule.
+
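+A condensed sketch of the failure mode, assuming a softirq (e.g. AH
+receive processing) interrupts a process-context hash on the same CPU:
+
+ /* process context, inside sha512_transform() */
+ u64 *W = get_cpu_var(msg_schedule); /* disables preemption only */
+ for (i = 0; i < 16; i++)
+         LOAD_OP(i, W, input);
+ /* <-- softirq fires here, calls sha512_transform() itself and
+  *     fills the *same* per-CPU msg_schedule with its own block */
+ for (i = 16; i < 80; i++)
+         BLEND_OP(i, W);             /* blends the clobbered words */
+ /* rounds then run over corrupted W[]: wrong digest, no error */
+ put_cpu_var(msg_schedule);
+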
+Probably the easiest way to notice incorrect hashes being calculated is
+to run 2 ping floods over AH with hmac(sha512):
+
+ #!/usr/sbin/setkey -f
+ flush;
+ spdflush;
+ add IP1 IP2 ah 25 -A hmac-sha512 0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000025;
+ add IP2 IP1 ah 52 -A hmac-sha512 0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000052;
+ spdadd IP1 IP2 any -P out ipsec ah/transport//require;
+ spdadd IP2 IP1 any -P in ipsec ah/transport//require;
+
+XfrmInStateProtoError will start ticking with -EBADMSG being returned
+from ah_input(). This never happens with, say, hmac(sha1).
+
+With the patch applied (on BOTH sides), XfrmInStateProtoError does not
+tick with multiple bidirectional ping flood streams, just as it does
+not with SHA-1.
+
+After this patch, sha512_transform() will use ~750 bytes of stack on
+x86_64. This is OK for simple loads; for anything heavier, stack
+reduction will be done separately.
+
+Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ crypto/sha512_generic.c | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/crypto/sha512_generic.c
++++ b/crypto/sha512_generic.c
+@@ -21,8 +21,6 @@
+ #include <linux/percpu.h>
+ #include <asm/byteorder.h>
+
+-static DEFINE_PER_CPU(u64[80], msg_schedule);
+-
+ static inline u64 Ch(u64 x, u64 y, u64 z)
+ {
+ return z ^ (x & (y ^ z));
+@@ -89,7 +87,7 @@ sha512_transform(u64 *state, const u8 *i
+ u64 a, b, c, d, e, f, g, h, t1, t2;
+
+ int i;
+- u64 *W = get_cpu_var(msg_schedule);
++ u64 W[80];
+
+ /* load the input */
+ for (i = 0; i < 16; i++)
+@@ -128,8 +126,6 @@ sha512_transform(u64 *state, const u8 *i
+
+ /* erase our data */
+ a = b = c = d = e = f = g = h = t1 = t2 = 0;
+- memset(W, 0, sizeof(__get_cpu_var(msg_schedule)));
+- put_cpu_var(msg_schedule);
+ }
+
+ static int
--- /dev/null
+From 51fc6dc8f948047364f7d42a4ed89b416c6cc0a3 Mon Sep 17 00:00:00 2001
+From: Alexey Dobriyan <adobriyan@gmail.com>
+Date: Sat, 14 Jan 2012 21:40:57 +0300
+Subject: crypto: sha512 - reduce stack usage to safe number
+
+From: Alexey Dobriyan <adobriyan@gmail.com>
+
+commit 51fc6dc8f948047364f7d42a4ed89b416c6cc0a3 upstream.
+
+For rounds 16--79, W[i] only depends on W[i - 2], W[i - 7], W[i - 15]
+and W[i - 16]. Consequently, keeping the whole W[80] array on the stack
+is unnecessary; only the 16 most recent values are really needed.
+
+Using W[16] instead of W[80] greatly reduces stack usage
+(from ~750 bytes to ~340 bytes on x86_64).
+
+Line-by-line explanation:
+* BLEND_OP
+  The array is "circular" now, so all indexes have to be taken modulo
+  16 (see the worked example after this list). The round number is
+  always positive, so the remainder operation holds no surprises.
+
+* The initial full message scheduling is trimmed to the first 16 values,
+  which come straight from the data block; the rest are calculated just
+  before they are needed.
+
+* The original loop body is an unrolled version of the new SHA512_0_15
+  and SHA512_16_79 macros; unrolling was done to avoid explicit variable
+  renaming, so it is the very same code after preprocessing. See
+  sha1_transform(), which uses the same trick.
+
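+A worked example for round i = 16, assuming W[0..15] still hold the
+16 words loaded from the data block:
+
+ /* Old code:  W[16]      =  s1(W[14]) + W[9] + s0(W[1]) + W[0];
+  * New code:  W[16 % 16] += s1(W[14 % 16]) + W[9 % 16] + s0(W[1 % 16]);
+  * i.e.       W[0]       += s1(W[14])      + W[9]      + s0(W[1]);
+  * Since W[0] already holds the W[i-16] term, the sum is identical;
+  * it just lands in the slot that later rounds read modulo 16. */
+ BLEND_OP(16, W);
+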
+The patch survives the in-tree crypto tests and the original bug
+report's test (ping flood with hmac(sha512)).
+
+See FIPS 180-2 for the SHA-512 definition:
+http://csrc.nist.gov/publications/fips/fips180-2/fips180-2withchangenotice.pdf
+
+Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ crypto/sha512_generic.c | 58 ++++++++++++++++++++++++++++--------------------
+ 1 file changed, 34 insertions(+), 24 deletions(-)
+
+--- a/crypto/sha512_generic.c
++++ b/crypto/sha512_generic.c
+@@ -78,7 +78,7 @@ static inline void LOAD_OP(int I, u64 *W
+
+ static inline void BLEND_OP(int I, u64 *W)
+ {
+- W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
++ W[I % 16] += s1(W[(I-2) % 16]) + W[(I-7) % 16] + s0(W[(I-15) % 16]);
+ }
+
+ static void
+@@ -87,38 +87,48 @@ sha512_transform(u64 *state, const u8 *i
+ u64 a, b, c, d, e, f, g, h, t1, t2;
+
+ int i;
+- u64 W[80];
++ u64 W[16];
+
+ /* load the input */
+ for (i = 0; i < 16; i++)
+ LOAD_OP(i, W, input);
+
+- for (i = 16; i < 80; i++) {
+- BLEND_OP(i, W);
+- }
+-
+ /* load the state into our registers */
+ a=state[0]; b=state[1]; c=state[2]; d=state[3];
+ e=state[4]; f=state[5]; g=state[6]; h=state[7];
+
+- /* now iterate */
+- for (i=0; i<80; i+=8) {
+- t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i ] + W[i ];
+- t2 = e0(a) + Maj(a,b,c); d+=t1; h=t1+t2;
+- t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[i+1];
+- t2 = e0(h) + Maj(h,a,b); c+=t1; g=t1+t2;
+- t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[i+2];
+- t2 = e0(g) + Maj(g,h,a); b+=t1; f=t1+t2;
+- t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[i+3];
+- t2 = e0(f) + Maj(f,g,h); a+=t1; e=t1+t2;
+- t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[i+4];
+- t2 = e0(e) + Maj(e,f,g); h+=t1; d=t1+t2;
+- t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[i+5];
+- t2 = e0(d) + Maj(d,e,f); g+=t1; c=t1+t2;
+- t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[i+6];
+- t2 = e0(c) + Maj(c,d,e); f+=t1; b=t1+t2;
+- t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[i+7];
+- t2 = e0(b) + Maj(b,c,d); e+=t1; a=t1+t2;
++#define SHA512_0_15(i, a, b, c, d, e, f, g, h) \
++ t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[i]; \
++ t2 = e0(a) + Maj(a, b, c); \
++ d += t1; \
++ h = t1 + t2
++
++#define SHA512_16_79(i, a, b, c, d, e, f, g, h) \
++ BLEND_OP(i, W); \
++ t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[(i)%16]; \
++ t2 = e0(a) + Maj(a, b, c); \
++ d += t1; \
++ h = t1 + t2
++
++ for (i = 0; i < 16; i += 8) {
++ SHA512_0_15(i, a, b, c, d, e, f, g, h);
++ SHA512_0_15(i + 1, h, a, b, c, d, e, f, g);
++ SHA512_0_15(i + 2, g, h, a, b, c, d, e, f);
++ SHA512_0_15(i + 3, f, g, h, a, b, c, d, e);
++ SHA512_0_15(i + 4, e, f, g, h, a, b, c, d);
++ SHA512_0_15(i + 5, d, e, f, g, h, a, b, c);
++ SHA512_0_15(i + 6, c, d, e, f, g, h, a, b);
++ SHA512_0_15(i + 7, b, c, d, e, f, g, h, a);
++ }
++ for (i = 16; i < 80; i += 8) {
++ SHA512_16_79(i, a, b, c, d, e, f, g, h);
++ SHA512_16_79(i + 1, h, a, b, c, d, e, f, g);
++ SHA512_16_79(i + 2, g, h, a, b, c, d, e, f);
++ SHA512_16_79(i + 3, f, g, h, a, b, c, d, e);
++ SHA512_16_79(i + 4, e, f, g, h, a, b, c, d);
++ SHA512_16_79(i + 5, d, e, f, g, h, a, b, c);
++ SHA512_16_79(i + 6, c, d, e, f, g, h, a, b);
++ SHA512_16_79(i + 7, b, c, d, e, f, g, h, a);
+ }
+
+ state[0] += a; state[1] += b; state[2] += c; state[3] += d;
--- /dev/null
+From 41fb61c2d08107ce96a5dcb3a6289b2afd3e135c Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <srostedt@redhat.com>
+Date: Wed, 13 Jul 2011 15:03:44 -0400
+Subject: ftrace: Balance records when updating the hash
+
+From: Steven Rostedt <srostedt@redhat.com>
+
+commit 41fb61c2d08107ce96a5dcb3a6289b2afd3e135c upstream.
+
+Whenever the hash of the ftrace_ops is updated, the record counts
+must be balanced. This requires disabling the records that are set
+in the original hash, and then enabling the records that are set
+in the updated hash.
+
+Moving the update into ftrace_hash_move() removes the bug where the
+hash was updated but the records were not. With that bug, updating the
+ftrace_ops filter while the ftrace_ops was registered made ftrace
+trigger a warning and disable itself when the ftrace_ops was later
+unregistered.
+
+The current code will not trigger this bug, but new code will.
+
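+In outline, ftrace_hash_move() now brackets the swap (condensed from
+the diff below; allocation and error handling omitted):
+
+ ftrace_hash_rec_disable(ops, enable); /* drop counts for the old hash */
+ /* ... allocate new_hash, move src entries, rcu_assign_pointer() ... */
+ ftrace_hash_rec_enable(ops, enable);  /* re-add counts: the new hash on
+                                          success, the old one on failure */
+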
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/trace/ftrace.c | 49 +++++++++++++++++++++++++++++++++----------------
+ 1 file changed, 33 insertions(+), 16 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1182,8 +1182,14 @@ alloc_and_copy_ftrace_hash(int size_bits
+ return NULL;
+ }
+
++static void
++ftrace_hash_rec_disable(struct ftrace_ops *ops, int filter_hash);
++static void
++ftrace_hash_rec_enable(struct ftrace_ops *ops, int filter_hash);
++
+ static int
+-ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
++ftrace_hash_move(struct ftrace_ops *ops, int enable,
++ struct ftrace_hash **dst, struct ftrace_hash *src)
+ {
+ struct ftrace_func_entry *entry;
+ struct hlist_node *tp, *tn;
+@@ -1193,9 +1199,16 @@ ftrace_hash_move(struct ftrace_hash **ds
+ unsigned long key;
+ int size = src->count;
+ int bits = 0;
++ int ret;
+ int i;
+
+ /*
++ * Remove the current set, update the hash and add
++ * them back.
++ */
++ ftrace_hash_rec_disable(ops, enable);
++
++ /*
+ * If the new source is empty, just free dst and assign it
+ * the empty_hash.
+ */
+@@ -1215,9 +1228,10 @@ ftrace_hash_move(struct ftrace_hash **ds
+ if (bits > FTRACE_HASH_MAX_BITS)
+ bits = FTRACE_HASH_MAX_BITS;
+
++ ret = -ENOMEM;
+ new_hash = alloc_ftrace_hash(bits);
+ if (!new_hash)
+- return -ENOMEM;
++ goto out;
+
+ size = 1 << src->size_bits;
+ for (i = 0; i < size; i++) {
+@@ -1236,7 +1250,16 @@ ftrace_hash_move(struct ftrace_hash **ds
+ rcu_assign_pointer(*dst, new_hash);
+ free_ftrace_hash_rcu(old_hash);
+
+- return 0;
++ ret = 0;
++ out:
++ /*
++ * Enable regardless of ret:
++ * On success, we enable the new hash.
++ * On failure, we re-enable the original hash.
++ */
++ ftrace_hash_rec_enable(ops, enable);
++
++ return ret;
+ }
+
+ /*
+@@ -2877,7 +2900,7 @@ ftrace_set_regex(struct ftrace_ops *ops,
+ ftrace_match_records(hash, buf, len);
+
+ mutex_lock(&ftrace_lock);
+- ret = ftrace_hash_move(orig_hash, hash);
++ ret = ftrace_hash_move(ops, enable, orig_hash, hash);
+ mutex_unlock(&ftrace_lock);
+
+ mutex_unlock(&ftrace_regex_lock);
+@@ -3060,18 +3083,12 @@ ftrace_regex_release(struct inode *inode
+ orig_hash = &iter->ops->notrace_hash;
+
+ mutex_lock(&ftrace_lock);
+- /*
+- * Remove the current set, update the hash and add
+- * them back.
+- */
+- ftrace_hash_rec_disable(iter->ops, filter_hash);
+- ret = ftrace_hash_move(orig_hash, iter->hash);
+- if (!ret) {
+- ftrace_hash_rec_enable(iter->ops, filter_hash);
+- if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
+- && ftrace_enabled)
+- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+- }
++ ret = ftrace_hash_move(iter->ops, filter_hash,
++ orig_hash, iter->hash);
++ if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
++ && ftrace_enabled)
++ ftrace_run_update_code(FTRACE_ENABLE_CALLS);
++
+ mutex_unlock(&ftrace_lock);
+ }
+ free_ftrace_hash(iter->hash);
--- /dev/null
+From 30fb6aa74011dcf595f306ca2727254d708b786e Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@redhat.com>
+Date: Mon, 5 Dec 2011 18:22:48 +0100
+Subject: ftrace: Fix unregister ftrace_ops accounting
+
+From: Jiri Olsa <jolsa@redhat.com>
+
+commit 30fb6aa74011dcf595f306ca2727254d708b786e upstream.
+
+Multiple users of the function tracer can register their functions
+with the ftrace_ops structure. The accounting within ftrace will
+update the counter on each function record that is being traced.
+When the ftrace_ops filtering adds or removes functions, the
+function records will be updated accordingly if the ftrace_ops is
+still registered.
+
+When a ftrace_ops is removed, the counters of the function records
+that it traces are decremented. When a counter reaches zero, the
+function it represents is modified to stop calling the
+mcount code.
+
+When changes are made, the code is updated via stop_machine() with
+a command passed to the function to tell it what to do. There are
+ENABLE and DISABLE commands that tell the called function to enable
+or disable the functions. But ENABLE is really a misnomer, as it
+should just update the records: records that were enabled but now
+have a count of zero should be disabled.
+
+The DISABLE command is used to disable all functions regardless of
+their counter values. It is the big off switch, not the
+complement of the ENABLE command.
+
+To make matters worse, when a ftrace_ops is unregistered while another
+ftrace_ops is still registered, neither the DISABLE nor the ENABLE
+command is set when calling into the stop_machine() function, so the
+records are not updated to match their counters. A command is passed
+to that function that updates the mcount code to call the registered
+callback directly if it is the only one left. This means that the
+ftrace_ops that is still registered will have its callback called by
+all functions that were set for it, as well as by those of the
+ftrace_ops that was just unregistered.
+
+Here's a way to trigger this bug. Compile the kernel with
+CONFIG_FUNCTION_PROFILER set and with CONFIG_FUNCTION_GRAPH_TRACER not
+set:
+
+ CONFIG_FUNCTION_PROFILER=y
+ # CONFIG_FUNCTION_GRAPH_TRACER is not set
+
+This will force the function profiler to use the function tracer instead
+of the function graph tracer.
+
+ # cd /sys/kernel/debug/tracing
+ # echo schedule > set_ftrace_filter
+ # echo function > current_tracer
+ # cat set_ftrace_filter
+ schedule
+ # cat trace
+ # tracer: nop
+ #
+ # entries-in-buffer/entries-written: 692/68108025 #P:4
+ #
+ # _-----=> irqs-off
+ # / _----=> need-resched
+ # | / _---=> hardirq/softirq
+ # || / _--=> preempt-depth
+ # ||| / delay
+ # TASK-PID CPU# |||| TIMESTAMP FUNCTION
+ # | | | |||| | |
+ kworker/0:2-909 [000] .... 531.235574: schedule <-worker_thread
+ <idle>-0 [001] .N.. 531.235575: schedule <-cpu_idle
+ kworker/0:2-909 [000] .... 531.235597: schedule <-worker_thread
+ sshd-2563 [001] .... 531.235647: schedule <-schedule_hrtimeout_range_clock
+
+ # echo 1 > function_profile_enabled
+ # echo 0 > function_profile_enabled
+ # cat set_ftrace_filter
+ schedule
+ # cat trace
+ # tracer: function
+ #
+ # entries-in-buffer/entries-written: 159701/118821262 #P:4
+ #
+ # _-----=> irqs-off
+ # / _----=> need-resched
+ # | / _---=> hardirq/softirq
+ # || / _--=> preempt-depth
+ # ||| / delay
+ # TASK-PID CPU# |||| TIMESTAMP FUNCTION
+ # | | | |||| | |
+ <idle>-0 [002] ...1 604.870655: local_touch_nmi <-cpu_idle
+ <idle>-0 [002] d..1 604.870655: enter_idle <-cpu_idle
+ <idle>-0 [002] d..1 604.870656: atomic_notifier_call_chain <-enter_idle
+ <idle>-0 [002] d..1 604.870656: __atomic_notifier_call_chain <-atomic_notifier_call_chain
+
+The same problem could have happened with the trace_probe_ops,
+but they are modified via the set_ftrace_filter file, which does the
+update when the file is closed.
+
+The simple solution is to change ENABLE to UPDATE and call it every
+time an ftrace_ops is unregistered.
+
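+With the rename, the per-record decision reads as follows (condensed
+from __ftrace_replace_code() as patched below; "update" reconciles each
+record with its ref count instead of forcing everything on):
+
+ if (update && (rec->flags & ~FTRACE_FL_MASK))
+         flag = FTRACE_FL_ENABLED;  /* still referenced by some ops */
+ if ((rec->flags & FTRACE_FL_ENABLED) == flag)
+         return 0;                  /* already in the desired state */
+ if (flag) {
+         rec->flags |= FTRACE_FL_ENABLED;
+         return ftrace_make_call(NULL, rec, ftrace_addr);
+ }
+ rec->flags &= ~FTRACE_FL_ENABLED;
+ return ftrace_make_nop(NULL, rec, ftrace_addr);
+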
+Link: http://lkml.kernel.org/r/1323105776-26961-3-git-send-email-jolsa@redhat.com
+
+Signed-off-by: Jiri Olsa <jolsa@redhat.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/trace/ftrace.c | 27 +++++++++++++--------------
+ 1 file changed, 13 insertions(+), 14 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -952,7 +952,7 @@ struct ftrace_func_probe {
+ };
+
+ enum {
+- FTRACE_ENABLE_CALLS = (1 << 0),
++ FTRACE_UPDATE_CALLS = (1 << 0),
+ FTRACE_DISABLE_CALLS = (1 << 1),
+ FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
+ FTRACE_START_FUNC_RET = (1 << 3),
+@@ -1521,7 +1521,7 @@ int ftrace_text_reserved(void *start, vo
+
+
+ static int
+-__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
++__ftrace_replace_code(struct dyn_ftrace *rec, int update)
+ {
+ unsigned long ftrace_addr;
+ unsigned long flag = 0UL;
+@@ -1529,17 +1529,17 @@ __ftrace_replace_code(struct dyn_ftrace
+ ftrace_addr = (unsigned long)FTRACE_ADDR;
+
+ /*
+- * If we are enabling tracing:
++ * If we are updating calls:
+ *
+ * If the record has a ref count, then we need to enable it
+ * because someone is using it.
+ *
+ * Otherwise we make sure its disabled.
+ *
+- * If we are disabling tracing, then disable all records that
++ * If we are disabling calls, then disable all records that
+ * are enabled.
+ */
+- if (enable && (rec->flags & ~FTRACE_FL_MASK))
++ if (update && (rec->flags & ~FTRACE_FL_MASK))
+ flag = FTRACE_FL_ENABLED;
+
+ /* If the state of this record hasn't changed, then do nothing */
+@@ -1555,7 +1555,7 @@ __ftrace_replace_code(struct dyn_ftrace
+ return ftrace_make_nop(NULL, rec, ftrace_addr);
+ }
+
+-static void ftrace_replace_code(int enable)
++static void ftrace_replace_code(int update)
+ {
+ struct dyn_ftrace *rec;
+ struct ftrace_page *pg;
+@@ -1569,7 +1569,7 @@ static void ftrace_replace_code(int enab
+ if (rec->flags & FTRACE_FL_FREE)
+ continue;
+
+- failed = __ftrace_replace_code(rec, enable);
++ failed = __ftrace_replace_code(rec, update);
+ if (failed) {
+ ftrace_bug(failed, rec->ip);
+ /* Stop processing */
+@@ -1619,7 +1619,7 @@ static int __ftrace_modify_code(void *da
+ {
+ int *command = data;
+
+- if (*command & FTRACE_ENABLE_CALLS)
++ if (*command & FTRACE_UPDATE_CALLS)
+ ftrace_replace_code(1);
+ else if (*command & FTRACE_DISABLE_CALLS)
+ ftrace_replace_code(0);
+@@ -1675,7 +1675,7 @@ static int ftrace_startup(struct ftrace_
+ return -ENODEV;
+
+ ftrace_start_up++;
+- command |= FTRACE_ENABLE_CALLS;
++ command |= FTRACE_UPDATE_CALLS;
+
+ /* ops marked global share the filter hashes */
+ if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
+@@ -1727,8 +1727,7 @@ static void ftrace_shutdown(struct ftrac
+ if (ops != &global_ops || !global_start_up)
+ ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+
+- if (!ftrace_start_up)
+- command |= FTRACE_DISABLE_CALLS;
++ command |= FTRACE_UPDATE_CALLS;
+
+ if (saved_ftrace_func != ftrace_trace_function) {
+ saved_ftrace_func = ftrace_trace_function;
+@@ -1750,7 +1749,7 @@ static void ftrace_startup_sysctl(void)
+ saved_ftrace_func = NULL;
+ /* ftrace_start_up is true if we want ftrace running */
+ if (ftrace_start_up)
+- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
++ ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+ }
+
+ static void ftrace_shutdown_sysctl(void)
+@@ -2903,7 +2902,7 @@ ftrace_set_regex(struct ftrace_ops *ops,
+ ret = ftrace_hash_move(ops, enable, orig_hash, hash);
+ if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
+ && ftrace_enabled)
+- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
++ ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+
+ mutex_unlock(&ftrace_lock);
+
+@@ -3091,7 +3090,7 @@ ftrace_regex_release(struct inode *inode
+ orig_hash, iter->hash);
+ if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
+ && ftrace_enabled)
+- ftrace_run_update_code(FTRACE_ENABLE_CALLS);
++ ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+
+ mutex_unlock(&ftrace_lock);
+ }
--- /dev/null
+From 072126f4529196f71a97960248bca54fd4554c2d Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <srostedt@redhat.com>
+Date: Wed, 13 Jul 2011 15:08:31 -0400
+Subject: ftrace: Update filter when tracing enabled in set_ftrace_filter()
+
+From: Steven Rostedt <srostedt@redhat.com>
+
+commit 072126f4529196f71a97960248bca54fd4554c2d upstream.
+
+Currently, if set_ftrace_filter() is called when the ftrace_ops is
+active, the function filters will not be updated. They will only be updated
+when tracing is disabled and re-enabled.
+
+Update the functions immediately during set_ftrace_filter().
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/trace/ftrace.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -2901,6 +2901,10 @@ ftrace_set_regex(struct ftrace_ops *ops,
+
+ mutex_lock(&ftrace_lock);
+ ret = ftrace_hash_move(ops, enable, orig_hash, hash);
++ if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
++ && ftrace_enabled)
++ ftrace_run_update_code(FTRACE_ENABLE_CALLS);
++
+ mutex_unlock(&ftrace_lock);
+
+ mutex_unlock(&ftrace_regex_lock);
drm-radeon-kms-add-an-msi-quirk-for-dell-rs690.patch
drm-fix-authentication-kernel-crash.patch
xfs-fix-missing-xfs_iunlock-on-error-recovery-path-in-xfs_readlink.patch
+crypto-sha512-make-it-work-undo-percpu-message-schedule.patch
+crypto-sha512-reduce-stack-usage-to-safe-number.patch
+tpm_tis-add-delay-after-aborting-command.patch
+ftrace-balance-records-when-updating-the-hash.patch
+ftrace-update-filter-when-tracing-enabled-in-set_ftrace_filter.patch
+ftrace-fix-unregister-ftrace_ops-accounting.patch
+ah-don-t-return-net_xmit_drop-on-input.patch
--- /dev/null
+From a927b8131794ee449b7f6666e7ab61301949b20f Mon Sep 17 00:00:00 2001
+From: Stefan Berger <stefanb@linux.vnet.ibm.com>
+Date: Fri, 11 Nov 2011 12:57:06 -0500
+Subject: tpm_tis: add delay after aborting command
+
+From: Stefan Berger <stefanb@linux.vnet.ibm.com>
+
+commit a927b8131794ee449b7f6666e7ab61301949b20f upstream.
+
+This patch adds a delay after aborting a command. Some TPMs need
+this and will not process the subsequent command correctly otherwise.
+
+It's worth noting that a TPM randomly failing to process a command
+maps to randomly failing suspend/resume operations.
+
+Signed-off-by: Stefan Berger <stefanb@linux.vnet.ibm.com>
+Signed-off-by: Rajiv Andrade <srajiv@linux.vnet.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/tpm/tpm_tis.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/char/tpm/tpm_tis.c
++++ b/drivers/char/tpm/tpm_tis.c
+@@ -354,6 +354,9 @@ static int tpm_tis_send(struct tpm_chip
+ return len;
+ out_err:
+ tpm_tis_ready(chip);
++ /* some TPMs need a break here otherwise they will not work
++ * correctly on the immediately subsequent command */
++ msleep(chip->vendor.timeout_b);
+ release_locality(chip, chip->vendor.locality, 0);
+ return rc;
+ }