--- /dev/null
+From 09e05d4805e6c524c1af74e524e5d0528bb3fef3 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Wed, 11 Jul 2012 23:16:25 +0200
+Subject: jbd: Fix assertion failure in commit code due to lacking transaction credits
+
+From: Jan Kara <jack@suse.cz>
+
+commit 09e05d4805e6c524c1af74e524e5d0528bb3fef3 upstream.
+
+ext3 users of data=journal mode with blocksize < pagesize were occasionally
+hitting assertion failure in journal_commit_transaction() checking whether the
+transaction has at least as many credits reserved as buffers attached. The
+core of the problem is that when a file gets truncated, buffers that still need
+checkpointing or that are attached to the committing transaction are left with
+buffer_mapped set. When this happens to buffers beyond i_size attached to a
+page straddling i_size, subsequent write extending the file will see these
+buffers and as they are mapped (but underlying blocks were freed) things go
+awry from here.
+
+The assertion failure just coincidentally (and in this case luckily as we would
+start corrupting filesystem) triggers due to journal_head not being properly
+cleaned up as well.
+
+Under some rare circumstances this bug could even hit data=ordered mode users.
+There the assertion won't trigger and we would end up corrupting the
+filesystem.
+
+We fix the problem by unmapping buffers if possible (in lots of cases we just
+need a buffer attached to a transaction as a place holder but it must not be
+written out anyway). And in one case, we just have to bite the bullet and wait
+for transaction commit to finish.
+
+Reviewed-by: Josef Bacik <jbacik@fusionio.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/jbd/commit.c | 45 +++++++++++++++++++++++++++--------
+ fs/jbd/transaction.c | 64 +++++++++++++++++++++++++++++++++++----------------
+ 2 files changed, 78 insertions(+), 31 deletions(-)
+
+--- a/fs/jbd/commit.c
++++ b/fs/jbd/commit.c
+@@ -85,7 +85,12 @@ nope:
+ static void release_data_buffer(struct buffer_head *bh)
+ {
+ if (buffer_freed(bh)) {
++ WARN_ON_ONCE(buffer_dirty(bh));
+ clear_buffer_freed(bh);
++ clear_buffer_mapped(bh);
++ clear_buffer_new(bh);
++ clear_buffer_req(bh);
++ bh->b_bdev = NULL;
+ release_buffer_page(bh);
+ } else
+ put_bh(bh);
+@@ -840,17 +845,35 @@ restart_loop:
+ * there's no point in keeping a checkpoint record for
+ * it. */
+
+- /* A buffer which has been freed while still being
+- * journaled by a previous transaction may end up still
+- * being dirty here, but we want to avoid writing back
+- * that buffer in the future after the "add to orphan"
+- * operation been committed, That's not only a performance
+- * gain, it also stops aliasing problems if the buffer is
+- * left behind for writeback and gets reallocated for another
+- * use in a different page. */
+- if (buffer_freed(bh) && !jh->b_next_transaction) {
+- clear_buffer_freed(bh);
+- clear_buffer_jbddirty(bh);
++ /*
++ * A buffer which has been freed while still being journaled by
++ * a previous transaction.
++ */
++ if (buffer_freed(bh)) {
++ /*
++ * If the running transaction is the one containing
++ * "add to orphan" operation (b_next_transaction !=
++ * NULL), we have to wait for that transaction to
++ * commit before we can really get rid of the buffer.
++ * So just clear b_modified to not confuse transaction
++ * credit accounting and refile the buffer to
++ * BJ_Forget of the running transaction. If the just
++ * committed transaction contains "add to orphan"
++ * operation, we can completely invalidate the buffer
++ * now. We are rather throughout in that since the
++ * buffer may be still accessible when blocksize <
++ * pagesize and it is attached to the last partial
++ * page.
++ */
++ jh->b_modified = 0;
++ if (!jh->b_next_transaction) {
++ clear_buffer_freed(bh);
++ clear_buffer_jbddirty(bh);
++ clear_buffer_mapped(bh);
++ clear_buffer_new(bh);
++ clear_buffer_req(bh);
++ bh->b_bdev = NULL;
++ }
+ }
+
+ if (buffer_jbddirty(bh)) {
+--- a/fs/jbd/transaction.c
++++ b/fs/jbd/transaction.c
+@@ -1837,15 +1837,16 @@ static int __dispose_buffer(struct journ
+ * We're outside-transaction here. Either or both of j_running_transaction
+ * and j_committing_transaction may be NULL.
+ */
+-static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
++static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
++ int partial_page)
+ {
+ transaction_t *transaction;
+ struct journal_head *jh;
+ int may_free = 1;
+- int ret;
+
+ BUFFER_TRACE(bh, "entry");
+
++retry:
+ /*
+ * It is safe to proceed here without the j_list_lock because the
+ * buffers cannot be stolen by try_to_free_buffers as long as we are
+@@ -1873,10 +1874,18 @@ static int journal_unmap_buffer(journal_
+ * clear the buffer dirty bit at latest at the moment when the
+ * transaction marking the buffer as freed in the filesystem
+ * structures is committed because from that moment on the
+- * buffer can be reallocated and used by a different page.
++ * block can be reallocated and used by a different page.
+ * Since the block hasn't been freed yet but the inode has
+ * already been added to orphan list, it is safe for us to add
+ * the buffer to BJ_Forget list of the newest transaction.
++ *
++ * Also we have to clear buffer_mapped flag of a truncated buffer
++ * because the buffer_head may be attached to the page straddling
++ * i_size (can happen only when blocksize < pagesize) and thus the
++ * buffer_head can be reused when the file is extended again. So we end
++ * up keeping around invalidated buffers attached to transactions'
++ * BJ_Forget list just to stop checkpointing code from cleaning up
++ * the transaction this buffer was modified in.
+ */
+ transaction = jh->b_transaction;
+ if (transaction == NULL) {
+@@ -1903,13 +1912,9 @@ static int journal_unmap_buffer(journal_
+ * committed, the buffer won't be needed any
+ * longer. */
+ JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
+- ret = __dispose_buffer(jh,
++ may_free = __dispose_buffer(jh,
+ journal->j_running_transaction);
+- journal_put_journal_head(jh);
+- spin_unlock(&journal->j_list_lock);
+- jbd_unlock_bh_state(bh);
+- spin_unlock(&journal->j_state_lock);
+- return ret;
++ goto zap_buffer;
+ } else {
+ /* There is no currently-running transaction. So the
+ * orphan record which we wrote for this file must have
+@@ -1917,13 +1922,9 @@ static int journal_unmap_buffer(journal_
+ * the committing transaction, if it exists. */
+ if (journal->j_committing_transaction) {
+ JBUFFER_TRACE(jh, "give to committing trans");
+- ret = __dispose_buffer(jh,
++ may_free = __dispose_buffer(jh,
+ journal->j_committing_transaction);
+- journal_put_journal_head(jh);
+- spin_unlock(&journal->j_list_lock);
+- jbd_unlock_bh_state(bh);
+- spin_unlock(&journal->j_state_lock);
+- return ret;
++ goto zap_buffer;
+ } else {
+ /* The orphan record's transaction has
+ * committed. We can cleanse this buffer */
+@@ -1944,10 +1945,24 @@ static int journal_unmap_buffer(journal_
+ }
+ /*
+ * The buffer is committing, we simply cannot touch
+- * it. So we just set j_next_transaction to the
+- * running transaction (if there is one) and mark
+- * buffer as freed so that commit code knows it should
+- * clear dirty bits when it is done with the buffer.
++ * it. If the page is straddling i_size we have to wait
++ * for commit and try again.
++ */
++ if (partial_page) {
++ tid_t tid = journal->j_committing_transaction->t_tid;
++
++ journal_put_journal_head(jh);
++ spin_unlock(&journal->j_list_lock);
++ jbd_unlock_bh_state(bh);
++ spin_unlock(&journal->j_state_lock);
++ log_wait_commit(journal, tid);
++ goto retry;
++ }
++ /*
++ * OK, buffer won't be reachable after truncate. We just set
++ * j_next_transaction to the running transaction (if there is
++ * one) and mark buffer as freed so that commit code knows it
++ * should clear dirty bits when it is done with the buffer.
+ */
+ set_buffer_freed(bh);
+ if (journal->j_running_transaction && buffer_jbddirty(bh))
+@@ -1970,6 +1985,14 @@ static int journal_unmap_buffer(journal_
+ }
+
+ zap_buffer:
++ /*
++ * This is tricky. Although the buffer is truncated, it may be reused
++ * if blocksize < pagesize and it is attached to the page straddling
++ * EOF. Since the buffer might have been added to BJ_Forget list of the
++ * running transaction, journal_get_write_access() won't clear
++ * b_modified and credit accounting gets confused. So clear b_modified
++ * here. */
++ jh->b_modified = 0;
+ journal_put_journal_head(jh);
+ zap_buffer_no_jh:
+ spin_unlock(&journal->j_list_lock);
+@@ -2018,7 +2041,8 @@ void journal_invalidatepage(journal_t *j
+ if (offset <= curr_off) {
+ /* This block is wholly outside the truncation point */
+ lock_buffer(bh);
+- may_free &= journal_unmap_buffer(journal, bh);
++ may_free &= journal_unmap_buffer(journal, bh,
++ offset > 0);
+ unlock_buffer(bh);
+ }
+ curr_off = next_off;
netfilter-xt_limit-have-r-cost-0-case-work.patch
add-cdc-acm-support-for-the-cx93010-2x-ucmxx-usb-modem.patch
drm-radeon-don-t-destroy-i2c-bus-rec-in-radeon_ext_tmds_enc_destroy.patch
+jbd-fix-assertion-failure-in-commit-code-due-to-lacking-transaction-credits.patch
+x86-random-architectural-inlines-to-get-random-integers-with-rdrand.patch
+x86-random-verify-rdrand-functionality-and-allow-it-to-be-disabled.patch
+tpm-propagate-error-from-tpm_transmit-to-fix-a-timeout-hang.patch
--- /dev/null
+From abce9ac292e13da367bbd22c1f7669f988d931ac Mon Sep 17 00:00:00 2001
+From: Peter Huewe <peter.huewe@infineon.com>
+Date: Thu, 27 Sep 2012 16:09:33 +0200
+Subject: tpm: Propagate error from tpm_transmit to fix a timeout hang
+
+From: Peter Huewe <peter.huewe@infineon.com>
+
+commit abce9ac292e13da367bbd22c1f7669f988d931ac upstream.
+
+tpm_write calls tpm_transmit without checking the return value and
+assigns the return value unconditionally to chip->pending_data, even if
+it's an error value.
+This causes three bugs.
+
+So if we write to /dev/tpm0 with a tpm_param_size bigger than
+TPM_BUFSIZE=0x1000 (e.g. 0x100a)
+and a bufsize also bigger than TPM_BUFSIZE (e.g. 0x100a)
+tpm_transmit returns -E2BIG which is assigned to chip->pending_data as
+-7, but tpm_write returns that TPM_BUFSIZE bytes have been successfully
+been written to the TPM, although this is not true (bug #1).
+
+As we did write more than TPM_BUFSIZE bytes but tpm_write reports
+that only TPM_BUFSIZE bytes have been written the vfs tries to write
+the remaining bytes (in this case 10 bytes) to the tpm device driver via
+tpm_write which then blocks at
+
+ /* cannot perform a write until the read has cleared
+ either via tpm_read or a user_read_timer timeout */
+ while (atomic_read(&chip->data_pending) != 0)
+ msleep(TPM_TIMEOUT);
+
+for 60 seconds, since data_pending is -7 and nobody is able to
+read it (since tpm_read luckily checks if data_pending is greater than
+0) (bug #2).
+
+After that the remaining bytes are written to the TPM which are
+interpreted by the tpm as a normal command. (bug #3)
+So if the last bytes of the command stream happen to be a e.g.
+tpm_force_clear this gets accidentally sent to the TPM.
+
+This patch fixes all three bugs, by propagating the error code of
+tpm_write and returning -E2BIG if the input buffer is too big,
+since the response from the tpm for a truncated value is bogus anyway.
+Moreover it returns -EBUSY to userspace if there is a response ready to be
+read.
+
+Signed-off-by: Peter Huewe <peter.huewe@infineon.com>
+Signed-off-by: Kent Yoder <key@linux.vnet.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/char/tpm/tpm.c | 21 ++++++++++++++-------
+ 1 file changed, 14 insertions(+), 7 deletions(-)
+
+--- a/drivers/char/tpm/tpm.c
++++ b/drivers/char/tpm/tpm.c
+@@ -1019,17 +1019,20 @@ ssize_t tpm_write(struct file *file, con
+ size_t size, loff_t *off)
+ {
+ struct tpm_chip *chip = file->private_data;
+- size_t in_size = size, out_size;
++ size_t in_size = size;
++ ssize_t out_size;
+
+ /* cannot perform a write until the read has cleared
+- either via tpm_read or a user_read_timer timeout */
+- while (atomic_read(&chip->data_pending) != 0)
+- msleep(TPM_TIMEOUT);
+-
+- mutex_lock(&chip->buffer_mutex);
++ either via tpm_read or a user_read_timer timeout.
++ This also prevents splitted buffered writes from blocking here.
++ */
++ if (atomic_read(&chip->data_pending) != 0)
++ return -EBUSY;
+
+ if (in_size > TPM_BUFSIZE)
+- in_size = TPM_BUFSIZE;
++ return -E2BIG;
++
++ mutex_lock(&chip->buffer_mutex);
+
+ if (copy_from_user
+ (chip->data_buffer, (void __user *) buf, in_size)) {
+@@ -1039,6 +1042,10 @@ ssize_t tpm_write(struct file *file, con
+
+ /* atomic tpm command send and result receive */
+ out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE);
++ if (out_size < 0) {
++ mutex_unlock(&chip->buffer_mutex);
++ return out_size;
++ }
+
+ atomic_set(&chip->data_pending, out_size);
+ mutex_unlock(&chip->buffer_mutex);
--- /dev/null
+From 628c6246d47b85f5357298601df2444d7f4dd3fd Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin" <hpa@zytor.com>
+Date: Sun, 31 Jul 2011 13:59:29 -0700
+Subject: x86, random: Architectural inlines to get random integers with RDRAND
+
+From: "H. Peter Anvin" <hpa@zytor.com>
+
+commit 628c6246d47b85f5357298601df2444d7f4dd3fd upstream.
+
+Architectural inlines to get random ints and longs using the RDRAND
+instruction.
+
+Intel has introduced a new RDRAND instruction, a Digital Random Number
+Generator (DRNG), which is functionally a high bandwidth entropy
+source, cryptographic whitener, and integrity monitor all built into
+hardware. This enables RDRAND to be used directly, bypassing the
+kernel random number pool.
+
+For technical documentation, see:
+
+http://software.intel.com/en-us/articles/download-the-latest-bull-mountain-software-implementation-guide/
+
+In this patch, this is *only* used for the nonblocking random number
+pool. RDRAND is a nonblocking source, similar to our /dev/urandom,
+and is therefore not a direct replacement for /dev/random. The
+architectural hooks presented in the previous patch only feed the
+kernel internal users, which only use the nonblocking pool, and so
+this is not a problem.
+
+Since this instruction is available in userspace, there is no reason
+to have a /dev/hw_rng device driver for the purpose of feeding rngd.
+This is especially so since RDRAND is a nonblocking source, and needs
+additional whitening and reduction (see the above technical
+documentation for details) in order to be of "pure entropy source"
+quality.
+
+The CONFIG_EXPERT compile-time option can be used to disable this use
+of RDRAND.
+
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Originally-by: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Matt Mackall <mpm@selenic.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: "Theodore Ts'o" <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/Kconfig | 9 ++++
+ arch/x86/include/asm/archrandom.h | 73 ++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 82 insertions(+)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -1451,6 +1451,15 @@ config ARCH_USES_PG_UNCACHED
+ def_bool y
+ depends on X86_PAT
+
++config ARCH_RANDOM
++ def_bool y
++ prompt "x86 architectural random number generator" if EXPERT
++ ---help---
++ Enable the x86 architectural RDRAND instruction
++ (Intel Bull Mountain technology) to generate random numbers.
++ If supported, this is a high bandwidth, cryptographically
++ secure hardware random number generator.
++
+ config EFI
+ bool "EFI runtime service support"
+ depends on ACPI
+--- /dev/null
++++ b/arch/x86/include/asm/archrandom.h
+@@ -0,0 +1,73 @@
++/*
++ * This file is part of the Linux kernel.
++ *
++ * Copyright (c) 2011, Intel Corporation
++ * Authors: Fenghua Yu <fenghua.yu@intel.com>,
++ * H. Peter Anvin <hpa@linux.intel.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#ifndef ASM_X86_ARCHRANDOM_H
++#define ASM_X86_ARCHRANDOM_H
++
++#include <asm/processor.h>
++#include <asm/cpufeature.h>
++#include <asm/alternative.h>
++#include <asm/nops.h>
++
++#define RDRAND_RETRY_LOOPS 10
++
++#define RDRAND_INT ".byte 0x0f,0xc7,0xf0"
++#ifdef CONFIG_X86_64
++# define RDRAND_LONG ".byte 0x48,0x0f,0xc7,0xf0"
++#else
++# define RDRAND_LONG RDRAND_INT
++#endif
++
++#ifdef CONFIG_ARCH_RANDOM
++
++#define GET_RANDOM(name, type, rdrand, nop) \
++static inline int name(type *v) \
++{ \
++ int ok; \
++ alternative_io("movl $0, %0\n\t" \
++ nop, \
++ "\n1: " rdrand "\n\t" \
++ "jc 2f\n\t" \
++ "decl %0\n\t" \
++ "jnz 1b\n\t" \
++ "2:", \
++ X86_FEATURE_RDRAND, \
++ ASM_OUTPUT2("=r" (ok), "=a" (*v)), \
++ "0" (RDRAND_RETRY_LOOPS)); \
++ return ok; \
++}
++
++#ifdef CONFIG_X86_64
++
++GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP5);
++GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP4);
++
++#else
++
++GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP3);
++GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3);
++
++#endif /* CONFIG_X86_64 */
++
++#endif /* CONFIG_ARCH_RANDOM */
++
++#endif /* ASM_X86_ARCHRANDOM_H */
--- /dev/null
+From 49d859d78c5aeb998b6936fcb5f288f78d713489 Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin" <hpa@zytor.com>
+Date: Sun, 31 Jul 2011 14:02:19 -0700
+Subject: x86, random: Verify RDRAND functionality and allow it to be disabled
+
+From: "H. Peter Anvin" <hpa@zytor.com>
+
+commit 49d859d78c5aeb998b6936fcb5f288f78d713489 upstream.
+
+If the CPU declares that RDRAND is available, go through a guaranteed
+reseed sequence, and make sure that it is actually working (producing
+data.) If it does not, disable the CPU feature flag.
+
+Allow RDRAND to be disabled on the command line (as opposed to at
+compile time) for a user who has special requirements with regards to
+random numbers.
+
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Cc: Matt Mackall <mpm@selenic.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: "Theodore Ts'o" <tytso@mit.edu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ Documentation/kernel-parameters.txt | 5 ++
+ arch/x86/include/asm/archrandom.h | 2
+ arch/x86/kernel/cpu/Makefile | 1
+ arch/x86/kernel/cpu/common.c | 2
+ arch/x86/kernel/cpu/rdrand.c | 73 ++++++++++++++++++++++++++++++++++++
+ 5 files changed, 83 insertions(+)
+
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1764,6 +1764,11 @@ bytes respectively. Such letter suffixes
+
+ noresidual [PPC] Don't use residual data on PReP machines.
+
++ nordrand [X86] Disable the direct use of the RDRAND
++ instruction even if it is supported by the
++ processor. RDRAND is still available to user
++ space applications.
++
+ noresume [SWSUSP] Disables resume and restores original swap
+ space.
+
+--- a/arch/x86/include/asm/archrandom.h
++++ b/arch/x86/include/asm/archrandom.h
+@@ -70,4 +70,6 @@ GET_RANDOM(arch_get_random_int, unsigned
+
+ #endif /* CONFIG_ARCH_RANDOM */
+
++extern void x86_init_rdrand(struct cpuinfo_x86 *c);
++
+ #endif /* ASM_X86_ARCHRANDOM_H */
+--- a/arch/x86/kernel/cpu/Makefile
++++ b/arch/x86/kernel/cpu/Makefile
+@@ -15,6 +15,7 @@ CFLAGS_common.o := $(nostackp)
+ obj-y := intel_cacheinfo.o scattered.o topology.o
+ obj-y += proc.o capflags.o powerflags.o common.o
+ obj-y += vmware.o hypervisor.o sched.o mshyperv.o
++obj-y += rdrand.o
+
+ obj-$(CONFIG_X86_32) += bugs.o
+ obj-$(CONFIG_X86_64) += bugs_64.o
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -15,6 +15,7 @@
+ #include <asm/stackprotector.h>
+ #include <asm/perf_event.h>
+ #include <asm/mmu_context.h>
++#include <asm/archrandom.h>
+ #include <asm/hypervisor.h>
+ #include <asm/processor.h>
+ #include <asm/sections.h>
+@@ -852,6 +853,7 @@ static void __cpuinit identify_cpu(struc
+ #endif
+
+ init_hypervisor(c);
++ x86_init_rdrand(c);
+
+ /*
+ * Clear/Set all flags overriden by options, need do it
+--- /dev/null
++++ b/arch/x86/kernel/cpu/rdrand.c
+@@ -0,0 +1,73 @@
++/*
++ * This file is part of the Linux kernel.
++ *
++ * Copyright (c) 2011, Intel Corporation
++ * Authors: Fenghua Yu <fenghua.yu@intel.com>,
++ * H. Peter Anvin <hpa@linux.intel.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms and conditions of the GNU General Public License,
++ * version 2, as published by the Free Software Foundation.
++ *
++ * This program is distributed in the hope it will be useful, but WITHOUT
++ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
++ * more details.
++ *
++ * You should have received a copy of the GNU General Public License along with
++ * this program; if not, write to the Free Software Foundation, Inc.,
++ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
++ *
++ */
++
++#include <asm/processor.h>
++#include <asm/archrandom.h>
++#include <asm/sections.h>
++
++static int __init x86_rdrand_setup(char *s)
++{
++ setup_clear_cpu_cap(X86_FEATURE_RDRAND);
++ return 1;
++}
++__setup("nordrand", x86_rdrand_setup);
++
++/* We can't use arch_get_random_long() here since alternatives haven't run */
++static inline int rdrand_long(unsigned long *v)
++{
++ int ok;
++ asm volatile("1: " RDRAND_LONG "\n\t"
++ "jc 2f\n\t"
++ "decl %0\n\t"
++ "jnz 1b\n\t"
++ "2:"
++ : "=r" (ok), "=a" (*v)
++ : "0" (RDRAND_RETRY_LOOPS));
++ return ok;
++}
++
++/*
++ * Force a reseed cycle; we are architecturally guaranteed a reseed
++ * after no more than 512 128-bit chunks of random data. This also
++ * acts as a test of the CPU capability.
++ */
++#define RESEED_LOOP ((512*128)/sizeof(unsigned long))
++
++void __cpuinit x86_init_rdrand(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_ARCH_RANDOM
++ unsigned long tmp;
++ int i, count, ok;
++
++ if (!cpu_has(c, X86_FEATURE_RDRAND))
++ return; /* Nothing to do */
++
++ for (count = i = 0; i < RESEED_LOOP; i++) {
++ ok = rdrand_long(&tmp);
++ if (ok)
++ count++;
++ }
++
++ if (count != RESEED_LOOP)
++ clear_cpu_cap(c, X86_FEATURE_RDRAND);
++#endif
++}