--- /dev/null
+From c4686c71a9183f76e3ef59098da5c098748672f6 Mon Sep 17 00:00:00 2001
+From: Thomas Renninger <trenn@suse.de>
+Date: Thu, 12 Jul 2012 12:24:33 +0200
+Subject: cpufreq / ACPI: Fix not loading acpi-cpufreq driver regression
+
+From: Thomas Renninger <trenn@suse.de>
+
+commit c4686c71a9183f76e3ef59098da5c098748672f6 upstream.
+
+Commit d640113fe80e45ebd4a5b420b introduced a regression on SMP
+systems where the processor core with ACPI id zero is disabled
+(typically should be the case because of hyperthreading).
+The regression got spread through stable kernels.
+On 3.0.X it got introduced via 3.0.18.
+
+Such platforms may be rare, but do exist.
+Look out for a disabled processor with acpi_id 0 in dmesg:
+ACPI: LAPIC (acpi_id[0x00] lapic_id[0x10] disabled)
+
+This problem has been observed on a:
+HP Proliant BL280c G6 blade
+
+This patch restricts the introduced workaround to platforms
+with nr_cpu_ids <= 1.
+
+Signed-off-by: Thomas Renninger <trenn@suse.de>
+Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/processor_core.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/acpi/processor_core.c
++++ b/drivers/acpi/processor_core.c
+@@ -188,10 +188,12 @@ int acpi_get_cpuid(acpi_handle handle, i
+ * Processor (CPU3, 0x03, 0x00000410, 0x06) {}
+ * }
+ *
+- * Ignores apic_id and always return 0 for CPU0's handle.
++ * Ignores apic_id and always returns 0 for the processor
++ * handle with acpi id 0 if nr_cpu_ids is 1.
++ * This should be the case if SMP tables are not found.
+ * Return -1 for other CPU's handle.
+ */
+- if (acpi_id == 0)
++ if (nr_cpu_ids <= 1 && acpi_id == 0)
+ return acpi_id;
+ else
+ return apic_id;
--- /dev/null
+From 4a26620df451ad46151ad21d711ed43e963c004e Mon Sep 17 00:00:00 2001
+From: Tyler Hicks <tyhicks@canonical.com>
+Date: Sat, 5 Nov 2011 13:45:08 -0400
+Subject: eCryptfs: Improve statfs reporting
+
+From: Tyler Hicks <tyhicks@canonical.com>
+
+commit 4a26620df451ad46151ad21d711ed43e963c004e upstream.
+
+statfs() calls on eCryptfs files returned the wrong filesystem type and,
+when using filename encryption, the wrong maximum filename length.
+
+If mount-wide filename encryption is enabled, the cipher block size and
+the lower filesystem's max filename length will determine the max
+eCryptfs filename length. Pre-tested, known good lengths are used when
+the lower filesystem's namelen is 255 and a cipher with 8 or 16 byte
+block sizes is used. In other, less common cases, we fall back to a safe
+rounded-down estimate when determining the eCryptfs namelen.
+
+https://launchpad.net/bugs/885744
+
+Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
+Reported-by: Kees Cook <keescook@chromium.org>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Reviewed-by: John Johansen <john.johansen@canonical.com>
+Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ecryptfs/crypto.c | 68 +++++++++++++++++++++++++++++++++++++-----
+ fs/ecryptfs/ecryptfs_kernel.h | 6 +++
+ fs/ecryptfs/keystore.c | 9 +----
+ fs/ecryptfs/super.c | 14 ++++++++
+ 4 files changed, 83 insertions(+), 14 deletions(-)
+
+--- a/fs/ecryptfs/crypto.c
++++ b/fs/ecryptfs/crypto.c
+@@ -1987,6 +1987,17 @@ out:
+ return;
+ }
+
++static size_t ecryptfs_max_decoded_size(size_t encoded_size)
++{
++ /* Not exact; conservatively long. Every block of 4
++ * encoded characters decodes into a block of 3
++ * decoded characters. This segment of code provides
++ * the caller with the maximum amount of allocated
++ * space that @dst will need to point to in a
++ * subsequent call. */
++ return ((encoded_size + 1) * 3) / 4;
++}
++
+ /**
+ * ecryptfs_decode_from_filename
+ * @dst: If NULL, this function only sets @dst_size and returns. If
+@@ -2005,13 +2016,7 @@ ecryptfs_decode_from_filename(unsigned c
+ size_t dst_byte_offset = 0;
+
+ if (dst == NULL) {
+- /* Not exact; conservatively long. Every block of 4
+- * encoded characters decodes into a block of 3
+- * decoded characters. This segment of code provides
+- * the caller with the maximum amount of allocated
+- * space that @dst will need to point to in a
+- * subsequent call. */
+- (*dst_size) = (((src_size + 1) * 3) / 4);
++ (*dst_size) = ecryptfs_max_decoded_size(src_size);
+ goto out;
+ }
+ while (src_byte_offset < src_size) {
+@@ -2236,3 +2241,52 @@ out_free:
+ out:
+ return rc;
+ }
++
++#define ENC_NAME_MAX_BLOCKLEN_8_OR_16 143
++
++int ecryptfs_set_f_namelen(long *namelen, long lower_namelen,
++ struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
++{
++ struct blkcipher_desc desc;
++ struct mutex *tfm_mutex;
++ size_t cipher_blocksize;
++ int rc;
++
++ if (!(mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)) {
++ (*namelen) = lower_namelen;
++ return 0;
++ }
++
++ rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&desc.tfm, &tfm_mutex,
++ mount_crypt_stat->global_default_fn_cipher_name);
++ if (unlikely(rc)) {
++ (*namelen) = 0;
++ return rc;
++ }
++
++ mutex_lock(tfm_mutex);
++ cipher_blocksize = crypto_blkcipher_blocksize(desc.tfm);
++ mutex_unlock(tfm_mutex);
++
++ /* Return an exact amount for the common cases */
++ if (lower_namelen == NAME_MAX
++ && (cipher_blocksize == 8 || cipher_blocksize == 16)) {
++ (*namelen) = ENC_NAME_MAX_BLOCKLEN_8_OR_16;
++ return 0;
++ }
++
++ /* Return a safe estimate for the uncommon cases */
++ (*namelen) = lower_namelen;
++ (*namelen) -= ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX_SIZE;
++ /* Since this is the max decoded size, subtract 1 "decoded block" len */
++ (*namelen) = ecryptfs_max_decoded_size(*namelen) - 3;
++ (*namelen) -= ECRYPTFS_TAG_70_MAX_METADATA_SIZE;
++ (*namelen) -= ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES;
++ /* Worst case is that the filename is padded nearly a full block size */
++ (*namelen) -= cipher_blocksize - 1;
++
++ if ((*namelen) < 0)
++ (*namelen) = 0;
++
++ return 0;
++}
+--- a/fs/ecryptfs/ecryptfs_kernel.h
++++ b/fs/ecryptfs/ecryptfs_kernel.h
+@@ -227,6 +227,10 @@ ecryptfs_get_key_payload_data(struct key
+ #define ECRYPTFS_NON_NULL 0x42 /* A reasonable substitute for NULL */
+ #define MD5_DIGEST_SIZE 16
+ #define ECRYPTFS_TAG_70_DIGEST_SIZE MD5_DIGEST_SIZE
++#define ECRYPTFS_TAG_70_MIN_METADATA_SIZE (1 + ECRYPTFS_MIN_PKT_LEN_SIZE \
++ + ECRYPTFS_SIG_SIZE + 1 + 1)
++#define ECRYPTFS_TAG_70_MAX_METADATA_SIZE (1 + ECRYPTFS_MAX_PKT_LEN_SIZE \
++ + ECRYPTFS_SIG_SIZE + 1 + 1)
+ #define ECRYPTFS_FEK_ENCRYPTED_FILENAME_PREFIX "ECRYPTFS_FEK_ENCRYPTED."
+ #define ECRYPTFS_FEK_ENCRYPTED_FILENAME_PREFIX_SIZE 23
+ #define ECRYPTFS_FNEK_ENCRYPTED_FILENAME_PREFIX "ECRYPTFS_FNEK_ENCRYPTED."
+@@ -765,6 +769,8 @@ ecryptfs_parse_tag_70_packet(char **file
+ size_t *packet_size,
+ struct ecryptfs_mount_crypt_stat *mount_crypt_stat,
+ char *data, size_t max_packet_size);
++int ecryptfs_set_f_namelen(long *namelen, long lower_namelen,
++ struct ecryptfs_mount_crypt_stat *mount_crypt_stat);
+ int ecryptfs_derive_iv(char *iv, struct ecryptfs_crypt_stat *crypt_stat,
+ loff_t offset);
+
+--- a/fs/ecryptfs/keystore.c
++++ b/fs/ecryptfs/keystore.c
+@@ -678,10 +678,7 @@ ecryptfs_write_tag_70_packet(char *dest,
+ * Octets N3-N4: Block-aligned encrypted filename
+ * - Consists of a minimum number of random characters, a \0
+ * separator, and then the filename */
+- s->max_packet_size = (1 /* Tag 70 identifier */
+- + 3 /* Max Tag 70 packet size */
+- + ECRYPTFS_SIG_SIZE /* FNEK sig */
+- + 1 /* Cipher identifier */
++ s->max_packet_size = (ECRYPTFS_TAG_70_MAX_METADATA_SIZE
+ + s->block_aligned_filename_size);
+ if (dest == NULL) {
+ (*packet_size) = s->max_packet_size;
+@@ -933,10 +930,10 @@ ecryptfs_parse_tag_70_packet(char **file
+ goto out;
+ }
+ s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+- if (max_packet_size < (1 + 1 + ECRYPTFS_SIG_SIZE + 1 + 1)) {
++ if (max_packet_size < ECRYPTFS_TAG_70_MIN_METADATA_SIZE) {
+ printk(KERN_WARNING "%s: max_packet_size is [%zd]; it must be "
+ "at least [%d]\n", __func__, max_packet_size,
+- (1 + 1 + ECRYPTFS_SIG_SIZE + 1 + 1));
++ ECRYPTFS_TAG_70_MIN_METADATA_SIZE);
+ rc = -EINVAL;
+ goto out;
+ }
+--- a/fs/ecryptfs/super.c
++++ b/fs/ecryptfs/super.c
+@@ -30,6 +30,8 @@
+ #include <linux/seq_file.h>
+ #include <linux/file.h>
+ #include <linux/crypto.h>
++#include <linux/statfs.h>
++#include <linux/magic.h>
+ #include "ecryptfs_kernel.h"
+
+ struct kmem_cache *ecryptfs_inode_info_cache;
+@@ -103,10 +105,20 @@ static void ecryptfs_destroy_inode(struc
+ static int ecryptfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+ {
+ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
++ int rc;
+
+ if (!lower_dentry->d_sb->s_op->statfs)
+ return -ENOSYS;
+- return lower_dentry->d_sb->s_op->statfs(lower_dentry, buf);
++
++ rc = lower_dentry->d_sb->s_op->statfs(lower_dentry, buf);
++ if (rc)
++ return rc;
++
++ buf->f_type = ECRYPTFS_SUPER_MAGIC;
++ rc = ecryptfs_set_f_namelen(&buf->f_namelen, buf->f_namelen,
++ &ecryptfs_superblock_to_private(dentry->d_sb)->mount_crypt_stat);
++
++ return rc;
+ }
+
+ /**
--- /dev/null
+From akpm@linux-foundation.org Thu Sep 27 10:42:06 2012
+From: Li Zhong <zhong@linux.vnet.ibm.com>
+Date: Tue, 24 Jul 2012 15:02:49 -0700
+Subject: Fix a dead loop in async_synchronize_full()
+
+From: Li Zhong <zhong@linux.vnet.ibm.com>
+
+[Fixed upstream by commits 2955b47d2c1983998a8c5915cb96884e67f7cb53 and
+a4683487f90bfe3049686fc5c566bdc1ad03ace6 from Dan Williams, but they are much
+more intrusive than this tiny fix, according to Andrew - gregkh]
+
+This patch tries to fix a dead loop in async_synchronize_full(), which
+could be seen when preemption is disabled on a single cpu machine.
+
+void async_synchronize_full(void)
+{
+ do {
+ async_synchronize_cookie(next_cookie);
+ } while (!list_empty(&async_running) || !
+list_empty(&async_pending));
+}
+
+async_synchronize_cookie() calls async_synchronize_cookie_domain() with
+&async_running as the default domain to synchronize.
+
+However, there might be some works in the async_pending list from other
+domains. On a single cpu system, without preemption, there is no chance
+for the other works to finish, so async_synchronize_full() enters a dead
+loop.
+
+It seems async_synchronize_full() wants to synchronize all entries in
+all running lists(domains), so maybe we could just check the entry_count
+to know whether all works are finished.
+
+Currently, async_synchronize_cookie_domain() expects a non-NULL running
+list ( if NULL, there would be NULL pointer dereference ), so maybe a
+NULL pointer could be used as an indication for the functions to
+synchronize all works in all domains.
+
+Reported-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Signed-off-by: Li Zhong <zhong@linux.vnet.ibm.com>
+Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Tested-by: Christian Kujau <lists@nerdbynature.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Dan Williams <dan.j.williams@gmail.com>
+Cc: Christian Kujau <lists@nerdbynature.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/async.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/async.c b/kernel/async.c
+index bd0c168..32d8dc9 100644
+--- a/kernel/async.c
++++ b/kernel/async.c
+@@ -86,6 +86,13 @@ static async_cookie_t __lowest_in_progress(struct list_head *running)
+ {
+ struct async_entry *entry;
+
++ if (!running) { /* just check the entry count */
++ if (atomic_read(&entry_count))
++ return 0; /* smaller than any cookie */
++ else
++ return next_cookie;
++ }
++
+ if (!list_empty(running)) {
+ entry = list_first_entry(running,
+ struct async_entry, list);
+@@ -236,9 +243,7 @@ EXPORT_SYMBOL_GPL(async_schedule_domain);
+ */
+ void async_synchronize_full(void)
+ {
+- do {
+- async_synchronize_cookie(next_cookie);
+- } while (!list_empty(&async_running) || !list_empty(&async_pending));
++ async_synchronize_cookie_domain(next_cookie, NULL);
+ }
+ EXPORT_SYMBOL_GPL(async_synchronize_full);
+
+@@ -258,7 +263,7 @@ EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
+ /**
+ * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
+ * @cookie: async_cookie_t to use as checkpoint
+- * @running: running list to synchronize on
++ * @running: running list to synchronize on, NULL indicates all lists
+ *
+ * This function waits until all asynchronous function calls for the
+ * synchronization domain specified by the running list @list submitted
+--
+1.7.9.5
+
--- /dev/null
+From c531077f40abc9f2129c4c83a30b3f8d6ce1c0e7 Mon Sep 17 00:00:00 2001
+From: Daniel J Blueman <daniel@quora.org>
+Date: Mon, 23 Jul 2012 12:22:37 +0800
+Subject: libata: Prevent interface errors with Seagate FreeAgent GoFlex
+
+From: Daniel J Blueman <daniel@quora.org>
+
+commit c531077f40abc9f2129c4c83a30b3f8d6ce1c0e7 upstream.
+
+When using my Seagate FreeAgent GoFlex eSATAp external disk enclosure,
+interface errors are always seen until 1.5Gbps is negotiated [1]. This
+occurs using any disk in the enclosure, and when the disk is connected
+directly with a generic passive eSATAp cable, we see stable 3Gbps
+operation as expected.
+
+Blacklist 3Gbps mode to avoid data loss and the ~30s delay bus reset
+and renegotiation incurs.
+
+Signed-off-by: Daniel J Blueman <daniel@quora.org>
+Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/ata/libata-core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -4138,6 +4138,7 @@ static const struct ata_blacklist_entry
+
+ /* Devices which aren't very happy with higher link speeds */
+ { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, },
++ { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, },
+
+ /*
+ * Devices which choke on SETXFER. Applies only if both the
--- /dev/null
+From 4b71ca6bce8fab3d08c61bf330e781f957934ae1 Mon Sep 17 00:00:00 2001
+From: Jarod Wilson <jarod@redhat.com>
+Date: Mon, 4 Jun 2012 13:05:24 -0300
+Subject: media: lirc_sir: make device registration work
+
+From: Jarod Wilson <jarod@redhat.com>
+
+commit 4b71ca6bce8fab3d08c61bf330e781f957934ae1 upstream.
+
+For one, the driver device pointer needs to be filled in, or the lirc core
+will refuse to load the driver. And we really need to wire up all the
+platform_device bits. This has been tested via the lirc sourceforge tree
+and verified to work, been sitting there for months, finally getting
+around to sending it. :\
+
+Signed-off-by: Jarod Wilson <jarod@redhat.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
+CC: Josh Boyer <jwboyer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/lirc/lirc_sir.c | 60 ++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 58 insertions(+), 2 deletions(-)
+
+--- a/drivers/staging/lirc/lirc_sir.c
++++ b/drivers/staging/lirc/lirc_sir.c
+@@ -53,6 +53,7 @@
+ #include <linux/io.h>
+ #include <asm/irq.h>
+ #include <linux/fcntl.h>
++#include <linux/platform_device.h>
+ #ifdef LIRC_ON_SA1100
+ #include <asm/hardware.h>
+ #ifdef CONFIG_SA1100_COLLIE
+@@ -488,9 +489,11 @@ static struct lirc_driver driver = {
+ .owner = THIS_MODULE,
+ };
+
++static struct platform_device *lirc_sir_dev;
+
+ static int init_chrdev(void)
+ {
++ driver.dev = &lirc_sir_dev->dev;
+ driver.minor = lirc_register_driver(&driver);
+ if (driver.minor < 0) {
+ printk(KERN_ERR LIRC_DRIVER_NAME ": init_chrdev() failed.\n");
+@@ -1216,20 +1219,71 @@ static int init_lirc_sir(void)
+ return 0;
+ }
+
++static int __devinit lirc_sir_probe(struct platform_device *dev)
++{
++ return 0;
++}
++
++static int __devexit lirc_sir_remove(struct platform_device *dev)
++{
++ return 0;
++}
++
++static struct platform_driver lirc_sir_driver = {
++ .probe = lirc_sir_probe,
++ .remove = __devexit_p(lirc_sir_remove),
++ .driver = {
++ .name = "lirc_sir",
++ .owner = THIS_MODULE,
++ },
++};
+
+ static int __init lirc_sir_init(void)
+ {
+ int retval;
+
++ retval = platform_driver_register(&lirc_sir_driver);
++ if (retval) {
++ printk(KERN_ERR LIRC_DRIVER_NAME ": Platform driver register "
++ "failed!\n");
++ return -ENODEV;
++ }
++
++ lirc_sir_dev = platform_device_alloc("lirc_dev", 0);
++ if (!lirc_sir_dev) {
++ printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device alloc "
++ "failed!\n");
++ retval = -ENOMEM;
++ goto pdev_alloc_fail;
++ }
++
++ retval = platform_device_add(lirc_sir_dev);
++ if (retval) {
++ printk(KERN_ERR LIRC_DRIVER_NAME ": Platform device add "
++ "failed!\n");
++ retval = -ENODEV;
++ goto pdev_add_fail;
++ }
++
+ retval = init_chrdev();
+ if (retval < 0)
+- return retval;
++ goto fail;
++
+ retval = init_lirc_sir();
+ if (retval) {
+ drop_chrdev();
+- return retval;
++ goto fail;
+ }
++
+ return 0;
++
++fail:
++ platform_device_del(lirc_sir_dev);
++pdev_add_fail:
++ platform_device_put(lirc_sir_dev);
++pdev_alloc_fail:
++ platform_driver_unregister(&lirc_sir_driver);
++ return retval;
+ }
+
+ static void __exit lirc_sir_exit(void)
+@@ -1237,6 +1291,8 @@ static void __exit lirc_sir_exit(void)
+ drop_hardware();
+ drop_chrdev();
+ drop_port();
++ platform_device_unregister(lirc_sir_dev);
++ platform_driver_unregister(&lirc_sir_driver);
+ printk(KERN_INFO LIRC_DRIVER_NAME ": Uninstalled.\n");
+ }
+
--- /dev/null
+From 734b65417b24d6eea3e3d7457e1f11493890ee1d Mon Sep 17 00:00:00 2001
+From: "Rustad, Mark D" <mark.d.rustad@intel.com>
+Date: Wed, 18 Jul 2012 09:06:07 +0000
+Subject: net: Statically initialize init_net.dev_base_head
+
+From: "Rustad, Mark D" <mark.d.rustad@intel.com>
+
+commit 734b65417b24d6eea3e3d7457e1f11493890ee1d upstream.
+
+This change eliminates an initialization-order hazard most
+recently seen when netprio_cgroup is built into the kernel.
+
+With thanks to Eric Dumazet for catching a bug.
+
+Signed-off-by: Mark Rustad <mark.d.rustad@intel.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/core/dev.c | 3 ++-
+ net/core/net_namespace.c | 4 +++-
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -6260,7 +6260,8 @@ static struct hlist_head *netdev_create_
+ /* Initialize per network namespace state */
+ static int __net_init netdev_init(struct net *net)
+ {
+- INIT_LIST_HEAD(&net->dev_base_head);
++ if (net != &init_net)
++ INIT_LIST_HEAD(&net->dev_base_head);
+
+ net->dev_name_head = netdev_create_hash();
+ if (net->dev_name_head == NULL)
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -24,7 +24,9 @@ static DEFINE_MUTEX(net_mutex);
+ LIST_HEAD(net_namespace_list);
+ EXPORT_SYMBOL_GPL(net_namespace_list);
+
+-struct net init_net;
++struct net init_net = {
++ .dev_base_head = LIST_HEAD_INIT(init_net.dev_base_head),
++};
+ EXPORT_SYMBOL(init_net);
+
+ #define INITIAL_NET_GEN_PTRS 13 /* +1 for len +2 for rcu_head */
--- /dev/null
+From 06b6a1cf6e776426766298d055bb3991957d90a7 Mon Sep 17 00:00:00 2001
+From: Weiping Pan <wpan@redhat.com>
+Date: Mon, 23 Jul 2012 10:37:48 +0800
+Subject: rds: set correct msg_namelen
+
+From: Weiping Pan <wpan@redhat.com>
+
+commit 06b6a1cf6e776426766298d055bb3991957d90a7 upstream.
+
+Jay Fenlason (fenlason@redhat.com) found a bug,
+that recvfrom() on an RDS socket can return the contents of random kernel
+memory to userspace if it was called with an address length larger than
+sizeof(struct sockaddr_in).
+rds_recvmsg() also fails to set the addr_len parameter properly before
+returning, but that's just a bug.
+There are also a number of cases where recvfrom() can return an entirely bogus
+address. Anything in rds_recvmsg() that returns a non-negative value but does
+not go through the "sin = (struct sockaddr_in *)msg->msg_name;" code path
+at the end of the while(1) loop will return up to 128 bytes of kernel memory
+to userspace.
+
+And I write two test programs to reproduce this bug, you will see that in
+rds_server, fromAddr will be overwritten and the following sock_fd will be
+destroyed.
+Yes, it is the programmer's fault to set msg_namelen incorrectly, but it is
+better to make the kernel copy the real length of address to user space in
+such case.
+
+How to run the test programs ?
+I test them on 32bit x86 system, 3.5.0-rc7.
+
+1 compile
+gcc -o rds_client rds_client.c
+gcc -o rds_server rds_server.c
+
+2 run ./rds_server on one console
+
+3 run ./rds_client on another console
+
+4 you will see something like:
+server is waiting to receive data...
+old socket fd=3
+server received data from client:data from client
+msg.msg_namelen=32
+new socket fd=-1067277685
+sendmsg()
+: Bad file descriptor
+
+/***************** rds_client.c ********************/
+
+int main(void)
+{
+ int sock_fd;
+ struct sockaddr_in serverAddr;
+ struct sockaddr_in toAddr;
+ char recvBuffer[128] = "data from client";
+ struct msghdr msg;
+ struct iovec iov;
+
+ sock_fd = socket(AF_RDS, SOCK_SEQPACKET, 0);
+ if (sock_fd < 0) {
+ perror("create socket error\n");
+ exit(1);
+ }
+
+ memset(&serverAddr, 0, sizeof(serverAddr));
+ serverAddr.sin_family = AF_INET;
+ serverAddr.sin_addr.s_addr = inet_addr("127.0.0.1");
+ serverAddr.sin_port = htons(4001);
+
+ if (bind(sock_fd, (struct sockaddr*)&serverAddr, sizeof(serverAddr)) < 0) {
+ perror("bind() error\n");
+ close(sock_fd);
+ exit(1);
+ }
+
+ memset(&toAddr, 0, sizeof(toAddr));
+ toAddr.sin_family = AF_INET;
+ toAddr.sin_addr.s_addr = inet_addr("127.0.0.1");
+ toAddr.sin_port = htons(4000);
+ msg.msg_name = &toAddr;
+ msg.msg_namelen = sizeof(toAddr);
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_iov->iov_base = recvBuffer;
+ msg.msg_iov->iov_len = strlen(recvBuffer) + 1;
+ msg.msg_control = 0;
+ msg.msg_controllen = 0;
+ msg.msg_flags = 0;
+
+ if (sendmsg(sock_fd, &msg, 0) == -1) {
+ perror("sendto() error\n");
+ close(sock_fd);
+ exit(1);
+ }
+
+ printf("client send data:%s\n", recvBuffer);
+
+ memset(recvBuffer, '\0', 128);
+
+ msg.msg_name = &toAddr;
+ msg.msg_namelen = sizeof(toAddr);
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_iov->iov_base = recvBuffer;
+ msg.msg_iov->iov_len = 128;
+ msg.msg_control = 0;
+ msg.msg_controllen = 0;
+ msg.msg_flags = 0;
+ if (recvmsg(sock_fd, &msg, 0) == -1) {
+ perror("recvmsg() error\n");
+ close(sock_fd);
+ exit(1);
+ }
+
+ printf("receive data from server:%s\n", recvBuffer);
+
+ close(sock_fd);
+
+ return 0;
+}
+
+/***************** rds_server.c ********************/
+
+int main(void)
+{
+ struct sockaddr_in fromAddr;
+ int sock_fd;
+ struct sockaddr_in serverAddr;
+ unsigned int addrLen;
+ char recvBuffer[128];
+ struct msghdr msg;
+ struct iovec iov;
+
+ sock_fd = socket(AF_RDS, SOCK_SEQPACKET, 0);
+ if(sock_fd < 0) {
+ perror("create socket error\n");
+ exit(0);
+ }
+
+ memset(&serverAddr, 0, sizeof(serverAddr));
+ serverAddr.sin_family = AF_INET;
+ serverAddr.sin_addr.s_addr = inet_addr("127.0.0.1");
+ serverAddr.sin_port = htons(4000);
+ if (bind(sock_fd, (struct sockaddr*)&serverAddr, sizeof(serverAddr)) < 0) {
+ perror("bind error\n");
+ close(sock_fd);
+ exit(1);
+ }
+
+ printf("server is waiting to receive data...\n");
+ msg.msg_name = &fromAddr;
+
+ /*
+ * I add 16 to sizeof(fromAddr), ie 32,
+ * and pay attention to the definition of fromAddr,
+ * recvmsg() will overwrite sock_fd,
+ * since kernel will copy 32 bytes to userspace.
+ *
+ * If you just use sizeof(fromAddr), it works fine.
+ * */
+ msg.msg_namelen = sizeof(fromAddr) + 16;
+ /* msg.msg_namelen = sizeof(fromAddr); */
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_iov->iov_base = recvBuffer;
+ msg.msg_iov->iov_len = 128;
+ msg.msg_control = 0;
+ msg.msg_controllen = 0;
+ msg.msg_flags = 0;
+
+ while (1) {
+ printf("old socket fd=%d\n", sock_fd);
+ if (recvmsg(sock_fd, &msg, 0) == -1) {
+ perror("recvmsg() error\n");
+ close(sock_fd);
+ exit(1);
+ }
+ printf("server received data from client:%s\n", recvBuffer);
+ printf("msg.msg_namelen=%d\n", msg.msg_namelen);
+ printf("new socket fd=%d\n", sock_fd);
+ strcat(recvBuffer, "--data from server");
+ if (sendmsg(sock_fd, &msg, 0) == -1) {
+ perror("sendmsg()\n");
+ close(sock_fd);
+ exit(1);
+ }
+ }
+
+ close(sock_fd);
+ return 0;
+}
+
+Signed-off-by: Weiping Pan <wpan@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/rds/recv.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/rds/recv.c
++++ b/net/rds/recv.c
+@@ -409,6 +409,8 @@ int rds_recvmsg(struct kiocb *iocb, stru
+
+ rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo);
+
++ msg->msg_namelen = 0;
++
+ if (msg_flags & MSG_OOB)
+ goto out;
+
+@@ -484,6 +486,7 @@ int rds_recvmsg(struct kiocb *iocb, stru
+ sin->sin_port = inc->i_hdr.h_sport;
+ sin->sin_addr.s_addr = inc->i_saddr;
+ memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
++ msg->msg_namelen = sizeof(*sin);
+ }
+ break;
+ }
--- /dev/null
+From 8323f26ce3425460769605a6aece7a174edaa7d1 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 22 Jun 2012 13:36:05 +0200
+Subject: sched: Fix race in task_group()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 8323f26ce3425460769605a6aece7a174edaa7d1 upstream.
+
+Stefan reported a crash on a kernel before a3e5d1091c1 ("sched:
+Don't call task_group() too many times in set_task_rq()"), he
+found the reason to be that the multiple task_group()
+invocations in set_task_rq() returned different values.
+
+Looking at all that I found a lack of serialization and plain
+wrong comments.
+
+The below tries to fix it using an extra pointer which is
+updated under the appropriate scheduler locks. Its not pretty,
+but I can't really see another way given how all the cgroup
+stuff works.
+
+Reported-and-tested-by: Stefan Bader <stefan.bader@canonical.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Link: http://lkml.kernel.org/r/1340364965.18025.71.camel@twins
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/init_task.h | 12 +++++++++++-
+ include/linux/sched.h | 5 ++++-
+ kernel/sched.c | 32 ++++++++++++++++++--------------
+ 3 files changed, 33 insertions(+), 16 deletions(-)
+
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -124,8 +124,17 @@ extern struct group_info init_groups;
+
+ extern struct cred init_cred;
+
++extern struct task_group root_task_group;
++
++#ifdef CONFIG_CGROUP_SCHED
++# define INIT_CGROUP_SCHED(tsk) \
++ .sched_task_group = &root_task_group,
++#else
++# define INIT_CGROUP_SCHED(tsk)
++#endif
++
+ #ifdef CONFIG_PERF_EVENTS
+-# define INIT_PERF_EVENTS(tsk) \
++# define INIT_PERF_EVENTS(tsk) \
+ .perf_event_mutex = \
+ __MUTEX_INITIALIZER(tsk.perf_event_mutex), \
+ .perf_event_list = LIST_HEAD_INIT(tsk.perf_event_list),
+@@ -160,6 +169,7 @@ extern struct cred init_cred;
+ }, \
+ .tasks = LIST_HEAD_INIT(tsk.tasks), \
+ INIT_PUSHABLE_TASKS(tsk) \
++ INIT_CGROUP_SCHED(tsk) \
+ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
+ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
+ .real_parent = &tsk, \
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1235,6 +1235,9 @@ struct task_struct {
+ const struct sched_class *sched_class;
+ struct sched_entity se;
+ struct sched_rt_entity rt;
++#ifdef CONFIG_CGROUP_SCHED
++ struct task_group *sched_task_group;
++#endif
+
+ #ifdef CONFIG_PREEMPT_NOTIFIERS
+ /* list of struct preempt_notifier: */
+@@ -2613,7 +2616,7 @@ extern int sched_group_set_rt_period(str
+ extern long sched_group_rt_period(struct task_group *tg);
+ extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
+ #endif
+-#endif
++#endif /* CONFIG_CGROUP_SCHED */
+
+ extern int task_can_switch_user(struct user_struct *up,
+ struct task_struct *tsk);
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -605,22 +605,19 @@ static inline int cpu_of(struct rq *rq)
+ /*
+ * Return the group to which this tasks belongs.
+ *
+- * We use task_subsys_state_check() and extend the RCU verification with
+- * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
+- * task it moves into the cgroup. Therefore by holding either of those locks,
+- * we pin the task to the current cgroup.
++ * We cannot use task_subsys_state() and friends because the cgroup
++ * subsystem changes that value before the cgroup_subsys::attach() method
++ * is called, therefore we cannot pin it and might observe the wrong value.
++ *
++ * The same is true for autogroup's p->signal->autogroup->tg, the autogroup
++ * core changes this before calling sched_move_task().
++ *
++ * Instead we use a 'copy' which is updated from sched_move_task() while
++ * holding both task_struct::pi_lock and rq::lock.
+ */
+ static inline struct task_group *task_group(struct task_struct *p)
+ {
+- struct task_group *tg;
+- struct cgroup_subsys_state *css;
+-
+- css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
+- lockdep_is_held(&p->pi_lock) ||
+- lockdep_is_held(&task_rq(p)->lock));
+- tg = container_of(css, struct task_group, css);
+-
+- return autogroup_task_group(p, tg);
++ return p->sched_task_group;
+ }
+
+ /* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
+@@ -2206,7 +2203,7 @@ void set_task_cpu(struct task_struct *p,
+ * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
+ *
+ * sched_move_task() holds both and thus holding either pins the cgroup,
+- * see set_task_rq().
++ * see task_group().
+ *
+ * Furthermore, all task_rq users should acquire both locks, see
+ * task_rq_lock().
+@@ -8545,6 +8542,7 @@ void sched_destroy_group(struct task_gro
+ */
+ void sched_move_task(struct task_struct *tsk)
+ {
++ struct task_group *tg;
+ int on_rq, running;
+ unsigned long flags;
+ struct rq *rq;
+@@ -8559,6 +8557,12 @@ void sched_move_task(struct task_struct
+ if (unlikely(running))
+ tsk->sched_class->put_prev_task(rq, tsk);
+
++ tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
++ lockdep_is_held(&tsk->sighand->siglock)),
++ struct task_group, css);
++ tg = autogroup_task_group(tsk, tg);
++ tsk->sched_task_group = tg;
++
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+ if (tsk->sched_class->task_move_group)
+ tsk->sched_class->task_move_group(tsk, on_rq);
bluetooth-btusb-add-vendor-specific-id-0a5c-21f4-bcm20702a0.patch
bluetooth-use-usb_vendor_and_interface-for-broadcom-devices.patch
bluetooth-add-support-for-apple-vendor-specific-devices.patch
+net-statically-initialize-init_net.dev_base_head.patch
+fix-a-dead-loop-in-async_synchronize_full.patch
+rds-set-correct-msg_namelen.patch
+libata-prevent-interface-errors-with-seagate-freeagent-goflex.patch
+cpufreq-acpi-fix-not-loading-acpi-cpufreq-driver-regression.patch
+sched-fix-race-in-task_group.patch
+media-lirc_sir-make-device-registration-work.patch
+ecryptfs-improve-statfs-reporting.patch