--- /dev/null
+From b706e65b40417e03c2451bb3f92488f3736843fa Mon Sep 17 00:00:00 2001
+From: Andrey Yurovsky <andrey@cozybit.com>
+Date: Mon, 13 Oct 2008 18:23:07 -0700
+Subject: ath5k: fix mesh point operation
+
+From: Andrey Yurovsky <andrey@cozybit.com>
+
+commit b706e65b40417e03c2451bb3f92488f3736843fa upstream.
+
+This patch fixes mesh point operation (thanks to YanBo for pointing
+out the problem): make mesh point interfaces start beaconing when
+they come up and configure the RX filter in mesh mode so that mesh
+beacons and action frames are received. Add mesh point to the check
+in ath5k_add_interface. Tested with multiple AR5211 cards.
+
+Signed-off-by: Andrey Yurovsky <andrey@cozybit.com>
+Acked-by: Nick Kossifidis <mickflemm@gmail.com>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Cc: Bob Copeland <me@bobcopeland.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/net/wireless/ath5k/base.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireless/ath5k/base.c
++++ b/drivers/net/wireless/ath5k/base.c
+@@ -2157,7 +2157,8 @@ ath5k_beacon_config(struct ath5k_softc *
+
+ if (sc->opmode == NL80211_IFTYPE_STATION) {
+ sc->imask |= AR5K_INT_BMISS;
+- } else if (sc->opmode == NL80211_IFTYPE_ADHOC) {
++ } else if (sc->opmode == NL80211_IFTYPE_ADHOC ||
++ sc->opmode == NL80211_IFTYPE_MESH_POINT) {
+ /*
+ * In IBSS mode we use a self-linked tx descriptor and let the
+ * hardware send the beacons automatically. We have to load it
+@@ -2748,6 +2749,7 @@ static int ath5k_add_interface(struct ie
+ switch (conf->type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
++ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_MONITOR:
+ sc->opmode = conf->type;
+ break;
+@@ -2819,7 +2821,8 @@ ath5k_config_interface(struct ieee80211_
+ }
+
+ if (conf->changed & IEEE80211_IFCC_BEACON &&
+- vif->type == NL80211_IFTYPE_ADHOC) {
++ (vif->type == NL80211_IFTYPE_ADHOC ||
++ vif->type == NL80211_IFTYPE_MESH_POINT)) {
+ struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+ if (!beacon) {
+ ret = -ENOMEM;
+@@ -2951,6 +2954,9 @@ static void ath5k_configure_filter(struc
+ sc->opmode == NL80211_IFTYPE_ADHOC) {
+ rfilt |= AR5K_RX_FILTER_BEACON;
+ }
++ if (sc->opmode == NL80211_IFTYPE_MESH_POINT)
++ rfilt |= AR5K_RX_FILTER_CONTROL | AR5K_RX_FILTER_BEACON |
++ AR5K_RX_FILTER_PROBEREQ | AR5K_RX_FILTER_PROM;
+
+ /* Set filters */
+ ath5k_hw_set_rx_filter(ah,rfilt);
--- /dev/null
+From 26c3679101dbccc054dcf370143941844ba70531 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@suse.cz>
+Date: Mon, 26 Jan 2009 15:00:59 +0100
+Subject: fuse: destroy bdi on umount
+
+From: Miklos Szeredi <mszeredi@suse.cz>
+
+commit 26c3679101dbccc054dcf370143941844ba70531 upstream.
+
+If a fuse filesystem is unmounted but the device file descriptor
+remains open and a new mount reuses the old device number, then the
+mount fails with EEXIST and the following warning is printed in the
+kernel log:
+
+ WARNING: at fs/sysfs/dir.c:462 sysfs_add_one+0x35/0x3d()
+ sysfs: duplicate filename '0:15' can not be created
+
+The cause is that the bdi belonging to the fuse filesystem was
+destroyed only after the device file was released. Fix this by
+calling bdi_destroy() from fuse_put_super() instead.
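+
+For orientation, a condensed sketch of the teardown order after this
+patch (from memory, not the literal 2.6.28 source; helper names are
+only as I recall them):
+
+	static void fuse_put_super(struct super_block *sb)
+	{
+		struct fuse_conn *fc = get_fuse_conn_super(sb);
+
+		/* ... mark the connection dead, unhash it from fuse_conn_list ... */
+		bdi_destroy(&fc->bdi);	/* drop the "MAJOR:MINOR" bdi at umount */
+		fuse_conn_put(fc);	/* fc may outlive umount while /dev/fuse stays open */
+	}
+
+Presumably for the same reason, the fs/fuse/dev.c hunk below guards
+clear_bdi_congested() with fc->connected: a background request can
+still complete through the open device file after the bdi is gone.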
+
+Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/fuse/dev.c | 3 ++-
+ fs/fuse/inode.c | 2 +-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -281,7 +281,8 @@ static void request_end(struct fuse_conn
+ fc->blocked = 0;
+ wake_up_all(&fc->blocked_waitq);
+ }
+- if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
++ if (fc->num_background == FUSE_CONGESTION_THRESHOLD &&
++ fc->connected) {
+ clear_bdi_congested(&fc->bdi, READ);
+ clear_bdi_congested(&fc->bdi, WRITE);
+ }
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -292,6 +292,7 @@ static void fuse_put_super(struct super_
+ list_del(&fc->entry);
+ fuse_ctl_remove_conn(fc);
+ mutex_unlock(&fuse_mutex);
++ bdi_destroy(&fc->bdi);
+ fuse_conn_put(fc);
+ }
+
+@@ -531,7 +532,6 @@ void fuse_conn_put(struct fuse_conn *fc)
+ if (fc->destroy_req)
+ fuse_request_free(fc->destroy_req);
+ mutex_destroy(&fc->inst_mutex);
+- bdi_destroy(&fc->bdi);
+ kfree(fc);
+ }
+ }
--- /dev/null
+From 3ddf1e7f57237ac7c5d5bfb7058f1ea4f970b661 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@suse.cz>
+Date: Mon, 26 Jan 2009 15:00:58 +0100
+Subject: fuse: fix missing fput on error
+
+From: Miklos Szeredi <mszeredi@suse.cz>
+
+commit 3ddf1e7f57237ac7c5d5bfb7058f1ea4f970b661 upstream.
+
+Fix the file reference leak when allocation or initialization of the
+fuse_conn fails.
+
+Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/fuse/inode.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -825,12 +825,16 @@ static int fuse_fill_super(struct super_
+ if (!file)
+ return -EINVAL;
+
+- if (file->f_op != &fuse_dev_operations)
++ if (file->f_op != &fuse_dev_operations) {
++ fput(file);
+ return -EINVAL;
++ }
+
+ fc = new_conn(sb);
+- if (!fc)
++ if (!fc) {
++ fput(file);
+ return -ENOMEM;
++ }
+
+ fc->flags = d.flags;
+ fc->user_id = d.user_id;
--- /dev/null
+From bb875b38dc5e343bdb696b2eab8233e4d195e208 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <error27@gmail.com>
+Date: Mon, 26 Jan 2009 15:00:58 +0100
+Subject: fuse: fix NULL deref in fuse_file_alloc()
+
+From: Dan Carpenter <error27@gmail.com>
+
+commit bb875b38dc5e343bdb696b2eab8233e4d195e208 upstream.
+
+ff is set to NULL and then dereferenced on line 65. Compile tested only.
+
+Signed-off-by: Dan Carpenter <error27@gmail.com>
+Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/fuse/file.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -54,7 +54,7 @@ struct fuse_file *fuse_file_alloc(void)
+ ff->reserved_req = fuse_request_alloc();
+ if (!ff->reserved_req) {
+ kfree(ff);
+- ff = NULL;
++ return NULL;
+ } else {
+ INIT_LIST_HEAD(&ff->write_entry);
+ atomic_set(&ff->count, 0);
--- /dev/null
+From 3632dee2f8b8a9720329f29eeaa4ec4669a3aff8 Mon Sep 17 00:00:00 2001
+From: Vegard Nossum <vegard.nossum@gmail.com>
+Date: Thu, 22 Jan 2009 15:29:45 +0100
+Subject: inotify: clean up inotify_read and fix locking problems
+
+From: Vegard Nossum <vegard.nossum@gmail.com>
+
+commit 3632dee2f8b8a9720329f29eeaa4ec4669a3aff8 upstream.
+
+If userspace supplies an invalid pointer to a read() of an inotify
+instance, the inotify device's event list mutex is unlocked twice.
+This causes an imbalance, which effectively leaves the data structure
+unprotected, and we can trigger oopses by accessing the inotify
+instance from different tasks concurrently.
+
+The best fix (contributed largely by Linus) is a total rewrite
+of the function in question:
+
+On Thu, Jan 22, 2009 at 7:05 AM, Linus Torvalds wrote:
+> The thing to notice is that:
+>
+> - locking is done in just one place, and there is no question about it
+> not having an unlock.
+>
+> - that whole double-while(1)-loop thing is gone.
+>
+> - use multiple functions to make nesting and error handling sane
+>
+> - do error testing after doing the things you always need to do, ie do
+> this:
+>
+> mutex_lock(..)
+> ret = function_call();
+> mutex_unlock(..)
+>
+> .. test ret here ..
+>
+> instead of doing conditional exits with unlocking or freeing.
+>
+> So if the code is written in this way, it may still be buggy, but at least
+> it's not buggy because of subtle "forgot to unlock" or "forgot to free"
+> issues.
+>
+> This _always_ unlocks if it locked, and it always frees if it got a
+> non-error kevent.
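+
+As an illustrative excerpt of that pattern applied here (a sketch of
+the rewritten loop body, not the complete function):
+
+	mutex_lock(&dev->ev_mutex);
+	kevent = get_one_event(dev, count);	/* NULL, ERR_PTR(), or an event */
+	mutex_unlock(&dev->ev_mutex);
+
+	if (kevent) {
+		if (IS_ERR(kevent))
+			return PTR_ERR(kevent);	/* error tested after the unlock */
+		ret = copy_event_to_user(kevent, buf);
+		free_kevent(kevent);		/* always freed once we own it */
+		/* on success, advance buf and count and go get the next event */
+	}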
+
+Cc: John McCutchan <ttb@tentacle.dhs.org>
+Cc: Robert Love <rlove@google.com>
+Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/inotify_user.c | 135 +++++++++++++++++++++++++++++-------------------------
+ 1 file changed, 74 insertions(+), 61 deletions(-)
+
+--- a/fs/inotify_user.c
++++ b/fs/inotify_user.c
+@@ -427,10 +427,61 @@ static unsigned int inotify_poll(struct
+ return ret;
+ }
+
++/*
++ * Get an inotify_kernel_event if one exists and is small
++ * enough to fit in "count". Return an error pointer if
++ * not large enough.
++ *
++ * Called with the device ev_mutex held.
++ */
++static struct inotify_kernel_event *get_one_event(struct inotify_device *dev,
++ size_t count)
++{
++ size_t event_size = sizeof(struct inotify_event);
++ struct inotify_kernel_event *kevent;
++
++ if (list_empty(&dev->events))
++ return NULL;
++
++ kevent = inotify_dev_get_event(dev);
++ if (kevent->name)
++ event_size += kevent->event.len;
++
++ if (event_size > count)
++ return ERR_PTR(-EINVAL);
++
++ remove_kevent(dev, kevent);
++ return kevent;
++}
++
++/*
++ * Copy an event to user space, returning how much we copied.
++ *
++ * We already checked that the event size is smaller than the
++ * buffer we had in "get_one_event()" above.
++ */
++static ssize_t copy_event_to_user(struct inotify_kernel_event *kevent,
++ char __user *buf)
++{
++ size_t event_size = sizeof(struct inotify_event);
++
++ if (copy_to_user(buf, &kevent->event, event_size))
++ return -EFAULT;
++
++ if (kevent->name) {
++ buf += event_size;
++
++ if (copy_to_user(buf, kevent->name, kevent->event.len))
++ return -EFAULT;
++
++ event_size += kevent->event.len;
++ }
++ return event_size;
++}
++
+ static ssize_t inotify_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+ {
+- size_t event_size = sizeof (struct inotify_event);
+ struct inotify_device *dev;
+ char __user *start;
+ int ret;
+@@ -440,81 +491,43 @@ static ssize_t inotify_read(struct file
+ dev = file->private_data;
+
+ while (1) {
++ struct inotify_kernel_event *kevent;
+
+ prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
+
+ mutex_lock(&dev->ev_mutex);
+- if (!list_empty(&dev->events)) {
+- ret = 0;
+- break;
+- }
++ kevent = get_one_event(dev, count);
+ mutex_unlock(&dev->ev_mutex);
+
+- if (file->f_flags & O_NONBLOCK) {
+- ret = -EAGAIN;
+- break;
+- }
+-
+- if (signal_pending(current)) {
+- ret = -EINTR;
+- break;
++ if (kevent) {
++ ret = PTR_ERR(kevent);
++ if (IS_ERR(kevent))
++ break;
++ ret = copy_event_to_user(kevent, buf);
++ free_kevent(kevent);
++ if (ret < 0)
++ break;
++ buf += ret;
++ count -= ret;
++ continue;
+ }
+
+- schedule();
+- }
+-
+- finish_wait(&dev->wq, &wait);
+- if (ret)
+- return ret;
+-
+- while (1) {
+- struct inotify_kernel_event *kevent;
+-
+- ret = buf - start;
+- if (list_empty(&dev->events))
++ ret = -EAGAIN;
++ if (file->f_flags & O_NONBLOCK)
+ break;
+-
+- kevent = inotify_dev_get_event(dev);
+- if (event_size + kevent->event.len > count) {
+- if (ret == 0 && count > 0) {
+- /*
+- * could not get a single event because we
+- * didn't have enough buffer space.
+- */
+- ret = -EINVAL;
+- }
++ ret = -EINTR;
++ if (signal_pending(current))
+ break;
+- }
+- remove_kevent(dev, kevent);
+
+- /*
+- * Must perform the copy_to_user outside the mutex in order
+- * to avoid a lock order reversal with mmap_sem.
+- */
+- mutex_unlock(&dev->ev_mutex);
+-
+- if (copy_to_user(buf, &kevent->event, event_size)) {
+- ret = -EFAULT;
++ if (start != buf)
+ break;
+- }
+- buf += event_size;
+- count -= event_size;
+-
+- if (kevent->name) {
+- if (copy_to_user(buf, kevent->name, kevent->event.len)){
+- ret = -EFAULT;
+- break;
+- }
+- buf += kevent->event.len;
+- count -= kevent->event.len;
+- }
+
+- free_kevent(kevent);
+-
+- mutex_lock(&dev->ev_mutex);
++ schedule();
+ }
+- mutex_unlock(&dev->ev_mutex);
+
++ finish_wait(&dev->wq, &wait);
++ if (start != buf && ret != -EFAULT)
++ ret = buf - start;
+ return ret;
+ }
+
--- /dev/null
+From c0e69a5bbc6fc74184aa043aadb9a53bc58f953b Mon Sep 17 00:00:00 2001
+From: Jesper Nilsson <Jesper.Nilsson@axis.com>
+Date: Wed, 14 Jan 2009 11:19:08 +0100
+Subject: klist.c: bit 0 in pointer can't be used as flag
+
+From: Jesper Nilsson <Jesper.Nilsson@axis.com>
+
+commit c0e69a5bbc6fc74184aa043aadb9a53bc58f953b upstream.
+
+The commit a1ed5b0cffe4b16a93a6a3390e8cee0fbef94f86
+(klist: don't iterate over deleted entries) introduces use of the
+low bit in a pointer to indicate if the knode is dead or not,
+assuming that this bit is always free.
+
+This is not true for all architectures; CRIS, for example, may align
+data on byte boundaries.
+
+The result is a bunch of warnings on bootup, devices not being
+added correctly, etc., as reported by Hinko Kocevar <hinko.kocevar@cetrtapot.si>:
+
+------------[ cut here ]------------
+WARNING: at lib/klist.c:62 ()
+Modules linked in:
+
+Stack from c1fe1cf0:
+ c01cc7f4 c1fe1d11 c000eb4e c000e4de 00000000 00000000 c1f4f78f c1f50c2d
+ c01d008c c1fdd1a0 c1fdd1a0 c1fe1d38 c0192954 c1fe0000 00000000 c1fe1dc0
+ 00000002 7fffffff c1fe1da8 c0192d50 c1fe1dc0 00000002 7fffffff c1ff9fcc
+Call Trace: [<c000eb4e>] [<c000e4de>] [<c0192954>] [<c0192d50>] [<c001d49e>] [<c000b688>] [<c0192a3c>]
+ [<c000b63e>] [<c000b63e>] [<c001a542>] [<c00b55b0>] [<c00411c0>] [<c00b559c>] [<c01918e6>] [<c0191988>]
+ [<c01919d0>] [<c00cd9c8>] [<c00cdd6a>] [<c0034178>] [<c000409a>] [<c0015576>] [<c0029130>] [<c0029078>]
+ [<c0029170>] [<c0012336>] [<c00b4076>] [<c00b4770>] [<c006d6e4>] [<c006d974>] [<c006dca0>] [<c0028d6c>]
+ [<c0028e12>] [<c0006424>] <4>---[ end trace 4eaa2a86a8e2da22 ]---
+------------[ cut here ]------------
+Repeat ad nauseam.
+
+On Wed, Jan 14, 2009 at 12:11:32AM +0100, Bastien ROUCARIES wrote:
+> Perhaps using a pointerhackalign trick on this structure where
+> #define pointerhackalign(x) __attribute__ ((aligned (x)))
+> and declare
+> struct klist_node {
+> ...
+> } pointerhackalign(2);
+>
+> Because __attribute__ ((aligned (x))) can only increase alignment,
+> it will be safe to do that and also serve a documentation purpose :)
+
+That works, but we need to do it not for the struct klist_node,
+but for the struct we insert into the void * in klist_node,
+which is struct klist.
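+
+To make the constraint concrete, this is roughly how lib/klist.c tags
+the pointer after commit a1ed5b0c (paraphrased from memory, so treat
+the exact names as approximate):
+
+	#define KNODE_DEAD		1LU
+	#define KNODE_KLIST_MASK	~KNODE_DEAD
+
+	static struct klist *knode_klist(struct klist_node *knode)
+	{
+		return (struct klist *)
+			((unsigned long)knode->n_klist & KNODE_KLIST_MASK);
+	}
+
+	static bool knode_dead(struct klist_node *knode)
+	{
+		return (unsigned long)knode->n_klist & KNODE_DEAD;
+	}
+
+Bit 0 of n_klist is only guaranteed to be zero if every struct klist
+is at least 2-byte aligned; the hunk below states that explicitly,
+using aligned(4) to be safe on all architectures.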
+
+Reported-by: Hinko Kocevar <hinko.kocevar@cetrtapot.si>
+Cc: Bastien ROUCARIES <roucaries.bastien@gmail.com>
+Signed-off-by: Jesper Nilsson <jesper.nilsson@axis.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/klist.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/linux/klist.h
++++ b/include/linux/klist.h
+@@ -23,7 +23,7 @@ struct klist {
+ struct list_head k_list;
+ void (*get)(struct klist_node *);
+ void (*put)(struct klist_node *);
+-};
++} __attribute__ ((aligned (4)));
+
+ #define KLIST_INIT(_name, _get, _put) \
+ { .k_lock = __SPIN_LOCK_UNLOCKED(_name.k_lock), \
--- /dev/null
+From 5dc306f3bd1d4cfdf79df39221b3036eab1ddcf3 Mon Sep 17 00:00:00 2001
+From: Brian Cavagnolo <brian@cozybit.com>
+Date: Fri, 16 Jan 2009 19:04:49 -0800
+Subject: mac80211: decrement ref count to netdev after launching mesh discovery
+
+From: Brian Cavagnolo <brian@cozybit.com>
+
+commit 5dc306f3bd1d4cfdf79df39221b3036eab1ddcf3 upstream.
+
+After launching mesh discovery in the tx path, the reference count on
+the net device was not being decremented. This was preventing module
+unload.
+
+Signed-off-by: Brian Cavagnolo <brian@cozybit.com>
+Signed-off-by: Andrey Yurovsky <andrey@cozybit.com>
+Acked-by: Johannes Berg <johannes@sipsolutions.net>
+Signed-off-by: John W. Linville <linville@tuxdriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/mac80211/tx.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -1299,8 +1299,10 @@ int ieee80211_master_start_xmit(struct s
+ if (is_multicast_ether_addr(hdr->addr3))
+ memcpy(hdr->addr1, hdr->addr3, ETH_ALEN);
+ else
+- if (mesh_nexthop_lookup(skb, osdata))
+- return 0;
++ if (mesh_nexthop_lookup(skb, osdata)) {
++ dev_put(odev);
++ return 0;
++ }
+ if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0)
+ IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh,
+ fwded_frames);
--- /dev/null
+ath5k-fix-mesh-point-operation.patch
+mac80211-decrement-ref-count-to-netdev-after-launching-mesh-discovery.patch
+inotify-clean-up-inotify_read-and-fix-locking-problems.patch
+fuse-destroy-bdi-on-umount.patch
+fuse-fix-missing-fput-on-error.patch
+fuse-fix-null-deref-in-fuse_file_alloc.patch
+x86-mm-fix-pte_free.patch
+klist.c-bit-0-in-pointer-can-t-be-used-as-flag.patch
+sysfs-fix-problems-with-binary-files.patch
+x86-fix-page-attribute-corruption-with-cpa.patch
+usb-fix-toggle-mismatch-in-disable_endpoint-paths.patch
--- /dev/null
+From 4503efd0891c40e30928afb4b23dc3f99c62a6b2 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@suse.de>
+Date: Tue, 20 Jan 2009 15:51:16 -0800
+Subject: sysfs: fix problems with binary files
+
+From: Greg Kroah-Hartman <gregkh@suse.de>
+
+commit 4503efd0891c40e30928afb4b23dc3f99c62a6b2 upstream.
+
+Some sysfs binary files don't like having 0 passed to them as a size.
+Fix this up at the root by just returning to the vfs if userspace asks
+us for a zero-sized buffer.
+
+Thanks to Pavel Roskin for pointing this out.
+
+Reported-by: Pavel Roskin <proski@gnu.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/sysfs/bin.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/fs/sysfs/bin.c
++++ b/fs/sysfs/bin.c
+@@ -63,6 +63,9 @@ read(struct file *file, char __user *use
+ int count = min_t(size_t, bytes, PAGE_SIZE);
+ char *temp;
+
++ if (!bytes)
++ return 0;
++
+ if (size) {
+ if (offs > size)
+ return 0;
+@@ -131,6 +134,9 @@ static ssize_t write(struct file *file,
+ int count = min_t(size_t, bytes, PAGE_SIZE);
+ char *temp;
+
++ if (!bytes)
++ return 0;
++
+ if (size) {
+ if (offs > size)
+ return 0;
--- /dev/null
+From ddeac4e75f2527a340f9dc655bde49bb2429b39b Mon Sep 17 00:00:00 2001
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Thu, 15 Jan 2009 17:03:33 -0500
+Subject: USB: fix toggle mismatch in disable_endpoint paths
+
+From: Alan Stern <stern@rowland.harvard.edu>
+
+commit ddeac4e75f2527a340f9dc655bde49bb2429b39b upstream.
+
+This patch (as1200) finishes some fixes that were left incomplete by
+an earlier patch.
+
+Although nobody has addressed this issue in the past, it turns out
+that we need to distinguish between two different modes of disabling
+and enabling endpoints. In one mode only the data structures in
+usbcore are affected, and in the other mode the host controller and
+device hardware states are affected as well.
+
+The earlier patch added an extra argument to the routines in the
+enable_endpoint pathways to reflect this difference. This patch adds
+corresponding arguments to the disable_endpoint pathways. Without
+this change, the endpoint toggle state can get out of sync between
+the host and the device. The exact mechanism depends on the details
+of the host controller (whether or not it stores its own copy of the
+toggle values).
+
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Reported-by: Dan Streetman <ddstreet@ieee.org>
+Tested-by: Dan Streetman <ddstreet@ieee.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/usb/core/driver.c | 2 +-
+ drivers/usb/core/hub.c | 4 ++--
+ drivers/usb/core/message.c | 40 ++++++++++++++++++++++++----------------
+ drivers/usb/core/usb.h | 5 +++--
+ 4 files changed, 30 insertions(+), 21 deletions(-)
+
+--- a/drivers/usb/core/driver.c
++++ b/drivers/usb/core/driver.c
+@@ -269,7 +269,7 @@ static int usb_unbind_interface(struct d
+ * supports "soft" unbinding.
+ */
+ if (!driver->soft_unbind)
+- usb_disable_interface(udev, intf);
++ usb_disable_interface(udev, intf, false);
+
+ driver->disconnect(intf);
+
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2383,8 +2383,8 @@ static int hub_port_debounce(struct usb_
+
+ void usb_ep0_reinit(struct usb_device *udev)
+ {
+- usb_disable_endpoint(udev, 0 + USB_DIR_IN);
+- usb_disable_endpoint(udev, 0 + USB_DIR_OUT);
++ usb_disable_endpoint(udev, 0 + USB_DIR_IN, true);
++ usb_disable_endpoint(udev, 0 + USB_DIR_OUT, true);
+ usb_enable_endpoint(udev, &udev->ep0, true);
+ }
+ EXPORT_SYMBOL_GPL(usb_ep0_reinit);
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -1009,14 +1009,15 @@ EXPORT_SYMBOL_GPL(usb_clear_halt);
+ * @dev: the device whose endpoint is being disabled
+ * @epaddr: the endpoint's address. Endpoint number for output,
+ * endpoint number + USB_DIR_IN for input
++ * @reset_hardware: flag to erase any endpoint state stored in the
++ * controller hardware
+ *
+- * Deallocates hcd/hardware state for this endpoint ... and nukes all
+- * pending urbs.
+- *
+- * If the HCD hasn't registered a disable() function, this sets the
+- * endpoint's maxpacket size to 0 to prevent further submissions.
++ * Disables the endpoint for URB submission and nukes all pending URBs.
++ * If @reset_hardware is set then also deallocates hcd/hardware state
++ * for the endpoint.
+ */
+-void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr)
++void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr,
++ bool reset_hardware)
+ {
+ unsigned int epnum = epaddr & USB_ENDPOINT_NUMBER_MASK;
+ struct usb_host_endpoint *ep;
+@@ -1026,15 +1027,18 @@ void usb_disable_endpoint(struct usb_dev
+
+ if (usb_endpoint_out(epaddr)) {
+ ep = dev->ep_out[epnum];
+- dev->ep_out[epnum] = NULL;
++ if (reset_hardware)
++ dev->ep_out[epnum] = NULL;
+ } else {
+ ep = dev->ep_in[epnum];
+- dev->ep_in[epnum] = NULL;
++ if (reset_hardware)
++ dev->ep_in[epnum] = NULL;
+ }
+ if (ep) {
+ ep->enabled = 0;
+ usb_hcd_flush_endpoint(dev, ep);
+- usb_hcd_disable_endpoint(dev, ep);
++ if (reset_hardware)
++ usb_hcd_disable_endpoint(dev, ep);
+ }
+ }
+
+@@ -1042,17 +1046,21 @@ void usb_disable_endpoint(struct usb_dev
+ * usb_disable_interface -- Disable all endpoints for an interface
+ * @dev: the device whose interface is being disabled
+ * @intf: pointer to the interface descriptor
++ * @reset_hardware: flag to erase any endpoint state stored in the
++ * controller hardware
+ *
+ * Disables all the endpoints for the interface's current altsetting.
+ */
+-void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf)
++void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf,
++ bool reset_hardware)
+ {
+ struct usb_host_interface *alt = intf->cur_altsetting;
+ int i;
+
+ for (i = 0; i < alt->desc.bNumEndpoints; ++i) {
+ usb_disable_endpoint(dev,
+- alt->endpoint[i].desc.bEndpointAddress);
++ alt->endpoint[i].desc.bEndpointAddress,
++ reset_hardware);
+ }
+ }
+
+@@ -1073,8 +1081,8 @@ void usb_disable_device(struct usb_devic
+ dev_dbg(&dev->dev, "%s nuking %s URBs\n", __func__,
+ skip_ep0 ? "non-ep0" : "all");
+ for (i = skip_ep0; i < 16; ++i) {
+- usb_disable_endpoint(dev, i);
+- usb_disable_endpoint(dev, i + USB_DIR_IN);
++ usb_disable_endpoint(dev, i, true);
++ usb_disable_endpoint(dev, i + USB_DIR_IN, true);
+ }
+ dev->toggle[0] = dev->toggle[1] = 0;
+
+@@ -1242,7 +1250,7 @@ int usb_set_interface(struct usb_device
+ /* prevent submissions using previous endpoint settings */
+ if (iface->cur_altsetting != alt)
+ usb_remove_sysfs_intf_files(iface);
+- usb_disable_interface(dev, iface);
++ usb_disable_interface(dev, iface, true);
+
+ iface->cur_altsetting = alt;
+
+@@ -1320,8 +1328,8 @@ int usb_reset_configuration(struct usb_d
+ */
+
+ for (i = 1; i < 16; ++i) {
+- usb_disable_endpoint(dev, i);
+- usb_disable_endpoint(dev, i + USB_DIR_IN);
++ usb_disable_endpoint(dev, i, true);
++ usb_disable_endpoint(dev, i + USB_DIR_IN, true);
+ }
+
+ config = dev->actconfig;
+--- a/drivers/usb/core/usb.h
++++ b/drivers/usb/core/usb.h
+@@ -13,9 +13,10 @@ extern void usb_enable_endpoint(struct u
+ struct usb_host_endpoint *ep, bool reset_toggle);
+ extern void usb_enable_interface(struct usb_device *dev,
+ struct usb_interface *intf, bool reset_toggles);
+-extern void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr);
++extern void usb_disable_endpoint(struct usb_device *dev, unsigned int epaddr,
++ bool reset_hardware);
+ extern void usb_disable_interface(struct usb_device *dev,
+- struct usb_interface *intf);
++ struct usb_interface *intf, bool reset_hardware);
+ extern void usb_release_interface_cache(struct kref *ref);
+ extern void usb_disable_device(struct usb_device *dev, int skip_ep0);
+ extern int usb_deauthorize_device(struct usb_device *);
--- /dev/null
+From a1e46212a410793d575718818e81ddc442a65283 Mon Sep 17 00:00:00 2001
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+Date: Tue, 20 Jan 2009 14:20:21 -0800
+Subject: x86: fix page attribute corruption with cpa()
+
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+
+commit a1e46212a410793d575718818e81ddc442a65283 upstream.
+
+Impact: fix sporadic slowdowns and warning messages
+
+This patch fixes a performance issue reported by Linus on his
+Nehalem system. While Linus reverted the PAT patch (commit
+58dab916dfb57328d50deb0aa9b3fc92efa248ff) which exposed the issue,
+the existing cpa() code can potentially still cause wrong (page
+attribute corruption) behavior.
+
+This patch also fixes the "WARNING: at arch/x86/mm/pageattr.c:560" that
+various people reported.
+
+On 64-bit kernels, the kernel identity mapping might have holes
+depending on the available memory and on how e820 reports the address
+ranges covering the RAM, ACPI and PCI reserved regions. If there is a
+2MB/1GB hole in the address range that is not listed by the e820
+entries, the kernel will have a corresponding hole in its 1-1 identity
+mapping.
+
+If cpa() is called on a kernel identity mapping address that falls
+into one of these holes, the existing code fails like this:
+
+ __change_page_attr_set_clr()
+ __change_page_attr()
+ returns 0 because of if (!kpte). But doesn't
+ set cpa->numpages and cpa->pfn.
+ cpa_process_alias()
+ uses uninitialized cpa->pfn (random value)
+ which can potentially lead to changing the page
+ attribute of kernel text/data, kernel identity
+ mapping of RAM pages etc. oops!
+
+This bug was easily exposed by another PAT patch, which was doing
+cpa() more often on kernel identity mapping holes (the physical range
+between max_low_pfn_mapped and 4GB), where it was also setting the
+cache-disable attribute (PCD) for kernel identity mappings.
+
+Fix cpa() to handle the kernel identity mapping holes. Retain
+the WARN() for cpa() calls on other not-present address ranges
+(kernel text/data, ioremap() addresses).
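+
+The hunks below key the "tolerate a hole" decision on the existing
+within() range helper in arch/x86/mm/pageattr.c, reproduced here from
+memory as a sketch:
+
+	static inline int
+	within(unsigned long addr, unsigned long start, unsigned long end)
+	{
+		return addr >= start && addr < end;
+	}
+
+A zero pte is treated as benign only when the address lies inside the
+direct mapping, i.e. within(vaddr, PAGE_OFFSET, PAGE_OFFSET +
+(max_pfn_mapped << PAGE_SHIFT)); any other not-present address still
+triggers the WARN() and fails.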
+
+Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
+Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/mm/pageattr.c | 49 ++++++++++++++++++++++++++++++++++---------------
+ 1 file changed, 34 insertions(+), 15 deletions(-)
+
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -534,6 +534,36 @@ out_unlock:
+ return 0;
+ }
+
++static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
++ int primary)
++{
++ /*
++ * Ignore all non primary paths.
++ */
++ if (!primary)
++ return 0;
++
++ /*
++ * Ignore the NULL PTE for kernel identity mapping, as it is expected
++ * to have holes.
++ * Also set numpages to '1' indicating that we processed cpa req for
++ * one virtual address page and its pfn. TBD: numpages can be set based
++ * on the initial value and the level returned by lookup_address().
++ */
++ if (within(vaddr, PAGE_OFFSET,
++ PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
++ cpa->numpages = 1;
++ cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
++ return 0;
++ } else {
++ WARN(1, KERN_WARNING "CPA: called for zero pte. "
++ "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
++ *cpa->vaddr);
++
++ return -EFAULT;
++ }
++}
++
+ static int __change_page_attr(struct cpa_data *cpa, int primary)
+ {
+ unsigned long address;
+@@ -549,17 +579,11 @@ static int __change_page_attr(struct cpa
+ repeat:
+ kpte = lookup_address(address, &level);
+ if (!kpte)
+- return 0;
++ return __cpa_process_fault(cpa, address, primary);
+
+ old_pte = *kpte;
+- if (!pte_val(old_pte)) {
+- if (!primary)
+- return 0;
+- WARN(1, KERN_WARNING "CPA: called for zero pte. "
+- "vaddr = %lx cpa->vaddr = %lx\n", address,
+- *cpa->vaddr);
+- return -EINVAL;
+- }
++ if (!pte_val(old_pte))
++ return __cpa_process_fault(cpa, address, primary);
+
+ if (level == PG_LEVEL_4K) {
+ pte_t new_pte;
+@@ -657,12 +681,7 @@ static int cpa_process_alias(struct cpa_
+ vaddr = *cpa->vaddr;
+
+ if (!(within(vaddr, PAGE_OFFSET,
+- PAGE_OFFSET + (max_low_pfn_mapped << PAGE_SHIFT))
+-#ifdef CONFIG_X86_64
+- || within(vaddr, PAGE_OFFSET + (1UL<<32),
+- PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))
+-#endif
+- )) {
++ PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
+
+ alias_cpa = *cpa;
+ temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
--- /dev/null
+From 42ef73fe134732b2e91c0326df5fd568da17c4b2 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 23 Jan 2009 17:37:49 +0100
+Subject: x86, mm: fix pte_free()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 42ef73fe134732b2e91c0326df5fd568da17c4b2 upstream.
+
+On -rt we were seeing spurious bad page states like:
+
+Bad page state in process 'firefox'
+page:c1bc2380 flags:0x40000000 mapping:c1bc2390 mapcount:0 count:0
+Trying to fix it up, but a reboot is needed
+Backtrace:
+Pid: 503, comm: firefox Not tainted 2.6.26.8-rt13 #3
+[<c043d0f3>] ? printk+0x14/0x19
+[<c0272d4e>] bad_page+0x4e/0x79
+[<c0273831>] free_hot_cold_page+0x5b/0x1d3
+[<c02739f6>] free_hot_page+0xf/0x11
+[<c0273a18>] __free_pages+0x20/0x2b
+[<c027d170>] __pte_alloc+0x87/0x91
+[<c027d25e>] handle_mm_fault+0xe4/0x733
+[<c043f680>] ? rt_mutex_down_read_trylock+0x57/0x63
+[<c043f680>] ? rt_mutex_down_read_trylock+0x57/0x63
+[<c0218875>] do_page_fault+0x36f/0x88a
+
+This is the case where a concurrent fault already installed the PTE and
+we get to free the newly allocated one.
+
+This is due to pgtable_page_ctor() doing spin_lock_init(&page->ptl),
+where ptl is overlaid with the {private, mapping} struct:
+
+union {
+ struct {
+ unsigned long private;
+ struct address_space *mapping;
+ };
+ spinlock_t ptl;
+ struct kmem_cache *slab;
+ struct page *first_page;
+};
+
+Normally the spinlock is small enough to not stomp on page->mapping, but
+PREEMPT_RT=y has huge 'spin'locks.
+
+But lockdep kernels should also be able to trigger this splat, as the
+lock tracking code grows the spinlock to cover page->mapping.
+
+The obvious fix is calling pgtable_page_dtor() like the regular pte free
+path __pte_free_tlb() does.
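+
+For reference, a from-memory sketch of what the ctor/dtor pair does
+under USE_SPLIT_PTLOCKS (not verbatim include/linux/mm.h):
+
+	static inline void pgtable_page_ctor(struct page *page)
+	{
+		spin_lock_init(&page->ptl);	/* overlays page->{private,mapping} */
+		inc_zone_page_state(page, NR_PAGETABLE);
+	}
+
+	static inline void pgtable_page_dtor(struct page *page)
+	{
+		page->mapping = NULL;		/* undo the overlay before freeing */
+		dec_zone_page_state(page, NR_PAGETABLE);
+	}
+
+Freeing a pte page without the dtor leaves a stale page->mapping
+behind, which is exactly what the bad page check above complains about.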
+
+It seems all architectures except x86 and mn10300 already do this, and
+mn10300 doesn't seem to use pgtable_page_ctor(), which suggests it
+doesn't do SMP, or simply doesn't do MMU at all, or something.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/include/asm/pgalloc.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/include/asm/pgalloc.h
++++ b/arch/x86/include/asm/pgalloc.h
+@@ -42,6 +42,7 @@ static inline void pte_free_kernel(struc
+
+ static inline void pte_free(struct mm_struct *mm, struct page *pte)
+ {
++ pgtable_page_dtor(pte);
+ __free_page(pte);
+ }
+