--- /dev/null
+From 7ef9964e6d1b911b78709f144000aacadd0ebc21 Mon Sep 17 00:00:00 2001
+From: Davide Libenzi <davidel@xmailserver.org>
+Date: Mon, 1 Dec 2008 13:13:55 -0800
+Subject: epoll: introduce resource usage limits
+
+From: Davide Libenzi <davidel@xmailserver.org>
+
+commit 7ef9964e6d1b911b78709f144000aacadd0ebc21 upstream.
+
+It has been thought that the per-user file descriptor limit would also
+limit the resources that a normal user can request via the epoll
+interface. Vegard Nossum reported a very simple program (a modified
+version is attached) that lets a normal user request a pretty large
+amount of kernel memory, well within its maximum number of fds. To
+solve this problem, default limits are now imposed, and /proc-based
+configuration has been introduced. A new directory has been created,
+named /proc/sys/fs/epoll/, and inside it there are two configuration
+points:
+
+ max_user_instances = Maximum number of devices - per user
+
+ max_user_watches = Maximum number of "watched" fds - per user
+
+The current default for "max_user_watches" limits the memory used by epoll
+to store "watches" to 1/32 of the amount of low RAM. For example, a
+256MB 32bit machine will have "max_user_watches" set to roughly 90000,
+which should be enough not to break existing heavy epoll users. The
+default value for "max_user_instances" is 128, which should be
+enough too.
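+
+The arithmetic behind that example, using the roughly 90-byte per-watch
+cost (EP_ITEM_COST) quoted in the documentation below:
+
+    256 MB / 32          = 8 MB of low memory budgeted for watches
+    8 * 1024 * 1024 / 90 ~= 93000 watches, i.e. roughly 90000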
+
+This also changes userspace-visible behavior, because a new error code
+(-ENOSPC) can now come out of EPOLL_CTL_ADD. The EMFILE from epoll_create()
+was already documented, so that should be ok.
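+
+As a rough illustration (not part of this patch; the helper name is made
+up), userspace that wants to cope with the new limit could handle the
+error like the sketch below; the sysctl path is the one introduced above:
+
+    #include <errno.h>
+    #include <stdio.h>
+    #include <sys/epoll.h>
+
+    /* Hypothetical helper: add fd to epfd, reporting the new limit. */
+    static int add_watch(int epfd, int fd)
+    {
+            struct epoll_event ev = { .events = EPOLLIN };
+
+            ev.data.fd = fd;
+            if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev) < 0) {
+                    if (errno == ENOSPC)
+                            fprintf(stderr, "raise /proc/sys/fs/epoll/"
+                                            "max_user_watches\n");
+                    return -1;
+            }
+            return 0;
+    }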
+
+[akpm@linux-foundation.org: use get_current_user()]
+Signed-off-by: Davide Libenzi <davidel@xmailserver.org>
+Cc: Michael Kerrisk <mtk.manpages@gmail.com>
+Cc: Cyrill Gorcunov <gorcunov@gmail.com>
+Reported-by: Vegard Nossum <vegardno@ifi.uio.no>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ Documentation/filesystems/proc.txt | 27 +++++++++++
+ fs/eventpoll.c | 85 +++++++++++++++++++++++++++++++++----
+ include/linux/sched.h | 4 +
+ kernel/sysctl.c | 10 ++++
+ 4 files changed, 118 insertions(+), 8 deletions(-)
+
+--- a/Documentation/filesystems/proc.txt
++++ b/Documentation/filesystems/proc.txt
+@@ -44,6 +44,7 @@ Table of Contents
+ 2.14 /proc/<pid>/io - Display the IO accounting fields
+ 2.15 /proc/<pid>/coredump_filter - Core dump filtering settings
+ 2.16 /proc/<pid>/mountinfo - Information about mounts
++ 2.17 /proc/sys/fs/epoll - Configuration options for the epoll interface
+
+ ------------------------------------------------------------------------------
+ Preface
+@@ -2471,4 +2472,30 @@ For more information on mount propagatio
+
+ Documentation/filesystems/sharedsubtree.txt
+
++2.17 /proc/sys/fs/epoll - Configuration options for the epoll interface
++--------------------------------------------------------
++
++This directory contains configuration options for the epoll(7) interface.
++
++max_user_instances
++------------------
++
++This is the maximum number of epoll file descriptors that a single user can
++have open at a given time. The default value is 128, which should be enough
++for normal users.
++
++max_user_watches
++----------------
++
++Every epoll file descriptor can store a number of files to be monitored
++for event readiness. Each one of these monitored files constitutes a "watch".
++This configuration option sets the maximum number of "watches" that are
++allowed for each user.
++Each "watch" costs roughly 90 bytes on a 32bit kernel, and roughly 160 bytes
++on a 64bit one.
++The current default value for max_user_watches is 1/32 of the available
++low memory, divided by the "watch" cost in bytes.
++
++
+ ------------------------------------------------------------------------------
++
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -102,6 +102,8 @@
+
+ #define EP_UNACTIVE_PTR ((void *) -1L)
+
++#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))
++
+ struct epoll_filefd {
+ struct file *file;
+ int fd;
+@@ -200,6 +202,9 @@ struct eventpoll {
+ * holding ->lock.
+ */
+ struct epitem *ovflist;
++
++ /* The user that created the eventpoll descriptor */
++ struct user_struct *user;
+ };
+
+ /* Wait structure used by the poll hooks */
+@@ -227,9 +232,17 @@ struct ep_pqueue {
+ };
+
+ /*
++ * Configuration options available inside /proc/sys/fs/epoll/
++ */
++/* Maximum number of epoll devices, per user */
++static int max_user_instances __read_mostly;
++/* Maximum number of epoll watched descriptors, per user */
++static int max_user_watches __read_mostly;
++
++/*
+ * This mutex is used to serialize ep_free() and eventpoll_release_file().
+ */
+-static struct mutex epmutex;
++static DEFINE_MUTEX(epmutex);
+
+ /* Safe wake up implementation */
+ static struct poll_safewake psw;
+@@ -240,6 +253,33 @@ static struct kmem_cache *epi_cache __re
+ /* Slab cache used to allocate "struct eppoll_entry" */
+ static struct kmem_cache *pwq_cache __read_mostly;
+
++#ifdef CONFIG_SYSCTL
++
++#include <linux/sysctl.h>
++
++static int zero;
++
++ctl_table epoll_table[] = {
++ {
++ .procname = "max_user_instances",
++ .data = &max_user_instances,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &zero,
++ },
++ {
++ .procname = "max_user_watches",
++ .data = &max_user_watches,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec_minmax,
++ .extra1 = &zero,
++ },
++ { .ctl_name = 0 }
++};
++#endif /* CONFIG_SYSCTL */
++
+
+ /* Setup the structure that is used as key for the RB tree */
+ static inline void ep_set_ffd(struct epoll_filefd *ffd,
+@@ -402,6 +442,8 @@ static int ep_remove(struct eventpoll *e
+ /* At this point it is safe to free the eventpoll item */
+ kmem_cache_free(epi_cache, epi);
+
++ atomic_dec(&ep->user->epoll_watches);
++
+ DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_remove(%p, %p)\n",
+ current, ep, file));
+
+@@ -449,6 +491,8 @@ static void ep_free(struct eventpoll *ep
+
+ mutex_unlock(&epmutex);
+ mutex_destroy(&ep->mtx);
++ atomic_dec(&ep->user->epoll_devs);
++ free_uid(ep->user);
+ kfree(ep);
+ }
+
+@@ -532,10 +576,19 @@ void eventpoll_release_file(struct file
+
+ static int ep_alloc(struct eventpoll **pep)
+ {
+- struct eventpoll *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
++ int error;
++ struct user_struct *user;
++ struct eventpoll *ep;
+
+- if (!ep)
+- return -ENOMEM;
++ user = get_current_user();
++ error = -EMFILE;
++ if (unlikely(atomic_read(&user->epoll_devs) >=
++ max_user_instances))
++ goto free_uid;
++ error = -ENOMEM;
++ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
++ if (unlikely(!ep))
++ goto free_uid;
+
+ spin_lock_init(&ep->lock);
+ mutex_init(&ep->mtx);
+@@ -544,12 +597,17 @@ static int ep_alloc(struct eventpoll **p
+ INIT_LIST_HEAD(&ep->rdllist);
+ ep->rbr = RB_ROOT;
+ ep->ovflist = EP_UNACTIVE_PTR;
++ ep->user = user;
+
+ *pep = ep;
+
+ DNPRINTK(3, (KERN_INFO "[%p] eventpoll: ep_alloc() ep=%p\n",
+ current, ep));
+ return 0;
++
++free_uid:
++ free_uid(user);
++ return error;
+ }
+
+ /*
+@@ -703,9 +761,11 @@ static int ep_insert(struct eventpoll *e
+ struct epitem *epi;
+ struct ep_pqueue epq;
+
+- error = -ENOMEM;
++ if (unlikely(atomic_read(&ep->user->epoll_watches) >=
++ max_user_watches))
++ return -ENOSPC;
+ if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
+- goto error_return;
++ return -ENOMEM;
+
+ /* Item initialization follow here ... */
+ INIT_LIST_HEAD(&epi->rdllink);
+@@ -735,6 +795,7 @@ static int ep_insert(struct eventpoll *e
+ * install process. Namely an allocation for a wait queue failed due
+ * high memory pressure.
+ */
++ error = -ENOMEM;
+ if (epi->nwait < 0)
+ goto error_unregister;
+
+@@ -765,6 +826,8 @@ static int ep_insert(struct eventpoll *e
+
+ spin_unlock_irqrestore(&ep->lock, flags);
+
++ atomic_inc(&ep->user->epoll_watches);
++
+ /* We have to call this outside the lock */
+ if (pwake)
+ ep_poll_safewake(&psw, &ep->poll_wait);
+@@ -789,7 +852,7 @@ error_unregister:
+ spin_unlock_irqrestore(&ep->lock, flags);
+
+ kmem_cache_free(epi_cache, epi);
+-error_return:
++
+ return error;
+ }
+
+@@ -1074,6 +1137,7 @@ asmlinkage long sys_epoll_create1(int fl
+ flags & O_CLOEXEC);
+ if (fd < 0)
+ ep_free(ep);
++ atomic_inc(&ep->user->epoll_devs);
+
+ error_return:
+ DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
+@@ -1295,7 +1359,12 @@ asmlinkage long sys_epoll_pwait(int epfd
+
+ static int __init eventpoll_init(void)
+ {
+- mutex_init(&epmutex);
++ struct sysinfo si;
++
++ si_meminfo(&si);
++ max_user_instances = 128;
++ max_user_watches = (((si.totalram - si.totalhigh) / 32) << PAGE_SHIFT) /
++ EP_ITEM_COST;
+
+ /* Initialize the structure used to perform safe poll wait head wake ups */
+ ep_poll_safewake_init(&psw);
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -587,6 +587,10 @@ struct user_struct {
+ atomic_t inotify_watches; /* How many inotify watches does this user have? */
+ atomic_t inotify_devs; /* How many inotify devs does this user have opened? */
+ #endif
++#ifdef CONFIG_EPOLL
++ atomic_t epoll_devs; /* The number of epoll descriptors currently open */
++ atomic_t epoll_watches; /* The number of file descriptors currently watched */
++#endif
+ #ifdef CONFIG_POSIX_MQUEUE
+ /* protected by mq_lock */
+ unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -179,6 +179,9 @@ extern struct ctl_table random_table[];
+ #ifdef CONFIG_INOTIFY_USER
+ extern struct ctl_table inotify_table[];
+ #endif
++#ifdef CONFIG_EPOLL
++extern struct ctl_table epoll_table[];
++#endif
+
+ #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
+ int sysctl_legacy_va_layout;
+@@ -1313,6 +1316,13 @@ static struct ctl_table fs_table[] = {
+ .child = inotify_table,
+ },
+ #endif
++#ifdef CONFIG_EPOLL
++ {
++ .procname = "epoll",
++ .mode = 0555,
++ .child = epoll_table,
++ },
++#endif
+ #endif
+ {
+ .ctl_name = KERN_SETUID_DUMPABLE,
--- /dev/null
+From cf7ee554f3a324e98181b0ea249d9d5be3a0acb8 Mon Sep 17 00:00:00 2001
+From: Clemens Ladisch <clemens@ladisch.de>
+Date: Wed, 19 Nov 2008 15:36:10 -0800
+Subject: fbdev: clean the penguin's dirty feet
+
+From: Clemens Ladisch <clemens@ladisch.de>
+
+commit cf7ee554f3a324e98181b0ea249d9d5be3a0acb8 upstream.
+
+When booting in a direct color mode, the penguin has dirty feet, i.e.,
+some pixels have the wrong color. This is caused by
+fb_set_logo_directpalette(), which does not initialize the last 32 palette
+entries: the logo clut is loaded at palette offset 32, so the loop must
+cover entries 32..32+clutsize-1 rather than stopping at clutsize.
+
+Signed-off-by: Clemens Ladisch <clemens@ladisch.de>
+Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: Krzysztof Helt <krzysztof.h1@poczta.fm>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/video/fbmem.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -232,7 +232,7 @@ static void fb_set_logo_directpalette(st
+ greenshift = info->var.green.offset;
+ blueshift = info->var.blue.offset;
+
+- for (i = 32; i < logo->clutsize; i++)
++ for (i = 32; i < 32 + logo->clutsize; i++)
+ palette[i] = i << redshift | i << greenshift | i << blueshift;
+ }
+
--- /dev/null
+From 8f7b0ba1c853919b85b54774775f567f30006107 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@ZenIV.linux.org.uk>
+Date: Sat, 15 Nov 2008 01:15:43 +0000
+Subject: Fix inotify watch removal/umount races
+
+From: Al Viro <viro@ZenIV.linux.org.uk>
+
+commit 8f7b0ba1c853919b85b54774775f567f30006107 upstream.
+
+Inotify watch removals suck violently.
+
+To kick the watch out we need (in this order) inode->inotify_mutex and
+ih->mutex. That's fine if we have a hold on inode; however, for all
+other cases we need to make damn sure we don't race with umount. We can
+*NOT* just grab a reference to a watch - inotify_unmount_inodes() will
+happily sail past it and we'll end up with a reference to an inode
+potentially outliving its superblock.
+
+Ideally we just want to grab an active reference to superblock if we
+can; that will make sure we won't go into inotify_umount_inodes() until
+we are done. Cleanup is just deactivate_super().
+
+However, that leaves a messy case - what if we *are* racing with
+umount() and active references to superblock can't be acquired anymore?
+We can bump ->s_count, grab ->s_umount, which will almost certainly wait
+until the superblock is shut down and the watch in question is pining
+for fjords. That's fine, but there is a problem - we might have hit the
+window between ->s_active getting to 0 / ->s_count - below S_BIAS (i.e.
+the moment when superblock is past the point of no return and is heading
+for shutdown) and the moment when deactivate_super() acquires
+->s_umount.
+
+We could just do drop_super() yield() and retry, but that's rather
+antisocial and this stuff is luser-triggerable. OTOH, having grabbed
+->s_umount and having found that we'd got there first (i.e. that
+->s_root is non-NULL) we know that we won't race with
+inotify_umount_inodes().
+
+So we could grab a reference to watch and do the rest as above, just
+with drop_super() instead of deactivate_super(), right? Wrong. We had
+to drop ih->mutex before we could grab ->s_umount. So the watch
+could've been gone already.
+
+That still can be dealt with - we need to save watch->wd, do idr_find()
+and compare its result with our pointer. If they match, we either have
+the damn thing still alive or we'd lost not one but two races at once,
+the watch had been killed and a new one got created with the same ->wd
+at the same address. That couldn't have happened in inotify_destroy(),
+but inotify_rm_wd() could run into that. Still, "new one got created"
+is not a problem - we have every right to kill it or leave it alone,
+whatever's more convenient.
+
+So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
+"grab it and kill it" check. If it's been our original watch, we are
+fine, if it's a newcomer - nevermind, just pretend that we'd won the
+race and kill the fscker anyway; we are safe since we know that its
+superblock won't be going away.
+
+And yes, this is far beyond mere "not very pretty"; so's the entire
+concept of inotify to start with.
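+
+For callers outside fs/inotify.c (such as audit, below) the resulting
+usage pattern is roughly this sketch, with locking details trimmed:
+
+    if (pin_inotify_watch(watch)) {
+            /* superblock actively pinned; the watch can't outlive it */
+            inotify_rm_watch(ih, watch);
+            unpin_inotify_watch(watch);  /* put watch + deactivate_super() */
+    } else {
+            /* umount in progress; leave it alone, eviction cleans up */
+    }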
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Acked-by: Greg KH <greg@kroah.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/inotify.c | 150 ++++++++++++++++++++++++++++++++++++++++++++++--
+ include/linux/inotify.h | 11 +++
+ kernel/audit_tree.c | 91 +++++++++++++++++------------
+ kernel/auditfilter.c | 14 ++--
+ 4 files changed, 218 insertions(+), 48 deletions(-)
+
+--- a/fs/inotify.c
++++ b/fs/inotify.c
+@@ -106,6 +106,20 @@ void get_inotify_watch(struct inotify_wa
+ }
+ EXPORT_SYMBOL_GPL(get_inotify_watch);
+
++int pin_inotify_watch(struct inotify_watch *watch)
++{
++ struct super_block *sb = watch->inode->i_sb;
++ spin_lock(&sb_lock);
++ if (sb->s_count >= S_BIAS) {
++ atomic_inc(&sb->s_active);
++ spin_unlock(&sb_lock);
++ atomic_inc(&watch->count);
++ return 1;
++ }
++ spin_unlock(&sb_lock);
++ return 0;
++}
++
+ /**
+ * put_inotify_watch - decrements the ref count on a given watch. cleans up
+ * watch references if the count reaches zero. inotify_watch is freed by
+@@ -124,6 +138,13 @@ void put_inotify_watch(struct inotify_wa
+ }
+ EXPORT_SYMBOL_GPL(put_inotify_watch);
+
++void unpin_inotify_watch(struct inotify_watch *watch)
++{
++ struct super_block *sb = watch->inode->i_sb;
++ put_inotify_watch(watch);
++ deactivate_super(sb);
++}
++
+ /*
+ * inotify_handle_get_wd - returns the next WD for use by the given handle
+ *
+@@ -479,6 +500,112 @@ void inotify_init_watch(struct inotify_w
+ }
+ EXPORT_SYMBOL_GPL(inotify_init_watch);
+
++/*
++ * Watch removals suck violently. To kick the watch out we need (in this
++ * order) inode->inotify_mutex and ih->mutex. That's fine if we have
++ * a hold on inode; however, for all other cases we need to make damn sure
++ * we don't race with umount. We can *NOT* just grab a reference to a
++ * watch - inotify_unmount_inodes() will happily sail past it and we'll end
++ * up with a reference to an inode potentially outliving its superblock.  Ideally
++ * we just want to grab an active reference to superblock if we can; that
++ * will make sure we won't go into inotify_umount_inodes() until we are
++ * done. Cleanup is just deactivate_super(). However, that leaves a messy
++ * case - what if we *are* racing with umount() and active references to
++ * superblock can't be acquired anymore? We can bump ->s_count, grab
++ * ->s_umount, which will almost certainly wait until the superblock is shut
++ * down and the watch in question is pining for fjords. That's fine, but
++ * there is a problem - we might have hit the window between ->s_active
++ * getting to 0 / ->s_count - below S_BIAS (i.e. the moment when superblock
++ * is past the point of no return and is heading for shutdown) and the
++ * moment when deactivate_super() acquires ->s_umount. We could just do
++ * drop_super() yield() and retry, but that's rather antisocial and this
++ * stuff is luser-triggerable. OTOH, having grabbed ->s_umount and having
++ * found that we'd got there first (i.e. that ->s_root is non-NULL) we know
++ * that we won't race with inotify_umount_inodes(). So we could grab a
++ * reference to watch and do the rest as above, just with drop_super() instead
++ * of deactivate_super(), right? Wrong. We had to drop ih->mutex before we
++ * could grab ->s_umount. So the watch could've been gone already.
++ *
++ * That still can be dealt with - we need to save watch->wd, do idr_find()
++ * and compare its result with our pointer. If they match, we either have
++ * the damn thing still alive or we'd lost not one but two races at once,
++ * the watch had been killed and a new one got created with the same ->wd
++ * at the same address. That couldn't have happened in inotify_destroy(),
++ * but inotify_rm_wd() could run into that. Still, "new one got created"
++ * is not a problem - we have every right to kill it or leave it alone,
++ * whatever's more convenient.
++ *
++ * So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
++ * "grab it and kill it" check. If it's been our original watch, we are
++ * fine, if it's a newcomer - nevermind, just pretend that we'd won the
++ * race and kill the fscker anyway; we are safe since we know that its
++ * superblock won't be going away.
++ *
++ * And yes, this is far beyond mere "not very pretty"; so's the entire
++ * concept of inotify to start with.
++ */
++
++/**
++ * pin_to_kill - pin the watch down for removal
++ * @ih: inotify handle
++ * @watch: watch to kill
++ *
++ * Called with ih->mutex held, drops it. Possible return values:
++ * 0 - nothing to do, it has died
++ * 1 - remove it, drop the reference and deactivate_super()
++ * 2 - remove it, drop the reference and drop_super(); we tried hard to avoid
++ * that variant, since it involved a lot of PITA, but that's the best that
++ * could've been done.
++ */
++static int pin_to_kill(struct inotify_handle *ih, struct inotify_watch *watch)
++{
++ struct super_block *sb = watch->inode->i_sb;
++ s32 wd = watch->wd;
++
++ spin_lock(&sb_lock);
++ if (sb->s_count >= S_BIAS) {
++ atomic_inc(&sb->s_active);
++ spin_unlock(&sb_lock);
++ get_inotify_watch(watch);
++ mutex_unlock(&ih->mutex);
++ return 1; /* the best outcome */
++ }
++ sb->s_count++;
++ spin_unlock(&sb_lock);
++ mutex_unlock(&ih->mutex); /* can't grab ->s_umount under it */
++ down_read(&sb->s_umount);
++ if (likely(!sb->s_root)) {
++ /* fs is already shut down; the watch is dead */
++ drop_super(sb);
++ return 0;
++ }
++ /* raced with the final deactivate_super() */
++ mutex_lock(&ih->mutex);
++ if (idr_find(&ih->idr, wd) != watch || watch->inode->i_sb != sb) {
++ /* the watch is dead */
++ mutex_unlock(&ih->mutex);
++ drop_super(sb);
++ return 0;
++ }
++ /* still alive or freed and reused with the same sb and wd; kill */
++ get_inotify_watch(watch);
++ mutex_unlock(&ih->mutex);
++ return 2;
++}
++
++static void unpin_and_kill(struct inotify_watch *watch, int how)
++{
++ struct super_block *sb = watch->inode->i_sb;
++ put_inotify_watch(watch);
++ switch (how) {
++ case 1:
++ deactivate_super(sb);
++ break;
++ case 2:
++ drop_super(sb);
++ }
++}
++
+ /**
+ * inotify_destroy - clean up and destroy an inotify instance
+ * @ih: inotify handle
+@@ -490,11 +617,15 @@ void inotify_destroy(struct inotify_hand
+ * pretty. We cannot do a simple iteration over the list, because we
+ * do not know the inode until we iterate to the watch. But we need to
+ * hold inode->inotify_mutex before ih->mutex. The following works.
++ *
++ * AV: it had to become even uglier to start working ;-/
+ */
+ while (1) {
+ struct inotify_watch *watch;
+ struct list_head *watches;
++ struct super_block *sb;
+ struct inode *inode;
++ int how;
+
+ mutex_lock(&ih->mutex);
+ watches = &ih->watches;
+@@ -503,8 +634,10 @@ void inotify_destroy(struct inotify_hand
+ break;
+ }
+ watch = list_first_entry(watches, struct inotify_watch, h_list);
+- get_inotify_watch(watch);
+- mutex_unlock(&ih->mutex);
++ sb = watch->inode->i_sb;
++ how = pin_to_kill(ih, watch);
++ if (!how)
++ continue;
+
+ inode = watch->inode;
+ mutex_lock(&inode->inotify_mutex);
+@@ -518,7 +651,7 @@ void inotify_destroy(struct inotify_hand
+
+ mutex_unlock(&ih->mutex);
+ mutex_unlock(&inode->inotify_mutex);
+- put_inotify_watch(watch);
++ unpin_and_kill(watch, how);
+ }
+
+ /* free this handle: the put matching the get in inotify_init() */
+@@ -719,7 +852,9 @@ void inotify_evict_watch(struct inotify_
+ int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
+ {
+ struct inotify_watch *watch;
++ struct super_block *sb;
+ struct inode *inode;
++ int how;
+
+ mutex_lock(&ih->mutex);
+ watch = idr_find(&ih->idr, wd);
+@@ -727,9 +862,12 @@ int inotify_rm_wd(struct inotify_handle
+ mutex_unlock(&ih->mutex);
+ return -EINVAL;
+ }
+- get_inotify_watch(watch);
++ sb = watch->inode->i_sb;
++ how = pin_to_kill(ih, watch);
++ if (!how)
++ return 0;
++
+ inode = watch->inode;
+- mutex_unlock(&ih->mutex);
+
+ mutex_lock(&inode->inotify_mutex);
+ mutex_lock(&ih->mutex);
+@@ -740,7 +878,7 @@ int inotify_rm_wd(struct inotify_handle
+
+ mutex_unlock(&ih->mutex);
+ mutex_unlock(&inode->inotify_mutex);
+- put_inotify_watch(watch);
++ unpin_and_kill(watch, how);
+
+ return 0;
+ }
+--- a/include/linux/inotify.h
++++ b/include/linux/inotify.h
+@@ -134,6 +134,8 @@ extern void inotify_remove_watch_locked(
+ struct inotify_watch *);
+ extern void get_inotify_watch(struct inotify_watch *);
+ extern void put_inotify_watch(struct inotify_watch *);
++extern int pin_inotify_watch(struct inotify_watch *);
++extern void unpin_inotify_watch(struct inotify_watch *);
+
+ #else
+
+@@ -228,6 +230,15 @@ static inline void put_inotify_watch(str
+ {
+ }
+
++extern inline int pin_inotify_watch(struct inotify_watch *watch)
++{
++ return 0;
++}
++
++extern inline void unpin_inotify_watch(struct inotify_watch *watch)
++{
++}
++
+ #endif /* CONFIG_INOTIFY */
+
+ #endif /* __KERNEL __ */
+--- a/kernel/auditfilter.c
++++ b/kernel/auditfilter.c
+@@ -1094,8 +1094,8 @@ static void audit_inotify_unregister(str
+ list_for_each_entry_safe(p, n, in_list, ilist) {
+ list_del(&p->ilist);
+ inotify_rm_watch(audit_ih, &p->wdata);
+- /* the put matching the get in audit_do_del_rule() */
+- put_inotify_watch(&p->wdata);
++ /* the unpin matching the pin in audit_do_del_rule() */
++ unpin_inotify_watch(&p->wdata);
+ }
+ }
+
+@@ -1389,9 +1389,13 @@ static inline int audit_del_rule(struct
+ /* Put parent on the inotify un-registration
+ * list. Grab a reference before releasing
+ * audit_filter_mutex, to be released in
+- * audit_inotify_unregister(). */
+- list_add(&parent->ilist, &inotify_list);
+- get_inotify_watch(&parent->wdata);
++ * audit_inotify_unregister().
++ * If filesystem is going away, just leave
++ * the sucker alone, eviction will take
++ * care of it.
++ */
++ if (pin_inotify_watch(&parent->wdata))
++ list_add(&parent->ilist, &inotify_list);
+ }
+ }
+ }
+--- a/kernel/audit_tree.c
++++ b/kernel/audit_tree.c
+@@ -24,6 +24,7 @@ struct audit_chunk {
+ struct list_head trees; /* with root here */
+ int dead;
+ int count;
++ atomic_long_t refs;
+ struct rcu_head head;
+ struct node {
+ struct list_head list;
+@@ -56,7 +57,8 @@ static LIST_HEAD(prune_list);
+ * tree is refcounted; one reference for "some rules on rules_list refer to
+ * it", one for each chunk with pointer to it.
+ *
+- * chunk is refcounted by embedded inotify_watch.
++ * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
++ * of watch contributes 1 to .refs).
+ *
+ * node.index allows to get from node.list to containing chunk.
+ * MSB of that sucker is stolen to mark taggings that we might have to
+@@ -121,6 +123,7 @@ static struct audit_chunk *alloc_chunk(i
+ INIT_LIST_HEAD(&chunk->hash);
+ INIT_LIST_HEAD(&chunk->trees);
+ chunk->count = count;
++ atomic_long_set(&chunk->refs, 1);
+ for (i = 0; i < count; i++) {
+ INIT_LIST_HEAD(&chunk->owners[i].list);
+ chunk->owners[i].index = i;
+@@ -129,9 +132,8 @@ static struct audit_chunk *alloc_chunk(i
+ return chunk;
+ }
+
+-static void __free_chunk(struct rcu_head *rcu)
++static void free_chunk(struct audit_chunk *chunk)
+ {
+- struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
+ int i;
+
+ for (i = 0; i < chunk->count; i++) {
+@@ -141,14 +143,16 @@ static void __free_chunk(struct rcu_head
+ kfree(chunk);
+ }
+
+-static inline void free_chunk(struct audit_chunk *chunk)
++void audit_put_chunk(struct audit_chunk *chunk)
+ {
+- call_rcu(&chunk->head, __free_chunk);
++ if (atomic_long_dec_and_test(&chunk->refs))
++ free_chunk(chunk);
+ }
+
+-void audit_put_chunk(struct audit_chunk *chunk)
++static void __put_chunk(struct rcu_head *rcu)
+ {
+- put_inotify_watch(&chunk->watch);
++ struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
++ audit_put_chunk(chunk);
+ }
+
+ enum {HASH_SIZE = 128};
+@@ -176,7 +180,7 @@ struct audit_chunk *audit_tree_lookup(co
+
+ list_for_each_entry_rcu(p, list, hash) {
+ if (p->watch.inode == inode) {
+- get_inotify_watch(&p->watch);
++ atomic_long_inc(&p->refs);
+ return p;
+ }
+ }
+@@ -194,17 +198,49 @@ int audit_tree_match(struct audit_chunk
+
+ /* tagging and untagging inodes with trees */
+
+-static void untag_chunk(struct audit_chunk *chunk, struct node *p)
++static struct audit_chunk *find_chunk(struct node *p)
++{
++ int index = p->index & ~(1U<<31);
++ p -= index;
++ return container_of(p, struct audit_chunk, owners[0]);
++}
++
++static void untag_chunk(struct node *p)
+ {
++ struct audit_chunk *chunk = find_chunk(p);
+ struct audit_chunk *new;
+ struct audit_tree *owner;
+ int size = chunk->count - 1;
+ int i, j;
+
++ if (!pin_inotify_watch(&chunk->watch)) {
++ /*
++ * Filesystem is shutting down; all watches are getting
++ * evicted, just take it off the node list for this
++ * tree and let the eviction logics take care of the
++ * rest.
++ */
++ owner = p->owner;
++ if (owner->root == chunk) {
++ list_del_init(&owner->same_root);
++ owner->root = NULL;
++ }
++ list_del_init(&p->list);
++ p->owner = NULL;
++ put_tree(owner);
++ return;
++ }
++
++ spin_unlock(&hash_lock);
++
++ /*
++ * pin_inotify_watch() succeeded, so the watch won't go away
++ * from under us.
++ */
+ mutex_lock(&chunk->watch.inode->inotify_mutex);
+ if (chunk->dead) {
+ mutex_unlock(&chunk->watch.inode->inotify_mutex);
+- return;
++ goto out;
+ }
+
+ owner = p->owner;
+@@ -221,7 +257,7 @@ static void untag_chunk(struct audit_chu
+ inotify_evict_watch(&chunk->watch);
+ mutex_unlock(&chunk->watch.inode->inotify_mutex);
+ put_inotify_watch(&chunk->watch);
+- return;
++ goto out;
+ }
+
+ new = alloc_chunk(size);
+@@ -263,7 +299,7 @@ static void untag_chunk(struct audit_chu
+ inotify_evict_watch(&chunk->watch);
+ mutex_unlock(&chunk->watch.inode->inotify_mutex);
+ put_inotify_watch(&chunk->watch);
+- return;
++ goto out;
+
+ Fallback:
+ // do the best we can
+@@ -277,6 +313,9 @@ Fallback:
+ put_tree(owner);
+ spin_unlock(&hash_lock);
+ mutex_unlock(&chunk->watch.inode->inotify_mutex);
++out:
++ unpin_inotify_watch(&chunk->watch);
++ spin_lock(&hash_lock);
+ }
+
+ static int create_chunk(struct inode *inode, struct audit_tree *tree)
+@@ -387,13 +426,6 @@ static int tag_chunk(struct inode *inode
+ return 0;
+ }
+
+-static struct audit_chunk *find_chunk(struct node *p)
+-{
+- int index = p->index & ~(1U<<31);
+- p -= index;
+- return container_of(p, struct audit_chunk, owners[0]);
+-}
+-
+ static void kill_rules(struct audit_tree *tree)
+ {
+ struct audit_krule *rule, *next;
+@@ -431,17 +463,10 @@ static void prune_one(struct audit_tree
+ spin_lock(&hash_lock);
+ while (!list_empty(&victim->chunks)) {
+ struct node *p;
+- struct audit_chunk *chunk;
+
+ p = list_entry(victim->chunks.next, struct node, list);
+- chunk = find_chunk(p);
+- get_inotify_watch(&chunk->watch);
+- spin_unlock(&hash_lock);
+-
+- untag_chunk(chunk, p);
+
+- put_inotify_watch(&chunk->watch);
+- spin_lock(&hash_lock);
++ untag_chunk(p);
+ }
+ spin_unlock(&hash_lock);
+ put_tree(victim);
+@@ -469,7 +494,6 @@ static void trim_marked(struct audit_tre
+
+ while (!list_empty(&tree->chunks)) {
+ struct node *node;
+- struct audit_chunk *chunk;
+
+ node = list_entry(tree->chunks.next, struct node, list);
+
+@@ -477,14 +501,7 @@ static void trim_marked(struct audit_tre
+ if (!(node->index & (1U<<31)))
+ break;
+
+- chunk = find_chunk(node);
+- get_inotify_watch(&chunk->watch);
+- spin_unlock(&hash_lock);
+-
+- untag_chunk(chunk, node);
+-
+- put_inotify_watch(&chunk->watch);
+- spin_lock(&hash_lock);
++ untag_chunk(node);
+ }
+ if (!tree->root && !tree->goner) {
+ tree->goner = 1;
+@@ -878,7 +895,7 @@ static void handle_event(struct inotify_
+ static void destroy_watch(struct inotify_watch *watch)
+ {
+ struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch);
+- free_chunk(chunk);
++ call_rcu(&chunk->head, __put_chunk);
+ }
+
+ static const struct inotify_operations rtree_inotify_ops = {
--- /dev/null
+From 6e8ba729b6332f2a75572e02480936d2b51665aa Mon Sep 17 00:00:00 2001
+From: Jarkko Nikula <jarkko.nikula@nokia.com>
+Date: Wed, 19 Nov 2008 15:36:17 -0800
+Subject: gpiolib: extend gpio label column width in debugfs file
+
+From: Jarkko Nikula <jarkko.nikula@nokia.com>
+
+commit 6e8ba729b6332f2a75572e02480936d2b51665aa upstream.
+
+Various drivers already use labels bigger than 12 bytes. Most of them
+fit well under 20 bytes, but make the column width exact so that
+oversized labels don't mess up the output alignment.
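+
+For reference, "%-20.20s" both pads short labels out to 20 columns and
+truncates longer ones at 20 characters, so alignment holds either way.
+A small userspace sketch with made-up labels:
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+            printf("(%-20.20s)\n", "sysfs");  /* padded to 20 columns */
+            printf("(%-20.20s)\n", "a-very-long-gpio-label-name");
+                                              /* truncated at 20 chars */
+            return 0;
+    }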
+
+Signed-off-by: Jarkko Nikula <jarkko.nikula@nokia.com>
+Acked-by: David Brownell <david-b@pacbell.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpio/gpiolib.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -1049,7 +1049,7 @@ static void gpiolib_dbg_show(struct seq_
+ continue;
+
+ is_out = test_bit(FLAG_IS_OUT, &gdesc->flags);
+- seq_printf(s, " gpio-%-3d (%-12s) %s %s",
++ seq_printf(s, " gpio-%-3d (%-20.20s) %s %s",
+ gpio, gdesc->label,
+ is_out ? "out" : "in ",
+ chip->get
--- /dev/null
+From 62ee0540f5e5a804b79cae8b3c0185a85f02436b Mon Sep 17 00:00:00 2001
+From: Doug Chapman <doug.chapman@hp.com>
+Date: Wed, 5 Nov 2008 17:57:52 -0500
+Subject: IA64: fix boot panic caused by offline CPUs
+
+From: Doug Chapman <doug.chapman@hp.com>
+
+commit 62ee0540f5e5a804b79cae8b3c0185a85f02436b upstream.
+
+This fixes a regression introduced by 2c6e6db41f01b6b4eb98809350827c9678996698
+"Minimize per_cpu reservations." That patch incorrectly used information about
+what CPUs are possible that was not yet initialized by ACPI. The end result
+was that per_cpu structures for offline CPUs were not initialized causing a
+NULL pointer reference.
+
+Since we cannot do the full acpi_boot_init() call any earlier, the simplest
+fix is to just parse the MADT for SAPIC entries early to find the CPU
+info. This should also allow for some cleanup of the code added by the
+"Minimize per_cpu reservations". This patch just fixes the regressions, the
+cleanup will come in a later patch.
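+
+The resulting setup_arch() ordering, sketched from the diff below:
+
+    acpi_table_init();
+    early_acpi_boot_init();   /* partial MADT walk; sees offline CPUs too */
+    acpi_numa_init();
+    prefill_possible_map();   /* moved here from setup_per_cpu_areas() */
+    per_cpu_scan_finalize(...);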
+
+Signed-off-by: Doug Chapman <doug.chapman@hp.com>
+Signed-off-by: Alex Chiang <achiang@hp.com>
+CC: Robin Holt <holt@sgi.com>
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Cc: Rafael J. Wysocki <rjw@sisk.pl>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/ia64/kernel/acpi.c | 29 ++++++++++++++++++++++++-----
+ arch/ia64/kernel/setup.c | 7 ++++---
+ 2 files changed, 28 insertions(+), 8 deletions(-)
+
+--- a/arch/ia64/kernel/acpi.c
++++ b/arch/ia64/kernel/acpi.c
+@@ -656,6 +656,30 @@ static int __init acpi_parse_fadt(struct
+ return 0;
+ }
+
++int __init early_acpi_boot_init(void)
++{
++ int ret;
++
++ /*
++ * do a partial walk of MADT to determine how many CPUs
++ * we have including offline CPUs
++ */
++ if (acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
++ printk(KERN_ERR PREFIX "Can't find MADT\n");
++ return 0;
++ }
++
++ ret = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC,
++ acpi_parse_lsapic, NR_CPUS);
++ if (ret < 1)
++ printk(KERN_ERR PREFIX
++ "Error parsing MADT - no LAPIC entries\n");
++
++ return 0;
++}
++
++
++
+ int __init acpi_boot_init(void)
+ {
+
+@@ -679,11 +703,6 @@ int __init acpi_boot_init(void)
+ printk(KERN_ERR PREFIX
+ "Error parsing LAPIC address override entry\n");
+
+- if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_SAPIC, acpi_parse_lsapic, NR_CPUS)
+- < 1)
+- printk(KERN_ERR PREFIX
+- "Error parsing MADT - no LAPIC entries\n");
+-
+ if (acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_NMI, acpi_parse_lapic_nmi, 0)
+ < 0)
+ printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
+--- a/arch/ia64/kernel/setup.c
++++ b/arch/ia64/kernel/setup.c
+@@ -549,8 +549,12 @@ setup_arch (char **cmdline_p)
+ #ifdef CONFIG_ACPI
+ /* Initialize the ACPI boot-time table parser */
+ acpi_table_init();
++ early_acpi_boot_init();
+ # ifdef CONFIG_ACPI_NUMA
+ acpi_numa_init();
++#ifdef CONFIG_ACPI_HOTPLUG_CPU
++ prefill_possible_map();
++#endif
+ per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
+ 32 : cpus_weight(early_cpu_possible_map)),
+ additional_cpus > 0 ? additional_cpus : 0);
+@@ -841,9 +845,6 @@ void __init
+ setup_per_cpu_areas (void)
+ {
+ /* start_kernel() requires this... */
+-#ifdef CONFIG_ACPI_HOTPLUG_CPU
+- prefill_possible_map();
+-#endif
+ }
+
+ /*
--- /dev/null
+From a8215b81cc31cf267506bc6a4a4bfe93f4ca1652 Mon Sep 17 00:00:00 2001
+From: Matthew Garrett <mjg@redhat.com>
+Date: Tue, 11 Nov 2008 09:40:42 -0500
+Subject: Input: atkbd - add keymap quirk for Inventec Symphony systems
+
+From: Matthew Garrett <mjg@redhat.com>
+
+commit a8215b81cc31cf267506bc6a4a4bfe93f4ca1652 upstream.
+
+The Zepto 6615WD laptop (rebranded Inventec Symphony system) needs a
+key release quirk for its volume keys to work. This patch adds the
+quirk to the atkbd driver.
+
+Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=460237
+
+Signed-off-by: Matthew Garrett <mjg@redhat.com>
+Signed-off-by: Adel Gadllah <adel.gadllah@gmail.com>
+Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/input/keyboard/atkbd.c | 25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+--- a/drivers/input/keyboard/atkbd.c
++++ b/drivers/input/keyboard/atkbd.c
+@@ -868,6 +868,22 @@ static void atkbd_hp_keymap_fixup(struct
+ }
+
+ /*
++ * Inventec system with broken key release on volume keys
++ */
++static void atkbd_inventec_keymap_fixup(struct atkbd *atkbd)
++{
++ const unsigned int forced_release_keys[] = {
++ 0xae, 0xb0,
++ };
++ int i;
++
++ if (atkbd->set == 2)
++ for (i = 0; i < ARRAY_SIZE(forced_release_keys); i++)
++ __set_bit(forced_release_keys[i],
++ atkbd->force_release_mask);
++}
++
++/*
+ * atkbd_set_keycode_table() initializes keyboard's keycode table
+ * according to the selected scancode set
+ */
+@@ -1478,6 +1494,15 @@ static struct dmi_system_id atkbd_dmi_qu
+ .callback = atkbd_setup_fixup,
+ .driver_data = atkbd_hp_keymap_fixup,
+ },
++ {
++ .ident = "Inventec Symphony",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "SYMPHONY 6.0/7.0"),
++ },
++ .callback = atkbd_setup_fixup,
++ .driver_data = atkbd_inventec_keymap_fixup,
++ },
+ { }
+ };
+
--- /dev/null
+From f652c521e0bec2e70cf123f47e80117a7e6ed139 Mon Sep 17 00:00:00 2001
+From: Arjan van de Ven <arjan@infradead.org>
+Date: Wed, 19 Nov 2008 15:36:19 -0800
+Subject: lib/scatterlist.c: fix kunmap() argument in sg_miter_stop()
+
+From: Arjan van de Ven <arjan@infradead.org>
+
+commit f652c521e0bec2e70cf123f47e80117a7e6ed139 upstream.
+
+kunmap() takes as its argument the struct page that originally got
+kmap()'d; however, the sg_miter_stop() function passed it the kernel
+virtual address instead, resulting in weird stuff.
+
+Somehow I ended up fixing this bug by accident while looking for a bug in
+the same area.
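+
+For reference, the underlying contract: kmap()/kunmap() pair on the
+struct page, while kunmap_atomic() takes the mapped address. A minimal
+sketch of the correct non-atomic pairing:
+
+    void *addr = kmap(page);   /* map the page */
+    memcpy(addr, buf, len);    /* use the virtual address */
+    kunmap(page);              /* unmap with the page, not addr */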
+
+Reported-by: kerneloops.org
+Acked-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Hugh Dickins <hugh@veritas.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ lib/scatterlist.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/lib/scatterlist.c
++++ b/lib/scatterlist.c
+@@ -395,7 +395,7 @@ void sg_miter_stop(struct sg_mapping_ite
+ WARN_ON(!irqs_disabled());
+ kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ);
+ } else
+- kunmap(miter->addr);
++ kunmap(miter->page);
+
+ miter->page = NULL;
+ miter->addr = NULL;
--- /dev/null
+From 7a3f5134a8f5bd7fa38b5645eef05e8a4eb62951 Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Wed, 26 Nov 2008 12:46:22 -0800
+Subject: parisc: fix kernel crash when unwinding a userspace process
+
+From: Helge Deller <deller@gmx.de>
+
+commit 7a3f5134a8f5bd7fa38b5645eef05e8a4eb62951 upstream.
+
+Any user on existing parisc 32- and 64-bit kernels can easily crash
+the kernel and thereby cause a denial of service.
+A simple testcase is available here:
+ http://gsyprf10.external.hp.com/~deller/crash.tgz
+
+The problem is introduced by the fact that the handle_interruption()
+crash handler calls the show_regs() function, which in turn tries to
+unwind the stack by calling parisc_show_stack(). Since the stack contains
+userspace addresses, trying to unwind the stack is dangerous and useless
+and leads to the crash.
+
+The fix is trivial: for userspace processes,
+a) avoid unwinding the stack, and
+b) avoid resolving userspace addresses to kernel symbol names.
+
+While touching this code, I converted print_symbol() to %pS
+printk formats and made parisc_show_stack() static.
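+
+The %pS conversion replaces the old two-call pattern with a single
+printk(); from the diff below:
+
+    /* before */
+    printk(level);
+    print_symbol(" IAOQ[0]: %s\n", regs->iaoq[0]);
+
+    /* after */
+    printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);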
+
+An initial patch for this was written by Kyle McMartin back in August:
+http://marc.info/?l=linux-parisc&m=121805168830283&w=2
+
+Compile and run-tested with a 64bit parisc kernel.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Cc: Grant Grundler <grundler@parisc-linux.org>
+Cc: Matthew Wilcox <matthew@wil.cx>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/parisc/kernel/traps.c | 41 ++++++++++++++++++++---------------------
+ 1 file changed, 20 insertions(+), 21 deletions(-)
+
+--- a/arch/parisc/kernel/traps.c
++++ b/arch/parisc/kernel/traps.c
+@@ -24,7 +24,6 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/console.h>
+-#include <linux/kallsyms.h>
+ #include <linux/bug.h>
+
+ #include <asm/assembly.h>
+@@ -51,7 +50,7 @@
+ DEFINE_SPINLOCK(pa_dbit_lock);
+ #endif
+
+-void parisc_show_stack(struct task_struct *t, unsigned long *sp,
++static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
+ struct pt_regs *regs);
+
+ static int printbinary(char *buf, unsigned long x, int nbits)
+@@ -121,18 +120,19 @@ static void print_fr(char *level, struct
+
+ void show_regs(struct pt_regs *regs)
+ {
+- int i;
++ int i, user;
+ char *level;
+ unsigned long cr30, cr31;
+
+- level = user_mode(regs) ? KERN_DEBUG : KERN_CRIT;
++ user = user_mode(regs);
++ level = user ? KERN_DEBUG : KERN_CRIT;
+
+ print_gr(level, regs);
+
+ for (i = 0; i < 8; i += 4)
+ PRINTREGS(level, regs->sr, "sr", RFMT, i);
+
+- if (user_mode(regs))
++ if (user)
+ print_fr(level, regs);
+
+ cr30 = mfctl(30);
+@@ -145,14 +145,18 @@ void show_regs(struct pt_regs *regs)
+ printk("%s CPU: %8d CR30: " RFMT " CR31: " RFMT "\n",
+ level, current_thread_info()->cpu, cr30, cr31);
+ printk("%s ORIG_R28: " RFMT "\n", level, regs->orig_r28);
+- printk(level);
+- print_symbol(" IAOQ[0]: %s\n", regs->iaoq[0]);
+- printk(level);
+- print_symbol(" IAOQ[1]: %s\n", regs->iaoq[1]);
+- printk(level);
+- print_symbol(" RP(r2): %s\n", regs->gr[2]);
+
+- parisc_show_stack(current, NULL, regs);
++ if (user) {
++ printk("%s IAOQ[0]: " RFMT "\n", level, regs->iaoq[0]);
++ printk("%s IAOQ[1]: " RFMT "\n", level, regs->iaoq[1]);
++ printk("%s RP(r2): " RFMT "\n", level, regs->gr[2]);
++ } else {
++ printk("%s IAOQ[0]: %pS\n", level, (void *) regs->iaoq[0]);
++ printk("%s IAOQ[1]: %pS\n", level, (void *) regs->iaoq[1]);
++ printk("%s RP(r2): %pS\n", level, (void *) regs->gr[2]);
++
++ parisc_show_stack(current, NULL, regs);
++ }
+ }
+
+
+@@ -173,20 +177,15 @@ static void do_show_stack(struct unwind_
+ break;
+
+ if (__kernel_text_address(info->ip)) {
+- printk("%s [<" RFMT ">] ", (i&0x3)==1 ? KERN_CRIT : "", info->ip);
+-#ifdef CONFIG_KALLSYMS
+- print_symbol("%s\n", info->ip);
+-#else
+- if ((i & 0x03) == 0)
+- printk("\n");
+-#endif
++ printk(KERN_CRIT " [<" RFMT ">] %pS\n",
++ info->ip, (void *) info->ip);
+ i++;
+ }
+ }
+- printk("\n");
++ printk(KERN_CRIT "\n");
+ }
+
+-void parisc_show_stack(struct task_struct *task, unsigned long *sp,
++static void parisc_show_stack(struct task_struct *task, unsigned long *sp,
+ struct pt_regs *regs)
+ {
+ struct unwind_frame_info info;
cgroups-fix-a-serious-bug-in-cgroupstats.patch
ecryptfs-allocate-up-to-two-scatterlists-for-crypto-ops-on-keys.patch
pxa2xx_spi-bugfix-full-duplex-dma-data-corruption.patch
+fbdev-clean-the-penguin-s-dirty-feet.patch
+gpiolib-extend-gpio-label-column-width-in-debugfs-file.patch
+lib-scatterlist.c-fix-kunmap-argument-in-sg_miter_stop.patch
+sysvipc-fix-the-ipc-structures-initialization.patch
+parisc-fix-kernel-crash-when-unwinding-a-userspace-process.patch
+epoll-introduce-resource-usage-limits.patch
+fix-inotify-watch-removal-umount-races.patch
+ia64-fix-boot-panic-caused-by-offline-cpus.patch
+v4l-dvb-add-some-missing-compat32-ioctls.patch
+input-atkbd-add-keymap-quirk-for-inventec-symphony-systems.patch
--- /dev/null
+From e00b4ff7ebf098b11b11be403921c1cf41d9e321 Mon Sep 17 00:00:00 2001
+From: Nadia Derbey <Nadia.Derbey@bull.net>
+Date: Wed, 19 Nov 2008 15:36:08 -0800
+Subject: sysvipc: fix the ipc structures initialization
+
+From: Nadia Derbey <Nadia.Derbey@bull.net>
+
+commit e00b4ff7ebf098b11b11be403921c1cf41d9e321 upstream.
+
+A problem was found while reviewing the code after Bugzilla bug
+http://bugzilla.kernel.org/show_bug.cgi?id=11796.
+
+In ipc_addid(), the newly allocated ipc structure is inserted into the
+ipcs tree (i.e. made visible to readers) without locking it. This is not
+correct, since its initialization continues after it has been inserted in
+the tree.
+
+This patch moves the ipc structure lock initialization + locking before
+the actual insertion.
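+
+With this patch the ordering becomes: initialize and take the lock
+first, then publish the structure, as in the diff below:
+
+    spin_lock_init(&new->lock);
+    new->deleted = 0;
+    rcu_read_lock();
+    spin_lock(&new->lock);
+    err = idr_get_new(&ids->ipcs_idr, new, &id);  /* now visible */
+
+so any reader that finds the new structure must first acquire new->lock
+and therefore waits until its initialization is complete.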
+
+Signed-off-by: Nadia Derbey <Nadia.Derbey@bull.net>
+Reported-by: Clement Calmels <cboulte@gmail.com>
+Cc: Manfred Spraul <manfred@colorfullife.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ ipc/util.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+--- a/ipc/util.c
++++ b/ipc/util.c
+@@ -266,9 +266,17 @@ int ipc_addid(struct ipc_ids* ids, struc
+ if (ids->in_use >= size)
+ return -ENOSPC;
+
++ spin_lock_init(&new->lock);
++ new->deleted = 0;
++ rcu_read_lock();
++ spin_lock(&new->lock);
++
+ err = idr_get_new(&ids->ipcs_idr, new, &id);
+- if (err)
++ if (err) {
++ spin_unlock(&new->lock);
++ rcu_read_unlock();
+ return err;
++ }
+
+ ids->in_use++;
+
+@@ -280,10 +288,6 @@ int ipc_addid(struct ipc_ids* ids, struc
+ ids->seq = 0;
+
+ new->id = ipc_buildid(id, new->seq);
+- spin_lock_init(&new->lock);
+- new->deleted = 0;
+- rcu_read_lock();
+- spin_lock(&new->lock);
+ return id;
+ }
+
--- /dev/null
+From c7f09db6852d85e7f76322815051aad1c88d08cf Mon Sep 17 00:00:00 2001
+From: Gregor Jasny <jasny@vidsoft.de>
+Date: Thu, 23 Oct 2008 09:55:22 -0300
+Subject: V4L/DVB (9352): Add some missing compat32 ioctls
+
+From: Gregor Jasny <jasny@vidsoft.de>
+
+commit c7f09db6852d85e7f76322815051aad1c88d08cf upstream.
+
+This patch adds the missing compat ioctls that are needed to
+operate Skype in combination with libv4l and an MJPEG-only camera.
+
+If you think it's trivial enough, please submit it to -stable, too.
+
+Signed-off-by: Gregor Jasny <gjasny@web.de>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/media/video/compat_ioctl32.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/media/video/compat_ioctl32.c
++++ b/drivers/media/video/compat_ioctl32.c
+@@ -867,6 +867,7 @@ long v4l_compat_ioctl32(struct file *fil
+ case VIDIOC_STREAMON32:
+ case VIDIOC_STREAMOFF32:
+ case VIDIOC_G_PARM:
++ case VIDIOC_S_PARM:
+ case VIDIOC_G_STD:
+ case VIDIOC_S_STD:
+ case VIDIOC_G_TUNER:
+@@ -885,6 +886,8 @@ long v4l_compat_ioctl32(struct file *fil
+ case VIDIOC_S_INPUT32:
+ case VIDIOC_TRY_FMT32:
+ case VIDIOC_S_HW_FREQ_SEEK:
++ case VIDIOC_ENUM_FRAMESIZES:
++ case VIDIOC_ENUM_FRAMEINTERVALS:
+ ret = do_video_ioctl(file, cmd, arg);
+ break;
+