git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
.34 patches
author Greg Kroah-Hartman <gregkh@suse.de>
Tue, 1 Jun 2010 23:00:36 +0000 (16:00 -0700)
committer Greg Kroah-Hartman <gregkh@suse.de>
Tue, 1 Jun 2010 23:00:36 +0000 (16:00 -0700)
12 files changed:
queue-2.6.34/fix-racy-use-of-anon_inode_getfd-in-perf_event.c.patch [new file with mode: 0644]
queue-2.6.34/libata-disable-atapi-an-by-default.patch [new file with mode: 0644]
queue-2.6.34/libata-don-t-flush-dcache-on-slab-pages.patch [new file with mode: 0644]
queue-2.6.34/oprofile-remove-double-ring-buffering.patch [new file with mode: 0644]
queue-2.6.34/oprofile-x86-fix-uninitialized-counter-usage-during-cpu-hotplug.patch [new file with mode: 0644]
queue-2.6.34/perf-fix-endianness-argument-compatibility-with-opt_boolean-and-introduce-opt_incr.patch [new file with mode: 0644]
queue-2.6.34/perf-fix-exit-vs-event-groups.patch [new file with mode: 0644]
queue-2.6.34/perf-fix-exit-vs-perf_format_group.patch [new file with mode: 0644]
queue-2.6.34/perf-top-properly-notify-the-user-that-vmlinux-is-missing.patch [new file with mode: 0644]
queue-2.6.34/posix_timer-fix-error-path-in-timer_create.patch [new file with mode: 0644]
queue-2.6.34/series [new file with mode: 0644]
queue-2.6.34/vfs-fix-recent-breakage-of-fs_reval_dot.patch [new file with mode: 0644]

diff --git a/queue-2.6.34/fix-racy-use-of-anon_inode_getfd-in-perf_event.c.patch b/queue-2.6.34/fix-racy-use-of-anon_inode_getfd-in-perf_event.c.patch
new file mode 100644 (file)
index 0000000..1ad6042
--- /dev/null
@@ -0,0 +1,100 @@
+From ea635c64e007061f6468ece5cc9cc62d41d4ecf2 Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Wed, 26 May 2010 17:40:29 -0400
+Subject: Fix racy use of anon_inode_getfd() in perf_event.c
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit ea635c64e007061f6468ece5cc9cc62d41d4ecf2 upstream.
+
+Once anon_inode_getfd() is called, you can't expect *anything* about
+the struct file that the descriptor points to - another thread might be
+doing whatever it likes with the descriptor table at that point.
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/perf_event.c |   40 ++++++++++++++++++++++------------------
+ 1 file changed, 22 insertions(+), 18 deletions(-)
+
+--- a/kernel/perf_event.c
++++ b/kernel/perf_event.c
+@@ -4811,8 +4811,8 @@ SYSCALL_DEFINE5(perf_event_open,
+       struct perf_event_context *ctx;
+       struct file *event_file = NULL;
+       struct file *group_file = NULL;
++      int event_fd;
+       int fput_needed = 0;
+-      int fput_needed2 = 0;
+       int err;
+       /* for future expandability... */
+@@ -4833,12 +4833,18 @@ SYSCALL_DEFINE5(perf_event_open,
+                       return -EINVAL;
+       }
++      event_fd = get_unused_fd_flags(O_RDWR);
++      if (event_fd < 0)
++              return event_fd;
++
+       /*
+        * Get the target context (task or percpu):
+        */
+       ctx = find_get_context(pid, cpu);
+-      if (IS_ERR(ctx))
+-              return PTR_ERR(ctx);
++      if (IS_ERR(ctx)) {
++              err = PTR_ERR(ctx);
++              goto err_fd;
++      }
+       /*
+        * Look up the group leader (we will attach this event to it):
+@@ -4878,13 +4884,11 @@ SYSCALL_DEFINE5(perf_event_open,
+       if (IS_ERR(event))
+               goto err_put_context;
+-      err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR);
+-      if (err < 0)
+-              goto err_free_put_context;
+-
+-      event_file = fget_light(err, &fput_needed2);
+-      if (!event_file)
++      event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
++      if (IS_ERR(event_file)) {
++              err = PTR_ERR(event_file);
+               goto err_free_put_context;
++      }
+       if (flags & PERF_FLAG_FD_OUTPUT) {
+               err = perf_event_set_output(event, group_fd);
+@@ -4905,19 +4909,19 @@ SYSCALL_DEFINE5(perf_event_open,
+       list_add_tail(&event->owner_entry, &current->perf_event_list);
+       mutex_unlock(&current->perf_event_mutex);
+-err_fput_free_put_context:
+-      fput_light(event_file, fput_needed2);
++      fput_light(group_file, fput_needed);
++      fd_install(event_fd, event_file);
++      return event_fd;
++err_fput_free_put_context:
++      fput(event_file);
+ err_free_put_context:
+-      if (err < 0)
+-              free_event(event);
+-
++      free_event(event);
+ err_put_context:
+-      if (err < 0)
+-              put_ctx(ctx);
+-
+       fput_light(group_file, fput_needed);
+-
++      put_ctx(ctx);
++err_fd:
++      put_unused_fd(event_fd);
+       return err;
+ }
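
The ordering the fix above establishes is worth spelling out on its own, since
it applies to any code that hands an anonymous-inode descriptor to userspace.
A minimal sketch under assumed names (example_fops, priv - illustrative only,
not the perf code), using the same kernel helpers the patch itself uses:

    #include <linux/anon_inodes.h>
    #include <linux/err.h>
    #include <linux/file.h>
    #include <linux/fs.h>

    /*
     * Reserve the descriptor number first, build the struct file
     * privately, and only publish it with fd_install() once no error
     * path remains.  Until fd_install() runs, no other thread can
     * reach the file through the descriptor table, so the race
     * described in the changelog cannot occur.
     */
    static int example_open_anon_fd(const struct file_operations *example_fops,
                                    void *priv)
    {
            struct file *file;
            int fd;

            fd = get_unused_fd_flags(O_RDWR);       /* reserve an fd number */
            if (fd < 0)
                    return fd;

            file = anon_inode_getfile("[example]", example_fops, priv, O_RDWR);
            if (IS_ERR(file)) {
                    put_unused_fd(fd);
                    return PTR_ERR(file);
            }

            /* ...any remaining setup that can still fail goes here, with
             * fput() + put_unused_fd() on its error path... */

            fd_install(fd, file);                   /* publish: the fd is now live */
            return fd;
    }
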
diff --git a/queue-2.6.34/libata-disable-atapi-an-by-default.patch b/queue-2.6.34/libata-disable-atapi-an-by-default.patch
new file mode 100644 (file)
index 0000000..9e078d7
--- /dev/null
@@ -0,0 +1,59 @@
+From e7ecd435692ca9bde9d124be30b3a26e672ea6c2 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Wed, 19 May 2010 15:38:58 +0200
+Subject: libata: disable ATAPI AN by default
+
+From: Tejun Heo <tj@kernel.org>
+
+commit e7ecd435692ca9bde9d124be30b3a26e672ea6c2 upstream.
+
+There are ATAPI devices which raise AN when hit by commands issued by
+open().  This leads to an infinite loop of AN -> MEDIA_CHANGE uevent ->
+udev open() to check media -> AN.
+
+Neither the ACS nor the SerialATA standard defines in which cases
+ATAPI devices are supposed to raise or not raise AN.  They both list media
+insertion events as a possible use case for ATAPI ANs, but there is no
+clear description of what constitutes such events.  As such, it seems
+a bit too naive to export ANs directly to userland as MEDIA_CHANGE
+events without further verification (which should behave similarly to
+Windows, as that apparently is the only thing that some hardware vendors
+are testing against).
+
+This patch adds libata.atapi_an module parameter and disables ATAPI AN
+by default for now.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Cc: Kay Sievers <kay.sievers@vrfy.org>
+Cc: Nick Bowler <nbowler@elliptictech.com>
+Cc: David Zeuthen <david@fubar.dk>
+Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/ata/libata-core.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/ata/libata-core.c
++++ b/drivers/ata/libata-core.c
+@@ -160,6 +160,10 @@ int libata_allow_tpm = 0;
+ module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
+ MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");
++static int atapi_an;
++module_param(atapi_an, int, 0444);
++MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");
++
+ MODULE_AUTHOR("Jeff Garzik");
+ MODULE_DESCRIPTION("Library module for ATA devices");
+ MODULE_LICENSE("GPL");
+@@ -2572,7 +2576,8 @@ int ata_dev_configure(struct ata_device
+                * to enable ATAPI AN to discern between PHY status
+                * changed notifications and ATAPI ANs.
+                */
+-              if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
++              if (atapi_an &&
++                  (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
+                   (!sata_pmp_attached(ap) ||
+                    sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
+                       unsigned int err_mask;
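
Since the notification is now off by default, users whose drives raise ANs
reliably have to re-enable it explicitly. A usage note (not part of the
patch): with libata built into the kernel, boot with the parameter named in
the changelog on the kernel command line,

    libata.atapi_an=1

or pass atapi_an=1 when loading a modular libata.
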
diff --git a/queue-2.6.34/libata-don-t-flush-dcache-on-slab-pages.patch b/queue-2.6.34/libata-don-t-flush-dcache-on-slab-pages.patch
new file mode 100644 (file)
index 0000000..98c49b6
--- /dev/null
@@ -0,0 +1,33 @@
+From 3842e835490cdf17013b30a788f6311bdcfd0571 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
+Date: Sun, 21 Mar 2010 22:52:23 +0100
+Subject: libata: don't flush dcache on slab pages
+
+From: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
+
+commit 3842e835490cdf17013b30a788f6311bdcfd0571 upstream.
+
+page_mapping() checks this via VM_BUG_ON(PageSlab(page)), so we BUG here
+with the corresponding debugging turned on.
+
+Future TODO: replace this with a flush_dcache_page_for_pio() API
+
+Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
+Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/ata/libata-sff.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/ata/libata-sff.c
++++ b/drivers/ata/libata-sff.c
+@@ -894,7 +894,7 @@ static void ata_pio_sector(struct ata_qu
+                                      do_write);
+       }
+-      if (!do_write)
++      if (!do_write && !PageSlab(page))
+               flush_dcache_page(page);
+       qc->curbytes += qc->sect_size;
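
The TODO in the changelog above suggests centralizing this check in a
dedicated helper. A hedged sketch of what such a helper could look like -
flush_dcache_page_for_pio() does not exist in this tree, the name comes from
the commit message - built only from the page-flag test the patch adds inline:

    #include <linux/highmem.h>      /* flush_dcache_page() */
    #include <linux/mm.h>           /* PageSlab() */

    /* hypothetical helper named after the changelog's TODO */
    static inline void flush_dcache_page_for_pio(struct page *page)
    {
            /*
             * Slab pages have no user mappings to keep coherent, and
             * page_mapping() would trip VM_BUG_ON(PageSlab(page)) with
             * the debugging options enabled.
             */
            if (!PageSlab(page))
                    flush_dcache_page(page);
    }
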
diff --git a/queue-2.6.34/oprofile-remove-double-ring-buffering.patch b/queue-2.6.34/oprofile-remove-double-ring-buffering.patch
new file mode 100644 (file)
index 0000000..9c51ae9
--- /dev/null
@@ -0,0 +1,145 @@
+From cb6e943ccf19ab6d3189147e9d625a992e016084 Mon Sep 17 00:00:00 2001
+From: Andi Kleen <andi@firstfloor.org>
+Date: Thu, 1 Apr 2010 03:17:25 +0200
+Subject: oprofile: remove double ring buffering
+
+From: Andi Kleen <andi@firstfloor.org>
+
+commit cb6e943ccf19ab6d3189147e9d625a992e016084 upstream.
+
+oprofile used a double buffer scheme for its cpu event buffer
+to avoid races on reading with the old locked ring buffer.
+
+But that is obsolete now with the new ring buffer, so simply
+use a single buffer. This greatly simplifies the code and avoids
+a lot of sample drops on large runs, especially with call graph.
+
+Based on suggestions from Steven Rostedt
+
+For stable kernels from v2.6.32, but not earlier.
+
+Signed-off-by: Andi Kleen <ak@linux.intel.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Robert Richter <robert.richter@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/oprofile/cpu_buffer.c |   63 ++++++++----------------------------------
+ 1 file changed, 13 insertions(+), 50 deletions(-)
+
+--- a/drivers/oprofile/cpu_buffer.c
++++ b/drivers/oprofile/cpu_buffer.c
+@@ -30,23 +30,7 @@
+ #define OP_BUFFER_FLAGS       0
+-/*
+- * Read and write access is using spin locking. Thus, writing to the
+- * buffer by NMI handler (x86) could occur also during critical
+- * sections when reading the buffer. To avoid this, there are 2
+- * buffers for independent read and write access. Read access is in
+- * process context only, write access only in the NMI handler. If the
+- * read buffer runs empty, both buffers are swapped atomically. There
+- * is potentially a small window during swapping where the buffers are
+- * disabled and samples could be lost.
+- *
+- * Using 2 buffers is a little bit overhead, but the solution is clear
+- * and does not require changes in the ring buffer implementation. It
+- * can be changed to a single buffer solution when the ring buffer
+- * access is implemented as non-locking atomic code.
+- */
+-static struct ring_buffer *op_ring_buffer_read;
+-static struct ring_buffer *op_ring_buffer_write;
++static struct ring_buffer *op_ring_buffer;
+ DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
+ static void wq_sync_buffer(struct work_struct *work);
+@@ -68,12 +52,9 @@ void oprofile_cpu_buffer_inc_smpl_lost(v
+ void free_cpu_buffers(void)
+ {
+-      if (op_ring_buffer_read)
+-              ring_buffer_free(op_ring_buffer_read);
+-      op_ring_buffer_read = NULL;
+-      if (op_ring_buffer_write)
+-              ring_buffer_free(op_ring_buffer_write);
+-      op_ring_buffer_write = NULL;
++      if (op_ring_buffer)
++              ring_buffer_free(op_ring_buffer);
++      op_ring_buffer = NULL;
+ }
+ #define RB_EVENT_HDR_SIZE 4
+@@ -86,11 +67,8 @@ int alloc_cpu_buffers(void)
+       unsigned long byte_size = buffer_size * (sizeof(struct op_sample) +
+                                                RB_EVENT_HDR_SIZE);
+-      op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
+-      if (!op_ring_buffer_read)
+-              goto fail;
+-      op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
+-      if (!op_ring_buffer_write)
++      op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS);
++      if (!op_ring_buffer)
+               goto fail;
+       for_each_possible_cpu(i) {
+@@ -162,16 +140,11 @@ struct op_sample
+ *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size)
+ {
+       entry->event = ring_buffer_lock_reserve
+-              (op_ring_buffer_write, sizeof(struct op_sample) +
++              (op_ring_buffer, sizeof(struct op_sample) +
+                size * sizeof(entry->sample->data[0]));
+-      if (entry->event)
+-              entry->sample = ring_buffer_event_data(entry->event);
+-      else
+-              entry->sample = NULL;
+-
+-      if (!entry->sample)
++      if (!entry->event)
+               return NULL;
+-
++      entry->sample = ring_buffer_event_data(entry->event);
+       entry->size = size;
+       entry->data = entry->sample->data;
+@@ -180,25 +153,16 @@ struct op_sample
+ int op_cpu_buffer_write_commit(struct op_entry *entry)
+ {
+-      return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event);
++      return ring_buffer_unlock_commit(op_ring_buffer, entry->event);
+ }
+ struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
+ {
+       struct ring_buffer_event *e;
+-      e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+-      if (e)
+-              goto event;
+-      if (ring_buffer_swap_cpu(op_ring_buffer_read,
+-                               op_ring_buffer_write,
+-                               cpu))
++      e = ring_buffer_consume(op_ring_buffer, cpu, NULL);
++      if (!e)
+               return NULL;
+-      e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL);
+-      if (e)
+-              goto event;
+-      return NULL;
+-event:
+       entry->event = e;
+       entry->sample = ring_buffer_event_data(e);
+       entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample))
+@@ -209,8 +173,7 @@ event:
+ unsigned long op_cpu_buffer_entries(int cpu)
+ {
+-      return ring_buffer_entries_cpu(op_ring_buffer_read, cpu)
+-              + ring_buffer_entries_cpu(op_ring_buffer_write, cpu);
++      return ring_buffer_entries_cpu(op_ring_buffer, cpu);
+ }
+ static int
diff --git a/queue-2.6.34/oprofile-x86-fix-uninitialized-counter-usage-during-cpu-hotplug.patch b/queue-2.6.34/oprofile-x86-fix-uninitialized-counter-usage-during-cpu-hotplug.patch
new file mode 100644 (file)
index 0000000..9e6da84
--- /dev/null
@@ -0,0 +1,181 @@
+From 2623a1d55a6260c855e1f6d1895900b50b40a896 Mon Sep 17 00:00:00 2001
+From: Robert Richter <robert.richter@amd.com>
+Date: Mon, 3 May 2010 19:44:32 +0200
+Subject: oprofile/x86: fix uninitialized counter usage during cpu hotplug
+
+From: Robert Richter <robert.richter@amd.com>
+
+commit 2623a1d55a6260c855e1f6d1895900b50b40a896 upstream.
+
+This fixes a NULL pointer dereference that is triggered when taking a
+cpu offline after oprofile was initialized, e.g.:
+
+ $ opcontrol --init
+ $ opcontrol --start-daemon
+ $ opcontrol --shutdown
+ $ opcontrol --deinit
+ $ echo 0 > /sys/devices/system/cpu/cpu1/online
+
+See the crash dump below. Though the counter has been disabled, the cpu
+notifier is still active and tries to use already freed counter data.
+
+This fix is for linux-stable. To properly fix this, the hotplug code
+must be rewritten. Thus I will leave a WARN_ON_ONCE() message with
+this patch.
+
+BUG: unable to handle kernel NULL pointer dereference at (null)
+IP: [<ffffffff8132ad57>] op_amd_stop+0x2d/0x8e
+PGD 0
+Oops: 0000 [#1] SMP
+last sysfs file: /sys/devices/system/cpu/cpu1/online
+CPU 1
+Modules linked in:
+
+Pid: 0, comm: swapper Not tainted 2.6.34-rc5-oprofile-x86_64-standard-00210-g8c00f06 #16 Anaheim/Anaheim
+RIP: 0010:[<ffffffff8132ad57>]  [<ffffffff8132ad57>] op_amd_stop+0x2d/0x8e
+RSP: 0018:ffff880001843f28  EFLAGS: 00010006
+RAX: 0000000000000000 RBX: 0000000000000000 RCX: dead000000200200
+RDX: ffff880001843f68 RSI: dead000000100100 RDI: 0000000000000000
+RBP: ffff880001843f48 R08: 0000000000000000 R09: ffff880001843f08
+R10: ffffffff8102c9a5 R11: ffff88000184ea80 R12: 0000000000000000
+R13: ffff88000184f6c0 R14: 0000000000000000 R15: 0000000000000000
+FS:  00007fec6a92e6f0(0000) GS:ffff880001840000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 000000008005003b
+CR2: 0000000000000000 CR3: 000000000163b000 CR4: 00000000000006e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
+Process swapper (pid: 0, threadinfo ffff88042fcd8000, task ffff88042fcd51d0)
+Stack:
+ ffff880001843f48 0000000000000001 ffff88042e9f7d38 ffff880001843f68
+<0> ffff880001843f58 ffffffff8132a602 ffff880001843f98 ffffffff810521b3
+<0> ffff880001843f68 ffff880001843f68 ffff880001843f88 ffff88042fcd9fd8
+Call Trace:
+ <IRQ>
+ [<ffffffff8132a602>] nmi_cpu_stop+0x21/0x23
+ [<ffffffff810521b3>] generic_smp_call_function_single_interrupt+0xdf/0x11b
+ [<ffffffff8101804f>] smp_call_function_single_interrupt+0x22/0x31
+ [<ffffffff810029f3>] call_function_single_interrupt+0x13/0x20
+ <EOI>
+ [<ffffffff8102c9a5>] ? wake_up_process+0x10/0x12
+ [<ffffffff81008701>] ? default_idle+0x22/0x37
+ [<ffffffff8100896d>] c1e_idle+0xdf/0xe6
+ [<ffffffff813f1170>] ? atomic_notifier_call_chain+0x13/0x15
+ [<ffffffff810012fb>] cpu_idle+0x4b/0x7e
+ [<ffffffff813e8a4e>] start_secondary+0x1ae/0x1b2
+Code: 89 e5 41 55 49 89 fd 41 54 45 31 e4 53 31 db 48 83 ec 08 89 df e8 be f8 ff ff 48 98 48 83 3c c5 10 67 7a 81 00 74 1f 49 8b 45 08 <42> 8b 0c 20 0f 32 48 c1 e2 20 25 ff ff bf ff 48 09 d0 48 89 c2
+RIP  [<ffffffff8132ad57>] op_amd_stop+0x2d/0x8e
+ RSP <ffff880001843f28>
+CR2: 0000000000000000
+---[ end trace 679ac372d674b757 ]---
+Kernel panic - not syncing: Fatal exception in interrupt
+Pid: 0, comm: swapper Tainted: G      D    2.6.34-rc5-oprofile-x86_64-standard-00210-g8c00f06 #16
+Call Trace:
+ <IRQ>  [<ffffffff813ebd6a>] panic+0x9e/0x10c
+ [<ffffffff810474b0>] ? up+0x34/0x39
+ [<ffffffff81031ccc>] ? kmsg_dump+0x112/0x12c
+ [<ffffffff813eeff1>] oops_end+0x81/0x8e
+ [<ffffffff8101efee>] no_context+0x1f3/0x202
+ [<ffffffff8101f1b7>] __bad_area_nosemaphore+0x1ba/0x1e0
+ [<ffffffff81028d24>] ? enqueue_task_fair+0x16d/0x17a
+ [<ffffffff810264dc>] ? activate_task+0x42/0x53
+ [<ffffffff8102c967>] ? try_to_wake_up+0x272/0x284
+ [<ffffffff8101f1eb>] bad_area_nosemaphore+0xe/0x10
+ [<ffffffff813f0f3f>] do_page_fault+0x1c8/0x37c
+ [<ffffffff81028d24>] ? enqueue_task_fair+0x16d/0x17a
+ [<ffffffff813ee55f>] page_fault+0x1f/0x30
+ [<ffffffff8102c9a5>] ? wake_up_process+0x10/0x12
+ [<ffffffff8132ad57>] ? op_amd_stop+0x2d/0x8e
+ [<ffffffff8132ad46>] ? op_amd_stop+0x1c/0x8e
+ [<ffffffff8132a602>] nmi_cpu_stop+0x21/0x23
+ [<ffffffff810521b3>] generic_smp_call_function_single_interrupt+0xdf/0x11b
+ [<ffffffff8101804f>] smp_call_function_single_interrupt+0x22/0x31
+ [<ffffffff810029f3>] call_function_single_interrupt+0x13/0x20
+ <EOI>  [<ffffffff8102c9a5>] ? wake_up_process+0x10/0x12
+ [<ffffffff81008701>] ? default_idle+0x22/0x37
+ [<ffffffff8100896d>] c1e_idle+0xdf/0xe6
+ [<ffffffff813f1170>] ? atomic_notifier_call_chain+0x13/0x15
+ [<ffffffff810012fb>] cpu_idle+0x4b/0x7e
+ [<ffffffff813e8a4e>] start_secondary+0x1ae/0x1b2
+------------[ cut here ]------------
+WARNING: at /local/rrichter/.source/linux/arch/x86/kernel/smp.c:118 native_smp_send_reschedule+0x27/0x53()
+Hardware name: Anaheim
+Modules linked in:
+Pid: 0, comm: swapper Tainted: G      D    2.6.34-rc5-oprofile-x86_64-standard-00210-g8c00f06 #16
+Call Trace:
+ <IRQ>  [<ffffffff81017f32>] ? native_smp_send_reschedule+0x27/0x53
+ [<ffffffff81030ee2>] warn_slowpath_common+0x77/0xa4
+ [<ffffffff81030f1e>] warn_slowpath_null+0xf/0x11
+ [<ffffffff81017f32>] native_smp_send_reschedule+0x27/0x53
+ [<ffffffff8102634b>] resched_task+0x60/0x62
+ [<ffffffff8102653a>] check_preempt_curr_idle+0x10/0x12
+ [<ffffffff8102c8ea>] try_to_wake_up+0x1f5/0x284
+ [<ffffffff8102c986>] default_wake_function+0xd/0xf
+ [<ffffffff810a110d>] pollwake+0x57/0x5a
+ [<ffffffff8102c979>] ? default_wake_function+0x0/0xf
+ [<ffffffff81026be5>] __wake_up_common+0x46/0x75
+ [<ffffffff81026ed0>] __wake_up+0x38/0x50
+ [<ffffffff81031694>] printk_tick+0x39/0x3b
+ [<ffffffff8103ac37>] update_process_times+0x3f/0x5c
+ [<ffffffff8104dc63>] tick_periodic+0x5d/0x69
+ [<ffffffff8104dc90>] tick_handle_periodic+0x21/0x71
+ [<ffffffff81018fd0>] smp_apic_timer_interrupt+0x82/0x95
+ [<ffffffff81002853>] apic_timer_interrupt+0x13/0x20
+ [<ffffffff81030cb5>] ? panic_blink_one_second+0x0/0x7b
+ [<ffffffff813ebdd6>] ? panic+0x10a/0x10c
+ [<ffffffff810474b0>] ? up+0x34/0x39
+ [<ffffffff81031ccc>] ? kmsg_dump+0x112/0x12c
+ [<ffffffff813eeff1>] ? oops_end+0x81/0x8e
+ [<ffffffff8101efee>] ? no_context+0x1f3/0x202
+ [<ffffffff8101f1b7>] ? __bad_area_nosemaphore+0x1ba/0x1e0
+ [<ffffffff81028d24>] ? enqueue_task_fair+0x16d/0x17a
+ [<ffffffff810264dc>] ? activate_task+0x42/0x53
+ [<ffffffff8102c967>] ? try_to_wake_up+0x272/0x284
+ [<ffffffff8101f1eb>] ? bad_area_nosemaphore+0xe/0x10
+ [<ffffffff813f0f3f>] ? do_page_fault+0x1c8/0x37c
+ [<ffffffff81028d24>] ? enqueue_task_fair+0x16d/0x17a
+ [<ffffffff813ee55f>] ? page_fault+0x1f/0x30
+ [<ffffffff8102c9a5>] ? wake_up_process+0x10/0x12
+ [<ffffffff8132ad57>] ? op_amd_stop+0x2d/0x8e
+ [<ffffffff8132ad46>] ? op_amd_stop+0x1c/0x8e
+ [<ffffffff8132a602>] ? nmi_cpu_stop+0x21/0x23
+ [<ffffffff810521b3>] ? generic_smp_call_function_single_interrupt+0xdf/0x11b
+ [<ffffffff8101804f>] ? smp_call_function_single_interrupt+0x22/0x31
+ [<ffffffff810029f3>] ? call_function_single_interrupt+0x13/0x20
+ <EOI>  [<ffffffff8102c9a5>] ? wake_up_process+0x10/0x12
+ [<ffffffff81008701>] ? default_idle+0x22/0x37
+ [<ffffffff8100896d>] ? c1e_idle+0xdf/0xe6
+ [<ffffffff813f1170>] ? atomic_notifier_call_chain+0x13/0x15
+ [<ffffffff810012fb>] ? cpu_idle+0x4b/0x7e
+ [<ffffffff813e8a4e>] ? start_secondary+0x1ae/0x1b2
+---[ end trace 679ac372d674b758 ]---
+
+Cc: Andi Kleen <andi@firstfloor.org>
+Signed-off-by: Robert Richter <robert.richter@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+--- a/arch/x86/oprofile/nmi_int.c
++++ b/arch/x86/oprofile/nmi_int.c
+@@ -95,7 +95,10 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs)
+ static void nmi_cpu_start(void *dummy)
+ {
+       struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
+-      model->start(msrs);
++      if (!msrs->controls)
++              WARN_ON_ONCE(1);
++      else
++              model->start(msrs);
+ }
+ static int nmi_start(void)
+@@ -107,7 +110,10 @@ static int nmi_start(void)
+ static void nmi_cpu_stop(void *dummy)
+ {
+       struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
+-      model->stop(msrs);
++      if (!msrs->controls)
++              WARN_ON_ONCE(1);
++      else
++              model->stop(msrs);
+ }
+ static void nmi_stop(void)
diff --git a/queue-2.6.34/perf-fix-endianness-argument-compatibility-with-opt_boolean-and-introduce-opt_incr.patch b/queue-2.6.34/perf-fix-endianness-argument-compatibility-with-opt_boolean-and-introduce-opt_incr.patch
new file mode 100644 (file)
index 0000000..dd13d25
--- /dev/null
@@ -0,0 +1,537 @@
+From c05556421742eb47f80301767653a4bcb19de9de Mon Sep 17 00:00:00 2001
+From: Ian Munsie <imunsie@au.ibm.com>
+Date: Tue, 13 Apr 2010 18:37:33 +1000
+Subject: perf: Fix endianness argument compatibility with OPT_BOOLEAN() and introduce OPT_INCR()
+
+From: Ian Munsie <imunsie@au.ibm.com>
+
+commit c05556421742eb47f80301767653a4bcb19de9de upstream.
+
+Parsing an option from the command line with OPT_BOOLEAN on a
+bool data type would not work on a big-endian machine due to the
+manner in which the boolean was being cast into an int and
+incremented. For example, running 'perf probe --list' on a
+PowerPC machine would fail to properly set the list_events bool
+and would therefore print out the usage information and
+terminate.
+
+This patch makes OPT_BOOLEAN work as expected with a bool
+datatype. For cases where the original OPT_BOOLEAN was
+intentionally being used to increment an int each time it was
+passed in on the command line, this patch introduces OPT_INCR
+with the old behaviour of OPT_BOOLEAN (the verbose variable is
+currently the only such example of this).
+
+I have reviewed every use of OPT_BOOLEAN to verify that a true
+C99 bool was passed. Where integers were used, I verified that
+they were only being used for boolean logic and changed them to
+bools to ensure that they would not be mistakenly used as ints.
+The major exception was the verbose variable which now uses
+OPT_INCR instead of OPT_BOOLEAN.
+
+Signed-off-by: Ian Munsie <imunsie@au.ibm.com>
+Acked-by: David S. Miller <davem@davemloft.net>
+Cc: <stable@kernel.org> # NOTE: won't apply to .3[34].x cleanly, please backport
+Cc: Git development list <git@vger.kernel.org>
+Cc: Ian Munsie <imunsie@au1.ibm.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
+Cc: Rusty Russell <rusty@rustcorp.com.au>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Eric B Munson <ebmunson@us.ibm.com>
+Cc: Valdis.Kletnieks@vt.edu
+Cc: WANG Cong <amwang@redhat.com>
+Cc: Thiago Farina <tfransosi@gmail.com>
+Cc: Masami Hiramatsu <mhiramat@redhat.com>
+Cc: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
+Cc: Jaswinder Singh Rajput <jaswinderrajput@gmail.com>
+Cc: Arjan van de Ven <arjan@linux.intel.com>
+Cc: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Tom Zanussi <tzanussi@gmail.com>
+Cc: Anton Blanchard <anton@samba.org>
+Cc: John Kacur <jkacur@redhat.com>
+Cc: Li Zefan <lizf@cn.fujitsu.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+LKML-Reference: <1271147857-11604-1-git-send-email-imunsie@au.ibm.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ tools/perf/bench/mem-memcpy.c       |    2 +-
+ tools/perf/bench/sched-messaging.c  |    4 ++--
+ tools/perf/builtin-annotate.c       |    8 ++++----
+ tools/perf/builtin-buildid-cache.c  |    2 +-
+ tools/perf/builtin-buildid-list.c   |    4 ++--
+ tools/perf/builtin-diff.c           |    4 ++--
+ tools/perf/builtin-help.c           |    2 +-
+ tools/perf/builtin-lock.c           |    2 +-
+ tools/perf/builtin-probe.c          |    2 +-
+ tools/perf/builtin-record.c         |   24 ++++++++++++------------
+ tools/perf/builtin-report.c         |    6 +++---
+ tools/perf/builtin-sched.c          |    6 +++---
+ tools/perf/builtin-stat.c           |   10 +++++-----
+ tools/perf/builtin-timechart.c      |    2 +-
+ tools/perf/builtin-top.c            |   14 +++++++-------
+ tools/perf/builtin-trace.c          |    2 +-
+ tools/perf/util/debug.c             |    2 +-
+ tools/perf/util/debug.h             |    3 ++-
+ tools/perf/util/parse-options.c     |    6 ++++++
+ tools/perf/util/parse-options.h     |    4 +++-
+ tools/perf/util/trace-event-parse.c |    2 +-
+ tools/perf/util/trace-event.h       |    3 ++-
+ 22 files changed, 62 insertions(+), 52 deletions(-)
+
+--- a/tools/perf/bench/mem-memcpy.c
++++ b/tools/perf/bench/mem-memcpy.c
+@@ -24,7 +24,7 @@
+ static const char     *length_str     = "1MB";
+ static const char     *routine        = "default";
+-static int            use_clock       = 0;
++static bool           use_clock       = false;
+ static int            clock_fd;
+ static const struct option options[] = {
+--- a/tools/perf/bench/sched-messaging.c
++++ b/tools/perf/bench/sched-messaging.c
+@@ -31,9 +31,9 @@
+ #define DATASIZE 100
+-static int use_pipes = 0;
++static bool use_pipes = false;
+ static unsigned int loops = 100;
+-static unsigned int thread_mode = 0;
++static bool thread_mode = false;
+ static unsigned int num_groups = 10;
+ struct sender_context {
+--- a/tools/perf/builtin-annotate.c
++++ b/tools/perf/builtin-annotate.c
+@@ -29,11 +29,11 @@
+ static char           const *input_name = "perf.data";
+-static int            force;
++static bool           force;
+-static int            full_paths;
++static bool           full_paths;
+-static int            print_line;
++static bool           print_line;
+ struct sym_hist {
+       u64             sum;
+@@ -584,7 +584,7 @@ static const struct option options[] = {
+       OPT_STRING('s', "symbol", &sym_hist_filter, "symbol",
+                   "symbol to annotate"),
+       OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+-      OPT_BOOLEAN('v', "verbose", &verbose,
++      OPT_INCR('v', "verbose", &verbose,
+                   "be more verbose (show symbol address, etc)"),
+       OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+                   "dump raw trace in ASCII"),
+--- a/tools/perf/builtin-buildid-cache.c
++++ b/tools/perf/builtin-buildid-cache.c
+@@ -27,7 +27,7 @@ static const struct option buildid_cache
+                  "file list", "file(s) to add"),
+       OPT_STRING('r', "remove", &remove_name_list_str, "file list",
+                   "file(s) to remove"),
+-      OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose"),
++      OPT_INCR('v', "verbose", &verbose, "be more verbose"),
+       OPT_END()
+ };
+--- a/tools/perf/builtin-buildid-list.c
++++ b/tools/perf/builtin-buildid-list.c
+@@ -16,7 +16,7 @@
+ #include "util/symbol.h"
+ static char const *input_name = "perf.data";
+-static int force;
++static bool force;
+ static bool with_hits;
+ static const char * const buildid_list_usage[] = {
+@@ -29,7 +29,7 @@ static const struct option options[] = {
+       OPT_STRING('i', "input", &input_name, "file",
+                   "input file name"),
+       OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
+-      OPT_BOOLEAN('v', "verbose", &verbose,
++      OPT_INCR('v', "verbose", &verbose,
+                   "be more verbose"),
+       OPT_END()
+ };
+--- a/tools/perf/builtin-diff.c
++++ b/tools/perf/builtin-diff.c
+@@ -19,7 +19,7 @@
+ static char const *input_old = "perf.data.old",
+                 *input_new = "perf.data";
+ static char     diff__default_sort_order[] = "dso,symbol";
+-static int  force;
++static bool  force;
+ static bool show_displacement;
+ static int perf_session__add_hist_entry(struct perf_session *self,
+@@ -188,7 +188,7 @@ static const char * const diff_usage[] =
+ };
+ static const struct option options[] = {
+-      OPT_BOOLEAN('v', "verbose", &verbose,
++      OPT_INCR('v', "verbose", &verbose,
+                   "be more verbose (show symbol address, etc)"),
+       OPT_BOOLEAN('m', "displacement", &show_displacement,
+                   "Show position displacement relative to baseline"),
+--- a/tools/perf/builtin-help.c
++++ b/tools/perf/builtin-help.c
+@@ -29,7 +29,7 @@ enum help_format {
+       HELP_FORMAT_WEB,
+ };
+-static int show_all = 0;
++static bool show_all = false;
+ static enum help_format help_format = HELP_FORMAT_MAN;
+ static struct option builtin_help_options[] = {
+       OPT_BOOLEAN('a', "all", &show_all, "print all available commands"),
+--- a/tools/perf/builtin-lock.c
++++ b/tools/perf/builtin-lock.c
+@@ -744,7 +744,7 @@ static const char * const lock_usage[] =
+ static const struct option lock_options[] = {
+       OPT_STRING('i', "input", &input_name, "file", "input file name"),
+-      OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
++      OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
+       OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
+       OPT_END()
+ };
+--- a/tools/perf/builtin-probe.c
++++ b/tools/perf/builtin-probe.c
+@@ -162,7 +162,7 @@ static const char * const probe_usage[]
+ };
+ static const struct option options[] = {
+-      OPT_BOOLEAN('v', "verbose", &verbose,
++      OPT_INCR('v', "verbose", &verbose,
+                   "be more verbose (show parsed arguments, etc)"),
+ #ifndef NO_DWARF_SUPPORT
+       OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
+--- a/tools/perf/builtin-record.c
++++ b/tools/perf/builtin-record.c
+@@ -39,19 +39,19 @@ static int                 output;
+ static const char             *output_name                    = "perf.data";
+ static int                    group                           =      0;
+ static unsigned int           realtime_prio                   =      0;
+-static int                    raw_samples                     =      0;
+-static int                    system_wide                     =      0;
++static bool                   raw_samples                     =  false;
++static bool                   system_wide                     =  false;
+ static int                    profile_cpu                     =     -1;
+ static pid_t                  target_pid                      =     -1;
+ static pid_t                  child_pid                       =     -1;
+-static int                    inherit                         =      1;
+-static int                    force                           =      0;
+-static int                    append_file                     =      0;
+-static int                    call_graph                      =      0;
+-static int                    inherit_stat                    =      0;
+-static int                    no_samples                      =      0;
+-static int                    sample_address                  =      0;
+-static int                    multiplex                       =      0;
++static bool                   inherit                         =   true;
++static bool                   force                           =  false;
++static bool                   append_file                     =  false;
++static bool                   call_graph                      =  false;
++static bool                   inherit_stat                    =  false;
++static bool                   no_samples                      =  false;
++static bool                   sample_address                  =  false;
++static bool                   multiplex                       =  false;
+ static int                    multiplex_fd                    =     -1;
+ static long                   samples                         =      0;
+@@ -451,7 +451,7 @@ static int __cmd_record(int argc, const
+                       rename(output_name, oldname);
+               }
+       } else {
+-              append_file = 0;
++              append_file = false;
+       }
+       flags = O_CREAT|O_RDWR;
+@@ -676,7 +676,7 @@ static const struct option options[] = {
+                   "number of mmap data pages"),
+       OPT_BOOLEAN('g', "call-graph", &call_graph,
+                   "do call-graph (stack chain/backtrace) recording"),
+-      OPT_BOOLEAN('v', "verbose", &verbose,
++      OPT_INCR('v', "verbose", &verbose,
+                   "be more verbose (show counter open errors, etc)"),
+       OPT_BOOLEAN('s', "stat", &inherit_stat,
+                   "per thread counts"),
+--- a/tools/perf/builtin-report.c
++++ b/tools/perf/builtin-report.c
+@@ -33,11 +33,11 @@
+ static char           const *input_name = "perf.data";
+-static int            force;
++static bool           force;
+ static bool           hide_unresolved;
+ static bool           dont_use_callchains;
+-static int            show_threads;
++static bool           show_threads;
+ static struct perf_read_values        show_threads_values;
+ static char           default_pretty_printing_style[] = "normal";
+@@ -400,7 +400,7 @@ static const char * const report_usage[]
+ static const struct option options[] = {
+       OPT_STRING('i', "input", &input_name, "file",
+                   "input file name"),
+-      OPT_BOOLEAN('v', "verbose", &verbose,
++      OPT_INCR('v', "verbose", &verbose,
+                   "be more verbose (show symbol address, etc)"),
+       OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+                   "dump raw trace in ASCII"),
+--- a/tools/perf/builtin-sched.c
++++ b/tools/perf/builtin-sched.c
+@@ -1790,7 +1790,7 @@ static const char * const sched_usage[]
+ static const struct option sched_options[] = {
+       OPT_STRING('i', "input", &input_name, "file",
+                   "input file name"),
+-      OPT_BOOLEAN('v', "verbose", &verbose,
++      OPT_INCR('v', "verbose", &verbose,
+                   "be more verbose (show symbol address, etc)"),
+       OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+                   "dump raw trace in ASCII"),
+@@ -1805,7 +1805,7 @@ static const char * const latency_usage[
+ static const struct option latency_options[] = {
+       OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
+                  "sort by key(s): runtime, switch, avg, max"),
+-      OPT_BOOLEAN('v', "verbose", &verbose,
++      OPT_INCR('v', "verbose", &verbose,
+                   "be more verbose (show symbol address, etc)"),
+       OPT_INTEGER('C', "CPU", &profile_cpu,
+                   "CPU to profile on"),
+@@ -1822,7 +1822,7 @@ static const char * const replay_usage[]
+ static const struct option replay_options[] = {
+       OPT_INTEGER('r', "repeat", &replay_repeat,
+                   "repeat the workload replay N times (-1: infinite)"),
+-      OPT_BOOLEAN('v', "verbose", &verbose,
++      OPT_INCR('v', "verbose", &verbose,
+                   "be more verbose (show symbol address, etc)"),
+       OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+                   "dump raw trace in ASCII"),
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -66,16 +66,16 @@ static struct perf_event_attr default_at
+ };
+-static int                    system_wide                     =  0;
++static bool                   system_wide                     =  false;
+ static unsigned int           nr_cpus                         =  0;
+ static int                    run_idx                         =  0;
+ static int                    run_count                       =  1;
+-static int                    inherit                         =  1;
+-static int                    scale                           =  1;
++static bool                   inherit                         =  true;
++static bool                   scale                           =  true;
+ static pid_t                  target_pid                      = -1;
+ static pid_t                  child_pid                       = -1;
+-static int                    null_run                        =  0;
++static bool                   null_run                        =  false;
+ static int                    fd[MAX_NR_CPUS][MAX_COUNTERS];
+@@ -494,7 +494,7 @@ static const struct option options[] = {
+                   "system-wide collection from all CPUs"),
+       OPT_BOOLEAN('c', "scale", &scale,
+                   "scale/normalize counters"),
+-      OPT_BOOLEAN('v', "verbose", &verbose,
++      OPT_INCR('v', "verbose", &verbose,
+                   "be more verbose (show counter open errors, etc)"),
+       OPT_INTEGER('r', "repeat", &run_count,
+                   "repeat command and print average + stddev (max: 100)"),
+--- a/tools/perf/builtin-timechart.c
++++ b/tools/perf/builtin-timechart.c
+@@ -43,7 +43,7 @@ static u64           turbo_frequency;
+ static u64            first_time, last_time;
+-static int            power_only;
++static bool           power_only;
+ struct per_pid;
+--- a/tools/perf/builtin-top.c
++++ b/tools/perf/builtin-top.c
+@@ -57,7 +57,7 @@
+ static int                    fd[MAX_NR_CPUS][MAX_COUNTERS];
+-static int                    system_wide                     =      0;
++static bool                   system_wide                     =  false;
+ static int                    default_interval                =      0;
+@@ -65,18 +65,18 @@ static int                 count_filter                    =      5;
+ static int                    print_entries;
+ static int                    target_pid                      =     -1;
+-static int                    inherit                         =      0;
++static bool                   inherit                         =  false;
+ static int                    profile_cpu                     =     -1;
+ static int                    nr_cpus                         =      0;
+ static unsigned int           realtime_prio                   =      0;
+-static int                    group                           =      0;
++static bool                   group                           =  false;
+ static unsigned int           page_size;
+ static unsigned int           mmap_pages                      =     16;
+ static int                    freq                            =   1000; /* 1 KHz */
+ static int                    delay_secs                      =      2;
+-static int                    zero                            =      0;
+-static int                    dump_symtab                     =      0;
++static bool                   zero                            =  false;
++static bool                   dump_symtab                     =  false;
+ static bool                   hide_kernel_symbols             =  false;
+ static bool                   hide_user_symbols               =  false;
+@@ -839,7 +839,7 @@ static void handle_keypress(int c)
+                       display_weighted = ~display_weighted;
+                       break;
+               case 'z':
+-                      zero = ~zero;
++                      zero = !zero;
+                       break;
+               default:
+                       break;
+@@ -1296,7 +1296,7 @@ static const struct option options[] = {
+                   "display this many functions"),
+       OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols,
+                   "hide user symbols"),
+-      OPT_BOOLEAN('v', "verbose", &verbose,
++      OPT_INCR('v', "verbose", &verbose,
+                   "be more verbose (show counter open errors, etc)"),
+       OPT_END()
+ };
+--- a/tools/perf/builtin-trace.c
++++ b/tools/perf/builtin-trace.c
+@@ -505,7 +505,7 @@ static const char * const trace_usage[]
+ static const struct option options[] = {
+       OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
+                   "dump raw trace in ASCII"),
+-      OPT_BOOLEAN('v', "verbose", &verbose,
++      OPT_INCR('v', "verbose", &verbose,
+                   "be more verbose (show symbol address, etc)"),
+       OPT_BOOLEAN('L', "Latency", &latency_format,
+                   "show latency attributes (irqs/preemption disabled, etc)"),
+--- a/tools/perf/util/debug.c
++++ b/tools/perf/util/debug.c
+@@ -12,7 +12,7 @@
+ #include "util.h"
+ int verbose = 0;
+-int dump_trace = 0;
++bool dump_trace = false;
+ int eprintf(int level, const char *fmt, ...)
+ {
+--- a/tools/perf/util/debug.h
++++ b/tools/perf/util/debug.h
+@@ -2,10 +2,11 @@
+ #ifndef __PERF_DEBUG_H
+ #define __PERF_DEBUG_H
++#include <stdbool.h>
+ #include "event.h"
+ extern int verbose;
+-extern int dump_trace;
++extern bool dump_trace;
+ int eprintf(int level,
+           const char *fmt, ...) __attribute__((format(printf, 2, 3)));
+--- a/tools/perf/util/parse-options.c
++++ b/tools/perf/util/parse-options.c
+@@ -49,6 +49,7 @@ static int get_value(struct parse_opt_ct
+                               break;
+                       /* FALLTHROUGH */
+               case OPTION_BOOLEAN:
++              case OPTION_INCR:
+               case OPTION_BIT:
+               case OPTION_SET_INT:
+               case OPTION_SET_PTR:
+@@ -73,6 +74,10 @@ static int get_value(struct parse_opt_ct
+               return 0;
+       case OPTION_BOOLEAN:
++              *(bool *)opt->value = unset ? false : true;
++              return 0;
++
++      case OPTION_INCR:
+               *(int *)opt->value = unset ? 0 : *(int *)opt->value + 1;
+               return 0;
+@@ -478,6 +483,7 @@ int usage_with_options_internal(const ch
+               case OPTION_GROUP:
+               case OPTION_BIT:
+               case OPTION_BOOLEAN:
++              case OPTION_INCR:
+               case OPTION_SET_INT:
+               case OPTION_SET_PTR:
+               case OPTION_LONG:
+--- a/tools/perf/util/parse-options.h
++++ b/tools/perf/util/parse-options.h
+@@ -8,7 +8,8 @@ enum parse_opt_type {
+       OPTION_GROUP,
+       /* options with no arguments */
+       OPTION_BIT,
+-      OPTION_BOOLEAN, /* _INCR would have been a better name */
++      OPTION_BOOLEAN,
++      OPTION_INCR,
+       OPTION_SET_INT,
+       OPTION_SET_PTR,
+       /* options with arguments (usually) */
+@@ -95,6 +96,7 @@ struct option {
+ #define OPT_GROUP(h)                { .type = OPTION_GROUP, .help = (h) }
+ #define OPT_BIT(s, l, v, h, b)      { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (b) }
+ #define OPT_BOOLEAN(s, l, v, h)     { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = (v), .help = (h) }
++#define OPT_INCR(s, l, v, h)        { .type = OPTION_INCR, .short_name = (s), .long_name = (l), .value = (v), .help = (h) }
+ #define OPT_SET_INT(s, l, v, h, i)  { .type = OPTION_SET_INT, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (i) }
+ #define OPT_SET_PTR(s, l, v, h, p)  { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) }
+ #define OPT_INTEGER(s, l, v, h)     { .type = OPTION_INTEGER, .short_name = (s), .long_name = (l), .value = (v), .help = (h) }
+--- a/tools/perf/util/trace-event-parse.c
++++ b/tools/perf/util/trace-event-parse.c
+@@ -40,7 +40,7 @@ int header_page_size_size;
+ int header_page_data_offset;
+ int header_page_data_size;
+-int latency_format;
++bool latency_format;
+ static char *input_buf;
+ static unsigned long long input_buf_ptr;
+--- a/tools/perf/util/trace-event.h
++++ b/tools/perf/util/trace-event.h
+@@ -1,6 +1,7 @@
+ #ifndef __PERF_TRACE_EVENTS_H
+ #define __PERF_TRACE_EVENTS_H
++#include <stdbool.h>
+ #include "parse-events.h"
+ #define __unused __attribute__((unused))
+@@ -241,7 +242,7 @@ extern int header_page_size_size;
+ extern int header_page_data_offset;
+ extern int header_page_data_size;
+-extern int latency_format;
++extern bool latency_format;
+ int parse_header_page(char *buf, unsigned long size);
+ int trace_parse_common_type(void *data);
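
The endianness failure that motivated OPT_INCR is easy to reproduce in
isolation. A small userspace sketch (illustrative only, not perf code) of what
the old parser effectively did - widening a 1-byte bool to an int,
incrementing it, and writing all the bytes back:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned char storage[sizeof(int)] = { 0 }; /* the bool lives in storage[0] */
            int widened;

            memcpy(&widened, storage, sizeof(widened)); /* read it as an int */
            widened += 1;                               /* "set" the flag */
            memcpy(storage, &widened, sizeof(widened)); /* write the int back */

            /*
             * Little endian: storage[0] is now 1, so the bool looks set.
             * Big endian: the +1 landed in storage[3], the least
             * significant byte, and storage[0] - the bool - stays 0.
             */
            printf("bool byte after increment: %u\n", storage[0]);
            return 0;
    }

This is why 'perf probe --list' silently fell through to the usage text on
PowerPC before the fix.
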
diff --git a/queue-2.6.34/perf-fix-exit-vs-event-groups.patch b/queue-2.6.34/perf-fix-exit-vs-event-groups.patch
new file mode 100644 (file)
index 0000000..25889e8
--- /dev/null
@@ -0,0 +1,78 @@
+From 96c21a460a37880abfbc8445d5b098dbab958a29 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Tue, 11 May 2010 16:19:10 +0200
+Subject: perf: Fix exit() vs event-groups
+
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+
+commit 96c21a460a37880abfbc8445d5b098dbab958a29 upstream.
+
+Corey reported that the value scale times of group siblings are not
+updated when the monitored task dies.
+
+The problem appears to be that we only update the group leader's
+time values; fix it by updating the whole group.
+
+Reported-by: Corey Ashford <cjashfor@linux.vnet.ibm.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+LKML-Reference: <1273588935.1810.6.camel@laptop>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/perf_event.c |   26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+--- a/kernel/perf_event.c
++++ b/kernel/perf_event.c
+@@ -262,6 +262,18 @@ static void update_event_times(struct pe
+       event->total_time_running = run_end - event->tstamp_running;
+ }
++/*
++ * Update total_time_enabled and total_time_running for all events in a group.
++ */
++static void update_group_times(struct perf_event *leader)
++{
++      struct perf_event *event;
++
++      update_event_times(leader);
++      list_for_each_entry(event, &leader->sibling_list, group_entry)
++              update_event_times(event);
++}
++
+ static struct list_head *
+ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
+ {
+@@ -327,7 +339,7 @@ list_del_event(struct perf_event *event,
+       if (event->group_leader != event)
+               event->group_leader->nr_siblings--;
+-      update_event_times(event);
++      update_group_times(event);
+       /*
+        * If event was in error state, then keep it
+@@ -509,18 +521,6 @@ retry:
+ }
+ /*
+- * Update total_time_enabled and total_time_running for all events in a group.
+- */
+-static void update_group_times(struct perf_event *leader)
+-{
+-      struct perf_event *event;
+-
+-      update_event_times(leader);
+-      list_for_each_entry(event, &leader->sibling_list, group_entry)
+-              update_event_times(event);
+-}
+-
+-/*
+  * Cross CPU call to disable a performance event
+  */
+ static void __perf_event_disable(void *info)
diff --git a/queue-2.6.34/perf-fix-exit-vs-perf_format_group.patch b/queue-2.6.34/perf-fix-exit-vs-perf_format_group.patch
new file mode 100644 (file)
index 0000000..79e5fad
--- /dev/null
@@ -0,0 +1,77 @@
+From 050735b08ca8a016bbace4445fa025b88fee770b Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 11 May 2010 11:51:53 +0200
+Subject: perf: Fix exit() vs PERF_FORMAT_GROUP
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 050735b08ca8a016bbace4445fa025b88fee770b upstream.
+
+Both Stephane and Corey reported that PERF_FORMAT_GROUP didn't
+work as expected if the task the counters were attached to quit
+before the read() call.
+
+The cause is that we unconditionally destroy the grouping when
+we remove counters from their context. Fix this by splitting the
+group destroy off from the list removal, so that
+perf_event_remove_from_context() no longer does it, and by changing
+perf_event_release() to do so.
+
+Reported-by: Corey Ashford <cjashfor@linux.vnet.ibm.com>
+Reported-by: Stephane Eranian <eranian@google.com>
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Paul Mackerras <paulus@samba.org>
+LKML-Reference: <1273571513.5605.3527.camel@twins>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/perf_event.c |   19 ++++++++++++++++---
+ 1 file changed, 16 insertions(+), 3 deletions(-)
+
+--- a/kernel/perf_event.c
++++ b/kernel/perf_event.c
+@@ -315,8 +315,6 @@ list_add_event(struct perf_event *event,
+ static void
+ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
+ {
+-      struct perf_event *sibling, *tmp;
+-
+       if (list_empty(&event->group_entry))
+               return;
+       ctx->nr_events--;
+@@ -340,6 +338,12 @@ list_del_event(struct perf_event *event,
+        */
+       if (event->state > PERF_EVENT_STATE_OFF)
+               event->state = PERF_EVENT_STATE_OFF;
++}
++
++static void
++perf_destroy_group(struct perf_event *event, struct perf_event_context *ctx)
++{
++      struct perf_event *sibling, *tmp;
+       /*
+        * If this was a group event with sibling events then
+@@ -1856,9 +1860,18 @@ int perf_event_release_kernel(struct per
+ {
+       struct perf_event_context *ctx = event->ctx;
++      /*
++       * Remove from the PMU, can't get re-enabled since we got
++       * here because the last ref went.
++       */
++      perf_event_disable(event);
++
+       WARN_ON_ONCE(ctx->parent_ctx);
+       mutex_lock(&ctx->mutex);
+-      perf_event_remove_from_context(event);
++      raw_spin_lock_irq(&ctx->lock);
++      list_del_event(event, ctx);
++      perf_destroy_group(event, ctx);
++      raw_spin_unlock_irq(&ctx->lock);
+       mutex_unlock(&ctx->mutex);
+       mutex_lock(&event->owner->perf_event_mutex);
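
For context, the user-visible side of this fix is the read() that Stephane
and Corey were doing. A hedged userspace sketch, assuming group_fd is a group
leader opened via perf_event_open() with
read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID, read after the monitored
task has exited; before this patch the sibling values returned here could be
wrong because the kernel had already torn the group apart on exit:

    #include <err.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    static void read_group(int group_fd)
    {
            /* layout for PERF_FORMAT_GROUP | PERF_FORMAT_ID, no time fields */
            struct {
                    uint64_t nr;
                    struct { uint64_t value; uint64_t id; } cntr[16];
            } data;
            uint64_t i;

            if (read(group_fd, &data, sizeof(data)) < 0)
                    err(1, "read");

            for (i = 0; i < data.nr && i < 16; i++)
                    printf("id %" PRIu64 ": %" PRIu64 "\n",
                           data.cntr[i].id, data.cntr[i].value);
    }
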
diff --git a/queue-2.6.34/perf-top-properly-notify-the-user-that-vmlinux-is-missing.patch b/queue-2.6.34/perf-top-properly-notify-the-user-that-vmlinux-is-missing.patch
new file mode 100644 (file)
index 0000000..469c662
--- /dev/null
@@ -0,0 +1,210 @@
+From b0a9ab62ab96e258a0ddd81d7fe2719c3db36006 Mon Sep 17 00:00:00 2001
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+Date: Mon, 15 Mar 2010 11:46:58 -0300
+Subject: perf top: Properly notify the user that vmlinux is missing
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+
+commit b0a9ab62ab96e258a0ddd81d7fe2719c3db36006 upstream.
+
+Before this patch this message would appear on the screen only very
+briefly, and then the screen would keep updating only at the top,
+with the number of interrupts received, etc., but no annotation would
+be performed:
+
+ [root@doppio linux-2.6-tip]# perf top -s n_tty_write > /tmp/bla
+ objdump: '[kernel.kallsyms]': No such file
+
+Now this is what the user gets:
+
+ [root@doppio linux-2.6-tip]# perf top -s n_tty_write
+ Can't annotate n_tty_write: No vmlinux file was found in the
+ path: [0] vmlinux
+ [1] /boot/vmlinux
+ [2] /boot/vmlinux-2.6.33-rc5
+ [3] /lib/modules/2.6.33-rc5/build/vmlinux
+ [4] /usr/lib/debug/lib/modules/2.6.33-rc5/vmlinux
+ [root@doppio linux-2.6-tip]#
+
+This bug was introduced when we added the automatic search for
+vmlinux; before that, the user had to specify a vmlinux
+file.
+
+Reported-by: David S. Miller <davem@davemloft.net>
+Reported-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Frédéric Weisbecker <fweisbec@gmail.com>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Paul Mackerras <paulus@samba.org>
+LKML-Reference: <1268664418-28328-2-git-send-email-acme@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ tools/perf/builtin-top.c |   33 +++++++++++++++++++++++++--------
+ tools/perf/util/symbol.c |   25 ++++++++++++-------------
+ tools/perf/util/symbol.h |   15 +++++++++++++++
+ 3 files changed, 52 insertions(+), 21 deletions(-)
+
+--- a/tools/perf/builtin-top.c
++++ b/tools/perf/builtin-top.c
+@@ -169,7 +169,7 @@ static void sig_winch_handler(int sig __
+       update_print_entries(&winsize);
+ }
+-static void parse_source(struct sym_entry *syme)
++static int parse_source(struct sym_entry *syme)
+ {
+       struct symbol *sym;
+       struct sym_entry_source *source;
+@@ -180,12 +180,21 @@ static void parse_source(struct sym_entr
+       u64 len;
+       if (!syme)
+-              return;
++              return -1;
++
++      sym = sym_entry__symbol(syme);
++      map = syme->map;
++
++      /*
++       * We can't annotate with just /proc/kallsyms
++       */
++      if (map->dso->origin == DSO__ORIG_KERNEL)
++              return -1;
+       if (syme->src == NULL) {
+               syme->src = zalloc(sizeof(*source));
+               if (syme->src == NULL)
+-                      return;
++                      return -1;
+               pthread_mutex_init(&syme->src->lock, NULL);
+       }
+@@ -195,9 +204,6 @@ static void parse_source(struct sym_entr
+               pthread_mutex_lock(&source->lock);
+               goto out_assign;
+       }
+-
+-      sym = sym_entry__symbol(syme);
+-      map = syme->map;
+       path = map->dso->long_name;
+       len = sym->end - sym->start;
+@@ -209,7 +215,7 @@ static void parse_source(struct sym_entr
+       file = popen(command, "r");
+       if (!file)
+-              return;
++              return -1;
+       pthread_mutex_lock(&source->lock);
+       source->lines_tail = &source->lines;
+@@ -245,6 +251,7 @@ static void parse_source(struct sym_entr
+ out_assign:
+       sym_filter_entry = syme;
+       pthread_mutex_unlock(&source->lock);
++      return 0;
+ }
+ static void __zero_source_counters(struct sym_entry *syme)
+@@ -990,7 +997,17 @@ static void event__process_sample(const
+       if (sym_filter_entry_sched) {
+               sym_filter_entry = sym_filter_entry_sched;
+               sym_filter_entry_sched = NULL;
+-              parse_source(sym_filter_entry);
++              if (parse_source(sym_filter_entry) < 0) {
++                      struct symbol *sym = sym_entry__symbol(sym_filter_entry);
++
++                      pr_err("Can't annotate %s", sym->name);
++                      if (sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) {
++                              pr_err(": No vmlinux file was found in the path:\n");
++                              vmlinux_path__fprintf(stderr);
++                      } else
++                              pr_err(".\n");
++                      exit(1);
++              }
+       }
+       syme = symbol__priv(al.sym);
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -18,18 +18,6 @@
+ #define NT_GNU_BUILD_ID 3
+ #endif
+-enum dso_origin {
+-      DSO__ORIG_KERNEL = 0,
+-      DSO__ORIG_JAVA_JIT,
+-      DSO__ORIG_BUILD_ID_CACHE,
+-      DSO__ORIG_FEDORA,
+-      DSO__ORIG_UBUNTU,
+-      DSO__ORIG_BUILDID,
+-      DSO__ORIG_DSO,
+-      DSO__ORIG_KMODULE,
+-      DSO__ORIG_NOT_FOUND,
+-};
+-
+ static void dsos__add(struct list_head *head, struct dso *dso);
+ static struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
+ static int dso__load_kernel_sym(struct dso *self, struct map *map,
+@@ -1025,7 +1013,7 @@ static int dso__load_sym(struct dso *sel
+                               }
+                               curr_map->map_ip = identity__map_ip;
+                               curr_map->unmap_ip = identity__map_ip;
+-                              curr_dso->origin = DSO__ORIG_KERNEL;
++                              curr_dso->origin = self->origin;
+                               map_groups__insert(kmap->kmaps, curr_map);
+                               dsos__add(&dsos__kernel, curr_dso);
+                               dso__set_loaded(curr_dso, map->type);
+@@ -1895,6 +1883,17 @@ out_fail:
+       return -1;
+ }
++size_t vmlinux_path__fprintf(FILE *fp)
++{
++      int i;
++      size_t printed = 0;
++
++      for (i = 0; i < vmlinux_path__nr_entries; ++i)
++              printed += fprintf(fp, "[%d] %s\n", i, vmlinux_path[i]);
++
++      return printed;
++}
++
+ static int setup_list(struct strlist **list, const char *list_str,
+                     const char *list_name)
+ {
+--- a/tools/perf/util/symbol.h
++++ b/tools/perf/util/symbol.h
+@@ -150,6 +150,19 @@ size_t dsos__fprintf_buildid(FILE *fp, b
+ size_t dso__fprintf_buildid(struct dso *self, FILE *fp);
+ size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp);
++
++enum dso_origin {
++      DSO__ORIG_KERNEL = 0,
++      DSO__ORIG_JAVA_JIT,
++      DSO__ORIG_BUILD_ID_CACHE,
++      DSO__ORIG_FEDORA,
++      DSO__ORIG_UBUNTU,
++      DSO__ORIG_BUILDID,
++      DSO__ORIG_DSO,
++      DSO__ORIG_KMODULE,
++      DSO__ORIG_NOT_FOUND,
++};
++
+ char dso__symtab_origin(const struct dso *self);
+ void dso__set_long_name(struct dso *self, char *name);
+ void dso__set_build_id(struct dso *self, void *build_id);
+@@ -169,4 +182,6 @@ int kallsyms__parse(const char *filename
+ int symbol__init(void);
+ bool symbol_type__is_a(char symbol_type, enum map_type map_type);
++size_t vmlinux_path__fprintf(FILE *fp);
++
+ #endif /* __PERF_SYMBOL */
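
The control flow this patch introduces (parse_source() reporting failure instead of silently returning, and the failure path listing every vmlinux location that was searched) can be illustrated with a small standalone sketch. This is not the perf source: toy_parse_source, search_path and search_path__fprintf are made-up names, and the paths are only examples of what such a search list might contain.

/*
 * Minimal sketch of the error-reporting pattern above
 * (assumed names; not the perf sources themselves).
 */
#include <stdio.h>
#include <stdlib.h>

static const char *search_path[] = {
	"vmlinux",
	"/boot/vmlinux",
	"/lib/modules/2.6.34/build/vmlinux",
	"/usr/lib/debug/lib/modules/2.6.34/vmlinux",
};

/* Print every location that was tried, mirroring vmlinux_path__fprintf(). */
static size_t search_path__fprintf(FILE *fp)
{
	size_t i, printed = 0;

	for (i = 0; i < sizeof(search_path) / sizeof(search_path[0]); ++i)
		printed += fprintf(fp, "[%zu] %s\n", i, search_path[i]);

	return printed;
}

/* Pretend annotation fails because only kallsyms is available. */
static int toy_parse_source(int have_vmlinux)
{
	return have_vmlinux ? 0 : -1;
}

int main(void)
{
	if (toy_parse_source(0) < 0) {
		fprintf(stderr, "Can't annotate symbol: no vmlinux found in the path:\n");
		search_path__fprintf(stderr);
		exit(1);
	}
	return 0;
}

Run as shown (have_vmlinux set to 0), it prints the same kind of diagnostic that perf top now gives when only /proc/kallsyms is available, rather than failing silently.
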
diff --git a/queue-2.6.34/posix_timer-fix-error-path-in-timer_create.patch b/queue-2.6.34/posix_timer-fix-error-path-in-timer_create.patch
new file mode 100644 (file)
index 0000000..b16a356
--- /dev/null
@@ -0,0 +1,57 @@
+From 45e0fffc8a7778282e6a1514a6ae3e7ae6545111 Mon Sep 17 00:00:00 2001
+From: Andrey Vagin <avagin@openvz.org>
+Date: Mon, 24 May 2010 12:15:33 -0700
+Subject: posix_timer: Fix error path in timer_create
+
+From: Andrey Vagin <avagin@openvz.org>
+
+commit 45e0fffc8a7778282e6a1514a6ae3e7ae6545111 upstream.
+
+Move CLOCK_DISPATCH(which_clock, timer_create, (new_timer)) after all
+possible EFAULT errors.
+
+*_timer_create may allocate/get resources
+(for example, posix_cpu_timer_create does get_task_struct).
+
+[ tglx: fold the remove crappy comment patch into this ]
+
+Signed-off-by: Andrey Vagin <avagin@openvz.org>
+Cc: Oleg Nesterov <oleg@tv-sign.ru>
+Cc: Pavel Emelyanov <xemul@openvz.org>
+Reviewed-by: Stanislaw Gruszka <sgruszka@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/posix-timers.c |   11 ++++-------
+ 1 file changed, 4 insertions(+), 7 deletions(-)
+
+--- a/kernel/posix-timers.c
++++ b/kernel/posix-timers.c
+@@ -559,14 +559,7 @@ SYSCALL_DEFINE3(timer_create, const cloc
+       new_timer->it_id = (timer_t) new_timer_id;
+       new_timer->it_clock = which_clock;
+       new_timer->it_overrun = -1;
+-      error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
+-      if (error)
+-              goto out;
+-      /*
+-       * return the timer_id now.  The next step is hard to
+-       * back out if there is an error.
+-       */
+       if (copy_to_user(created_timer_id,
+                        &new_timer_id, sizeof (new_timer_id))) {
+               error = -EFAULT;
+@@ -597,6 +590,10 @@ SYSCALL_DEFINE3(timer_create, const cloc
+       new_timer->sigq->info.si_tid   = new_timer->it_id;
+       new_timer->sigq->info.si_code  = SI_TIMER;
++      error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer));
++      if (error)
++              goto out;
++
+       spin_lock_irq(&current->sighand->siglock);
+       new_timer->it_signal = current->signal;
+       list_add(&new_timer->list, &current->signal->posix_timers);
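
The ordering rule behind this fix (do every step that can fail with EFAULT before calling into *_timer_create, so there is never a half-created timer to unwind) can be sketched in a few lines of user-space C. acquire_resource and fallible_copy are invented stand-ins, not kernel interfaces.

/* Toy illustration of the error-path ordering fixed above (assumed names). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct resource { int refcount; };

/* Stand-in for a *_timer_create()-style call that pins a resource. */
static struct resource *acquire_resource(void)
{
	struct resource *r = calloc(1, sizeof(*r));
	if (r)
		r->refcount = 1;
	return r;
}

/* Stand-in for copy_to_user(): may "fault" and return -1. */
static int fallible_copy(char *dst, const char *src, size_t len, int fault)
{
	if (fault)
		return -1;
	memcpy(dst, src, len);
	return 0;
}

int main(void)
{
	char out[16];
	struct resource *res;

	/* Do the step that can fail first ... */
	if (fallible_copy(out, "timer-id", sizeof("timer-id"), 0) < 0)
		return 1;	/* nothing to undo here */

	/* ... and only then acquire the resource. */
	res = acquire_resource();
	if (!res)
		return 1;

	printf("created: %s (refcount=%d)\n", out, res->refcount);
	free(res);
	return 0;
}

With the fallible copy first, the early return needs no cleanup; in the old ordering the same failure would have leaked whatever timer_create had already pinned (for posix_cpu_timer_create, a task_struct reference).
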
diff --git a/queue-2.6.34/series b/queue-2.6.34/series
new file mode 100644 (file)
index 0000000..a211c2a
--- /dev/null
@@ -0,0 +1,11 @@
+oprofile-x86-fix-uninitialized-counter-usage-during-cpu-hotplug.patch
+oprofile-remove-double-ring-buffering.patch
+perf-fix-endianness-argument-compatibility-with-opt_boolean-and-introduce-opt_incr.patch
+perf-fix-exit-vs-perf_format_group.patch
+perf-top-properly-notify-the-user-that-vmlinux-is-missing.patch
+perf-fix-exit-vs-event-groups.patch
+fix-racy-use-of-anon_inode_getfd-in-perf_event.c.patch
+vfs-fix-recent-breakage-of-fs_reval_dot.patch
+posix_timer-fix-error-path-in-timer_create.patch
+libata-disable-atapi-an-by-default.patch
+libata-don-t-flush-dcache-on-slab-pages.patch
diff --git a/queue-2.6.34/vfs-fix-recent-breakage-of-fs_reval_dot.patch b/queue-2.6.34/vfs-fix-recent-breakage-of-fs_reval_dot.patch
new file mode 100644 (file)
index 0000000..e4d3a67
--- /dev/null
@@ -0,0 +1,51 @@
+From 176306f59ac7a35369cbba87aff13e14c5916074 Mon Sep 17 00:00:00 2001
+From: Neil Brown <neilb@suse.de>
+Date: Mon, 24 May 2010 16:57:56 +1000
+Subject: VFS: fix recent breakage of FS_REVAL_DOT
+
+From: Neil Brown <neilb@suse.de>
+
+commit 176306f59ac7a35369cbba87aff13e14c5916074 upstream.
+
+Commit 1f36f774b22a0ceb7dd33eca626746c81a97b6a5 broke FS_REVAL_DOT semantics.
+
+In particular, before this patch, the command
+   ls -l
+in an NFS-mounted directory would always check if the directory on the server
+had changed and, if so, would flush and refill the pagecache for the dir.
+After this patch, the same "ls -l" will repeatedly return stale data until
+the cached attributes for the directory time out.
+
+The following patch fixes this by ensuring that d_revalidate is called by
+do_last when "." is being looked up.
+link_path_walk has already called d_revalidate, but in that case LOOKUP_OPEN
+is not set, so nfs_lookup_verify_inode chooses not to do any validation.
+
+The following patch restores the original behaviour.
+
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/namei.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1620,6 +1620,7 @@ static struct file *do_last(struct namei
+       case LAST_DOTDOT:
+               follow_dotdot(nd);
+               dir = nd->path.dentry;
++      case LAST_DOT:
+               if (nd->path.mnt->mnt_sb->s_type->fs_flags & FS_REVAL_DOT) {
+                       if (!dir->d_op->d_revalidate(dir, nd)) {
+                               error = -ESTALE;
+@@ -1627,7 +1628,6 @@ static struct file *do_last(struct namei
+                       }
+               }
+               /* fallthrough */
+-      case LAST_DOT:
+       case LAST_ROOT:
+               if (open_flag & O_CREAT)
+                       goto exit;
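
The fix is a pure case-label move: LAST_DOT now sits before the FS_REVAL_DOT check, so a trailing "." falls through the same d_revalidate call that ".." already used, instead of skipping straight to the O_CREAT handling. A compilable toy of that switch layout follows; the names and the printf output are illustrative only, not fs/namei.c.

/* Sketch of the fallthrough layout above (illustrative, not fs/namei.c). */
#include <stdio.h>

enum last_type { LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT };

static int revalidate(const char *name)
{
	printf("revalidating \"%s\"\n", name);
	return 1;	/* pretend the dentry is still valid */
}

static void do_last(enum last_type type, const char *name, int fs_reval_dot)
{
	switch (type) {
	case LAST_DOTDOT:
		printf("following \"..\"\n");
		/* fall through: ".." and now also "." reach the check below */
	case LAST_DOT:
		if (fs_reval_dot && !revalidate(name))
			return;	/* stale: bail out, like the -ESTALE path */
		/* fall through */
	case LAST_ROOT:
	case LAST_NORM:
		printf("opening \"%s\"\n", name);
	}
}

int main(void)
{
	do_last(LAST_DOT, ".", 1);
	do_last(LAST_DOTDOT, "..", 1);
	return 0;
}

Before the patch, the LAST_DOT label sat with LAST_ROOT below the check, so "." bypassed revalidation entirely; moving the label is the whole behavioural change.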