git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
start 2.6.29.2 queue
author    Chris Wright <chrisw@sous-sol.org>
          Wed, 22 Apr 2009 00:09:56 +0000 (17:09 -0700)
committer Chris Wright <chrisw@sous-sol.org>
          Wed, 22 Apr 2009 00:09:56 +0000 (17:09 -0700)
105 files changed:
queue-2.6.29/acer-wmi-blacklist-acer-aspire-one.patch [new file with mode: 0644]
queue-2.6.29/acpi-cap-off-p-state-transition-latency-from-buggy-bioses.patch [new file with mode: 0644]
queue-2.6.29/acpi-fix-of-pmtimer-overflow-that-make-cx-states-time-incorrect.patch [new file with mode: 0644]
queue-2.6.29/add-some-long-missing-capabilities-to-fs_mask.patch [new file with mode: 0644]
queue-2.6.29/af_rose-x25-sanity-check-the-maximum-user-frame-size.patch [new file with mode: 0644]
queue-2.6.29/agp-zero-pages-before-sending-to-userspace.patch [new file with mode: 0644]
queue-2.6.29/alsa-hda-add-missing-comma-in-ad1884_slave_vols.patch [new file with mode: 0644]
queue-2.6.29/alsa-hda-fix-the-cmd-cache-keys-for-amp-verbs.patch [new file with mode: 0644]
queue-2.6.29/cap_prctl-don-t-set-error-to-0-at-no_change.patch [new file with mode: 0644]
queue-2.6.29/cpumask-fix-slab-corruption-caused-by-alloc_cpumask_var_node.patch [new file with mode: 0644]
queue-2.6.29/crypto-shash-fix-unaligned-calculation-with-short-length.patch [new file with mode: 0644]
queue-2.6.29/dm-io-make-sync_io-uninterruptible.patch [new file with mode: 0644]
queue-2.6.29/dm-kcopyd-fix-callback-race.patch [new file with mode: 0644]
queue-2.6.29/dm-kcopyd-prepare-for-callback-race-fix.patch [new file with mode: 0644]
queue-2.6.29/dm-path-selector-use-module-refcount-directly.patch [new file with mode: 0644]
queue-2.6.29/dm-preserve-bi_io_vec-when-resubmitting-bios.patch [new file with mode: 0644]
queue-2.6.29/dm-raid1-switch-read_record-from-kmalloc-to-slab-to-save-memory.patch [new file with mode: 0644]
queue-2.6.29/dm-snapshot-avoid-dropping-lock-in-__find_pending_exception.patch [new file with mode: 0644]
queue-2.6.29/dm-snapshot-avoid-having-two-exceptions-for-the-same-chunk.patch [new file with mode: 0644]
queue-2.6.29/dm-snapshot-refactor-__find_pending_exception.patch [new file with mode: 0644]
queue-2.6.29/dm-table-fix-upgrade-mode-race.patch [new file with mode: 0644]
queue-2.6.29/dm-target-use-module-refcount-directly.patch [new file with mode: 0644]
queue-2.6.29/dock-fix-dereference-after-kfree.patch [new file with mode: 0644]
queue-2.6.29/drm-i915-change-dcc-tiling-detection-case-to-cover-only-mobile-parts.patch [new file with mode: 0644]
queue-2.6.29/drm-i915-check-for-dev-primary-master-before-dereference.patch [new file with mode: 0644]
queue-2.6.29/drm-i915-check-for-einval-from-vm_insert_pfn.patch [new file with mode: 0644]
queue-2.6.29/drm-i915-check-the-return-value-from-the-copy-from-user.patch [new file with mode: 0644]
queue-2.6.29/drm-i915-fix-lock-order-reversal-in-gtt-pwrite-path.patch [new file with mode: 0644]
queue-2.6.29/drm-i915-fix-lock-order-reversal-in-shmem-pread-path.patch [new file with mode: 0644]
queue-2.6.29/drm-i915-fix-lock-order-reversal-in-shmem-pwrite-path.patch [new file with mode: 0644]
queue-2.6.29/drm-i915-fix-lock-order-reversal-with-cliprects-and-cmdbuf-in-non-dri2-paths.patch [new file with mode: 0644]
queue-2.6.29/drm-i915-fix-tv-mode-setting-in-property-change.patch [new file with mode: 0644]
queue-2.6.29/drm-i915-make-gem-object-s-page-lists-refcounted-instead-of-get-free.patch [new file with mode: 0644]
queue-2.6.29/drm-i915-only-set-tv-mode-when-any-property-changed.patch [new file with mode: 0644]
queue-2.6.29/drm-i915-read-the-right-sdvo-register-when-detecting-svdo-hdmi.patch [new file with mode: 0644]
queue-2.6.29/drm-i915-sync-crt-hotplug-detection-with-intel-video-driver.patch [new file with mode: 0644]
queue-2.6.29/drm-use-pgprot_writecombine-in-gem-gtt-mapping-to-get-the-right-bits-for-pat.patch [new file with mode: 0644]
queue-2.6.29/ext4-fix-locking-typo-in-mballoc-which-could-cause-soft-lockup-hangs.patch [new file with mode: 0644]
queue-2.6.29/ext4-fix-typo-which-causes-a-memory-leak-on-error-path.patch [new file with mode: 0644]
queue-2.6.29/fbdev-fix-info-lock-deadlock-in-fbcon_event_notify.patch [new file with mode: 0644]
queue-2.6.29/fbmem-fix-fb_info-lock-and-mm-mmap_sem-circular-locking-dependency.patch [new file with mode: 0644]
queue-2.6.29/fix-i_mutex-vs.-readdir-handling-in-nfsd.patch [new file with mode: 0644]
queue-2.6.29/gso-fix-support-for-linear-packets.patch [new file with mode: 0644]
queue-2.6.29/hpt366-fix-hpt370-dma-timeouts.patch [new file with mode: 0644]
queue-2.6.29/hrtimer-fix-rq-lock-inversion.patch [new file with mode: 0644]
queue-2.6.29/hugetlbfs-return-negative-error-code-for-bad-mount-option.patch [new file with mode: 0644]
queue-2.6.29/ide-atapi-start-dma-after-issuing-a-packet-command.patch [new file with mode: 0644]
queue-2.6.29/ide-drivers-ide-ide-atapi.c-needs-linux-scatterlist.h.patch [new file with mode: 0644]
queue-2.6.29/ide-fix-code-dealing-with-sleeping-devices-in-do_ide_request.patch [new file with mode: 0644]
queue-2.6.29/input-gameport-fix-attach-driver-code.patch [new file with mode: 0644]
queue-2.6.29/ixgbe-fix-potential-memory-leak-driver-panic-issue-while-setting-up-tx-rx-ring-parameters.patch [new file with mode: 0644]
queue-2.6.29/kprobes-fix-locking-imbalance-in-kretprobes.patch [new file with mode: 0644]
queue-2.6.29/kvm-add-config_have_kvm_irqchip.patch [new file with mode: 0644]
queue-2.6.29/kvm-fix-kvm_vm_ioctl_deassign_device.patch [new file with mode: 0644]
queue-2.6.29/kvm-fix-missing-smp-tlb-flush-in-invlpg.patch [new file with mode: 0644]
queue-2.6.29/kvm-interrupt-mask-notifiers-for-ioapic.patch [new file with mode: 0644]
queue-2.6.29/kvm-is_long_mode-should-check-for-efer.lma.patch [new file with mode: 0644]
queue-2.6.29/kvm-mmu-handle-compound-pages-in-kvm_is_mmio_pfn.patch [new file with mode: 0644]
queue-2.6.29/kvm-reset-pit-irq-injection-logic-when-the-pit-irq-is-unmasked.patch [new file with mode: 0644]
queue-2.6.29/kvm-vmx-update-necessary-state-when-guest-enters-long-mode.patch [new file with mode: 0644]
queue-2.6.29/md-raid1-don-t-assume-newly-allocated-bvecs-are-initialised.patch [new file with mode: 0644]
queue-2.6.29/mips-compat-zero-upper-32-bit-of-offset_high-and-offset_low.patch [new file with mode: 0644]
queue-2.6.29/mm-define-a-unique-value-for-as_unevictable-flag.patch [new file with mode: 0644]
queue-2.6.29/mm-do_xip_mapping_read-fix-length-calculation.patch [new file with mode: 0644]
queue-2.6.29/mm-pass-correct-mm-when-growing-stack.patch [new file with mode: 0644]
queue-2.6.29/netfilter-ip-ip6-arp-_tables-fix-incorrect-loop-detection.patch [new file with mode: 0644]
queue-2.6.29/nfs-fix-the-xdr-iovec-calculation-in-nfs3_xdr_setaclargs.patch [new file with mode: 0644]
queue-2.6.29/pata_hpt37x-fix-hpt370-dma-timeouts.patch [new file with mode: 0644]
queue-2.6.29/pci-x86-detect-host-bridge-config-space-size-w-o-using-quirks.patch [new file with mode: 0644]
queue-2.6.29/posix-timers-fix-rlimit_cpu-fork.patch [new file with mode: 0644]
queue-2.6.29/posix-timers-fix-rlimit_cpu-setitimer.patch [new file with mode: 0644]
queue-2.6.29/posixtimers-sched-fix-posix-clock-monotonicity.patch [new file with mode: 0644]
queue-2.6.29/powerpc-fix-data-corrupting-bug-in-__futex_atomic_op.patch [new file with mode: 0644]
queue-2.6.29/r8169-reset-intrstatus-after-chip-reset.patch [new file with mode: 0644]
queue-2.6.29/revert-console-ascii-glyph-1-1-mapping.patch [new file with mode: 0644]
queue-2.6.29/rt2x00-fix-slab-corruption-during-rmmod.patch [new file with mode: 0644]
queue-2.6.29/sched-do-not-count-frozen-tasks-toward-load.patch [new file with mode: 0644]
queue-2.6.29/scsi-libiscsi-fix-iscsi-pool-error-path-fixlet.patch [new file with mode: 0644]
queue-2.6.29/scsi-libiscsi-fix-iscsi-pool-error-path.patch [new file with mode: 0644]
queue-2.6.29/scsi-mpt-suppress-debugobjects-warning.patch [new file with mode: 0644]
queue-2.6.29/scsi-sg-fix-iovec-bugs-introduced-by-the-block-layer-conversion.patch [new file with mode: 0644]
queue-2.6.29/scsi-sg-fix-q-queue_lock-on-scsi_error_handler-path.patch [new file with mode: 0644]
queue-2.6.29/scsi-sg-fix-races-during-device-removal.patch [new file with mode: 0644]
queue-2.6.29/scsi-sg-fix-races-with-ioctl.patch [new file with mode: 0644]
queue-2.6.29/security-smack-fix-oops-when-setting-a-size-0-smack64-xattr.patch [new file with mode: 0644]
queue-2.6.29/series [new file with mode: 0644]
queue-2.6.29/sfc-match-calls-to-netif_napi_add-and-netif_napi_del.patch [new file with mode: 0644]
queue-2.6.29/sg-avoid-blk_put_request-blk_rq_unmap_user-in-interrupt.patch [new file with mode: 0644]
queue-2.6.29/sparc64-fix-bug-in.patch [new file with mode: 0644]
queue-2.6.29/spi-spi_write_then_read-bugfixes.patch [new file with mode: 0644]
queue-2.6.29/splice-fix-deadlock-in-splicing-to-file.patch [new file with mode: 0644]
queue-2.6.29/sysctl-fix-suid_dumpable-and-lease-break-time-sysctls.patch [new file with mode: 0644]
queue-2.6.29/tracing-core-fix-early-free-of-cpumasks.patch [new file with mode: 0644]
queue-2.6.29/tty-fix-leak-in-ti-usb.patch [new file with mode: 0644]
queue-2.6.29/usb-fix-oops-in-cdc-wdm-in-case-of-malformed-descriptors.patch [new file with mode: 0644]
queue-2.6.29/usb-ftdi_sio-add-vendor-project-id-for-jeti-specbos-1201-spectrometer.patch [new file with mode: 0644]
queue-2.6.29/usb-gadget-fix-ethernet-link-reports-to-ethtool.patch [new file with mode: 0644]
queue-2.6.29/usb-usb-storage-augment-unusual_devs-entry-for-simple-tech-datafab.patch [new file with mode: 0644]
queue-2.6.29/v4l-dvb-cx88-prevent-general-protection-fault-on-rmmod.patch [new file with mode: 0644]
queue-2.6.29/vfs-skip-i_clear-state-inodes.patch [new file with mode: 0644]
queue-2.6.29/virtio-fix-suspend-when-using-virtio_balloon.patch [new file with mode: 0644]
queue-2.6.29/x86-disable-x86_ptrace_bts-for-now.patch [new file with mode: 0644]
queue-2.6.29/x86-fix-broken-irq-migration-logic-while-cleaning-up-multiple-vectors.patch [new file with mode: 0644]
queue-2.6.29/x86-pat-remove-page-granularity-tracking-for-vm_insert_pfn-maps.patch [new file with mode: 0644]
queue-2.6.29/x86-setup-mark-esi-as-clobbered-in-e820-bios-call.patch [new file with mode: 0644]

diff --git a/queue-2.6.29/acer-wmi-blacklist-acer-aspire-one.patch b/queue-2.6.29/acer-wmi-blacklist-acer-aspire-one.patch
new file mode 100644 (file)
index 0000000..e3324f7
--- /dev/null
@@ -0,0 +1,68 @@
+From stable-bounces@linux.kernel.org  Sun Apr  5 18:20:09 2009
+Date: Sun, 5 Apr 2009 18:20:04 GMT
+Message-Id: <200904051820.n35IK4HY010974@hera.kernel.org>
+From: Carlos Corbacho <carlos@strangeworlds.co.uk>
+To: jejb@kernel.org, stable@kernel.org
+Subject: acer-wmi: Blacklist Acer Aspire One
+
+upstream commit: a74dd5fdabcd34c93e17e9c7024eeb503c92b048
+
+The Aspire One's ACPI-WMI interface is a placeholder that does nothing,
+and the invalid results that we get from it are now causing userspace
+problems as acer-wmi always returns that the rfkill is enabled (i.e. the
+radio is off, when it isn't). As it's hardware controlled, acer-wmi
+isn't needed on the Aspire One either.
+
+Thanks to Andy Whitcroft at Canonical for tracking down Ubuntu's userspace
+issues to this.
+
+Signed-off-by: Carlos Corbacho <carlos@strangeworlds.co.uk>
+Reported-by: Andy Whitcroft <apw@canonical.com>
+Cc: stable@kernel.org
+Signed-off-by: Len Brown <len.brown@intel.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/platform/x86/acer-wmi.c |   25 +++++++++++++++++++++++++
+ 1 file changed, 25 insertions(+)
+
+--- a/drivers/platform/x86/acer-wmi.c
++++ b/drivers/platform/x86/acer-wmi.c
+@@ -225,6 +225,25 @@ static struct quirk_entry quirk_fujitsu_
+       .wireless = 2,
+ };
++/* The Aspire One has a dummy ACPI-WMI interface - disable it */
++static struct dmi_system_id __devinitdata acer_blacklist[] = {
++      {
++              .ident = "Acer Aspire One (SSD)",
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
++              },
++      },
++      {
++              .ident = "Acer Aspire One (HDD)",
++              .matches = {
++                      DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++                      DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"),
++              },
++      },
++      {}
++};
++
+ static struct dmi_system_id acer_quirks[] = {
+       {
+               .callback = dmi_matched,
+@@ -1254,6 +1273,12 @@ static int __init acer_wmi_init(void)
+       printk(ACER_INFO "Acer Laptop ACPI-WMI Extras\n");
++      if (dmi_check_system(acer_blacklist)) {
++              printk(ACER_INFO "Blacklisted hardware detected - "
++                              "not loading\n");
++              return -ENODEV;
++      }
++
+       find_quirks();
+       /*
diff --git a/queue-2.6.29/acpi-cap-off-p-state-transition-latency-from-buggy-bioses.patch b/queue-2.6.29/acpi-cap-off-p-state-transition-latency-from-buggy-bioses.patch
new file mode 100644 (file)
index 0000000..8d7074d
--- /dev/null
@@ -0,0 +1,59 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 03:55:09 2009
+Date: Mon, 06 Apr 2009 23:53:45 -0400 (EDT)
+From: Len Brown <lenb@kernel.org>
+To: stable@kernel.org
+Message-id: <alpine.LFD.2.00.0904062351580.5698@localhost.localdomain>
+Cc: linux-acpi@vger.kernel.org
+Subject: ACPI: cap off P-state transition latency from buggy BIOSes
+
+From: Pallipadi, Venkatesh <venkatesh.pallipadi@intel.com>
+
+upstream commit: a59d1637eb0e0a37ee0e5c92800c60abe3624e24
+
+Some BIOSes report a very high frequency-transition latency, which is plainly
+wrong on CPUs that can change frequency using the native MSR interface.
+
+One such system is IBM T42 (2327-8ZU) as reported by Owen Taylor and
+Rik van Riel.
+
+cpufreq_ondemand driver uses this transition latency to come up with a
+reasonable sampling interval to sample CPU usage and with such high
+latency value, ondemand sampling interval ends up being very high
+(0.5 sec, in this particular case), resulting in performance impact due to
+slow response to increasing frequency.
+
+Fix it by capping off the transition latency at 20uS for native MSR-based
+frequency transitions.
+
+mjg: We've confirmed that this also helps on the X31
+
+Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+Acked-by: Matthew Garrett <mjg@redhat.com>
+Signed-off-by: Len Brown <len.brown@intel.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+
+ arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
++++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+@@ -680,6 +680,18 @@ static int acpi_cpufreq_cpu_init(struct 
+                           perf->states[i].transition_latency * 1000;
+       }
++      /* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
++      if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
++          policy->cpuinfo.transition_latency > 20 * 1000) {
++              static int print_once;
++              policy->cpuinfo.transition_latency = 20 * 1000;
++              if (!print_once) {
++                      print_once = 1;
++                      printk(KERN_INFO "Capping off P-state tranision latency"
++                              " at 20 uS\n");
++              }
++      }
++
+       data->max_freq = perf->states[0].core_frequency * 1000;
+       /* table init */
+       for (i=0; i<perf->state_count; i++) {
diff --git a/queue-2.6.29/acpi-fix-of-pmtimer-overflow-that-make-cx-states-time-incorrect.patch b/queue-2.6.29/acpi-fix-of-pmtimer-overflow-that-make-cx-states-time-incorrect.patch
new file mode 100644 (file)
index 0000000..8f164aa
--- /dev/null
@@ -0,0 +1,232 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 03:18:08 2009
+Date: Mon, 06 Apr 2009 23:16:54 -0400 (EDT)
+From: Len Brown <lenb@kernel.org>
+To: stable@kernel.org
+Message-id: <alpine.LFD.2.00.0904062315480.5698@localhost.localdomain>
+Cc: linux-acpi@vger.kernel.org
+Subject: acpi: fix of pmtimer overflow that make Cx states time incorrect
+
+From: alex.shi <alex.shi@intel.com>
+
+upstream commit: ff69f2bba67bd45514923aaedbf40fe351787c59
+
+We found the Cx state times to be abnormal on some of our machines with 16
+logical CPUs: C0 time was far too high while the system was really idle, with
+tickless and highres enabled in the kernel.  powertop output is below:
+
+     PowerTOP version 1.9       (C) 2007 Intel Corporation
+
+Cn                Avg residency       P-states (frequencies)
+C0 (cpu running)        (40.5%)         2.53 Ghz     0.0%
+C1                0.0ms ( 0.0%)         2.53 Ghz     0.0%
+C2              128.8ms (59.5%)         2.40 Ghz     0.0%
+                                        1.60 Ghz   100.0%
+
+Wakeups-from-idle per second :  4.7     interval: 20.0s
+no ACPI power usage estimate available
+
+Top causes for wakeups:
+  41.4% ( 24.9)       <interrupt> : extra timer interrupt
+  20.2% ( 12.2)     <kernel core> : usb_hcd_poll_rh_status
+(rh_timer_func)
+
+After looking into this issue in detail, Yakui and I found it is due to the
+24-bit PM timer overflowing when a CPU sleeps for more than 4 seconds.  With
+a tickless kernel, the CPU wants to sleep as much as possible when the system
+is idle.  But the Cx sleep times are recorded by the PM timer, whose width is
+determined by the BIOS.  The current Cx time was obtained in the following
+function from drivers/acpi/processor_idle.c:
+
+static inline u32 ticks_elapsed(u32 t1, u32 t2)
+{
+       if (t2 >= t1)
+               return (t2 - t1);
+       else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
+               return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
+       else
+               return ((0xFFFFFFFF - t1) + t2);
+}
+
+If the PM timer is 24 bits wide and 5 seconds elapse between t1 and t2, the
+above function records only about 1 second of ticks, so the Cx time will be
+reduced by about 4 seconds.  This is why we see the powertop output above.
+
+To resolve this problem, Yakui and I use ktime_get() to record the Cx state
+times instead of the PM timer, as in the following patch.  The patch was
+tested in i386/x86_64 modes on several platforms.
+
+Acked-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+Tested-by: Alex Shi <alex.shi@intel.com>
+Signed-off-by: Alex Shi <alex.shi@intel.com>
+Signed-off-by: Yakui.zhao <yakui.zhao@intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Len Brown <len.brown@intel.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/acpi/processor_idle.c |   63 ++++++++++++++++++------------------------
+ 1 file changed, 27 insertions(+), 36 deletions(-)
+
+--- a/drivers/acpi/processor_idle.c
++++ b/drivers/acpi/processor_idle.c
+@@ -64,7 +64,6 @@
+ #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
+ ACPI_MODULE_NAME("processor_idle");
+ #define ACPI_PROCESSOR_FILE_POWER     "power"
+-#define US_TO_PM_TIMER_TICKS(t)               ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
+ #define PM_TIMER_TICK_NS              (1000000000ULL/PM_TIMER_FREQUENCY)
+ #define C2_OVERHEAD                   1       /* 1us */
+ #define C3_OVERHEAD                   1       /* 1us */
+@@ -78,6 +77,10 @@ module_param(nocst, uint, 0000);
+ static unsigned int latency_factor __read_mostly = 2;
+ module_param(latency_factor, uint, 0644);
++static s64 us_to_pm_timer_ticks(s64 t)
++{
++      return div64_u64(t * PM_TIMER_FREQUENCY, 1000000);
++}
+ /*
+  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
+  * For now disable this. Probably a bug somewhere else.
+@@ -159,25 +162,6 @@ static struct dmi_system_id __cpuinitdat
+       {},
+ };
+-static inline u32 ticks_elapsed(u32 t1, u32 t2)
+-{
+-      if (t2 >= t1)
+-              return (t2 - t1);
+-      else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
+-              return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
+-      else
+-              return ((0xFFFFFFFF - t1) + t2);
+-}
+-
+-static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
+-{
+-      if (t2 >= t1)
+-              return PM_TIMER_TICKS_TO_US(t2 - t1);
+-      else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
+-              return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
+-      else
+-              return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
+-}
+ /*
+  * Callers should disable interrupts before the call and enable
+@@ -853,7 +837,8 @@ static inline void acpi_idle_do_entry(st
+ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
+                             struct cpuidle_state *state)
+ {
+-      u32 t1, t2;
++      ktime_t  kt1, kt2;
++      s64 idle_time;
+       struct acpi_processor *pr;
+       struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+@@ -871,14 +856,15 @@ static int acpi_idle_enter_c1(struct cpu
+               return 0;
+       }
+-      t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
++      kt1 = ktime_get_real();
+       acpi_idle_do_entry(cx);
+-      t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
++      kt2 = ktime_get_real();
++      idle_time =  ktime_to_us(ktime_sub(kt2, kt1));
+       local_irq_enable();
+       cx->usage++;
+-      return ticks_elapsed_in_us(t1, t2);
++      return idle_time;
+ }
+ /**
+@@ -891,8 +877,9 @@ static int acpi_idle_enter_simple(struct
+ {
+       struct acpi_processor *pr;
+       struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+-      u32 t1, t2;
+-      int sleep_ticks = 0;
++      ktime_t  kt1, kt2;
++      s64 idle_time;
++      s64 sleep_ticks = 0;
+       pr = __get_cpu_var(processors);
+@@ -925,18 +912,19 @@ static int acpi_idle_enter_simple(struct
+       if (cx->type == ACPI_STATE_C3)
+               ACPI_FLUSH_CPU_CACHE();
+-      t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
++      kt1 = ktime_get_real();
+       /* Tell the scheduler that we are going deep-idle: */
+       sched_clock_idle_sleep_event();
+       acpi_idle_do_entry(cx);
+-      t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
++      kt2 = ktime_get_real();
++      idle_time =  ktime_to_us(ktime_sub(kt2, kt1));
+ #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
+       /* TSC could halt in idle, so notify users */
+       if (tsc_halts_in_c(cx->type))
+               mark_tsc_unstable("TSC halts in idle");;
+ #endif
+-      sleep_ticks = ticks_elapsed(t1, t2);
++      sleep_ticks = us_to_pm_timer_ticks(idle_time);
+       /* Tell the scheduler how much we idled: */
+       sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
+@@ -948,7 +936,7 @@ static int acpi_idle_enter_simple(struct
+       acpi_state_timer_broadcast(pr, cx, 0);
+       cx->time += sleep_ticks;
+-      return ticks_elapsed_in_us(t1, t2);
++      return idle_time;
+ }
+ static int c3_cpu_count;
+@@ -966,8 +954,10 @@ static int acpi_idle_enter_bm(struct cpu
+ {
+       struct acpi_processor *pr;
+       struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
+-      u32 t1, t2;
+-      int sleep_ticks = 0;
++      ktime_t  kt1, kt2;
++      s64 idle_time;
++      s64 sleep_ticks = 0;
++
+       pr = __get_cpu_var(processors);
+@@ -1034,9 +1024,10 @@ static int acpi_idle_enter_bm(struct cpu
+               ACPI_FLUSH_CPU_CACHE();
+       }
+-      t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
++      kt1 = ktime_get_real();
+       acpi_idle_do_entry(cx);
+-      t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
++      kt2 = ktime_get_real();
++      idle_time =  ktime_to_us(ktime_sub(kt2, kt1));
+       /* Re-enable bus master arbitration */
+       if (pr->flags.bm_check && pr->flags.bm_control) {
+@@ -1051,7 +1042,7 @@ static int acpi_idle_enter_bm(struct cpu
+       if (tsc_halts_in_c(ACPI_STATE_C3))
+               mark_tsc_unstable("TSC halts in idle");
+ #endif
+-      sleep_ticks = ticks_elapsed(t1, t2);
++      sleep_ticks = us_to_pm_timer_ticks(idle_time);
+       /* Tell the scheduler how much we idled: */
+       sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);
+@@ -1062,7 +1053,7 @@ static int acpi_idle_enter_bm(struct cpu
+       acpi_state_timer_broadcast(pr, cx, 0);
+       cx->time += sleep_ticks;
+-      return ticks_elapsed_in_us(t1, t2);
++      return idle_time;
+ }
+ struct cpuidle_driver acpi_idle_driver = {
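
The following standalone sketch (not part of the patch) illustrates the wrap
described in the commit message above. It reuses the arithmetic of the removed
ticks_elapsed() for the 24-bit case and assumes the standard ACPI PM timer rate
of 3.579545 MHz; the start tick and the 6-second idle period are made-up values.

        #include <stdio.h>
        #include <stdint.h>

        #define PM_TIMER_FREQUENCY  3579545u    /* ACPI PM timer rate in Hz */
        #define PM_TIMER_MASK       0x00FFFFFFu /* 24-bit counter */

        /* Same arithmetic as the removed ticks_elapsed() for a 24-bit timer. */
        static uint32_t ticks_elapsed_24(uint32_t t1, uint32_t t2)
        {
                if (t2 >= t1)
                        return t2 - t1;
                return ((PM_TIMER_MASK - t1) + t2) & PM_TIMER_MASK;
        }

        int main(void)
        {
                uint64_t real_ticks = 6ull * PM_TIMER_FREQUENCY; /* 6 s of idle */
                uint32_t t1 = 0x00F00000;                        /* arbitrary start tick */
                uint32_t t2 = (uint32_t)(t1 + real_ticks) & PM_TIMER_MASK;

                printf("real idle:     %.2f s\n",
                       (double)real_ticks / PM_TIMER_FREQUENCY);
                printf("reported idle: %.2f s\n",
                       (double)ticks_elapsed_24(t1, t2) / PM_TIMER_FREQUENCY);
                return 0;
        }

The counter wraps every 2^24 / 3579545, roughly 4.7 seconds, so any idle period
longer than that loses whole wrap periods, which is consistent with the inflated
C0 residency in the powertop output quoted above.
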
diff --git a/queue-2.6.29/add-some-long-missing-capabilities-to-fs_mask.patch b/queue-2.6.29/add-some-long-missing-capabilities-to-fs_mask.patch
new file mode 100644 (file)
index 0000000..f97e211
--- /dev/null
@@ -0,0 +1,79 @@
+From stable-bounces@linux.kernel.org  Mon Apr 13 17:25:07 2009
+Date: Mon, 13 Apr 2009 17:25:03 GMT
+Message-Id: <200904131725.n3DHP3NJ014096@hera.kernel.org>
+From: Serge E. Hallyn <serue@us.ibm.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: add some long-missing capabilities to fs_mask
+
+upstream commit: 0ad30b8fd5fe798aae80df6344b415d8309342cc
+
+When POSIX capabilities were introduced during the 2.1 Linux
+cycle, the fs mask, which represents the capabilities which having
+fsuid==0 is supposed to grant, did not include CAP_MKNOD and
+CAP_LINUX_IMMUTABLE.  However, before capabilities the privilege
+to call these did in fact depend upon fsuid==0.
+
+This patch introduces those capabilities into the fsmask,
+restoring the old behavior.
+
+See the thread starting at http://lkml.org/lkml/2009/3/11/157 for
+reference.
+
+Note that if this fix is deemed valid, then earlier kernel versions (2.4
+and 2.2) ought to be fixed too.
+
+Changelog:
+       [Mar 23] Actually delete old CAP_FS_SET definition...
+       [Mar 20] Updated against J. Bruce Fields's patch
+
+Reported-by: Igor Zhbanov <izh1979@gmail.com>
+Signed-off-by: Serge E. Hallyn <serue@us.ibm.com>
+Cc: stable@kernel.org
+Cc: J. Bruce Fields <bfields@citi.umich.edu>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ include/linux/capability.h |   23 +++++++++++++++++++----
+ 1 file changed, 19 insertions(+), 4 deletions(-)
+
+--- a/include/linux/capability.h
++++ b/include/linux/capability.h
+@@ -377,7 +377,21 @@ struct cpu_vfs_cap_data {
+ #define CAP_FOR_EACH_U32(__capi)  \
+       for (__capi = 0; __capi < _KERNEL_CAPABILITY_U32S; ++__capi)
++/*
++ * CAP_FS_MASK and CAP_NFSD_MASKS:
++ *
++ * The fs mask is all the privileges that fsuid==0 historically meant.
++ * At one time in the past, that included CAP_MKNOD and CAP_LINUX_IMMUTABLE.
++ *
++ * It has never meant setting security.* and trusted.* xattrs.
++ *
++ * We could also define fsmask as follows:
++ *   1. CAP_FS_MASK is the privilege to bypass all fs-related DAC permissions
++ *   2. The security.* and trusted.* xattrs are fs-related MAC permissions
++ */
++
+ # define CAP_FS_MASK_B0     (CAP_TO_MASK(CAP_CHOWN)           \
++                          | CAP_TO_MASK(CAP_MKNOD)            \
+                           | CAP_TO_MASK(CAP_DAC_OVERRIDE)     \
+                           | CAP_TO_MASK(CAP_DAC_READ_SEARCH)  \
+                           | CAP_TO_MASK(CAP_FOWNER)           \
+@@ -392,11 +406,12 @@ struct cpu_vfs_cap_data {
+ # define CAP_EMPTY_SET    ((kernel_cap_t){{ 0, 0 }})
+ # define CAP_FULL_SET     ((kernel_cap_t){{ ~0, ~0 }})
+ # define CAP_INIT_EFF_SET ((kernel_cap_t){{ ~CAP_TO_MASK(CAP_SETPCAP), ~0 }})
+-# define CAP_FS_SET       ((kernel_cap_t){{ CAP_FS_MASK_B0, CAP_FS_MASK_B1 } })
++# define CAP_FS_SET       ((kernel_cap_t){{ CAP_FS_MASK_B0 \
++                                  | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \
++                                  CAP_FS_MASK_B1 } })
+ # define CAP_NFSD_SET     ((kernel_cap_t){{ CAP_FS_MASK_B0 \
+-                                          | CAP_TO_MASK(CAP_SYS_RESOURCE) \
+-                                          | CAP_TO_MASK(CAP_MKNOD), \
+-                                          CAP_FS_MASK_B1 } })
++                                  | CAP_TO_MASK(CAP_SYS_RESOURCE), \
++                                  CAP_FS_MASK_B1 } })
+ #endif /* _KERNEL_CAPABILITY_U32S != 2 */
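
As a quick illustration of the bit arithmetic above (a userspace sketch, not
kernel code): CAP_TO_MASK() turns a capability number into a single bit of one
32-bit word of a capability set, so the patch adds bit 27 (CAP_MKNOD) to
CAP_FS_MASK_B0 and bit 9 (CAP_LINUX_IMMUTABLE) to the first word of CAP_FS_SET.
The capability numbers below are the ones defined in linux/capability.h.

        #include <stdio.h>

        #define CAP_LINUX_IMMUTABLE 9   /* from linux/capability.h */
        #define CAP_MKNOD           27  /* from linux/capability.h */

        /* Same construction as the kernel's CAP_TO_MASK() for one 32-bit word. */
        #define CAP_TO_MASK(x) (1u << ((x) & 31))

        int main(void)
        {
                printf("CAP_MKNOD           -> 0x%08x\n", CAP_TO_MASK(CAP_MKNOD));
                printf("CAP_LINUX_IMMUTABLE -> 0x%08x\n",
                       CAP_TO_MASK(CAP_LINUX_IMMUTABLE));
                return 0;
        }
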
diff --git a/queue-2.6.29/af_rose-x25-sanity-check-the-maximum-user-frame-size.patch b/queue-2.6.29/af_rose-x25-sanity-check-the-maximum-user-frame-size.patch
new file mode 100644 (file)
index 0000000..26e0004
--- /dev/null
@@ -0,0 +1,67 @@
+From 83e0bbcbe2145f160fbaa109b0439dae7f4a38a9 Mon Sep 17 00:00:00 2001
+Message-ID: <20090403202225.GA13161@galadriel.inutil.org>
+From: Alan Cox <alan@lxorguk.ukuu.org.uk>
+Cc: Moritz Muehlenhoff <jmm@debian.org>
+Date: Fri, 27 Mar 2009 00:28:21 -0700
+Subject: af_rose/x25: Sanity check the maximum user frame size
+
+upstream commit: 83e0bbcbe2145f160fbaa109b0439dae7f4a38a9
+
+CVE-2009-0795.
+
+Otherwise we can wrap the sizes and end up sending garbage.
+
+Closes #10423
+
+Signed-off-by: Alan Cox <alan@lxorguk.ukuu.org.uk>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ net/netrom/af_netrom.c |    6 +++++-
+ net/rose/af_rose.c     |    4 ++++
+ net/x25/af_x25.c       |    6 ++++++
+ 3 files changed, 15 insertions(+), 1 deletion(-)
+
+--- a/net/netrom/af_netrom.c
++++ b/net/netrom/af_netrom.c
+@@ -1082,7 +1082,11 @@ static int nr_sendmsg(struct kiocb *iocb
+       SOCK_DEBUG(sk, "NET/ROM: sendto: Addresses built.\n");
+-      /* Build a packet */
++      /* Build a packet - the conventional user limit is 236 bytes. We can
++         do ludicrously large NetROM frames but must not overflow */
++      if (len > 65536)
++              return -EMSGSIZE;
++
+       SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n");
+       size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;
+--- a/net/rose/af_rose.c
++++ b/net/rose/af_rose.c
+@@ -1124,6 +1124,10 @@ static int rose_sendmsg(struct kiocb *io
+       /* Build a packet */
+       SOCK_DEBUG(sk, "ROSE: sendto: building packet.\n");
++      /* Sanity check the packet size */
++      if (len > 65535)
++              return -EMSGSIZE;
++
+       size = len + AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN;
+       if ((skb = sock_alloc_send_skb(sk, size, msg->msg_flags & MSG_DONTWAIT, &err)) == NULL)
+--- a/net/x25/af_x25.c
++++ b/net/x25/af_x25.c
+@@ -1037,6 +1037,12 @@ static int x25_sendmsg(struct kiocb *ioc
+               sx25.sx25_addr   = x25->dest_addr;
+       }
++      /* Sanity check the packet size */
++      if (len > 65535) {
++              rc = -EMSGSIZE;
++              goto out;
++      }
++
+       SOCK_DEBUG(sk, "x25_sendmsg: sendto: Addresses built.\n");
+       /* Build a packet */
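
A minimal sketch of the kind of wrap the commit message refers to, with
illustrative values only (the header constant below is made up, and the real
NetROM/ROSE/X.25 allocation paths are not reproduced): when a huge
user-supplied length is added to the per-protocol header overhead and the sum
lands in 32-bit arithmetic, it wraps to a small number, so the buffer prepared
for the frame no longer matches the amount of data the caller asked to send.

        #include <stdio.h>
        #include <stdint.h>

        #define HEADER_LEN 37u  /* illustrative header overhead, not a real constant */

        int main(void)
        {
                uint64_t len = 0xFFFFFFF0ull;   /* huge user-supplied length */
                uint32_t size = (uint32_t)(len + HEADER_LEN); /* wraps modulo 2^32 */

                printf("requested payload:   %llu bytes\n",
                       (unsigned long long)len);
                printf("computed frame size: %u bytes\n", size); /* tiny after the wrap */
                return 0;
        }
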
diff --git a/queue-2.6.29/agp-zero-pages-before-sending-to-userspace.patch b/queue-2.6.29/agp-zero-pages-before-sending-to-userspace.patch
new file mode 100644 (file)
index 0000000..9074c73
--- /dev/null
@@ -0,0 +1,39 @@
+From 59de2bebabc5027f93df999d59cc65df591c3e6e Mon Sep 17 00:00:00 2001
+Message-Id: <1240275604.8324.7.camel@sli10-desk.sh.intel.com>
+From: Shaohua Li <shaohua.li@intel.com>
+Date: Mon, 20 Apr 2009 10:08:35 +1000
+Subject: agp: zero pages before sending to userspace
+
+upstream commit: 59de2bebabc5027f93df999d59cc65df591c3e6e
+
+AGP pages might eventually be mapped into userspace, so the pages should be
+zeroed before userspace can use them. Otherwise there is potential
+information leakage.
+
+Signed-off-by: Shaohua Li <shaohua.li@intel.com>
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/char/agp/generic.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/char/agp/generic.c
++++ b/drivers/char/agp/generic.c
+@@ -1226,7 +1226,7 @@ int agp_generic_alloc_pages(struct agp_b
+       int i, ret = -ENOMEM;
+       for (i = 0; i < num_pages; i++) {
+-              page = alloc_page(GFP_KERNEL | GFP_DMA32);
++              page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+               /* agp_free_memory() needs gart address */
+               if (page == NULL)
+                       goto out;
+@@ -1257,7 +1257,7 @@ void *agp_generic_alloc_page(struct agp_
+ {
+       struct page * page;
+-      page = alloc_page(GFP_KERNEL | GFP_DMA32);
++      page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+       if (page == NULL)
+               return NULL;
diff --git a/queue-2.6.29/alsa-hda-add-missing-comma-in-ad1884_slave_vols.patch b/queue-2.6.29/alsa-hda-add-missing-comma-in-ad1884_slave_vols.patch
new file mode 100644 (file)
index 0000000..d93941d
--- /dev/null
@@ -0,0 +1,28 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 16:25:09 2009
+Date: Tue, 7 Apr 2009 16:25:04 GMT
+Message-Id: <200904071625.n37GP4DO014580@hera.kernel.org>
+From: Akinobu Mita <akinobu.mita@gmail.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: ALSA: hda - add missing comma in ad1884_slave_vols
+
+upstream commit: bca68467b59a24396554d8dd5979ee363c174854
+
+Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
+Cc: <stable@kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ sound/pci/hda/patch_analog.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/pci/hda/patch_analog.c
++++ b/sound/pci/hda/patch_analog.c
+@@ -3239,7 +3239,7 @@ static const char *ad1884_slave_vols[] =
+       "Mic Playback Volume",
+       "CD Playback Volume",
+       "Internal Mic Playback Volume",
+-      "Docking Mic Playback Volume"
++      "Docking Mic Playback Volume",
+       "Beep Playback Volume",
+       "IEC958 Playback Volume",
+       NULL
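
The commit message does not spell out why the comma matters, so here is a
standalone sketch: in C, adjacent string literals are concatenated at compile
time, so the missing comma silently fuses two mixer-control names into one and
drops an entry from the NULL-terminated list. The names mirror the ones in the
patch, but the array itself is only an illustration.

        #include <stdio.h>

        /* Missing comma after the first entry: the two literals are concatenated. */
        static const char *slave_vols_buggy[] = {
                "Docking Mic Playback Volume"   /* <-- no trailing comma */
                "Beep Playback Volume",
                "IEC958 Playback Volume",
                NULL
        };

        int main(void)
        {
                int i;

                for (i = 0; slave_vols_buggy[i]; i++)
                        printf("entry %d: \"%s\"\n", i, slave_vols_buggy[i]);
                /* Prints two entries instead of three; the first is the fused
                 * string "Docking Mic Playback VolumeBeep Playback Volume". */
                return 0;
        }
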
diff --git a/queue-2.6.29/alsa-hda-fix-the-cmd-cache-keys-for-amp-verbs.patch b/queue-2.6.29/alsa-hda-fix-the-cmd-cache-keys-for-amp-verbs.patch
new file mode 100644 (file)
index 0000000..356f4af
--- /dev/null
@@ -0,0 +1,39 @@
+From stable-bounces@linux.kernel.org  Wed Apr 15 17:25:08 2009
+Date: Wed, 15 Apr 2009 17:25:03 GMT
+Message-Id: <200904151725.n3FHP3ds026651@hera.kernel.org>
+From: Takashi Iwai <tiwai@suse.de>
+To: jejb@kernel.org, stable@kernel.org
+Subject: ALSA: hda - Fix the cmd cache keys for amp verbs
+
+upstream commit: fcad94a4c71c36a05f4d5c6dcb174534b4e0b136
+
+Fix the key value generation for get/set amp verbs.  The upper bits of
+the parameter have to be combined with the verb value to be unique for
+each direction/index of amp access.
+
+This fixes the resume problem on some hardware, such as the MacBook, after
+the channel mode is changed.
+
+Tested-by: Johannes Berg <johannes@sipsolutions.net>
+Cc: <stable@kernel.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ sound/pci/hda/hda_codec.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2003,7 +2003,11 @@ int snd_hda_codec_write_cache(struct hda
+       err = bus->ops.command(bus, res);
+       if (!err) {
+               struct hda_cache_head *c;
+-              u32 key = build_cmd_cache_key(nid, verb);
++              u32 key;
++              /* parm may contain the verb stuff for get/set amp */
++              verb = verb | (parm >> 8);
++              parm &= 0xff;
++              key = build_cmd_cache_key(nid, verb);
+               c = get_alloc_hash(&codec->cmd_cache, key);
+               if (c)
+                       c->val = parm;
diff --git a/queue-2.6.29/cap_prctl-don-t-set-error-to-0-at-no_change.patch b/queue-2.6.29/cap_prctl-don-t-set-error-to-0-at-no_change.patch
new file mode 100644 (file)
index 0000000..9487d71
--- /dev/null
@@ -0,0 +1,37 @@
+From 5bf37ec3e0f5eb79f23e024a7fbc8f3557c087f0 Mon Sep 17 00:00:00 2001
+Message-ID: <20090408215558.GA15250@us.ibm.com>
+From: Serge E. Hallyn <serue@us.ibm.com>
+Date: Wed, 8 Apr 2009 16:55:58 -0500
+Subject: cap_prctl: don't set error to 0 at 'no_change'
+
+upstream commit: 5bf37ec3e0f5eb79f23e024a7fbc8f3557c087f0
+
+One-liner: capsh --print is broken without this patch.
+
+In certain cases, cap_prctl returns error > 0 for success.  However,
+the 'no_change' label was always setting error to 0.  As a result,
+for example, 'prctl(CAP_BSET_READ, N)' would always return 0.
+It should return 1 if a process has N in its bounding set (as
+by default it does).
+
+I'm keeping the no_change label even though it's now functionally
+the same as 'error'.
+
+Signed-off-by: Serge Hallyn <serue@us.ibm.com>
+Acked-by: David Howells <dhowells@redhat.com>
+Signed-off-by: James Morris <jmorris@namei.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ security/commoncap.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -916,7 +916,6 @@ changed:
+       return commit_creds(new);
+ no_change:
+-      error = 0;
+ error:
+       abort_creds(new);
+       return error;
diff --git a/queue-2.6.29/cpumask-fix-slab-corruption-caused-by-alloc_cpumask_var_node.patch b/queue-2.6.29/cpumask-fix-slab-corruption-caused-by-alloc_cpumask_var_node.patch
new file mode 100644 (file)
index 0000000..ee51b23
--- /dev/null
@@ -0,0 +1,46 @@
+From stable-bounces@linux.kernel.org  Fri Apr  3 04:35:09 2009
+Date: Fri, 3 Apr 2009 04:35:02 GMT
+Message-Id: <200904030435.n334Z2YM010151@hera.kernel.org>
+From: Jack Steiner <steiner@sgi.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: cpumask: fix slab corruption caused by alloc_cpumask_var_node()
+
+upstream commit: 4f032ac4122a77dbabf7a24b2739b2790448180f
+
+Fix slab corruption caused by alloc_cpumask_var_node() overwriting the
+tail end of an off-stack cpumask.
+
+The function zeros out cpumask bits beyond the last possible cpu.  The
+starting point for zeroing should be the beginning of the mask offset by a
+byte count derived from the number of possible cpus.  The offset was
+calculated in bits instead of bytes.  This resulted in overwriting the end
+of the cpumask.
+
+Signed-off-by: Jack Steiner <steiner@sgi.com>
+Acked-by: Mike Travis <travis.sgi.com>
+Acked-by: Ingo Molnar <mingo@elte.hu>
+Cc: Rusty Russell <rusty@rustcorp.com.au>
+Cc: Stephen Rothwell <sfr@canb.auug.org.au>
+Cc: <stable@kernel.org>                [2.6.29.x]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ lib/cpumask.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/lib/cpumask.c
++++ b/lib/cpumask.c
+@@ -109,10 +109,10 @@ bool alloc_cpumask_var_node(cpumask_var_
+ #endif
+       /* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */
+       if (*mask) {
++              unsigned char *ptr = (unsigned char *)cpumask_bits(*mask);
+               unsigned int tail;
+               tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long);
+-              memset(cpumask_bits(*mask) + cpumask_size() - tail,
+-                     0, tail);
++              memset(ptr + cpumask_size() - tail, 0, tail);
+       }
+       return *mask != NULL;
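
A standalone sketch of the units mismatch behind this fix (the sizes are
illustrative): cpumask_size() is a byte count, but the old code added it to the
unsigned long pointer returned by cpumask_bits(), so the offset was scaled by
sizeof(long) and the memset started well past the end of the mask. Casting to a
byte pointer, as the patch does, keeps the arithmetic in bytes.

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                unsigned long mask[4];            /* stand-in for an off-stack cpumask */
                size_t size_bytes = sizeof(mask); /* cpumask_size() returns bytes */
                size_t tail = 8;                  /* bytes to zero at the end */

                /* Buggy form: a byte offset applied to an unsigned long *,
                 * so it is scaled by sizeof(long). */
                uintptr_t bad_start  = (uintptr_t)mask +
                                       (size_bytes - tail) * sizeof(long);
                /* Fixed form: do the arithmetic on a byte pointer. */
                uintptr_t good_start = (uintptr_t)mask + (size_bytes - tail);
                uintptr_t end        = (uintptr_t)mask + size_bytes;

                printf("buggy memset would start %lu bytes past the end\n",
                       (unsigned long)(bad_start - end));
                printf("fixed memset starts %lu bytes before the end (the tail)\n",
                       (unsigned long)(end - good_start));
                return 0;
        }
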
diff --git a/queue-2.6.29/crypto-shash-fix-unaligned-calculation-with-short-length.patch b/queue-2.6.29/crypto-shash-fix-unaligned-calculation-with-short-length.patch
new file mode 100644 (file)
index 0000000..b3c746e
--- /dev/null
@@ -0,0 +1,33 @@
+From stable-bounces@linux.kernel.org  Sat Apr  4 04:32:25 2009
+To: stable@kernel.org
+Message-Id: <E1LpxX4-0004sF-Fq@gondolin.me.apana.org.au>
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Sat, 04 Apr 2009 12:31:02 +0800
+Subject: crypto: shash - Fix unaligned calculation with short length
+
+From: Yehuda Sadeh <yehuda@hq.newdream.net>
+
+upstream commit: f4f689933c63e0fbfba62f2a80efb2b424b139ae
+
+When the total length is shorter than the calculated number of unaligned bytes, the call to shash->update breaks. For example, calling crc32c on an unaligned buffer of length 1 can result in a system crash.
+
+Signed-off-by: Yehuda Sadeh <yehuda@hq.newdream.net>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+
+ crypto/shash.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/crypto/shash.c
++++ b/crypto/shash.c
+@@ -82,6 +82,9 @@ static int shash_update_unaligned(struct
+       u8 buf[shash_align_buffer_size(unaligned_len, alignmask)]
+               __attribute__ ((aligned));
++      if (unaligned_len > len)
++              unaligned_len = len;
++
+       memcpy(buf, data, unaligned_len);
+       return shash->update(desc, buf, unaligned_len) ?:
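
A small arithmetic sketch of the case described above. The address and mask are
made up, and the head-length formula alignmask + 1 - (addr & alignmask) is
assumed here rather than quoted from shash.c: with a 4-byte alignment
requirement and data starting one byte past an aligned address, the computed
unaligned head is 3 bytes, larger than a 1-byte input, so without the added
clamp the memcpy into the bounce buffer would read past the caller's data.

        #include <stdio.h>

        int main(void)
        {
                unsigned long addr = 0x1001; /* hypothetical unaligned data pointer */
                unsigned int alignmask = 3;  /* transform wants 4-byte alignment */
                unsigned int len = 1;        /* total input, e.g. crc32c on 1 byte */

                unsigned int unaligned_len = alignmask + 1 - (addr & alignmask);

                printf("unaligned_len = %u, len = %u\n", unaligned_len, len);
                if (unaligned_len > len)     /* the check the patch adds */
                        unaligned_len = len;
                printf("after the clamp: unaligned_len = %u\n", unaligned_len);
                return 0;
        }
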
diff --git a/queue-2.6.29/dm-io-make-sync_io-uninterruptible.patch b/queue-2.6.29/dm-io-make-sync_io-uninterruptible.patch
new file mode 100644 (file)
index 0000000..1ca29bd
--- /dev/null
@@ -0,0 +1,51 @@
+From stable-bounces@linux.kernel.org  Fri Apr  3 17:45:25 2009
+Date: Fri, 3 Apr 2009 17:45:21 GMT
+Message-Id: <200904031745.n33HjLGe030420@hera.kernel.org>
+From: Mikulas Patocka <mpatocka@redhat.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: dm io: make sync_io uninterruptible
+
+upstream commit: b64b6bf4fd8b678a9f8477c11773c38a0a246a6d
+
+If someone sends a signal to a process performing a synchronous dm-io call,
+the kernel may crash.
+
+The function sync_io attempts to exit with -EINTR if it has a pending signal;
+however, the structure "io" is allocated on the stack, so already-submitted io
+requests end up touching unallocated stack space and corrupting kernel memory.
+
+sync_io sets its state to TASK_UNINTERRUPTIBLE, so the signal can't break out
+of io_schedule() --- however, if the signal was pending before sync_io entered
+the while (1) loop, the corruption of kernel memory will still happen.
+
+There is no way to cancel in-progress IOs, so the best solution is to ignore
+signals at this point.
+
+Cc: stable@kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/md/dm-io.c |    5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/drivers/md/dm-io.c
++++ b/drivers/md/dm-io.c
+@@ -370,16 +370,13 @@ static int sync_io(struct dm_io_client *
+       while (1) {
+               set_current_state(TASK_UNINTERRUPTIBLE);
+-              if (!atomic_read(&io.count) || signal_pending(current))
++              if (!atomic_read(&io.count))
+                       break;
+               io_schedule();
+       }
+       set_current_state(TASK_RUNNING);
+-      if (atomic_read(&io.count))
+-              return -EINTR;
+-
+       if (error_bits)
+               *error_bits = io.error_bits;
diff --git a/queue-2.6.29/dm-kcopyd-fix-callback-race.patch b/queue-2.6.29/dm-kcopyd-fix-callback-race.patch
new file mode 100644 (file)
index 0000000..e654991
--- /dev/null
@@ -0,0 +1,77 @@
+From stable-bounces@linux.kernel.org  Thu Apr  9 18:20:09 2009
+Date: Thu, 9 Apr 2009 18:20:04 GMT
+Message-Id: <200904091820.n39IK4uZ027492@hera.kernel.org>
+From: Mikulas Patocka <mpatocka@redhat.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: dm kcopyd: fix callback race
+
+upstream commit: 340cd44451fb0bfa542365e6b4b565bbd44836e2
+
+If the thread calling dm_kcopyd_copy is delayed due to scheduling inside
+split_job/segment_complete and the subjobs complete before the loop in
+split_job completes, the kcopyd callback could be invoked from the
+thread that called dm_kcopyd_copy instead of the kcopyd workqueue.
+
+dm_kcopyd_copy -> split_job -> segment_complete -> job->fn()
+
+Snapshots depend on the fact that callbacks are called from the single-threaded
+kcopyd workqueue and expect that there is no racing between individual
+callbacks. Racing between callbacks can lead to corruption of the exception
+store, and it can also mean that exception store callbacks are called twice
+for the same exception - a likely reason for the crashes reported inside
+pending_complete() / remove_exception().
+
+This patch fixes two problems:
+
+1. job->fn being called from the thread that submitted the job (see above).
+
+- Fix: hand over the completion callback to the kcopyd thread.
+
+2. job->fn(read_err, write_err, job->context); in segment_complete
+reports the error of the last subjob, not the union of all errors.
+
+- Fix: pass job->write_err to the callback to report all error bits
+  (it is done already in run_complete_job)
+
+Cc: stable@kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/md/dm-kcopyd.c |   17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+--- a/drivers/md/dm-kcopyd.c
++++ b/drivers/md/dm-kcopyd.c
+@@ -511,13 +511,16 @@ static void segment_complete(int read_er
+       } else if (atomic_dec_and_test(&job->sub_jobs)) {
+               /*
+-               * To avoid a race we must keep the job around
+-               * until after the notify function has completed.
+-               * Otherwise the client may try and stop the job
+-               * after we've completed.
++               * Queue the completion callback to the kcopyd thread.
++               *
++               * Some callers assume that all the completions are called
++               * from a single thread and don't race with each other.
++               *
++               * We must not call the callback directly here because this
++               * code may not be executing in the thread.
+                */
+-              job->fn(read_err, write_err, job->context);
+-              mempool_free(job, job->kc->job_pool);
++              push(&kc->complete_jobs, job);
++              wake(kc);
+       }
+ }
+@@ -530,6 +533,8 @@ static void split_job(struct kcopyd_job 
+ {
+       int i;
++      atomic_inc(&job->kc->nr_jobs);
++
+       atomic_set(&job->sub_jobs, SPLIT_COUNT);
+       for (i = 0; i < SPLIT_COUNT; i++)
+               segment_complete(0, 0u, job);
diff --git a/queue-2.6.29/dm-kcopyd-prepare-for-callback-race-fix.patch b/queue-2.6.29/dm-kcopyd-prepare-for-callback-race-fix.patch
new file mode 100644 (file)
index 0000000..c7b46c3
--- /dev/null
@@ -0,0 +1,50 @@
+From stable-bounces@linux.kernel.org  Thu Apr  9 18:20:11 2009
+Date: Thu, 9 Apr 2009 18:20:06 GMT
+Message-Id: <200904091820.n39IK6Rb027515@hera.kernel.org>
+From: Mikulas Patocka <mpatocka@redhat.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: dm kcopyd: prepare for callback race fix
+
+upstream commit: 73830857bca6f6c9dbd48e906daea50bea42d676
+
+Use a variable in segment_complete() to point to the dm_kcopyd_client
+struct and only release job->pages in run_complete_job() if any are
+defined.  These changes are needed by the next patch.
+
+Cc: stable@kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/md/dm-kcopyd.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/dm-kcopyd.c
++++ b/drivers/md/dm-kcopyd.c
+@@ -297,7 +297,8 @@ static int run_complete_job(struct kcopy
+       dm_kcopyd_notify_fn fn = job->fn;
+       struct dm_kcopyd_client *kc = job->kc;
+-      kcopyd_put_pages(kc, job->pages);
++      if (job->pages)
++              kcopyd_put_pages(kc, job->pages);
+       mempool_free(job, kc->job_pool);
+       fn(read_err, write_err, context);
+@@ -461,6 +462,7 @@ static void segment_complete(int read_er
+       sector_t progress = 0;
+       sector_t count = 0;
+       struct kcopyd_job *job = (struct kcopyd_job *) context;
++      struct dm_kcopyd_client *kc = job->kc;
+       mutex_lock(&job->lock);
+@@ -490,7 +492,7 @@ static void segment_complete(int read_er
+       if (count) {
+               int i;
+-              struct kcopyd_job *sub_job = mempool_alloc(job->kc->job_pool,
++              struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
+                                                          GFP_NOIO);
+               *sub_job = *job;
diff --git a/queue-2.6.29/dm-path-selector-use-module-refcount-directly.patch b/queue-2.6.29/dm-path-selector-use-module-refcount-directly.patch
new file mode 100644 (file)
index 0000000..1a632c9
--- /dev/null
@@ -0,0 +1,97 @@
+From stable-bounces@linux.kernel.org  Fri Apr  3 17:45:15 2009
+Date: Fri, 3 Apr 2009 17:45:10 GMT
+Message-Id: <200904031745.n33HjAZ3030295@hera.kernel.org>
+From: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: dm: path selector use module refcount directly
+
+upstream commit: aea9058801c0acfa2831af1714da412dfb0018c2
+
+Fix refcount corruption in dm-path-selector
+
+Refcounting with non-atomic ops under a shared lock will corrupt the counter
+on a multi-processor system and may trigger BUG_ON().
+Use the module refcount instead.
+# same approach as dm-target-use-module-refcount-directly.patch here
+# https://www.redhat.com/archives/dm-devel/2008-December/msg00075.html
+
+Typical oops:
+  kernel BUG at linux-2.6.29-rc3/drivers/md/dm-path-selector.c:90!
+  Pid: 11148, comm: dmsetup Not tainted 2.6.29-rc3-nm #1
+  dm_put_path_selector+0x4d/0x61 [dm_multipath]
+  Call Trace:
+   [<ffffffffa031d3f9>] free_priority_group+0x33/0xb3 [dm_multipath]
+   [<ffffffffa031d4aa>] free_multipath+0x31/0x67 [dm_multipath]
+   [<ffffffffa031d50d>] multipath_dtr+0x2d/0x32 [dm_multipath]
+   [<ffffffffa015d6c2>] dm_table_destroy+0x64/0xd8 [dm_mod]
+   [<ffffffffa015b73a>] __unbind+0x46/0x4b [dm_mod]
+   [<ffffffffa015b79f>] dm_swap_table+0x60/0x14d [dm_mod]
+   [<ffffffffa015f963>] dev_suspend+0xfd/0x177 [dm_mod]
+   [<ffffffffa0160250>] dm_ctl_ioctl+0x24c/0x29c [dm_mod]
+   [<ffffffff80288cd3>] ? get_page_from_freelist+0x49c/0x61d
+   [<ffffffffa015f866>] ? dev_suspend+0x0/0x177 [dm_mod]
+   [<ffffffff802bf05c>] vfs_ioctl+0x2a/0x77
+   [<ffffffff802bf4f1>] do_vfs_ioctl+0x448/0x4a0
+   [<ffffffff802bf5a0>] sys_ioctl+0x57/0x7a
+   [<ffffffff8020c05b>] system_call_fastpath+0x16/0x1b
+
+Cc: stable@kernel.org
+Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/md/dm-path-selector.c |   21 +++------------------
+ 1 file changed, 3 insertions(+), 18 deletions(-)
+
+--- a/drivers/md/dm-path-selector.c
++++ b/drivers/md/dm-path-selector.c
+@@ -17,9 +17,7 @@
+ struct ps_internal {
+       struct path_selector_type pst;
+-
+       struct list_head list;
+-      long use;
+ };
+ #define pst_to_psi(__pst) container_of((__pst), struct ps_internal, pst)
+@@ -45,12 +43,8 @@ static struct ps_internal *get_path_sele
+       down_read(&_ps_lock);
+       psi = __find_path_selector_type(name);
+-      if (psi) {
+-              if ((psi->use == 0) && !try_module_get(psi->pst.module))
+-                      psi = NULL;
+-              else
+-                      psi->use++;
+-      }
++      if (psi && !try_module_get(psi->pst.module))
++              psi = NULL;
+       up_read(&_ps_lock);
+       return psi;
+@@ -84,11 +78,7 @@ void dm_put_path_selector(struct path_se
+       if (!psi)
+               goto out;
+-      if (--psi->use == 0)
+-              module_put(psi->pst.module);
+-
+-      BUG_ON(psi->use < 0);
+-
++      module_put(psi->pst.module);
+ out:
+       up_read(&_ps_lock);
+ }
+@@ -136,11 +126,6 @@ int dm_unregister_path_selector(struct p
+               return -EINVAL;
+       }
+-      if (psi->use) {
+-              up_write(&_ps_lock);
+-              return -ETXTBSY;
+-      }
+-
+       list_del(&psi->list);
+       up_write(&_ps_lock);
diff --git a/queue-2.6.29/dm-preserve-bi_io_vec-when-resubmitting-bios.patch b/queue-2.6.29/dm-preserve-bi_io_vec-when-resubmitting-bios.patch
new file mode 100644 (file)
index 0000000..061af07
--- /dev/null
@@ -0,0 +1,87 @@
+From stable-bounces@linux.kernel.org  Fri Apr  3 17:45:30 2009
+Date: Fri, 3 Apr 2009 17:45:25 GMT
+Message-Id: <200904031745.n33HjP2r030468@hera.kernel.org>
+From: Mikulas Patocka <mpatocka@redhat.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: dm: preserve bi_io_vec when resubmitting bios
+
+upstream commit: a920f6b3accc77d9dddbc98a7426be23ee479625
+
+Device mapper saves and restores various fields in the bio, but it doesn't save
+bi_io_vec.  If the device driver modifies this after a partially successful
+request, dm-raid1 and dm-multipath may attempt to resubmit a bio that has
+bi_size inconsistent with the size of the vector.
+
+To make requests resubmittable in dm-raid1 and dm-multipath, we must save
+and restore the bio vector as well.
+
+To reduce the memory overhead involved in this, we do not save the pages in a
+vector and use a 16-bit field size if the page size is less than 65536.
+
+Cc: stable@kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/md/dm-bio-record.h |   26 ++++++++++++++++++++++++++
+ 1 file changed, 26 insertions(+)
+
+--- a/drivers/md/dm-bio-record.h
++++ b/drivers/md/dm-bio-record.h
+@@ -16,30 +16,56 @@
+  * functions in this file help the target record and restore the
+  * original bio state.
+  */
++
++struct dm_bio_vec_details {
++#if PAGE_SIZE < 65536
++      __u16 bv_len;
++      __u16 bv_offset;
++#else
++      unsigned bv_len;
++      unsigned bv_offset;
++#endif
++};
++
+ struct dm_bio_details {
+       sector_t bi_sector;
+       struct block_device *bi_bdev;
+       unsigned int bi_size;
+       unsigned short bi_idx;
+       unsigned long bi_flags;
++      struct dm_bio_vec_details bi_io_vec[BIO_MAX_PAGES];
+ };
+ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
+ {
++      unsigned i;
++
+       bd->bi_sector = bio->bi_sector;
+       bd->bi_bdev = bio->bi_bdev;
+       bd->bi_size = bio->bi_size;
+       bd->bi_idx = bio->bi_idx;
+       bd->bi_flags = bio->bi_flags;
++
++      for (i = 0; i < bio->bi_vcnt; i++) {
++              bd->bi_io_vec[i].bv_len = bio->bi_io_vec[i].bv_len;
++              bd->bi_io_vec[i].bv_offset = bio->bi_io_vec[i].bv_offset;
++      }
+ }
+ static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
+ {
++      unsigned i;
++
+       bio->bi_sector = bd->bi_sector;
+       bio->bi_bdev = bd->bi_bdev;
+       bio->bi_size = bd->bi_size;
+       bio->bi_idx = bd->bi_idx;
+       bio->bi_flags = bd->bi_flags;
++
++      for (i = 0; i < bio->bi_vcnt; i++) {
++              bio->bi_io_vec[i].bv_len = bd->bi_io_vec[i].bv_len;
++              bio->bi_io_vec[i].bv_offset = bd->bi_io_vec[i].bv_offset;
++      }
+ }
+ #endif
diff --git a/queue-2.6.29/dm-raid1-switch-read_record-from-kmalloc-to-slab-to-save-memory.patch b/queue-2.6.29/dm-raid1-switch-read_record-from-kmalloc-to-slab-to-save-memory.patch
new file mode 100644 (file)
index 0000000..34d9400
--- /dev/null
@@ -0,0 +1,86 @@
+From stable-bounces@linux.kernel.org  Fri Apr  3 17:45:27 2009
+Date: Fri, 3 Apr 2009 17:45:23 GMT
+Message-Id: <200904031745.n33HjNXG030447@hera.kernel.org>
+From: Mikulas Patocka <mpatocka@redhat.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: dm raid1: switch read_record from kmalloc to slab to save memory
+
+upstream commit: 95f8fac8dc6139fedfb87746e0c8fda9b803cb46
+
+With my previous patch to save bi_io_vec, the size of dm_raid1_read_record
+is significantly increased (the vector list takes 3072 bytes on 32-bit machines
+and 4096 bytes on 64-bit machines).
+
+The structure dm_raid1_read_record used to be allocated with kmalloc,
+but kmalloc rounds the size up to the next power of two, so an object
+slightly larger than 4096 bytes will allocate 8192 bytes of memory and half
+of that memory will be wasted.
+
+This patch replaces the kmalloc allocation with a slab cache, which doesn't
+have this padding, so it reduces the memory consumed.
+
+Cc: stable@kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/md/dm-raid1.c |   25 +++++++++++++++++++++----
+ 1 file changed, 21 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/dm-raid1.c
++++ b/drivers/md/dm-raid1.c
+@@ -145,6 +145,8 @@ struct dm_raid1_read_record {
+       struct dm_bio_details details;
+ };
++static struct kmem_cache *_dm_raid1_read_record_cache;
++
+ /*
+  * Every mirror should look like this one.
+  */
+@@ -764,9 +766,9 @@ static struct mirror_set *alloc_context(
+       atomic_set(&ms->suspend, 0);
+       atomic_set(&ms->default_mirror, DEFAULT_MIRROR);
+-      len = sizeof(struct dm_raid1_read_record);
+-      ms->read_record_pool = mempool_create_kmalloc_pool(MIN_READ_RECORDS,
+-                                                         len);
++      ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
++                                              _dm_raid1_read_record_cache);
++
+       if (!ms->read_record_pool) {
+               ti->error = "Error creating mirror read_record_pool";
+               kfree(ms);
+@@ -1279,16 +1281,31 @@ static int __init dm_mirror_init(void)
+ {
+       int r;
++      _dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
++      if (!_dm_raid1_read_record_cache) {
++              DMERR("Can't allocate dm_raid1_read_record cache");
++              r = -ENOMEM;
++              goto bad_cache;
++      }
++
+       r = dm_register_target(&mirror_target);
+-      if (r < 0)
++      if (r < 0) {
+               DMERR("Failed to register mirror target");
++              goto bad_target;
++      }
++
++      return 0;
++bad_target:
++      kmem_cache_destroy(_dm_raid1_read_record_cache);
++bad_cache:
+       return r;
+ }
+ static void __exit dm_mirror_exit(void)
+ {
+       dm_unregister_target(&mirror_target);
++      kmem_cache_destroy(_dm_raid1_read_record_cache);
+ }
+ /* Module hooks */
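
As a rough illustration of the kmalloc rounding described in the changelog
above, here is a small userspace C sketch. The sizes are made up to mimic
"slightly over one page", and the helper models generic power-of-two size
classes rather than the kernel allocator itself:

#include <stdio.h>

/* Round a request up to the next power of two, the way generic
 * power-of-two size-class allocators do. */
static size_t round_up_pow2(size_t n)
{
        size_t p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

int main(void)
{
        size_t obj = 4096 + 40;                 /* hypothetical: just over a page */
        size_t bucket = round_up_pow2(obj);     /* 8192 */

        printf("object %zu bytes, bucket %zu bytes, wasted %zu bytes (%.0f%%)\n",
               obj, bucket, bucket - obj,
               100.0 * (double)(bucket - obj) / (double)bucket);
        /* A slab cache sized exactly to the object avoids this padding,
         * which is what the patch above switches to. */
        return 0;
}
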
diff --git a/queue-2.6.29/dm-snapshot-avoid-dropping-lock-in-__find_pending_exception.patch b/queue-2.6.29/dm-snapshot-avoid-dropping-lock-in-__find_pending_exception.patch
new file mode 100644 (file)
index 0000000..d775b02
--- /dev/null
@@ -0,0 +1,86 @@
+From stable-bounces@linux.kernel.org  Fri Apr  3 17:45:21 2009
+Date: Fri, 3 Apr 2009 17:45:16 GMT
+Message-Id: <200904031745.n33HjGwC030370@hera.kernel.org>
+From: Mikulas Patocka <mpatocka@redhat.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: dm snapshot: avoid dropping lock in __find_pending_exception
+
+upstream commit: c66213921c816f6b1b16a84911618ba9a363b134
+
+It is uncommon and bug-prone to drop a lock in a function that is called with
+the lock held, so the unlock/relock sequence is moved to the caller.
+
+Cc: stable@kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/md/dm-snap.c |   42 ++++++++++++++++++++++++------------------
+ 1 file changed, 24 insertions(+), 18 deletions(-)
+
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -992,23 +992,10 @@ __lookup_pending_exception(struct dm_sna
+  * this.
+  */
+ static struct dm_snap_pending_exception *
+-__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
++__find_pending_exception(struct dm_snapshot *s,
++                       struct dm_snap_pending_exception *pe, chunk_t chunk)
+ {
+-      struct dm_snap_pending_exception *pe, *pe2;
+-      chunk_t chunk = sector_to_chunk(s, bio->bi_sector);
+-
+-      /*
+-       * Create a new pending exception, we don't want
+-       * to hold the lock while we do this.
+-       */
+-      up_write(&s->lock);
+-      pe = alloc_pending_exception(s);
+-      down_write(&s->lock);
+-
+-      if (!s->valid) {
+-              free_pending_exception(pe);
+-              return NULL;
+-      }
++      struct dm_snap_pending_exception *pe2;
+       pe2 = __lookup_pending_exception(s, chunk);
+       if (pe2) {
+@@ -1083,7 +1070,17 @@ static int snapshot_map(struct dm_target
+       if (bio_rw(bio) == WRITE) {
+               pe = __lookup_pending_exception(s, chunk);
+               if (!pe) {
+-                      pe = __find_pending_exception(s, bio);
++                      up_write(&s->lock);
++                      pe = alloc_pending_exception(s);
++                      down_write(&s->lock);
++
++                      if (!s->valid) {
++                              free_pending_exception(pe);
++                              r = -EIO;
++                              goto out_unlock;
++                      }
++
++                      pe = __find_pending_exception(s, pe, chunk);
+                       if (!pe) {
+                               __invalidate_snapshot(s, -ENOMEM);
+                               r = -EIO;
+@@ -1220,7 +1217,16 @@ static int __origin_write(struct list_he
+               pe = __lookup_pending_exception(snap, chunk);
+               if (!pe) {
+-                      pe = __find_pending_exception(snap, bio);
++                      up_write(&snap->lock);
++                      pe = alloc_pending_exception(snap);
++                      down_write(&snap->lock);
++
++                      if (!snap->valid) {
++                              free_pending_exception(pe);
++                              goto next_snapshot;
++                      }
++
++                      pe = __find_pending_exception(snap, pe, chunk);
+                       if (!pe) {
+                               __invalidate_snapshot(snap, -ENOMEM);
+                               goto next_snapshot;
diff --git a/queue-2.6.29/dm-snapshot-avoid-having-two-exceptions-for-the-same-chunk.patch b/queue-2.6.29/dm-snapshot-avoid-having-two-exceptions-for-the-same-chunk.patch
new file mode 100644 (file)
index 0000000..e3d55db
--- /dev/null
@@ -0,0 +1,62 @@
+From stable-bounces@linux.kernel.org  Fri Apr  3 17:45:19 2009
+Date: Fri, 3 Apr 2009 17:45:14 GMT
+Message-Id: <200904031745.n33HjE1l030349@hera.kernel.org>
+From: Mikulas Patocka <mpatocka@redhat.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: dm snapshot: avoid having two exceptions for the same chunk
+
+upstream commit: 35bf659b008e83e725dcd30f542e38461dbb867c
+
+We need to check whether the exception was completed while the lock was
+dropped.
+
+After regaining the lock, __find_pending_exception checks whether the
+exception was already placed into the &s->pending hash.
+
+But we don't check whether the exception was already completed and placed
+into the &s->complete hash. If the process waiting in alloc_pending_exception
+was delayed at this point by scheduling latency and the exception was
+completed in the meantime, we'd miss that and allocate another pending
+exception for an already-completed chunk.
+
+That would leave two records for the same chunk and could corrupt data,
+because multiple snapshot I/Os to the affected chunk could be redirected
+to different locations in the snapshot.
+
+Cc: stable@kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/md/dm-snap.c |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -1080,6 +1080,13 @@ static int snapshot_map(struct dm_target
+                               goto out_unlock;
+                       }
++                      e = lookup_exception(&s->complete, chunk);
++                      if (e) {
++                              free_pending_exception(pe);
++                              remap_exception(s, e, bio, chunk);
++                              goto out_unlock;
++                      }
++
+                       pe = __find_pending_exception(s, pe, chunk);
+                       if (!pe) {
+                               __invalidate_snapshot(s, -ENOMEM);
+@@ -1226,6 +1233,12 @@ static int __origin_write(struct list_he
+                               goto next_snapshot;
+                       }
++                      e = lookup_exception(&snap->complete, chunk);
++                      if (e) {
++                              free_pending_exception(pe);
++                              goto next_snapshot;
++                      }
++
+                       pe = __find_pending_exception(snap, pe, chunk);
+                       if (!pe) {
+                               __invalidate_snapshot(snap, -ENOMEM);
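
Together with the previous patch, which moves the unlock/allocate/relock
sequence into the caller, the point of this fix is what must be re-checked
once the lock is retaken. Below is a minimal userspace sketch of that shape
only; it uses a pthread mutex and invented names in place of the snapshot
rw-semaphore and exception tables, and it tracks a single chunk:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy model: one chunk that is either untracked, pending (copy in
 * flight) or complete (copy done). */
enum chunk_state { UNTRACKED, PENDING, COMPLETE };

struct snap {
        pthread_mutex_t lock;
        int valid;
        enum chunk_state chunk;
        void *pe;               /* the pending record, if we created one */
};

static int map_write(struct snap *s)
{
        int ret = 0;

        pthread_mutex_lock(&s->lock);
        if (s->chunk == UNTRACKED) {
                /* Allocation may sleep, so the caller drops the lock,
                 * allocates, and retakes the lock. */
                pthread_mutex_unlock(&s->lock);
                void *pe = malloc(64);
                pthread_mutex_lock(&s->lock);

                if (!s->valid) {                /* invalidated meanwhile */
                        free(pe);
                        ret = -1;
                } else if (s->chunk == COMPLETE) {
                        free(pe);               /* the check this patch adds */
                } else if (s->chunk == PENDING) {
                        free(pe);               /* someone else won the race */
                } else {
                        s->chunk = PENDING;     /* our allocation is the record */
                        s->pe = pe;
                }
        }
        pthread_mutex_unlock(&s->lock);
        return ret;
}

int main(void)
{
        struct snap s = { PTHREAD_MUTEX_INITIALIZER, 1, UNTRACKED, NULL };

        printf("first write:  %d (state %d)\n", map_write(&s), s.chunk);
        printf("second write: %d (state %d)\n", map_write(&s), s.chunk);
        free(s.pe);
        return 0;
}
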
diff --git a/queue-2.6.29/dm-snapshot-refactor-__find_pending_exception.patch b/queue-2.6.29/dm-snapshot-refactor-__find_pending_exception.patch
new file mode 100644 (file)
index 0000000..8677435
--- /dev/null
@@ -0,0 +1,122 @@
+From stable-bounces@linux.kernel.org  Fri Apr  3 17:45:24 2009
+Date: Fri, 3 Apr 2009 17:45:19 GMT
+Message-Id: <200904031745.n33HjJEi030396@hera.kernel.org>
+From: Mikulas Patocka <mpatocka@redhat.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: dm snapshot: refactor __find_pending_exception
+
+upstream commit: 2913808eb56a6445a7b277eb8d17651c8defb035
+
+Move looking-up of a pending exception from __find_pending_exception to another
+function.
+
+Cc: stable@kernel.org
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/md/dm-snap.c |   52 +++++++++++++++++++++++++++------------------------
+ 1 file changed, 28 insertions(+), 24 deletions(-)
+
+--- a/drivers/md/dm-snap.c
++++ b/drivers/md/dm-snap.c
+@@ -972,6 +972,17 @@ static void start_copy(struct dm_snap_pe
+                   &src, 1, &dest, 0, copy_callback, pe);
+ }
++static struct dm_snap_pending_exception *
++__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
++{
++      struct dm_snap_exception *e = lookup_exception(&s->pending, chunk);
++
++      if (!e)
++              return NULL;
++
++      return container_of(e, struct dm_snap_pending_exception, e);
++}
++
+ /*
+  * Looks to see if this snapshot already has a pending exception
+  * for this chunk, otherwise it allocates a new one and inserts
+@@ -983,21 +994,10 @@ static void start_copy(struct dm_snap_pe
+ static struct dm_snap_pending_exception *
+ __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
+ {
+-      struct dm_snap_exception *e;
+-      struct dm_snap_pending_exception *pe;
++      struct dm_snap_pending_exception *pe, *pe2;
+       chunk_t chunk = sector_to_chunk(s, bio->bi_sector);
+       /*
+-       * Is there a pending exception for this already ?
+-       */
+-      e = lookup_exception(&s->pending, chunk);
+-      if (e) {
+-              /* cast the exception to a pending exception */
+-              pe = container_of(e, struct dm_snap_pending_exception, e);
+-              goto out;
+-      }
+-
+-      /*
+        * Create a new pending exception, we don't want
+        * to hold the lock while we do this.
+        */
+@@ -1010,11 +1010,10 @@ __find_pending_exception(struct dm_snaps
+               return NULL;
+       }
+-      e = lookup_exception(&s->pending, chunk);
+-      if (e) {
++      pe2 = __lookup_pending_exception(s, chunk);
++      if (pe2) {
+               free_pending_exception(pe);
+-              pe = container_of(e, struct dm_snap_pending_exception, e);
+-              goto out;
++              return pe2;
+       }
+       pe->e.old_chunk = chunk;
+@@ -1032,7 +1031,6 @@ __find_pending_exception(struct dm_snaps
+       get_pending_exception(pe);
+       insert_exception(&s->pending, &pe->e);
+- out:
+       return pe;
+ }
+@@ -1083,11 +1081,14 @@ static int snapshot_map(struct dm_target
+        * writeable.
+        */
+       if (bio_rw(bio) == WRITE) {
+-              pe = __find_pending_exception(s, bio);
++              pe = __lookup_pending_exception(s, chunk);
+               if (!pe) {
+-                      __invalidate_snapshot(s, -ENOMEM);
+-                      r = -EIO;
+-                      goto out_unlock;
++                      pe = __find_pending_exception(s, bio);
++                      if (!pe) {
++                              __invalidate_snapshot(s, -ENOMEM);
++                              r = -EIO;
++                              goto out_unlock;
++                      }
+               }
+               remap_exception(s, &pe->e, bio, chunk);
+@@ -1217,10 +1218,13 @@ static int __origin_write(struct list_he
+               if (e)
+                       goto next_snapshot;
+-              pe = __find_pending_exception(snap, bio);
++              pe = __lookup_pending_exception(snap, chunk);
+               if (!pe) {
+-                      __invalidate_snapshot(snap, -ENOMEM);
+-                      goto next_snapshot;
++                      pe = __find_pending_exception(snap, bio);
++                      if (!pe) {
++                              __invalidate_snapshot(snap, -ENOMEM);
++                              goto next_snapshot;
++                      }
+               }
+               if (!primary_pe) {
diff --git a/queue-2.6.29/dm-table-fix-upgrade-mode-race.patch b/queue-2.6.29/dm-table-fix-upgrade-mode-race.patch
new file mode 100644 (file)
index 0000000..38b814b
--- /dev/null
@@ -0,0 +1,70 @@
+From stable-bounces@linux.kernel.org  Fri Apr  3 17:45:14 2009
+Date: Fri, 3 Apr 2009 17:45:08 GMT
+Message-Id: <200904031745.n33Hj8Zt030210@hera.kernel.org>
+From: Alasdair G Kergon <agk@redhat.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: dm table: fix upgrade mode race
+
+upstream commit: 570b9d968bf9b16974252ef7cbce73fa6dac34f3
+
+upgrade_mode() sets bdev to NULL temporarily, and does not have any
+locking to exclude anything from seeing that NULL.
+
+In dm_table_any_congested() bdev_get_queue() can dereference that NULL and
+cause a reported oops.
+
+Fix this by not changing that field during the mode upgrade.
+
+Cc: stable@kernel.org
+Cc: Neil Brown <neilb@suse.de>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/md/dm-table.c |   26 ++++++++++++++------------
+ 1 file changed, 14 insertions(+), 12 deletions(-)
+
+--- a/drivers/md/dm-table.c
++++ b/drivers/md/dm-table.c
+@@ -399,28 +399,30 @@ static int check_device_area(struct dm_d
+ }
+ /*
+- * This upgrades the mode on an already open dm_dev.  Being
++ * This upgrades the mode on an already open dm_dev, being
+  * careful to leave things as they were if we fail to reopen the
+- * device.
++ * device and not to touch the existing bdev field in case
++ * it is accessed concurrently inside dm_table_any_congested().
+  */
+ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
+                       struct mapped_device *md)
+ {
+       int r;
+-      struct dm_dev_internal dd_copy;
+-      dev_t dev = dd->dm_dev.bdev->bd_dev;
++      struct dm_dev_internal dd_new, dd_old;
+-      dd_copy = *dd;
++      dd_new = dd_old = *dd;
++
++      dd_new.dm_dev.mode |= new_mode;
++      dd_new.dm_dev.bdev = NULL;
++
++      r = open_dev(&dd_new, dd->dm_dev.bdev->bd_dev, md);
++      if (r)
++              return r;
+       dd->dm_dev.mode |= new_mode;
+-      dd->dm_dev.bdev = NULL;
+-      r = open_dev(dd, dev, md);
+-      if (!r)
+-              close_dev(&dd_copy, md);
+-      else
+-              *dd = dd_copy;
++      close_dev(&dd_old, md);
+-      return r;
++      return 0;
+ }
+ /*
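
The race fixed here follows a common shape: prepare the new state on a private
copy, and publish it only once nothing can fail, so lockless readers never see
an intermediate NULL. A small userspace sketch of that pattern follows; all
names are invented, strdup/free stand in for open_dev/close_dev, and the char
pointer stands in for the bdev field that dm_table_any_congested() may read at
any time:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct dev {
        char *handle;           /* stands in for the open bdev */
        int mode;
};

static char *reopen(const char *name)   /* stands in for open_dev() */
{
        return strdup(name);
}

static int upgrade_mode(struct dev *d, int new_mode)
{
        /* Do everything that can fail on a private copy first... */
        char *fresh = reopen(d->handle);
        if (!fresh)
                return -1;

        /* ...then publish.  A reader only ever sees the old or the new
         * handle, never NULL; the old one is released afterwards. */
        char *old = d->handle;
        d->handle = fresh;
        d->mode |= new_mode;
        free(old);                      /* stands in for close_dev() */
        return 0;
}

int main(void)
{
        struct dev d = { strdup("/dev/example"), 1 };

        printf("upgrade=%d handle=%s mode=%d\n",
               upgrade_mode(&d, 4), d.handle, d.mode);
        free(d.handle);
        return 0;
}
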
diff --git a/queue-2.6.29/dm-target-use-module-refcount-directly.patch b/queue-2.6.29/dm-target-use-module-refcount-directly.patch
new file mode 100644 (file)
index 0000000..321efec
--- /dev/null
@@ -0,0 +1,77 @@
+From stable-bounces@linux.kernel.org  Fri Apr  3 17:45:17 2009
+Date: Fri, 3 Apr 2009 17:45:13 GMT
+Message-Id: <200904031745.n33HjDju030328@hera.kernel.org>
+From: Cheng Renquan <crquan@gmail.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: dm target: use module refcount directly
+
+upstream commit: 5642b8a61a15436231adf27b2b1bd96901b623dd
+
+The tt_internal's 'use' field is superfluous: the module's refcount can do
+the work properly.  An acceptable side-effect is that this increases the
+reference counts reported by 'lsmod'.
+
+Remove the superfluous test when removing a target module.
+
+[Crash possible without this on SMP - agk]
+
+Cc: stable@kernel.org
+Signed-off-by: Cheng Renquan <crquan@gmail.com>
+Signed-off-by: Alasdair G Kergon <agk@redhat.com>
+Reviewed-by: Alasdair G Kergon <agk@redhat.com>
+Reviewed-by: Jonathan Brassow <jbrassow@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/md/dm-target.c |   20 +++-----------------
+ 1 file changed, 3 insertions(+), 17 deletions(-)
+
+--- a/drivers/md/dm-target.c
++++ b/drivers/md/dm-target.c
+@@ -18,7 +18,6 @@ struct tt_internal {
+       struct target_type tt;
+       struct list_head list;
+-      long use;
+ };
+ static LIST_HEAD(_targets);
+@@ -44,12 +43,8 @@ static struct tt_internal *get_target_ty
+       down_read(&_lock);
+       ti = __find_target_type(name);
+-      if (ti) {
+-              if ((ti->use == 0) && !try_module_get(ti->tt.module))
+-                      ti = NULL;
+-              else
+-                      ti->use++;
+-      }
++      if (ti && !try_module_get(ti->tt.module))
++              ti = NULL;
+       up_read(&_lock);
+       return ti;
+@@ -77,10 +72,7 @@ void dm_put_target_type(struct target_ty
+       struct tt_internal *ti = (struct tt_internal *) t;
+       down_read(&_lock);
+-      if (--ti->use == 0)
+-              module_put(ti->tt.module);
+-
+-      BUG_ON(ti->use < 0);
++      module_put(ti->tt.module);
+       up_read(&_lock);
+       return;
+@@ -140,12 +132,6 @@ void dm_unregister_target(struct target_
+               BUG();
+       }
+-      if (ti->use) {
+-              DMCRIT("Attempt to unregister target still in use: %s",
+-                     t->name);
+-              BUG();
+-      }
+-
+       list_del(&ti->list);
+       kfree(ti);
diff --git a/queue-2.6.29/dock-fix-dereference-after-kfree.patch b/queue-2.6.29/dock-fix-dereference-after-kfree.patch
new file mode 100644 (file)
index 0000000..ca5a990
--- /dev/null
@@ -0,0 +1,38 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 03:57:59 2009
+Date: Mon, 06 Apr 2009 23:56:46 -0400 (EDT)
+From: Len Brown <lenb@kernel.org>
+To: stable@kernel.org
+Message-id: <alpine.LFD.2.00.0904062354360.5698@localhost.localdomain>
+Cc: linux-acpi@vger.kernel.org
+Subject: dock: fix dereference after kfree()
+
+From: Dan Carpenter <error27@gmail.com>
+
+upstream commit: f240729832dff3785104d950dad2d3ced4387f6d
+
+dock_remove() calls kfree() on dock_station so we should use
+list_for_each_entry_safe() to avoid dereferencing freed memory.
+
+Found by smatch (http://repo.or.cz/w/smatch.git/).  Compile tested.
+
+Signed-off-by: Dan Carpenter <error27@gmail.com>
+Signed-off-by: Len Brown <len.brown@intel.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/acpi/dock.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/acpi/dock.c
++++ b/drivers/acpi/dock.c
+@@ -1146,9 +1146,10 @@ static int __init dock_init(void)
+ static void __exit dock_exit(void)
+ {
+       struct dock_station *dock_station;
++      struct dock_station *tmp;
+       unregister_acpi_bus_notifier(&dock_acpi_notifier);
+-      list_for_each_entry(dock_station, &dock_stations, sibiling)
++      list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibiling)
+               dock_remove(dock_station);
+ }
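
The bug class here is exactly what the _safe list iterators exist for:
freeing the current element and then following its next pointer. A tiny
userspace sketch with a hand-rolled singly linked list (plain C, not the
kernel list API) shows the same idea of caching the successor before freeing:

#include <stdio.h>
#include <stdlib.h>

struct node {
        int id;
        struct node *next;
};

static void destroy_all(struct node *head)
{
        struct node *cur = head, *next;

        while (cur) {
                next = cur->next;       /* grab the successor before freeing */
                printf("removing %d\n", cur->id);
                free(cur);
                cur = next;             /* never touch freed memory */
        }
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                n->id = i;
                n->next = head;
                head = n;
        }
        destroy_all(head);
        return 0;
}
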
diff --git a/queue-2.6.29/drm-i915-change-dcc-tiling-detection-case-to-cover-only-mobile-parts.patch b/queue-2.6.29/drm-i915-change-dcc-tiling-detection-case-to-cover-only-mobile-parts.patch
new file mode 100644 (file)
index 0000000..e7ff11d
--- /dev/null
@@ -0,0 +1,75 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 02:42:29 2009
+From: Eric Anholt <eric@anholt.net>
+To: stable@kernel.org
+Date: Mon,  6 Apr 2009 19:40:12 -0700
+Message-Id: <1239072025-1706-1-git-send-email-eric@anholt.net>
+Cc: Eric Anholt <eric@anholt.net>
+Subject: drm/i915: Change DCC tiling detection case to cover only mobile parts.
+
+upstream commit: 568d9a8f6d4bf81e0672c74573dc02981d31e3ea
+
+Later spec investigation has revealed that every 9xx mobile part has
+had this register in this format.  Also, no non-mobile parts have been shown
+to have this register.  So make all mobile parts use the same code, and all
+non-mobile parts use the 965 detection hack.
+
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/gpu/drm/i915/i915_gem_tiling.c |   31 +++++++++++++++----------------
+ 1 file changed, 15 insertions(+), 16 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
++++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
+@@ -96,16 +96,16 @@ i915_gem_detect_bit_6_swizzle(struct drm
+                */
+               swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+               swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+-      } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) ||
+-                 IS_GM45(dev)) {
++      } else if (IS_MOBILE(dev)) {
+               uint32_t dcc;
+-              /* On 915-945 and GM965, channel interleave by the CPU is
+-               * determined by DCC.  The CPU will alternate based on bit 6
+-               * in interleaved mode, and the GPU will then also alternate
+-               * on bit 6, 9, and 10 for X, but the CPU may also optionally
+-               * alternate based on bit 17 (XOR not disabled and XOR
+-               * bit == 17).
++              /* On mobile 9xx chipsets, channel interleave by the CPU is
++               * determined by DCC.  For single-channel, neither the CPU
++               * nor the GPU do swizzling.  For dual channel interleaved,
++               * the GPU's interleave is bit 9 and 10 for X tiled, and bit
++               * 9 for Y tiled.  The CPU's interleave is independent, and
++               * can be based on either bit 11 (haven't seen this yet) or
++               * bit 17 (common).
+                */
+               dcc = I915_READ(DCC);
+               switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+@@ -115,19 +115,18 @@ i915_gem_detect_bit_6_swizzle(struct drm
+                       swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+                       break;
+               case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+-                      if (IS_I915G(dev) || IS_I915GM(dev) ||
+-                          dcc & DCC_CHANNEL_XOR_DISABLE) {
++                      if (dcc & DCC_CHANNEL_XOR_DISABLE) {
++                              /* This is the base swizzling by the GPU for
++                               * tiled buffers.
++                               */
+                               swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+                               swizzle_y = I915_BIT_6_SWIZZLE_9;
+-                      } else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
+-                                 (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+-                              /* GM965/GM45 does either bit 11 or bit 17
+-                               * swizzling.
+-                               */
++                      } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
++                              /* Bit 11 swizzling by the CPU in addition. */
+                               swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+                               swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+                       } else {
+-                              /* Bit 17 or perhaps other swizzling */
++                              /* Bit 17 swizzling by the CPU in addition. */
+                               swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+                               swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+                       }
diff --git a/queue-2.6.29/drm-i915-check-for-dev-primary-master-before-dereference.patch b/queue-2.6.29/drm-i915-check-for-dev-primary-master-before-dereference.patch
new file mode 100644 (file)
index 0000000..ad70601
--- /dev/null
@@ -0,0 +1,48 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 02:44:18 2009
+From: Eric Anholt <eric@anholt.net>
+To: stable@kernel.org
+Date: Mon,  6 Apr 2009 19:40:20 -0700
+Message-Id: <1239072025-1706-9-git-send-email-eric@anholt.net>
+Cc: Eric Anholt <eric@anholt.net>, Chris Wilson <chris@chris-wilson.co.uk>
+Subject: drm/i915: Check for dev->primary->master before dereference.
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+upstream commit: 98787c057fdefdce6230ff46f2c1105835005a4c
+
+I've hit the occasional oops inside i915_wait_ring() with an indication of
+a NULL derefence of dev->primary->master.  Adding a NULL check is
+consistent with the other potential users of dev->primary->master.
+
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/gpu/drm/i915/i915_dma.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -41,7 +41,6 @@
+ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
+ {
+       drm_i915_private_t *dev_priv = dev->dev_private;
+-      struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+       drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+       u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+       u32 last_acthd = I915_READ(acthd_reg);
+@@ -58,8 +57,12 @@ int i915_wait_ring(struct drm_device * d
+               if (ring->space >= n)
+                       return 0;
+-              if (master_priv->sarea_priv)
+-                      master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
++              if (dev->primary->master) {
++                      struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
++                      if (master_priv->sarea_priv)
++                              master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
++              }
++
+               if (ring->head != last_head)
+                       i = 0;
diff --git a/queue-2.6.29/drm-i915-check-for-einval-from-vm_insert_pfn.patch b/queue-2.6.29/drm-i915-check-for-einval-from-vm_insert_pfn.patch
new file mode 100644 (file)
index 0000000..ddc962c
--- /dev/null
@@ -0,0 +1,32 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 02:43:35 2009
+From: Eric Anholt <eric@anholt.net>
+To: stable@kernel.org
+Date: Mon,  6 Apr 2009 19:40:22 -0700
+Message-Id: <1239072025-1706-11-git-send-email-eric@anholt.net>
+Cc: Eric Anholt <eric@anholt.net>, Jesse Barnes <jbarnes@virtuosugeek.org>,         Jesse Barnes <jbarnes@virtuousgeek.org>
+Subject: drm/i915: check for -EINVAL from vm_insert_pfn
+
+From: Jesse Barnes <jbarnes@virtuousgeek.org>
+
+upstream commit: 959b887cf42fd63cf10e28a7f26126f78aa1c0b0
+
+Indicates something is wrong with the mapping, and apparently triggers
+in current kernels.
+
+Signed-off-by: Jesse Barnes <jbarnes@virtuosugeek.org>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/gpu/drm/i915/i915_gem.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1072,6 +1072,7 @@ int i915_gem_fault(struct vm_area_struct
+       case -EAGAIN:
+               return VM_FAULT_OOM;
+       case -EFAULT:
++      case -EINVAL:
+               return VM_FAULT_SIGBUS;
+       default:
+               return VM_FAULT_NOPAGE;
diff --git a/queue-2.6.29/drm-i915-check-the-return-value-from-the-copy-from-user.patch b/queue-2.6.29/drm-i915-check-the-return-value-from-the-copy-from-user.patch
new file mode 100644 (file)
index 0000000..8ade7db
--- /dev/null
@@ -0,0 +1,41 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 02:44:01 2009
+From: Eric Anholt <eric@anholt.net>
+To: stable@kernel.org
+Date: Mon,  6 Apr 2009 19:40:21 -0700
+Message-Id: <1239072025-1706-10-git-send-email-eric@anholt.net>
+Cc: Dave Airlie <airlied@redhat.com>
+Subject: drm/i915: check the return value from the copy from user
+
+From: Dave Airlie <airlied@redhat.com>
+
+upstream commit: d008877550d8ca8c6878dd494e50c1b9209f38d4
+
+This produced a warning on my build, not sure why super-warning-man didn't
+notice this one, it's much worse than the %z one.
+
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/gpu/drm/i915/i915_gem.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -446,13 +446,16 @@ fast_shmem_write(struct page **pages,
+                int length)
+ {
+       char __iomem *vaddr;
++      unsigned long unwritten;
+       vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
+       if (vaddr == NULL)
+               return -ENOMEM;
+-      __copy_from_user_inatomic(vaddr + page_offset, data, length);
++      unwritten = __copy_from_user_inatomic(vaddr + page_offset, data, length);
+       kunmap_atomic(vaddr, KM_USER0);
++      if (unwritten)
++              return -EFAULT;
+       return 0;
+ }
diff --git a/queue-2.6.29/drm-i915-fix-lock-order-reversal-in-gtt-pwrite-path.patch b/queue-2.6.29/drm-i915-fix-lock-order-reversal-in-gtt-pwrite-path.patch
new file mode 100644 (file)
index 0000000..31eee72
--- /dev/null
@@ -0,0 +1,237 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 02:42:06 2009
+From: Eric Anholt <eric@anholt.net>
+To: stable@kernel.org
+Date: Mon,  6 Apr 2009 19:40:14 -0700
+Message-Id: <1239072025-1706-3-git-send-email-eric@anholt.net>
+Cc: Eric Anholt <eric@anholt.net>
+Subject: drm/i915: Fix lock order reversal in GTT pwrite path.
+
+upstream commit: 3de09aa3b38910d366f4710ffdf430c9d387d1a3
+
+Since the pagefault path determines that the lock order we use has to be
+mmap_sem -> struct_mutex, we can't allow page faults to occur while the
+struct_mutex is held.  To fix this in pwrite, we first try optimistically to
+see if we can copy from user without faulting.  If it fails, fall back to
+using get_user_pages to pin the user's memory, and map those pages
+atomically when copying it to the GPU.
+
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/gpu/drm/i915/i915_gem.c |  166 +++++++++++++++++++++++++++++++++-------
+ 1 file changed, 139 insertions(+), 27 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -223,29 +223,34 @@ fast_user_write(struct io_mapping *mappi
+  */
+ static inline int
+-slow_user_write(struct io_mapping *mapping,
+-              loff_t page_base, int page_offset,
+-              char __user *user_data,
+-              int length)
++slow_kernel_write(struct io_mapping *mapping,
++                loff_t gtt_base, int gtt_offset,
++                struct page *user_page, int user_offset,
++                int length)
+ {
+-      char __iomem *vaddr;
++      char *src_vaddr, *dst_vaddr;
+       unsigned long unwritten;
+-      vaddr = io_mapping_map_wc(mapping, page_base);
+-      if (vaddr == NULL)
+-              return -EFAULT;
+-      unwritten = __copy_from_user(vaddr + page_offset,
+-                                   user_data, length);
+-      io_mapping_unmap(vaddr);
++      dst_vaddr = io_mapping_map_atomic_wc(mapping, gtt_base);
++      src_vaddr = kmap_atomic(user_page, KM_USER1);
++      unwritten = __copy_from_user_inatomic_nocache(dst_vaddr + gtt_offset,
++                                                    src_vaddr + user_offset,
++                                                    length);
++      kunmap_atomic(src_vaddr, KM_USER1);
++      io_mapping_unmap_atomic(dst_vaddr);
+       if (unwritten)
+               return -EFAULT;
+       return 0;
+ }
++/**
++ * This is the fast pwrite path, where we copy the data directly from the
++ * user into the GTT, uncached.
++ */
+ static int
+-i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+-                  struct drm_i915_gem_pwrite *args,
+-                  struct drm_file *file_priv)
++i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
++                       struct drm_i915_gem_pwrite *args,
++                       struct drm_file *file_priv)
+ {
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       drm_i915_private_t *dev_priv = dev->dev_private;
+@@ -273,7 +278,6 @@ i915_gem_gtt_pwrite(struct drm_device *d
+       obj_priv = obj->driver_private;
+       offset = obj_priv->gtt_offset + args->offset;
+-      obj_priv->dirty = 1;
+       while (remain > 0) {
+               /* Operation in this page
+@@ -292,16 +296,11 @@ i915_gem_gtt_pwrite(struct drm_device *d
+                                      page_offset, user_data, page_length);
+               /* If we get a fault while copying data, then (presumably) our
+-               * source page isn't available. In this case, use the
+-               * non-atomic function
++               * source page isn't available.  Return the error and we'll
++               * retry in the slow path.
+                */
+-              if (ret) {
+-                      ret = slow_user_write (dev_priv->mm.gtt_mapping,
+-                                             page_base, page_offset,
+-                                             user_data, page_length);
+-                      if (ret)
+-                              goto fail;
+-              }
++              if (ret)
++                      goto fail;
+               remain -= page_length;
+               user_data += page_length;
+@@ -315,6 +314,115 @@ fail:
+       return ret;
+ }
++/**
++ * This is the fallback GTT pwrite path, which uses get_user_pages to pin
++ * the memory and maps it using kmap_atomic for copying.
++ *
++ * This code resulted in x11perf -rgb10text consuming about 10% more CPU
++ * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
++ */
++static int
++i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
++                       struct drm_i915_gem_pwrite *args,
++                       struct drm_file *file_priv)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      drm_i915_private_t *dev_priv = dev->dev_private;
++      ssize_t remain;
++      loff_t gtt_page_base, offset;
++      loff_t first_data_page, last_data_page, num_pages;
++      loff_t pinned_pages, i;
++      struct page **user_pages;
++      struct mm_struct *mm = current->mm;
++      int gtt_page_offset, data_page_offset, data_page_index, page_length;
++      int ret;
++      uint64_t data_ptr = args->data_ptr;
++
++      remain = args->size;
++
++      /* Pin the user pages containing the data.  We can't fault while
++       * holding the struct mutex, and all of the pwrite implementations
++       * want to hold it while dereferencing the user data.
++       */
++      first_data_page = data_ptr / PAGE_SIZE;
++      last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
++      num_pages = last_data_page - first_data_page + 1;
++
++      user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
++      if (user_pages == NULL)
++              return -ENOMEM;
++
++      down_read(&mm->mmap_sem);
++      pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
++                                    num_pages, 0, 0, user_pages, NULL);
++      up_read(&mm->mmap_sem);
++      if (pinned_pages < num_pages) {
++              ret = -EFAULT;
++              goto out_unpin_pages;
++      }
++
++      mutex_lock(&dev->struct_mutex);
++      ret = i915_gem_object_pin(obj, 0);
++      if (ret)
++              goto out_unlock;
++
++      ret = i915_gem_object_set_to_gtt_domain(obj, 1);
++      if (ret)
++              goto out_unpin_object;
++
++      obj_priv = obj->driver_private;
++      offset = obj_priv->gtt_offset + args->offset;
++
++      while (remain > 0) {
++              /* Operation in this page
++               *
++               * gtt_page_base = page offset within aperture
++               * gtt_page_offset = offset within page in aperture
++               * data_page_index = page number in get_user_pages return
++               * data_page_offset = offset with data_page_index page.
++               * page_length = bytes to copy for this page
++               */
++              gtt_page_base = offset & PAGE_MASK;
++              gtt_page_offset = offset & ~PAGE_MASK;
++              data_page_index = data_ptr / PAGE_SIZE - first_data_page;
++              data_page_offset = data_ptr & ~PAGE_MASK;
++
++              page_length = remain;
++              if ((gtt_page_offset + page_length) > PAGE_SIZE)
++                      page_length = PAGE_SIZE - gtt_page_offset;
++              if ((data_page_offset + page_length) > PAGE_SIZE)
++                      page_length = PAGE_SIZE - data_page_offset;
++
++              ret = slow_kernel_write(dev_priv->mm.gtt_mapping,
++                                      gtt_page_base, gtt_page_offset,
++                                      user_pages[data_page_index],
++                                      data_page_offset,
++                                      page_length);
++
++              /* If we get a fault while copying data, then (presumably) our
++               * source page isn't available.  Return the error and we'll
++               * retry in the slow path.
++               */
++              if (ret)
++                      goto out_unpin_object;
++
++              remain -= page_length;
++              offset += page_length;
++              data_ptr += page_length;
++      }
++
++out_unpin_object:
++      i915_gem_object_unpin(obj);
++out_unlock:
++      mutex_unlock(&dev->struct_mutex);
++out_unpin_pages:
++      for (i = 0; i < pinned_pages; i++)
++              page_cache_release(user_pages[i]);
++      kfree(user_pages);
++
++      return ret;
++}
++
+ static int
+ i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+                     struct drm_i915_gem_pwrite *args,
+@@ -388,9 +496,13 @@ i915_gem_pwrite_ioctl(struct drm_device 
+       if (obj_priv->phys_obj)
+               ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
+       else if (obj_priv->tiling_mode == I915_TILING_NONE &&
+-               dev->gtt_total != 0)
+-              ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
+-      else
++               dev->gtt_total != 0) {
++              ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file_priv);
++              if (ret == -EFAULT) {
++                      ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
++                                                     file_priv);
++              }
++      } else
+               ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
+ #if WATCH_PWRITE
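
The control flow introduced here (and reused by the shmem pread/pwrite fixes
that follow) is an optimistic fast path that must not fault, plus a fallback
that prepares the user memory before taking the mutex. Below is a toy
userspace sketch of that structure only; the "fault" is simulated by a flag,
and all names are invented rather than taken from the driver:

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int fast_write(char *dst, const char *src, size_t n, int would_fault)
{
        if (would_fault)
                return -EFAULT;         /* bail out; do not retry here */
        memcpy(dst, src, n);
        return 0;
}

static int slow_write(char *dst, const char *src, size_t n)
{
        /* The real fallback pins the user pages up front (get_user_pages)
         * and copies through atomic mappings; the toy just copies. */
        memcpy(dst, src, n);
        return 0;
}

static int do_pwrite(char *dst, const char *src, size_t n, int would_fault)
{
        int ret = fast_write(dst, src, n, would_fault);

        if (ret == -EFAULT)             /* only this error means "retry slowly" */
                ret = slow_write(dst, src, n);
        return ret;
}

int main(void)
{
        char dst[16] = "";

        printf("fast:     %d '%s'\n", do_pwrite(dst, "hello", 6, 0), dst);
        printf("fallback: %d '%s'\n", do_pwrite(dst, "world", 6, 1), dst);
        return 0;
}
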
diff --git a/queue-2.6.29/drm-i915-fix-lock-order-reversal-in-shmem-pread-path.patch b/queue-2.6.29/drm-i915-fix-lock-order-reversal-in-shmem-pread-path.patch
new file mode 100644 (file)
index 0000000..3909781
--- /dev/null
@@ -0,0 +1,271 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 02:44:07 2009
+From: Eric Anholt <eric@anholt.net>
+To: stable@kernel.org
+Date: Mon,  6 Apr 2009 19:40:17 -0700
+Message-Id: <1239072025-1706-6-git-send-email-eric@anholt.net>
+Cc: Eric Anholt <eric@anholt.net>
+Subject: drm/i915: Fix lock order reversal in shmem pread path.
+
+upstream commit: eb01459fbbccb4ca0b879cbfc97e33ac6eabf975
+
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/gpu/drm/i915/i915_gem.c |  221 +++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 195 insertions(+), 26 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -137,6 +137,24 @@ i915_gem_create_ioctl(struct drm_device 
+ }
+ static inline int
++fast_shmem_read(struct page **pages,
++              loff_t page_base, int page_offset,
++              char __user *data,
++              int length)
++{
++      char __iomem *vaddr;
++      int ret;
++
++      vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
++      if (vaddr == NULL)
++              return -ENOMEM;
++      ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
++      kunmap_atomic(vaddr, KM_USER0);
++
++      return ret;
++}
++
++static inline int
+ slow_shmem_copy(struct page *dst_page,
+               int dst_offset,
+               struct page *src_page,
+@@ -164,6 +182,179 @@ slow_shmem_copy(struct page *dst_page,
+ }
+ /**
++ * This is the fast shmem pread path, which attempts to copy_from_user directly
++ * from the backing pages of the object to the user's address space.  On a
++ * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
++ */
++static int
++i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
++                        struct drm_i915_gem_pread *args,
++                        struct drm_file *file_priv)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      ssize_t remain;
++      loff_t offset, page_base;
++      char __user *user_data;
++      int page_offset, page_length;
++      int ret;
++
++      user_data = (char __user *) (uintptr_t) args->data_ptr;
++      remain = args->size;
++
++      mutex_lock(&dev->struct_mutex);
++
++      ret = i915_gem_object_get_pages(obj);
++      if (ret != 0)
++              goto fail_unlock;
++
++      ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
++                                                      args->size);
++      if (ret != 0)
++              goto fail_put_pages;
++
++      obj_priv = obj->driver_private;
++      offset = args->offset;
++
++      while (remain > 0) {
++              /* Operation in this page
++               *
++               * page_base = page offset within aperture
++               * page_offset = offset within page
++               * page_length = bytes to copy for this page
++               */
++              page_base = (offset & ~(PAGE_SIZE-1));
++              page_offset = offset & (PAGE_SIZE-1);
++              page_length = remain;
++              if ((page_offset + remain) > PAGE_SIZE)
++                      page_length = PAGE_SIZE - page_offset;
++
++              ret = fast_shmem_read(obj_priv->pages,
++                                    page_base, page_offset,
++                                    user_data, page_length);
++              if (ret)
++                      goto fail_put_pages;
++
++              remain -= page_length;
++              user_data += page_length;
++              offset += page_length;
++      }
++
++fail_put_pages:
++      i915_gem_object_put_pages(obj);
++fail_unlock:
++      mutex_unlock(&dev->struct_mutex);
++
++      return ret;
++}
++
++/**
++ * This is the fallback shmem pread path, which allocates temporary storage
++ * in kernel space to copy_to_user into outside of the struct_mutex, so we
++ * can copy out of the object's backing pages while holding the struct mutex
++ * and not take page faults.
++ */
++static int
++i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
++                        struct drm_i915_gem_pread *args,
++                        struct drm_file *file_priv)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      struct mm_struct *mm = current->mm;
++      struct page **user_pages;
++      ssize_t remain;
++      loff_t offset, pinned_pages, i;
++      loff_t first_data_page, last_data_page, num_pages;
++      int shmem_page_index, shmem_page_offset;
++      int data_page_index,  data_page_offset;
++      int page_length;
++      int ret;
++      uint64_t data_ptr = args->data_ptr;
++
++      remain = args->size;
++
++      /* Pin the user pages containing the data.  We can't fault while
++       * holding the struct mutex, yet we want to hold it while
++       * dereferencing the user data.
++       */
++      first_data_page = data_ptr / PAGE_SIZE;
++      last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
++      num_pages = last_data_page - first_data_page + 1;
++
++      user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
++      if (user_pages == NULL)
++              return -ENOMEM;
++
++      down_read(&mm->mmap_sem);
++      pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
++                                    num_pages, 0, 0, user_pages, NULL);
++      up_read(&mm->mmap_sem);
++      if (pinned_pages < num_pages) {
++              ret = -EFAULT;
++              goto fail_put_user_pages;
++      }
++
++      mutex_lock(&dev->struct_mutex);
++
++      ret = i915_gem_object_get_pages(obj);
++      if (ret != 0)
++              goto fail_unlock;
++
++      ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
++                                                      args->size);
++      if (ret != 0)
++              goto fail_put_pages;
++
++      obj_priv = obj->driver_private;
++      offset = args->offset;
++
++      while (remain > 0) {
++              /* Operation in this page
++               *
++               * shmem_page_index = page number within shmem file
++               * shmem_page_offset = offset within page in shmem file
++               * data_page_index = page number in get_user_pages return
++               * data_page_offset = offset with data_page_index page.
++               * page_length = bytes to copy for this page
++               */
++              shmem_page_index = offset / PAGE_SIZE;
++              shmem_page_offset = offset & ~PAGE_MASK;
++              data_page_index = data_ptr / PAGE_SIZE - first_data_page;
++              data_page_offset = data_ptr & ~PAGE_MASK;
++
++              page_length = remain;
++              if ((shmem_page_offset + page_length) > PAGE_SIZE)
++                      page_length = PAGE_SIZE - shmem_page_offset;
++              if ((data_page_offset + page_length) > PAGE_SIZE)
++                      page_length = PAGE_SIZE - data_page_offset;
++
++              ret = slow_shmem_copy(user_pages[data_page_index],
++                                    data_page_offset,
++                                    obj_priv->pages[shmem_page_index],
++                                    shmem_page_offset,
++                                    page_length);
++              if (ret)
++                      goto fail_put_pages;
++
++              remain -= page_length;
++              data_ptr += page_length;
++              offset += page_length;
++      }
++
++fail_put_pages:
++      i915_gem_object_put_pages(obj);
++fail_unlock:
++      mutex_unlock(&dev->struct_mutex);
++fail_put_user_pages:
++      for (i = 0; i < pinned_pages; i++) {
++              SetPageDirty(user_pages[i]);
++              page_cache_release(user_pages[i]);
++      }
++      kfree(user_pages);
++
++      return ret;
++}
++
++/**
+  * Reads data from the object referenced by handle.
+  *
+  * On error, the contents of *data are undefined.
+@@ -175,8 +366,6 @@ i915_gem_pread_ioctl(struct drm_device *
+       struct drm_i915_gem_pread *args = data;
+       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj_priv;
+-      ssize_t read;
+-      loff_t offset;
+       int ret;
+       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+@@ -194,33 +383,13 @@ i915_gem_pread_ioctl(struct drm_device *
+               return -EINVAL;
+       }
+-      mutex_lock(&dev->struct_mutex);
+-
+-      ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
+-                                                      args->size);
+-      if (ret != 0) {
+-              drm_gem_object_unreference(obj);
+-              mutex_unlock(&dev->struct_mutex);
+-              return ret;
+-      }
+-
+-      offset = args->offset;
+-
+-      read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
+-                      args->size, &offset);
+-      if (read != args->size) {
+-              drm_gem_object_unreference(obj);
+-              mutex_unlock(&dev->struct_mutex);
+-              if (read < 0)
+-                      return read;
+-              else
+-                      return -EINVAL;
+-      }
++      ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
++      if (ret != 0)
++              ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+       drm_gem_object_unreference(obj);
+-      mutex_unlock(&dev->struct_mutex);
+-      return 0;
++      return ret;
+ }
+ /* This is the fast write path which cannot handle
diff --git a/queue-2.6.29/drm-i915-fix-lock-order-reversal-in-shmem-pwrite-path.patch b/queue-2.6.29/drm-i915-fix-lock-order-reversal-in-shmem-pwrite-path.patch
new file mode 100644 (file)
index 0000000..422d9ee
--- /dev/null
@@ -0,0 +1,291 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 02:42:18 2009
+From: Eric Anholt <eric@anholt.net>
+To: stable@kernel.org
+Date: Mon,  6 Apr 2009 19:40:16 -0700
+Message-Id: <1239072025-1706-5-git-send-email-eric@anholt.net>
+Cc: Eric Anholt <eric@anholt.net>
+Subject: drm/i915: Fix lock order reversal in shmem pwrite path.
+
+upstream commit: 40123c1f8dd920dcff7a42cde5b351d7d0b0422e
+
+Like the GTT pwrite path fix, this uses an optimistic path and a
+fallback to get_user_pages.  Note that this means we have to stop using
+vfs_write and roll it ourselves.
+
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/gpu/drm/i915/i915_gem.c |  225 ++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 205 insertions(+), 20 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -136,6 +136,33 @@ i915_gem_create_ioctl(struct drm_device 
+       return 0;
+ }
++static inline int
++slow_shmem_copy(struct page *dst_page,
++              int dst_offset,
++              struct page *src_page,
++              int src_offset,
++              int length)
++{
++      char *dst_vaddr, *src_vaddr;
++
++      dst_vaddr = kmap_atomic(dst_page, KM_USER0);
++      if (dst_vaddr == NULL)
++              return -ENOMEM;
++
++      src_vaddr = kmap_atomic(src_page, KM_USER1);
++      if (src_vaddr == NULL) {
++              kunmap_atomic(dst_vaddr, KM_USER0);
++              return -ENOMEM;
++      }
++
++      memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
++
++      kunmap_atomic(src_vaddr, KM_USER1);
++      kunmap_atomic(dst_vaddr, KM_USER0);
++
++      return 0;
++}
++
+ /**
+  * Reads data from the object referenced by handle.
+  *
+@@ -243,6 +270,23 @@ slow_kernel_write(struct io_mapping *map
+       return 0;
+ }
++static inline int
++fast_shmem_write(struct page **pages,
++               loff_t page_base, int page_offset,
++               char __user *data,
++               int length)
++{
++      char __iomem *vaddr;
++
++      vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT], KM_USER0);
++      if (vaddr == NULL)
++              return -ENOMEM;
++      __copy_from_user_inatomic(vaddr + page_offset, data, length);
++      kunmap_atomic(vaddr, KM_USER0);
++
++      return 0;
++}
++
+ /**
+  * This is the fast pwrite path, where we copy the data directly from the
+  * user into the GTT, uncached.
+@@ -423,39 +467,175 @@ out_unpin_pages:
+       return ret;
+ }
++/**
++ * This is the fast shmem pwrite path, which attempts to directly
++ * copy_from_user into the kmapped pages backing the object.
++ */
+ static int
+-i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+-                    struct drm_i915_gem_pwrite *args,
+-                    struct drm_file *file_priv)
++i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
++                         struct drm_i915_gem_pwrite *args,
++                         struct drm_file *file_priv)
+ {
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      ssize_t remain;
++      loff_t offset, page_base;
++      char __user *user_data;
++      int page_offset, page_length;
+       int ret;
+-      loff_t offset;
+-      ssize_t written;
++
++      user_data = (char __user *) (uintptr_t) args->data_ptr;
++      remain = args->size;
+       mutex_lock(&dev->struct_mutex);
++      ret = i915_gem_object_get_pages(obj);
++      if (ret != 0)
++              goto fail_unlock;
++
+       ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+-      if (ret) {
+-              mutex_unlock(&dev->struct_mutex);
+-              return ret;
++      if (ret != 0)
++              goto fail_put_pages;
++
++      obj_priv = obj->driver_private;
++      offset = args->offset;
++      obj_priv->dirty = 1;
++
++      while (remain > 0) {
++              /* Operation in this page
++               *
++               * page_base = page offset within aperture
++               * page_offset = offset within page
++               * page_length = bytes to copy for this page
++               */
++              page_base = (offset & ~(PAGE_SIZE-1));
++              page_offset = offset & (PAGE_SIZE-1);
++              page_length = remain;
++              if ((page_offset + remain) > PAGE_SIZE)
++                      page_length = PAGE_SIZE - page_offset;
++
++              ret = fast_shmem_write(obj_priv->pages,
++                                     page_base, page_offset,
++                                     user_data, page_length);
++              if (ret)
++                      goto fail_put_pages;
++
++              remain -= page_length;
++              user_data += page_length;
++              offset += page_length;
+       }
++fail_put_pages:
++      i915_gem_object_put_pages(obj);
++fail_unlock:
++      mutex_unlock(&dev->struct_mutex);
++
++      return ret;
++}
++
++/**
++ * This is the fallback shmem pwrite path, which uses get_user_pages to pin
++ * the memory and maps it using kmap_atomic for copying.
++ *
++ * This avoids taking mmap_sem for faulting on the user's address while the
++ * struct_mutex is held.
++ */
++static int
++i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
++                         struct drm_i915_gem_pwrite *args,
++                         struct drm_file *file_priv)
++{
++      struct drm_i915_gem_object *obj_priv = obj->driver_private;
++      struct mm_struct *mm = current->mm;
++      struct page **user_pages;
++      ssize_t remain;
++      loff_t offset, pinned_pages, i;
++      loff_t first_data_page, last_data_page, num_pages;
++      int shmem_page_index, shmem_page_offset;
++      int data_page_index,  data_page_offset;
++      int page_length;
++      int ret;
++      uint64_t data_ptr = args->data_ptr;
++
++      remain = args->size;
++
++      /* Pin the user pages containing the data.  We can't fault while
++       * holding the struct mutex, and all of the pwrite implementations
++       * want to hold it while dereferencing the user data.
++       */
++      first_data_page = data_ptr / PAGE_SIZE;
++      last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
++      num_pages = last_data_page - first_data_page + 1;
++
++      user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
++      if (user_pages == NULL)
++              return -ENOMEM;
++
++      down_read(&mm->mmap_sem);
++      pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
++                                    num_pages, 0, 0, user_pages, NULL);
++      up_read(&mm->mmap_sem);
++      if (pinned_pages < num_pages) {
++              ret = -EFAULT;
++              goto fail_put_user_pages;
++      }
++
++      mutex_lock(&dev->struct_mutex);
++
++      ret = i915_gem_object_get_pages(obj);
++      if (ret != 0)
++              goto fail_unlock;
++
++      ret = i915_gem_object_set_to_cpu_domain(obj, 1);
++      if (ret != 0)
++              goto fail_put_pages;
++
++      obj_priv = obj->driver_private;
+       offset = args->offset;
++      obj_priv->dirty = 1;
+-      written = vfs_write(obj->filp,
+-                          (char __user *)(uintptr_t) args->data_ptr,
+-                          args->size, &offset);
+-      if (written != args->size) {
+-              mutex_unlock(&dev->struct_mutex);
+-              if (written < 0)
+-                      return written;
+-              else
+-                      return -EINVAL;
++      while (remain > 0) {
++              /* Operation in this page
++               *
++               * shmem_page_index = page number within shmem file
++               * shmem_page_offset = offset within page in shmem file
++               * data_page_index = page number in get_user_pages return
++               * data_page_offset = offset with data_page_index page.
++               * page_length = bytes to copy for this page
++               */
++              shmem_page_index = offset / PAGE_SIZE;
++              shmem_page_offset = offset & ~PAGE_MASK;
++              data_page_index = data_ptr / PAGE_SIZE - first_data_page;
++              data_page_offset = data_ptr & ~PAGE_MASK;
++
++              page_length = remain;
++              if ((shmem_page_offset + page_length) > PAGE_SIZE)
++                      page_length = PAGE_SIZE - shmem_page_offset;
++              if ((data_page_offset + page_length) > PAGE_SIZE)
++                      page_length = PAGE_SIZE - data_page_offset;
++
++              ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
++                                    shmem_page_offset,
++                                    user_pages[data_page_index],
++                                    data_page_offset,
++                                    page_length);
++              if (ret)
++                      goto fail_put_pages;
++
++              remain -= page_length;
++              data_ptr += page_length;
++              offset += page_length;
+       }
++fail_put_pages:
++      i915_gem_object_put_pages(obj);
++fail_unlock:
+       mutex_unlock(&dev->struct_mutex);
++fail_put_user_pages:
++      for (i = 0; i < pinned_pages; i++)
++              page_cache_release(user_pages[i]);
++      kfree(user_pages);
+-      return 0;
++      return ret;
+ }
+ /**
+@@ -502,8 +682,13 @@ i915_gem_pwrite_ioctl(struct drm_device 
+                       ret = i915_gem_gtt_pwrite_slow(dev, obj, args,
+                                                      file_priv);
+               }
+-      } else
+-              ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
++      } else {
++              ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file_priv);
++              if (ret == -EFAULT) {
++                      ret = i915_gem_shmem_pwrite_slow(dev, obj, args,
++                                                       file_priv);
++              }
++      }
+ #if WATCH_PWRITE
+       if (ret)
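The slow path added above follows a general pattern for avoiding the mmap_sem/struct_mutex inversion: pin the user pages up front with get_user_pages(), take the mutex, and only then copy page by page, so nothing can fault while the lock is held. Below is a minimal sketch of that pattern, assuming the 2.6.29-era get_user_pages() signature used in the patch; pin_and_copy() and copy_one() are illustrative names, not driver functions.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>

/*
 * Pin the source pages before taking 'lock' so the copy loop can never
 * fault (and so never needs mmap_sem) while the lock is held.
 */
static int pin_and_copy(struct mutex *lock, unsigned long uaddr, size_t len,
                        int (*copy_one)(struct page *src, int off, int n))
{
    struct mm_struct *mm = current->mm;
    int num_pages, i, pinned, ret = 0;
    struct page **pages;

    if (len == 0)
        return 0;
    num_pages = (uaddr + len - 1) / PAGE_SIZE - uaddr / PAGE_SIZE + 1;

    pages = kcalloc(num_pages, sizeof(*pages), GFP_KERNEL);
    if (pages == NULL)
        return -ENOMEM;

    /* Faulting is fine here: no other lock is held yet. */
    down_read(&mm->mmap_sem);
    pinned = get_user_pages(current, mm, uaddr & PAGE_MASK,
                            num_pages, 0, 0, pages, NULL);
    up_read(&mm->mmap_sem);
    if (pinned < num_pages) {
        ret = -EFAULT;
        goto put_pages;
    }

    mutex_lock(lock);                   /* no faults can happen below */
    for (i = 0; i < num_pages && len != 0 && ret == 0; i++) {
        int off = (i == 0) ? (uaddr & ~PAGE_MASK) : 0;
        int n = min_t(size_t, len, PAGE_SIZE - off);

        ret = copy_one(pages[i], off, n);
        len -= n;
    }
    mutex_unlock(lock);

put_pages:
    for (i = 0; i < pinned; i++)
        page_cache_release(pages[i]);
    kfree(pages);
    return ret;
}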
diff --git a/queue-2.6.29/drm-i915-fix-lock-order-reversal-with-cliprects-and-cmdbuf-in-non-dri2-paths.patch b/queue-2.6.29/drm-i915-fix-lock-order-reversal-with-cliprects-and-cmdbuf-in-non-dri2-paths.patch
new file mode 100644 (file)
index 0000000..0b5de5c
--- /dev/null
@@ -0,0 +1,332 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 02:44:13 2009
+From: Eric Anholt <eric@anholt.net>
+To: stable@kernel.org
+Date: Mon,  6 Apr 2009 19:40:18 -0700
+Message-Id: <1239072025-1706-7-git-send-email-eric@anholt.net>
+Cc: Eric Anholt <eric@anholt.net>
+Subject: drm/i915: Fix lock order reversal with cliprects and cmdbuf in non-DRI2 paths.
+
+upstream commit: 201361a54ed187d8595a283e3a4ddb213bc8323b
+
+This introduces allocation in the batch submission path that wasn't there
+previously, but these are compatibility paths so we care about simplicity
+more than performance.
+
+kernel.org bug #12419.
+
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Reviewed-by: Keith Packard <keithp@keithp.com>
+Acked-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/gpu/drm/i915/i915_dma.c |  107 +++++++++++++++++++++++++++-------------
+ drivers/gpu/drm/i915/i915_drv.h |    2 
+ drivers/gpu/drm/i915/i915_gem.c |   27 ++++++++--
+ 3 files changed, 97 insertions(+), 39 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -356,7 +356,7 @@ static int validate_cmd(int cmd)
+       return ret;
+ }
+-static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
++static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
+ {
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       int i;
+@@ -370,8 +370,7 @@ static int i915_emit_cmds(struct drm_dev
+       for (i = 0; i < dwords;) {
+               int cmd, sz;
+-              if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
+-                      return -EINVAL;
++              cmd = buffer[i];
+               if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
+                       return -EINVAL;
+@@ -379,11 +378,7 @@ static int i915_emit_cmds(struct drm_dev
+               OUT_RING(cmd);
+               while (++i, --sz) {
+-                      if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
+-                                                       sizeof(cmd))) {
+-                              return -EINVAL;
+-                      }
+-                      OUT_RING(cmd);
++                      OUT_RING(buffer[i]);
+               }
+       }
+@@ -397,17 +392,13 @@ static int i915_emit_cmds(struct drm_dev
+ int
+ i915_emit_box(struct drm_device *dev,
+-            struct drm_clip_rect __user *boxes,
++            struct drm_clip_rect *boxes,
+             int i, int DR1, int DR4)
+ {
+       drm_i915_private_t *dev_priv = dev->dev_private;
+-      struct drm_clip_rect box;
++      struct drm_clip_rect box = boxes[i];
+       RING_LOCALS;
+-      if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
+-              return -EFAULT;
+-      }
+-
+       if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
+               DRM_ERROR("Bad box %d,%d..%d,%d\n",
+                         box.x1, box.y1, box.x2, box.y2);
+@@ -460,7 +451,9 @@ static void i915_emit_breadcrumb(struct 
+ }
+ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+-                                 drm_i915_cmdbuffer_t * cmd)
++                                 drm_i915_cmdbuffer_t *cmd,
++                                 struct drm_clip_rect *cliprects,
++                                 void *cmdbuf)
+ {
+       int nbox = cmd->num_cliprects;
+       int i = 0, count, ret;
+@@ -476,13 +469,13 @@ static int i915_dispatch_cmdbuffer(struc
+       for (i = 0; i < count; i++) {
+               if (i < nbox) {
+-                      ret = i915_emit_box(dev, cmd->cliprects, i,
++                      ret = i915_emit_box(dev, cliprects, i,
+                                           cmd->DR1, cmd->DR4);
+                       if (ret)
+                               return ret;
+               }
+-              ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
++              ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
+               if (ret)
+                       return ret;
+       }
+@@ -492,10 +485,10 @@ static int i915_dispatch_cmdbuffer(struc
+ }
+ static int i915_dispatch_batchbuffer(struct drm_device * dev,
+-                                   drm_i915_batchbuffer_t * batch)
++                                   drm_i915_batchbuffer_t * batch,
++                                   struct drm_clip_rect *cliprects)
+ {
+       drm_i915_private_t *dev_priv = dev->dev_private;
+-      struct drm_clip_rect __user *boxes = batch->cliprects;
+       int nbox = batch->num_cliprects;
+       int i = 0, count;
+       RING_LOCALS;
+@@ -511,7 +504,7 @@ static int i915_dispatch_batchbuffer(str
+       for (i = 0; i < count; i++) {
+               if (i < nbox) {
+-                      int ret = i915_emit_box(dev, boxes, i,
++                      int ret = i915_emit_box(dev, cliprects, i,
+                                               batch->DR1, batch->DR4);
+                       if (ret)
+                               return ret;
+@@ -626,6 +619,7 @@ static int i915_batchbuffer(struct drm_d
+           master_priv->sarea_priv;
+       drm_i915_batchbuffer_t *batch = data;
+       int ret;
++      struct drm_clip_rect *cliprects = NULL;
+       if (!dev_priv->allow_batchbuffer) {
+               DRM_ERROR("Batchbuffer ioctl disabled\n");
+@@ -637,17 +631,35 @@ static int i915_batchbuffer(struct drm_d
+       RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+-      if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
+-                                                     batch->num_cliprects *
+-                                                     sizeof(struct drm_clip_rect)))
+-              return -EFAULT;
++      if (batch->num_cliprects < 0)
++              return -EINVAL;
++
++      if (batch->num_cliprects) {
++              cliprects = drm_calloc(batch->num_cliprects,
++                                     sizeof(struct drm_clip_rect),
++                                     DRM_MEM_DRIVER);
++              if (cliprects == NULL)
++                      return -ENOMEM;
++
++              ret = copy_from_user(cliprects, batch->cliprects,
++                                   batch->num_cliprects *
++                                   sizeof(struct drm_clip_rect));
++              if (ret != 0)
++                      goto fail_free;
++      }
+       mutex_lock(&dev->struct_mutex);
+-      ret = i915_dispatch_batchbuffer(dev, batch);
++      ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
+       mutex_unlock(&dev->struct_mutex);
+       if (sarea_priv)
+               sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
++
++fail_free:
++      drm_free(cliprects,
++               batch->num_cliprects * sizeof(struct drm_clip_rect),
++               DRM_MEM_DRIVER);
++
+       return ret;
+ }
+@@ -659,6 +671,8 @@ static int i915_cmdbuffer(struct drm_dev
+       drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+           master_priv->sarea_priv;
+       drm_i915_cmdbuffer_t *cmdbuf = data;
++      struct drm_clip_rect *cliprects = NULL;
++      void *batch_data;
+       int ret;
+       DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
+@@ -666,25 +680,50 @@ static int i915_cmdbuffer(struct drm_dev
+       RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+-      if (cmdbuf->num_cliprects &&
+-          DRM_VERIFYAREA_READ(cmdbuf->cliprects,
+-                              cmdbuf->num_cliprects *
+-                              sizeof(struct drm_clip_rect))) {
+-              DRM_ERROR("Fault accessing cliprects\n");
+-              return -EFAULT;
++      if (cmdbuf->num_cliprects < 0)
++              return -EINVAL;
++
++      batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER);
++      if (batch_data == NULL)
++              return -ENOMEM;
++
++      ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
++      if (ret != 0)
++              goto fail_batch_free;
++
++      if (cmdbuf->num_cliprects) {
++              cliprects = drm_calloc(cmdbuf->num_cliprects,
++                                     sizeof(struct drm_clip_rect),
++                                     DRM_MEM_DRIVER);
++              if (cliprects == NULL)
++                      goto fail_batch_free;
++
++              ret = copy_from_user(cliprects, cmdbuf->cliprects,
++                                   cmdbuf->num_cliprects *
++                                   sizeof(struct drm_clip_rect));
++              if (ret != 0)
++                      goto fail_clip_free;
+       }
+       mutex_lock(&dev->struct_mutex);
+-      ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
++      ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
+       mutex_unlock(&dev->struct_mutex);
+       if (ret) {
+               DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
+-              return ret;
++              goto fail_batch_free;
+       }
+       if (sarea_priv)
+               sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+-      return 0;
++
++fail_batch_free:
++      drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
++fail_clip_free:
++      drm_free(cliprects,
++               cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
++               DRM_MEM_DRIVER);
++
++      return ret;
+ }
+ static int i915_flip_bufs(struct drm_device *dev, void *data,
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -520,7 +520,7 @@ extern int i915_driver_device_is_agp(str
+ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
+                             unsigned long arg);
+ extern int i915_emit_box(struct drm_device *dev,
+-                       struct drm_clip_rect __user *boxes,
++                       struct drm_clip_rect *boxes,
+                        int i, int DR1, int DR4);
+ /* i915_irq.c */
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2891,11 +2891,10 @@ i915_gem_object_pin_and_relocate(struct 
+ static int
+ i915_dispatch_gem_execbuffer(struct drm_device *dev,
+                             struct drm_i915_gem_execbuffer *exec,
++                            struct drm_clip_rect *cliprects,
+                             uint64_t exec_offset)
+ {
+       drm_i915_private_t *dev_priv = dev->dev_private;
+-      struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
+-                                           (uintptr_t) exec->cliprects_ptr;
+       int nbox = exec->num_cliprects;
+       int i = 0, count;
+       uint32_t        exec_start, exec_len;
+@@ -2916,7 +2915,7 @@ i915_dispatch_gem_execbuffer(struct drm_
+       for (i = 0; i < count; i++) {
+               if (i < nbox) {
+-                      int ret = i915_emit_box(dev, boxes, i,
++                      int ret = i915_emit_box(dev, cliprects, i,
+                                               exec->DR1, exec->DR4);
+                       if (ret)
+                               return ret;
+@@ -2983,6 +2982,7 @@ i915_gem_execbuffer(struct drm_device *d
+       struct drm_gem_object **object_list = NULL;
+       struct drm_gem_object *batch_obj;
+       struct drm_i915_gem_object *obj_priv;
++      struct drm_clip_rect *cliprects = NULL;
+       int ret, i, pinned = 0;
+       uint64_t exec_offset;
+       uint32_t seqno, flush_domains;
+@@ -3019,6 +3019,23 @@ i915_gem_execbuffer(struct drm_device *d
+               goto pre_mutex_err;
+       }
++      if (args->num_cliprects != 0) {
++              cliprects = drm_calloc(args->num_cliprects, sizeof(*cliprects),
++                                     DRM_MEM_DRIVER);
++              if (cliprects == NULL)
++                      goto pre_mutex_err;
++
++              ret = copy_from_user(cliprects,
++                                   (struct drm_clip_rect __user *)
++                                   (uintptr_t) args->cliprects_ptr,
++                                   sizeof(*cliprects) * args->num_cliprects);
++              if (ret != 0) {
++                      DRM_ERROR("copy %d cliprects failed: %d\n",
++                                args->num_cliprects, ret);
++                      goto pre_mutex_err;
++              }
++      }
++
+       mutex_lock(&dev->struct_mutex);
+       i915_verify_inactive(dev, __FILE__, __LINE__);
+@@ -3155,7 +3172,7 @@ i915_gem_execbuffer(struct drm_device *d
+ #endif
+       /* Exec the batchbuffer */
+-      ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
++      ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
+       if (ret) {
+               DRM_ERROR("dispatch failed %d\n", ret);
+               goto err;
+@@ -3224,6 +3241,8 @@ pre_mutex_err:
+                DRM_MEM_DRIVER);
+       drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
+                DRM_MEM_DRIVER);
++      drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
++               DRM_MEM_DRIVER);
+       return ret;
+ }
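The changes above all enforce one rule across i915_dma.c and i915_gem.c: copy_from_user() can fault and therefore take mmap_sem, so the user-supplied cliprects and command buffer are snapshotted into kernel memory before dev->struct_mutex is taken, and the dispatch code only ever sees kernel pointers. A hedged sketch of that shape; ioctl_copy_then_lock(), do_dispatch() and the size bound are placeholders, not DRM code.

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/*
 * Snapshot user data first, then take the lock that must never be held
 * across a page fault.
 */
static int ioctl_copy_then_lock(struct mutex *hw_lock,
                                void __user *ubuf, size_t sz,
                                int (*do_dispatch)(void *kbuf, size_t sz))
{
    void *kbuf;
    int ret;

    if (sz == 0 || sz > 16 * PAGE_SIZE)     /* arbitrary sanity bound */
        return -EINVAL;

    kbuf = kmalloc(sz, GFP_KERNEL);
    if (kbuf == NULL)
        return -ENOMEM;

    if (copy_from_user(kbuf, ubuf, sz)) {   /* may fault: lock not held */
        ret = -EFAULT;
        goto out_free;
    }

    mutex_lock(hw_lock);
    ret = do_dispatch(kbuf, sz);            /* sees only kernel memory */
    mutex_unlock(hw_lock);

out_free:
    kfree(kbuf);
    return ret;
}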
diff --git a/queue-2.6.29/drm-i915-fix-tv-mode-setting-in-property-change.patch b/queue-2.6.29/drm-i915-fix-tv-mode-setting-in-property-change.patch
new file mode 100644 (file)
index 0000000..26bc39e
--- /dev/null
@@ -0,0 +1,46 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 02:43:52 2009
+From: Eric Anholt <eric@anholt.net>
+To: stable@kernel.org
+Date: Mon,  6 Apr 2009 19:40:25 -0700
+Message-Id: <1239072025-1706-14-git-send-email-eric@anholt.net>
+Cc: Eric Anholt <eric@anholt.net>, Zhenyu Wang <zhenyu.z.wang@intel.com>
+Subject: drm/i915: fix TV mode setting in property change
+
+From: Zhenyu Wang <zhenyu.z.wang@intel.com>
+
+upstream commit: 7d6ff7851c23740c3813bdf457be638381774b69
+
+Setting only the TV DAC on a property change doesn't seem to work; we have
+to set up the whole CRTC pipe assigned to the TV.
+
+Signed-off-by: Zhenyu Wang <zhenyu.z.wang@intel.com>
+[anholt: Note that this should also fix the oops at startup with new 2D]
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/gpu/drm/i915/intel_tv.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_tv.c
++++ b/drivers/gpu/drm/i915/intel_tv.c
+@@ -1558,6 +1558,8 @@ intel_tv_set_property(struct drm_connect
+       struct drm_device *dev = connector->dev;
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_tv_priv *tv_priv = intel_output->dev_priv;
++      struct drm_encoder *encoder = &intel_output->enc;
++      struct drm_crtc *crtc = encoder->crtc;
+       int ret = 0;
+       bool changed = false;
+@@ -1596,8 +1598,9 @@ intel_tv_set_property(struct drm_connect
+               goto out;
+       }
+-      if (changed)
+-              intel_tv_mode_set(&intel_output->enc, NULL, NULL);
++      if (changed && crtc)
++              drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
++                              crtc->y, crtc->fb);
+ out:
+       return ret;
+ }
diff --git a/queue-2.6.29/drm-i915-make-gem-object-s-page-lists-refcounted-instead-of-get-free.patch b/queue-2.6.29/drm-i915-make-gem-object-s-page-lists-refcounted-instead-of-get-free.patch
new file mode 100644 (file)
index 0000000..fcef839
--- /dev/null
@@ -0,0 +1,258 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 02:42:12 2009
+From: Eric Anholt <eric@anholt.net>
+To: stable@kernel.org
+Date: Mon,  6 Apr 2009 19:40:15 -0700
+Message-Id: <1239072025-1706-4-git-send-email-eric@anholt.net>
+Cc: Eric Anholt <eric@anholt.net>
+Subject: drm/i915: Make GEM object's page lists refcounted instead of get/free.
+
+upstream commit: 856fa1988ea483fc2dab84a16681dcfde821b740
+
+We've wanted this for a few consumers that touch the pages directly (such as
+the following commit), which have been doing the refcounting outside of
+get/put pages.
+
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Reviewed-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/gpu/drm/i915/i915_drv.h |    3 +
+ drivers/gpu/drm/i915/i915_gem.c |   70 ++++++++++++++++++++--------------------
+ 2 files changed, 38 insertions(+), 35 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -404,7 +404,8 @@ struct drm_i915_gem_object {
+       /** AGP memory structure for our GTT binding. */
+       DRM_AGP_MEM *agp_mem;
+-      struct page **page_list;
++      struct page **pages;
++      int pages_refcount;
+       /**
+        * Current offset of the object in GTT space.
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -43,8 +43,8 @@ static int i915_gem_object_set_cpu_read_
+                                                    uint64_t offset,
+                                                    uint64_t size);
+ static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
+-static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
+-static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
++static int i915_gem_object_get_pages(struct drm_gem_object *obj);
++static void i915_gem_object_put_pages(struct drm_gem_object *obj);
+ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+ static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
+                                          unsigned alignment);
+@@ -928,29 +928,30 @@ i915_gem_mmap_gtt_ioctl(struct drm_devic
+ }
+ static void
+-i915_gem_object_free_page_list(struct drm_gem_object *obj)
++i915_gem_object_put_pages(struct drm_gem_object *obj)
+ {
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int page_count = obj->size / PAGE_SIZE;
+       int i;
+-      if (obj_priv->page_list == NULL)
+-              return;
++      BUG_ON(obj_priv->pages_refcount == 0);
++      if (--obj_priv->pages_refcount != 0)
++              return;
+       for (i = 0; i < page_count; i++)
+-              if (obj_priv->page_list[i] != NULL) {
++              if (obj_priv->pages[i] != NULL) {
+                       if (obj_priv->dirty)
+-                              set_page_dirty(obj_priv->page_list[i]);
+-                      mark_page_accessed(obj_priv->page_list[i]);
+-                      page_cache_release(obj_priv->page_list[i]);
++                              set_page_dirty(obj_priv->pages[i]);
++                      mark_page_accessed(obj_priv->pages[i]);
++                      page_cache_release(obj_priv->pages[i]);
+               }
+       obj_priv->dirty = 0;
+-      drm_free(obj_priv->page_list,
++      drm_free(obj_priv->pages,
+                page_count * sizeof(struct page *),
+                DRM_MEM_DRIVER);
+-      obj_priv->page_list = NULL;
++      obj_priv->pages = NULL;
+ }
+ static void
+@@ -1402,7 +1403,7 @@ i915_gem_object_unbind(struct drm_gem_ob
+       if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+               i915_gem_clear_fence_reg(obj);
+-      i915_gem_object_free_page_list(obj);
++      i915_gem_object_put_pages(obj);
+       if (obj_priv->gtt_space) {
+               atomic_dec(&dev->gtt_count);
+@@ -1521,7 +1522,7 @@ i915_gem_evict_everything(struct drm_dev
+ }
+ static int
+-i915_gem_object_get_page_list(struct drm_gem_object *obj)
++i915_gem_object_get_pages(struct drm_gem_object *obj)
+ {
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int page_count, i;
+@@ -1530,18 +1531,19 @@ i915_gem_object_get_page_list(struct drm
+       struct page *page;
+       int ret;
+-      if (obj_priv->page_list)
++      if (obj_priv->pages_refcount++ != 0)
+               return 0;
+       /* Get the list of pages out of our struct file.  They'll be pinned
+        * at this point until we release them.
+        */
+       page_count = obj->size / PAGE_SIZE;
+-      BUG_ON(obj_priv->page_list != NULL);
+-      obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
+-                                       DRM_MEM_DRIVER);
+-      if (obj_priv->page_list == NULL) {
++      BUG_ON(obj_priv->pages != NULL);
++      obj_priv->pages = drm_calloc(page_count, sizeof(struct page *),
++                                   DRM_MEM_DRIVER);
++      if (obj_priv->pages == NULL) {
+               DRM_ERROR("Faled to allocate page list\n");
++              obj_priv->pages_refcount--;
+               return -ENOMEM;
+       }
+@@ -1552,10 +1554,10 @@ i915_gem_object_get_page_list(struct drm
+               if (IS_ERR(page)) {
+                       ret = PTR_ERR(page);
+                       DRM_ERROR("read_mapping_page failed: %d\n", ret);
+-                      i915_gem_object_free_page_list(obj);
++                      i915_gem_object_put_pages(obj);
+                       return ret;
+               }
+-              obj_priv->page_list[i] = page;
++              obj_priv->pages[i] = page;
+       }
+       return 0;
+ }
+@@ -1878,7 +1880,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
+       DRM_INFO("Binding object of size %d at 0x%08x\n",
+                obj->size, obj_priv->gtt_offset);
+ #endif
+-      ret = i915_gem_object_get_page_list(obj);
++      ret = i915_gem_object_get_pages(obj);
+       if (ret) {
+               drm_mm_put_block(obj_priv->gtt_space);
+               obj_priv->gtt_space = NULL;
+@@ -1890,12 +1892,12 @@ i915_gem_object_bind_to_gtt(struct drm_g
+        * into the GTT.
+        */
+       obj_priv->agp_mem = drm_agp_bind_pages(dev,
+-                                             obj_priv->page_list,
++                                             obj_priv->pages,
+                                              page_count,
+                                              obj_priv->gtt_offset,
+                                              obj_priv->agp_type);
+       if (obj_priv->agp_mem == NULL) {
+-              i915_gem_object_free_page_list(obj);
++              i915_gem_object_put_pages(obj);
+               drm_mm_put_block(obj_priv->gtt_space);
+               obj_priv->gtt_space = NULL;
+               return -ENOMEM;
+@@ -1922,10 +1924,10 @@ i915_gem_clflush_object(struct drm_gem_o
+        * to GPU, and we can ignore the cache flush because it'll happen
+        * again at bind time.
+        */
+-      if (obj_priv->page_list == NULL)
++      if (obj_priv->pages == NULL)
+               return;
+-      drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
++      drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
+ }
+ /** Flushes any GPU write domain for the object if it's dirty. */
+@@ -2270,7 +2272,7 @@ i915_gem_object_set_to_full_cpu_read_dom
+               for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
+                       if (obj_priv->page_cpu_valid[i])
+                               continue;
+-                      drm_clflush_pages(obj_priv->page_list + i, 1);
++                      drm_clflush_pages(obj_priv->pages + i, 1);
+               }
+               drm_agp_chipset_flush(dev);
+       }
+@@ -2336,7 +2338,7 @@ i915_gem_object_set_cpu_read_domain_rang
+               if (obj_priv->page_cpu_valid[i])
+                       continue;
+-              drm_clflush_pages(obj_priv->page_list + i, 1);
++              drm_clflush_pages(obj_priv->pages + i, 1);
+               obj_priv->page_cpu_valid[i] = 1;
+       }
+@@ -3304,7 +3306,7 @@ i915_gem_init_hws(struct drm_device *dev
+       dev_priv->status_gfx_addr = obj_priv->gtt_offset;
+-      dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
++      dev_priv->hw_status_page = kmap(obj_priv->pages[0]);
+       if (dev_priv->hw_status_page == NULL) {
+               DRM_ERROR("Failed to map status page.\n");
+               memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+@@ -3334,7 +3336,7 @@ i915_gem_cleanup_hws(struct drm_device *
+       obj = dev_priv->hws_obj;
+       obj_priv = obj->driver_private;
+-      kunmap(obj_priv->page_list[0]);
++      kunmap(obj_priv->pages[0]);
+       i915_gem_object_unpin(obj);
+       drm_gem_object_unreference(obj);
+       dev_priv->hws_obj = NULL;
+@@ -3637,20 +3639,20 @@ void i915_gem_detach_phys_object(struct 
+       if (!obj_priv->phys_obj)
+               return;
+-      ret = i915_gem_object_get_page_list(obj);
++      ret = i915_gem_object_get_pages(obj);
+       if (ret)
+               goto out;
+       page_count = obj->size / PAGE_SIZE;
+       for (i = 0; i < page_count; i++) {
+-              char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0);
++              char *dst = kmap_atomic(obj_priv->pages[i], KM_USER0);
+               char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+               memcpy(dst, src, PAGE_SIZE);
+               kunmap_atomic(dst, KM_USER0);
+       }
+-      drm_clflush_pages(obj_priv->page_list, page_count);
++      drm_clflush_pages(obj_priv->pages, page_count);
+       drm_agp_chipset_flush(dev);
+ out:
+       obj_priv->phys_obj->cur_obj = NULL;
+@@ -3693,7 +3695,7 @@ i915_gem_attach_phys_object(struct drm_d
+       obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
+       obj_priv->phys_obj->cur_obj = obj;
+-      ret = i915_gem_object_get_page_list(obj);
++      ret = i915_gem_object_get_pages(obj);
+       if (ret) {
+               DRM_ERROR("failed to get page list\n");
+               goto out;
+@@ -3702,7 +3704,7 @@ i915_gem_attach_phys_object(struct drm_d
+       page_count = obj->size / PAGE_SIZE;
+       for (i = 0; i < page_count; i++) {
+-              char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0);
++              char *src = kmap_atomic(obj_priv->pages[i], KM_USER0);
+               char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+               memcpy(dst, src, PAGE_SIZE);
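The conversion above replaces a get/free pair with refcounted get/put: the first get builds and pins the page list, later callers only bump the count, and only the final put releases the pages and the array. A stripped-down sketch of the pattern using the same page-cache calls as the patch; struct pinned_pages and its fields are illustrative, not the GEM object layout, and dirty-page writeback is omitted.

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>

struct pinned_pages {
    struct page **pages;
    int refcount;
    int page_count;
};

/* First caller populates the array; later callers just take a reference. */
static int pinned_pages_get(struct pinned_pages *pp,
                            struct address_space *mapping)
{
    int i;

    if (pp->refcount++ != 0)
        return 0;

    pp->pages = kcalloc(pp->page_count, sizeof(*pp->pages), GFP_KERNEL);
    if (pp->pages == NULL) {
        pp->refcount--;
        return -ENOMEM;
    }

    for (i = 0; i < pp->page_count; i++) {
        struct page *page = read_mapping_page(mapping, i, NULL);

        if (IS_ERR(page)) {
            while (--i >= 0)
                page_cache_release(pp->pages[i]);
            kfree(pp->pages);
            pp->pages = NULL;
            pp->refcount--;
            return PTR_ERR(page);
        }
        pp->pages[i] = page;
    }
    return 0;
}

/* Only the last reference actually releases the pages and the array. */
static void pinned_pages_put(struct pinned_pages *pp)
{
    int i;

    BUG_ON(pp->refcount == 0);
    if (--pp->refcount != 0)
        return;

    for (i = 0; i < pp->page_count; i++)
        page_cache_release(pp->pages[i]);  /* set_page_dirty() first if written */
    kfree(pp->pages);
    pp->pages = NULL;
}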
diff --git a/queue-2.6.29/drm-i915-only-set-tv-mode-when-any-property-changed.patch b/queue-2.6.29/drm-i915-only-set-tv-mode-when-any-property-changed.patch
new file mode 100644 (file)
index 0000000..48ce914
--- /dev/null
@@ -0,0 +1,77 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 02:43:46 2009
+From: Eric Anholt <eric@anholt.net>
+To: stable@kernel.org
+Date: Mon,  6 Apr 2009 19:40:24 -0700
+Message-Id: <1239072025-1706-13-git-send-email-eric@anholt.net>
+Cc: Eric Anholt <eric@anholt.net>, Zhenyu Wang <zhenyu.z.wang@intel.com>
+Subject: drm/i915: only set TV mode when any property changed
+
+From: Zhenyu Wang <zhenyu.z.wang@intel.com>
+
+upstream commit: ebcc8f2eade76946dbb5d5c545b91f8157051aa8
+
+If there's no real property change, we don't need to set the TV mode again.
+
+Signed-off-by: Zhenyu Wang <zhenyu.z.wang@intel.com>
+[anholt: checkpatch.pl fix]
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/gpu/drm/i915/intel_tv.c |   27 ++++++++++++++++++++-------
+ 1 file changed, 20 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/i915/intel_tv.c
++++ b/drivers/gpu/drm/i915/intel_tv.c
+@@ -1559,32 +1559,45 @@ intel_tv_set_property(struct drm_connect
+       struct intel_output *intel_output = to_intel_output(connector);
+       struct intel_tv_priv *tv_priv = intel_output->dev_priv;
+       int ret = 0;
++      bool changed = false;
+       ret = drm_connector_property_set_value(connector, property, val);
+       if (ret < 0)
+               goto out;
+-      if (property == dev->mode_config.tv_left_margin_property)
++      if (property == dev->mode_config.tv_left_margin_property &&
++              tv_priv->margin[TV_MARGIN_LEFT] != val) {
+               tv_priv->margin[TV_MARGIN_LEFT] = val;
+-      else if (property == dev->mode_config.tv_right_margin_property)
++              changed = true;
++      } else if (property == dev->mode_config.tv_right_margin_property &&
++              tv_priv->margin[TV_MARGIN_RIGHT] != val) {
+               tv_priv->margin[TV_MARGIN_RIGHT] = val;
+-      else if (property == dev->mode_config.tv_top_margin_property)
++              changed = true;
++      } else if (property == dev->mode_config.tv_top_margin_property &&
++              tv_priv->margin[TV_MARGIN_TOP] != val) {
+               tv_priv->margin[TV_MARGIN_TOP] = val;
+-      else if (property == dev->mode_config.tv_bottom_margin_property)
++              changed = true;
++      } else if (property == dev->mode_config.tv_bottom_margin_property &&
++              tv_priv->margin[TV_MARGIN_BOTTOM] != val) {
+               tv_priv->margin[TV_MARGIN_BOTTOM] = val;
+-      else if (property == dev->mode_config.tv_mode_property) {
++              changed = true;
++      } else if (property == dev->mode_config.tv_mode_property) {
+               if (val >= NUM_TV_MODES) {
+                       ret = -EINVAL;
+                       goto out;
+               }
++              if (!strcmp(tv_priv->tv_format, tv_modes[val].name))
++                      goto out;
++
+               tv_priv->tv_format = tv_modes[val].name;
+-              intel_tv_mode_set(&intel_output->enc, NULL, NULL);
++              changed = true;
+       } else {
+               ret = -EINVAL;
+               goto out;
+       }
+-      intel_tv_mode_set(&intel_output->enc, NULL, NULL);
++      if (changed)
++              intel_tv_mode_set(&intel_output->enc, NULL, NULL);
+ out:
+       return ret;
+ }
diff --git a/queue-2.6.29/drm-i915-read-the-right-sdvo-register-when-detecting-svdo-hdmi.patch b/queue-2.6.29/drm-i915-read-the-right-sdvo-register-when-detecting-svdo-hdmi.patch
new file mode 100644 (file)
index 0000000..961022d
--- /dev/null
@@ -0,0 +1,47 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 02:42:24 2009
+From: Eric Anholt <eric@anholt.net>
+To: stable@kernel.org
+Date: Mon,  6 Apr 2009 19:40:13 -0700
+Message-Id: <1239072025-1706-2-git-send-email-eric@anholt.net>
+Cc: Kristian Høgsberg <krh@redhat.com>, Eric Anholt <eric@anholt.net>
+Subject: drm/i915: Read the right SDVO register when detecting SVDO/HDMI.
+
+From: Kristian Høgsberg <krh@redhat.com>
+
+upstream commit: 13520b051e8888dd3af9bda639d83e7df76613d1
+
+This fixes incorrect detection of the second SDVO/HDMI output on G4X, and
+extra boot time on pre-G4X.
+
+Signed-off-by: Kristian Høgsberg <krh@redhat.com>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/gpu/drm/i915/intel_display.c |   10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -1474,13 +1474,21 @@ static void intel_setup_outputs(struct d
+       if (IS_I9XX(dev)) {
+               int found;
++              u32 reg;
+               if (I915_READ(SDVOB) & SDVO_DETECTED) {
+                       found = intel_sdvo_init(dev, SDVOB);
+                       if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
+                               intel_hdmi_init(dev, SDVOB);
+               }
+-              if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) {
++
++              /* Before G4X SDVOC doesn't have its own detect register */
++              if (IS_G4X(dev))
++                      reg = SDVOC;
++              else
++                      reg = SDVOB;
++
++              if (I915_READ(reg) & SDVO_DETECTED) {
+                       found = intel_sdvo_init(dev, SDVOC);
+                       if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
+                               intel_hdmi_init(dev, SDVOC);
diff --git a/queue-2.6.29/drm-i915-sync-crt-hotplug-detection-with-intel-video-driver.patch b/queue-2.6.29/drm-i915-sync-crt-hotplug-detection-with-intel-video-driver.patch
new file mode 100644 (file)
index 0000000..f5e81dd
--- /dev/null
@@ -0,0 +1,104 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 02:43:41 2009
+From: Eric Anholt <eric@anholt.net>
+To: stable@kernel.org
+Date: Mon,  6 Apr 2009 19:40:19 -0700
+Message-Id: <1239072025-1706-8-git-send-email-eric@anholt.net>
+Cc: Zhao Yakui <yakui.zhao@intel.com>, Eric Anholt <eric@anholt.net>
+Subject: drm/i915: Sync crt hotplug detection with intel video driver
+
+From: Zhao Yakui <yakui.zhao@intel.com>
+
+upstream commit: 771cb081354161eea21534ba58e5cc1a2db94a25
+
+This covers:
+Use long crt hotplug activation time on GM45.
+
+Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/gpu/drm/i915/i915_reg.h  |   16 +++++++++++++
+ drivers/gpu/drm/i915/intel_crt.c |   47 +++++++++++++++++++++++++++------------
+ 2 files changed, 49 insertions(+), 14 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -629,6 +629,22 @@
+ #define   TV_HOTPLUG_INT_EN                   (1 << 18)
+ #define   CRT_HOTPLUG_INT_EN                  (1 << 9)
+ #define   CRT_HOTPLUG_FORCE_DETECT            (1 << 3)
++#define CRT_HOTPLUG_ACTIVATION_PERIOD_32      (0 << 8)
++/* must use period 64 on GM45 according to docs */
++#define CRT_HOTPLUG_ACTIVATION_PERIOD_64      (1 << 8)
++#define CRT_HOTPLUG_DAC_ON_TIME_2M            (0 << 7)
++#define CRT_HOTPLUG_DAC_ON_TIME_4M            (1 << 7)
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_40                (0 << 5)
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_50                (1 << 5)
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_60                (2 << 5)
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_70                (3 << 5)
++#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK      (3 << 5)
++#define CRT_HOTPLUG_DETECT_DELAY_1G           (0 << 4)
++#define CRT_HOTPLUG_DETECT_DELAY_2G           (1 << 4)
++#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV      (0 << 2)
++#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV      (1 << 2)
++#define CRT_HOTPLUG_MASK                      (0x3fc) /* Bits 9-2 */
++
+ #define PORT_HOTPLUG_STAT     0x61114
+ #define   HDMIB_HOTPLUG_INT_STATUS            (1 << 29)
+--- a/drivers/gpu/drm/i915/intel_crt.c
++++ b/drivers/gpu/drm/i915/intel_crt.c
+@@ -133,20 +133,39 @@ static bool intel_crt_detect_hotplug(str
+ {
+       struct drm_device *dev = connector->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+-      u32 temp;
+-
+-      unsigned long timeout = jiffies + msecs_to_jiffies(1000);
+-
+-      temp = I915_READ(PORT_HOTPLUG_EN);
+-
+-      I915_WRITE(PORT_HOTPLUG_EN,
+-                 temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
+-
+-      do {
+-              if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT))
+-                      break;
+-              msleep(1);
+-      } while (time_after(timeout, jiffies));
++      u32 hotplug_en;
++      int i, tries = 0;
++      /*
++       * On 4 series desktop, CRT detect sequence need to be done twice
++       * to get a reliable result.
++       */
++
++      if (IS_G4X(dev) && !IS_GM45(dev))
++              tries = 2;
++      else
++              tries = 1;
++      hotplug_en = I915_READ(PORT_HOTPLUG_EN);
++      hotplug_en &= ~(CRT_HOTPLUG_MASK);
++      hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
++
++      if (IS_GM45(dev))
++              hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
++
++      hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
++
++      for (i = 0; i < tries ; i++) {
++              unsigned long timeout;
++              /* turn on the FORCE_DETECT */
++              I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
++              timeout = jiffies + msecs_to_jiffies(1000);
++              /* wait for FORCE_DETECT to go off */
++              do {
++                      if (!(I915_READ(PORT_HOTPLUG_EN) &
++                                      CRT_HOTPLUG_FORCE_DETECT))
++                              break;
++                      msleep(1);
++              } while (time_after(timeout, jiffies));
++      }
+       if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
+           CRT_HOTPLUG_MONITOR_COLOR)
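The rewritten detect loop is a standard self-clearing-trigger idiom: set the force-detect bit, poll with msleep() until the hardware clears it or a jiffies deadline expires, and repeat the whole round where the hardware (4-series desktop here) needs two passes. A generic sketch of the idiom; read_reg()/write_reg() and DETECT_TRIGGER stand in for the MMIO accessors and register bit.

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/types.h>

#define DETECT_TRIGGER  (1 << 3)    /* placeholder for the self-clearing bit */

/* Returns true once the hardware has cleared the trigger bit. */
static bool force_detect(u32 (*read_reg)(u32), void (*write_reg)(u32, u32),
                         u32 reg, int tries)
{
    int i;

    for (i = 0; i < tries; i++) {
        unsigned long timeout = jiffies + msecs_to_jiffies(1000);

        /* fire the self-clearing trigger */
        write_reg(reg, read_reg(reg) | DETECT_TRIGGER);

        /* wait for the hardware to clear it, up to ~1s */
        do {
            if (!(read_reg(reg) & DETECT_TRIGGER))
                break;
            msleep(1);
        } while (time_after(timeout, jiffies));
    }
    return !(read_reg(reg) & DETECT_TRIGGER);
}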
diff --git a/queue-2.6.29/drm-use-pgprot_writecombine-in-gem-gtt-mapping-to-get-the-right-bits-for-pat.patch b/queue-2.6.29/drm-use-pgprot_writecombine-in-gem-gtt-mapping-to-get-the-right-bits-for-pat.patch
new file mode 100644 (file)
index 0000000..b61e506
--- /dev/null
@@ -0,0 +1,46 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 02:43:58 2009
+From: Eric Anholt <eric@anholt.net>
+To: stable@kernel.org
+Date: Mon,  6 Apr 2009 19:40:23 -0700
+Message-Id: <1239072025-1706-12-git-send-email-eric@anholt.net>
+Cc: Eric Anholt <eric@anholt.net>, Jesse Barnes <jbarnes@virtuousgeek.org>
+Subject: drm: Use pgprot_writecombine in GEM GTT mapping to get the right bits for !PAT.
+
+From: Jesse Barnes <jbarnes@virtuousgeek.org>
+
+upstream commit: 1055f9ddad093f54dfd708a0f976582034d4ce1a
+
+Otherwise, the PAGE_CACHE_WC would end up getting us a UC-only mapping, and
+the write performance of GTT maps dropped 10x.
+
+Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+[anholt: cleaned up unused var]
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/gpu/drm/drm_gem.c |    7 +------
+ 1 file changed, 1 insertion(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -505,7 +505,6 @@ int drm_gem_mmap(struct file *filp, stru
+       struct drm_map *map = NULL;
+       struct drm_gem_object *obj;
+       struct drm_hash_item *hash;
+-      unsigned long prot;
+       int ret = 0;
+       mutex_lock(&dev->struct_mutex);
+@@ -538,11 +537,7 @@ int drm_gem_mmap(struct file *filp, stru
+       vma->vm_ops = obj->dev->driver->gem_vm_ops;
+       vma->vm_private_data = map->handle;
+       /* FIXME: use pgprot_writecombine when available */
+-      prot = pgprot_val(vma->vm_page_prot);
+-#ifdef CONFIG_X86
+-      prot |= _PAGE_CACHE_WC;
+-#endif
+-      vma->vm_page_prot = __pgprot(prot);
++      vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+       /* Take a ref for this mapping of the object, so that the fault
+        * handler can dereference the mmap offset's pointer to the object.
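pgprot_writecombine() picks appropriate page-protection bits on both PAT and non-PAT systems, which is what the open-coded _PAGE_CACHE_WC OR-in got wrong without PAT. A minimal sketch of a driver mmap handler using it; the aperture PFN below is a made-up placeholder, not an i915 address.

#include <linux/fs.h>
#include <linux/mm.h>

/* Map a physical aperture write-combined, with or without PAT. */
static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
    unsigned long aperture_pfn = 0xd0000000UL >> PAGE_SHIFT;  /* placeholder */

    vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
    return remap_pfn_range(vma, vma->vm_start, aperture_pfn,
                           vma->vm_end - vma->vm_start,
                           vma->vm_page_prot);
}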
diff --git a/queue-2.6.29/ext4-fix-locking-typo-in-mballoc-which-could-cause-soft-lockup-hangs.patch b/queue-2.6.29/ext4-fix-locking-typo-in-mballoc-which-could-cause-soft-lockup-hangs.patch
new file mode 100644 (file)
index 0000000..9dafbbb
--- /dev/null
@@ -0,0 +1,43 @@
+From stable-bounces@linux.kernel.org  Wed Apr  1 18:05:07 2009
+Date: Wed, 1 Apr 2009 18:05:02 GMT
+Message-Id: <200904011805.n31I5239011910@hera.kernel.org>
+From: Theodore Ts'o <tytso@mit.edu>
+To: stable@kernel.org
+Subject: ext4: fix locking typo in mballoc which could cause soft lockup hangs
+
+upstream commit: e7c9e3e99adf6c49c5d593a51375916acc039d1e
+
+Smatch (http://repo.or.cz/w/smatch.git/) complains about the locking in
+ext4_mb_add_n_trim() from fs/ext4/mballoc.c
+
+  4438          list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
+  4439                                                  pa_inode_list) {
+  4440                  spin_lock(&tmp_pa->pa_lock);
+  4441                  if (tmp_pa->pa_deleted) {
+  4442                          spin_unlock(&pa->pa_lock);
+  4443                          continue;
+  4444                  }
+
+Brown paper bag time...
+
+Reported-by: Dan Carpenter <error27@gmail.com>
+Reviewed-by: Eric Sandeen <sandeen@redhat.com>
+Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@gmail.com>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Cc: stable@kernel.org
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ fs/ext4/mballoc.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -4439,7 +4439,7 @@ static void ext4_mb_add_n_trim(struct ex
+                                               pa_inode_list) {
+               spin_lock(&tmp_pa->pa_lock);
+               if (tmp_pa->pa_deleted) {
+-                      spin_unlock(&pa->pa_lock);
++                      spin_unlock(&tmp_pa->pa_lock);
+                       continue;
+               }
+               if (!added && pa->pa_free < tmp_pa->pa_free) {
diff --git a/queue-2.6.29/ext4-fix-typo-which-causes-a-memory-leak-on-error-path.patch b/queue-2.6.29/ext4-fix-typo-which-causes-a-memory-leak-on-error-path.patch
new file mode 100644 (file)
index 0000000..99c3805
--- /dev/null
@@ -0,0 +1,30 @@
+From stable-bounces@linux.kernel.org  Wed Apr  1 18:05:09 2009
+Date: Wed, 1 Apr 2009 18:05:04 GMT
+Message-Id: <200904011805.n31I54qn011981@hera.kernel.org>
+From: Dan Carpenter <error27@gmail.com>
+To: stable@kernel.org
+Subject: ext4: fix typo which causes a memory leak on error path
+
+upstream commit: a7b19448ddbdc34b2b8fedc048ba154ca798667b
+
+This was found by smatch (http://repo.or.cz/w/smatch.git/)
+
+Signed-off-by: Dan Carpenter <error27@gmail.com>
+Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
+Cc: stable@kernel.org
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ fs/ext4/mballoc.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -2693,7 +2693,7 @@ int ext4_mb_init(struct super_block *sb,
+       i = (sb->s_blocksize_bits + 2) * sizeof(unsigned int);
+       sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
+       if (sbi->s_mb_maxs == NULL) {
+-              kfree(sbi->s_mb_maxs);
++              kfree(sbi->s_mb_offsets);
+               return -ENOMEM;
+       }
diff --git a/queue-2.6.29/fbdev-fix-info-lock-deadlock-in-fbcon_event_notify.patch b/queue-2.6.29/fbdev-fix-info-lock-deadlock-in-fbcon_event_notify.patch
new file mode 100644 (file)
index 0000000..02cd916
--- /dev/null
@@ -0,0 +1,282 @@
+From 513adb58685615b0b1d47a3f0d40f5352beff189 Mon Sep 17 00:00:00 2001
+Message-ID: <20090410081428.GA5595@linux>
+From: Andrea Righi <righi.andrea@gmail.com>
+Date: Mon, 13 Apr 2009 14:39:39 -0700
+Subject: fbdev: fix info->lock deadlock in fbcon_event_notify()
+
+upstream commit: 513adb58685615b0b1d47a3f0d40f5352beff189
+
+fb_notifier_call_chain() is called with info->lock held, i.e.  in
+do_fb_ioctl() => FBIOPUT_VSCREENINFO => fb_set_var() and the some
+notifier callbacks, like fbcon_event_notify(), try to re-acquire
+info->lock again.
+
+Remove the lock/unlock_fb_info() calls from all the framebuffer notifier
+callbacks and be sure to always call fb_notifier_call_chain() with
+info->lock held.
+
+[fixes hang caused by 66c1ca01]
+
+Reported-by: Pavel Roskin <proski@gnu.org>
+Reported-by: Eric Miao <eric.y.miao@gmail.com>
+Signed-off-by: Andrea Righi <righi.andrea@gmail.com>
+Cc: Stefan Richter <stefanr@s5r6.in-berlin.de>
+Cc: Krzysztof Helt <krzysztof.h1@poczta.fm>
+Cc: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/video/backlight/backlight.c |    3 -
+ drivers/video/backlight/lcd.c       |    3 -
+ drivers/video/console/fbcon.c       |   55 +-----------------------------------
+ drivers/video/fbmem.c               |   19 ++++++++++++
+ 4 files changed, 22 insertions(+), 58 deletions(-)
+
+--- a/drivers/video/backlight/backlight.c
++++ b/drivers/video/backlight/backlight.c
+@@ -35,8 +35,6 @@ static int fb_notifier_callback(struct n
+               return 0;
+       bd = container_of(self, struct backlight_device, fb_notif);
+-      if (!lock_fb_info(evdata->info))
+-              return -ENODEV;
+       mutex_lock(&bd->ops_lock);
+       if (bd->ops)
+               if (!bd->ops->check_fb ||
+@@ -49,7 +47,6 @@ static int fb_notifier_callback(struct n
+                       backlight_update_status(bd);
+               }
+       mutex_unlock(&bd->ops_lock);
+-      unlock_fb_info(evdata->info);
+       return 0;
+ }
+--- a/drivers/video/backlight/lcd.c
++++ b/drivers/video/backlight/lcd.c
+@@ -40,8 +40,6 @@ static int fb_notifier_callback(struct n
+       if (!ld->ops)
+               return 0;
+-      if (!lock_fb_info(evdata->info))
+-              return -ENODEV;
+       mutex_lock(&ld->ops_lock);
+       if (!ld->ops->check_fb || ld->ops->check_fb(ld, evdata->info)) {
+               if (event == FB_EVENT_BLANK) {
+@@ -53,7 +51,6 @@ static int fb_notifier_callback(struct n
+               }
+       }
+       mutex_unlock(&ld->ops_lock);
+-      unlock_fb_info(evdata->info);
+       return 0;
+ }
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -2263,9 +2263,12 @@ static void fbcon_generic_blank(struct v
+       }
++      if (!lock_fb_info(info))
++              return;
+       event.info = info;
+       event.data = &blank;
+       fb_notifier_call_chain(FB_EVENT_CONBLANK, &event);
++      unlock_fb_info(info);
+ }
+ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
+@@ -2956,8 +2959,6 @@ static int fbcon_fb_unregistered(struct 
+ {
+       int i, idx;
+-      if (!lock_fb_info(info))
+-              return -ENODEV;
+       idx = info->node;
+       for (i = first_fb_vc; i <= last_fb_vc; i++) {
+               if (con2fb_map[i] == idx)
+@@ -2985,8 +2986,6 @@ static int fbcon_fb_unregistered(struct 
+       if (primary_device == idx)
+               primary_device = -1;
+-      unlock_fb_info(info);
+-
+       if (!num_registered_fb)
+               unregister_con_driver(&fb_con);
+@@ -3027,11 +3026,8 @@ static int fbcon_fb_registered(struct fb
+ {
+       int ret = 0, i, idx;
+-      if (!lock_fb_info(info))
+-              return -ENODEV;
+       idx = info->node;
+       fbcon_select_primary(info);
+-      unlock_fb_info(info);
+       if (info_idx == -1) {
+               for (i = first_fb_vc; i <= last_fb_vc; i++) {
+@@ -3152,53 +3148,23 @@ static int fbcon_event_notify(struct not
+       switch(action) {
+       case FB_EVENT_SUSPEND:
+-              if (!lock_fb_info(info)) {
+-                      ret = -ENODEV;
+-                      goto done;
+-              }
+               fbcon_suspended(info);
+-              unlock_fb_info(info);
+               break;
+       case FB_EVENT_RESUME:
+-              if (!lock_fb_info(info)) {
+-                      ret = -ENODEV;
+-                      goto done;
+-              }
+               fbcon_resumed(info);
+-              unlock_fb_info(info);
+               break;
+       case FB_EVENT_MODE_CHANGE:
+-              if (!lock_fb_info(info)) {
+-                      ret = -ENODEV;
+-                      goto done;
+-              }
+               fbcon_modechanged(info);
+-              unlock_fb_info(info);
+               break;
+       case FB_EVENT_MODE_CHANGE_ALL:
+-              if (!lock_fb_info(info)) {
+-                      ret = -ENODEV;
+-                      goto done;
+-              }
+               fbcon_set_all_vcs(info);
+-              unlock_fb_info(info);
+               break;
+       case FB_EVENT_MODE_DELETE:
+               mode = event->data;
+-              if (!lock_fb_info(info)) {
+-                      ret = -ENODEV;
+-                      goto done;
+-              }
+               ret = fbcon_mode_deleted(info, mode);
+-              unlock_fb_info(info);
+               break;
+       case FB_EVENT_FB_UNBIND:
+-              if (!lock_fb_info(info)) {
+-                      ret = -ENODEV;
+-                      goto done;
+-              }
+               idx = info->node;
+-              unlock_fb_info(info);
+               ret = fbcon_fb_unbind(idx);
+               break;
+       case FB_EVENT_FB_REGISTERED:
+@@ -3217,29 +3183,14 @@ static int fbcon_event_notify(struct not
+               con2fb->framebuffer = con2fb_map[con2fb->console - 1];
+               break;
+       case FB_EVENT_BLANK:
+-              if (!lock_fb_info(info)) {
+-                      ret = -ENODEV;
+-                      goto done;
+-              }
+               fbcon_fb_blanked(info, *(int *)event->data);
+-              unlock_fb_info(info);
+               break;
+       case FB_EVENT_NEW_MODELIST:
+-              if (!lock_fb_info(info)) {
+-                      ret = -ENODEV;
+-                      goto done;
+-              }
+               fbcon_new_modelist(info);
+-              unlock_fb_info(info);
+               break;
+       case FB_EVENT_GET_REQ:
+               caps = event->data;
+-              if (!lock_fb_info(info)) {
+-                      ret = -ENODEV;
+-                      goto done;
+-              }
+               fbcon_get_requirement(info, caps);
+-              unlock_fb_info(info);
+               break;
+       }
+ done:
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1086,8 +1086,11 @@ static long do_fb_ioctl(struct fb_info *
+                       return -EINVAL;
+               con2fb.framebuffer = -1;
+               event.data = &con2fb;
++              if (!lock_fb_info(info))
++                      return -ENODEV;
+               event.info = info;
+               fb_notifier_call_chain(FB_EVENT_GET_CONSOLE_MAP, &event);
++              unlock_fb_info(info);
+               ret = copy_to_user(argp, &con2fb, sizeof(con2fb)) ? -EFAULT : 0;
+               break;
+       case FBIOPUT_CON2FBMAP:
+@@ -1104,8 +1107,11 @@ static long do_fb_ioctl(struct fb_info *
+                       break;
+               }
+               event.data = &con2fb;
++              if (!lock_fb_info(info))
++                      return -ENODEV;
+               event.info = info;
+               ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
++              unlock_fb_info(info);
+               break;
+       case FBIOBLANK:
+               if (!lock_fb_info(info))
+@@ -1510,7 +1516,10 @@ register_framebuffer(struct fb_info *fb_
+       registered_fb[i] = fb_info;
+       event.info = fb_info;
++      if (!lock_fb_info(fb_info))
++              return -ENODEV;
+       fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
++      unlock_fb_info(fb_info);
+       return 0;
+ }
+@@ -1544,8 +1553,12 @@ unregister_framebuffer(struct fb_info *f
+               goto done;
+       }
++
++      if (!lock_fb_info(fb_info))
++              return -ENODEV;
+       event.info = fb_info;
+       ret = fb_notifier_call_chain(FB_EVENT_FB_UNBIND, &event);
++      unlock_fb_info(fb_info);
+       if (ret) {
+               ret = -EINVAL;
+@@ -1579,6 +1592,8 @@ void fb_set_suspend(struct fb_info *info
+ {
+       struct fb_event event;
++      if (!lock_fb_info(info))
++              return;
+       event.info = info;
+       if (state) {
+               fb_notifier_call_chain(FB_EVENT_SUSPEND, &event);
+@@ -1587,6 +1602,7 @@ void fb_set_suspend(struct fb_info *info
+               info->state = FBINFO_STATE_RUNNING;
+               fb_notifier_call_chain(FB_EVENT_RESUME, &event);
+       }
++      unlock_fb_info(info);
+ }
+ /**
+@@ -1656,8 +1672,11 @@ int fb_new_modelist(struct fb_info *info
+       err = 1;
+       if (!list_empty(&info->modelist)) {
++              if (!lock_fb_info(info))
++                      return -ENODEV;
+               event.info = info;
+               err = fb_notifier_call_chain(FB_EVENT_NEW_MODELIST, &event);
++              unlock_fb_info(info);
+       }
+       return err;
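The net effect of this patch is a single locking convention: callers of fb_notifier_call_chain() take info->lock themselves, and the notifier callbacks assume it is already held instead of re-acquiring it (which is what deadlocked). A small sketch of a helper a caller could use to follow that convention; notify_event_locked() is illustrative, not a function in fbmem.c.

#include <linux/errno.h>
#include <linux/fb.h>

/* Take info->lock around the notifier call; callbacks must not retake it. */
static int notify_event_locked(struct fb_info *info, unsigned long val,
                               void *data)
{
    struct fb_event event;
    int ret;

    if (!lock_fb_info(info))
        return -ENODEV;
    event.info = info;
    event.data = data;
    ret = fb_notifier_call_chain(val, &event);
    unlock_fb_info(info);
    return ret;
}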
diff --git a/queue-2.6.29/fbmem-fix-fb_info-lock-and-mm-mmap_sem-circular-locking-dependency.patch b/queue-2.6.29/fbmem-fix-fb_info-lock-and-mm-mmap_sem-circular-locking-dependency.patch
new file mode 100644 (file)
index 0000000..807e9fe
--- /dev/null
@@ -0,0 +1,256 @@
+From stable-bounces@linux.kernel.org  Wed Apr  1 17:05:07 2009
+Date: Wed, 1 Apr 2009 17:05:02 GMT
+Message-Id: <200904011705.n31H52GW005259@hera.kernel.org>
+From: Andrea Righi <righi.andrea@gmail.com>
+To: stable@kernel.org
+Subject: fbmem: fix fb_info->lock and mm->mmap_sem circular locking dependency
+
+upstream commit: 66c1ca019078220dc1bf968f2bb18421100ef147
+
+Fix a circular locking dependency in the frame buffer console driver
+pushing down the mutex fb_info->lock.
+
+Circular locking dependencies occur when calling the blocking
+fb_notifier_call_chain() with fb_info->lock held.  Notifier callbacks can
+try to acquire mm->mmap_sem, while fb_mmap() acquires the locks in the
+reverse order mm->mmap_sem => fb_info->lock.
+
+Tested-by: Andrey Borzenkov <arvidjaar@mail.ru>
+Signed-off-by: Andrea Righi <righi.andrea@gmail.com>
+Cc: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: Krzysztof Helt <krzysztof.h1@poczta.fm>
+Cc: <stable@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/video/backlight/backlight.c |    3 +
+ drivers/video/backlight/lcd.c       |    3 +
+ drivers/video/console/fbcon.c       |   73 +++++++++++++++++++++++++++++++-----
+ drivers/video/fbmem.c               |   11 -----
+ 4 files changed, 70 insertions(+), 20 deletions(-)
+
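This is the classic ABBA inversion: fb_mmap() runs with mm->mmap_sem held and then takes fb_info->lock, while the ioctl/notify path held fb_info->lock across callbacks that may end up waiting on mmap_sem, so here the locking is pushed down into the callbacks. (The preceding fbdev patch in this queue reworks this again, moving the lock back to the call sites.) A sketch of the callback shape this patch creates; example_fb_callback() is illustrative, not a callback in fbcon.c.

#include <linux/errno.h>
#include <linux/fb.h>
#include <linux/notifier.h>

/*
 * Under this scheme the chain is called without info->lock, so the
 * callback takes it itself, only around the work that needs it.
 */
static int example_fb_callback(struct notifier_block *self,
                               unsigned long action, void *data)
{
    struct fb_event *event = data;
    struct fb_info *info = event->info;

    if (action != FB_EVENT_MODE_CHANGE)
        return 0;

    if (!lock_fb_info(info))
        return -ENODEV;
    /* ... react to the mode change using info ... */
    unlock_fb_info(info);
    return 0;
}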
+--- a/drivers/video/backlight/backlight.c
++++ b/drivers/video/backlight/backlight.c
+@@ -35,6 +35,8 @@ static int fb_notifier_callback(struct n
+               return 0;
+       bd = container_of(self, struct backlight_device, fb_notif);
++      if (!lock_fb_info(evdata->info))
++              return -ENODEV;
+       mutex_lock(&bd->ops_lock);
+       if (bd->ops)
+               if (!bd->ops->check_fb ||
+@@ -47,6 +49,7 @@ static int fb_notifier_callback(struct n
+                       backlight_update_status(bd);
+               }
+       mutex_unlock(&bd->ops_lock);
++      unlock_fb_info(evdata->info);
+       return 0;
+ }
+--- a/drivers/video/backlight/lcd.c
++++ b/drivers/video/backlight/lcd.c
+@@ -40,6 +40,8 @@ static int fb_notifier_callback(struct n
+       if (!ld->ops)
+               return 0;
++      if (!lock_fb_info(evdata->info))
++              return -ENODEV;
+       mutex_lock(&ld->ops_lock);
+       if (!ld->ops->check_fb || ld->ops->check_fb(ld, evdata->info)) {
+               if (event == FB_EVENT_BLANK) {
+@@ -51,6 +53,7 @@ static int fb_notifier_callback(struct n
+               }
+       }
+       mutex_unlock(&ld->ops_lock);
++      unlock_fb_info(evdata->info);
+       return 0;
+ }
+--- a/drivers/video/console/fbcon.c
++++ b/drivers/video/console/fbcon.c
+@@ -2954,8 +2954,11 @@ static int fbcon_fb_unbind(int idx)
+ static int fbcon_fb_unregistered(struct fb_info *info)
+ {
+-      int i, idx = info->node;
++      int i, idx;
++      if (!lock_fb_info(info))
++              return -ENODEV;
++      idx = info->node;
+       for (i = first_fb_vc; i <= last_fb_vc; i++) {
+               if (con2fb_map[i] == idx)
+                       con2fb_map[i] = -1;
+@@ -2979,13 +2982,14 @@ static int fbcon_fb_unregistered(struct 
+               }
+       }
+-      if (!num_registered_fb)
+-              unregister_con_driver(&fb_con);
+-
+-
+       if (primary_device == idx)
+               primary_device = -1;
++      unlock_fb_info(info);
++
++      if (!num_registered_fb)
++              unregister_con_driver(&fb_con);
++
+       return 0;
+ }
+@@ -3021,9 +3025,13 @@ static inline void fbcon_select_primary(
+ static int fbcon_fb_registered(struct fb_info *info)
+ {
+-      int ret = 0, i, idx = info->node;
++      int ret = 0, i, idx;
++      if (!lock_fb_info(info))
++              return -ENODEV;
++      idx = info->node;
+       fbcon_select_primary(info);
++      unlock_fb_info(info);
+       if (info_idx == -1) {
+               for (i = first_fb_vc; i <= last_fb_vc; i++) {
+@@ -3124,7 +3132,7 @@ static void fbcon_get_requirement(struct
+       }
+ }
+-static int fbcon_event_notify(struct notifier_block *self, 
++static int fbcon_event_notify(struct notifier_block *self,
+                             unsigned long action, void *data)
+ {
+       struct fb_event *event = data;
+@@ -3132,7 +3140,7 @@ static int fbcon_event_notify(struct not
+       struct fb_videomode *mode;
+       struct fb_con2fbmap *con2fb;
+       struct fb_blit_caps *caps;
+-      int ret = 0;
++      int idx, ret = 0;
+       /*
+        * ignore all events except driver registration and deregistration
+@@ -3144,23 +3152,54 @@ static int fbcon_event_notify(struct not
+       switch(action) {
+       case FB_EVENT_SUSPEND:
++              if (!lock_fb_info(info)) {
++                      ret = -ENODEV;
++                      goto done;
++              }
+               fbcon_suspended(info);
++              unlock_fb_info(info);
+               break;
+       case FB_EVENT_RESUME:
++              if (!lock_fb_info(info)) {
++                      ret = -ENODEV;
++                      goto done;
++              }
+               fbcon_resumed(info);
++              unlock_fb_info(info);
+               break;
+       case FB_EVENT_MODE_CHANGE:
++              if (!lock_fb_info(info)) {
++                      ret = -ENODEV;
++                      goto done;
++              }
+               fbcon_modechanged(info);
++              unlock_fb_info(info);
+               break;
+       case FB_EVENT_MODE_CHANGE_ALL:
++              if (!lock_fb_info(info)) {
++                      ret = -ENODEV;
++                      goto done;
++              }
+               fbcon_set_all_vcs(info);
++              unlock_fb_info(info);
+               break;
+       case FB_EVENT_MODE_DELETE:
+               mode = event->data;
++              if (!lock_fb_info(info)) {
++                      ret = -ENODEV;
++                      goto done;
++              }
+               ret = fbcon_mode_deleted(info, mode);
++              unlock_fb_info(info);
+               break;
+       case FB_EVENT_FB_UNBIND:
+-              ret = fbcon_fb_unbind(info->node);
++              if (!lock_fb_info(info)) {
++                      ret = -ENODEV;
++                      goto done;
++              }
++              idx = info->node;
++              unlock_fb_info(info);
++              ret = fbcon_fb_unbind(idx);
+               break;
+       case FB_EVENT_FB_REGISTERED:
+               ret = fbcon_fb_registered(info);
+@@ -3178,17 +3217,31 @@ static int fbcon_event_notify(struct not
+               con2fb->framebuffer = con2fb_map[con2fb->console - 1];
+               break;
+       case FB_EVENT_BLANK:
++              if (!lock_fb_info(info)) {
++                      ret = -ENODEV;
++                      goto done;
++              }
+               fbcon_fb_blanked(info, *(int *)event->data);
++              unlock_fb_info(info);
+               break;
+       case FB_EVENT_NEW_MODELIST:
++              if (!lock_fb_info(info)) {
++                      ret = -ENODEV;
++                      goto done;
++              }
+               fbcon_new_modelist(info);
++              unlock_fb_info(info);
+               break;
+       case FB_EVENT_GET_REQ:
+               caps = event->data;
++              if (!lock_fb_info(info)) {
++                      ret = -ENODEV;
++                      goto done;
++              }
+               fbcon_get_requirement(info, caps);
++              unlock_fb_info(info);
+               break;
+       }
+-
+ done:
+       return ret;
+ }
+--- a/drivers/video/fbmem.c
++++ b/drivers/video/fbmem.c
+@@ -1086,13 +1086,8 @@ static long do_fb_ioctl(struct fb_info *
+                       return -EINVAL;
+               con2fb.framebuffer = -1;
+               event.data = &con2fb;
+-
+-              if (!lock_fb_info(info))
+-                      return -ENODEV;
+               event.info = info;
+               fb_notifier_call_chain(FB_EVENT_GET_CONSOLE_MAP, &event);
+-              unlock_fb_info(info);
+-
+               ret = copy_to_user(argp, &con2fb, sizeof(con2fb)) ? -EFAULT : 0;
+               break;
+       case FBIOPUT_CON2FBMAP:
+@@ -1109,12 +1104,8 @@ static long do_fb_ioctl(struct fb_info *
+                       break;
+               }
+               event.data = &con2fb;
+-              if (!lock_fb_info(info))
+-                      return -ENODEV;
+               event.info = info;
+-              ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP,
+-                                            &event);
+-              unlock_fb_info(info);
++              ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event);
+               break;
+       case FBIOBLANK:
+               if (!lock_fb_info(info))
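
The pattern established by the patch above -- every fb notifier client takes the framebuffer
lock before its own ops lock, and the fbmem ioctl paths no longer take it around the notifier
call -- boils down to keeping one lock order everywhere. A minimal user-space sketch of that
rule (plain C with pthreads, not kernel code; fb_lock and ops_lock are hypothetical stand-ins
for fb_info->lock and bd->ops_lock):

/*
 * Illustrative only: user-space pthread analogy, not kernel code.
 * Both paths take fb_lock before ops_lock, so no ABBA deadlock is possible.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fb_lock  = PTHREAD_MUTEX_INITIALIZER; /* stand-in for fb_info->lock */
static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for bd->ops_lock  */

static void *notifier_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&fb_lock);   /* framebuffer lock first ...         */
	pthread_mutex_lock(&ops_lock);  /* ... then the client's own ops lock */
	puts("notifier: updated backlight state");
	pthread_mutex_unlock(&ops_lock);
	pthread_mutex_unlock(&fb_lock);
	return NULL;
}

static void *ioctl_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&fb_lock);   /* same order here: never after ops_lock */
	puts("ioctl: blanked display");
	pthread_mutex_unlock(&fb_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, notifier_path, NULL);
	pthread_create(&b, NULL, ioctl_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}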
diff --git a/queue-2.6.29/fix-i_mutex-vs.-readdir-handling-in-nfsd.patch b/queue-2.6.29/fix-i_mutex-vs.-readdir-handling-in-nfsd.patch
new file mode 100644
index 0000000..db3cb3d
--- /dev/null
@@ -0,0 +1,229 @@
+From stable-bounces@linux.kernel.org  Tue Apr 21 21:20:16 2009
+Date: Tue, 21 Apr 2009 21:20:10 GMT
+Message-Id: <200904212120.n3LLKAO9024993@hera.kernel.org>
+From: David Woodhouse <dwmw2@infradead.org>
+To: jejb@kernel.org, stable@kernel.org
+Subject: Fix i_mutex vs. readdir handling in nfsd
+
+upstream commit: 2f9092e1020246168b1309b35e085ecd7ff9ff72
+
+Commit 14f7dd63 ("Copy XFS readdir hack into nfsd code") introduced a
+bug to generic code which had been extant for a long time in the XFS
+version -- it started to call through into lookup_one_len() and hence
+into the file systems' ->lookup() methods without i_mutex held on the
+directory.
+
+This patch fixes it by locking the directory's i_mutex again before
+calling the filldir functions. The original deadlocks which commit
+14f7dd63 was designed to avoid are still avoided, because they were due
+to fs-internal locking, not i_mutex.
+
+While we're at it, fix the return type of nfsd_buffered_readdir() which
+should be a __be32 not an int -- it's an NFS errno, not a Linux errno.
+And return nfserrno(-ENOMEM) when allocation fails, not just -ENOMEM.
+Sparse would have caught that, if it wasn't so busy bitching about
+__cold__.
+
+Commit 05f4f678 ("nfsd4: don't do lookup within readdir in recovery
+code") introduced a similar problem with calling lookup_one_len()
+without i_mutex, which this patch also addresses. To fix that, it was
+necessary to fix the called functions so that they expect i_mutex to be
+held; that part was done by J. Bruce Fields.
+
+Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
+Umm-I-can-live-with-that-by: Al Viro <viro@zeniv.linux.org.uk>
+Reported-by: J. R. Okajima <hooanon05@yahoo.co.jp>
+Tested-by: J. Bruce Fields <bfields@citi.umich.edu>
+LKML-Reference: <8036.1237474444@jrobl>
+Cc: stable@kernel.org
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ fs/namei.c            |    2 ++
+ fs/nfsd/nfs4recover.c |   46 +++++++++-------------------------------------
+ fs/nfsd/vfs.c         |   25 +++++++++++++++++++------
+ 3 files changed, 30 insertions(+), 43 deletions(-)
+
+diff --git a/fs/namei.c b/fs/namei.c
+index b8433eb..78f253c 100644
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1248,6 +1248,8 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
+       int err;
+       struct qstr this;
++      WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));
++
+       err = __lookup_one_len(name, &this, base, len);
+       if (err)
+               return ERR_PTR(err);
+diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
+index 3444c00..5275097 100644
+--- a/fs/nfsd/nfs4recover.c
++++ b/fs/nfsd/nfs4recover.c
+@@ -229,21 +229,23 @@ nfsd4_list_rec_dir(struct dentry *dir, recdir_func *f)
+               goto out;
+       status = vfs_readdir(filp, nfsd4_build_namelist, &names);
+       fput(filp);
++      mutex_lock(&dir->d_inode->i_mutex);
+       while (!list_empty(&names)) {
+               entry = list_entry(names.next, struct name_list, list);
+               dentry = lookup_one_len(entry->name, dir, HEXDIR_LEN-1);
+               if (IS_ERR(dentry)) {
+                       status = PTR_ERR(dentry);
+-                      goto out;
++                      break;
+               }
+               status = f(dir, dentry);
+               dput(dentry);
+               if (status)
+-                      goto out;
++                      break;
+               list_del(&entry->list);
+               kfree(entry);
+       }
++      mutex_unlock(&dir->d_inode->i_mutex);
+ out:
+       while (!list_empty(&names)) {
+               entry = list_entry(names.next, struct name_list, list);
+@@ -255,36 +257,6 @@ out:
+ }
+ static int
+-nfsd4_remove_clid_file(struct dentry *dir, struct dentry *dentry)
+-{
+-      int status;
+-
+-      if (!S_ISREG(dir->d_inode->i_mode)) {
+-              printk("nfsd4: non-file found in client recovery directory\n");
+-              return -EINVAL;
+-      }
+-      mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
+-      status = vfs_unlink(dir->d_inode, dentry);
+-      mutex_unlock(&dir->d_inode->i_mutex);
+-      return status;
+-}
+-
+-static int
+-nfsd4_clear_clid_dir(struct dentry *dir, struct dentry *dentry)
+-{
+-      int status;
+-
+-      /* For now this directory should already be empty, but we empty it of
+-       * any regular files anyway, just in case the directory was created by
+-       * a kernel from the future.... */
+-      nfsd4_list_rec_dir(dentry, nfsd4_remove_clid_file);
+-      mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
+-      status = vfs_rmdir(dir->d_inode, dentry);
+-      mutex_unlock(&dir->d_inode->i_mutex);
+-      return status;
+-}
+-
+-static int
+ nfsd4_unlink_clid_dir(char *name, int namlen)
+ {
+       struct dentry *dentry;
+@@ -294,18 +266,18 @@ nfsd4_unlink_clid_dir(char *name, int namlen)
+       mutex_lock(&rec_dir.dentry->d_inode->i_mutex);
+       dentry = lookup_one_len(name, rec_dir.dentry, namlen);
+-      mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
+       if (IS_ERR(dentry)) {
+               status = PTR_ERR(dentry);
+-              return status;
++              goto out_unlock;
+       }
+       status = -ENOENT;
+       if (!dentry->d_inode)
+               goto out;
+-
+-      status = nfsd4_clear_clid_dir(rec_dir.dentry, dentry);
++      status = vfs_rmdir(rec_dir.dentry->d_inode, dentry);
+ out:
+       dput(dentry);
++out_unlock:
++      mutex_unlock(&rec_dir.dentry->d_inode->i_mutex);
+       return status;
+ }
+@@ -348,7 +320,7 @@ purge_old(struct dentry *parent, struct dentry *child)
+       if (nfs4_has_reclaimed_state(child->d_name.name, false))
+               return 0;
+-      status = nfsd4_clear_clid_dir(parent, child);
++      status = vfs_rmdir(parent->d_inode, child);
+       if (status)
+               printk("failed to remove client recovery directory %s\n",
+                               child->d_name.name);
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index 46e6bd2..6c68ffd 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1890,8 +1890,8 @@ static int nfsd_buffered_filldir(void *__buf, const char *name, int namlen,
+       return 0;
+ }
+-static int nfsd_buffered_readdir(struct file *file, filldir_t func,
+-                               struct readdir_cd *cdp, loff_t *offsetp)
++static __be32 nfsd_buffered_readdir(struct file *file, filldir_t func,
++                                  struct readdir_cd *cdp, loff_t *offsetp)
+ {
+       struct readdir_data buf;
+       struct buffered_dirent *de;
+@@ -1901,11 +1901,12 @@ static int nfsd_buffered_readdir(struct file *file, filldir_t func,
+       buf.dirent = (void *)__get_free_page(GFP_KERNEL);
+       if (!buf.dirent)
+-              return -ENOMEM;
++              return nfserrno(-ENOMEM);
+       offset = *offsetp;
+       while (1) {
++              struct inode *dir_inode = file->f_path.dentry->d_inode;
+               unsigned int reclen;
+               cdp->err = nfserr_eof; /* will be cleared on successful read */
+@@ -1924,26 +1925,38 @@ static int nfsd_buffered_readdir(struct file *file, filldir_t func,
+               if (!size)
+                       break;
++              /*
++               * Various filldir functions may end up calling back into
++               * lookup_one_len() and the file system's ->lookup() method.
++               * These expect i_mutex to be held, as it would within readdir.
++               */
++              host_err = mutex_lock_killable(&dir_inode->i_mutex);
++              if (host_err)
++                      break;
++
+               de = (struct buffered_dirent *)buf.dirent;
+               while (size > 0) {
+                       offset = de->offset;
+                       if (func(cdp, de->name, de->namlen, de->offset,
+                                de->ino, de->d_type))
+-                              goto done;
++                              break;
+                       if (cdp->err != nfs_ok)
+-                              goto done;
++                              break;
+                       reclen = ALIGN(sizeof(*de) + de->namlen,
+                                      sizeof(u64));
+                       size -= reclen;
+                       de = (struct buffered_dirent *)((char *)de + reclen);
+               }
++              mutex_unlock(&dir_inode->i_mutex);
++              if (size > 0) /* We bailed out early */
++                      break;
++
+               offset = vfs_llseek(file, 0, SEEK_CUR);
+       }
+- done:
+       free_page((unsigned long)(buf.dirent));
+       if (host_err)
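
The shape of the fix above: lookup_one_len() (and the filldir callbacks that reach it) must run
with the directory's i_mutex held, so the callers take the mutex once around the whole batch and
the helper asserts that it is held. A rough user-space analogy in plain C with pthreads --
dir_lock, lookup_entry and the names list are hypothetical, not nfsd code:

/*
 * Illustrative only: user-space analogy of the rule applied above.
 * lookup_entry() must run with the directory lock held and asserts it,
 * mirroring the WARN_ON_ONCE() added to lookup_one_len(); the caller
 * takes the lock once around the whole batch, as nfsd4_list_rec_dir()
 * and nfsd_buffered_readdir() now do.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dir_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for i_mutex */
static int dir_locked;                                       /* poor man's lockdep   */

static void lookup_entry(const char *name)
{
	assert(dir_locked);             /* caller must hold the directory lock */
	printf("lookup %s\n", name);
}

int main(void)
{
	static const char *names[] = { "clientid1", "clientid2" };
	size_t i;

	pthread_mutex_lock(&dir_lock);  /* taken once around the batched lookups */
	dir_locked = 1;
	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
		lookup_entry(names[i]);
	dir_locked = 0;
	pthread_mutex_unlock(&dir_lock);
	return 0;
}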
diff --git a/queue-2.6.29/gso-fix-support-for-linear-packets.patch b/queue-2.6.29/gso-fix-support-for-linear-packets.patch
new file mode 100644
index 0000000..62f0894
--- /dev/null
@@ -0,0 +1,42 @@
+From stable-bounces@linux.kernel.org  Tue Apr 21 11:32:59 2009
+Date: Tue, 21 Apr 2009 04:31:50 -0700 (PDT)
+Message-Id: <20090421.043150.178833774.davem@davemloft.net>
+To: stable@kernel.org
+From: David Miller <davem@davemloft.net>
+Subject: gso: Fix support for linear packets
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+upstream commit: 2f181855a0b3c2b39314944add7b41c15647cf86
+
+When GRO/frag_list support was added to GSO, I made an error
+which broke the support for segmenting linear GSO packets (GSO
+packets are normally non-linear in the payload).
+
+These days most of these packets are constructed by the tun
+driver, which prefers to allocate linear memory if possible.
+This is fixed in the latest kernel, but for 2.6.29 and earlier
+it is still the norm.
+
+Therefore this bug causes failures with GSO when used with tun
+in 2.6.29.
+
+Reported-by: James Huang <jamesclhuang@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ net/core/skbuff.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2496,7 +2496,7 @@ struct sk_buff *skb_segment(struct sk_bu
+                                         skb_network_header_len(skb));
+               skb_copy_from_linear_data(skb, nskb->data, doffset);
+-              if (pos >= offset + len)
++              if (fskb != skb_shinfo(skb)->frag_list)
+                       continue;
+               if (!sg) {
diff --git a/queue-2.6.29/hpt366-fix-hpt370-dma-timeouts.patch b/queue-2.6.29/hpt366-fix-hpt370-dma-timeouts.patch
new file mode 100644
index 0000000..2afc2b0
--- /dev/null
@@ -0,0 +1,44 @@
+From c018f1ee5cf81e58b93d9e93a2ee39cad13dc1ac Mon Sep 17 00:00:00 2001
+Message-Id: <200904072009.32070.sshtylyov@ru.mvista.com>
+From: Sergei Shtylyov <sshtylyov@ru.mvista.com>
+Date: Sat, 18 Apr 2009 17:42:19 +0200
+Subject: hpt366: fix HPT370 DMA timeouts
+
+upstream commit: c018f1ee5cf81e58b93d9e93a2ee39cad13dc1ac
+
+The big driver change in 2.4.19-rc1 introduced a regression for many HPT370[A]
+chips -- DMA stopped working completely, causing only endless timeouts...
+
+The culprit has been identified (at last!): it turned out to be the code resetting
+the DMA state machine before each transfer. Stop doing it now as this counter-
+measure has clearly caused more harm than good.
+
+This should fix the kernel.org bug #7703.
+
+Signed-off-by: Sergei Shtylyov <sshtylyov@ru.mvista.com>
+Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/ide/hpt366.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/ide/hpt366.c
++++ b/drivers/ide/hpt366.c
+@@ -114,6 +114,8 @@
+  *   the register setting lists into the table indexed by the clock selected
+  * - set the correct hwif->ultra_mask for each individual chip
+  * - add Ultra and MW DMA mode filtering for the HPT37[24] based SATA cards
++ * - stop resetting HPT370's state machine before each DMA transfer as that has
++ *   caused more harm than good
+  *    Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com>
+  */
+@@ -133,7 +135,7 @@
+ #define DRV_NAME "hpt366"
+ /* various tuning parameters */
+-#define HPT_RESET_STATE_ENGINE
++#undef        HPT_RESET_STATE_ENGINE
+ #undef        HPT_DELAY_INTERRUPT
+ static const char *quirk_drives[] = {
diff --git a/queue-2.6.29/hrtimer-fix-rq-lock-inversion.patch b/queue-2.6.29/hrtimer-fix-rq-lock-inversion.patch
new file mode 100644
index 0000000..1a5334a
--- /dev/null
@@ -0,0 +1,206 @@
+From 7f1e2ca9f04b02794597f60e7b1d43f0a1317939 Mon Sep 17 00:00:00 2001
+Message-ID: <20090410131051.GD31307@elte.hu>
+From: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Date: Fri, 13 Mar 2009 12:21:27 +0100
+Subject: hrtimer: fix rq->lock inversion (again)
+
+upstream commit: 7f1e2ca9f04b02794597f60e7b1d43f0a1317939
+
+It appears I inadvertently introduced rq->lock recursion to the
+hrtimer_start() path when I delegated running already expired
+timers to softirq context.
+
+This patch fixes it by introducing a __hrtimer_start_range_ns()
+method that will not use raise_softirq_irqoff() but
+__raise_softirq_irqoff() which avoids the wakeup.
+
+It then also changes schedule() to check for pending softirqs and
+do the wakeup then; I'm not quite sure I like this last bit, nor
+am I convinced it's really needed.
+
+Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: paulus@samba.org
+LKML-Reference: <20090313112301.096138802@chello.nl>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Cc: Mikael Pettersson <mikpe@it.uu.se>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ include/linux/hrtimer.h   |    5 ++++
+ include/linux/interrupt.h |    1 
+ kernel/hrtimer.c          |   55 ++++++++++++++++++++++++++++------------------
+ kernel/sched.c            |   14 +++++++++--
+ kernel/softirq.c          |    2 -
+ 5 files changed, 52 insertions(+), 25 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -336,6 +336,11 @@ extern int hrtimer_start(struct hrtimer 
+                        const enum hrtimer_mode mode);
+ extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+                       unsigned long range_ns, const enum hrtimer_mode mode);
++extern int
++__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
++                       unsigned long delta_ns,
++                       const enum hrtimer_mode mode, int wakeup);
++
+ extern int hrtimer_cancel(struct hrtimer *timer);
+ extern int hrtimer_try_to_cancel(struct hrtimer *timer);
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -274,6 +274,7 @@ extern void softirq_init(void);
+ #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
+ extern void raise_softirq_irqoff(unsigned int nr);
+ extern void raise_softirq(unsigned int nr);
++extern void wakeup_softirqd(void);
+ /* This is the worklist that queues up per-cpu softirq work.
+  *
+--- a/kernel/hrtimer.c
++++ b/kernel/hrtimer.c
+@@ -651,14 +651,20 @@ static inline void hrtimer_init_timer_hr
+  * and expiry check is done in the hrtimer_interrupt or in the softirq.
+  */
+ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+-                                          struct hrtimer_clock_base *base)
++                                          struct hrtimer_clock_base *base,
++                                          int wakeup)
+ {
+       if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
+-              spin_unlock(&base->cpu_base->lock);
+-              raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+-              spin_lock(&base->cpu_base->lock);
++              if (wakeup) {
++                      spin_unlock(&base->cpu_base->lock);
++                      raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++                      spin_lock(&base->cpu_base->lock);
++              } else
++                      __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++
+               return 1;
+       }
++
+       return 0;
+ }
+@@ -703,7 +709,8 @@ static inline int hrtimer_is_hres_enable
+ static inline int hrtimer_switch_to_hres(void) { return 0; }
+ static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
+ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
+-                                          struct hrtimer_clock_base *base)
++                                          struct hrtimer_clock_base *base,
++                                          int wakeup)
+ {
+       return 0;
+ }
+@@ -886,20 +893,9 @@ remove_hrtimer(struct hrtimer *timer, st
+       return 0;
+ }
+-/**
+- * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
+- * @timer:    the timer to be added
+- * @tim:      expiry time
+- * @delta_ns: "slack" range for the timer
+- * @mode:     expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
+- *
+- * Returns:
+- *  0 on success
+- *  1 when the timer was active
+- */
+-int
+-hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns,
+-                      const enum hrtimer_mode mode)
++int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
++              unsigned long delta_ns, const enum hrtimer_mode mode,
++              int wakeup)
+ {
+       struct hrtimer_clock_base *base, *new_base;
+       unsigned long flags;
+@@ -940,12 +936,29 @@ hrtimer_start_range_ns(struct hrtimer *t
+        * XXX send_remote_softirq() ?
+        */
+       if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
+-              hrtimer_enqueue_reprogram(timer, new_base);
++              hrtimer_enqueue_reprogram(timer, new_base, wakeup);
+       unlock_hrtimer_base(timer, &flags);
+       return ret;
+ }
++
++/**
++ * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
++ * @timer:    the timer to be added
++ * @tim:      expiry time
++ * @delta_ns: "slack" range for the timer
++ * @mode:     expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
++ *
++ * Returns:
++ *  0 on success
++ *  1 when the timer was active
++ */
++int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
++              unsigned long delta_ns, const enum hrtimer_mode mode)
++{
++      return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
++}
+ EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
+ /**
+@@ -961,7 +974,7 @@ EXPORT_SYMBOL_GPL(hrtimer_start_range_ns
+ int
+ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
+ {
+-      return hrtimer_start_range_ns(timer, tim, 0, mode);
++      return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
+ }
+ EXPORT_SYMBOL_GPL(hrtimer_start);
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -231,13 +231,20 @@ static void start_rt_bandwidth(struct rt
+       spin_lock(&rt_b->rt_runtime_lock);
+       for (;;) {
++              unsigned long delta;
++              ktime_t soft, hard;
++
+               if (hrtimer_active(&rt_b->rt_period_timer))
+                       break;
+               now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
+               hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);
+-              hrtimer_start_expires(&rt_b->rt_period_timer,
+-                              HRTIMER_MODE_ABS);
++
++              soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
++              hard = hrtimer_get_expires(&rt_b->rt_period_timer);
++              delta = ktime_to_ns(ktime_sub(hard, soft));
++              __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
++                              HRTIMER_MODE_ABS, 0);
+       }
+       spin_unlock(&rt_b->rt_runtime_lock);
+ }
+@@ -1129,7 +1136,8 @@ static __init void init_hrtick(void)
+  */
+ static void hrtick_start(struct rq *rq, u64 delay)
+ {
+-      hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL);
++      __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
++                      HRTIMER_MODE_REL, 0);
+ }
+ static inline void init_hrtick(void)
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -58,7 +58,7 @@ static DEFINE_PER_CPU(struct task_struct
+  * to the pending events, so lets the scheduler to balance
+  * the softirq load for us.
+  */
+-static inline void wakeup_softirqd(void)
++void wakeup_softirqd(void)
+ {
+       /* Interrupts are disabled: no need to stop preemption */
+       struct task_struct *tsk = __get_cpu_var(ksoftirqd);
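
The fix above adds a __hrtimer_start_range_ns() entry point with a wakeup flag so that callers
already holding rq->lock can defer the softirqd wakeup instead of recursing on the lock. A
compressed illustration of that flag-controlled-wakeup shape in plain, user-space C (all names
hypothetical, not kernel symbols):

/*
 * Illustrative only: the flag-controlled wakeup shape, in plain C.
 * All names are hypothetical, not kernel symbols.
 */
#include <stdio.h>

static int work_pending;

static void mark_pending_and_wake(void)
{
	work_pending = 1;
	puts("woke the worker");        /* would take the worker's lock */
}

static void mark_pending_only(void)
{
	work_pending = 1;               /* wakeup happens later, at a safe point */
}

static void __start_timer(int already_expired, int wakeup)
{
	if (!already_expired)
		return;
	if (wakeup)
		mark_pending_and_wake();  /* normal callers */
	else
		mark_pending_only();      /* callers that already hold the lock */
}

static void start_timer(int already_expired)
{
	__start_timer(already_expired, 1);  /* old behaviour preserved */
}

int main(void)
{
	start_timer(1);
	__start_timer(1, 0);
	printf("work_pending=%d\n", work_pending);
	return 0;
}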
diff --git a/queue-2.6.29/hugetlbfs-return-negative-error-code-for-bad-mount-option.patch b/queue-2.6.29/hugetlbfs-return-negative-error-code-for-bad-mount-option.patch
new file mode 100644
index 0000000..85ed08c
--- /dev/null
@@ -0,0 +1,53 @@
+From stable-bounces@linux.kernel.org  Tue Apr 21 21:20:10 2009
+Date: Tue, 21 Apr 2009 21:20:04 GMT
+Message-Id: <200904212120.n3LLK4Wx024902@hera.kernel.org>
+From: Akinobu Mita <akinobu.mita@gmail.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: hugetlbfs: return negative error code for bad mount option
+
+upstream commit: c12ddba09394c60e1120e6997794fa6ed52da884
+
+This fixes the following BUG:
+
+  # mount -o size=MM -t hugetlbfs none /huge
+  hugetlbfs: Bad value 'MM' for mount option 'size=MM'
+  ------------[ cut here ]------------
+  kernel BUG at fs/super.c:996!
+
+Due to
+
+       BUG_ON(!mnt->mnt_sb);
+
+in vfs_kern_mount().
+
+Also, remove unused #include <linux/quotaops.h>
+
+Cc: William Irwin <wli@holomorphy.com>
+Cc: <stable@kernel.org>
+Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ fs/hugetlbfs/inode.c |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/fs/hugetlbfs/inode.c
++++ b/fs/hugetlbfs/inode.c
+@@ -26,7 +26,6 @@
+ #include <linux/pagevec.h>
+ #include <linux/parser.h>
+ #include <linux/mman.h>
+-#include <linux/quotaops.h>
+ #include <linux/slab.h>
+ #include <linux/dnotify.h>
+ #include <linux/statfs.h>
+@@ -842,7 +841,7 @@ hugetlbfs_parse_options(char *options, s
+ bad_val:
+       printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n",
+              args[0].from, p);
+-      return 1;
++      return -EINVAL;
+ }
+ static int
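
The convention the patch above restores: an option parser reports failure as a negative errno
so the mount path can bail out cleanly rather than hitting BUG_ON(!mnt->mnt_sb). A small
stand-alone C sketch of that convention (parse_size_option and do_mount are hypothetical names,
not kernel functions):

/*
 * Illustrative only: failure is reported as a negative errno, never as a
 * positive value, so the caller can detect it and unwind cleanly.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_size_option(const char *val, long *out)
{
	char *end;
	long size = strtol(val, &end, 10);

	if (end == val || *end != '\0')
		return -EINVAL;         /* bad value: negative errno, not 1 */
	*out = size;
	return 0;
}

static int do_mount(const char *opt)
{
	long size;
	int err = parse_size_option(opt, &size);

	if (err < 0) {                  /* caller can now see the failure */
		fprintf(stderr, "bad value '%s' for mount option 'size'\n", opt);
		return err;
	}
	printf("mounted with size=%ld\n", size);
	return 0;
}

int main(void)
{
	do_mount("1024");
	do_mount("MM");
	return 0;
}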
diff --git a/queue-2.6.29/ide-atapi-start-dma-after-issuing-a-packet-command.patch b/queue-2.6.29/ide-atapi-start-dma-after-issuing-a-packet-command.patch
new file mode 100644
index 0000000..f28767c
--- /dev/null
@@ -0,0 +1,51 @@
+From stable-bounces@linux.kernel.org  Thu Apr  2 20:19:39 2009
+From: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
+To: stable@kernel.org
+Date: Thu, 2 Apr 2009 22:21:31 +0200
+Content-Disposition: inline
+Message-Id: <200904022221.32139.bzolnier@gmail.com>
+Cc: Borislav Petkov <petkovbb@gmail.com>, Michael Roth <mroth@nessie.de>
+Subject: ide-atapi: start DMA after issuing a packet command
+
+From: Borislav Petkov <petkovbb@gmail.com>
+
+upstream commit: 2eba08270990b99fb5429b76ee97184ddd272f7f
+
+Apparently¹, some ATAPI devices want to see the packet command first
+before enabling DMA; otherwise they simply hang indefinitely. Reorder the
+two steps and start DMA only after having issued the command first.
+
+[1] http://marc.info/?l=linux-kernel&m=123835520317235&w=2
+
+Signed-off-by: Borislav Petkov <petkovbb@gmail.com>
+Reported-by: Michael Roth <mroth@nessie.de>
+Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/ide/ide-atapi.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/ide/ide-atapi.c
++++ b/drivers/ide/ide-atapi.c
+@@ -568,6 +568,10 @@ static ide_startstop_t ide_transfer_pc(i
+                                            : ide_pc_intr),
+                       timeout, expiry);
++      /* Send the actual packet */
++      if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
++              hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len);
++
+       /* Begin DMA, if necessary */
+       if (dev_is_idecd(drive)) {
+               if (drive->dma)
+@@ -579,10 +583,6 @@ static ide_startstop_t ide_transfer_pc(i
+               }
+       }
+-      /* Send the actual packet */
+-      if ((drive->atapi_flags & IDE_AFLAG_ZIP_DRIVE) == 0)
+-              hwif->tp_ops->output_data(drive, NULL, rq->cmd, cmd_len);
+-
+       return ide_started;
+ }
diff --git a/queue-2.6.29/ide-drivers-ide-ide-atapi.c-needs-linux-scatterlist.h.patch b/queue-2.6.29/ide-drivers-ide-ide-atapi.c-needs-linux-scatterlist.h.patch
new file mode 100644
index 0000000..e8f7f55
--- /dev/null
@@ -0,0 +1,43 @@
+From stable-bounces@linux.kernel.org  Thu Apr  2 20:19:34 2009
+From: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
+To: stable@kernel.org
+Date: Thu, 2 Apr 2009 22:20:22 +0200
+Message-Id: <200904022220.23783.bzolnier@gmail.com>
+Cc: Dmitri Vorobiev <dmitri.vorobiev@movial.com>, Geert Uytterhoeven <geert@linux-m68k.org>
+Subject: ide: drivers/ide/ide-atapi.c needs <linux/scatterlist.h>
+
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+
+upstream commit: 479edf065576aeed7ac99d10838bb3b4f870b5f9
+
+On m68k:
+| drivers/ide/ide-atapi.c: In function 'ide_io_buffers':
+| drivers/ide/ide-atapi.c:87: error: implicit declaration of function 'sg_page'
+| drivers/ide/ide-atapi.c:87: warning: passing argument 1 of 'PageHighMem' makes pointer from integer without a cast
+| drivers/ide/ide-atapi.c:91: warning: passing argument 1 of 'kmap_atomic' makes pointer from integer without a cast
+| drivers/ide/ide-atapi.c:96: error: implicit declaration of function 'sg_virt'
+| drivers/ide/ide-atapi.c:96: warning: assignment makes pointer from integer without a cast
+| drivers/ide/ide-atapi.c:107: error: implicit declaration of function 'sg_next'
+| drivers/ide/ide-atapi.c:107: warning: assignment makes pointer from integer without a cast
+
+[bart: Dmitri Vorobiev submitted similar patch fixing MIPS]
+
+Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Cc: Dmitri Vorobiev <dmitri.vorobiev@movial.com>
+Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/ide/ide-atapi.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/ide/ide-atapi.c
++++ b/drivers/ide/ide-atapi.c
+@@ -6,6 +6,8 @@
+ #include <linux/cdrom.h>
+ #include <linux/delay.h>
+ #include <linux/ide.h>
++#include <linux/scatterlist.h>
++
+ #include <scsi/scsi.h>
+ #ifdef DEBUG
diff --git a/queue-2.6.29/ide-fix-code-dealing-with-sleeping-devices-in-do_ide_request.patch b/queue-2.6.29/ide-fix-code-dealing-with-sleeping-devices-in-do_ide_request.patch
new file mode 100644
index 0000000..ce68eb2
--- /dev/null
@@ -0,0 +1,41 @@
+From stable-bounces@linux.kernel.org  Wed Apr  1 17:05:14 2009
+Date: Wed, 1 Apr 2009 17:05:08 GMT
+Message-Id: <200904011705.n31H582t005505@hera.kernel.org>
+From: Elias Oltmanns <eo@nebensachen.de>
+To: stable@kernel.org
+Subject: ide: Fix code dealing with sleeping devices in do_ide_request()
+
+upstream commit: 9010941c5483a7a5bb1f7d97ee62491fb078bb51
+
+Unfortunately, I missed a catch when reviewing the patch committed as
+201bffa4. Here is the fix to the currently broken handling of sleeping
+devices. In particular, this is required to get the disk shock
+protection code working again.
+
+Reported-by: Christian Thaeter <ct@pipapo.org>
+Cc: stable@kernel.org
+Signed-off-by: Elias Oltmanns <eo@nebensachen.de>
+Signed-off-by: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/ide/ide-io.c |    9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/drivers/ide/ide-io.c
++++ b/drivers/ide/ide-io.c
+@@ -736,11 +736,10 @@ repeat:
+               prev_port = hwif->host->cur_port;
+               hwif->rq = NULL;
+-              if (drive->dev_flags & IDE_DFLAG_SLEEPING) {
+-                      if (time_before(drive->sleep, jiffies)) {
+-                              ide_unlock_port(hwif);
+-                              goto plug_device;
+-                      }
++              if (drive->dev_flags & IDE_DFLAG_SLEEPING &&
++                  time_after(drive->sleep, jiffies)) {
++                      ide_unlock_port(hwif);
++                      goto plug_device;
+               }
+               if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
diff --git a/queue-2.6.29/input-gameport-fix-attach-driver-code.patch b/queue-2.6.29/input-gameport-fix-attach-driver-code.patch
new file mode 100644
index 0000000..b06d8ea
--- /dev/null
@@ -0,0 +1,69 @@
+From 4ced8e7cb990a2c3bbf0ac7f27b35c890e7ce895 Mon Sep 17 00:00:00 2001
+Message-ID: <20090418225420.GB31557@dtor-d630.eng.vmware.com>
+From: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Date: Mon, 13 Apr 2009 15:27:49 -0700
+Subject: Input: gameport - fix attach driver code
+
+upstream commit: 4ced8e7cb990a2c3bbf0ac7f27b35c890e7ce895
+
+The commit 6902c0bead4ce266226fc0c5b3828b850bdc884a that moved
+driver registration out of kgameportd thread was incomplete and
+did not add the code necessary to actually attach the driver to
+already registered devices; rectify that.
+
+Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/input/gameport/gameport.c |   14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+--- a/drivers/input/gameport/gameport.c
++++ b/drivers/input/gameport/gameport.c
+@@ -50,9 +50,8 @@ static LIST_HEAD(gameport_list);
+ static struct bus_type gameport_bus;
+-static void gameport_add_driver(struct gameport_driver *drv);
+ static void gameport_add_port(struct gameport *gameport);
+-static void gameport_destroy_port(struct gameport *gameport);
++static void gameport_attach_driver(struct gameport_driver *drv);
+ static void gameport_reconnect_port(struct gameport *gameport);
+ static void gameport_disconnect_port(struct gameport *gameport);
+@@ -230,7 +229,6 @@ static void gameport_find_driver(struct 
+ enum gameport_event_type {
+       GAMEPORT_REGISTER_PORT,
+-      GAMEPORT_REGISTER_DRIVER,
+       GAMEPORT_ATTACH_DRIVER,
+ };
+@@ -374,8 +372,8 @@ static void gameport_handle_event(void)
+                               gameport_add_port(event->object);
+                               break;
+-                      case GAMEPORT_REGISTER_DRIVER:
+-                              gameport_add_driver(event->object);
++                      case GAMEPORT_ATTACH_DRIVER:
++                              gameport_attach_driver(event->object);
+                               break;
+                       default:
+@@ -706,14 +704,14 @@ static int gameport_driver_remove(struct
+       return 0;
+ }
+-static void gameport_add_driver(struct gameport_driver *drv)
++static void gameport_attach_driver(struct gameport_driver *drv)
+ {
+       int error;
+-      error = driver_register(&drv->driver);
++      error = driver_attach(&drv->driver);
+       if (error)
+               printk(KERN_ERR
+-                      "gameport: driver_register() failed for %s, error: %d\n",
++                      "gameport: driver_attach() failed for %s, error: %d\n",
+                       drv->driver.name, error);
+ }
diff --git a/queue-2.6.29/ixgbe-fix-potential-memory-leak-driver-panic-issue-while-setting-up-tx-rx-ring-parameters.patch b/queue-2.6.29/ixgbe-fix-potential-memory-leak-driver-panic-issue-while-setting-up-tx-rx-ring-parameters.patch
new file mode 100644
index 0000000..e358925
--- /dev/null
@@ -0,0 +1,176 @@
+From stable-bounces@linux.kernel.org  Fri Apr  3 04:35:21 2009
+Date: Fri, 3 Apr 2009 04:35:17 GMT
+Message-Id: <200904030435.n334ZHY4010519@hera.kernel.org>
+From: Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: ixgbe: Fix potential memory leak/driver panic issue while setting up Tx & Rx ring parameters
+
+upstream commit: f9ed88549e2ec73922b788e3865282d221233662
+
+While setting up the ring parameters using ethtool, the driver can
+panic or leak memory as ixgbe_open tries to set up tx & rx resources.
+The updated logic will use ixgbe_down/up after successful allocation of
+tx & rx resources.
+
+Signed-off-by: Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>
+Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+CC: stable@kernel.org
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/net/ixgbe/ixgbe_ethtool.c |  103 +++++++++++++++++++++-----------------
+ 1 file changed, 59 insertions(+), 44 deletions(-)
+
+--- a/drivers/net/ixgbe/ixgbe_ethtool.c
++++ b/drivers/net/ixgbe/ixgbe_ethtool.c
+@@ -691,9 +691,10 @@ static int ixgbe_set_ringparam(struct ne
+                                struct ethtool_ringparam *ring)
+ {
+       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+-      struct ixgbe_ring *temp_ring;
++      struct ixgbe_ring *temp_tx_ring, *temp_rx_ring;
+       int i, err;
+       u32 new_rx_count, new_tx_count;
++      bool need_update = false;
+       if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+               return -EINVAL;
+@@ -712,80 +713,94 @@ static int ixgbe_set_ringparam(struct ne
+               return 0;
+       }
+-      temp_ring = kcalloc(adapter->num_tx_queues,
+-                          sizeof(struct ixgbe_ring), GFP_KERNEL);
+-      if (!temp_ring)
+-              return -ENOMEM;
+-
+       while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+               msleep(1);
+-      if (new_tx_count != adapter->tx_ring->count) {
++      temp_tx_ring = kcalloc(adapter->num_tx_queues,
++                             sizeof(struct ixgbe_ring), GFP_KERNEL);
++      if (!temp_tx_ring) {
++              err = -ENOMEM;
++              goto err_setup;
++      }
++
++      if (new_tx_count != adapter->tx_ring_count) {
++              memcpy(temp_tx_ring, adapter->tx_ring,
++                     adapter->num_tx_queues * sizeof(struct ixgbe_ring));
+               for (i = 0; i < adapter->num_tx_queues; i++) {
+-                      temp_ring[i].count = new_tx_count;
+-                      err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]);
++                      temp_tx_ring[i].count = new_tx_count;
++                      err = ixgbe_setup_tx_resources(adapter,
++                                                     &temp_tx_ring[i]);
+                       if (err) {
+                               while (i) {
+                                       i--;
+                                       ixgbe_free_tx_resources(adapter,
+-                                                              &temp_ring[i]);
++                                                              &temp_tx_ring[i]);
+                               }
+                               goto err_setup;
+                       }
+-                      temp_ring[i].v_idx = adapter->tx_ring[i].v_idx;
++                      temp_tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
+               }
+-              if (netif_running(netdev))
+-                      netdev->netdev_ops->ndo_stop(netdev);
+-              ixgbe_reset_interrupt_capability(adapter);
+-              ixgbe_napi_del_all(adapter);
+-              INIT_LIST_HEAD(&netdev->napi_list);
+-              kfree(adapter->tx_ring);
+-              adapter->tx_ring = temp_ring;
+-              temp_ring = NULL;
+-              adapter->tx_ring_count = new_tx_count;
+-      }
+-
+-      temp_ring = kcalloc(adapter->num_rx_queues,
+-                          sizeof(struct ixgbe_ring), GFP_KERNEL);
+-      if (!temp_ring) {
+-              if (netif_running(netdev))
+-                      netdev->netdev_ops->ndo_open(netdev);
+-              return -ENOMEM;
++              need_update = true;
+       }
+-      if (new_rx_count != adapter->rx_ring->count) {
++      temp_rx_ring = kcalloc(adapter->num_rx_queues,
++                             sizeof(struct ixgbe_ring), GFP_KERNEL);
++      if ((!temp_rx_ring) && (need_update)) {
++              for (i = 0; i < adapter->num_tx_queues; i++)
++                      ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
++              kfree(temp_tx_ring);
++              err = -ENOMEM;
++              goto err_setup;
++      }
++
++      if (new_rx_count != adapter->rx_ring_count) {
++              memcpy(temp_rx_ring, adapter->rx_ring,
++                     adapter->num_rx_queues * sizeof(struct ixgbe_ring));
+               for (i = 0; i < adapter->num_rx_queues; i++) {
+-                      temp_ring[i].count = new_rx_count;
+-                      err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
++                      temp_rx_ring[i].count = new_rx_count;
++                      err = ixgbe_setup_rx_resources(adapter,
++                                                     &temp_rx_ring[i]);
+                       if (err) {
+                               while (i) {
+                                       i--;
+                                       ixgbe_free_rx_resources(adapter,
+-                                                              &temp_ring[i]);
++                                                            &temp_rx_ring[i]);
+                               }
+                               goto err_setup;
+                       }
+-                      temp_ring[i].v_idx = adapter->rx_ring[i].v_idx;
++                      temp_rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
+               }
++              need_update = true;
++      }
++
++      /* if rings need to be updated, here's the place to do it in one shot */
++      if (need_update) {
+               if (netif_running(netdev))
+-                      netdev->netdev_ops->ndo_stop(netdev);
+-              ixgbe_reset_interrupt_capability(adapter);
+-              ixgbe_napi_del_all(adapter);
+-              INIT_LIST_HEAD(&netdev->napi_list);
+-              kfree(adapter->rx_ring);
+-              adapter->rx_ring = temp_ring;
+-              temp_ring = NULL;
++                      ixgbe_down(adapter);
++
++              /* tx */
++              if (new_tx_count != adapter->tx_ring_count) {
++                      kfree(adapter->tx_ring);
++                      adapter->tx_ring = temp_tx_ring;
++                      temp_tx_ring = NULL;
++                      adapter->tx_ring_count = new_tx_count;
++              }
+-              adapter->rx_ring_count = new_rx_count;
++              /* rx */
++              if (new_rx_count != adapter->rx_ring_count) {
++                      kfree(adapter->rx_ring);
++                      adapter->rx_ring = temp_rx_ring;
++                      temp_rx_ring = NULL;
++                      adapter->rx_ring_count = new_rx_count;
++              }
+       }
+       /* success! */
+       err = 0;
+-err_setup:
+-      ixgbe_init_interrupt_scheme(adapter);
+       if (netif_running(netdev))
+-              netdev->netdev_ops->ndo_open(netdev);
++              ixgbe_up(adapter);
++err_setup:
+       clear_bit(__IXGBE_RESETTING, &adapter->state);
+       return err;
+ }
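
The structure the ixgbe change above adopts is the general allocate-first, swap-later pattern:
build the replacement rings completely, and only if everything succeeded take the device down,
swap the pointers and bring it back up, so a failed allocation leaves the old rings untouched.
A stand-alone C sketch of the same pattern on a plain heap buffer (resize_ring is a hypothetical
name, not driver code):

/*
 * Illustrative only: set up the new resource completely before tearing
 * down the old one, so any failure leaves the original state intact.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int resize_ring(int **ring, size_t *count, size_t new_count)
{
	int *tmp;

	if (new_count == *count)
		return 0;                       /* nothing to do */

	tmp = calloc(new_count, sizeof(*tmp));  /* build the new ring first */
	if (!tmp)
		return -1;                      /* old ring is still valid */

	memcpy(tmp, *ring,
	       (new_count < *count ? new_count : *count) * sizeof(*tmp));

	free(*ring);                            /* only now tear down and swap */
	*ring = tmp;
	*count = new_count;
	return 0;
}

int main(void)
{
	size_t count = 4;
	int *ring = calloc(count, sizeof(*ring));

	if (resize_ring(&ring, &count, 8) == 0)
		printf("ring resized to %zu descriptors\n", count);
	free(ring);
	return 0;
}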
diff --git a/queue-2.6.29/kprobes-fix-locking-imbalance-in-kretprobes.patch b/queue-2.6.29/kprobes-fix-locking-imbalance-in-kretprobes.patch
new file mode 100644
index 0000000..0479b74
--- /dev/null
@@ -0,0 +1,86 @@
+From stable-bounces@linux.kernel.org  Sun Apr  5 18:20:26 2009
+Date: Sun, 5 Apr 2009 18:20:22 GMT
+Message-Id: <200904051820.n35IKMnc012102@hera.kernel.org>
+From: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: kprobes: Fix locking imbalance in kretprobes
+
+upstream commit: f02b8624fedca39886b0eef770dca70c2f0749b3
+
+Fix locking imbalance in kretprobes:
+
+=====================================
+[ BUG: bad unlock balance detected! ]
+-------------------------------------
+kthreadd/2 is trying to release lock (&rp->lock) at:
+[<c06b3080>] pre_handler_kretprobe+0xea/0xf4
+but there are no more locks to release!
+
+other info that might help us debug this:
+1 lock held by kthreadd/2:
+ #0:  (rcu_read_lock){..--}, at: [<c06b2b24>] __atomic_notifier_call_chain+0x0/0x5a
+
+stack backtrace:
+Pid: 2, comm: kthreadd Not tainted 2.6.29-rc8 #1
+Call Trace:
+ [<c06ae498>] ? printk+0xf/0x17
+ [<c06b3080>] ? pre_handler_kretprobe+0xea/0xf4
+ [<c044ce6c>] print_unlock_inbalance_bug+0xc3/0xce
+ [<c0444d4b>] ? clocksource_read+0x7/0xa
+ [<c04450a4>] ? getnstimeofday+0x5f/0xf6
+ [<c044a9ca>] ? register_lock_class+0x17/0x293
+ [<c044b72c>] ? mark_lock+0x1e/0x30b
+ [<c0448956>] ? tick_dev_program_event+0x4a/0xbc
+ [<c0498100>] ? __slab_alloc+0xa5/0x415
+ [<c06b2fbe>] ? pre_handler_kretprobe+0x28/0xf4
+ [<c06b3080>] ? pre_handler_kretprobe+0xea/0xf4
+ [<c044cf1b>] lock_release_non_nested+0xa4/0x1a5
+ [<c06b3080>] ? pre_handler_kretprobe+0xea/0xf4
+ [<c044d15d>] lock_release+0x141/0x166
+ [<c06b07dd>] _spin_unlock_irqrestore+0x19/0x50
+ [<c06b3080>] pre_handler_kretprobe+0xea/0xf4
+ [<c06b20b5>] kprobe_exceptions_notify+0x1c9/0x43e
+ [<c06b2b02>] notifier_call_chain+0x26/0x48
+ [<c06b2b5b>] __atomic_notifier_call_chain+0x37/0x5a
+ [<c06b2b24>] ? __atomic_notifier_call_chain+0x0/0x5a
+ [<c06b2b8a>] atomic_notifier_call_chain+0xc/0xe
+ [<c0442d0d>] notify_die+0x2d/0x2f
+ [<c06b0f9c>] do_int3+0x1f/0x71
+ [<c06b0e84>] int3+0x2c/0x34
+ [<c042d476>] ? do_fork+0x1/0x288
+ [<c040221b>] ? kernel_thread+0x71/0x79
+ [<c043ed1b>] ? kthread+0x0/0x60
+ [<c043ed1b>] ? kthread+0x0/0x60
+ [<c04040b8>] ? kernel_thread_helper+0x0/0x10
+ [<c043ec7f>] kthreadd+0xac/0x148
+ [<c043ebd3>] ? kthreadd+0x0/0x148
+ [<c04040bf>] kernel_thread_helper+0x7/0x10
+
+Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
+Tested-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
+Cc: Masami Hiramatsu <mhiramat@redhat.com>
+Cc: Jim Keniston <jkenisto@us.ibm.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: <stable@kernel.org> [2.6.29.x, 2.6.28.x, 2.6.27.x]
+LKML-Reference: <20090318113621.GB4129@in.ibm.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ kernel/kprobes.c |    4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -912,10 +912,8 @@ static int __kprobes pre_handler_kretpro
+               ri->rp = rp;
+               ri->task = current;
+-              if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+-                      spin_unlock_irqrestore(&rp->lock, flags);
++              if (rp->entry_handler && rp->entry_handler(ri, regs))
+                       return 0;
+-              }
+               arch_prepare_kretprobe(ri, regs);
diff --git a/queue-2.6.29/kvm-add-config_have_kvm_irqchip.patch b/queue-2.6.29/kvm-add-config_have_kvm_irqchip.patch
new file mode 100644
index 0000000..c7a52b4
--- /dev/null
@@ -0,0 +1,74 @@
+From stable-bounces@linux.kernel.org  Fri Apr 17 22:41:51 2009
+From: mtosatti@redhat.com
+To: chrisw@redhat.com
+Date: Fri, 17 Apr 2009 19:40:07 -0300
+Message-Id: <1240008013-4635-2-git-send-email-mtosatti@redhat.com>
+Cc: stable@kernel.org, Avi Kivity <avi@redhat.com>
+Subject: KVM: Add CONFIG_HAVE_KVM_IRQCHIP
+
+From: Avi Kivity <avi@redhat.com>
+
+upstream commit: 5d9b8e30f543a9f21a968a4cda71e8f6d1c66a61
+
+Two KVM archs support irqchips and two don't.  Add a Kconfig item to
+make selecting between the two models easier.
+
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ arch/ia64/kvm/Kconfig    |    4 ++++
+ arch/powerpc/kvm/Kconfig |    3 +++
+ arch/s390/kvm/Kconfig    |    3 +++
+ arch/x86/kvm/Kconfig     |    4 ++++
+ 4 files changed, 14 insertions(+)
+
+--- a/arch/ia64/kvm/Kconfig
++++ b/arch/ia64/kvm/Kconfig
+@@ -4,6 +4,10 @@
+ config HAVE_KVM
+       bool
++config HAVE_KVM_IRQCHIP
++       bool
++       default y
++
+ menuconfig VIRTUALIZATION
+       bool "Virtualization"
+       depends on HAVE_KVM || IA64
+--- a/arch/powerpc/kvm/Kconfig
++++ b/arch/powerpc/kvm/Kconfig
+@@ -2,6 +2,9 @@
+ # KVM configuration
+ #
++config HAVE_KVM_IRQCHIP
++       bool
++
+ menuconfig VIRTUALIZATION
+       bool "Virtualization"
+       ---help---
+--- a/arch/s390/kvm/Kconfig
++++ b/arch/s390/kvm/Kconfig
+@@ -4,6 +4,9 @@
+ config HAVE_KVM
+        bool
++config HAVE_KVM_IRQCHIP
++       bool
++
+ menuconfig VIRTUALIZATION
+       bool "Virtualization"
+       default y
+--- a/arch/x86/kvm/Kconfig
++++ b/arch/x86/kvm/Kconfig
+@@ -4,6 +4,10 @@
+ config HAVE_KVM
+        bool
++config HAVE_KVM_IRQCHIP
++       bool
++       default y
++
+ menuconfig VIRTUALIZATION
+       bool "Virtualization"
+       depends on HAVE_KVM || X86
diff --git a/queue-2.6.29/kvm-fix-kvm_vm_ioctl_deassign_device.patch b/queue-2.6.29/kvm-fix-kvm_vm_ioctl_deassign_device.patch
new file mode 100644
index 0000000..bab1add
--- /dev/null
@@ -0,0 +1,34 @@
+From stable-bounces@linux.kernel.org  Fri Apr 17 22:42:27 2009
+From: mtosatti@redhat.com
+To: chrisw@redhat.com
+Date: Fri, 17 Apr 2009 19:40:11 -0300
+Message-Id: <1240008013-4635-6-git-send-email-mtosatti@redhat.com>
+Cc: Weidong Han <weidong.han@intel.com>, stable@kernel.org, Avi Kivity <avi@redhat.com>
+Subject: KVM: fix kvm_vm_ioctl_deassign_device
+
+From: Weidong Han <weidong.han@intel.com>
+
+upstream commit: 4a906e49f103c2e544148a209ba1db316510799f
+
+Only assigned_dev_id needs to be set for deassignment, so use
+match->flags to decide whether to deassign the device.
+
+Acked-by: Mark McLoughlin <markmc@redhat.com>
+Signed-off-by: Weidong Han <weidong.han@intel.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ virt/kvm/kvm_main.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -563,7 +563,7 @@ static int kvm_vm_ioctl_deassign_device(
+               goto out;
+       }
+-      if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
++      if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
+               kvm_deassign_device(kvm, match);
+       kvm_free_assigned_device(kvm, match);
diff --git a/queue-2.6.29/kvm-fix-missing-smp-tlb-flush-in-invlpg.patch b/queue-2.6.29/kvm-fix-missing-smp-tlb-flush-in-invlpg.patch
new file mode 100644
index 0000000..13a19ee
--- /dev/null
@@ -0,0 +1,56 @@
+From stable-bounces@linux.kernel.org  Fri Apr 17 22:42:09 2009
+From: mtosatti@redhat.com
+To: chrisw@redhat.com
+Date: Fri, 17 Apr 2009 19:40:06 -0300
+Message-Id: <1240008013-4635-1-git-send-email-mtosatti@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>, stable@kernel.org, Avi Kivity <avi@redhat.com>
+Subject: KVM: Fix missing smp tlb flush in invlpg
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+upstream commit: 4539b35881ae9664b0e2953438dd83f5ee02c0b4
+
+When kvm emulates an invlpg instruction, it can drop a shadow pte but
+leave the guest TLBs intact.  This can cause memory corruption when
+swapping out.
+
+Without this the other cpu can still write to a freed host physical page.
+If rmap_remove is called, a TLB SMP flush must always happen before mmu_lock
+is released, because the VM will take the mmu_lock before it can finally add
+the page to the freelist after swapout. The mmu notifier makes it safe to flush
+the TLB after freeing the page (otherwise it would never be safe), so we can do
+a single flush for multiple invalidated sptes.
+
+Cc: stable@kernel.org
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Acked-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+[mtosatti: backport to 2.6.29]
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ arch/x86/kvm/paging_tmpl.h |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/kvm/paging_tmpl.h
++++ b/arch/x86/kvm/paging_tmpl.h
+@@ -476,16 +476,20 @@ static int FNAME(shadow_invlpg_entry)(st
+       if (level == PT_PAGE_TABLE_LEVEL ||
+           ((level == PT_DIRECTORY_LEVEL) && is_large_pte(*sptep))) {
+               struct kvm_mmu_page *sp = page_header(__pa(sptep));
++              int need_flush = 0;
+               sw->pte_gpa = (sp->gfn << PAGE_SHIFT);
+               sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
+               if (is_shadow_present_pte(*sptep)) {
++                      need_flush = 1;
+                       rmap_remove(vcpu->kvm, sptep);
+                       if (is_large_pte(*sptep))
+                               --vcpu->kvm->stat.lpages;
+               }
+               set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
++              if (need_flush)
++                      kvm_flush_remote_tlbs(vcpu->kvm);
+               return 1;
+       }
+       if (!is_shadow_present_pte(*sptep))
diff --git a/queue-2.6.29/kvm-interrupt-mask-notifiers-for-ioapic.patch b/queue-2.6.29/kvm-interrupt-mask-notifiers-for-ioapic.patch
new file mode 100644
index 0000000..4d5dae5
--- /dev/null
@@ -0,0 +1,135 @@
+From stable-bounces@linux.kernel.org  Fri Apr 17 22:42:21 2009
+From: mtosatti@redhat.com
+To: chrisw@redhat.com
+Date: Fri, 17 Apr 2009 19:40:08 -0300
+Message-Id: <1240008013-4635-3-git-send-email-mtosatti@redhat.com>
+Cc: stable@kernel.org, Avi Kivity <avi@redhat.com>
+Subject: KVM: Interrupt mask notifiers for ioapic
+
+From: Avi Kivity <avi@redhat.com>
+
+upstream commit: 75858a84a6207f5e60196f6bbd18fde4250e5759
+
+Allow clients to request notifications when the guest masks or unmasks a
+particular irq line.  This complements irq ack notifications, as the guest
+will not ack an irq line that is masked.
+
+Currently implemented for the ioapic only.
+
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ include/linux/kvm_host.h |   17 +++++++++++++++++
+ virt/kvm/ioapic.c        |    6 ++++++
+ virt/kvm/irq_comm.c      |   24 ++++++++++++++++++++++++
+ virt/kvm/kvm_main.c      |    3 +++
+ 4 files changed, 50 insertions(+)
+
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -127,6 +127,10 @@ struct kvm {
+       struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
+ #endif
++#ifdef CONFIG_HAVE_KVM_IRQCHIP
++      struct hlist_head mask_notifier_list;
++#endif
++
+ #ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+       struct mmu_notifier mmu_notifier;
+       unsigned long mmu_notifier_seq;
+@@ -321,6 +325,19 @@ struct kvm_assigned_dev_kernel {
+       struct pci_dev *dev;
+       struct kvm *kvm;
+ };
++
++struct kvm_irq_mask_notifier {
++      void (*func)(struct kvm_irq_mask_notifier *kimn, bool masked);
++      int irq;
++      struct hlist_node link;
++};
++
++void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
++                                  struct kvm_irq_mask_notifier *kimn);
++void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
++                                    struct kvm_irq_mask_notifier *kimn);
++void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask);
++
+ void kvm_set_irq(struct kvm *kvm, int irq_source_id, int irq, int level);
+ void kvm_notify_acked_irq(struct kvm *kvm, unsigned gsi);
+ void kvm_register_irq_ack_notifier(struct kvm *kvm,
+--- a/virt/kvm/ioapic.c
++++ b/virt/kvm/ioapic.c
+@@ -101,6 +101,7 @@ static void ioapic_service(struct kvm_io
+ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
+ {
+       unsigned index;
++      bool mask_before, mask_after;
+       switch (ioapic->ioregsel) {
+       case IOAPIC_REG_VERSION:
+@@ -120,6 +121,7 @@ static void ioapic_write_indirect(struct
+               ioapic_debug("change redir index %x val %x\n", index, val);
+               if (index >= IOAPIC_NUM_PINS)
+                       return;
++              mask_before = ioapic->redirtbl[index].fields.mask;
+               if (ioapic->ioregsel & 1) {
+                       ioapic->redirtbl[index].bits &= 0xffffffff;
+                       ioapic->redirtbl[index].bits |= (u64) val << 32;
+@@ -128,6 +130,9 @@ static void ioapic_write_indirect(struct
+                       ioapic->redirtbl[index].bits |= (u32) val;
+                       ioapic->redirtbl[index].fields.remote_irr = 0;
+               }
++              mask_after = ioapic->redirtbl[index].fields.mask;
++              if (mask_before != mask_after)
++                      kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after);
+               if (ioapic->irr & (1 << index))
+                       ioapic_service(ioapic, index);
+               break;
+@@ -426,3 +431,4 @@ int kvm_ioapic_init(struct kvm *kvm)
+       kvm_io_bus_register_dev(&kvm->mmio_bus, &ioapic->dev);
+       return 0;
+ }
++
+--- a/virt/kvm/irq_comm.c
++++ b/virt/kvm/irq_comm.c
+@@ -99,3 +99,27 @@ void kvm_free_irq_source_id(struct kvm *
+               clear_bit(irq_source_id, &kvm->arch.irq_states[i]);
+       clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
+ }
++
++void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
++                                  struct kvm_irq_mask_notifier *kimn)
++{
++      kimn->irq = irq;
++      hlist_add_head(&kimn->link, &kvm->mask_notifier_list);
++}
++
++void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
++                                    struct kvm_irq_mask_notifier *kimn)
++{
++      hlist_del(&kimn->link);
++}
++
++void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
++{
++      struct kvm_irq_mask_notifier *kimn;
++      struct hlist_node *n;
++
++      hlist_for_each_entry(kimn, n, &kvm->mask_notifier_list, link)
++              if (kimn->irq == irq)
++                      kimn->func(kimn, mask);
++}
++
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -828,6 +828,9 @@ static struct kvm *kvm_create_vm(void)
+       if (IS_ERR(kvm))
+               goto out;
++#ifdef CONFIG_HAVE_KVM_IRQCHIP
++      INIT_HLIST_HEAD(&kvm->mask_notifier_list);
++#endif
+ #ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+       page = alloc_page(GFP_KERNEL | __GFP_ZERO);
diff --git a/queue-2.6.29/kvm-is_long_mode-should-check-for-efer.lma.patch b/queue-2.6.29/kvm-is_long_mode-should-check-for-efer.lma.patch
new file mode 100644 (file)
index 0000000..5224a32
--- /dev/null
@@ -0,0 +1,37 @@
+From stable-bounces@linux.kernel.org  Fri Apr 17 22:42:03 2009
+From: mtosatti@redhat.com
+To: chrisw@redhat.com
+Date: Fri, 17 Apr 2009 19:40:13 -0300
+Message-Id: <1240008013-4635-8-git-send-email-mtosatti@redhat.com>
+Cc: Amit Shah <amit.shah@redhat.com>, stable@kernel.org, Avi Kivity <avi@redhat.com>
+Subject: KVM: is_long_mode() should check for EFER.LMA
+
+From: Amit Shah <amit.shah@qumranet.com>
+
+upstream commit: 41d6af119206e98764b4ae6d264d63acefcf851e
+
+is_long_mode currently checks the LongModeEnable bit in
+EFER instead of the LongModeActive bit. This is wrong, but
+we survived this till now since it wasn't triggered. This
+breaks guests that go from long mode to compatibility mode.
+
+This was noticed on a Solaris guest and fixes bug #1842160
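+
+For reference, a sketch of the two EFER bits involved (bit numbers per the
+x86-64 architecture manuals; the macros below mirror the kernel's values):
+
+	#define EFER_LME	(1ULL << 8)	/* long mode enable, set by software */
+	#define EFER_LMA	(1ULL << 10)	/* long mode active, kept by the CPU */
+						/* as LMA = LME & CR0.PG             */
+
+Once the guest turns paging off, LMA is cleared by the CPU even though LME
+may still be set, so only LMA reflects whether long mode is actually active.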
+
+Signed-off-by: Amit Shah <amit.shah@qumranet.com>
+Signed-off-by: Avi Kivity <avi@qumranet.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ arch/x86/kvm/mmu.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kvm/mmu.h
++++ b/arch/x86/kvm/mmu.h
+@@ -54,7 +54,7 @@ static inline int kvm_mmu_reload(struct 
+ static inline int is_long_mode(struct kvm_vcpu *vcpu)
+ {
+ #ifdef CONFIG_X86_64
+-      return vcpu->arch.shadow_efer & EFER_LME;
++      return vcpu->arch.shadow_efer & EFER_LMA;
+ #else
+       return 0;
+ #endif
diff --git a/queue-2.6.29/kvm-mmu-handle-compound-pages-in-kvm_is_mmio_pfn.patch b/queue-2.6.29/kvm-mmu-handle-compound-pages-in-kvm_is_mmio_pfn.patch
new file mode 100644 (file)
index 0000000..a16323c
--- /dev/null
@@ -0,0 +1,44 @@
+From stable-bounces@linux.kernel.org  Fri Apr 17 22:42:15 2009
+From: mtosatti@redhat.com
+To: chrisw@redhat.com
+Date: Fri, 17 Apr 2009 19:40:10 -0300
+Message-Id: <1240008013-4635-5-git-send-email-mtosatti@redhat.com>
+Cc: Joerg Roedel <joerg.roedel@amd.com>, stable@kernel.org, Avi Kivity <avi@redhat.com>
+Subject: KVM: MMU: handle compound pages in kvm_is_mmio_pfn
+
+From: Joerg Roedel <joerg.roedel@amd.com>
+
+upstream commit: fc5659c8c6b6c4e02ac354b369017c1bf231f347
+
+The function kvm_is_mmio_pfn is called before put_page is called on a
+page by KVM. This is a problem when this function is called on some
+struct page which is part of a compound page. It does not test the
+reserved flag of the compound page but of the struct page within the
+compound page. This is a problem when KVM works with hugepages allocated
+at boot time. These pages have the reserved bit set in all tail pages;
+only the flag in the compound head is cleared. KVM would not put such a
+page, which results in a memory leak.
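+
+As a sketch of the difference (pfn here stands for any frame inside such a
+boot-time hugepage, so pfn_to_page() may hand back a tail page):
+
+	struct page *page = pfn_to_page(pfn);
+
+	PageReserved(page);			/* tail page: true, looks like MMIO */
+	PageReserved(compound_head(page));	/* head page: false, ordinary RAM   */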
+
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Acked-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ virt/kvm/kvm_main.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -581,8 +581,10 @@ static inline int valid_vcpu(int n)
+ inline int kvm_is_mmio_pfn(pfn_t pfn)
+ {
+-      if (pfn_valid(pfn))
+-              return PageReserved(pfn_to_page(pfn));
++      if (pfn_valid(pfn)) {
++              struct page *page = compound_head(pfn_to_page(pfn));
++              return PageReserved(page);
++      }
+       return true;
+ }
diff --git a/queue-2.6.29/kvm-reset-pit-irq-injection-logic-when-the-pit-irq-is-unmasked.patch b/queue-2.6.29/kvm-reset-pit-irq-injection-logic-when-the-pit-irq-is-unmasked.patch
new file mode 100644 (file)
index 0000000..3d8bf68
--- /dev/null
@@ -0,0 +1,74 @@
+From stable-bounces@linux.kernel.org  Fri Apr 17 22:41:44 2009
+From: mtosatti@redhat.com
+To: chrisw@redhat.com
+Date: Fri, 17 Apr 2009 19:40:09 -0300
+Message-Id: <1240008013-4635-4-git-send-email-mtosatti@redhat.com>
+Cc: stable@kernel.org, Avi Kivity <avi@redhat.com>
+Subject: KVM: Reset PIT irq injection logic when the PIT IRQ is unmasked
+
+From: Avi Kivity <avi@redhat.com>
+
+upstream commit: 4780c65904f0fc4e312ee2da9383eacbe04e61ea
+
+While the PIT is masked the guest cannot ack the irq, so the reinject logic
+will never allow the interrupt to be injected.
+
+Fix by resetting the reinjection counters on unmask.
+
+Unbreaks Xen.
+
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ arch/x86/kvm/i8254.c |   15 +++++++++++++++
+ arch/x86/kvm/i8254.h |    1 +
+ 2 files changed, 16 insertions(+)
+
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -536,6 +536,16 @@ void kvm_pit_reset(struct kvm_pit *pit)
+       pit->pit_state.irq_ack = 1;
+ }
++static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
++{
++      struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);
++
++      if (!mask) {
++              atomic_set(&pit->pit_state.pit_timer.pending, 0);
++              pit->pit_state.irq_ack = 1;
++      }
++}
++
+ struct kvm_pit *kvm_create_pit(struct kvm *kvm)
+ {
+       struct kvm_pit *pit;
+@@ -584,6 +594,9 @@ struct kvm_pit *kvm_create_pit(struct kv
+       kvm_pit_reset(pit);
++      pit->mask_notifier.func = pit_mask_notifer;
++      kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
++
+       return pit;
+ }
+@@ -592,6 +605,8 @@ void kvm_free_pit(struct kvm *kvm)
+       struct hrtimer *timer;
+       if (kvm->arch.vpit) {
++              kvm_unregister_irq_mask_notifier(kvm, 0,
++                                             &kvm->arch.vpit->mask_notifier);
+               mutex_lock(&kvm->arch.vpit->pit_state.lock);
+               timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
+               hrtimer_cancel(timer);
+--- a/arch/x86/kvm/i8254.h
++++ b/arch/x86/kvm/i8254.h
+@@ -45,6 +45,7 @@ struct kvm_pit {
+       struct kvm *kvm;
+       struct kvm_kpit_state pit_state;
+       int irq_source_id;
++      struct kvm_irq_mask_notifier mask_notifier;
+ };
+ #define KVM_PIT_BASE_ADDRESS      0x40
diff --git a/queue-2.6.29/kvm-vmx-update-necessary-state-when-guest-enters-long-mode.patch b/queue-2.6.29/kvm-vmx-update-necessary-state-when-guest-enters-long-mode.patch
new file mode 100644 (file)
index 0000000..52c0112
--- /dev/null
@@ -0,0 +1,109 @@
+From stable-bounces@linux.kernel.org  Fri Apr 17 22:41:56 2009
+From: mtosatti@redhat.com
+To: chrisw@redhat.com
+Date: Fri, 17 Apr 2009 19:40:12 -0300
+Message-Id: <1240008013-4635-7-git-send-email-mtosatti@redhat.com>
+Cc: Amit Shah <amit.shah@redhat.com>, stable@kernel.org, Avi Kivity <avi@redhat.com>
+Subject: KVM: VMX: Update necessary state when guest enters long mode
+
+From: Amit Shah <amit.shah@redhat.com>
+
+upstream commit: 401d10dee083bda281f2fdcdf654080313ba30ec
+
+setup_msrs() should be called when entering long mode to save the
+shadow state for the 64-bit guest state.
+
+Using vmx_set_efer() in enter_lmode() removes some duplicated code
+and also ensures we call setup_msrs(). We can safely pass the value
+of shadow_efer to vmx_set_efer() as no other bits in the efer change
+while enabling long mode (guest first sets EFER.LME, then sets CR0.PG
+which causes a vmexit where we activate long mode).
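+
+For context, a simplified sketch of that guest-side sequence (wrmsr and
+write_cr0 stand in for whatever the guest OS actually uses):
+
+	wrmsr(MSR_EFER, efer | EFER_LME);	/* 1. enable long mode          */
+	write_cr0(cr0 | X86_CR0_PG);		/* 2. enable paging: this CR0   */
+						/*    write exits to KVM, which */
+						/*    then calls enter_lmode()  */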
+
+With this fix, is_long_mode() can check for EFER.LMA set instead of
+EFER.LME and 5e23049e86dd298b72e206b420513dbc3a240cd9 can be reverted.
+
+Signed-off-by: Amit Shah <amit.shah@redhat.com>
+Signed-off-by: Avi Kivity <avi@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ arch/x86/kvm/vmx.c |   54 +++++++++++++++++++++++------------------------------
+ 1 file changed, 24 insertions(+), 30 deletions(-)
+
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -1433,6 +1433,29 @@ continue_rmode:
+       init_rmode(vcpu->kvm);
+ }
++static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
++{
++      struct vcpu_vmx *vmx = to_vmx(vcpu);
++      struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
++
++      vcpu->arch.shadow_efer = efer;
++      if (!msr)
++              return;
++      if (efer & EFER_LMA) {
++              vmcs_write32(VM_ENTRY_CONTROLS,
++                           vmcs_read32(VM_ENTRY_CONTROLS) |
++                           VM_ENTRY_IA32E_MODE);
++              msr->data = efer;
++      } else {
++              vmcs_write32(VM_ENTRY_CONTROLS,
++                           vmcs_read32(VM_ENTRY_CONTROLS) &
++                           ~VM_ENTRY_IA32E_MODE);
++
++              msr->data = efer & ~EFER_LME;
++      }
++      setup_msrs(vmx);
++}
++
+ #ifdef CONFIG_X86_64
+ static void enter_lmode(struct kvm_vcpu *vcpu)
+@@ -1447,13 +1470,8 @@ static void enter_lmode(struct kvm_vcpu 
+                            (guest_tr_ar & ~AR_TYPE_MASK)
+                            | AR_TYPE_BUSY_64_TSS);
+       }
+-
+       vcpu->arch.shadow_efer |= EFER_LMA;
+-
+-      find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
+-      vmcs_write32(VM_ENTRY_CONTROLS,
+-                   vmcs_read32(VM_ENTRY_CONTROLS)
+-                   | VM_ENTRY_IA32E_MODE);
++      vmx_set_efer(vcpu, vcpu->arch.shadow_efer);
+ }
+ static void exit_lmode(struct kvm_vcpu *vcpu)
+@@ -1612,30 +1630,6 @@ static void vmx_set_cr4(struct kvm_vcpu 
+       vmcs_writel(GUEST_CR4, hw_cr4);
+ }
+-static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+-{
+-      struct vcpu_vmx *vmx = to_vmx(vcpu);
+-      struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
+-
+-      vcpu->arch.shadow_efer = efer;
+-      if (!msr)
+-              return;
+-      if (efer & EFER_LMA) {
+-              vmcs_write32(VM_ENTRY_CONTROLS,
+-                                   vmcs_read32(VM_ENTRY_CONTROLS) |
+-                                   VM_ENTRY_IA32E_MODE);
+-              msr->data = efer;
+-
+-      } else {
+-              vmcs_write32(VM_ENTRY_CONTROLS,
+-                                   vmcs_read32(VM_ENTRY_CONTROLS) &
+-                                   ~VM_ENTRY_IA32E_MODE);
+-
+-              msr->data = efer & ~EFER_LME;
+-      }
+-      setup_msrs(vmx);
+-}
+-
+ static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
+ {
+       struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
diff --git a/queue-2.6.29/md-raid1-don-t-assume-newly-allocated-bvecs-are-initialised.patch b/queue-2.6.29/md-raid1-don-t-assume-newly-allocated-bvecs-are-initialised.patch
new file mode 100644 (file)
index 0000000..44a0ebe
--- /dev/null
@@ -0,0 +1,49 @@
+From stable-bounces@linux.kernel.org  Mon Apr  6 20:55:08 2009
+Date: Mon, 6 Apr 2009 20:55:03 GMT
+Message-Id: <200904062055.n36Kt3Pp024747@hera.kernel.org>
+From: NeilBrown <neilb@suse.de>
+To: jejb@kernel.org, stable@kernel.org
+Subject: md/raid1 - don't assume newly allocated bvecs are initialised.
+
+upstream commit: 303a0e11d0ee136ad8f53f747f3c377daece763b
+
+Since commit d3f761104b097738932afcc310fbbbbfb007ef92
+newly allocated bvecs aren't initialised to NULL, so we have
+to be more careful about freeing a bio which only managed
+to get a few pages allocated to it.  Otherwise the resync
+process crashes.
+
+This patch is appropriate for 2.6.29-stable.
+
+Cc: stable@kernel.org
+Cc: "Jens Axboe" <jens.axboe@oracle.com>
+Reported-by: Gabriele Tozzi <gabriele@tozzi.eu>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/md/raid1.c |    7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -120,6 +120,7 @@ static void * r1buf_pool_alloc(gfp_t gfp
+                               goto out_free_pages;
+                       bio->bi_io_vec[i].bv_page = page;
++                      bio->bi_vcnt = i+1;
+               }
+       }
+       /* If not user-requests, copy the page pointers to all bios */
+@@ -135,9 +136,9 @@ static void * r1buf_pool_alloc(gfp_t gfp
+       return r1_bio;
+ out_free_pages:
+-      for (i=0; i < RESYNC_PAGES ; i++)
+-              for (j=0 ; j < pi->raid_disks; j++)
+-                      safe_put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
++      for (j=0 ; j < pi->raid_disks; j++)
++              for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++)
++                      put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
+       j = -1;
+ out_free_bio:
+       while ( ++j < pi->raid_disks )
diff --git a/queue-2.6.29/mips-compat-zero-upper-32-bit-of-offset_high-and-offset_low.patch b/queue-2.6.29/mips-compat-zero-upper-32-bit-of-offset_high-and-offset_low.patch
new file mode 100644 (file)
index 0000000..f295ace
--- /dev/null
@@ -0,0 +1,39 @@
+From stable-bounces@linux.kernel.org  Wed Apr  1 17:24:16 2009
+Date: Wed, 1 Apr 2009 11:23:38 -0600
+From: dann frazier <dannf@dannf.org>
+To: stable@kernel.org
+Message-ID: <20090401172338.GC24088@ldl.fc.hp.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Subject: MIPS: Compat: Zero upper 32-bit of offset_high and offset_low.
+
+From: Ralf Baechle <ralf@linux-mips.org>
+
+upstream commit: d6c178e9694e7e0c7ffe0289cf4389a498cac735
+
+Though sys_llseek() arguably should do exactly that, it doesn't, which
+means llseek(2) will fail for o32 processes if offset_low has bit 31 set.
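+
+A sketch of what goes wrong (64-bit kernel, o32 caller, illustrative value):
+
+	/* the o32 argument arrives sign-extended in a 64-bit register */
+	unsigned long raw = 0xffffffff80000000UL;	/* caller passed 0x80000000 */
+
+	unsigned long as_ulong = raw;	/* keeps the bogus upper 32 bits */
+	unsigned int  as_uint  = raw;	/* truncates back to 0x80000000  */
+
+Declaring the parameters as unsigned int makes the compiler do that
+truncation before the values are merged into the 64-bit offset.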
+
+As suggested by Heiko Carstens.
+
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+---
+ arch/mips/kernel/linux32.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/arch/mips/kernel/linux32.c
++++ b/arch/mips/kernel/linux32.c
+@@ -134,9 +134,9 @@ SYSCALL_DEFINE4(32_ftruncate64, unsigned
+       return sys_ftruncate(fd, merge_64(a2, a3));
+ }
+-SYSCALL_DEFINE5(32_llseek, unsigned long, fd, unsigned long, offset_high,
+-      unsigned long, offset_low, loff_t __user *, result,
+-      unsigned long, origin)
++SYSCALL_DEFINE5(32_llseek, unsigned int, fd, unsigned int, offset_high,
++              unsigned int, offset_low, loff_t __user *, result,
++              unsigned int, origin)
+ {
+       return sys_llseek(fd, offset_high, offset_low, result, origin);
+ }
diff --git a/queue-2.6.29/mm-define-a-unique-value-for-as_unevictable-flag.patch b/queue-2.6.29/mm-define-a-unique-value-for-as_unevictable-flag.patch
new file mode 100644 (file)
index 0000000..50ceff1
--- /dev/null
@@ -0,0 +1,58 @@
+From stable-bounces@linux.kernel.org  Fri Apr  3 04:35:15 2009
+Date: Fri, 3 Apr 2009 04:35:10 GMT
+Message-Id: <200904030435.n334ZAl2010428@hera.kernel.org>
+From: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: mm: define a UNIQUE value for AS_UNEVICTABLE flag
+
+upstream commit: 9a896c9a48ac6704c0ce8ee081b836644d0afe40
+
+A new "address_space flag"--AS_MM_ALL_LOCKS--was defined to use the next
+available AS flag while the Unevictable LRU was under development.  The
+Unevictable LRU was using the same flag and "no one" noticed.  Current
+mainline, since 2.6.28, has the same value for two symbolic flag names.
+
+So, define a unique flag value for AS_UNEVICTABLE--up close to the other
+flags, [at the cost of an additional #ifdef] so we'll notice next time.
+Note that #ifdef is not actually required, if we don't mind having the
+unused flag value defined.
+
+Replace #defines with an enum.
+
+Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
+Cc: <stable@kernel.org>                [2.6.28.x, 2.6.29.x]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ include/linux/pagemap.h |   12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -18,9 +18,14 @@
+  * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
+  * allocation mode flags.
+  */
+-#define       AS_EIO          (__GFP_BITS_SHIFT + 0)  /* IO error on async write */
+-#define AS_ENOSPC     (__GFP_BITS_SHIFT + 1)  /* ENOSPC on async write */
+-#define AS_MM_ALL_LOCKS       (__GFP_BITS_SHIFT + 2)  /* under mm_take_all_locks() */
++enum mapping_flags {
++      AS_EIO          = __GFP_BITS_SHIFT + 0, /* IO error on async write */
++      AS_ENOSPC       = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
++      AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
++#ifdef CONFIG_UNEVICTABLE_LRU
++      AS_UNEVICTABLE  = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
++#endif
++};
+ static inline void mapping_set_error(struct address_space *mapping, int error)
+ {
+@@ -33,7 +38,6 @@ static inline void mapping_set_error(str
+ }
+ #ifdef CONFIG_UNEVICTABLE_LRU
+-#define AS_UNEVICTABLE        (__GFP_BITS_SHIFT + 2)  /* e.g., ramdisk, SHM_LOCK */
+ static inline void mapping_set_unevictable(struct address_space *mapping)
+ {
diff --git a/queue-2.6.29/mm-do_xip_mapping_read-fix-length-calculation.patch b/queue-2.6.29/mm-do_xip_mapping_read-fix-length-calculation.patch
new file mode 100644 (file)
index 0000000..b8b3392
--- /dev/null
@@ -0,0 +1,65 @@
+From stable-bounces@linux.kernel.org  Fri Apr  3 04:35:16 2009
+Date: Fri, 3 Apr 2009 04:35:12 GMT
+Message-Id: <200904030435.n334ZCrF010445@hera.kernel.org>
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: mm: do_xip_mapping_read: fix length calculation
+
+upstream commit: 58984ce21d315b70df1a43644df7416ea7c9bfd8
+
+The calculation of the value nr in do_xip_mapping_read is incorrect.  If
+the copy required more than one iteration in the do while loop the copies
+variable will be non-zero.  The maximum length that may be passed to the
+call to copy_to_user(buf+copied, xip_mem+offset, nr) is len-copied but the
+check only compares against (nr > len).
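+
+A worked example of the bad clamp (illustrative numbers, 4k pages):
+
+	/* len = 6000: the first pass copies a full page, so copied = 4096.
+	 * Second pass: nr starts out as 4096 again.
+	 *   old check: (nr > len) -> 4096 > 6000 is false, nr stays 4096,
+	 *              copy_to_user() writes up to buf + 8192, i.e. 2192
+	 *              bytes past the 6000-byte buffer.
+	 *   new check: (nr > len - copied) -> 4096 > 1904, so nr = 1904
+	 *              and the copy stops exactly at buf + 6000. */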
+
+This bug is the cause for the heap corruption Carsten has been chasing
+for so long:
+
+*** glibc detected *** /bin/bash: free(): invalid next size (normal): 0x00000000800e39f0 ***
+======= Backtrace: =========
+/lib64/libc.so.6[0x200000b9b44]
+/lib64/libc.so.6(cfree+0x8e)[0x200000bdade]
+/bin/bash(free_buffered_stream+0x32)[0x80050e4e]
+/bin/bash(close_buffered_stream+0x1c)[0x80050ea4]
+/bin/bash(unset_bash_input+0x2a)[0x8001c366]
+/bin/bash(make_child+0x1d4)[0x8004115c]
+/bin/bash[0x8002fc3c]
+/bin/bash(execute_command_internal+0x656)[0x8003048e]
+/bin/bash(execute_command+0x5e)[0x80031e1e]
+/bin/bash(execute_command_internal+0x79a)[0x800305d2]
+/bin/bash(execute_command+0x5e)[0x80031e1e]
+/bin/bash(reader_loop+0x270)[0x8001efe0]
+/bin/bash(main+0x1328)[0x8001e960]
+/lib64/libc.so.6(__libc_start_main+0x100)[0x200000592a8]
+/bin/bash(clearerr+0x5e)[0x8001c092]
+
+With this bug fix the commit 0e4a9b59282914fe057ab17027f55123964bc2e2
+"ext2/xip: refuse to change xip flag during remount with busy inodes" can
+be removed again.
+
+Cc: Carsten Otte <cotte@de.ibm.com>
+Cc: Nick Piggin <npiggin@suse.de>
+Cc: Jared Hulbert <jaredeh@gmail.com>
+Cc: <stable@kernel.org>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ mm/filemap_xip.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/filemap_xip.c
++++ b/mm/filemap_xip.c
+@@ -89,8 +89,8 @@ do_xip_mapping_read(struct address_space
+                       }
+               }
+               nr = nr - offset;
+-              if (nr > len)
+-                      nr = len;
++              if (nr > len - copied)
++                      nr = len - copied;
+               error = mapping->a_ops->get_xip_mem(mapping, index, 0,
+                                                       &xip_mem, &xip_pfn);
diff --git a/queue-2.6.29/mm-pass-correct-mm-when-growing-stack.patch b/queue-2.6.29/mm-pass-correct-mm-when-growing-stack.patch
new file mode 100644 (file)
index 0000000..49c1821
--- /dev/null
@@ -0,0 +1,39 @@
+From stable-bounces@linux.kernel.org  Thu Apr 16 21:45:10 2009
+Date: Thu, 16 Apr 2009 21:45:05 GMT
+Message-Id: <200904162145.n3GLj57t018654@hera.kernel.org>
+From: Hugh Dickins <hugh@veritas.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: mm: pass correct mm when growing stack
+
+upstream commit: 05fa199d45c54a9bda7aa3ae6537253d6f097aa9
+
+Tetsuo Handa reports seeing the WARN_ON(current->mm == NULL) in
+security_vm_enough_memory(), when do_execve() is touching the
+target mm's stack, to set up its args and environment.
+
+Yes, a UMH_NO_WAIT or UMH_WAIT_PROC call_usermodehelper() spawns
+an mm-less kernel thread to do the exec.  And in any case, that
+vm_enough_memory check when growing stack ought to be done on the
+target mm, not on the execer's mm (though apart from the warning,
+it only makes a slight tweak to OVERCOMMIT_NEVER behaviour).
+
+Reported-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
+Signed-off-by: Hugh Dickins <hugh@veritas.com>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ mm/mmap.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -1571,7 +1571,7 @@ static int acct_stack_growth(struct vm_a
+        * Overcommit..  This must be the final test, as it will
+        * update security statistics.
+        */
+-      if (security_vm_enough_memory(grow))
++      if (security_vm_enough_memory_mm(mm, grow))
+               return -ENOMEM;
+       /* Ok, everything looks good - let it rip */
diff --git a/queue-2.6.29/netfilter-ip-ip6-arp-_tables-fix-incorrect-loop-detection.patch b/queue-2.6.29/netfilter-ip-ip6-arp-_tables-fix-incorrect-loop-detection.patch
new file mode 100644 (file)
index 0000000..c1479a4
--- /dev/null
@@ -0,0 +1,71 @@
+From stable-bounces@linux.kernel.org  Mon Apr  6 15:33:13 2009
+Message-ID: <49DA2051.5030507@trash.net>
+Date: Mon, 06 Apr 2009 17:31:29 +0200
+From: Patrick McHardy <kaber@trash.net>
+To: stable@kernel.org
+Cc: Netfilter Development Mailinglist <netfilter-devel@vger.kernel.org>,         "David S. Miller" <davem@davemloft.net>
+Subject: netfilter: {ip, ip6, arp}_tables: fix incorrect loop detection
+
+upstream commit: 1f9352ae2253a97b07b34dcf16ffa3b4ca12c558
+
+Commit e1b4b9f ([NETFILTER]: {ip,ip6,arp}_tables: fix exponential worst-case
+search for loops) introduced a regression in the loop detection algorithm,
+causing sporadic incorrectly detected loops.
+
+When a chain has already been visited during the check, it is treated as
+having a standard target containing a RETURN verdict directly at the
+beginning in order to not check it again. The real target of the first
+rule is then incorrectly treated as STANDARD target and checked not to
+contain invalid verdicts.
+
+Fix by making sure the rule does actually contain a standard target.
+
+Based on patch by Francis Dupont <Francis_Dupont@isc.org>
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+---
+ net/ipv4/netfilter/arp_tables.c |    4 +++-
+ net/ipv4/netfilter/ip_tables.c  |    4 +++-
+ net/ipv6/netfilter/ip6_tables.c |    4 +++-
+ 3 files changed, 9 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -374,7 +374,9 @@ static int mark_source_chains(struct xt_
+                           && unconditional(&e->arp)) || visited) {
+                               unsigned int oldpos, size;
+-                              if (t->verdict < -NF_MAX_VERDICT - 1) {
++                              if ((strcmp(t->target.u.user.name,
++                                          ARPT_STANDARD_TARGET) == 0) &&
++                                  t->verdict < -NF_MAX_VERDICT - 1) {
+                                       duprintf("mark_source_chains: bad "
+                                               "negative verdict (%i)\n",
+                                                               t->verdict);
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -496,7 +496,9 @@ mark_source_chains(struct xt_table_info 
+                           && unconditional(&e->ip)) || visited) {
+                               unsigned int oldpos, size;
+-                              if (t->verdict < -NF_MAX_VERDICT - 1) {
++                              if ((strcmp(t->target.u.user.name,
++                                          IPT_STANDARD_TARGET) == 0) &&
++                                  t->verdict < -NF_MAX_VERDICT - 1) {
+                                       duprintf("mark_source_chains: bad "
+                                               "negative verdict (%i)\n",
+                                                               t->verdict);
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -525,7 +525,9 @@ mark_source_chains(struct xt_table_info 
+                           && unconditional(&e->ipv6)) || visited) {
+                               unsigned int oldpos, size;
+-                              if (t->verdict < -NF_MAX_VERDICT - 1) {
++                              if ((strcmp(t->target.u.user.name,
++                                          IP6T_STANDARD_TARGET) == 0) &&
++                                  t->verdict < -NF_MAX_VERDICT - 1) {
+                                       duprintf("mark_source_chains: bad "
+                                               "negative verdict (%i)\n",
+                                                               t->verdict);
diff --git a/queue-2.6.29/nfs-fix-the-xdr-iovec-calculation-in-nfs3_xdr_setaclargs.patch b/queue-2.6.29/nfs-fix-the-xdr-iovec-calculation-in-nfs3_xdr_setaclargs.patch
new file mode 100644 (file)
index 0000000..7bc6496
--- /dev/null
@@ -0,0 +1,36 @@
+From stable-bounces@linux.kernel.org  Tue Apr 21 21:20:13 2009
+Date: Tue, 21 Apr 2009 21:20:08 GMT
+Message-Id: <200904212120.n3LLK8Vg024952@hera.kernel.org>
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: NFS: Fix the XDR iovec calculation in nfs3_xdr_setaclargs
+
+upstream commit: 8340437210390676f687633a80e3748c40885dc8
+
+Commit ae46141ff08f1965b17c531b571953c39ce8b9e2 (NFSv3: Fix posix ACL code)
+introduces a bug in the calculation of the XDR header iovec. In the case
+where we are inlining the acls, we need to adjust the length of the iovec
+req->rq_svec, in addition to adjusting the total buffer length.
+
+Tested-by: Leonardo Chiquitto <leonardo.lists@gmail.com>
+Tested-by: Suresh Jayaraman <sjayaraman@suse.de>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ fs/nfs/nfs3xdr.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/nfs/nfs3xdr.c
++++ b/fs/nfs/nfs3xdr.c
+@@ -716,7 +716,8 @@ nfs3_xdr_setaclargs(struct rpc_rqst *req
+       if (args->npages != 0)
+               xdr_encode_pages(buf, args->pages, 0, args->len);
+       else
+-              req->rq_slen += args->len;
++              req->rq_slen = xdr_adjust_iovec(req->rq_svec,
++                              p + XDR_QUADLEN(args->len));
+       err = nfsacl_encode(buf, base, args->inode,
+                           (args->mask & NFS_ACL) ?
diff --git a/queue-2.6.29/pata_hpt37x-fix-hpt370-dma-timeouts.patch b/queue-2.6.29/pata_hpt37x-fix-hpt370-dma-timeouts.patch
new file mode 100644 (file)
index 0000000..0ef6197
--- /dev/null
@@ -0,0 +1,71 @@
+From 265b7215aed36941620b65ecfff516200fb190c1 Mon Sep 17 00:00:00 2001
+Message-Id: <200904141839.14755.sshtylyov@ru.mvista.com>
+From: Sergei Shtylyov <sshtylyov@ru.mvista.com>
+Date: Tue, 14 Apr 2009 18:39:14 +0400
+Subject: pata_hpt37x: fix HPT370 DMA timeouts
+
+upstream commit: 265b7215aed36941620b65ecfff516200fb190c1
+
+The libata driver copied code from the IDE driver that caused a post-2.4.18
+regression on many HPT370[A] chips -- DMA stopped working completely and
+only caused timeouts.  Now remove hpt370_bmdma_start() for good...
+
+Signed-off-by: Sergei Shtylyov <sshtylyov@ru.mvista.com>
+Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/ata/pata_hpt37x.c |   22 ++--------------------
+ 1 file changed, 2 insertions(+), 20 deletions(-)
+
+--- a/drivers/ata/pata_hpt37x.c
++++ b/drivers/ata/pata_hpt37x.c
+@@ -8,7 +8,7 @@
+  * Copyright (C) 1999-2003            Andre Hedrick <andre@linux-ide.org>
+  * Portions Copyright (C) 2001                Sun Microsystems, Inc.
+  * Portions Copyright (C) 2003                Red Hat Inc
+- * Portions Copyright (C) 2005-2007   MontaVista Software, Inc.
++ * Portions Copyright (C) 2005-2009   MontaVista Software, Inc.
+  *
+  * TODO
+  *    Look into engine reset on timeout errors. Should not be required.
+@@ -24,7 +24,7 @@
+ #include <linux/libata.h>
+ #define DRV_NAME      "pata_hpt37x"
+-#define DRV_VERSION   "0.6.11"
++#define DRV_VERSION   "0.6.12"
+ struct hpt_clock {
+       u8      xfer_speed;
+@@ -445,23 +445,6 @@ static void hpt370_set_dmamode(struct at
+ }
+ /**
+- *    hpt370_bmdma_start              -       DMA engine begin
+- *    @qc: ATA command
+- *
+- *    The 370 and 370A want us to reset the DMA engine each time we
+- *    use it. The 372 and later are fine.
+- */
+-
+-static void hpt370_bmdma_start(struct ata_queued_cmd *qc)
+-{
+-      struct ata_port *ap = qc->ap;
+-      struct pci_dev *pdev = to_pci_dev(ap->host->dev);
+-      pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
+-      udelay(10);
+-      ata_bmdma_start(qc);
+-}
+-
+-/**
+  *    hpt370_bmdma_end                -       DMA engine stop
+  *    @qc: ATA command
+  *
+@@ -598,7 +581,6 @@ static struct scsi_host_template hpt37x_
+ static struct ata_port_operations hpt370_port_ops = {
+       .inherits       = &ata_bmdma_port_ops,
+-      .bmdma_start    = hpt370_bmdma_start,
+       .bmdma_stop     = hpt370_bmdma_stop,
+       .mode_filter    = hpt370_filter,
diff --git a/queue-2.6.29/pci-x86-detect-host-bridge-config-space-size-w-o-using-quirks.patch b/queue-2.6.29/pci-x86-detect-host-bridge-config-space-size-w-o-using-quirks.patch
new file mode 100644 (file)
index 0000000..1dedf25
--- /dev/null
@@ -0,0 +1,95 @@
+From stable-bounces@linux.kernel.org  Wed Apr  1 17:05:15 2009
+Date: Wed, 1 Apr 2009 17:05:11 GMT
+Message-Id: <200904011705.n31H5B0X005562@hera.kernel.org>
+From: Yinghai Lu <yinghai@kernel.org>
+To: stable@kernel.org
+Subject: PCI/x86: detect host bridge config space size w/o using quirks
+
+upstream commit: dfadd9edff498d767008edc6b2a6e86a7a19934d
+
+Many host bridges support a 4k config space, so check them directly
+instead of using quirks to add them.
+
+We only need to do this extra check for host bridges at this point,
+because only host bridges are known to have extended address space
+without also having PCI-X/PCI-E caps.  Other devices with this
+property could be done with quirks (if there are any).
+
+As a bonus, we can remove the quirks for AMD host bridges with family
+10h and 11h since they're not needed any more.
+
+With this patch, we can get correct pci cfg size of new Intel CPUs/IOHs
+with host bridges.
+
+Signed-off-by: Yinghai Lu <yinghai@kernel.org>
+Acked-by: H. Peter Anvin <hpa@zytor.com>
+Reviewed-by: Matthew Wilcox <willy@linux.intel.com>
+Cc: <stable@kernel.org>
+Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ arch/x86/pci/fixup.c |   20 --------------------
+ drivers/pci/probe.c  |    9 ++++++++-
+ 2 files changed, 8 insertions(+), 21 deletions(-)
+
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -495,26 +495,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S
+                         pci_siemens_interrupt_controller);
+ /*
+- * Regular PCI devices have 256 bytes, but AMD Family 10h/11h CPUs have
+- * 4096 bytes configuration space for each function of their processor
+- * configuration space.
+- */
+-static void amd_cpu_pci_cfg_space_size(struct pci_dev *dev)
+-{
+-      dev->cfg_size = pci_cfg_space_size_ext(dev);
+-}
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, amd_cpu_pci_cfg_space_size);
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, amd_cpu_pci_cfg_space_size);
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, amd_cpu_pci_cfg_space_size);
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, amd_cpu_pci_cfg_space_size);
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, amd_cpu_pci_cfg_space_size);
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1300, amd_cpu_pci_cfg_space_size);
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1301, amd_cpu_pci_cfg_space_size);
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1302, amd_cpu_pci_cfg_space_size);
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1303, amd_cpu_pci_cfg_space_size);
+-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1304, amd_cpu_pci_cfg_space_size);
+-
+-/*
+  * SB600: Disable BAR1 on device 14.0 to avoid HPET resources from
+  * confusing the PCI engine:
+  */
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -847,6 +847,11 @@ int pci_cfg_space_size(struct pci_dev *d
+ {
+       int pos;
+       u32 status;
++      u16 class;
++
++      class = dev->class >> 8;
++      if (class == PCI_CLASS_BRIDGE_HOST)
++              return pci_cfg_space_size_ext(dev);
+       pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+       if (!pos) {
+@@ -936,7 +941,6 @@ static struct pci_dev *pci_scan_device(s
+       dev->multifunction = !!(hdr_type & 0x80);
+       dev->vendor = l & 0xffff;
+       dev->device = (l >> 16) & 0xffff;
+-      dev->cfg_size = pci_cfg_space_size(dev);
+       dev->error_state = pci_channel_io_normal;
+       set_pcie_port_type(dev);
+@@ -952,6 +956,9 @@ static struct pci_dev *pci_scan_device(s
+               return NULL;
+       }
++      /* need to have dev->class ready */
++      dev->cfg_size = pci_cfg_space_size(dev);
++
+       return dev;
+ }
diff --git a/queue-2.6.29/posix-timers-fix-rlimit_cpu-fork.patch b/queue-2.6.29/posix-timers-fix-rlimit_cpu-fork.patch
new file mode 100644 (file)
index 0000000..bcba72e
--- /dev/null
@@ -0,0 +1,69 @@
+From stable-bounces@linux.kernel.org  Thu Apr  9 18:20:14 2009
+Date: Thu, 9 Apr 2009 18:20:10 GMT
+Message-Id: <200904091820.n39IKAHb027581@hera.kernel.org>
+From: Oleg Nesterov <oleg@redhat.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: posix-timers: fix RLIMIT_CPU && fork()
+
+upstream commit: 6279a751fe096a21dc7704e918d570d3ff06e769
+
+See http://bugzilla.kernel.org/show_bug.cgi?id=12911
+
+copy_signal() copies signal->rlim, but RLIMIT_CPU is "lost". Because
+posix_cpu_timers_init_group() sets cputime_expires.prof_exp = 0 and thus
+fastpath_timer_check() returns false unless we have other expired cpu timers.
+
+Change copy_signal() to set cputime_expires.prof_exp if we have RLIMIT_CPU.
+Also, set cputimer.running = 1 in that case. This is not strictly necessary,
+but imho makes sense.
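+
+A minimal reproducer in the spirit of the report (hypothetical test, not
+taken from the bugzilla entry):
+
+	struct rlimit rl = { .rlim_cur = 1, .rlim_max = 1 };
+
+	assert(!setrlimit(RLIMIT_CPU, &rl));
+
+	if (fork() == 0)
+		for (;;)
+			;	/* child should be killed after ~1s of CPU
+				 * time, but survives without this patch */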
+
+Reported-by: Peter Lojkin <ia6432@inbox.ru>
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Acked-by: Peter Zijlstra <peterz@infradead.org>
+Cc: Peter Lojkin <ia6432@inbox.ru>
+Cc: Roland McGrath <roland@redhat.com>
+Cc: stable@kernel.org
+LKML-Reference: <20090327000607.GA10104@redhat.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ kernel/fork.c |   13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -808,6 +808,12 @@ static void posix_cpu_timers_init_group(
+       sig->cputime_expires.virt_exp = cputime_zero;
+       sig->cputime_expires.sched_exp = 0;
++      if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
++              sig->cputime_expires.prof_exp =
++                      secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
++              sig->cputimer.running = 1;
++      }
++
+       /* The timer lists. */
+       INIT_LIST_HEAD(&sig->cpu_timers[0]);
+       INIT_LIST_HEAD(&sig->cpu_timers[1]);
+@@ -823,11 +829,8 @@ static int copy_signal(unsigned long clo
+               atomic_inc(&current->signal->live);
+               return 0;
+       }
+-      sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
+-
+-      if (sig)
+-              posix_cpu_timers_init_group(sig);
++      sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
+       tsk->signal = sig;
+       if (!sig)
+               return -ENOMEM;
+@@ -865,6 +868,8 @@ static int copy_signal(unsigned long clo
+       memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
+       task_unlock(current->group_leader);
++      posix_cpu_timers_init_group(sig);
++
+       acct_init_pacct(&sig->pacct);
+       tty_audit_fork(sig);
diff --git a/queue-2.6.29/posix-timers-fix-rlimit_cpu-setitimer.patch b/queue-2.6.29/posix-timers-fix-rlimit_cpu-setitimer.patch
new file mode 100644 (file)
index 0000000..8fc56b2
--- /dev/null
@@ -0,0 +1,61 @@
+From stable-bounces@linux.kernel.org  Thu Apr  9 18:20:13 2009
+Date: Thu, 9 Apr 2009 18:20:08 GMT
+Message-Id: <200904091820.n39IK8NM027551@hera.kernel.org>
+From: Oleg Nesterov <oleg@redhat.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: posix-timers: fix RLIMIT_CPU && setitimer(CPUCLOCK_PROF)
+
+upstream commit: 8f2e586567b1bad72dac7c3810fe9a2ef7117506
+
+update_rlimit_cpu() tries to optimize out set_process_cpu_timer() in the case
+where we already have a CPUCLOCK_PROF timer which should expire first. But it
+uses cputime_lt() instead of cputime_gt().
+
+Test case:
+
+       int main(void)
+       {
+               struct itimerval it = {
+                       .it_value = { .tv_sec = 1000 },
+               };
+
+               assert(!setitimer(ITIMER_PROF, &it, NULL));
+
+               struct rlimit rl = {
+                       .rlim_cur = 1,
+                       .rlim_max = 1,
+               };
+
+               assert(!setrlimit(RLIMIT_CPU, &rl));
+
+               for (;;)
+                       ;
+
+               return 0;
+       }
+
+Without this patch, the task is not killed as RLIMIT_CPU demands.
+
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Acked-by: Peter Zijlstra <peterz@infradead.org>
+Cc: Peter Lojkin <ia6432@inbox.ru>
+Cc: Roland McGrath <roland@redhat.com>
+Cc: stable@kernel.org
+LKML-Reference: <20090327000610.GA10108@redhat.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ kernel/posix-cpu-timers.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/posix-cpu-timers.c
++++ b/kernel/posix-cpu-timers.c
+@@ -18,7 +18,7 @@ void update_rlimit_cpu(unsigned long rli
+       cputime = secs_to_cputime(rlim_new);
+       if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
+-          cputime_lt(current->signal->it_prof_expires, cputime)) {
++          cputime_gt(current->signal->it_prof_expires, cputime)) {
+               spin_lock_irq(&current->sighand->siglock);
+               set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
+               spin_unlock_irq(&current->sighand->siglock);
diff --git a/queue-2.6.29/posixtimers-sched-fix-posix-clock-monotonicity.patch b/queue-2.6.29/posixtimers-sched-fix-posix-clock-monotonicity.patch
new file mode 100644 (file)
index 0000000..d1b7295
--- /dev/null
@@ -0,0 +1,163 @@
+From stable-bounces@linux.kernel.org  Thu Apr  9 18:20:18 2009
+Date: Thu, 9 Apr 2009 18:20:12 GMT
+Message-Id: <200904091820.n39IKCrk027615@hera.kernel.org>
+From: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: posixtimers, sched: Fix posix clock monotonicity
+
+upstream commit: c5f8d99585d7b5b7e857fabf8aefd0174903a98c
+
+Impact: Regression fix (against clock_gettime() backwarding bug)
+
+This patch re-introduces a couple of functions, task_sched_runtime
+and thread_group_sched_runtime, which were once removed at the
+time of 2.6.28-rc1.
+
+These functions protect the sampling of thread/process clock with
+rq lock.  The rq lock is required so that rq->clock is not updated during
+the sampling.
+
+i.e.
+  The clock_gettime() may return
+   ((accounted runtime before update) + (delta after update))
+  that is less than what it should be.
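+
+The user-visible symptom, roughly (the sched clock backs
+CLOCK_THREAD_CPUTIME_ID and CLOCK_PROCESS_CPUTIME_ID):
+
+	struct timespec t1, t2;
+
+	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &t1);
+	clock_gettime(CLOCK_THREAD_CPUTIME_ID, &t2);
+	/* without the rq lock around the sampling, t2 could occasionally
+	 * read back smaller than t1, i.e. the clock appeared to go backwards */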
+
+v2 -> v3:
+       - Rename static helper function __task_delta_exec()
+         to do_task_delta_exec() since -tip tree already has
+         a __task_delta_exec() of different version.
+
+v1 -> v2:
+       - Revises comments of function and patch description.
+       - Add note about accuracy of thread group's runtime.
+
+Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
+Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: stable@kernel.org  [2.6.28.x][2.6.29.x]
+LKML-Reference: <49D1CC93.4080401@jp.fujitsu.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ kernel/posix-cpu-timers.c |    7 ++--
+ kernel/sched.c            |   65 ++++++++++++++++++++++++++++++++++++++++------
+ 2 files changed, 61 insertions(+), 11 deletions(-)
+
+--- a/kernel/posix-cpu-timers.c
++++ b/kernel/posix-cpu-timers.c
+@@ -224,7 +224,7 @@ static int cpu_clock_sample(const clocki
+               cpu->cpu = virt_ticks(p);
+               break;
+       case CPUCLOCK_SCHED:
+-              cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p);
++              cpu->sched = task_sched_runtime(p);
+               break;
+       }
+       return 0;
+@@ -305,18 +305,19 @@ static int cpu_clock_sample_group(const 
+ {
+       struct task_cputime cputime;
+-      thread_group_cputime(p, &cputime);
+       switch (CPUCLOCK_WHICH(which_clock)) {
+       default:
+               return -EINVAL;
+       case CPUCLOCK_PROF:
++              thread_group_cputime(p, &cputime);
+               cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+               break;
+       case CPUCLOCK_VIRT:
++              thread_group_cputime(p, &cputime);
+               cpu->cpu = cputime.utime;
+               break;
+       case CPUCLOCK_SCHED:
+-              cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
++              cpu->sched = thread_group_sched_runtime(p);
+               break;
+       }
+       return 0;
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -4134,9 +4134,25 @@ DEFINE_PER_CPU(struct kernel_stat, kstat
+ EXPORT_PER_CPU_SYMBOL(kstat);
+ /*
+- * Return any ns on the sched_clock that have not yet been banked in
++ * Return any ns on the sched_clock that have not yet been accounted in
+  * @p in case that task is currently running.
++ *
++ * Called with task_rq_lock() held on @rq.
+  */
++static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
++{
++      u64 ns = 0;
++
++      if (task_current(rq, p)) {
++              update_rq_clock(rq);
++              ns = rq->clock - p->se.exec_start;
++              if ((s64)ns < 0)
++                      ns = 0;
++      }
++
++      return ns;
++}
++
+ unsigned long long task_delta_exec(struct task_struct *p)
+ {
+       unsigned long flags;
+@@ -4144,16 +4160,49 @@ unsigned long long task_delta_exec(struc
+       u64 ns = 0;
+       rq = task_rq_lock(p, &flags);
++      ns = do_task_delta_exec(p, rq);
++      task_rq_unlock(rq, &flags);
+-      if (task_current(rq, p)) {
+-              u64 delta_exec;
++      return ns;
++}
+-              update_rq_clock(rq);
+-              delta_exec = rq->clock - p->se.exec_start;
+-              if ((s64)delta_exec > 0)
+-                      ns = delta_exec;
+-      }
++/*
++ * Return accounted runtime for the task.
++ * In case the task is currently running, return the runtime plus current's
++ * pending runtime that have not been accounted yet.
++ */
++unsigned long long task_sched_runtime(struct task_struct *p)
++{
++      unsigned long flags;
++      struct rq *rq;
++      u64 ns = 0;
++
++      rq = task_rq_lock(p, &flags);
++      ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
++      task_rq_unlock(rq, &flags);
++
++      return ns;
++}
++/*
++ * Return sum_exec_runtime for the thread group.
++ * In case the task is currently running, return the sum plus current's
++ * pending runtime that have not been accounted yet.
++ *
++ * Note that the thread group might have other running tasks as well,
++ * so the return value not includes other pending runtime that other
++ * running tasks might have.
++ */
++unsigned long long thread_group_sched_runtime(struct task_struct *p)
++{
++      struct task_cputime totals;
++      unsigned long flags;
++      struct rq *rq;
++      u64 ns;
++
++      rq = task_rq_lock(p, &flags);
++      thread_group_cputime(p, &totals);
++      ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
+       task_rq_unlock(rq, &flags);
+       return ns;
diff --git a/queue-2.6.29/powerpc-fix-data-corrupting-bug-in-__futex_atomic_op.patch b/queue-2.6.29/powerpc-fix-data-corrupting-bug-in-__futex_atomic_op.patch
new file mode 100644 (file)
index 0000000..ca5bdb1
--- /dev/null
@@ -0,0 +1,61 @@
+From stable-bounces@linux.kernel.org  Wed Apr 15 17:25:10 2009
+Date: Wed, 15 Apr 2009 17:25:05 GMT
+Message-Id: <200904151725.n3FHP578026707@hera.kernel.org>
+From: Paul Mackerras <paulus@samba.org>
+To: jejb@kernel.org, stable@kernel.org
+Subject: powerpc: Fix data-corrupting bug in __futex_atomic_op
+
+upstream commit: 306a82881b14d950d59e0b59a55093a07d82aa9a
+
+Richard Henderson pointed out that the powerpc __futex_atomic_op has a
+bug: it will write the wrong value if the stwcx. fails and it has to
+retry the lwarx/stwcx. loop, since 'oparg' will have been overwritten
+by the result from the first time around the loop.  This happens
+because it uses the same register for 'oparg' (an input) as it uses
+for the result.
+
+This fixes it by using separate registers for 'oparg' and 'ret'.
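+
+Roughly, the generated loop for e.g. FUTEX_OP_ADD looked like this (operand
+numbers follow the constraint order above; the exception-table plumbing of
+the real macro is left out):
+
+	1:	lwarx	%0,0,%2		# %0 = oldval = *uaddr
+		add	%1,%0,%1	# %1 = oldval + oparg; oparg is overwritten
+		stwcx.	%1,0,%2		# may fail, and then ...
+		bne-	1b		# ... the retry adds the previous result
+					# instead of oparg and stores a bad value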
+
+Cc: stable@kernel.org
+Signed-off-by: Paul Mackerras <paulus@samba.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ arch/powerpc/include/asm/futex.h |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/arch/powerpc/include/asm/futex.h
++++ b/arch/powerpc/include/asm/futex.h
+@@ -27,7 +27,7 @@
+       PPC_LONG "1b,4b,2b,4b\n" \
+       ".previous" \
+       : "=&r" (oldval), "=&r" (ret) \
+-      : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
++      : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
+       : "cr0", "memory")
+ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+@@ -47,19 +47,19 @@ static inline int futex_atomic_op_inuser
+       switch (op) {
+       case FUTEX_OP_SET:
+-              __futex_atomic_op("", ret, oldval, uaddr, oparg);
++              __futex_atomic_op("mr %1,%4\n", ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_ADD:
+-              __futex_atomic_op("add %1,%0,%1\n", ret, oldval, uaddr, oparg);
++              __futex_atomic_op("add %1,%0,%4\n", ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_OR:
+-              __futex_atomic_op("or %1,%0,%1\n", ret, oldval, uaddr, oparg);
++              __futex_atomic_op("or %1,%0,%4\n", ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_ANDN:
+-              __futex_atomic_op("andc %1,%0,%1\n", ret, oldval, uaddr, oparg);
++              __futex_atomic_op("andc %1,%0,%4\n", ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_XOR:
+-              __futex_atomic_op("xor %1,%0,%1\n", ret, oldval, uaddr, oparg);
++              __futex_atomic_op("xor %1,%0,%4\n", ret, oldval, uaddr, oparg);
+               break;
+       default:
+               ret = -ENOSYS;
diff --git a/queue-2.6.29/r8169-reset-intrstatus-after-chip-reset.patch b/queue-2.6.29/r8169-reset-intrstatus-after-chip-reset.patch
new file mode 100644 (file)
index 0000000..8600a52
--- /dev/null
@@ -0,0 +1,54 @@
+From stable-bounces@linux.kernel.org  Mon Apr  6 21:32:59 2009
+Date: Mon, 6 Apr 2009 23:35:13 +0200
+From: Francois Romieu <romieu@fr.zoreil.com>
+To: stable@kernel.org
+Message-ID: <20090406213513.GA16180@electric-eye.fr.zoreil.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>, Josep <josep.puigdemont@gmail.com>, Karsten Wiese <fzu@wemgehoertderstaat.de>, "David S. Miller" <davem@davemloft.net>
+Subject: r8169: Reset IntrStatus after chip reset
+
+upstream commit: d78ad8cbfe73ad568de38814a75e9c92ad0a907c
+
+Original comment (Karsten):
+On an MSI MS-6702E mainboard, when in rtl8169_init_one() for the first time
+after the BIOS has run, IntrStatus reads 5 after the chip has been reset.
+IntrStatus should equal 0 there, so the patch changes the IntrStatus reset to
+happen after the chip reset instead of before.
+
+Remark (Francois):
+Assuming that the loglevel of the driver is increased above NETIF_MSG_INTR,
+the bug reveals itself with a typical "interrupt 0025 in poll" message
+at startup. In retrospect, the message should have been read as a hint of
+an unexpected hardware state several months ago :o(
+
+Fixes (at least part of) https://bugzilla.redhat.com/show_bug.cgi?id=460747
+
+Signed-off-by: Karsten Wiese <fzu@wemgehoertderstaat.de>
+Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
+Tested-by: Josep <josep.puigdemont@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/net/r8169.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/r8169.c
++++ b/drivers/net/r8169.c
+@@ -2075,8 +2075,7 @@ rtl8169_init_one(struct pci_dev *pdev, c
+       if (!tp->pcie_cap && netif_msg_probe(tp))
+               dev_info(&pdev->dev, "no PCI Express capability\n");
+-      /* Unneeded ? Don't mess with Mrs. Murphy. */
+-      rtl8169_irq_mask_and_ack(ioaddr);
++      RTL_W16(IntrMask, 0x0000);
+       /* Soft reset the chip. */
+       RTL_W8(ChipCmd, CmdReset);
+@@ -2088,6 +2087,8 @@ rtl8169_init_one(struct pci_dev *pdev, c
+               msleep_interruptible(1);
+       }
++      RTL_W16(IntrStatus, 0xffff);
++
+       /* Identify chip attached to board */
+       rtl8169_get_mac_version(tp, ioaddr);
diff --git a/queue-2.6.29/revert-console-ascii-glyph-1-1-mapping.patch b/queue-2.6.29/revert-console-ascii-glyph-1-1-mapping.patch
new file mode 100644 (file)
index 0000000..cb90d58
--- /dev/null
@@ -0,0 +1,36 @@
+From stable-bounces@linux.kernel.org  Sun Apr 19 18:05:07 2009
+Date: Sun, 19 Apr 2009 18:05:02 GMT
+Message-Id: <200904191805.n3JI52g6018434@hera.kernel.org>
+From: Samuel Thibault <samuel.thibault@ens-lyon.org>
+To: jejb@kernel.org, stable@kernel.org
+Subject: Revert "console ASCII glyph 1:1 mapping"
+
+upstream commit: c0b7988200a82290287c6f4cd49585007f73175a
+
+This reverts commit 1c55f18717304100a5f624c923f7cb6511b4116d.
+
+Ingo Brueckl was assuming that reverting to 1:1 mapping for chars >= 128
+was not useful, but it happens to be: due to the limitations of the
+Linux console, when a blind user wants to read BIG5 on it, he has no
+other way than to load a font without an SFM and let the 1:1 mapping permit
+the screen reader to get the BIG5 encoding.
+
+Signed-off-by: Samuel Thibault <samuel.thibault@ens-lyon.org>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/char/vt.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/char/vt.c
++++ b/drivers/char/vt.c
+@@ -2271,7 +2271,7 @@ rescan_last_byte:
+                                   continue; /* nothing to display */
+                               }
+                               /* Glyph not found */
+-                              if ((!(vc->vc_utf && !vc->vc_disp_ctrl) && c < 128) && !(c & ~charmask)) {
++                              if ((!(vc->vc_utf && !vc->vc_disp_ctrl) || c < 128) && !(c & ~charmask)) {
+                                   /* In legacy mode use the glyph we get by a 1:1 mapping.
+                                      This would make absolutely no sense with Unicode in mind,
+                                      but do this for ASCII characters since a font may lack
diff --git a/queue-2.6.29/rt2x00-fix-slab-corruption-during-rmmod.patch b/queue-2.6.29/rt2x00-fix-slab-corruption-during-rmmod.patch
new file mode 100644 (file)
index 0000000..b8fa850
--- /dev/null
@@ -0,0 +1,205 @@
+From stable-bounces@linux.kernel.org  Wed Apr  1 19:26:08 2009
+From: Ivo van Doorn <ivdoorn@gmail.com>
+To: stable@kernel.org
+Date: Wed, 1 Apr 2009 21:18:11 +0200
+Cc: Arnaud Patard <apatard@mandriva.com>, linux-wireless@vger.kernel.org, "John W. Linville" <linville@tuxdriver.com>, Gertjan van Wingerde <gwingerde@gmail.com>
+Subject: rt2x00: Fix SLAB corruption during rmmod
+
+From: Gertjan van Wingerde <gwingerde@gmail.com>
+
+At rmmod stage, the code path is the following one:
+
+rt2x00lib_remove_dev
+  -> rt2x00lib_uninitialize()
+       -> rt2x00rfkill_unregister()
+            -> rfkill_unregister()
+       -> rt2x00rfkill_free()
+            -> rfkill_free()
+
+The problem is that rfkill_free() should not be called after rfkill_register(),
+otherwise put_device(&rfkill->dev) will be called twice. This patch
+fixes this by only calling rt2x00rfkill_free() when rt2x00rfkill_register()
+hasn't been called or has failed.
+
+This patch is for 2.6.29 only. The code in question has completely disappeared
+in 2.6.30 and does not contain this bug.
+
+Signed-off-by: Gertjan van Wingerde <gwingerde@gmail.com>
+Tested-by: Arnaud Patard <apatard@mandriva.com>
+Signed-off-by: Ivo van Doorn <IvDoorn@gmail.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/net/wireless/rt2x00/rt2x00.h       |    3 -
+ drivers/net/wireless/rt2x00/rt2x00dev.c    |    2 
+ drivers/net/wireless/rt2x00/rt2x00lib.h    |   10 ---
+ drivers/net/wireless/rt2x00/rt2x00rfkill.c |   86 +++++++++++++----------------
+ 4 files changed, 40 insertions(+), 61 deletions(-)
+
+--- a/drivers/net/wireless/rt2x00/rt2x00.h
++++ b/drivers/net/wireless/rt2x00/rt2x00.h
+@@ -687,8 +687,7 @@ struct rt2x00_dev {
+        */
+ #ifdef CONFIG_RT2X00_LIB_RFKILL
+       unsigned long rfkill_state;
+-#define RFKILL_STATE_ALLOCATED                1
+-#define RFKILL_STATE_REGISTERED               2
++#define RFKILL_STATE_REGISTERED               1
+       struct rfkill *rfkill;
+       struct delayed_work rfkill_work;
+ #endif /* CONFIG_RT2X00_LIB_RFKILL */
+--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
++++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
+@@ -1105,7 +1105,6 @@ int rt2x00lib_probe_dev(struct rt2x00_de
+        * Register extra components.
+        */
+       rt2x00leds_register(rt2x00dev);
+-      rt2x00rfkill_allocate(rt2x00dev);
+       rt2x00debug_register(rt2x00dev);
+       set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
+@@ -1137,7 +1136,6 @@ void rt2x00lib_remove_dev(struct rt2x00_
+        * Free extra components
+        */
+       rt2x00debug_deregister(rt2x00dev);
+-      rt2x00rfkill_free(rt2x00dev);
+       rt2x00leds_unregister(rt2x00dev);
+       /*
+--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
++++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
+@@ -260,8 +260,6 @@ static inline void rt2x00crypto_rx_inser
+ #ifdef CONFIG_RT2X00_LIB_RFKILL
+ void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev);
+ void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev);
+-void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev);
+-void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev);
+ #else
+ static inline void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
+ {
+@@ -270,14 +268,6 @@ static inline void rt2x00rfkill_register
+ static inline void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev)
+ {
+ }
+-
+-static inline void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
+-{
+-}
+-
+-static inline void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev)
+-{
+-}
+ #endif /* CONFIG_RT2X00_LIB_RFKILL */
+ /*
+--- a/drivers/net/wireless/rt2x00/rt2x00rfkill.c
++++ b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
+@@ -94,14 +94,50 @@ static void rt2x00rfkill_poll(struct wor
+                          &rt2x00dev->rfkill_work, RFKILL_POLL_INTERVAL);
+ }
++static int rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
++{
++      struct device *dev = wiphy_dev(rt2x00dev->hw->wiphy);
++
++      rt2x00dev->rfkill = rfkill_allocate(dev, RFKILL_TYPE_WLAN);
++      if (!rt2x00dev->rfkill)
++              return -ENOMEM;
++
++      rt2x00dev->rfkill->name = rt2x00dev->ops->name;
++      rt2x00dev->rfkill->data = rt2x00dev;
++      rt2x00dev->rfkill->toggle_radio = rt2x00rfkill_toggle_radio;
++      if (test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) {
++              rt2x00dev->rfkill->get_state = rt2x00rfkill_get_state;
++              rt2x00dev->rfkill->state =
++                      rt2x00dev->ops->lib->rfkill_poll(rt2x00dev) ?
++                          RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED;
++      } else {
++              rt2x00dev->rfkill->state = RFKILL_STATE_UNBLOCKED;
++      }
++
++      INIT_DELAYED_WORK(&rt2x00dev->rfkill_work, rt2x00rfkill_poll);
++
++      return 0;
++}
++
++static void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev)
++{
++      rfkill_free(rt2x00dev->rfkill);
++      rt2x00dev->rfkill = NULL;
++}
++
+ void rt2x00rfkill_register(struct rt2x00_dev *rt2x00dev)
+ {
+-      if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state) ||
+-          test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
++      if (test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
++              return;
++
++      if (rt2x00rfkill_allocate(rt2x00dev)) {
++              ERROR(rt2x00dev, "Failed to allocate rfkill handler.\n");
+               return;
++      }
+       if (rfkill_register(rt2x00dev->rfkill)) {
+               ERROR(rt2x00dev, "Failed to register rfkill handler.\n");
++              rt2x00rfkill_free(rt2x00dev);
+               return;
+       }
+@@ -117,8 +153,7 @@ void rt2x00rfkill_register(struct rt2x00
+ void rt2x00rfkill_unregister(struct rt2x00_dev *rt2x00dev)
+ {
+-      if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state) ||
+-          !test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
++      if (!test_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state))
+               return;
+       cancel_delayed_work_sync(&rt2x00dev->rfkill_work);
+@@ -127,46 +162,3 @@ void rt2x00rfkill_unregister(struct rt2x
+       __clear_bit(RFKILL_STATE_REGISTERED, &rt2x00dev->rfkill_state);
+ }
+-
+-void rt2x00rfkill_allocate(struct rt2x00_dev *rt2x00dev)
+-{
+-      struct device *dev = wiphy_dev(rt2x00dev->hw->wiphy);
+-
+-      if (test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
+-              return;
+-
+-      rt2x00dev->rfkill = rfkill_allocate(dev, RFKILL_TYPE_WLAN);
+-      if (!rt2x00dev->rfkill) {
+-              ERROR(rt2x00dev, "Failed to allocate rfkill handler.\n");
+-              return;
+-      }
+-
+-      __set_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state);
+-
+-      rt2x00dev->rfkill->name = rt2x00dev->ops->name;
+-      rt2x00dev->rfkill->data = rt2x00dev;
+-      rt2x00dev->rfkill->toggle_radio = rt2x00rfkill_toggle_radio;
+-      if (test_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags)) {
+-              rt2x00dev->rfkill->get_state = rt2x00rfkill_get_state;
+-              rt2x00dev->rfkill->state =
+-                      rt2x00dev->ops->lib->rfkill_poll(rt2x00dev) ?
+-                          RFKILL_STATE_SOFT_BLOCKED : RFKILL_STATE_UNBLOCKED;
+-      } else {
+-              rt2x00dev->rfkill->state = RFKILL_STATE_UNBLOCKED;
+-      }
+-
+-      INIT_DELAYED_WORK(&rt2x00dev->rfkill_work, rt2x00rfkill_poll);
+-
+-      return;
+-}
+-
+-void rt2x00rfkill_free(struct rt2x00_dev *rt2x00dev)
+-{
+-      if (!test_bit(RFKILL_STATE_ALLOCATED, &rt2x00dev->rfkill_state))
+-              return;
+-
+-      cancel_delayed_work_sync(&rt2x00dev->rfkill_work);
+-
+-      rfkill_free(rt2x00dev->rfkill);
+-      rt2x00dev->rfkill = NULL;
+-}
diff --git a/queue-2.6.29/sched-do-not-count-frozen-tasks-toward-load.patch b/queue-2.6.29/sched-do-not-count-frozen-tasks-toward-load.patch
new file mode 100644 (file)
index 0000000..9f2c908
--- /dev/null
@@ -0,0 +1,52 @@
+From stable-bounces@linux.kernel.org  Thu Apr  9 18:20:08 2009
+Date: Thu, 9 Apr 2009 18:20:02 GMT
+Message-Id: <200904091820.n39IK2bC027464@hera.kernel.org>
+From: Nathan Lynch <ntl@pobox.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: sched: do not count frozen tasks toward load
+
+upstream commit: e3c8ca8336707062f3f7cb1cd7e6b3c753baccdd
+
+Freezing tasks via the cgroup freezer causes the load average to climb
+because the freezer's current implementation puts frozen tasks in
+uninterruptible sleep (D state).
+
+Some applications which perform job-scheduling functions consult the
+load average when making decisions.  If a cgroup is frozen, the load
+average does not provide a useful measure of the system's utilization
+to such applications.  This is especially inconvenient if the job
+scheduler employs the cgroup freezer as a mechanism for preempting low
+priority jobs.  Contrast this with using SIGSTOP for the same purpose:
+the stopped tasks do not count toward system load.
+
+Change task_contributes_to_load() to return false if the task is
+frozen.  This results in /proc/loadavg behavior that better meets
+users' expectations.
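+
+As a small user-space sketch of the predicate change (the flag values
+below are stand-ins, not the kernel's):
+
+#include <stdbool.h>
+#include <stdio.h>
+
+#define TASK_UNINTERRUPTIBLE  0x02      /* "D" state (illustrative value) */
+#define PF_FROZEN             0x10      /* frozen task (illustrative value) */
+
+static bool contributes_to_load(unsigned int state, unsigned int flags)
+{
+        /* old: any D-state task counted; new: frozen tasks are excluded */
+        return (state & TASK_UNINTERRUPTIBLE) && !(flags & PF_FROZEN);
+}
+
+int main(void)
+{
+        printf("D, not frozen: %d\n",
+               contributes_to_load(TASK_UNINTERRUPTIBLE, 0));          /* 1 */
+        printf("D, frozen:     %d\n",
+               contributes_to_load(TASK_UNINTERRUPTIBLE, PF_FROZEN));  /* 0 */
+        return 0;
+}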
+
+Signed-off-by: Nathan Lynch <ntl@pobox.com>
+Acked-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Nigel Cunningham <nigel@tuxonice.net>
+Tested-by: Nigel Cunningham <nigel@tuxonice.net>
+Cc: <stable@kernel.org>
+Cc: containers@lists.linux-foundation.org
+Cc: linux-pm@lists.linux-foundation.org
+Cc: Matt Helsley <matthltc@us.ibm.com>
+LKML-Reference: <20090408194512.47a99b95@manatee.lan>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ include/linux/sched.h |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -202,7 +202,8 @@ extern unsigned long long time_sync_thre
+ #define task_is_stopped_or_traced(task)       \
+                       ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+ #define task_contributes_to_load(task)        \
+-                              ((task->state & TASK_UNINTERRUPTIBLE) != 0)
++                              ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
++                               (task->flags & PF_FROZEN) == 0)
+ #define __set_task_state(tsk, state_value)            \
+       do { (tsk)->state = (state_value); } while (0)
diff --git a/queue-2.6.29/scsi-libiscsi-fix-iscsi-pool-error-path-fixlet.patch b/queue-2.6.29/scsi-libiscsi-fix-iscsi-pool-error-path-fixlet.patch
new file mode 100644 (file)
index 0000000..183c4f6
--- /dev/null
@@ -0,0 +1,45 @@
+From fd6e1c14b73dbab89cb76af895d5612e4a8b5522 Mon Sep 17 00:00:00 2001
+Message-Id: <200904081122.38831.jdelvare@suse.de>
+From: Jean Delvare <jdelvare@suse.de>
+Date: Wed, 1 Apr 2009 13:11:29 -0500
+Subject: SCSI: libiscsi: fix iscsi pool error path
+
+upstream commit: fd6e1c14b73dbab89cb76af895d5612e4a8b5522
+
+On Monday, 30 March 2009, Chris Wright wrote:
+> q->queue could be ERR_PTR(-ENOMEM) which will break unwinding
+> on error.  Make iscsi_pool_free more defensive.
+>
+
+Making the freeing of q->queue dependent on q->pool being set looks
+really weird (although it is correct at the moment), but this seems
+to be fixable in a much simpler way.
+
+This has the benefit that only the error case is slowed down. In both
+cases we have a problem if q->queue contains an error value other than
+-ENOMEM. Apparently this can't happen today, but it doesn't feel
+right to assume this will always be true. Maybe it's the right time
+to fix this as well.
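+
+As a sketch of the pointer-encoded-errno convention the new test relies
+on (a user-space re-statement, not the kernel headers): comparing against
+one specific ERR_PTR() value misses every other error, while IS_ERR()
+catches them all.
+
+#include <stdio.h>
+
+#define MAX_ERRNO 4095
+
+static inline void *ERR_PTR(long error)        { return (void *)error; }
+static inline long PTR_ERR(const void *ptr)    { return (long)ptr; }
+static inline int IS_ERR(const void *ptr)
+{
+        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
+}
+
+int main(void)
+{
+        void *q = ERR_PTR(-22);         /* some error other than -ENOMEM (-12) */
+
+        printf("old check hits: %d\n", q == ERR_PTR(-12));      /* 0: missed */
+        printf("IS_ERR hits:    %d\n", IS_ERR(q));              /* 1: caught */
+        if (IS_ERR(q))
+                printf("errno: %ld\n", PTR_ERR(q));             /* -22 */
+        return 0;
+}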
+
+Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
+Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
+[chrisw: this is a fixlet to f474a37b, also in -stable]
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/scsi/libiscsi.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1948,8 +1948,10 @@ iscsi_pool_init(struct iscsi_pool *q, in
+       q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
+                             GFP_KERNEL, NULL);
+-      if (q->queue == ERR_PTR(-ENOMEM))
++      if (IS_ERR(q->queue)) {
++              q->queue = NULL;
+               goto enomem;
++      }
+       for (i = 0; i < max; i++) {
+               q->pool[i] = kzalloc(item_size, GFP_KERNEL);
diff --git a/queue-2.6.29/scsi-libiscsi-fix-iscsi-pool-error-path.patch b/queue-2.6.29/scsi-libiscsi-fix-iscsi-pool-error-path.patch
new file mode 100644 (file)
index 0000000..0bee88f
--- /dev/null
@@ -0,0 +1,45 @@
+From f474a37bc48667595b5653a983b635c95ed82a3b Mon Sep 17 00:00:00 2001
+Message-Id: <200903291119.42080.jdelvare@suse.de>
+From: Jean Delvare <jdelvare@suse.de>
+Date: Thu, 5 Mar 2009 14:45:55 -0600
+Subject: SCSI: libiscsi: fix iscsi pool error path
+
+upstream commit: f474a37bc48667595b5653a983b635c95ed82a3b
+
+Memory freeing in iscsi_pool_free() looks wrong to me. Either q->pool
+can be NULL and this should be tested before dereferencing it, or it
+can't be NULL and it shouldn't be tested at all. As far as I can see,
+the only case where q->pool is NULL is on early error in
+iscsi_pool_init(). One possible way to fix the bug is thus to not
+call iscsi_pool_free() in this case (nothing needs to be freed anyway)
+and then we can get rid of the q->pool check.
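+
+A small sketch of the resulting shape, under illustrative names: fail the
+very first allocation with a direct return so the free routine never sees
+a NULL pool, and drop the NULL test because, like free(NULL) in ISO C,
+kfree(NULL) is a no-op.
+
+#include <stdlib.h>
+
+struct pool { void **slots; void *queue; };
+
+static int pool_init(struct pool *q, size_t n)
+{
+        q->slots = calloc(n, sizeof(void *));
+        if (!q->slots)
+                return -1;      /* early return: nothing allocated yet */
+        /* ... later failures can still unwind through pool_free() ... */
+        return 0;
+}
+
+static void pool_free(struct pool *q)
+{
+        free(q->slots);         /* no "if (q->slots)" guard needed */
+        free(q->queue);         /* freeing NULL is defined to do nothing */
+}
+
+int main(void)
+{
+        struct pool q = { 0 };
+
+        if (pool_init(&q, 16) == 0)
+                pool_free(&q);
+        return 0;
+}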
+
+Signed-off-by: Jean Delvare <jdelvare@suse.de>
+Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
+Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/scsi/libiscsi.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/scsi/libiscsi.c
++++ b/drivers/scsi/libiscsi.c
+@@ -1944,7 +1944,7 @@ iscsi_pool_init(struct iscsi_pool *q, in
+               num_arrays++;
+       q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
+       if (q->pool == NULL)
+-              goto enomem;
++              return -ENOMEM;
+       q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
+                             GFP_KERNEL, NULL);
+@@ -1979,8 +1979,7 @@ void iscsi_pool_free(struct iscsi_pool *
+       for (i = 0; i < q->max; i++)
+               kfree(q->pool[i]);
+-      if (q->pool)
+-              kfree(q->pool);
++      kfree(q->pool);
+       kfree(q->queue);
+ }
+ EXPORT_SYMBOL_GPL(iscsi_pool_free);
diff --git a/queue-2.6.29/scsi-mpt-suppress-debugobjects-warning.patch b/queue-2.6.29/scsi-mpt-suppress-debugobjects-warning.patch
new file mode 100644 (file)
index 0000000..62f1862
--- /dev/null
@@ -0,0 +1,77 @@
+From stable-bounces@linux.kernel.org  Tue Apr 21 21:20:08 2009
+Date: Tue, 21 Apr 2009 21:20:02 GMT
+Message-Id: <200904212120.n3LLK2IY024865@hera.kernel.org>
+From: Eric Paris <eparis@parisplace.org>
+To: jejb@kernel.org, stable@kernel.org
+Subject: scsi: mpt: suppress debugobjects warning
+
+upstream commit: b298cecb3deddf76d60022473a57f1cb776cbdcd
+
+Addresses http://bugzilla.kernel.org/show_bug.cgi?id=13133
+
+ODEBUG: object is on stack, but not annotated
+------------[ cut here ]------------
+WARNING: at lib/debugobjects.c:253 __debug_object_init+0x1f3/0x276()
+Hardware name: VMware Virtual Platform
+Modules linked in: mptspi(+) mptscsih mptbase scsi_transport_spi ext3 jbd mbcache
+Pid: 540, comm: insmod Not tainted 2.6.28-mm1 #2
+Call Trace:
+ [<c042c51c>] warn_slowpath+0x74/0x8a
+ [<c0469600>] ? start_critical_timing+0x96/0xb7
+ [<c060c8ea>] ? _spin_unlock_irqrestore+0x2f/0x3c
+ [<c0446fad>] ? trace_hardirqs_off_caller+0x18/0xaf
+ [<c044704f>] ? trace_hardirqs_off+0xb/0xd
+ [<c060c8ea>] ? _spin_unlock_irqrestore+0x2f/0x3c
+ [<c042cb84>] ? release_console_sem+0x1a5/0x1ad
+ [<c05013e6>] __debug_object_init+0x1f3/0x276
+ [<c0501494>] debug_object_init+0x13/0x17
+ [<c0433c56>] init_timer+0x10/0x1a
+ [<e08e5b54>] mpt_config+0x1c1/0x2b7 [mptbase]
+ [<e08e3b82>] ? kmalloc+0x8/0xa [mptbase]
+ [<e08e3b82>] ? kmalloc+0x8/0xa [mptbase]
+ [<e08e6fa2>] mpt_do_ioc_recovery+0x950/0x1212 [mptbase]
+ [<c04496c2>] ? __lock_acquire+0xa69/0xacc
+ [<c060c8f1>] ? _spin_unlock_irqrestore+0x36/0x3c
+ [<c060c3af>] ? _spin_unlock_irq+0x22/0x26
+ [<c04f2d8b>] ? string+0x2b/0x76
+ [<c04f310e>] ? vsnprintf+0x338/0x7b3
+ [<c04496c2>] ? __lock_acquire+0xa69/0xacc
+ [<c060c8ea>] ? _spin_unlock_irqrestore+0x2f/0x3c
+ [<c04496c2>] ? __lock_acquire+0xa69/0xacc
+ [<c044897d>] ? debug_check_no_locks_freed+0xeb/0x105
+ [<c060c8f1>] ? _spin_unlock_irqrestore+0x36/0x3c
+ [<c04488bc>] ? debug_check_no_locks_freed+0x2a/0x105
+ [<c0446b8c>] ? lock_release_holdtime+0x43/0x48
+ [<c043f742>] ? up_read+0x16/0x29
+ [<c05076f8>] ? pci_get_slot+0x66/0x72
+ [<e08e89ca>] mpt_attach+0x881/0x9b1 [mptbase]
+ [<e091c8e5>] mptspi_probe+0x11/0x354 [mptspi]
+
+Noticing that every caller of mpt_config has its CONFIGPARMS struct
+declared on the stack, and thus &pCfg->timer is always on the stack, I
+changed init_timer() to init_timer_on_stack() and the warning seems to
+have gone away.
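+
+A hedged sketch of the rule the warning enforces, against the timer API of
+this era (the function names in the sketch are illustrative): a timer that
+lives on the stack must be set up with init_timer_on_stack() and torn down
+with destroy_timer_on_stack() before its stack frame goes away.
+
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+
+static void example_timeout(unsigned long data)
+{
+        /* timeout handling */
+}
+
+static void wait_with_stack_timer(void)
+{
+        struct timer_list timer;        /* lives in this stack frame */
+
+        init_timer_on_stack(&timer);    /* instead of init_timer() */
+        timer.function = example_timeout;
+        timer.data = 0;
+        timer.expires = jiffies + HZ;
+        add_timer(&timer);
+
+        /* ... wait for the work or the timeout ... */
+
+        del_timer_sync(&timer);         /* ensure the handler is not running */
+        destroy_timer_on_stack(&timer); /* tell debugobjects the object is gone */
+}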
+
+Cc: "Moore, Eric Dean" <Eric.Moore@lsil.com>
+Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: "Desai, Kashyap" <Kashyap.Desai@lsi.com>
+Cc: <stable@kernel.org>                [2.6.29.x]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/message/fusion/mptbase.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/message/fusion/mptbase.c
++++ b/drivers/message/fusion/mptbase.c
+@@ -5934,7 +5934,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS
+       /* Initalize the timer
+        */
+-      init_timer(&pCfg->timer);
++      init_timer_on_stack(&pCfg->timer);
+       pCfg->timer.data = (unsigned long) ioc;
+       pCfg->timer.function = mpt_timer_expired;
+       pCfg->wait_done = 0;
diff --git a/queue-2.6.29/scsi-sg-fix-iovec-bugs-introduced-by-the-block-layer-conversion.patch b/queue-2.6.29/scsi-sg-fix-iovec-bugs-introduced-by-the-block-layer-conversion.patch
new file mode 100644 (file)
index 0000000..e96dd35
--- /dev/null
@@ -0,0 +1,62 @@
+From stable-bounces@linux.kernel.org  Mon Apr  6 20:55:13 2009
+Date: Mon, 6 Apr 2009 20:55:08 GMT
+Message-Id: <200904062055.n36Kt8R7024842@hera.kernel.org>
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+To: jejb@kernel.org, stable@kernel.org
+Subject: SCSI: sg: fix iovec bugs introduced by the block layer conversion
+
+upstream commit: 0fdf96b67ac2649cc1ddb29b316a0db11586c6a8
+
+- needs to use copy_from_user() to bring the iovec in from user space
+before passing it to blk_rq_map_user_iov().
+
+- before the block layer conversion, if ->dxfer_len and the sum of the
+iovec lengths disagreed, the shorter one won. However, currently sg
+returns -EINVAL. This restores the old behavior (sketched below).
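+
+A user-space sketch of the "shorter one wins" rule (helper names are
+illustrative; the kernel uses iov_length() and iov_shorten()):
+
+#include <stdio.h>
+#include <sys/uio.h>
+
+static size_t iov_total(const struct iovec *iov, int n)
+{
+        size_t len = 0;
+        int i;
+
+        for (i = 0; i < n; i++)
+                len += iov[i].iov_len;
+        return len;
+}
+
+/* Trim the iovec in place to at most 'limit' bytes; returns the new
+ * element count (roughly what the kernel's iov_shorten() does). */
+static int iov_trim(struct iovec *iov, int n, size_t limit)
+{
+        int i;
+
+        for (i = 0; i < n && limit; i++) {
+                if (iov[i].iov_len > limit)
+                        iov[i].iov_len = limit;
+                limit -= iov[i].iov_len;
+        }
+        return i;
+}
+
+int main(void)
+{
+        char a[8], b[8];
+        struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
+        size_t dxfer_len = 10;          /* caller asked for fewer bytes */
+        int n = 2;
+
+        if (dxfer_len < iov_total(iov, n))
+                n = iov_trim(iov, n, dxfer_len);
+        printf("%d segment(s), %zu bytes\n", n, iov_total(iov, n));
+        return 0;
+}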
+
+Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Acked-by: Douglas Gilbert <dgilbert@interlog.com>
+Cc: stable@kernel.org
+Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/scsi/sg.c |   28 ++++++++++++++++++++++++----
+ 1 file changed, 24 insertions(+), 4 deletions(-)
+
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1673,10 +1673,30 @@ static int sg_start_req(Sg_request *srp,
+               md->null_mapped = hp->dxferp ? 0 : 1;
+       }
+-      if (iov_count)
+-              res = blk_rq_map_user_iov(q, rq, md, hp->dxferp, iov_count,
+-                                        hp->dxfer_len, GFP_ATOMIC);
+-      else
++      if (iov_count) {
++              int len, size = sizeof(struct sg_iovec) * iov_count;
++              struct iovec *iov;
++
++              iov = kmalloc(size, GFP_ATOMIC);
++              if (!iov)
++                      return -ENOMEM;
++
++              if (copy_from_user(iov, hp->dxferp, size)) {
++                      kfree(iov);
++                      return -EFAULT;
++              }
++
++              len = iov_length(iov, iov_count);
++              if (hp->dxfer_len < len) {
++                      iov_count = iov_shorten(iov, iov_count, hp->dxfer_len);
++                      len = hp->dxfer_len;
++              }
++
++              res = blk_rq_map_user_iov(q, rq, md, (struct sg_iovec *)iov,
++                                        iov_count,
++                                        len, GFP_ATOMIC);
++              kfree(iov);
++      } else
+               res = blk_rq_map_user(q, rq, md, hp->dxferp,
+                                     hp->dxfer_len, GFP_ATOMIC);
diff --git a/queue-2.6.29/scsi-sg-fix-q-queue_lock-on-scsi_error_handler-path.patch b/queue-2.6.29/scsi-sg-fix-q-queue_lock-on-scsi_error_handler-path.patch
new file mode 100644 (file)
index 0000000..3a847a7
--- /dev/null
@@ -0,0 +1,60 @@
+From stable-bounces@linux.kernel.org  Mon Apr  6 20:55:11 2009
+Date: Mon, 6 Apr 2009 20:55:06 GMT
+Message-Id: <200904062055.n36Kt6go024812@hera.kernel.org>
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+To: jejb@kernel.org, stable@kernel.org
+Subject: SCSI: sg: fix q->queue_lock on scsi_error_handler path
+
+upstream commit: 015640edb1f346e0b2eda703587c4cd1c310ec1d
+
+sg_rq_end_io() is called via rq->end_io. In some rare cases,
+sg_rq_end_io() calls blk_put_request()/blk_rq_unmap_user() (when the
+program that issued a command has gone away before the command
+completes, e.g. because it was interrupted before the command
+finished).
+
+We can't call blk_put_request/blk_rq_unmap_user in interrupt context,
+so commit c96952ed7031e7c576ecf90cf95b8ec099d5295a uses
+execute_in_process_context().
+
+The problem is that scsi_error_handler() also calls rq->end_io. We
+can't call blk_put_request/blk_rq_unmap_user in this path either (we
+hold q->queue_lock).
+
+To avoid the above problem, in these rare cases, this patch always
+uses schedule_work() instead of execute_in_process_context().
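+
+A sketch of the deferral pattern, assuming illustrative structure names
+(workqueue API as it exists in this kernel): the atomic-context callback
+only queues the work, and the sleeping cleanup runs later from keventd in
+process context.
+
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+
+struct my_req {
+        struct work_struct work;
+        /* ... per-request state ... */
+};
+
+static void my_req_cleanup(struct work_struct *work)
+{
+        struct my_req *req = container_of(work, struct my_req, work);
+
+        /* Process context: blk_put_request()/blk_rq_unmap_user()-style
+         * teardown that may sleep is safe here. */
+        (void)req;
+}
+
+static void my_end_io(struct my_req *req)
+{
+        /* Called in atomic context / with queue_lock held: just defer. */
+        INIT_WORK(&req->work, my_req_cleanup);
+        schedule_work(&req->work);
+}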
+
+Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Acked-by: Douglas Gilbert <dgilbert@interlog.com>
+Cc: Stable Tree <stable@kernel.org>
+Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/scsi/sg.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -1323,8 +1323,10 @@ static void sg_rq_end_io(struct request 
+               wake_up_interruptible(&sfp->read_wait);
+               kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
+               kref_put(&sfp->f_ref, sg_remove_sfp);
+-      } else
+-              execute_in_process_context(sg_rq_end_io_usercontext, &srp->ew);
++      } else {
++              INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext);
++              schedule_work(&srp->ew.work);
++      }
+ }
+ static struct file_operations sg_fops = {
+@@ -2134,7 +2136,8 @@ static void sg_remove_sfp(struct kref *k
+       write_unlock_irqrestore(&sg_index_lock, iflags);
+       wake_up_interruptible(&sdp->o_excl_wait);
+-      execute_in_process_context(sg_remove_sfp_usercontext, &sfp->ew);
++      INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
++      schedule_work(&sfp->ew.work);
+ }
+ static int
diff --git a/queue-2.6.29/scsi-sg-fix-races-during-device-removal.patch b/queue-2.6.29/scsi-sg-fix-races-during-device-removal.patch
new file mode 100644 (file)
index 0000000..3d6ce2f
--- /dev/null
@@ -0,0 +1,739 @@
+From c6517b7942fad663cc1cf3235cbe4207cf769332 Mon Sep 17 00:00:00 2001
+Message-Id: <20090417111050D.fujita.tomonori@lab.ntt.co.jp>
+From: Tony Battersby <tonyb@cybernetics.com>
+Date: Wed, 21 Jan 2009 14:45:50 -0500
+Subject: SCSI: sg: fix races during device removal
+
+upstream commit: c6517b7942fad663cc1cf3235cbe4207cf769332
+
+sg has the following problems related to device removal:
+
+* opening a sg fd races with removing a device
+* closing a sg fd races with removing a device
+* /proc/scsi/sg/* access races with removing a device
+* command completion races with removing a device
+* command completion races with closing a sg fd
+* can rmmod sg with active commands
+
+These problems can cause kernel oopses, memory-use-after-free, or
+double-free errors.  This patch fixes these problems by using krefs
+to manage the lifetime of sg_device and sg_fd.
+
+Each command submitted to the midlevel holds a reference to sg_fd
+until the completion callback.  This ensures that sg_fd doesn't go
+away if the fd is closed with commands still outstanding.
+
+sg_fd gets the reference of sg_device (with scsi_device) and also
+makes sure that the sg module doesn't go away.
+
+/proc/scsi/sg/* functions don't play nicely with krefs because they
+give information about sg_fds which have been closed but not yet
+freed due to still having outstanding commands and sg_devices which
+have been removed but not yet freed due to still being referenced
+by one or more sg_fds.  To deal with this safely without removing
+functionality, /proc functions now access sg_device and sg_fd while
+holding a lock instead of using kref_get()/kref_put().
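+
+As a minimal sketch of the kref pattern adopted here (the structure and
+helper names are illustrative, not sg's): every user takes a reference,
+and the release callback runs exactly once, when the last kref_put()
+drops the count to zero.
+
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/slab.h>
+
+struct my_dev {
+        struct kref ref;
+        /* ... device state ... */
+};
+
+static void my_dev_release(struct kref *kref)
+{
+        struct my_dev *dev = container_of(kref, struct my_dev, ref);
+
+        kfree(dev);                     /* final teardown, runs exactly once */
+}
+
+static struct my_dev *my_dev_create(void)
+{
+        struct my_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
+        if (dev)
+                kref_init(&dev->ref);   /* refcount starts at 1 */
+        return dev;
+}
+
+static void my_dev_open(struct my_dev *dev)
+{
+        kref_get(&dev->ref);            /* each opener holds a reference */
+}
+
+static void my_dev_close(struct my_dev *dev)
+{
+        kref_put(&dev->ref, my_dev_release);    /* last put frees the device */
+}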
+
+Signed-off-by: Tony Battersby <tonyb@cybernetics.com>
+Acked-by: Douglas Gilbert <dgilbert@interlog.com>
+Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
+[chrisw: big for -stable, helps fix real bug, and made it through rc2 upstream]
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/scsi/sg.c |  418 +++++++++++++++++++++++++-----------------------------
+ 1 file changed, 201 insertions(+), 217 deletions(-)
+
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -101,6 +101,7 @@ static int scatter_elem_sz_prev = SG_SCA
+ #define SG_SECTOR_MSK (SG_SECTOR_SZ - 1)
+ static int sg_add(struct device *, struct class_interface *);
++static void sg_device_destroy(struct kref *kref);
+ static void sg_remove(struct device *, struct class_interface *);
+ static DEFINE_IDR(sg_index_idr);
+@@ -158,6 +159,8 @@ typedef struct sg_fd {             /* holds the sta
+       char next_cmd_len;      /* 0 -> automatic (def), >0 -> use on next write() */
+       char keep_orphan;       /* 0 -> drop orphan (def), 1 -> keep for read() */
+       char mmap_called;       /* 0 -> mmap() never called on this fd */
++      struct kref f_ref;
++      struct execute_work ew;
+ } Sg_fd;
+ typedef struct sg_device { /* holds the state of each scsi generic device */
+@@ -171,6 +174,7 @@ typedef struct sg_device { /* holds the 
+       char sgdebug;           /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
+       struct gendisk *disk;
+       struct cdev * cdev;     /* char_dev [sysfs: /sys/cdev/major/sg<n>] */
++      struct kref d_ref;
+ } Sg_device;
+ static int sg_fasync(int fd, struct file *filp, int mode);
+@@ -194,13 +198,14 @@ static void sg_build_reserve(Sg_fd * sfp
+ static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size);
+ static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp);
+ static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev);
+-static int sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
+-static void __sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp);
++static void sg_remove_sfp(struct kref *);
+ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
+ static Sg_request *sg_add_request(Sg_fd * sfp);
+ static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
+ static int sg_res_in_use(Sg_fd * sfp);
++static Sg_device *sg_lookup_dev(int dev);
+ static Sg_device *sg_get_dev(int dev);
++static void sg_put_dev(Sg_device *sdp);
+ #ifdef CONFIG_SCSI_PROC_FS
+ static int sg_last_dev(void);
+ #endif
+@@ -237,22 +242,17 @@ sg_open(struct inode *inode, struct file
+       nonseekable_open(inode, filp);
+       SCSI_LOG_TIMEOUT(3, printk("sg_open: dev=%d, flags=0x%x\n", dev, flags));
+       sdp = sg_get_dev(dev);
+-      if ((!sdp) || (!sdp->device)) {
+-              unlock_kernel();
+-              return -ENXIO;
+-      }
+-      if (sdp->detached) {
+-              unlock_kernel();
+-              return -ENODEV;
++      if (IS_ERR(sdp)) {
++              retval = PTR_ERR(sdp);
++              sdp = NULL;
++              goto sg_put;
+       }
+       /* This driver's module count bumped by fops_get in <linux/fs.h> */
+       /* Prevent the device driver from vanishing while we sleep */
+       retval = scsi_device_get(sdp->device);
+-      if (retval) {
+-              unlock_kernel();
+-              return retval;
+-      }
++      if (retval)
++              goto sg_put;
+       if (!((flags & O_NONBLOCK) ||
+             scsi_block_when_processing_errors(sdp->device))) {
+@@ -303,16 +303,20 @@ sg_open(struct inode *inode, struct file
+       if ((sfp = sg_add_sfp(sdp, dev)))
+               filp->private_data = sfp;
+       else {
+-              if (flags & O_EXCL)
++              if (flags & O_EXCL) {
+                       sdp->exclude = 0;       /* undo if error */
++                      wake_up_interruptible(&sdp->o_excl_wait);
++              }
+               retval = -ENOMEM;
+               goto error_out;
+       }
+-      unlock_kernel();
+-      return 0;
+-
+-      error_out:
+-      scsi_device_put(sdp->device);
++      retval = 0;
++error_out:
++      if (retval)
++              scsi_device_put(sdp->device);
++sg_put:
++      if (sdp)
++              sg_put_dev(sdp);
+       unlock_kernel();
+       return retval;
+ }
+@@ -327,13 +331,13 @@ sg_release(struct inode *inode, struct f
+       if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
+               return -ENXIO;
+       SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
+-      if (0 == sg_remove_sfp(sdp, sfp)) {     /* Returns 1 when sdp gone */
+-              if (!sdp->detached) {
+-                      scsi_device_put(sdp->device);
+-              }
+-              sdp->exclude = 0;
+-              wake_up_interruptible(&sdp->o_excl_wait);
+-      }
++
++      sfp->closed = 1;
++
++      sdp->exclude = 0;
++      wake_up_interruptible(&sdp->o_excl_wait);
++
++      kref_put(&sfp->f_ref, sg_remove_sfp);
+       return 0;
+ }
+@@ -755,6 +759,7 @@ sg_common_write(Sg_fd * sfp, Sg_request 
+       hp->duration = jiffies_to_msecs(jiffies);
+       srp->rq->timeout = timeout;
++      kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
+       blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
+                             srp->rq, 1, sg_rq_end_io);
+       return 0;
+@@ -1247,24 +1252,23 @@ sg_mmap(struct file *filp, struct vm_are
+ static void sg_rq_end_io(struct request *rq, int uptodate)
+ {
+       struct sg_request *srp = rq->end_io_data;
+-      Sg_device *sdp = NULL;
++      Sg_device *sdp;
+       Sg_fd *sfp;
+       unsigned long iflags;
+       unsigned int ms;
+       char *sense;
+-      int result, resid;
++      int result, resid, done = 1;
+-      if (NULL == srp) {
+-              printk(KERN_ERR "sg_cmd_done: NULL request\n");
++      if (WARN_ON(srp->done != 0))
+               return;
+-      }
++
+       sfp = srp->parentfp;
+-      if (sfp)
+-              sdp = sfp->parentdp;
+-      if ((NULL == sdp) || sdp->detached) {
+-              printk(KERN_INFO "sg_cmd_done: device detached\n");
++      if (WARN_ON(sfp == NULL))
+               return;
+-      }
++
++      sdp = sfp->parentdp;
++      if (unlikely(sdp->detached))
++              printk(KERN_INFO "sg_rq_end_io: device detached\n");
+       sense = rq->sense;
+       result = rq->errors;
+@@ -1303,33 +1307,26 @@ static void sg_rq_end_io(struct request 
+       }
+       /* Rely on write phase to clean out srp status values, so no "else" */
+-      if (sfp->closed) {      /* whoops this fd already released, cleanup */
+-              SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, freeing ...\n"));
+-              sg_finish_rem_req(srp);
+-              srp = NULL;
+-              if (NULL == sfp->headrp) {
+-                      SCSI_LOG_TIMEOUT(1, printk("sg_cmd_done: already closed, final cleanup\n"));
+-                      if (0 == sg_remove_sfp(sdp, sfp)) {     /* device still present */
+-                              scsi_device_put(sdp->device);
+-                      }
+-                      sfp = NULL;
+-              }
+-      } else if (srp && srp->orphan) {
++      write_lock_irqsave(&sfp->rq_list_lock, iflags);
++      if (unlikely(srp->orphan)) {
+               if (sfp->keep_orphan)
+                       srp->sg_io_owned = 0;
+-              else {
+-                      sg_finish_rem_req(srp);
+-                      srp = NULL;
+-              }
++              else
++                      done = 0;
+       }
+-      if (sfp && srp) {
+-              /* Now wake up any sg_read() that is waiting for this packet. */
+-              kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
+-              write_lock_irqsave(&sfp->rq_list_lock, iflags);
+-              srp->done = 1;
++      srp->done = done;
++      write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
++
++      if (likely(done)) {
++              /* Now wake up any sg_read() that is waiting for this
++               * packet.
++               */
+               wake_up_interruptible(&sfp->read_wait);
+-              write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+-      }
++              kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
++      } else
++              sg_finish_rem_req(srp); /* call with srp->done == 0 */
++
++      kref_put(&sfp->f_ref, sg_remove_sfp);
+ }
+ static struct file_operations sg_fops = {
+@@ -1364,17 +1361,18 @@ static Sg_device *sg_alloc(struct gendis
+               printk(KERN_WARNING "kmalloc Sg_device failure\n");
+               return ERR_PTR(-ENOMEM);
+       }
+-      error = -ENOMEM;
++
+       if (!idr_pre_get(&sg_index_idr, GFP_KERNEL)) {
+               printk(KERN_WARNING "idr expansion Sg_device failure\n");
++              error = -ENOMEM;
+               goto out;
+       }
+       write_lock_irqsave(&sg_index_lock, iflags);
+-      error = idr_get_new(&sg_index_idr, sdp, &k);
+-      write_unlock_irqrestore(&sg_index_lock, iflags);
++      error = idr_get_new(&sg_index_idr, sdp, &k);
+       if (error) {
++              write_unlock_irqrestore(&sg_index_lock, iflags);
+               printk(KERN_WARNING "idr allocation Sg_device failure: %d\n",
+                      error);
+               goto out;
+@@ -1391,6 +1389,9 @@ static Sg_device *sg_alloc(struct gendis
+       init_waitqueue_head(&sdp->o_excl_wait);
+       sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments);
+       sdp->index = k;
++      kref_init(&sdp->d_ref);
++
++      write_unlock_irqrestore(&sg_index_lock, iflags);
+       error = 0;
+  out:
+@@ -1401,6 +1402,8 @@ static Sg_device *sg_alloc(struct gendis
+       return sdp;
+  overflow:
++      idr_remove(&sg_index_idr, k);
++      write_unlock_irqrestore(&sg_index_lock, iflags);
+       sdev_printk(KERN_WARNING, scsidp,
+                   "Unable to attach sg device type=%d, minor "
+                   "number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1);
+@@ -1488,49 +1491,46 @@ out:
+       return error;
+ }
+-static void
+-sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
++static void sg_device_destroy(struct kref *kref)
++{
++      struct sg_device *sdp = container_of(kref, struct sg_device, d_ref);
++      unsigned long flags;
++
++      /* CAUTION!  Note that the device can still be found via idr_find()
++       * even though the refcount is 0.  Therefore, do idr_remove() BEFORE
++       * any other cleanup.
++       */
++
++      write_lock_irqsave(&sg_index_lock, flags);
++      idr_remove(&sg_index_idr, sdp->index);
++      write_unlock_irqrestore(&sg_index_lock, flags);
++
++      SCSI_LOG_TIMEOUT(3,
++              printk("sg_device_destroy: %s\n",
++                      sdp->disk->disk_name));
++
++      put_disk(sdp->disk);
++      kfree(sdp);
++}
++
++static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
+ {
+       struct scsi_device *scsidp = to_scsi_device(cl_dev->parent);
+       Sg_device *sdp = dev_get_drvdata(cl_dev);
+       unsigned long iflags;
+       Sg_fd *sfp;
+-      Sg_fd *tsfp;
+-      Sg_request *srp;
+-      Sg_request *tsrp;
+-      int delay;
+-      if (!sdp)
++      if (!sdp || sdp->detached)
+               return;
+-      delay = 0;
++      SCSI_LOG_TIMEOUT(3, printk("sg_remove: %s\n", sdp->disk->disk_name));
++
++      /* Need a write lock to set sdp->detached. */
+       write_lock_irqsave(&sg_index_lock, iflags);
+-      if (sdp->headfp) {
+-              sdp->detached = 1;
+-              for (sfp = sdp->headfp; sfp; sfp = tsfp) {
+-                      tsfp = sfp->nextfp;
+-                      for (srp = sfp->headrp; srp; srp = tsrp) {
+-                              tsrp = srp->nextrp;
+-                              if (sfp->closed || (0 == sg_srp_done(srp, sfp)))
+-                                      sg_finish_rem_req(srp);
+-                      }
+-                      if (sfp->closed) {
+-                              scsi_device_put(sdp->device);
+-                              __sg_remove_sfp(sdp, sfp);
+-                      } else {
+-                              delay = 1;
+-                              wake_up_interruptible(&sfp->read_wait);
+-                              kill_fasync(&sfp->async_qp, SIGPOLL,
+-                                          POLL_HUP);
+-                      }
+-              }
+-              SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d, dirty\n", sdp->index));
+-              if (NULL == sdp->headfp) {
+-                      idr_remove(&sg_index_idr, sdp->index);
+-              }
+-      } else {        /* nothing active, simple case */
+-              SCSI_LOG_TIMEOUT(3, printk("sg_remove: dev=%d\n", sdp->index));
+-              idr_remove(&sg_index_idr, sdp->index);
++      sdp->detached = 1;
++      for (sfp = sdp->headfp; sfp; sfp = sfp->nextfp) {
++              wake_up_interruptible(&sfp->read_wait);
++              kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
+       }
+       write_unlock_irqrestore(&sg_index_lock, iflags);
+@@ -1538,13 +1538,8 @@ sg_remove(struct device *cl_dev, struct 
+       device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
+       cdev_del(sdp->cdev);
+       sdp->cdev = NULL;
+-      put_disk(sdp->disk);
+-      sdp->disk = NULL;
+-      if (NULL == sdp->headfp)
+-              kfree(sdp);
+-      if (delay)
+-              msleep(10);     /* dirty detach so delay device destruction */
++      sg_put_dev(sdp);
+ }
+ module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR);
+@@ -1961,22 +1956,6 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
+       return resp;
+ }
+-#ifdef CONFIG_SCSI_PROC_FS
+-static Sg_request *
+-sg_get_nth_request(Sg_fd * sfp, int nth)
+-{
+-      Sg_request *resp;
+-      unsigned long iflags;
+-      int k;
+-
+-      read_lock_irqsave(&sfp->rq_list_lock, iflags);
+-      for (k = 0, resp = sfp->headrp; resp && (k < nth);
+-           ++k, resp = resp->nextrp) ;
+-      read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+-      return resp;
+-}
+-#endif
+-
+ /* always adds to end of list */
+ static Sg_request *
+ sg_add_request(Sg_fd * sfp)
+@@ -2052,22 +2031,6 @@ sg_remove_request(Sg_fd * sfp, Sg_reques
+       return res;
+ }
+-#ifdef CONFIG_SCSI_PROC_FS
+-static Sg_fd *
+-sg_get_nth_sfp(Sg_device * sdp, int nth)
+-{
+-      Sg_fd *resp;
+-      unsigned long iflags;
+-      int k;
+-
+-      read_lock_irqsave(&sg_index_lock, iflags);
+-      for (k = 0, resp = sdp->headfp; resp && (k < nth);
+-           ++k, resp = resp->nextfp) ;
+-      read_unlock_irqrestore(&sg_index_lock, iflags);
+-      return resp;
+-}
+-#endif
+-
+ static Sg_fd *
+ sg_add_sfp(Sg_device * sdp, int dev)
+ {
+@@ -2082,6 +2045,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
+       init_waitqueue_head(&sfp->read_wait);
+       rwlock_init(&sfp->rq_list_lock);
++      kref_init(&sfp->f_ref);
+       sfp->timeout = SG_DEFAULT_TIMEOUT;
+       sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
+       sfp->force_packid = SG_DEF_FORCE_PACK_ID;
+@@ -2109,15 +2073,54 @@ sg_add_sfp(Sg_device * sdp, int dev)
+       sg_build_reserve(sfp, bufflen);
+       SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp:   bufflen=%d, k_use_sg=%d\n",
+                          sfp->reserve.bufflen, sfp->reserve.k_use_sg));
++
++      kref_get(&sdp->d_ref);
++      __module_get(THIS_MODULE);
+       return sfp;
+ }
+-static void
+-__sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
++static void sg_remove_sfp_usercontext(struct work_struct *work)
++{
++      struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
++      struct sg_device *sdp = sfp->parentdp;
++
++      /* Cleanup any responses which were never read(). */
++      while (sfp->headrp)
++              sg_finish_rem_req(sfp->headrp);
++
++      if (sfp->reserve.bufflen > 0) {
++              SCSI_LOG_TIMEOUT(6,
++                      printk("sg_remove_sfp:    bufflen=%d, k_use_sg=%d\n",
++                              (int) sfp->reserve.bufflen,
++                              (int) sfp->reserve.k_use_sg));
++              sg_remove_scat(&sfp->reserve);
++      }
++
++      SCSI_LOG_TIMEOUT(6,
++              printk("sg_remove_sfp: %s, sfp=0x%p\n",
++                      sdp->disk->disk_name,
++                      sfp));
++      kfree(sfp);
++
++      scsi_device_put(sdp->device);
++      sg_put_dev(sdp);
++      module_put(THIS_MODULE);
++}
++
++static void sg_remove_sfp(struct kref *kref)
+ {
++      struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref);
++      struct sg_device *sdp = sfp->parentdp;
+       Sg_fd *fp;
+       Sg_fd *prev_fp;
++      unsigned long iflags;
++
++      /* CAUTION!  Note that sfp can still be found by walking sdp->headfp
++       * even though the refcount is now 0.  Therefore, unlink sfp from
++       * sdp->headfp BEFORE doing any other cleanup.
++       */
++      write_lock_irqsave(&sg_index_lock, iflags);
+       prev_fp = sdp->headfp;
+       if (sfp == prev_fp)
+               sdp->headfp = prev_fp->nextfp;
+@@ -2130,54 +2133,10 @@ __sg_remove_sfp(Sg_device * sdp, Sg_fd *
+                       prev_fp = fp;
+               }
+       }
+-      if (sfp->reserve.bufflen > 0) {
+-              SCSI_LOG_TIMEOUT(6, 
+-                      printk("__sg_remove_sfp:    bufflen=%d, k_use_sg=%d\n",
+-                      (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg));
+-              sg_remove_scat(&sfp->reserve);
+-      }
+-      sfp->parentdp = NULL;
+-      SCSI_LOG_TIMEOUT(6, printk("__sg_remove_sfp:    sfp=0x%p\n", sfp));
+-      kfree(sfp);
+-}
+-
+-/* Returns 0 in normal case, 1 when detached and sdp object removed */
+-static int
+-sg_remove_sfp(Sg_device * sdp, Sg_fd * sfp)
+-{
+-      Sg_request *srp;
+-      Sg_request *tsrp;
+-      int dirty = 0;
+-      int res = 0;
+-
+-      for (srp = sfp->headrp; srp; srp = tsrp) {
+-              tsrp = srp->nextrp;
+-              if (sg_srp_done(srp, sfp))
+-                      sg_finish_rem_req(srp);
+-              else
+-                      ++dirty;
+-      }
+-      if (0 == dirty) {
+-              unsigned long iflags;
++      write_unlock_irqrestore(&sg_index_lock, iflags);
++      wake_up_interruptible(&sdp->o_excl_wait);
+-              write_lock_irqsave(&sg_index_lock, iflags);
+-              __sg_remove_sfp(sdp, sfp);
+-              if (sdp->detached && (NULL == sdp->headfp)) {
+-                      idr_remove(&sg_index_idr, sdp->index);
+-                      kfree(sdp);
+-                      res = 1;
+-              }
+-              write_unlock_irqrestore(&sg_index_lock, iflags);
+-      } else {
+-              /* MOD_INC's to inhibit unloading sg and associated adapter driver */
+-              /* only bump the access_count if we actually succeeded in
+-               * throwing another counter on the host module */
+-              scsi_device_get(sdp->device);   /* XXX: retval ignored? */      
+-              sfp->closed = 1;        /* flag dirty state on this fd */
+-              SCSI_LOG_TIMEOUT(1, printk("sg_remove_sfp: worrisome, %d writes pending\n",
+-                                dirty));
+-      }
+-      return res;
++      execute_in_process_context(sg_remove_sfp_usercontext, &sfp->ew);
+ }
+ static int
+@@ -2219,19 +2178,38 @@ sg_last_dev(void)
+ }
+ #endif
+-static Sg_device *
+-sg_get_dev(int dev)
++/* must be called with sg_index_lock held */
++static Sg_device *sg_lookup_dev(int dev)
+ {
+-      Sg_device *sdp;
+-      unsigned long iflags;
++      return idr_find(&sg_index_idr, dev);
++}
+-      read_lock_irqsave(&sg_index_lock, iflags);
+-      sdp = idr_find(&sg_index_idr, dev);
+-      read_unlock_irqrestore(&sg_index_lock, iflags);
++static Sg_device *sg_get_dev(int dev)
++{
++      struct sg_device *sdp;
++      unsigned long flags;
++
++      read_lock_irqsave(&sg_index_lock, flags);
++      sdp = sg_lookup_dev(dev);
++      if (!sdp)
++              sdp = ERR_PTR(-ENXIO);
++      else if (sdp->detached) {
++              /* If sdp->detached, then the refcount may already be 0, in
++               * which case it would be a bug to do kref_get().
++               */
++              sdp = ERR_PTR(-ENODEV);
++      } else
++              kref_get(&sdp->d_ref);
++      read_unlock_irqrestore(&sg_index_lock, flags);
+       return sdp;
+ }
++static void sg_put_dev(struct sg_device *sdp)
++{
++      kref_put(&sdp->d_ref, sg_device_destroy);
++}
++
+ #ifdef CONFIG_SCSI_PROC_FS
+ static struct proc_dir_entry *sg_proc_sgp = NULL;
+@@ -2488,8 +2466,10 @@ static int sg_proc_seq_show_dev(struct s
+       struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
+       Sg_device *sdp;
+       struct scsi_device *scsidp;
++      unsigned long iflags;
+-      sdp = it ? sg_get_dev(it->index) : NULL;
++      read_lock_irqsave(&sg_index_lock, iflags);
++      sdp = it ? sg_lookup_dev(it->index) : NULL;
+       if (sdp && (scsidp = sdp->device) && (!sdp->detached))
+               seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
+                             scsidp->host->host_no, scsidp->channel,
+@@ -2500,6 +2480,7 @@ static int sg_proc_seq_show_dev(struct s
+                             (int) scsi_device_online(scsidp));
+       else
+               seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
++      read_unlock_irqrestore(&sg_index_lock, iflags);
+       return 0;
+ }
+@@ -2513,16 +2494,20 @@ static int sg_proc_seq_show_devstrs(stru
+       struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
+       Sg_device *sdp;
+       struct scsi_device *scsidp;
++      unsigned long iflags;
+-      sdp = it ? sg_get_dev(it->index) : NULL;
++      read_lock_irqsave(&sg_index_lock, iflags);
++      sdp = it ? sg_lookup_dev(it->index) : NULL;
+       if (sdp && (scsidp = sdp->device) && (!sdp->detached))
+               seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n",
+                          scsidp->vendor, scsidp->model, scsidp->rev);
+       else
+               seq_printf(s, "<no active device>\n");
++      read_unlock_irqrestore(&sg_index_lock, iflags);
+       return 0;
+ }
++/* must be called while holding sg_index_lock */
+ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
+ {
+       int k, m, new_interface, blen, usg;
+@@ -2532,7 +2517,8 @@ static void sg_proc_debug_helper(struct 
+       const char * cp;
+       unsigned int ms;
+-      for (k = 0; (fp = sg_get_nth_sfp(sdp, k)); ++k) {
++      for (k = 0, fp = sdp->headfp; fp != NULL; ++k, fp = fp->nextfp) {
++              read_lock(&fp->rq_list_lock); /* irqs already disabled */
+               seq_printf(s, "   FD(%d): timeout=%dms bufflen=%d "
+                          "(res)sgat=%d low_dma=%d\n", k + 1,
+                          jiffies_to_msecs(fp->timeout),
+@@ -2542,7 +2528,9 @@ static void sg_proc_debug_helper(struct 
+               seq_printf(s, "   cmd_q=%d f_packid=%d k_orphan=%d closed=%d\n",
+                          (int) fp->cmd_q, (int) fp->force_packid,
+                          (int) fp->keep_orphan, (int) fp->closed);
+-              for (m = 0; (srp = sg_get_nth_request(fp, m)); ++m) {
++              for (m = 0, srp = fp->headrp;
++                              srp != NULL;
++                              ++m, srp = srp->nextrp) {
+                       hp = &srp->header;
+                       new_interface = (hp->interface_id == '\0') ? 0 : 1;
+                       if (srp->res_used) {
+@@ -2579,6 +2567,7 @@ static void sg_proc_debug_helper(struct 
+               }
+               if (0 == m)
+                       seq_printf(s, "     No requests active\n");
++              read_unlock(&fp->rq_list_lock);
+       }
+ }
+@@ -2591,39 +2580,34 @@ static int sg_proc_seq_show_debug(struct
+ {
+       struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
+       Sg_device *sdp;
++      unsigned long iflags;
+       if (it && (0 == it->index)) {
+               seq_printf(s, "max_active_device=%d(origin 1)\n",
+                          (int)it->max);
+               seq_printf(s, " def_reserved_size=%d\n", sg_big_buff);
+       }
+-      sdp = it ? sg_get_dev(it->index) : NULL;
+-      if (sdp) {
+-              struct scsi_device *scsidp = sdp->device;
+-              if (NULL == scsidp) {
+-                      seq_printf(s, "device %d detached ??\n", 
+-                                 (int)it->index);
+-                      return 0;
+-              }
++      read_lock_irqsave(&sg_index_lock, iflags);
++      sdp = it ? sg_lookup_dev(it->index) : NULL;
++      if (sdp && sdp->headfp) {
++              struct scsi_device *scsidp = sdp->device;
+-              if (sg_get_nth_sfp(sdp, 0)) {
+-                      seq_printf(s, " >>> device=%s ",
+-                              sdp->disk->disk_name);
+-                      if (sdp->detached)
+-                              seq_printf(s, "detached pending close ");
+-                      else
+-                              seq_printf
+-                                  (s, "scsi%d chan=%d id=%d lun=%d   em=%d",
+-                                   scsidp->host->host_no,
+-                                   scsidp->channel, scsidp->id,
+-                                   scsidp->lun,
+-                                   scsidp->host->hostt->emulated);
+-                      seq_printf(s, " sg_tablesize=%d excl=%d\n",
+-                                 sdp->sg_tablesize, sdp->exclude);
+-              }
++              seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
++              if (sdp->detached)
++                      seq_printf(s, "detached pending close ");
++              else
++                      seq_printf
++                          (s, "scsi%d chan=%d id=%d lun=%d   em=%d",
++                           scsidp->host->host_no,
++                           scsidp->channel, scsidp->id,
++                           scsidp->lun,
++                           scsidp->host->hostt->emulated);
++              seq_printf(s, " sg_tablesize=%d excl=%d\n",
++                         sdp->sg_tablesize, sdp->exclude);
+               sg_proc_debug_helper(s, sdp);
+       }
++      read_unlock_irqrestore(&sg_index_lock, iflags);
+       return 0;
+ }
diff --git a/queue-2.6.29/scsi-sg-fix-races-with-ioctl.patch b/queue-2.6.29/scsi-sg-fix-races-with-ioctl.patch
new file mode 100644 (file)
index 0000000..225111e
--- /dev/null
@@ -0,0 +1,124 @@
+From a2dd3b4cea335713b58996bb07b3abcde1175f47 Mon Sep 17 00:00:00 2001
+Message-Id: <20090417111050D.fujita.tomonori@lab.ntt.co.jp>
+From: Tony Battersby <tonyb@cybernetics.com>
+Date: Tue, 20 Jan 2009 17:00:09 -0500
+Subject: SCSI: sg: fix races with ioctl(SG_IO)
+
+upstream commit: a2dd3b4cea335713b58996bb07b3abcde1175f47
+
+sg_io_owned needs to be set before the command is sent to the midlevel;
+otherwise, a quickly-completing command may cause a different CPU
+to see "srp->done == 1 && !srp->sg_io_owned", which would lead to
+incorrect behavior.
+
+Check srp->done and set srp->orphan while holding rq_list_lock to
+prevent races with sg_rq_end_io().
+
+There is no need to check sfp->closed from read/write/ioctl/poll/etc.,
+since the kernel guarantees that these entry points are not called
+after the fd has been closed.
+
+The usefulness of sg_srp_done() was questionable before; now it is
+definitely not needed.
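+
+As a minimal sketch of the check-under-lock pattern described above (the
+structure and function names are illustrative, not sg's):
+
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+
+struct waiter_ctx {
+        rwlock_t rq_list_lock;
+        int done;               /* set by the completion path */
+        int orphan;             /* set here if the waiter gives up */
+};
+
+static int claim_or_orphan(struct waiter_ctx *ctx)
+{
+        /* ... __wait_event_interruptible() returned, possibly via signal ... */
+
+        write_lock_irq(&ctx->rq_list_lock);
+        if (ctx->done) {
+                ctx->done = 2;          /* claim the completed request */
+                write_unlock_irq(&ctx->rq_list_lock);
+                return 0;
+        }
+        ctx->orphan = 1;                /* completion path will clean it up */
+        write_unlock_irq(&ctx->rq_list_lock);
+        return -ERESTARTSYS;
+}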
+
+Signed-off-by: Tony Battersby <tonyb@cybernetics.com>
+Acked-by: Douglas Gilbert <dgilbert@interlog.com>
+Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/scsi/sg.c |   39 ++++++++++++++-------------------------
+ 1 file changed, 14 insertions(+), 25 deletions(-)
+
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -189,7 +189,7 @@ static ssize_t sg_new_read(Sg_fd * sfp, 
+                          Sg_request * srp);
+ static ssize_t sg_new_write(Sg_fd *sfp, struct file *file,
+                       const char __user *buf, size_t count, int blocking,
+-                      int read_only, Sg_request **o_srp);
++                      int read_only, int sg_io_owned, Sg_request **o_srp);
+ static int sg_common_write(Sg_fd * sfp, Sg_request * srp,
+                          unsigned char *cmnd, int timeout, int blocking);
+ static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer);
+@@ -561,7 +561,8 @@ sg_write(struct file *filp, const char _
+               return -EFAULT;
+       blocking = !(filp->f_flags & O_NONBLOCK);
+       if (old_hdr.reply_len < 0)
+-              return sg_new_write(sfp, filp, buf, count, blocking, 0, NULL);
++              return sg_new_write(sfp, filp, buf, count,
++                                  blocking, 0, 0, NULL);
+       if (count < (SZ_SG_HEADER + 6))
+               return -EIO;    /* The minimum scsi command length is 6 bytes. */
+@@ -642,7 +643,7 @@ sg_write(struct file *filp, const char _
+ static ssize_t
+ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf,
+-               size_t count, int blocking, int read_only,
++               size_t count, int blocking, int read_only, int sg_io_owned,
+                Sg_request **o_srp)
+ {
+       int k;
+@@ -662,6 +663,7 @@ sg_new_write(Sg_fd *sfp, struct file *fi
+               SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n"));
+               return -EDOM;
+       }
++      srp->sg_io_owned = sg_io_owned;
+       hp = &srp->header;
+       if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) {
+               sg_remove_request(sfp, srp);
+@@ -766,18 +768,6 @@ sg_common_write(Sg_fd * sfp, Sg_request 
+ }
+ static int
+-sg_srp_done(Sg_request *srp, Sg_fd *sfp)
+-{
+-      unsigned long iflags;
+-      int done;
+-
+-      read_lock_irqsave(&sfp->rq_list_lock, iflags);
+-      done = srp->done;
+-      read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+-      return done;
+-}
+-
+-static int
+ sg_ioctl(struct inode *inode, struct file *filp,
+        unsigned int cmd_in, unsigned long arg)
+ {
+@@ -809,27 +799,26 @@ sg_ioctl(struct inode *inode, struct fil
+                               return -EFAULT;
+                       result =
+                           sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
+-                                       blocking, read_only, &srp);
++                                       blocking, read_only, 1, &srp);
+                       if (result < 0)
+                               return result;
+-                      srp->sg_io_owned = 1;
+                       while (1) {
+                               result = 0;     /* following macro to beat race condition */
+                               __wait_event_interruptible(sfp->read_wait,
+-                                      (sdp->detached || sfp->closed || sg_srp_done(srp, sfp)),
+-                                                         result);
++                                      (srp->done || sdp->detached),
++                                      result);
+                               if (sdp->detached)
+                                       return -ENODEV;
+-                              if (sfp->closed)
+-                                      return 0;       /* request packet dropped already */
+-                              if (0 == result)
++                              write_lock_irq(&sfp->rq_list_lock);
++                              if (srp->done) {
++                                      srp->done = 2;
++                                      write_unlock_irq(&sfp->rq_list_lock);
+                                       break;
++                              }
+                               srp->orphan = 1;
++                              write_unlock_irq(&sfp->rq_list_lock);
+                               return result;  /* -ERESTARTSYS because signal hit process */
+                       }
+-                      write_lock_irqsave(&sfp->rq_list_lock, iflags);
+-                      srp->done = 2;
+-                      write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+                       result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp);
+                       return (result < 0) ? result : 0;
+               }
diff --git a/queue-2.6.29/security-smack-fix-oops-when-setting-a-size-0-smack64-xattr.patch b/queue-2.6.29/security-smack-fix-oops-when-setting-a-size-0-smack64-xattr.patch
new file mode 100644 (file)
index 0000000..d4a4dc6
--- /dev/null
@@ -0,0 +1,43 @@
+From stable-bounces@linux.kernel.org  Tue Mar 31 22:31:17 2009
+Message-ID: <49D29103.5080709@numericable.fr>
+Date: Tue, 31 Mar 2009 23:54:11 +0200
+From: Etienne Basset <etienne.basset@numericable.fr>
+To: stable <stable@kernel.org>, Casey Schaufler <casey@schaufler-ca.com>
+Cc: Paul Moore <paul.moore@hp.com>
+Subject: security/smack: fix oops when setting a size 0 SMACK64 xattr
+
+upstream commit: 4303154e86597885bc3cbc178a48ccbc8213875f
+
+This patch fixes an oops in Smack when setting a size 0 SMACK64 xattr, e.g.
+attr -S -s SMACK64 -V '' somefile
+This oopses because smk_import_entry() treats a 0 length as SMK_MAXLEN.
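+
+A tiny sketch of the added check, with illustrative names: an empty or
+oversized value is rejected before any attempt to import it as a label.
+
+#include <stddef.h>
+
+static int label_value_ok(const void *value, size_t size, size_t maxlen)
+{
+        /* size == 0 would otherwise be treated as "maximum length" by the
+         * import helper, which is what caused the oops. */
+        if (value == NULL || size == 0 || size > maxlen)
+                return 0;
+        return 1;
+}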
+
+Signed-off-by: Etienne Basset <etienne.basset@numericable.fr>
+Reviewed-by: James Morris <jmorris@namei.org>
+Acked-by: Casey Schaufler <casey@schaufler-ca.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ security/smack/smack_lsm.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -607,6 +607,8 @@ static int smack_inode_setxattr(struct d
+           strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) {
+               if (!capable(CAP_MAC_ADMIN))
+                       rc = -EPERM;
++              if (size == 0)
++                      rc = -EINVAL;
+       } else
+               rc = cap_inode_setxattr(dentry, name, value, size, flags);
+@@ -1430,7 +1432,7 @@ static int smack_inode_setsecurity(struc
+       struct socket *sock;
+       int rc = 0;
+-      if (value == NULL || size > SMK_LABELLEN)
++      if (value == NULL || size > SMK_LABELLEN || size == 0)
+               return -EACCES;
+       sp = smk_import(value, size);
diff --git a/queue-2.6.29/series b/queue-2.6.29/series
new file mode 100644 (file)
index 0000000..f1efdf8
--- /dev/null
@@ -0,0 +1,104 @@
+security-smack-fix-oops-when-setting-a-size-0-smack64-xattr.patch
+fbmem-fix-fb_info-lock-and-mm-mmap_sem-circular-locking-dependency.patch
+fbdev-fix-info-lock-deadlock-in-fbcon_event_notify.patch
+ide-fix-code-dealing-with-sleeping-devices-in-do_ide_request.patch
+pci-x86-detect-host-bridge-config-space-size-w-o-using-quirks.patch
+mips-compat-zero-upper-32-bit-of-offset_high-and-offset_low.patch
+ext4-fix-typo-which-causes-a-memory-leak-on-error-path.patch
+ext4-fix-locking-typo-in-mballoc-which-could-cause-soft-lockup-hangs.patch
+rt2x00-fix-slab-corruption-during-rmmod.patch
+tracing-core-fix-early-free-of-cpumasks.patch
+x86-setup-mark-esi-as-clobbered-in-e820-bios-call.patch
+acpi-fix-of-pmtimer-overflow-that-make-cx-states-time-incorrect.patch
+acpi-cap-off-p-state-transition-latency-from-buggy-bioses.patch
+dock-fix-dereference-after-kfree.patch
+drm-i915-change-dcc-tiling-detection-case-to-cover-only-mobile-parts.patch
+drm-i915-read-the-right-sdvo-register-when-detecting-svdo-hdmi.patch
+drm-i915-fix-lock-order-reversal-in-gtt-pwrite-path.patch
+drm-i915-make-gem-object-s-page-lists-refcounted-instead-of-get-free.patch
+drm-i915-fix-lock-order-reversal-in-shmem-pwrite-path.patch
+drm-i915-fix-lock-order-reversal-in-shmem-pread-path.patch
+drm-i915-fix-lock-order-reversal-with-cliprects-and-cmdbuf-in-non-dri2-paths.patch
+drm-i915-sync-crt-hotplug-detection-with-intel-video-driver.patch
+drm-i915-check-for-dev-primary-master-before-dereference.patch
+drm-i915-check-the-return-value-from-the-copy-from-user.patch
+drm-i915-check-for-einval-from-vm_insert_pfn.patch
+drm-use-pgprot_writecombine-in-gem-gtt-mapping-to-get-the-right-bits-for-pat.patch
+drm-i915-only-set-tv-mode-when-any-property-changed.patch
+drm-i915-fix-tv-mode-setting-in-property-change.patch
+scsi-sg-fix-iovec-bugs-introduced-by-the-block-layer-conversion.patch
+md-raid1-don-t-assume-newly-allocated-bvecs-are-initialised.patch
+r8169-reset-intrstatus-after-chip-reset.patch
+v4l-dvb-cx88-prevent-general-protection-fault-on-rmmod.patch
+ide-drivers-ide-ide-atapi.c-needs-linux-scatterlist.h.patch
+ide-atapi-start-dma-after-issuing-a-packet-command.patch
+cpumask-fix-slab-corruption-caused-by-alloc_cpumask_var_node.patch
+sysctl-fix-suid_dumpable-and-lease-break-time-sysctls.patch
+mm-define-a-unique-value-for-as_unevictable-flag.patch
+mm-do_xip_mapping_read-fix-length-calculation.patch
+ixgbe-fix-potential-memory-leak-driver-panic-issue-while-setting-up-tx-rx-ring-parameters.patch
+dm-preserve-bi_io_vec-when-resubmitting-bios.patch
+vfs-skip-i_clear-state-inodes.patch
+dm-raid1-switch-read_record-from-kmalloc-to-slab-to-save-memory.patch
+dm-io-make-sync_io-uninterruptible.patch
+dm-snapshot-refactor-__find_pending_exception.patch
+dm-snapshot-avoid-dropping-lock-in-__find_pending_exception.patch
+dm-snapshot-avoid-having-two-exceptions-for-the-same-chunk.patch
+dm-target-use-module-refcount-directly.patch
+dm-path-selector-use-module-refcount-directly.patch
+dm-table-fix-upgrade-mode-race.patch
+af_rose-x25-sanity-check-the-maximum-user-frame-size.patch
+crypto-shash-fix-unaligned-calculation-with-short-length.patch
+acer-wmi-blacklist-acer-aspire-one.patch
+kprobes-fix-locking-imbalance-in-kretprobes.patch
+netfilter-ip-ip6-arp-_tables-fix-incorrect-loop-detection.patch
+splice-fix-deadlock-in-splicing-to-file.patch
+alsa-hda-add-missing-comma-in-ad1884_slave_vols.patch
+sparc64-fix-bug-in.patch
+scsi-libiscsi-fix-iscsi-pool-error-path.patch
+scsi-libiscsi-fix-iscsi-pool-error-path-fixlet.patch
+cap_prctl-don-t-set-error-to-0-at-no_change.patch
+posixtimers-sched-fix-posix-clock-monotonicity.patch
+posix-timers-fix-rlimit_cpu-fork.patch
+posix-timers-fix-rlimit_cpu-setitimer.patch
+dm-kcopyd-prepare-for-callback-race-fix.patch
+dm-kcopyd-fix-callback-race.patch
+sched-do-not-count-frozen-tasks-toward-load.patch
+x86-fix-broken-irq-migration-logic-while-cleaning-up-multiple-vectors.patch
+hrtimer-fix-rq-lock-inversion.patch
+add-some-long-missing-capabilities-to-fs_mask.patch
+spi-spi_write_then_read-bugfixes.patch
+tty-fix-leak-in-ti-usb.patch
+sfc-match-calls-to-netif_napi_add-and-netif_napi_del.patch
+alsa-hda-fix-the-cmd-cache-keys-for-amp-verbs.patch
+powerpc-fix-data-corrupting-bug-in-__futex_atomic_op.patch
+hpt366-fix-hpt370-dma-timeouts.patch
+pata_hpt37x-fix-hpt370-dma-timeouts.patch
+mm-pass-correct-mm-when-growing-stack.patch
+scsi-sg-fix-races-during-device-removal.patch
+scsi-sg-fix-races-with-ioctl.patch
+sg-avoid-blk_put_request-blk_rq_unmap_user-in-interrupt.patch
+scsi-sg-fix-q-queue_lock-on-scsi_error_handler-path.patch
+x86-disable-x86_ptrace_bts-for-now.patch
+usb-gadget-fix-ethernet-link-reports-to-ethtool.patch
+usb-ftdi_sio-add-vendor-project-id-for-jeti-specbos-1201-spectrometer.patch
+usb-fix-oops-in-cdc-wdm-in-case-of-malformed-descriptors.patch
+usb-usb-storage-augment-unusual_devs-entry-for-simple-tech-datafab.patch
+kvm-fix-missing-smp-tlb-flush-in-invlpg.patch
+kvm-add-config_have_kvm_irqchip.patch
+kvm-interrupt-mask-notifiers-for-ioapic.patch
+kvm-reset-pit-irq-injection-logic-when-the-pit-irq-is-unmasked.patch
+kvm-mmu-handle-compound-pages-in-kvm_is_mmio_pfn.patch
+kvm-fix-kvm_vm_ioctl_deassign_device.patch
+kvm-vmx-update-necessary-state-when-guest-enters-long-mode.patch
+kvm-is_long_mode-should-check-for-efer.lma.patch
+x86-pat-remove-page-granularity-tracking-for-vm_insert_pfn-maps.patch
+input-gameport-fix-attach-driver-code.patch
+revert-console-ascii-glyph-1-1-mapping.patch
+virtio-fix-suspend-when-using-virtio_balloon.patch
+agp-zero-pages-before-sending-to-userspace.patch
+gso-fix-support-for-linear-packets.patch
+nfs-fix-the-xdr-iovec-calculation-in-nfs3_xdr_setaclargs.patch
+hugetlbfs-return-negative-error-code-for-bad-mount-option.patch
+scsi-mpt-suppress-debugobjects-warning.patch
+fix-i_mutex-vs.-readdir-handling-in-nfsd.patch
diff --git a/queue-2.6.29/sfc-match-calls-to-netif_napi_add-and-netif_napi_del.patch b/queue-2.6.29/sfc-match-calls-to-netif_napi_add-and-netif_napi_del.patch
new file mode 100644 (file)
index 0000000..d0ba81c
--- /dev/null
@@ -0,0 +1,60 @@
+From stable-bounces@linux.kernel.org  Wed Apr 15 00:40:55 2009
+From: Ben Hutchings <bhutchings@solarflare.com>
+To: Greg Kroah-Hartman <greg@kroah.com>, Chris Wright <chrisw@sous-sol.org>
+Date: Wed, 15 Apr 2009 01:39:03 +0100
+Message-Id: <1239755943.3203.10.camel@achroite>
+Cc: netdev@vger.kernel.org, linux-net-drivers@solarflare.com, stable@kernel.org
+Subject: sfc: Match calls to netif_napi_add() and netif_napi_del()
+
+upstream commit: 718cff1eec595ce6ab0635b8160a51ee37d9268d
+
+sfc could call netif_napi_add() multiple times for the same
+napi_struct, corrupting the list of napi_structs for the associated
+device and leading to a busy-loop on device removal.  Move the call to
+netif_napi_add() and add a call to netif_napi_del() in the obvious
+places.
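+
+In outline, the fix moves the add into the init path and pairs it with a
+del in the fini path (a condensed sketch using the names from the diff
+below; example_init_napi/example_fini_napi are stand-ins, not the real
+driver functions):
+
+  static void example_init_napi(struct efx_channel *channel)
+  {
+          /* exactly once per channel, independent of IFF_UP */
+          netif_napi_add(channel->napi_dev, &channel->napi_str,
+                         efx_poll, napi_weight);
+  }
+
+  static void example_fini_napi(struct efx_channel *channel)
+  {
+          if (channel->napi_dev)
+                  netif_napi_del(&channel->napi_str);  /* matching del */
+          channel->napi_dev = NULL;
+  }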
+
+[bhutchings: backport to 2.6.29]
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+We didn't spot this earlier because only netpoll would look at the list,
+but this changed with the addition of GRO in 2.6.29.  I have no excuse
+for not catching it during the .29 release cycle though.
+
+Ben.
+
+ drivers/net/sfc/efx.c |    8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/sfc/efx.c
++++ b/drivers/net/sfc/efx.c
+@@ -424,10 +424,6 @@ static void efx_start_channel(struct efx
+       EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);
+-      if (!(channel->efx->net_dev->flags & IFF_UP))
+-              netif_napi_add(channel->napi_dev, &channel->napi_str,
+-                             efx_poll, napi_weight);
+-
+       /* The interrupt handler for this channel may set work_pending
+        * as soon as we enable it.  Make sure it's cleared before
+        * then.  Similarly, make sure it sees the enabled flag set. */
+@@ -1273,6 +1269,8 @@ static int efx_init_napi(struct efx_nic 
+       efx_for_each_channel(channel, efx) {
+               channel->napi_dev = efx->net_dev;
++              netif_napi_add(channel->napi_dev, &channel->napi_str,
++                             efx_poll, napi_weight);
+               rc = efx_lro_init(&channel->lro_mgr, efx);
+               if (rc)
+                       goto err;
+@@ -1289,6 +1287,8 @@ static void efx_fini_napi(struct efx_nic
+       efx_for_each_channel(channel, efx) {
+               efx_lro_fini(&channel->lro_mgr);
++              if (channel->napi_dev)
++                      netif_napi_del(&channel->napi_str);
+               channel->napi_dev = NULL;
+       }
+ }
diff --git a/queue-2.6.29/sg-avoid-blk_put_request-blk_rq_unmap_user-in-interrupt.patch b/queue-2.6.29/sg-avoid-blk_put_request-blk_rq_unmap_user-in-interrupt.patch
new file mode 100644 (file)
index 0000000..714f4f6
--- /dev/null
@@ -0,0 +1,82 @@
+From c96952ed7031e7c576ecf90cf95b8ec099d5295a Mon Sep 17 00:00:00 2001
+Message-Id: <20090417111050D.fujita.tomonori@lab.ntt.co.jp>
+From: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Date: Wed, 4 Feb 2009 11:36:27 +0900
+Subject: SCSI: sg: avoid blk_put_request/blk_rq_unmap_user in interrupt
+
+upstream commit: c96952ed7031e7c576ecf90cf95b8ec099d5295a
+
+This fixes the following oops:
+
+http://marc.info/?l=linux-kernel&m=123316111415677&w=2
+
+You can reproduce this bug by interrupting a program before a sg
+response completes. This leads to the special sg state (the orphan
+state), then sg calls blk_put_request in interrupt (rq->end_io).
+
+The above bug report shows the recursive lock problem because sg calls
+blk_put_request in interrupt. We could call __blk_put_request here
+instead; however, we also need to handle blk_rq_unmap_user here, which
+can't be called in interrupt either.
+
+In the orphan state, we don't need to care about the data transfer
+(the program revoked the command) so adding 'just free the resource'
+mode to blk_rq_unmap_user is a possible option.
+
+I prefer to avoid complicating the blk mapping API when possible. I
+change the orphan state to call sg_finish_rem_req via
+execute_in_process_context. We hold sg_fd->kref so sg_fd doesn't go
+away until keventd_wq finishes our work. copy_from_user/to_user fails
+so blk_rq_unmap_user just frees the resource without the data
+transfer.
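+
+Condensed, the resulting pattern looks like the sketch below (names follow
+the diff further down; example_rq_end_io is a stand-in for the real
+completion callback, so treat this as an outline rather than the complete
+change):
+
+  /* runs later from keventd, in process context */
+  static void sg_rq_end_io_usercontext(struct work_struct *work)
+  {
+          struct sg_request *srp = container_of(work, struct sg_request,
+                                                ew.work);
+
+          sg_finish_rem_req(srp);         /* may unmap user pages */
+          kref_put(&srp->parentfp->f_ref, sg_remove_sfp);
+  }
+
+  /* completion callback, possibly running in interrupt context */
+  static void example_rq_end_io(struct sg_request *srp)
+  {
+          /* defer blk_put_request/blk_rq_unmap_user to process context */
+          execute_in_process_context(sg_rq_end_io_usercontext, &srp->ew);
+  }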
+
+Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
+Acked-by: Douglas Gilbert <dgilbert@interlog.com>
+Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/scsi/sg.c |   15 ++++++++++++---
+ 1 files changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 18d079e..cdd83cf 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -138,6 +138,7 @@ typedef struct sg_request {        /* SG_MAX_QUEUE requests outstanding per file */
+       volatile char done;     /* 0->before bh, 1->before read, 2->read */
+       struct request *rq;
+       struct bio *bio;
++      struct execute_work ew;
+ } Sg_request;
+ typedef struct sg_fd {                /* holds the state of a file descriptor */
+@@ -1234,6 +1235,15 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
+       return 0;
+ }
++static void sg_rq_end_io_usercontext(struct work_struct *work)
++{
++      struct sg_request *srp = container_of(work, struct sg_request, ew.work);
++      struct sg_fd *sfp = srp->parentfp;
++
++      sg_finish_rem_req(srp);
++      kref_put(&sfp->f_ref, sg_remove_sfp);
++}
++
+ /*
+  * This function is a "bottom half" handler that is called by the mid
+  * level when a command is completed (or has failed).
+@@ -1312,10 +1322,9 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
+                */
+               wake_up_interruptible(&sfp->read_wait);
+               kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN);
++              kref_put(&sfp->f_ref, sg_remove_sfp);
+       } else
+-              sg_finish_rem_req(srp); /* call with srp->done == 0 */
+-
+-      kref_put(&sfp->f_ref, sg_remove_sfp);
++              execute_in_process_context(sg_rq_end_io_usercontext, &srp->ew);
+ }
+ static struct file_operations sg_fops = {
+
diff --git a/queue-2.6.29/sparc64-fix-bug-in.patch b/queue-2.6.29/sparc64-fix-bug-in.patch
new file mode 100644 (file)
index 0000000..45ad656
--- /dev/null
@@ -0,0 +1,39 @@
+From stable-bounces@linux.kernel.org  Wed Apr  8 09:52:57 2009
+Date: Wed, 08 Apr 2009 02:51:47 -0700 (PDT)
+Message-Id: <20090408.025147.258869203.davem@davemloft.net>
+To: stable@kernel.org
+From: David Miller <davem@davemloft.net>
+Subject: sparc64: Fix bug in ("sparc64: Flush TLB before releasing pages.")
+
+[ No upstream commit, this regression was added only to 2.6.29.1 ]
+
+Unfortunately I merged an earlier version of commit
+b6816b706138c3870f03115071872cad824f90b4 ("sparc64: Flush TLB before
+releasing pages.") than what I actually tested and merged upstream.
+
+Simply diffing asm/tlb_64.h in Linus's tree vs. what ended up in
+2.6.29.1 confirms this.
+
+Sync things up to fix the BUG() triggers that some users are seeing.
+
+Reported-by: Dennis Gilmore <dennis@ausil.us>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ arch/sparc/include/asm/tlb_64.h |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/sparc/include/asm/tlb_64.h
++++ b/arch/sparc/include/asm/tlb_64.h
+@@ -57,9 +57,9 @@ static inline struct mmu_gather *tlb_gat
+ static inline void tlb_flush_mmu(struct mmu_gather *mp)
+ {
++      if (!mp->fullmm)
++              flush_tlb_pending();
+       if (mp->need_flush) {
+-              if (!mp->fullmm)
+-                      flush_tlb_pending();
+               free_pages_and_swap_cache(mp->pages, mp->pages_nr);
+               mp->pages_nr = 0;
+               mp->need_flush = 0;
diff --git a/queue-2.6.29/spi-spi_write_then_read-bugfixes.patch b/queue-2.6.29/spi-spi_write_then_read-bugfixes.patch
new file mode 100644 (file)
index 0000000..45fb4ef
--- /dev/null
@@ -0,0 +1,81 @@
+From stable-bounces@linux.kernel.org  Mon Apr 13 22:35:08 2009
+Date: Mon, 13 Apr 2009 22:35:03 GMT
+Message-Id: <200904132235.n3DMZ3KR025595@hera.kernel.org>
+From: David Brownell <dbrownell@users.sourceforge.net>
+To: jejb@kernel.org, stable@kernel.org
+Subject: spi: spi_write_then_read() bugfixes
+
+upstream commit: bdff549ebeff92b1a6952e5501caf16a6f8898c8
+
+The "simplify spi_write_then_read()" patch included two regressions from
+the 2.6.27 behaviors:
+
+ - The data it wrote out during the (full duplex) read side
+   of the transfer was not zeroed.
+
+ - It fails completely on half duplex hardware, such as
+   Microwire and most "3-wire" SPI variants.
+
+So, revert that patch.  A revised version should be submitted at some
+point, which can get the speedup on standard hardware (full duplex)
+without breaking on less-capable half-duplex stuff.
+
+Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
+Cc: <stable@kernel.org>                [2.6.28.x, 2.6.29.x]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/spi/spi.c |   22 ++++++++++++++--------
+ 1 file changed, 14 insertions(+), 8 deletions(-)
+
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -658,7 +658,7 @@ int spi_write_then_read(struct spi_devic
+       int                     status;
+       struct spi_message      message;
+-      struct spi_transfer     x;
++      struct spi_transfer     x[2];
+       u8                      *local_buf;
+       /* Use preallocated DMA-safe buffer.  We can't avoid copying here,
+@@ -669,9 +669,15 @@ int spi_write_then_read(struct spi_devic
+               return -EINVAL;
+       spi_message_init(&message);
+-      memset(&x, 0, sizeof x);
+-      x.len = n_tx + n_rx;
+-      spi_message_add_tail(&x, &message);
++      memset(x, 0, sizeof x);
++      if (n_tx) {
++              x[0].len = n_tx;
++              spi_message_add_tail(&x[0], &message);
++      }
++      if (n_rx) {
++              x[1].len = n_rx;
++              spi_message_add_tail(&x[1], &message);
++      }
+       /* ... unless someone else is using the pre-allocated buffer */
+       if (!mutex_trylock(&lock)) {
+@@ -682,15 +688,15 @@ int spi_write_then_read(struct spi_devic
+               local_buf = buf;
+       memcpy(local_buf, txbuf, n_tx);
+-      x.tx_buf = local_buf;
+-      x.rx_buf = local_buf;
++      x[0].tx_buf = local_buf;
++      x[1].rx_buf = local_buf + n_tx;
+       /* do the i/o */
+       status = spi_sync(spi, &message);
+       if (status == 0)
+-              memcpy(rxbuf, x.rx_buf + n_tx, n_rx);
++              memcpy(rxbuf, x[1].rx_buf, n_rx);
+-      if (x.tx_buf == buf)
++      if (x[0].tx_buf == buf)
+               mutex_unlock(&lock);
+       else
+               kfree(local_buf);
diff --git a/queue-2.6.29/splice-fix-deadlock-in-splicing-to-file.patch b/queue-2.6.29/splice-fix-deadlock-in-splicing-to-file.patch
new file mode 100644 (file)
index 0000000..2e46de8
--- /dev/null
@@ -0,0 +1,117 @@
+From stable-bounces@linux.kernel.org  Tue Apr  7 16:25:07 2009
+Date: Tue, 7 Apr 2009 16:25:02 GMT
+Message-Id: <200904071625.n37GP2O1014506@hera.kernel.org>
+From: Miklos Szeredi <mszeredi@suse.cz>
+To: jejb@kernel.org, stable@kernel.org
+Subject: splice: fix deadlock in splicing to file
+
+upstream commit: 7bfac9ecf0585962fe13584f5cf526d8c8e76f17
+
+There's a possible deadlock in generic_file_splice_write(),
+splice_from_pipe() and ocfs2_file_splice_write():
+
+ - task A calls generic_file_splice_write()
+ - this calls inode_double_lock(), which locks i_mutex on both
+   pipe->inode and target inode
+ - ordering depends on inode pointers, can happen that pipe->inode is
+   locked first
+ - __splice_from_pipe() needs more data, calls pipe_wait()
+ - this releases lock on pipe->inode, goes to interruptible sleep
+ - task B calls generic_file_splice_write(), similarly to the first
+ - this locks pipe->inode, then tries to lock inode, but that is
+   already held by task A
+ - task A is interrupted, it tries to lock pipe->inode, but fails, as
+   it is already held by task B
+ - ABBA deadlock
+
+Fix this by explicitly ordering locks: the outer lock must be on
+target inode and the inner lock (which is later unlocked and relocked)
+must be on pipe->inode.  This is OK, pipe inodes and target inodes
+form two nonoverlapping sets, generic_file_splice_write() and friends
+are not called with a target which is a pipe.
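+
+The locking rule, condensed (a sketch of the ordering only, with sd and
+actor as in splice_from_pipe(); error handling and the splice work itself
+are omitted):
+
+  /* outer lock: the target file's inode, never a pipe */
+  mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
+
+  /* inner lock: the pipe inode; pipe_wait() may drop and retake it */
+  if (pipe->inode)
+          mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
+
+  ret = __splice_from_pipe(pipe, &sd, actor);
+
+  if (pipe->inode)
+          mutex_unlock(&pipe->inode->i_mutex);
+  mutex_unlock(&inode->i_mutex);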
+
+Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
+Acked-by: Mark Fasheh <mfasheh@suse.com>
+Acked-by: Jens Axboe <jens.axboe@oracle.com>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ fs/ocfs2/file.c |    8 ++++++--
+ fs/splice.c     |   25 ++++++++++++++++++++-----
+ 2 files changed, 26 insertions(+), 7 deletions(-)
+
+--- a/fs/ocfs2/file.c
++++ b/fs/ocfs2/file.c
+@@ -1926,7 +1926,7 @@ static ssize_t ocfs2_file_splice_write(s
+                  out->f_path.dentry->d_name.len,
+                  out->f_path.dentry->d_name.name);
+-      inode_double_lock(inode, pipe->inode);
++      mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
+       ret = ocfs2_rw_lock(inode, 1);
+       if (ret < 0) {
+@@ -1941,12 +1941,16 @@ static ssize_t ocfs2_file_splice_write(s
+               goto out_unlock;
+       }
++      if (pipe->inode)
++              mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
+       ret = generic_file_splice_write_nolock(pipe, out, ppos, len, flags);
++      if (pipe->inode)
++              mutex_unlock(&pipe->inode->i_mutex);
+ out_unlock:
+       ocfs2_rw_unlock(inode, 1);
+ out:
+-      inode_double_unlock(inode, pipe->inode);
++      mutex_unlock(&inode->i_mutex);
+       mlog_exit(ret);
+       return ret;
+--- a/fs/splice.c
++++ b/fs/splice.c
+@@ -736,10 +736,19 @@ ssize_t splice_from_pipe(struct pipe_ino
+        * ->write_end. Most of the time, these expect i_mutex to
+        * be held. Since this may result in an ABBA deadlock with
+        * pipe->inode, we have to order lock acquiry here.
++       *
++       * Outer lock must be inode->i_mutex, as pipe_wait() will
++       * release and reacquire pipe->inode->i_mutex, AND inode must
++       * never be a pipe.
+        */
+-      inode_double_lock(inode, pipe->inode);
++      WARN_ON(S_ISFIFO(inode->i_mode));
++      mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
++      if (pipe->inode)
++              mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
+       ret = __splice_from_pipe(pipe, &sd, actor);
+-      inode_double_unlock(inode, pipe->inode);
++      if (pipe->inode)
++              mutex_unlock(&pipe->inode->i_mutex);
++      mutex_unlock(&inode->i_mutex);
+       return ret;
+ }
+@@ -830,11 +839,17 @@ generic_file_splice_write(struct pipe_in
+       };
+       ssize_t ret;
+-      inode_double_lock(inode, pipe->inode);
++      WARN_ON(S_ISFIFO(inode->i_mode));
++      mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
+       ret = file_remove_suid(out);
+-      if (likely(!ret))
++      if (likely(!ret)) {
++              if (pipe->inode)
++                      mutex_lock_nested(&pipe->inode->i_mutex, I_MUTEX_CHILD);
+               ret = __splice_from_pipe(pipe, &sd, pipe_to_file);
+-      inode_double_unlock(inode, pipe->inode);
++              if (pipe->inode)
++                      mutex_unlock(&pipe->inode->i_mutex);
++      }
++      mutex_unlock(&inode->i_mutex);
+       if (ret > 0) {
+               unsigned long nr_pages;
diff --git a/queue-2.6.29/sysctl-fix-suid_dumpable-and-lease-break-time-sysctls.patch b/queue-2.6.29/sysctl-fix-suid_dumpable-and-lease-break-time-sysctls.patch
new file mode 100644 (file)
index 0000000..8e5beb0
--- /dev/null
@@ -0,0 +1,66 @@
+From stable-bounces@linux.kernel.org  Fri Apr  3 04:35:11 2009
+Date: Fri, 3 Apr 2009 04:35:07 GMT
+Message-Id: <200904030435.n334Z75Q010311@hera.kernel.org>
+From: Matthew Wilcox <matthew@wil.cx>
+To: jejb@kernel.org, stable@kernel.org
+Subject: sysctl: fix suid_dumpable and lease-break-time sysctls
+
+upstream commit: 8e654fba4a376f436bdfe361fc5cdbc87ac09b35
+
+Arne de Bruijn points out that commit
+76fdbb25f963de5dc1e308325f0578a2f92b1c2d ("coredump masking: bound
+suid_dumpable sysctl") mistakenly limits lease-break-time instead of
+suid_dumpable.
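+
+For reference, a range-limited integer sysctl in this era looks like the
+sketch below (it mirrors the entry the patch moves onto suid_dumpable;
+only the relevant fields are shown):
+
+  static int zero;
+  static int two = 2;
+
+  /* fs_table[] entry: clamp writes to the range 0..2 */
+  {
+          .procname       = "suid_dumpable",
+          .data           = &suid_dumpable,
+          .maxlen         = sizeof(int),
+          .mode           = 0644,
+          .proc_handler   = &proc_dointvec_minmax,
+          .strategy       = &sysctl_intvec,
+          .extra1         = &zero,
+          .extra2         = &two,
+  },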
+
+Signed-off-by: Matthew Wilcox <matthew@wil.cx>
+Reported-by: Arne de Bruijn <kernelbt@arbruijn.dds.nl>
+Cc: Kawai, Hidehiro <hidehiro.kawai.ez@hitachi.com>
+Cc: Alexey Dobriyan <adobriyan@gmail.com>
+Cc: <stable@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ kernel/sysctl.c |   15 ++++++---------
+ 1 file changed, 6 insertions(+), 9 deletions(-)
+
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -95,12 +95,9 @@ static int sixty = 60;
+ static int neg_one = -1;
+ #endif
+-#if defined(CONFIG_MMU) && defined(CONFIG_FILE_LOCKING)
+-static int two = 2;
+-#endif
+-
+ static int zero;
+ static int one = 1;
++static int two = 2;
+ static unsigned long one_ul = 1;
+ static int one_hundred = 100;
+@@ -1373,10 +1370,7 @@ static struct ctl_table fs_table[] = {
+               .data           = &lease_break_time,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+-              .proc_handler   = &proc_dointvec_minmax,
+-              .strategy       = &sysctl_intvec,
+-              .extra1         = &zero,
+-              .extra2         = &two,
++              .proc_handler   = &proc_dointvec,
+       },
+ #endif
+ #ifdef CONFIG_AIO
+@@ -1417,7 +1411,10 @@ static struct ctl_table fs_table[] = {
+               .data           = &suid_dumpable,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+-              .proc_handler   = &proc_dointvec,
++              .proc_handler   = &proc_dointvec_minmax,
++              .strategy       = &sysctl_intvec,
++              .extra1         = &zero,
++              .extra2         = &two,
+       },
+ #if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
+       {
diff --git a/queue-2.6.29/tracing-core-fix-early-free-of-cpumasks.patch b/queue-2.6.29/tracing-core-fix-early-free-of-cpumasks.patch
new file mode 100644 (file)
index 0000000..514149e
--- /dev/null
@@ -0,0 +1,36 @@
+From 2fc1dfbe17e7705c55b7a99da995fa565e26f151 Mon Sep 17 00:00:00 2001
+Message-ID: <20090402011232.GA32066@goodmis.org>
+From: Frederic Weisbecker <fweisbec@gmail.com>
+Date: Mon, 16 Mar 2009 01:45:03 +0100
+Subject: tracing/core: fix early free of cpumasks
+
+upstream commit: 2fc1dfbe17e7705c55b7a99da995fa565e26f151
+
+Impact: fix crashes when tracing cpumasks
+
+During ring-buffer allocation, the cpumasks are allocated too,
+including the tracing cpumask and the per-cpu file mask handler.
+But these cpumasks are freed accidentally just after.
+Fix it.
+
+Reported-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
+LKML-Reference: <1237164303-11476-1-git-send-email-fweisbec@gmail.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ kernel/trace/trace.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3886,7 +3886,8 @@ __init static int tracer_alloc_buffers(v
+                                      &trace_panic_notifier);
+       register_die_notifier(&trace_die_notifier);
+-      ret = 0;
++
++      return 0;
+ out_free_cpumask:
+       free_cpumask_var(tracing_cpumask);
diff --git a/queue-2.6.29/tty-fix-leak-in-ti-usb.patch b/queue-2.6.29/tty-fix-leak-in-ti-usb.patch
new file mode 100644 (file)
index 0000000..242ffa0
--- /dev/null
@@ -0,0 +1,58 @@
+From cf5450930db0ae308584e5361f3345e0ff73e643 Mon Sep 17 00:00:00 2001
+Message-Id: <200904141759.n3EHx3EP018901@hera.kernel.org>
+From: Alan Cox <alan@lxorguk.ukuu.org.uk>
+Date: Tue, 14 Apr 2009 14:58:11 +0100
+Subject: tty: Fix leak in ti-usb
+
+upstream commit: cf5450930db0ae308584e5361f3345e0ff73e643
+
+If the ti-usb adapter returns a zero data length frame (which happens)
+then we leak a kref.  Found by Christoph Mair <christoph.mair@gmail.com>
+who proposed a patch.  The patch here is different as Christoph's patch
+didn't work for the case where tty = NULL and data arrived but Christoph
+did all the hard work chasing it down.
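+
+The shape of the fix, condensed (a sketch only; the real handler also
+dumps debug data and updates the rx count under tp_lock):
+
+  struct tty_struct *tty = tty_port_tty_get(&port->port);
+
+  if (tty) {
+          if (urb->actual_length)
+                  ti_recv(&urb->dev->dev, tty, urb->transfer_buffer,
+                          urb->actual_length);
+          /* drop the reference even for zero-length frames */
+          tty_kref_put(tty);
+  }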
+
+Signed-off-by: Alan Cox <alan@linux.intel.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/usb/serial/ti_usb_3410_5052.c |   26 ++++++++++++++------------
+ 1 file changed, 14 insertions(+), 12 deletions(-)
+
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -1215,20 +1215,22 @@ static void ti_bulk_in_callback(struct u
+       }
+       tty = tty_port_tty_get(&port->port);
+-      if (tty && urb->actual_length) {
+-              usb_serial_debug_data(debug, dev, __func__,
+-                      urb->actual_length, urb->transfer_buffer);
+-
+-              if (!tport->tp_is_open)
+-                      dbg("%s - port closed, dropping data", __func__);
+-              else
+-                      ti_recv(&urb->dev->dev, tty,
++      if (tty) {
++              if (urb->actual_length) {
++                      usb_serial_debug_data(debug, dev, __func__,
++                              urb->actual_length, urb->transfer_buffer);
++
++                      if (!tport->tp_is_open)
++                              dbg("%s - port closed, dropping data",
++                                      __func__);
++                      else
++                              ti_recv(&urb->dev->dev, tty,
+                                               urb->transfer_buffer,
+                                               urb->actual_length);
+-
+-              spin_lock(&tport->tp_lock);
+-              tport->tp_icount.rx += urb->actual_length;
+-              spin_unlock(&tport->tp_lock);
++                      spin_lock(&tport->tp_lock);
++                      tport->tp_icount.rx += urb->actual_length;
++                      spin_unlock(&tport->tp_lock);
++              }
+               tty_kref_put(tty);
+       }
diff --git a/queue-2.6.29/usb-fix-oops-in-cdc-wdm-in-case-of-malformed-descriptors.patch b/queue-2.6.29/usb-fix-oops-in-cdc-wdm-in-case-of-malformed-descriptors.patch
new file mode 100644 (file)
index 0000000..d0c3f13
--- /dev/null
@@ -0,0 +1,30 @@
+From stable-bounces@linux.kernel.org  Fri Apr 17 21:20:13 2009
+Date: Fri, 17 Apr 2009 21:20:06 GMT
+Message-Id: <200904172120.n3HLK6Ii011254@hera.kernel.org>
+From: Oliver Neukum <oliver@neukum.org>
+To: jejb@kernel.org, stable@kernel.org
+Subject: USB: fix oops in cdc-wdm in case of malformed descriptors
+
+upstream commit: e13c594f3a1fc2c78e7a20d1a07974f71e4b448f
+
+cdc-wdm needs to ignore extremely malformed descriptors.
+
+Signed-off-by: Oliver Neukum <oliver@neukum.org>
+Cc: stable <stable@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/usb/class/cdc-wdm.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -652,7 +652,7 @@ next_desc:
+       iface = &intf->altsetting[0];
+       ep = &iface->endpoint[0].desc;
+-      if (!usb_endpoint_is_int_in(ep)) {
++      if (!ep || !usb_endpoint_is_int_in(ep)) {
+               rv = -EINVAL;
+               goto err;
+       }
diff --git a/queue-2.6.29/usb-ftdi_sio-add-vendor-project-id-for-jeti-specbos-1201-spectrometer.patch b/queue-2.6.29/usb-ftdi_sio-add-vendor-project-id-for-jeti-specbos-1201-spectrometer.patch
new file mode 100644 (file)
index 0000000..e1fe5ed
--- /dev/null
@@ -0,0 +1,44 @@
+From stable-bounces@linux.kernel.org  Fri Apr 17 21:20:14 2009
+Date: Fri, 17 Apr 2009 21:20:07 GMT
+Message-Id: <200904172120.n3HLK7XY011287@hera.kernel.org>
+From: Peter Korsgaard <jacmet@sunsite.dk>
+To: jejb@kernel.org, stable@kernel.org
+Subject: USB: ftdi_sio: add vendor/project id for JETI specbos 1201 spectrometer
+
+upstream commit: ae27d84351f1f3568118318a8c40ff3a154bd629
+
+Signed-off-by: Peter Korsgaard <jacmet@sunsite.dk>
+Cc: stable <stable@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/usb/serial/ftdi_sio.c |    1 +
+ drivers/usb/serial/ftdi_sio.h |    7 +++++++
+ 2 files changed, 8 insertions(+)
+
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -668,6 +668,7 @@ static struct usb_device_id id_table_com
+       { USB_DEVICE(DE_VID, WHT_PID) },
+       { USB_DEVICE(ADI_VID, ADI_GNICE_PID),
+               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
++      { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
+       { },                                    /* Optional parameter entry */
+       { }                                     /* Terminating entry */
+ };
+--- a/drivers/usb/serial/ftdi_sio.h
++++ b/drivers/usb/serial/ftdi_sio.h
+@@ -913,6 +913,13 @@
+ #define ADI_GNICE_PID                 0xF000
+ /*
++ * JETI SPECTROMETER SPECBOS 1201
++ * http://www.jeti.com/products/sys/scb/scb1201.php
++ */
++#define JETI_VID              0x0c6c
++#define JETI_SPC1201_PID      0x04b2
++
++/*
+  *   BmRequestType:  1100 0000b
+  *   bRequest:       FTDI_E2_READ
+  *   wValue:         0
diff --git a/queue-2.6.29/usb-gadget-fix-ethernet-link-reports-to-ethtool.patch b/queue-2.6.29/usb-gadget-fix-ethernet-link-reports-to-ethtool.patch
new file mode 100644 (file)
index 0000000..bfe5000
--- /dev/null
@@ -0,0 +1,49 @@
+From stable-bounces@linux.kernel.org  Fri Apr 17 21:20:16 2009
+Date: Fri, 17 Apr 2009 21:20:10 GMT
+Message-Id: <200904172120.n3HLKAXh011306@hera.kernel.org>
+From: Jonathan McDowell <noodles@earth.li>
+To: jejb@kernel.org, stable@kernel.org
+Subject: usb gadget: fix ethernet link reports to ethtool
+
+upstream commit: 237e75bf1e558f7330f8deb167fa3116405bef2c
+
+The g_ether USB gadget driver currently decides whether or not there's a
+link to report back for eth_get_link based on whether the USB link speed
+is set. The USB gadget speed is, however, often set even before the device is
+enumerated. It seems more sensible to only report a "link" if we're
+actually connected to a host that wants to talk to us. The patch below
+does this for me - tested with the PXA27x UDC driver.
+
+Signed-off-by: Jonathan McDowell <noodles@earth.li>
+Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
+Cc: stable <stable@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/usb/gadget/u_ether.c |    8 +-------
+ 1 file changed, 1 insertion(+), 7 deletions(-)
+
+--- a/drivers/usb/gadget/u_ether.c
++++ b/drivers/usb/gadget/u_ether.c
+@@ -175,12 +175,6 @@ static void eth_get_drvinfo(struct net_d
+       strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
+ }
+-static u32 eth_get_link(struct net_device *net)
+-{
+-      struct eth_dev  *dev = netdev_priv(net);
+-      return dev->gadget->speed != USB_SPEED_UNKNOWN;
+-}
+-
+ /* REVISIT can also support:
+  *   - WOL (by tracking suspends and issuing remote wakeup)
+  *   - msglevel (implies updated messaging)
+@@ -189,7 +183,7 @@ static u32 eth_get_link(struct net_devic
+ static struct ethtool_ops ops = {
+       .get_drvinfo = eth_get_drvinfo,
+-      .get_link = eth_get_link
++      .get_link = ethtool_op_get_link,
+ };
+ static void defer_kevent(struct eth_dev *dev, int flag)
diff --git a/queue-2.6.29/usb-usb-storage-augment-unusual_devs-entry-for-simple-tech-datafab.patch b/queue-2.6.29/usb-usb-storage-augment-unusual_devs-entry-for-simple-tech-datafab.patch
new file mode 100644 (file)
index 0000000..ff3011f
--- /dev/null
@@ -0,0 +1,41 @@
+From stable-bounces@linux.kernel.org  Fri Apr 17 21:20:09 2009
+Date: Fri, 17 Apr 2009 21:20:03 GMT
+Message-Id: <200904172120.n3HLK3HR011240@hera.kernel.org>
+From: Alan Stern <stern@rowland.harvard.edu>
+To: jejb@kernel.org, stable@kernel.org
+Subject: USB: usb-storage: augment unusual_devs entry for Simple Tech/Datafab
+
+upstream commit: e4813eec8d47c8299d968bd5349dc881fa481c26
+
+This patch (as1227) adds the MAX_SECTORS_64 flag to the unusual_devs
+entry for the Simple Tech/Datafab controller.  This fixes Bugzilla
+#12882.
+
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Reported-and-tested-by: binbin <binbinsh@gmail.com>
+Cc: stable <stable@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/usb/storage/unusual_devs.h |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/storage/unusual_devs.h
++++ b/drivers/usb/storage/unusual_devs.h
+@@ -1218,12 +1218,14 @@ UNUSUAL_DEV(  0x07c4, 0xa400, 0x0000, 0x
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+               US_FL_FIX_INQUIRY | US_FL_FIX_CAPACITY ),
+-/* Reported by Rauch Wolke <rauchwolke@gmx.net> */
++/* Reported by Rauch Wolke <rauchwolke@gmx.net>
++ * and augmented by binbin <binbinsh@gmail.com> (Bugzilla #12882)
++ */
+ UNUSUAL_DEV(  0x07c4, 0xa4a5, 0x0000, 0xffff,
+               "Simple Tech/Datafab",
+               "CF+SM Reader",
+               US_SC_DEVICE, US_PR_DEVICE, NULL,
+-              US_FL_IGNORE_RESIDUE ),
++              US_FL_IGNORE_RESIDUE | US_FL_MAX_SECTORS_64 ),
+ /* Casio QV 2x00/3x00/4000/8000 digital still cameras are not conformant
+  * to the USB storage specification in two ways:
diff --git a/queue-2.6.29/v4l-dvb-cx88-prevent-general-protection-fault-on-rmmod.patch b/queue-2.6.29/v4l-dvb-cx88-prevent-general-protection-fault-on-rmmod.patch
new file mode 100644 (file)
index 0000000..5a88103
--- /dev/null
@@ -0,0 +1,78 @@
+From stable-bounces@linux.kernel.org  Thu Apr  2 12:54:15 2009
+Date: Thu, 2 Apr 2009 14:53:01 +0200
+From: Jean Delvare <khali@linux-fr.org>
+To: stable@kernel.org
+Message-ID: <20090402145301.29d3e950@hyperion.delvare>
+Subject: V4L/DVB (10943): cx88: Prevent general protection fault on rmmod
+
+upstream commit: 569b7ec73abf576f9a9e4070d213aadf2cce73cb
+
+When unloading the cx8800 driver I sometimes get a general protection
+fault. Analysis revealed a race in cx88_ir_stop(). It can be solved by
+using a delayed work instead of a timer for infrared input polling.
+
+Signed-off-by: Jean Delvare <khali@linux-fr.org>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+---
+ drivers/media/video/cx88/cx88-input.c |   25 +++++++------------------
+ 1 file changed, 7 insertions(+), 18 deletions(-)
+
+--- a/drivers/media/video/cx88/cx88-input.c
++++ b/drivers/media/video/cx88/cx88-input.c
+@@ -48,8 +48,7 @@ struct cx88_IR {
+       /* poll external decoder */
+       int polling;
+-      struct work_struct work;
+-      struct timer_list timer;
++      struct delayed_work work;
+       u32 gpio_addr;
+       u32 last_gpio;
+       u32 mask_keycode;
+@@ -143,27 +142,19 @@ static void cx88_ir_handle_key(struct cx
+       }
+ }
+-static void ir_timer(unsigned long data)
+-{
+-      struct cx88_IR *ir = (struct cx88_IR *)data;
+-
+-      schedule_work(&ir->work);
+-}
+-
+ static void cx88_ir_work(struct work_struct *work)
+ {
+-      struct cx88_IR *ir = container_of(work, struct cx88_IR, work);
++      struct cx88_IR *ir = container_of(work, struct cx88_IR, work.work);
+       cx88_ir_handle_key(ir);
+-      mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
++      schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
+ }
+ void cx88_ir_start(struct cx88_core *core, struct cx88_IR *ir)
+ {
+       if (ir->polling) {
+-              setup_timer(&ir->timer, ir_timer, (unsigned long)ir);
+-              INIT_WORK(&ir->work, cx88_ir_work);
+-              schedule_work(&ir->work);
++              INIT_DELAYED_WORK(&ir->work, cx88_ir_work);
++              schedule_delayed_work(&ir->work, 0);
+       }
+       if (ir->sampling) {
+               core->pci_irqmask |= PCI_INT_IR_SMPINT;
+@@ -179,10 +170,8 @@ void cx88_ir_stop(struct cx88_core *core
+               core->pci_irqmask &= ~PCI_INT_IR_SMPINT;
+       }
+-      if (ir->polling) {
+-              del_timer_sync(&ir->timer);
+-              flush_scheduled_work();
+-      }
++      if (ir->polling)
++              cancel_delayed_work_sync(&ir->work);
+ }
+ /* ---------------------------------------------------------------------- */
diff --git a/queue-2.6.29/vfs-skip-i_clear-state-inodes.patch b/queue-2.6.29/vfs-skip-i_clear-state-inodes.patch
new file mode 100644 (file)
index 0000000..9d3d858
--- /dev/null
@@ -0,0 +1,94 @@
+From stable-bounces@linux.kernel.org  Fri Apr  3 04:35:19 2009
+Date: Fri, 3 Apr 2009 04:35:14 GMT
+Message-Id: <200904030435.n334ZEOF010467@hera.kernel.org>
+From: Wu Fengguang <fengguang.wu@intel.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: vfs: skip I_CLEAR state inodes
+
+upstream commit: b6fac63cc1f52ec27f29fe6c6c8494a2ffac33fd
+
+clear_inode() will switch inode state from I_FREEING to I_CLEAR, and do so
+_outside_ of inode_lock.  So any I_FREEING testing is incomplete without a
+coupled testing of I_CLEAR.
+
+So add I_CLEAR tests to drop_pagecache_sb(), generic_sync_sb_inodes() and
+add_dquot_ref().
+
+Masayoshi MIZUMA discovered the bug in drop_pagecache_sb() and Jan Kara
+reminded us to fix the other two cases.
+
+Masayoshi MIZUMA has a nice panic flow:
+
+=====================================================================
+            [process A]               |        [process B]
+ |                                    |
+ |    prune_icache()                  | drop_pagecache()
+ |      spin_lock(&inode_lock)        |   drop_pagecache_sb()
+ |      inode->i_state |= I_FREEING;  |       |
+ |      spin_unlock(&inode_lock)      |       V
+ |          |                         |     spin_lock(&inode_lock)
+ |          V                         |         |
+ |      dispose_list()                |         |
+ |        list_del()                  |         |
+ |        clear_inode()               |         |
+ |          inode->i_state = I_CLEAR  |         |
+ |            |                       |         V
+ |            |                       |      if (inode->i_state & (I_FREEING|I_WILL_FREE))
+ |            |                       |              continue;           <==== NOT MATCH
+ |            |                       |
+ |            |                       | (DANGER from here on! Accessing disposing inode!)
+ |            |                       |
+ |            |                       |      __iget()
+ |            |                       |        list_move() <===== PANIC on poisoned list !!
+ V            V                       |
+(time)
+=====================================================================
+
+Reported-by: Masayoshi MIZUMA <m.mizuma@jp.fujitsu.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
+Cc: <stable@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+[chrisw: backport to 2.6.29]
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ fs/dquot.c        |    2 +-
+ fs/drop_caches.c  |    2 +-
+ fs/fs-writeback.c |    3 ++-
+ 3 files changed, 4 insertions(+), 3 deletions(-)
+
+--- a/fs/drop_caches.c
++++ b/fs/drop_caches.c
+@@ -18,7 +18,7 @@ static void drop_pagecache_sb(struct sup
+       spin_lock(&inode_lock);
+       list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+-              if (inode->i_state & (I_FREEING|I_WILL_FREE))
++              if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
+                       continue;
+               if (inode->i_mapping->nrpages == 0)
+                       continue;
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -538,7 +538,8 @@ void generic_sync_sb_inodes(struct super
+               list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+                       struct address_space *mapping;
+-                      if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW))
++                      if (inode->i_state &
++                                      (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
+                               continue;
+                       mapping = inode->i_mapping;
+                       if (mapping->nrpages == 0)
+--- a/fs/dquot.c
++++ b/fs/dquot.c
+@@ -793,7 +793,7 @@ static void add_dquot_ref(struct super_b
+                       continue;
+               if (!dqinit_needed(inode, type))
+                       continue;
+-              if (inode->i_state & (I_FREEING|I_WILL_FREE))
++              if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE))
+                       continue;
+               __iget(inode);
diff --git a/queue-2.6.29/virtio-fix-suspend-when-using-virtio_balloon.patch b/queue-2.6.29/virtio-fix-suspend-when-using-virtio_balloon.patch
new file mode 100644 (file)
index 0000000..512dff7
--- /dev/null
@@ -0,0 +1,33 @@
+From stable-bounces@linux.kernel.org  Sun Apr 19 18:05:09 2009
+Date: Sun, 19 Apr 2009 18:05:04 GMT
+Message-Id: <200904191805.n3JI54Wj018467@hera.kernel.org>
+From: Marcelo Tosatti <mtosatti@redhat.com>
+To: jejb@kernel.org, stable@kernel.org
+Subject: virtio: fix suspend when using virtio_balloon
+
+upstream commit: 84a139a985300901dfad99bd93c7345d180af860
+
+Break out of wait_event_interruptible() in the vballoon thread if freezing
+has been requested. Without this change vballoon refuses to stop and
+the system can't suspend.
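+
+The general shape of a freezer-aware kthread loop (a generic sketch; wq,
+have_work() and do_work() are placeholders, not names from the balloon
+driver):
+
+  while (!kthread_should_stop()) {
+          try_to_freeze();
+          wait_event_interruptible(wq,
+                                   have_work() ||
+                                   kthread_should_stop() ||
+                                   freezing(current));
+          if (have_work())
+                  do_work();
+  }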
+
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
+Cc: stable@kernel.org
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ drivers/virtio/virtio_balloon.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/virtio/virtio_balloon.c
++++ b/drivers/virtio/virtio_balloon.c
+@@ -190,7 +190,8 @@ static int balloon(void *_vballoon)
+               try_to_freeze();
+               wait_event_interruptible(vb->config_change,
+                                        (diff = towards_target(vb)) != 0
+-                                       || kthread_should_stop());
++                                       || kthread_should_stop()
++                                       || freezing(current));
+               if (diff > 0)
+                       fill_balloon(vb, diff);
+               else if (diff < 0)
diff --git a/queue-2.6.29/x86-disable-x86_ptrace_bts-for-now.patch b/queue-2.6.29/x86-disable-x86_ptrace_bts-for-now.patch
new file mode 100644 (file)
index 0000000..95cd320
--- /dev/null
@@ -0,0 +1,32 @@
+From d45b41ae8da0f54aec0eebcc6f893ba5f22a1e8e Mon Sep 17 00:00:00 2001
+Message-Id: <200904171735.n3HHZ3Cm012040@hera.kernel.org>
+From: Ingo Molnar <mingo@elte.hu>
+Date: Wed, 15 Apr 2009 23:15:14 +0200
+Subject: x86: disable X86_PTRACE_BTS for now
+
+upstream commit: d45b41ae8da0f54aec0eebcc6f893ba5f22a1e8e
+
+Oleg Nesterov found a couple of races in the ptrace-bts code
+and fixes are queued up for it but they did not get ready in time
+for the merge window. We'll merge them in v2.6.31 - until then
+mark the feature as CONFIG_BROKEN. There's no user-space yet
+making use of this so it's not a big issue.
+
+Cc: <stable@kernel.org>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+[chrisw: trivial 2.6.29 backport]
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ arch/x86/Kconfig.cpu |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/x86/Kconfig.cpu
++++ b/arch/x86/Kconfig.cpu
+@@ -523,6 +523,7 @@ config X86_PTRACE_BTS
+       bool "Branch Trace Store"
+       default y
+       depends on X86_DEBUGCTLMSR
++      depends on BROKEN
+       help
+         This adds a ptrace interface to the hardware's branch trace store.
diff --git a/queue-2.6.29/x86-fix-broken-irq-migration-logic-while-cleaning-up-multiple-vectors.patch b/queue-2.6.29/x86-fix-broken-irq-migration-logic-while-cleaning-up-multiple-vectors.patch
new file mode 100644 (file)
index 0000000..bb62dc0
--- /dev/null
@@ -0,0 +1,81 @@
+From stable-bounces@linux.kernel.org  Thu Apr  9 23:30:14 2009
+From: Suresh Siddha <suresh.b.siddha@intel.com>
+To: Chris Wright <chrisw@sous-sol.org>
+Date: Thu, 09 Apr 2009 15:49:41 -0700
+Message-Id: <1239317381.27006.8028.camel@localhost.localdomain>
+Cc: "hpa@linux.intel.com" <hpa@linux.intel.com>, stable@kernel.org
+Subject: x86: fix broken irq migration logic while cleaning up multiple vectors
+
+upstream commit: 68a8ca593fac82e336a792226272455901fa83df
+
+Impact: fix spurious IRQs
+
+During irq migration, we send a low priority interrupt to the previous
+irq destination. This happens in non interrupt-remapping case after interrupt
+starts arriving at new destination and in interrupt-remapping case after
+modifying and flushing the interrupt-remapping table entry caches.
+
+This low priority irq cleanup handler can clean up multiple vectors, as
+multiple irqs can be migrated at almost the same time. While
+there will be multiple invocations of the irq cleanup handler (one cleanup
+IPI for each irq migration), the first invocation of the cleanup handler
+can potentially clean up more than one vector (as the first invocation can
+see the requests for more than one vector cleanup). When we clean up multiple
+vectors during the first invocation of smp_irq_move_cleanup_interrupt(),
+other vectors that are to be cleaned up can still be pending in the local
+cpu's IRR (as smp_irq_move_cleanup_interrupt() runs with interrupts disabled).
+
+When we are ready to unhook a vector corresponding to an irq, check if that
+vector is registered in the local cpu's IRR. If so, skip that cleanup and
+send a self IPI with the cleanup vector, so that we give a chance to
+service the pending vector interrupt and then clean up that vector
+allocation once we execute the lowest priority handler.
+
+This fixes spurious interrupts seen when migrating multiple vectors
+at the same time.
+
+[ This is apparently possible even on conventional xapic, although to
+  the best of our knowledge it has never been seen.  The stable
+  maintainers may wish to consider this one for -stable. ]
+
+[suresh: backport to 2.6.29]
+Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
+Cc: Eric W. Biederman <ebiederm@xmission.com>
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Cc: stable@kernel.org
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+
+---
+ arch/x86/kernel/io_apic.c |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/arch/x86/kernel/io_apic.c
++++ b/arch/x86/kernel/io_apic.c
+@@ -2475,6 +2475,7 @@ asmlinkage void smp_irq_move_cleanup_int
+       me = smp_processor_id();
+       for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
+               unsigned int irq;
++              unsigned int irr;
+               struct irq_desc *desc;
+               struct irq_cfg *cfg;
+               irq = __get_cpu_var(vector_irq)[vector];
+@@ -2494,6 +2495,18 @@ asmlinkage void smp_irq_move_cleanup_int
+               if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+                       goto unlock;
++              irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
++              /*
++               * Check if the vector that needs to be cleanedup is
++               * registered at the cpu's IRR. If so, then this is not
++               * the best time to clean it up. Lets clean it up in the
++               * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
++               * to myself.
++               */
++              if (irr  & (1 << (vector % 32))) {
++                      send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
++                      goto unlock;
++              }
+               __get_cpu_var(vector_irq)[vector] = -1;
+               cfg->move_cleanup_count--;
+ unlock:
diff --git a/queue-2.6.29/x86-pat-remove-page-granularity-tracking-for-vm_insert_pfn-maps.patch b/queue-2.6.29/x86-pat-remove-page-granularity-tracking-for-vm_insert_pfn-maps.patch
new file mode 100644 (file)
index 0000000..1373ece
--- /dev/null
@@ -0,0 +1,216 @@
+From stable-bounces@linux.kernel.org  Sat Apr 18 09:09:28 2009
+Date: Sat, 18 Apr 2009 11:08:04 +0200
+From: Ingo Molnar <mingo@elte.hu>
+To: David John <davidjon@xenontk.org>, stable@kernel.org
+Message-ID: <20090418090804.GL7678@elte.hu>
+Cc: Yinghai Lu <yhlu.kernel@gmail.com>, Jesse Barnes <jbarnes@virtuousgeek.org>, "Pallipadi, Venkatesh" <venkatesh.pallipadi@intel.com>
+Subject: x86, PAT: Remove page granularity tracking for vm_insert_pfn maps
+
+From: Pallipadi, Venkatesh <venkatesh.pallipadi@intel.com>
+
+upstream commit: 4b065046273afa01ec8e3de7da407e8d3599251d
+
+This change resolves the problem of too many single page entries
+in pat_memtype_list and "freeing invalid memtype" errors with i915,
+reported here:
+
+  http://marc.info/?l=linux-kernel&m=123845244713183&w=2
+
+Remove the page-level granularity track and untrack of vm_insert_pfn.
+memtype tracking at page granularity does not scale, and a cleaner
+approach would be for the driver to request a type for a bigger
+IO address range or PCI io memory range for that device, either at
+mmap time or driver init time, and just use that type during
+vm_insert_pfn.
+
+This patch just removes the track/untrack of vm_insert_pfn. That
+means we will be in the same state as 2.6.28, with respect to these APIs.
+
+Newer APIs for the drivers to request a memtype for a bigger region
+is coming soon.
+
+[ Impact: fix Xorg startup warnings and hangs ]
+
+Reported-by: Arkadiusz Miskiewicz <a.miskiewicz@gmail.com>
+Tested-by: Arkadiusz Miskiewicz <a.miskiewicz@gmail.com>
+Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
+Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
+LKML-Reference: <20090408223716.GC3493@linux-os.sc.intel.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ arch/x86/mm/pat.c |   98 ++++++++++--------------------------------------------
+ 1 file changed, 19 insertions(+), 79 deletions(-)
+
+--- a/arch/x86/mm/pat.c
++++ b/arch/x86/mm/pat.c
+@@ -713,29 +713,28 @@ static void free_pfn_range(u64 paddr, un
+  *
+  * If the vma has a linear pfn mapping for the entire range, we get the prot
+  * from pte and reserve the entire vma range with single reserve_pfn_range call.
+- * Otherwise, we reserve the entire vma range, my ging through the PTEs page
+- * by page to get physical address and protection.
+  */
+ int track_pfn_vma_copy(struct vm_area_struct *vma)
+ {
+-      int retval = 0;
+-      unsigned long i, j;
+       resource_size_t paddr;
+       unsigned long prot;
+-      unsigned long vma_start = vma->vm_start;
+-      unsigned long vma_end = vma->vm_end;
+-      unsigned long vma_size = vma_end - vma_start;
++      unsigned long vma_size = vma->vm_end - vma->vm_start;
+       pgprot_t pgprot;
+       if (!pat_enabled)
+               return 0;
++      /*
++       * For now, only handle remap_pfn_range() vmas where
++       * is_linear_pfn_mapping() == TRUE. Handling of
++       * vm_insert_pfn() is TBD.
++       */
+       if (is_linear_pfn_mapping(vma)) {
+               /*
+                * reserve the whole chunk covered by vma. We need the
+                * starting address and protection from pte.
+                */
+-              if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
++              if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
+                       WARN_ON_ONCE(1);
+                       return -EINVAL;
+               }
+@@ -743,28 +742,7 @@ int track_pfn_vma_copy(struct vm_area_st
+               return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
+       }
+-      /* reserve entire vma page by page, using pfn and prot from pte */
+-      for (i = 0; i < vma_size; i += PAGE_SIZE) {
+-              if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
+-                      continue;
+-
+-              pgprot = __pgprot(prot);
+-              retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
+-              if (retval)
+-                      goto cleanup_ret;
+-      }
+       return 0;
+-
+-cleanup_ret:
+-      /* Reserve error: Cleanup partial reservation and return error */
+-      for (j = 0; j < i; j += PAGE_SIZE) {
+-              if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
+-                      continue;
+-
+-              free_pfn_range(paddr, PAGE_SIZE);
+-      }
+-
+-      return retval;
+ }
+ /*
+@@ -774,50 +752,28 @@ cleanup_ret:
+  * prot is passed in as a parameter for the new mapping. If the vma has a
+  * linear pfn mapping for the entire range reserve the entire vma range with
+  * single reserve_pfn_range call.
+- * Otherwise, we look t the pfn and size and reserve only the specified range
+- * page by page.
+- *
+- * Note that this function can be called with caller trying to map only a
+- * subrange/page inside the vma.
+  */
+ int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
+                       unsigned long pfn, unsigned long size)
+ {
+-      int retval = 0;
+-      unsigned long i, j;
+-      resource_size_t base_paddr;
+       resource_size_t paddr;
+-      unsigned long vma_start = vma->vm_start;
+-      unsigned long vma_end = vma->vm_end;
+-      unsigned long vma_size = vma_end - vma_start;
++      unsigned long vma_size = vma->vm_end - vma->vm_start;
+       if (!pat_enabled)
+               return 0;
++      /*
++       * For now, only handle remap_pfn_range() vmas where
++       * is_linear_pfn_mapping() == TRUE. Handling of
++       * vm_insert_pfn() is TBD.
++       */
+       if (is_linear_pfn_mapping(vma)) {
+               /* reserve the whole chunk starting from vm_pgoff */
+               paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
+               return reserve_pfn_range(paddr, vma_size, prot, 0);
+       }
+-      /* reserve page by page using pfn and size */
+-      base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
+-      for (i = 0; i < size; i += PAGE_SIZE) {
+-              paddr = base_paddr + i;
+-              retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
+-              if (retval)
+-                      goto cleanup_ret;
+-      }
+       return 0;
+-
+-cleanup_ret:
+-      /* Reserve error: Cleanup partial reservation and return error */
+-      for (j = 0; j < i; j += PAGE_SIZE) {
+-              paddr = base_paddr + j;
+-              free_pfn_range(paddr, PAGE_SIZE);
+-      }
+-
+-      return retval;
+ }
+ /*
+@@ -828,39 +784,23 @@ cleanup_ret:
+ void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
+                       unsigned long size)
+ {
+-      unsigned long i;
+       resource_size_t paddr;
+-      unsigned long prot;
+-      unsigned long vma_start = vma->vm_start;
+-      unsigned long vma_end = vma->vm_end;
+-      unsigned long vma_size = vma_end - vma_start;
++      unsigned long vma_size = vma->vm_end - vma->vm_start;
+       if (!pat_enabled)
+               return;
++      /*
++       * For now, only handle remap_pfn_range() vmas where
++       * is_linear_pfn_mapping() == TRUE. Handling of
++       * vm_insert_pfn() is TBD.
++       */
+       if (is_linear_pfn_mapping(vma)) {
+               /* free the whole chunk starting from vm_pgoff */
+               paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
+               free_pfn_range(paddr, vma_size);
+               return;
+       }
+-
+-      if (size != 0 && size != vma_size) {
+-              /* free page by page, using pfn and size */
+-              paddr = (resource_size_t)pfn << PAGE_SHIFT;
+-              for (i = 0; i < size; i += PAGE_SIZE) {
+-                      paddr = paddr + i;
+-                      free_pfn_range(paddr, PAGE_SIZE);
+-              }
+-      } else {
+-              /* free entire vma, page by page, using the pfn from pte */
+-              for (i = 0; i < vma_size; i += PAGE_SIZE) {
+-                      if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
+-                              continue;
+-
+-                      free_pfn_range(paddr, PAGE_SIZE);
+-              }
+-      }
+ }
+ pgprot_t pgprot_writecombine(pgprot_t prot)
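+
+For context, the simplified tracking above only covers vmas that were set up
+by remap_pfn_range(), i.e. those for which is_linear_pfn_mapping() is true.
+A minimal sketch of a driver mmap handler that produces such a mapping
+follows; the name mydrv_mmap and the MYDRV_PHYS_BASE address are assumptions
+for illustration only, not part of this patch:
+
+	#include <linux/fs.h>
+	#include <linux/mm.h>
+
+	#define MYDRV_PHYS_BASE 0xfd000000UL	/* hypothetical device address */
+
+	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
+	{
+		unsigned long size = vma->vm_end - vma->vm_start;
+
+		/*
+		 * One remap_pfn_range() call covering the whole vma: the vma
+		 * becomes a linear pfn mapping, so the PAT code reserves and
+		 * frees it as a single chunk in track_pfn_vma_new()/
+		 * untrack_pfn_vma() above.
+		 */
+		return remap_pfn_range(vma, vma->vm_start,
+				       MYDRV_PHYS_BASE >> PAGE_SHIFT,
+				       size, vma->vm_page_prot);
+	}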
diff --git a/queue-2.6.29/x86-setup-mark-esi-as-clobbered-in-e820-bios-call.patch b/queue-2.6.29/x86-setup-mark-esi-as-clobbered-in-e820-bios-call.patch
new file mode 100644 (file)
index 0000000..5fda96b
--- /dev/null
@@ -0,0 +1,45 @@
+From stable-bounces@linux.kernel.org  Wed Apr  1 20:40:07 2009
+Date: Wed, 1 Apr 2009 20:40:02 GMT
+Message-Id: <200904012040.n31Ke2vj002217@hera.kernel.org>
+From: Michael K. Johnson <johnsonm@rpath.com>
+To: stable@kernel.org
+Subject: x86, setup: mark %esi as clobbered in E820 BIOS call
+
+upstream commit: 01522df346f846906eaf6ca57148641476209909
+
+Jordan Hargrave diagnosed a BIOS clobbering %esi in the E820 call.
+That particular BIOS has been fixed, but there is a possibility that
+this is responsible for other occasional reports of early boot
+failure, and it does not hurt to add %esi to the clobbers.
+
+-stable candidate patch.
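+
+The fix follows the usual GCC extended-asm rule: any register the BIOS may
+trash has to appear either as an output operand or in the clobber list. A
+minimal sketch of that pattern (the function bios_e820_probe and its buf
+argument are hypothetical, not the actual boot code):
+
+	static u32 bios_e820_probe(void *buf)
+	{
+		u32 ret;
+
+		/* sketch: tell GCC that %esi does not survive the BIOS call */
+		asm volatile("int $0x15"
+			     : "=a" (ret)		/* output in %eax */
+			     : "a" (0xe820), "D" (buf)	/* inputs in %eax, %edi */
+			     : "esi", "memory");	/* clobbered by some BIOSes */
+		return ret;
+	}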
+
+Cc: Justin Forbes <jmforbes@linuxtx.org>
+Signed-off-by: Michael K Johnson <johnsonm@rpath.com>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Cc: stable@kernel.org
+Signed-off-by: Chris Wright <chrisw@sous-sol.org>
+---
+ arch/x86/boot/memory.c |    7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/boot/memory.c
++++ b/arch/x86/boot/memory.c
+@@ -27,13 +27,14 @@ static int detect_memory_e820(void)
+       do {
+               size = sizeof(struct e820entry);
+-              /* Important: %edx is clobbered by some BIOSes,
+-                 so it must be either used for the error output
++              /* Important: %edx and %esi are clobbered by some BIOSes,
++                 so they must be either used for the error output
+                  or explicitly marked clobbered. */
+               asm("int $0x15; setc %0"
+                   : "=d" (err), "+b" (next), "=a" (id), "+c" (size),
+                     "=m" (*desc)
+-                  : "D" (desc), "d" (SMAP), "a" (0xe820));
++                  : "D" (desc), "d" (SMAP), "a" (0xe820)
++                  : "esi");
+               /* BIOSes which terminate the chain with CF = 1 as opposed
+                  to %ebx = 0 don't always report the SMAP signature on