--- /dev/null
+From cab4d27764d5a8654212b3e96eb0ae793aec5b94 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <borislav.petkov@amd.com>
+Date: Thu, 11 Feb 2010 17:15:57 +0100
+Subject: amd64_edac: Do not falsely trigger kerneloops
+
+From: Borislav Petkov <borislav.petkov@amd.com>
+
+commit cab4d27764d5a8654212b3e96eb0ae793aec5b94 upstream.
+
+An unfortunate "WARNING" in the message amd64_edac dumps when the system
+doesn't support DRAM ECC or ECC checking is not enabled in the BIOS
+used to trigger kerneloops which qualified the message as an OOPS thus
+misleading the users. See, e.g.
+
+https://bugs.launchpad.net/ubuntu/+source/linux/+bug/422536
+http://bugzilla.kernel.org/show_bug.cgi?id=15238
+
+Downgrade the message level to KERN_NOTICE and fix the formulation.
+
+Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
+Acked-by: Doug Thompson <dougthompson@xmission.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/edac/amd64_edac.c | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/drivers/edac/amd64_edac.c
++++ b/drivers/edac/amd64_edac.c
+@@ -2801,10 +2801,11 @@ static void amd64_restore_ecc_error_repo
+ * the memory system completely. A command line option allows to force-enable
+ * hardware ECC later in amd64_enable_ecc_error_reporting().
+ */
+-static const char *ecc_warning =
+- "WARNING: ECC is disabled by BIOS. Module will NOT be loaded.\n"
+- " Either Enable ECC in the BIOS, or set 'ecc_enable_override'.\n"
+- " Also, use of the override can cause unknown side effects.\n";
++static const char *ecc_msg =
++ "ECC disabled in the BIOS or no ECC capability, module will not load.\n"
++ " Either enable ECC checking or force module loading by setting "
++ "'ecc_enable_override'.\n"
++ " (Note that use of the override may cause unknown side effects.)\n";
+
+ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
+ {
+@@ -2819,7 +2820,7 @@ static int amd64_check_ecc_enabled(struc
+
+ ecc_enabled = !!(value & K8_NBCFG_ECC_ENABLE);
+ if (!ecc_enabled)
+- amd64_printk(KERN_WARNING, "This node reports that Memory ECC "
++ amd64_printk(KERN_NOTICE, "This node reports that Memory ECC "
+ "is currently disabled, set F3x%x[22] (%s).\n",
+ K8_NBCFG, pci_name(pvt->misc_f3_ctl));
+ else
+@@ -2827,13 +2828,13 @@ static int amd64_check_ecc_enabled(struc
+
+ nb_mce_en = amd64_nb_mce_bank_enabled_on_node(pvt->mc_node_id);
+ if (!nb_mce_en)
+- amd64_printk(KERN_WARNING, "NB MCE bank disabled, set MSR "
++ amd64_printk(KERN_NOTICE, "NB MCE bank disabled, set MSR "
+ "0x%08x[4] on node %d to enable.\n",
+ MSR_IA32_MCG_CTL, pvt->mc_node_id);
+
+ if (!ecc_enabled || !nb_mce_en) {
+ if (!ecc_enable_override) {
+- amd64_printk(KERN_WARNING, "%s", ecc_warning);
++ amd64_printk(KERN_NOTICE, "%s", ecc_msg);
+ return -ENODEV;
+ }
+ ecc_enable_override = 0;
--- /dev/null
+From a8d7ac279743077965afeca0c9ed748507b68e89 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Mon, 1 Feb 2010 09:17:56 +1100
+Subject: crypto: padlock-sha - Add import/export support
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit a8d7ac279743077965afeca0c9ed748507b68e89 upstream.
+
+As the padlock driver for SHA uses a software fallback to perform
+partial hashing, it must implement custom import/export functions.
+Otherwise hmac which depends on import/export for prehashing will
+not work with padlock-sha.
+
+Reported-by: Wolfgang Walter <wolfgang.walter@stwm.de>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/crypto/padlock-sha.c | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+--- a/drivers/crypto/padlock-sha.c
++++ b/drivers/crypto/padlock-sha.c
+@@ -57,6 +57,23 @@ static int padlock_sha_update(struct sha
+ return crypto_shash_update(&dctx->fallback, data, length);
+ }
+
++static int padlock_sha_export(struct shash_desc *desc, void *out)
++{
++ struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
++
++ return crypto_shash_export(&dctx->fallback, out);
++}
++
++static int padlock_sha_import(struct shash_desc *desc, const void *in)
++{
++ struct padlock_sha_desc *dctx = shash_desc_ctx(desc);
++ struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm);
++
++ dctx->fallback.tfm = ctx->fallback;
++ dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP;
++ return crypto_shash_import(&dctx->fallback, in);
++}
++
+ static inline void padlock_output_block(uint32_t *src,
+ uint32_t *dst, size_t count)
+ {
+@@ -235,7 +252,10 @@ static struct shash_alg sha1_alg = {
+ .update = padlock_sha_update,
+ .finup = padlock_sha1_finup,
+ .final = padlock_sha1_final,
++ .export = padlock_sha_export,
++ .import = padlock_sha_import,
+ .descsize = sizeof(struct padlock_sha_desc),
++ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-padlock",
+@@ -256,7 +276,10 @@ static struct shash_alg sha256_alg = {
+ .update = padlock_sha_update,
+ .finup = padlock_sha256_finup,
+ .final = padlock_sha256_final,
++ .export = padlock_sha_export,
++ .import = padlock_sha_import,
+ .descsize = sizeof(struct padlock_sha_desc),
++ .statesize = sizeof(struct sha256_state),
+ .base = {
+ .cra_name = "sha256",
+ .cra_driver_name = "sha256-padlock",
--- /dev/null
+From b8ed5dd54895647c2690575aad6f07748c2c618a Mon Sep 17 00:00:00 2001
+From: Stefan Haberland <stefan.haberland@de.ibm.com>
+Date: Mon, 7 Dec 2009 12:51:52 +0100
+Subject: [S390] dasd: remove strings from s390dbf
+
+From: Stefan Haberland <stefan.haberland@de.ibm.com>
+
+commit b8ed5dd54895647c2690575aad6f07748c2c618a upstream.
+
+Remove strings from s390 debugfeature entries that could lead to a
+crash when the data is read from dbf because the strings do not exist
+any more.
+
+Signed-off-by: Stefan Haberland <stefan.haberland@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/s390/block/dasd.c | 22 ++++++++++----------
+ drivers/s390/block/dasd_eckd.c | 44 +++++++++++++++++------------------------
+ drivers/s390/block/dasd_fba.c | 10 +++------
+ drivers/s390/block/dasd_int.h | 10 +++++++++
+ 4 files changed, 44 insertions(+), 42 deletions(-)
+
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -994,10 +994,9 @@ static void dasd_handle_killed_request(s
+ return;
+ cqr = (struct dasd_ccw_req *) intparm;
+ if (cqr->status != DASD_CQR_IN_IO) {
+- DBF_EVENT(DBF_DEBUG,
+- "invalid status in handle_killed_request: "
+- "bus_id %s, status %02x",
+- dev_name(&cdev->dev), cqr->status);
++ DBF_EVENT_DEVID(DBF_DEBUG, cdev,
++ "invalid status in handle_killed_request: "
++ "%02x", cqr->status);
+ return;
+ }
+
+@@ -1045,12 +1044,13 @@ void dasd_int_handler(struct ccw_device
+ case -EIO:
+ break;
+ case -ETIMEDOUT:
+- DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n",
+- __func__, dev_name(&cdev->dev));
++ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
++ "request timed out\n", __func__);
+ break;
+ default:
+- DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n",
+- __func__, dev_name(&cdev->dev), PTR_ERR(irb));
++ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
++ "unknown error %ld\n", __func__,
++ PTR_ERR(irb));
+ }
+ dasd_handle_killed_request(cdev, intparm);
+ return;
+@@ -2217,9 +2217,9 @@ int dasd_generic_probe(struct ccw_device
+ }
+ ret = dasd_add_sysfs_files(cdev);
+ if (ret) {
+- DBF_EVENT(DBF_WARNING,
+- "dasd_generic_probe: could not add sysfs entries "
+- "for %s\n", dev_name(&cdev->dev));
++ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
++ "dasd_generic_probe: could not add "
++ "sysfs entries");
+ return ret;
+ }
+ cdev->handler = &dasd_int_handler;
+--- a/drivers/s390/block/dasd_eckd.c
++++ b/drivers/s390/block/dasd_eckd.c
+@@ -88,9 +88,9 @@ dasd_eckd_probe (struct ccw_device *cdev
+ /* set ECKD specific ccw-device options */
+ ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE);
+ if (ret) {
+- DBF_EVENT(DBF_WARNING,
+- "dasd_eckd_probe: could not set ccw-device options "
+- "for %s\n", dev_name(&cdev->dev));
++ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
++ "dasd_eckd_probe: could not set "
++ "ccw-device options");
+ return ret;
+ }
+ ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
+@@ -885,16 +885,15 @@ static int dasd_eckd_read_conf(struct da
+ rc = dasd_eckd_read_conf_lpm(device, &conf_data,
+ &conf_len, lpm);
+ if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
+- DBF_EVENT(DBF_WARNING,
++ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Read configuration data returned "
+- "error %d for device: %s", rc,
+- dev_name(&device->cdev->dev));
++ "error %d", rc);
+ return rc;
+ }
+ if (conf_data == NULL) {
+- DBF_EVENT(DBF_WARNING, "No configuration "
+- "data retrieved for device: %s",
+- dev_name(&device->cdev->dev));
++ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
++ "No configuration data "
++ "retrieved");
+ continue; /* no error */
+ }
+ /* save first valid configuration data */
+@@ -941,9 +940,8 @@ static int dasd_eckd_read_features(struc
+ sizeof(struct dasd_rssd_features)),
+ device);
+ if (IS_ERR(cqr)) {
+- DBF_EVENT(DBF_WARNING, "Could not allocate initialization "
+- "request for device: %s",
+- dev_name(&device->cdev->dev));
++ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
++ "allocate initialization request");
+ return PTR_ERR(cqr);
+ }
+ cqr->startdev = device;
+@@ -1071,10 +1069,8 @@ static int dasd_eckd_validate_server(str
+ /* may be requested feature is not available on server,
+ * therefore just report error and go ahead */
+ private = (struct dasd_eckd_private *) device->private;
+- DBF_EVENT(DBF_WARNING, "PSF-SSC on storage subsystem %s.%s.%04x "
+- "returned rc=%d for device: %s",
+- private->uid.vendor, private->uid.serial,
+- private->uid.ssid, rc, dev_name(&device->cdev->dev));
++ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
++ "returned rc=%d", private->uid.ssid, rc);
+ /* RE-Read Configuration Data */
+ return dasd_eckd_read_conf(device);
+ }
+@@ -1123,9 +1119,9 @@ dasd_eckd_check_characteristics(struct d
+ if (private->uid.type == UA_BASE_DEVICE) {
+ block = dasd_alloc_block();
+ if (IS_ERR(block)) {
+- DBF_EVENT(DBF_WARNING, "could not allocate dasd "
+- "block structure for device: %s",
+- dev_name(&device->cdev->dev));
++ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
++ "could not allocate dasd "
++ "block structure");
+ rc = PTR_ERR(block);
+ goto out_err1;
+ }
+@@ -1153,9 +1149,8 @@ dasd_eckd_check_characteristics(struct d
+ rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
+ &private->rdc_data, 64);
+ if (rc) {
+- DBF_EVENT(DBF_WARNING,
+- "Read device characteristics failed, rc=%d for "
+- "device: %s", rc, dev_name(&device->cdev->dev));
++ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
++ "Read device characteristic failed, rc=%d", rc);
+ goto out_err3;
+ }
+ /* find the vaild cylinder size */
+@@ -3253,9 +3248,8 @@ int dasd_eckd_restore_device(struct dasd
+ rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
+ &temp_rdc_data, 64);
+ if (rc) {
+- DBF_EVENT(DBF_WARNING,
+- "Read device characteristics failed, rc=%d for "
+- "device: %s", rc, dev_name(&device->cdev->dev));
++ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
++ "Read device characteristic failed, rc=%d", rc);
+ goto out_err;
+ }
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+--- a/drivers/s390/block/dasd_fba.c
++++ b/drivers/s390/block/dasd_fba.c
+@@ -141,9 +141,8 @@ dasd_fba_check_characteristics(struct da
+ }
+ block = dasd_alloc_block();
+ if (IS_ERR(block)) {
+- DBF_EVENT(DBF_WARNING, "could not allocate dasd block "
+- "structure for device: %s",
+- dev_name(&device->cdev->dev));
++ DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", "could not allocate "
++ "dasd block structure");
+ device->private = NULL;
+ kfree(private);
+ return PTR_ERR(block);
+@@ -155,9 +154,8 @@ dasd_fba_check_characteristics(struct da
+ rc = dasd_generic_read_dev_chars(device, DASD_FBA_MAGIC,
+ &private->rdc_data, 32);
+ if (rc) {
+- DBF_EVENT(DBF_WARNING, "Read device characteristics returned "
+- "error %d for device: %s",
+- rc, dev_name(&device->cdev->dev));
++ DBF_EVENT_DEVID(DBF_WARNING, cdev, "Read device "
++ "characteristics returned error %d", rc);
+ device->block = NULL;
+ dasd_free_block(block);
+ device->private = NULL;
+--- a/drivers/s390/block/dasd_int.h
++++ b/drivers/s390/block/dasd_int.h
+@@ -108,6 +108,16 @@ do { \
+ d_data); \
+ } while(0)
+
++#define DBF_EVENT_DEVID(d_level, d_cdev, d_str, d_data...) \
++do { \
++ struct ccw_dev_id __dev_id; \
++ ccw_device_get_id(d_cdev, &__dev_id); \
++ debug_sprintf_event(dasd_debug_area, \
++ d_level, \
++ "0.%x.%04x " d_str "\n", \
++ __dev_id.ssid, __dev_id.devno, d_data); \
++} while (0)
++
+ #define DBF_EXC(d_level, d_str, d_data...)\
+ do { \
+ debug_sprintf_exception(dasd_debug_area, \
--- /dev/null
+From cebbert@redhat.com Fri Feb 12 13:29:54 2010
+From: Len Brown <len.brown@intel.com>
+Date: Sat, 30 Jan 2010 18:01:42 -0500
+Subject: dell-wmi, hp-wmi, msi-wmi: check wmi_get_event_data() return value
+To: stable@kernel.org
+Cc: Len Brown <len.brown@intel.com>
+Message-ID: <201002030139.o131dtPR012382@int-mx03.intmail.prod.int.phx2.redhat.com>
+
+
+From: Len Brown <len.brown@intel.com>
+
+commit fda11e61ff8a4e3a8ebbd434e46560b67cc0ca9d upstream
+
+[ backport to 2.6.32 ]
+
+When acpi_evaluate_object() is passed ACPI_ALLOCATE_BUFFER,
+the caller must kfree the returned buffer if AE_OK is returned.
+
+The callers of wmi_get_event_data() pass ACPI_ALLOCATE_BUFFER,
+and thus must check its return value before accessing
+or kfree() on the buffer.
+
+Signed-off-by: Len Brown <len.brown@intel.com>
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/platform/x86/dell-wmi.c | 7 ++++++-
+ drivers/platform/x86/hp-wmi.c | 7 ++++++-
+ 2 files changed, 12 insertions(+), 2 deletions(-)
+
+--- a/drivers/platform/x86/dell-wmi.c
++++ b/drivers/platform/x86/dell-wmi.c
+@@ -158,8 +158,13 @@ static void dell_wmi_notify(u32 value, v
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ static struct key_entry *key;
+ union acpi_object *obj;
++ acpi_status status;
+
+- wmi_get_event_data(value, &response);
++ status = wmi_get_event_data(value, &response);
++ if (status != AE_OK) {
++ printk(KERN_INFO "dell-wmi: bad event status 0x%x\n", status);
++ return;
++ }
+
+ obj = (union acpi_object *)response.pointer;
+
+--- a/drivers/platform/x86/hp-wmi.c
++++ b/drivers/platform/x86/hp-wmi.c
+@@ -334,8 +334,13 @@ static void hp_wmi_notify(u32 value, voi
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ static struct key_entry *key;
+ union acpi_object *obj;
++ acpi_status status;
+
+- wmi_get_event_data(value, &response);
++ status = wmi_get_event_data(value, &response);
++ if (status != AE_OK) {
++ printk(KERN_INFO "hp-wmi: bad event status 0x%x\n", status);
++ return;
++ }
+
+ obj = (union acpi_object *)response.pointer;
+
--- /dev/null
+From cebbert@redhat.com Fri Feb 12 13:31:07 2010
+From: Wu Fengguang <fengguang.wu@intel.com>
+Date: Fri, 5 Feb 2010 15:00:52 -0500
+Subject: /dev/mem: introduce size_inside_page()
+To: stable@kernel.org
+Cc: Wu Fengguang <fengguang.wu@intel.com>
+Message-ID: <201002052006.o15K60sg029346@int-mx03.intmail.prod.int.phx2.redhat.com>
+
+
+From: Wu Fengguang <fengguang.wu@intel.com>
+
+commit f222318e9c3a315723e3524fb9d6566b2430db44 upstream
+
+/dev/mem: introduce size_inside_page()
+
+[ cebbert@redhat.com : backport to 2.6.32 ]
+[ subset of original patch, for just /dev/kmem ]
+
+Introduce size_inside_page() to replace duplicate /dev/mem code.
+
+Also apply it to /dev/kmem, whose alignment logic was buggy.
+
+Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
+Acked-by: Andi Kleen <ak@linux.intel.com>
+Cc: Marcelo Tosatti <mtosatti@redhat.com>
+Cc: Greg Kroah-Hartman <gregkh@suse.de>
+Cc: Mark Brown <broonie@opensource.wolfsonmicro.com>
+Cc: Johannes Berg <johannes@sipsolutions.net>
+Cc: Avi Kivity <avi@qumranet.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/mem.c | 40 +++++++++++++++++-----------------------
+ 1 file changed, 17 insertions(+), 23 deletions(-)
+
+--- a/drivers/char/mem.c
++++ b/drivers/char/mem.c
+@@ -35,6 +35,19 @@
+ # include <linux/efi.h>
+ #endif
+
++static inline unsigned long size_inside_page(unsigned long start,
++ unsigned long size)
++{
++ unsigned long sz;
++
++ if (-start & (PAGE_SIZE - 1))
++ sz = -start & (PAGE_SIZE - 1);
++ else
++ sz = PAGE_SIZE;
++
++ return min_t(unsigned long, sz, size);
++}
++
+ /*
+ * Architectures vary in how they handle caching for addresses
+ * outside of main memory.
+@@ -430,15 +443,7 @@ static ssize_t read_kmem(struct file *fi
+ }
+ #endif
+ while (low_count > 0) {
+- /*
+- * Handle first page in case it's not aligned
+- */
+- if (-p & (PAGE_SIZE - 1))
+- sz = -p & (PAGE_SIZE - 1);
+- else
+- sz = PAGE_SIZE;
+-
+- sz = min_t(unsigned long, sz, low_count);
++ sz = size_inside_page(p, low_count);
+
+ /*
+ * On ia64 if a page has been mapped somewhere as
+@@ -462,10 +467,8 @@ static ssize_t read_kmem(struct file *fi
+ if (!kbuf)
+ return -ENOMEM;
+ while (count > 0) {
+- int len = count;
++ int len = size_inside_page(p, count);
+
+- if (len > PAGE_SIZE)
+- len = PAGE_SIZE;
+ len = vread(kbuf, (char *)p, len);
+ if (!len)
+ break;
+@@ -510,15 +513,8 @@ do_write_kmem(void *p, unsigned long rea
+
+ while (count > 0) {
+ char *ptr;
+- /*
+- * Handle first page in case it's not aligned
+- */
+- if (-realp & (PAGE_SIZE - 1))
+- sz = -realp & (PAGE_SIZE - 1);
+- else
+- sz = PAGE_SIZE;
+
+- sz = min_t(unsigned long, sz, count);
++ sz = size_inside_page(realp, count);
+
+ /*
+ * On ia64 if a page has been mapped somewhere as
+@@ -578,10 +574,8 @@ static ssize_t write_kmem(struct file *
+ if (!kbuf)
+ return wrote ? wrote : -ENOMEM;
+ while (count > 0) {
+- int len = count;
++ int len = size_inside_page(p, count);
+
+- if (len > PAGE_SIZE)
+- len = PAGE_SIZE;
+ if (len) {
+ written = copy_from_user(kbuf, buf, len);
+ if (written) {
--- /dev/null
+From cebbert@redhat.com Fri Feb 12 13:33:09 2010
+From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Date: Fri, 5 Feb 2010 15:01:09 -0500
+Subject: devmem: check vmalloc address on kmem read/write
+To: stable@kernel.org
+Cc: Wu Fengguang <fengguang.wu@intel.com>
+Message-ID: <201002052006.o15K60se029346@int-mx03.intmail.prod.int.phx2.redhat.com>
+
+
+From: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+
+commit 325fda71d0badc1073dc59f12a948f24ff05796a
+
+[ cebbert@redhat.com : backport to 2.6.32 ]
+
+devmem: check vmalloc address on kmem read/write
+
+Otherwise vmalloc_to_page() will BUG().
+
+This also makes the kmem read/write implementation aligned with mem(4):
+"References to nonexistent locations cause errors to be returned." Here we
+return -ENXIO (inspired by Hugh) if no bytes have been transfered to/from
+user space, otherwise return partial read/write results.
+
+Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
+Cc: Greg Kroah-Hartman <gregkh@suse.de>
+Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/mem.c | 28 ++++++++++++++++++----------
+ 1 file changed, 18 insertions(+), 10 deletions(-)
+
+--- a/drivers/char/mem.c
++++ b/drivers/char/mem.c
+@@ -421,6 +421,7 @@ static ssize_t read_kmem(struct file *fi
+ unsigned long p = *ppos;
+ ssize_t low_count, read, sz;
+ char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
++ int err = 0;
+
+ read = 0;
+ if (p < (unsigned long) high_memory) {
+@@ -469,12 +470,16 @@ static ssize_t read_kmem(struct file *fi
+ while (count > 0) {
+ int len = size_inside_page(p, count);
+
++ if (!is_vmalloc_or_module_addr((void *)p)) {
++ err = -ENXIO;
++ break;
++ }
+ len = vread(kbuf, (char *)p, len);
+ if (!len)
+ break;
+ if (copy_to_user(buf, kbuf, len)) {
+- free_page((unsigned long)kbuf);
+- return -EFAULT;
++ err = -EFAULT;
++ break;
+ }
+ count -= len;
+ buf += len;
+@@ -483,8 +488,8 @@ static ssize_t read_kmem(struct file *fi
+ }
+ free_page((unsigned long)kbuf);
+ }
+- *ppos = p;
+- return read;
++ *ppos = p;
++ return read ? read : err;
+ }
+
+
+@@ -553,6 +558,7 @@ static ssize_t write_kmem(struct file *
+ ssize_t virtr = 0;
+ ssize_t written;
+ char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
++ int err = 0;
+
+ if (p < (unsigned long) high_memory) {
+
+@@ -576,13 +582,15 @@ static ssize_t write_kmem(struct file *
+ while (count > 0) {
+ int len = size_inside_page(p, count);
+
++ if (!is_vmalloc_or_module_addr((void *)p)) {
++ err = -ENXIO;
++ break;
++ }
+ if (len) {
+ written = copy_from_user(kbuf, buf, len);
+ if (written) {
+- if (wrote + virtr)
+- break;
+- free_page((unsigned long)kbuf);
+- return -EFAULT;
++ err = -EFAULT;
++ break;
+ }
+ }
+ len = vwrite(kbuf, (char *)p, len);
+@@ -594,8 +602,8 @@ static ssize_t write_kmem(struct file *
+ free_page((unsigned long)kbuf);
+ }
+
+- *ppos = p;
+- return virtr + wrote;
++ *ppos = p;
++ return virtr + wrote ? : err;
+ }
+ #endif
+
--- /dev/null
+From cebbert@redhat.com Fri Feb 12 13:45:18 2010
+From: Wu Fengguang <fengguang.wu@intel.com>
+Date: Fri, 5 Feb 2010 15:02:13 -0500
+Subject: devmem: fix kmem write bug on memory holes
+To: stable@kernel.org
+Cc: Wu Fengguang <fengguang.wu@intel.com>
+Message-ID: <201002052006.o15K60sf029346@int-mx03.intmail.prod.int.phx2.redhat.com>
+
+
+From: Wu Fengguang <fengguang.wu@intel.com>
+
+
+commit c85e9a97c4102ce2e83112da850d838cfab5ab13 upstream
+
+devmem: fix kmem write bug on memory holes
+
+[ cebbert@redhat.com : backport to 2.6.32 ]
+
+write_kmem() used to assume vwrite() always return the full buffer length.
+However now vwrite() could return 0 to indicate memory hole. This
+creates a bug that "buf" is not advanced accordingly.
+
+Fix it to simply ignore the return value, hence the memory hole.
+
+Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
+Cc: Andi Kleen <andi@firstfloor.org>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Christoph Lameter <cl@linux-foundation.org>
+Cc: Ingo Molnar <mingo@elte.hu>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Nick Piggin <npiggin@suse.de>
+Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Cc: <stable@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/mem.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/char/mem.c
++++ b/drivers/char/mem.c
+@@ -593,7 +593,7 @@ static ssize_t write_kmem(struct file *
+ break;
+ }
+ }
+- len = vwrite(kbuf, (char *)p, len);
++ vwrite(kbuf, (char *)p, len);
+ count -= len;
+ buf += len;
+ virtr += len;
--- /dev/null
+From 48764bf43f746113fc77877d7e80f2df23ca4cbb Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Tue, 15 Sep 2009 22:57:32 +0200
+Subject: drm/i915: add i915_lp_ring_sync helper
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit 48764bf43f746113fc77877d7e80f2df23ca4cbb upstream.
+
+This just waits until the hw passed the current ring position with
+cmd execution. This slightly changes the existing i915_wait_request
+function to make uninterruptible waiting possible - no point in
+returning to userspace while mucking around with the overlay, that
+piece of hw is just too fragile.
+
+Also replace a magic 0 with the symbolic constant (and kill the then
+superfluous comment) while I was looking at the code.
+
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/i915_drv.h | 1
+ drivers/gpu/drm/i915/i915_gem.c | 49 +++++++++++++++++++++++++++++++---------
+ include/drm/drm_os_linux.h | 2 -
+ 3 files changed, 41 insertions(+), 11 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -825,6 +825,7 @@ void i915_gem_cleanup_ringbuffer(struct
+ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
+ unsigned long end);
+ int i915_gem_idle(struct drm_device *dev);
++int i915_lp_ring_sync(struct drm_device *dev);
+ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+ int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+ int write);
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -1809,12 +1809,8 @@ i915_gem_retire_work_handler(struct work
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+-/**
+- * Waits for a sequence number to be signaled, and cleans up the
+- * request and object lists appropriately for that event.
+- */
+ static int
+-i915_wait_request(struct drm_device *dev, uint32_t seqno)
++i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
+ {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 ier;
+@@ -1841,10 +1837,15 @@ i915_wait_request(struct drm_device *dev
+
+ dev_priv->mm.waiting_gem_seqno = seqno;
+ i915_user_irq_get(dev);
+- ret = wait_event_interruptible(dev_priv->irq_queue,
+- i915_seqno_passed(i915_get_gem_seqno(dev),
+- seqno) ||
+- atomic_read(&dev_priv->mm.wedged));
++ if (interruptible)
++ ret = wait_event_interruptible(dev_priv->irq_queue,
++ i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
++ atomic_read(&dev_priv->mm.wedged));
++ else
++ wait_event(dev_priv->irq_queue,
++ i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
++ atomic_read(&dev_priv->mm.wedged));
++
+ i915_user_irq_put(dev);
+ dev_priv->mm.waiting_gem_seqno = 0;
+
+@@ -1868,6 +1869,34 @@ i915_wait_request(struct drm_device *dev
+ return ret;
+ }
+
++/**
++ * Waits for a sequence number to be signaled, and cleans up the
++ * request and object lists appropriately for that event.
++ */
++static int
++i915_wait_request(struct drm_device *dev, uint32_t seqno)
++{
++ return i915_do_wait_request(dev, seqno, 1);
++}
++
++/**
++ * Waits for the ring to finish up to the latest request. Usefull for waiting
++ * for flip events, e.g for the overlay support. */
++int i915_lp_ring_sync(struct drm_device *dev)
++{
++ uint32_t seqno;
++ int ret;
++
++ seqno = i915_add_request(dev, NULL, 0);
++
++ if (seqno == 0)
++ return -ENOMEM;
++
++ ret = i915_do_wait_request(dev, seqno, 0);
++ BUG_ON(ret == -ERESTARTSYS);
++ return ret;
++}
++
+ static void
+ i915_gem_flush(struct drm_device *dev,
+ uint32_t invalidate_domains,
+@@ -1936,7 +1965,7 @@ i915_gem_flush(struct drm_device *dev,
+ #endif
+ BEGIN_LP_RING(2);
+ OUT_RING(cmd);
+- OUT_RING(0); /* noop */
++ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+ }
+ }
+--- a/include/drm/drm_os_linux.h
++++ b/include/drm/drm_os_linux.h
+@@ -123,5 +123,5 @@ do { \
+ remove_wait_queue(&(queue), &entry); \
+ } while (0)
+
+-#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
++#define DRM_WAKEUP( queue ) wake_up( queue )
+ #define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
--- /dev/null
+From b9241ea31fae4887104e5d1b3b18f4009c25a0c4 Mon Sep 17 00:00:00 2001
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+Date: Wed, 25 Nov 2009 13:09:39 +0800
+Subject: drm/i915: Don't wait interruptible for possible plane buffer flush
+
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+
+commit b9241ea31fae4887104e5d1b3b18f4009c25a0c4 upstream.
+
+When we setup buffer for display plane, we'll check any pending
+required GPU flush and possible make interruptible wait for flush
+complete. But that wait would be most possibly to fail in case of
+signals received for X process, which will then fail modeset process
+and put display engine in an inconsistent state. The result could be
+blank screen or CPU hang, and DDX driver would always turn on outputs
+DPMS after whatever modeset fails or not.
+
+So this one creates new helper for setup display plane buffer, and
+when needing flush using uninterruptible wait for that.
+
+This one should fix bug like https://bugs.freedesktop.org/show_bug.cgi?id=24009.
+Also fixing mode switch stress test on Ironlake.
+
+Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/i915/i915_drv.h | 1
+ drivers/gpu/drm/i915/i915_gem.c | 51 +++++++++++++++++++++++++++++++++++
+ drivers/gpu/drm/i915/intel_display.c | 2 -
+ 3 files changed, 53 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -829,6 +829,7 @@ int i915_lp_ring_sync(struct drm_device
+ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+ int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+ int write);
++int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
+ int i915_gem_attach_phys_object(struct drm_device *dev,
+ struct drm_gem_object *obj, int id);
+ void i915_gem_detach_phys_object(struct drm_device *dev,
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -2825,6 +2825,57 @@ i915_gem_object_set_to_gtt_domain(struct
+ return 0;
+ }
+
++/*
++ * Prepare buffer for display plane. Use uninterruptible for possible flush
++ * wait, as in modesetting process we're not supposed to be interrupted.
++ */
++int
++i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
++{
++ struct drm_device *dev = obj->dev;
++ struct drm_i915_gem_object *obj_priv = obj->driver_private;
++ uint32_t old_write_domain, old_read_domains;
++ int ret;
++
++ /* Not valid to be called on unbound objects. */
++ if (obj_priv->gtt_space == NULL)
++ return -EINVAL;
++
++ i915_gem_object_flush_gpu_write_domain(obj);
++
++ /* Wait on any GPU rendering and flushing to occur. */
++ if (obj_priv->active) {
++#if WATCH_BUF
++ DRM_INFO("%s: object %p wait for seqno %08x\n",
++ __func__, obj, obj_priv->last_rendering_seqno);
++#endif
++ ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
++ if (ret != 0)
++ return ret;
++ }
++
++ old_write_domain = obj->write_domain;
++ old_read_domains = obj->read_domains;
++
++ obj->read_domains &= I915_GEM_DOMAIN_GTT;
++
++ i915_gem_object_flush_cpu_write_domain(obj);
++
++ /* It should now be out of any other write domains, and we can update
++ * the domain values for our changes.
++ */
++ BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
++ obj->read_domains |= I915_GEM_DOMAIN_GTT;
++ obj->write_domain = I915_GEM_DOMAIN_GTT;
++ obj_priv->dirty = 1;
++
++ trace_i915_gem_object_change_domain(obj,
++ old_read_domains,
++ old_write_domain);
++
++ return 0;
++}
++
+ /**
+ * Moves a single object to the CPU read, and possibly write domain.
+ *
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -1253,7 +1253,7 @@ intel_pipe_set_base(struct drm_crtc *crt
+ return ret;
+ }
+
+- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
++ ret = i915_gem_object_set_to_display_plane(obj);
+ if (ret != 0) {
+ i915_gem_object_unpin(obj);
+ mutex_unlock(&dev->struct_mutex);
--- /dev/null
+From 823f68fd646da6a39a9c0d3eb4c60d69dab5aa13 Mon Sep 17 00:00:00 2001
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+Date: Mon, 28 Dec 2009 13:23:36 +0800
+Subject: drm/i915: remove full registers dump debug
+
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+
+commit 823f68fd646da6a39a9c0d3eb4c60d69dab5aa13 upstream.
+
+This one reverts 9e3a6d155ed0a7636b926a798dd7221ea107b274.
+As reported by http://bugzilla.kernel.org/show_bug.cgi?id=14485,
+this dump will cause hang problem on some machine. If something
+really needs this kind of full registers dump, that could be done
+within intel-gpu-tools.
+
+Cc: Ben Gamari <bgamari.foss@gmail.com>
+Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Signed-off-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+
+---
+ drivers/gpu/drm/i915/i915_debugfs.c | 30 ------------------------------
+ 1 file changed, 30 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -384,37 +384,7 @@ out:
+ return 0;
+ }
+
+-static int i915_registers_info(struct seq_file *m, void *data) {
+- struct drm_info_node *node = (struct drm_info_node *) m->private;
+- struct drm_device *dev = node->minor->dev;
+- drm_i915_private_t *dev_priv = dev->dev_private;
+- uint32_t reg;
+-
+-#define DUMP_RANGE(start, end) \
+- for (reg=start; reg < end; reg += 4) \
+- seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));
+-
+- DUMP_RANGE(0x00000, 0x00fff); /* VGA registers */
+- DUMP_RANGE(0x02000, 0x02fff); /* instruction, memory, interrupt control registers */
+- DUMP_RANGE(0x03000, 0x031ff); /* FENCE and PPGTT control registers */
+- DUMP_RANGE(0x03200, 0x03fff); /* frame buffer compression registers */
+- DUMP_RANGE(0x05000, 0x05fff); /* I/O control registers */
+- DUMP_RANGE(0x06000, 0x06fff); /* clock control registers */
+- DUMP_RANGE(0x07000, 0x07fff); /* 3D internal debug registers */
+- DUMP_RANGE(0x07400, 0x088ff); /* GPE debug registers */
+- DUMP_RANGE(0x0a000, 0x0afff); /* display palette registers */
+- DUMP_RANGE(0x10000, 0x13fff); /* MMIO MCHBAR */
+- DUMP_RANGE(0x30000, 0x3ffff); /* overlay registers */
+- DUMP_RANGE(0x60000, 0x6ffff); /* display engine pipeline registers */
+- DUMP_RANGE(0x70000, 0x72fff); /* display and cursor registers */
+- DUMP_RANGE(0x73000, 0x73fff); /* performance counters */
+-
+- return 0;
+-}
+-
+-
+ static struct drm_info_list i915_debugfs_list[] = {
+- {"i915_regs", i915_registers_info, 0},
+ {"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
+ {"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
+ {"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
--- /dev/null
+From c93d89f3dbf0202bf19c07960ca8602b48c2f9a0 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Wed, 27 Jan 2010 19:13:40 +0800
+Subject: Export the symbol of getboottime and monotonic_to_bootbased
+
+From: Jason Wang <jasowang@redhat.com>
+
+commit c93d89f3dbf0202bf19c07960ca8602b48c2f9a0 upstream.
+
+Export getboottime and monotonic_to_bootbased so that they can be
+used by the following patch.
+
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ kernel/time/timekeeping.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -845,6 +845,7 @@ void getboottime(struct timespec *ts)
+
+ set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
+ }
++EXPORT_SYMBOL_GPL(getboottime);
+
+ /**
+ * monotonic_to_bootbased - Convert the monotonic time to boot based.
+@@ -854,6 +855,7 @@ void monotonic_to_bootbased(struct times
+ {
+ *ts = timespec_add_safe(*ts, total_sleep_time);
+ }
++EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
+
+ unsigned long get_seconds(void)
+ {
--- /dev/null
+From ee73f656a604d5aa9df86a97102e4e462dd79924 Mon Sep 17 00:00:00 2001
+From: Marcelo Tosatti <mtosatti@redhat.com>
+Date: Fri, 29 Jan 2010 17:28:41 -0200
+Subject: KVM: PIT: control word is write-only
+
+From: Marcelo Tosatti <mtosatti@redhat.com>
+
+commit ee73f656a604d5aa9df86a97102e4e462dd79924 upstream.
+
+PIT control word (address 0x43) is write-only, reads are undefined.
+
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kvm/i8254.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/x86/kvm/i8254.c
++++ b/arch/x86/kvm/i8254.c
+@@ -465,6 +465,9 @@ static int pit_ioport_read(struct kvm_io
+ return -EOPNOTSUPP;
+
+ addr &= KVM_PIT_CHANNEL_MASK;
++ if (addr == 3)
++ return 0;
++
+ s = &pit_state->channels[addr];
+
+ mutex_lock(&pit_state->lock);
--- /dev/null
+From 923de3cf5bf12049628019010e36623fca5ef6d1 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Wed, 27 Jan 2010 19:13:49 +0800
+Subject: kvmclock: count total_sleep_time when updating guest clock
+
+From: Jason Wang <jasowang@redhat.com>
+
+commit 923de3cf5bf12049628019010e36623fca5ef6d1 upstream.
+
+Current kvm wallclock does not consider the total_sleep_time, which could cause
+a wrong wallclock in the guest after host suspend/resume. This patch solves
+this issue by counting total_sleep_time to get the correct host boot time.
+
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Acked-by: Glauber Costa <glommer@redhat.com>
+Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/kvm/x86.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -583,7 +583,7 @@ static void kvm_write_wall_clock(struct
+ {
+ static int version;
+ struct pvclock_wall_clock wc;
+- struct timespec now, sys, boot;
++ struct timespec boot;
+
+ if (!wall_clock)
+ return;
+@@ -598,9 +598,7 @@ static void kvm_write_wall_clock(struct
+ * wall clock specified here. guest system time equals host
+ * system time for us, thus we must fill in host boot time here.
+ */
+- now = current_kernel_time();
+- ktime_get_ts(&sys);
+- boot = ns_to_timespec(timespec_to_ns(&now) - timespec_to_ns(&sys));
++ getboottime(&boot);
+
+ wc.sec = boot.tv_sec;
+ wc.nsec = boot.tv_nsec;
+@@ -675,6 +673,7 @@ static void kvm_write_guest_time(struct
+ local_irq_save(flags);
+ kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
+ ktime_get_ts(&ts);
++ monotonic_to_bootbased(&ts);
+ local_irq_restore(flags);
+
+ /* With all the info we got, fill in the values */
--- /dev/null
+From 9eb07c259207d048e3ee8be2a77b2a4680b1edd4 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Tue, 9 Feb 2010 12:31:47 +1100
+Subject: md: fix 'degraded' calculation when starting a reshape.
+
+From: NeilBrown <neilb@suse.de>
+
+commit 9eb07c259207d048e3ee8be2a77b2a4680b1edd4 upstream.
+
+This code was written long ago when it was not possible to
+reshape a degraded array. Now it is so the current level of
+degraded-ness needs to be taken into account. Also newly added
+devices should only reduce degradedness if they are deemed to be
+in-sync.
+
+In particular, if you convert a RAID5 to a RAID6, and increase the
+number of devices at the same time, then the 5->6 conversion will
+make the array degraded so the current code will produce a wrong
+value for 'degraded' - "-1" to be precise.
+
+If the reshape runs to completion end_reshape will calculate a correct
+new value for 'degraded', but if a device fails during the reshape an
+incorrect decision might be made based on the incorrect value of
+"degraded".
+
+This patch is suitable for 2.6.32-stable and if they are still open,
+2.6.31-stable and 2.6.30-stable as well.
+
+Reported-by: Michael Evans <mjevans1983@gmail.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/md/raid5.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -5432,11 +5432,11 @@ static int raid5_start_reshape(mddev_t *
+ !test_bit(Faulty, &rdev->flags)) {
+ if (raid5_add_disk(mddev, rdev) == 0) {
+ char nm[20];
+- if (rdev->raid_disk >= conf->previous_raid_disks)
++ if (rdev->raid_disk >= conf->previous_raid_disks) {
+ set_bit(In_sync, &rdev->flags);
+- else
++ added_devices++;
++ } else
+ rdev->recovery_offset = 0;
+- added_devices++;
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ if (sysfs_create_link(&mddev->kobj,
+ &rdev->kobj, nm))
+@@ -5448,9 +5448,12 @@ static int raid5_start_reshape(mddev_t *
+ break;
+ }
+
++ /* When a reshape changes the number of devices, ->degraded
++ * is measured against the large of the pre and post number of
++ * devices.*/
+ if (mddev->delta_disks > 0) {
+ spin_lock_irqsave(&conf->device_lock, flags);
+- mddev->degraded = (conf->raid_disks - conf->previous_raid_disks)
++ mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
+ - added_devices;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ }
--- /dev/null
+From d696c7bdaa55e2208e56c6f98e6bc1599f34286d Mon Sep 17 00:00:00 2001
+From: Patrick McHardy <kaber@trash.net>
+Date: Mon, 8 Feb 2010 11:18:07 -0800
+Subject: netfilter: nf_conntrack: fix hash resizing with namespaces
+
+From: Patrick McHardy <kaber@trash.net>
+
+commit d696c7bdaa55e2208e56c6f98e6bc1599f34286d upstream.
+
+As noticed by Jon Masters <jonathan@jonmasters.org>, the conntrack hash
+size is global and not per namespace, but modifiable at runtime through
+/sys/module/nf_conntrack/hashsize. Changing the hash size will only
+resize the hash in the current namespace however, so other namespaces
+will use an invalid hash size. This can cause crashes when enlarging
+the hashsize, or false negative lookups when shrinking it.
+
+Move the hash size into the per-namespace data and only use the global
+hash size to initialize the per-namespace value when instantiating a
+new namespace. Additionally restrict hash resizing to init_net for
+now as other namespaces are not handled currently.
+
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/net/netns/conntrack.h | 1
+ include/net/netns/ipv4.h | 1
+ net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 2
+ net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c | 4 -
+ net/ipv4/netfilter/nf_nat_core.c | 22 +++----
+ net/netfilter/nf_conntrack_core.c | 54 +++++++++---------
+ net/netfilter/nf_conntrack_expect.c | 2
+ net/netfilter/nf_conntrack_helper.c | 2
+ net/netfilter/nf_conntrack_netlink.c | 2
+ net/netfilter/nf_conntrack_standalone.c | 7 +-
+ 10 files changed, 50 insertions(+), 47 deletions(-)
+
+--- a/include/net/netns/conntrack.h
++++ b/include/net/netns/conntrack.h
+@@ -11,6 +11,7 @@ struct nf_conntrack_ecache;
+ struct netns_ct {
+ atomic_t count;
+ unsigned int expect_count;
++ unsigned int htable_size;
+ struct kmem_cache *nf_conntrack_cachep;
+ struct hlist_nulls_head *hash;
+ struct hlist_head *expect_hash;
+--- a/include/net/netns/ipv4.h
++++ b/include/net/netns/ipv4.h
+@@ -40,6 +40,7 @@ struct netns_ipv4 {
+ struct xt_table *iptable_security;
+ struct xt_table *nat_table;
+ struct hlist_head *nat_bysource;
++ unsigned int nat_htable_size;
+ int nat_vmalloced;
+ #endif
+
+--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
++++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+@@ -213,7 +213,7 @@ static ctl_table ip_ct_sysctl_table[] =
+ {
+ .ctl_name = NET_IPV4_NF_CONNTRACK_BUCKETS,
+ .procname = "ip_conntrack_buckets",
+- .data = &nf_conntrack_htable_size,
++ .data = &init_net.ct.htable_size,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
++++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4_compat.c
+@@ -32,7 +32,7 @@ static struct hlist_nulls_node *ct_get_f
+ struct hlist_nulls_node *n;
+
+ for (st->bucket = 0;
+- st->bucket < nf_conntrack_htable_size;
++ st->bucket < net->ct.htable_size;
+ st->bucket++) {
+ n = rcu_dereference(net->ct.hash[st->bucket].first);
+ if (!is_a_nulls(n))
+@@ -50,7 +50,7 @@ static struct hlist_nulls_node *ct_get_n
+ head = rcu_dereference(head->next);
+ while (is_a_nulls(head)) {
+ if (likely(get_nulls_value(head) == st->bucket)) {
+- if (++st->bucket >= nf_conntrack_htable_size)
++ if (++st->bucket >= net->ct.htable_size)
+ return NULL;
+ }
+ head = rcu_dereference(net->ct.hash[st->bucket].first);
+--- a/net/ipv4/netfilter/nf_nat_core.c
++++ b/net/ipv4/netfilter/nf_nat_core.c
+@@ -35,9 +35,6 @@ static DEFINE_SPINLOCK(nf_nat_lock);
+
+ static struct nf_conntrack_l3proto *l3proto __read_mostly;
+
+-/* Calculated at init based on memory size */
+-static unsigned int nf_nat_htable_size __read_mostly;
+-
+ #define MAX_IP_NAT_PROTO 256
+ static const struct nf_nat_protocol *nf_nat_protos[MAX_IP_NAT_PROTO]
+ __read_mostly;
+@@ -72,7 +69,7 @@ EXPORT_SYMBOL_GPL(nf_nat_proto_put);
+
+ /* We keep an extra hash for each conntrack, for fast searching. */
+ static inline unsigned int
+-hash_by_src(const struct nf_conntrack_tuple *tuple)
++hash_by_src(const struct net *net, const struct nf_conntrack_tuple *tuple)
+ {
+ unsigned int hash;
+
+@@ -80,7 +77,7 @@ hash_by_src(const struct nf_conntrack_tu
+ hash = jhash_3words((__force u32)tuple->src.u3.ip,
+ (__force u32)tuple->src.u.all,
+ tuple->dst.protonum, 0);
+- return ((u64)hash * nf_nat_htable_size) >> 32;
++ return ((u64)hash * net->ipv4.nat_htable_size) >> 32;
+ }
+
+ /* Is this tuple already taken? (not by us) */
+@@ -147,7 +144,7 @@ find_appropriate_src(struct net *net,
+ struct nf_conntrack_tuple *result,
+ const struct nf_nat_range *range)
+ {
+- unsigned int h = hash_by_src(tuple);
++ unsigned int h = hash_by_src(net, tuple);
+ const struct nf_conn_nat *nat;
+ const struct nf_conn *ct;
+ const struct hlist_node *n;
+@@ -330,7 +327,7 @@ nf_nat_setup_info(struct nf_conn *ct,
+ if (have_to_hash) {
+ unsigned int srchash;
+
+- srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
++ srchash = hash_by_src(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ spin_lock_bh(&nf_nat_lock);
+ /* nf_conntrack_alter_reply might re-allocate exntension aera */
+ nat = nfct_nat(ct);
+@@ -679,8 +676,10 @@ nfnetlink_parse_nat_setup(struct nf_conn
+
+ static int __net_init nf_nat_net_init(struct net *net)
+ {
+- net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size,
+- &net->ipv4.nat_vmalloced, 0);
++ /* Leave them the same for the moment. */
++ net->ipv4.nat_htable_size = net->ct.htable_size;
++ net->ipv4.nat_bysource = nf_ct_alloc_hashtable(&net->ipv4.nat_htable_size,
++ &net->ipv4.nat_vmalloced, 0);
+ if (!net->ipv4.nat_bysource)
+ return -ENOMEM;
+ return 0;
+@@ -703,7 +702,7 @@ static void __net_exit nf_nat_net_exit(s
+ nf_ct_iterate_cleanup(net, &clean_nat, NULL);
+ synchronize_rcu();
+ nf_ct_free_hashtable(net->ipv4.nat_bysource, net->ipv4.nat_vmalloced,
+- nf_nat_htable_size);
++ net->ipv4.nat_htable_size);
+ }
+
+ static struct pernet_operations nf_nat_net_ops = {
+@@ -724,9 +723,6 @@ static int __init nf_nat_init(void)
+ return ret;
+ }
+
+- /* Leave them the same for the moment. */
+- nf_nat_htable_size = nf_conntrack_htable_size;
+-
+ ret = register_pernet_subsys(&nf_nat_net_ops);
+ if (ret < 0)
+ goto cleanup_extend;
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -30,6 +30,7 @@
+ #include <linux/netdevice.h>
+ #include <linux/socket.h>
+ #include <linux/mm.h>
++#include <linux/nsproxy.h>
+ #include <linux/rculist_nulls.h>
+
+ #include <net/netfilter/nf_conntrack.h>
+@@ -84,9 +85,10 @@ static u_int32_t __hash_conntrack(const
+ return ((u64)h * size) >> 32;
+ }
+
+-static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
++static inline u_int32_t hash_conntrack(const struct net *net,
++ const struct nf_conntrack_tuple *tuple)
+ {
+- return __hash_conntrack(tuple, nf_conntrack_htable_size,
++ return __hash_conntrack(tuple, net->ct.htable_size,
+ nf_conntrack_hash_rnd);
+ }
+
+@@ -294,7 +296,7 @@ __nf_conntrack_find(struct net *net, con
+ {
+ struct nf_conntrack_tuple_hash *h;
+ struct hlist_nulls_node *n;
+- unsigned int hash = hash_conntrack(tuple);
++ unsigned int hash = hash_conntrack(net, tuple);
+
+ /* Disable BHs the entire time since we normally need to disable them
+ * at least once for the stats anyway.
+@@ -364,10 +366,11 @@ static void __nf_conntrack_hash_insert(s
+
+ void nf_conntrack_hash_insert(struct nf_conn *ct)
+ {
++ struct net *net = nf_ct_net(ct);
+ unsigned int hash, repl_hash;
+
+- hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+- repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
++ hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
++ repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+
+ __nf_conntrack_hash_insert(ct, hash, repl_hash);
+ }
+@@ -395,8 +398,8 @@ __nf_conntrack_confirm(struct sk_buff *s
+ if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+ return NF_ACCEPT;
+
+- hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+- repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
++ hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
++ repl_hash = hash_conntrack(net, &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+
+ /* We're not in hash table, and we refuse to set up related
+ connections for unconfirmed conns. But packet copies and
+@@ -466,7 +469,7 @@ nf_conntrack_tuple_taken(const struct nf
+ struct net *net = nf_ct_net(ignored_conntrack);
+ struct nf_conntrack_tuple_hash *h;
+ struct hlist_nulls_node *n;
+- unsigned int hash = hash_conntrack(tuple);
++ unsigned int hash = hash_conntrack(net, tuple);
+
+ /* Disable BHs the entire time since we need to disable them at
+ * least once for the stats anyway.
+@@ -501,7 +504,7 @@ static noinline int early_drop(struct ne
+ int dropped = 0;
+
+ rcu_read_lock();
+- for (i = 0; i < nf_conntrack_htable_size; i++) {
++ for (i = 0; i < net->ct.htable_size; i++) {
+ hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[hash],
+ hnnode) {
+ tmp = nf_ct_tuplehash_to_ctrack(h);
+@@ -515,7 +518,8 @@ static noinline int early_drop(struct ne
+ ct = NULL;
+ if (ct || cnt >= NF_CT_EVICTION_RANGE)
+ break;
+- hash = (hash + 1) % nf_conntrack_htable_size;
++
++ hash = (hash + 1) % net->ct.htable_size;
+ }
+ rcu_read_unlock();
+
+@@ -549,7 +553,7 @@ struct nf_conn *nf_conntrack_alloc(struc
+
+ if (nf_conntrack_max &&
+ unlikely(atomic_read(&net->ct.count) > nf_conntrack_max)) {
+- unsigned int hash = hash_conntrack(orig);
++ unsigned int hash = hash_conntrack(net, orig);
+ if (!early_drop(net, hash)) {
+ atomic_dec(&net->ct.count);
+ if (net_ratelimit())
+@@ -1006,7 +1010,7 @@ get_next_corpse(struct net *net, int (*i
+ struct hlist_nulls_node *n;
+
+ spin_lock_bh(&nf_conntrack_lock);
+- for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
++ for (; *bucket < net->ct.htable_size; (*bucket)++) {
+ hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
+ ct = nf_ct_tuplehash_to_ctrack(h);
+ if (iter(ct, data))
+@@ -1124,7 +1128,7 @@ static void nf_conntrack_cleanup_net(str
+ }
+
+ nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
+- nf_conntrack_htable_size);
++ net->ct.htable_size);
+ nf_conntrack_ecache_fini(net);
+ nf_conntrack_acct_fini(net);
+ nf_conntrack_expect_fini(net);
+@@ -1184,10 +1188,12 @@ int nf_conntrack_set_hashsize(const char
+ {
+ int i, bucket, vmalloced, old_vmalloced;
+ unsigned int hashsize, old_size;
+- int rnd;
+ struct hlist_nulls_head *hash, *old_hash;
+ struct nf_conntrack_tuple_hash *h;
+
++ if (current->nsproxy->net_ns != &init_net)
++ return -EOPNOTSUPP;
++
+ /* On boot, we can set this without any fancy locking. */
+ if (!nf_conntrack_htable_size)
+ return param_set_uint(val, kp);
+@@ -1200,33 +1206,29 @@ int nf_conntrack_set_hashsize(const char
+ if (!hash)
+ return -ENOMEM;
+
+- /* We have to rehahs for the new table anyway, so we also can
+- * use a newrandom seed */
+- get_random_bytes(&rnd, sizeof(rnd));
+-
+ /* Lookups in the old hash might happen in parallel, which means we
+ * might get false negatives during connection lookup. New connections
+ * created because of a false negative won't make it into the hash
+ * though since that required taking the lock.
+ */
+ spin_lock_bh(&nf_conntrack_lock);
+- for (i = 0; i < nf_conntrack_htable_size; i++) {
++ for (i = 0; i < init_net.ct.htable_size; i++) {
+ while (!hlist_nulls_empty(&init_net.ct.hash[i])) {
+ h = hlist_nulls_entry(init_net.ct.hash[i].first,
+ struct nf_conntrack_tuple_hash, hnnode);
+ hlist_nulls_del_rcu(&h->hnnode);
+- bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
++ bucket = __hash_conntrack(&h->tuple, hashsize,
++ nf_conntrack_hash_rnd);
+ hlist_nulls_add_head_rcu(&h->hnnode, &hash[bucket]);
+ }
+ }
+- old_size = nf_conntrack_htable_size;
++ old_size = init_net.ct.htable_size;
+ old_vmalloced = init_net.ct.hash_vmalloc;
+ old_hash = init_net.ct.hash;
+
+- nf_conntrack_htable_size = hashsize;
++ init_net.ct.htable_size = nf_conntrack_htable_size = hashsize;
+ init_net.ct.hash_vmalloc = vmalloced;
+ init_net.ct.hash = hash;
+- nf_conntrack_hash_rnd = rnd;
+ spin_unlock_bh(&nf_conntrack_lock);
+
+ nf_ct_free_hashtable(old_hash, old_vmalloced, old_size);
+@@ -1322,7 +1324,9 @@ static int nf_conntrack_init_net(struct
+ ret = -ENOMEM;
+ goto err_cache;
+ }
+- net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
++
++ net->ct.htable_size = nf_conntrack_htable_size;
++ net->ct.hash = nf_ct_alloc_hashtable(&net->ct.htable_size,
+ &net->ct.hash_vmalloc, 1);
+ if (!net->ct.hash) {
+ ret = -ENOMEM;
+@@ -1347,7 +1351,7 @@ err_acct:
+ nf_conntrack_expect_fini(net);
+ err_expect:
+ nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
+- nf_conntrack_htable_size);
++ net->ct.htable_size);
+ err_hash:
+ kmem_cache_destroy(net->ct.nf_conntrack_cachep);
+ err_cache:
+--- a/net/netfilter/nf_conntrack_expect.c
++++ b/net/netfilter/nf_conntrack_expect.c
+@@ -577,7 +577,7 @@ int nf_conntrack_expect_init(struct net
+
+ if (net_eq(net, &init_net)) {
+ if (!nf_ct_expect_hsize) {
+- nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
++ nf_ct_expect_hsize = net->ct.htable_size / 256;
+ if (!nf_ct_expect_hsize)
+ nf_ct_expect_hsize = 1;
+ }
+--- a/net/netfilter/nf_conntrack_helper.c
++++ b/net/netfilter/nf_conntrack_helper.c
+@@ -192,7 +192,7 @@ static void __nf_conntrack_helper_unregi
+ /* Get rid of expecteds, set helpers to NULL. */
+ hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
+ unhelp(h, me);
+- for (i = 0; i < nf_conntrack_htable_size; i++) {
++ for (i = 0; i < net->ct.htable_size; i++) {
+ hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
+ unhelp(h, me);
+ }
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -594,7 +594,7 @@ ctnetlink_dump_table(struct sk_buff *skb
+
+ rcu_read_lock();
+ last = (struct nf_conn *)cb->args[1];
+- for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
++ for (; cb->args[0] < init_net.ct.htable_size; cb->args[0]++) {
+ restart:
+ hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
+ hnnode) {
+--- a/net/netfilter/nf_conntrack_standalone.c
++++ b/net/netfilter/nf_conntrack_standalone.c
+@@ -51,7 +51,7 @@ static struct hlist_nulls_node *ct_get_f
+ struct hlist_nulls_node *n;
+
+ for (st->bucket = 0;
+- st->bucket < nf_conntrack_htable_size;
++ st->bucket < net->ct.htable_size;
+ st->bucket++) {
+ n = rcu_dereference(net->ct.hash[st->bucket].first);
+ if (!is_a_nulls(n))
+@@ -69,7 +69,7 @@ static struct hlist_nulls_node *ct_get_n
+ head = rcu_dereference(head->next);
+ while (is_a_nulls(head)) {
+ if (likely(get_nulls_value(head) == st->bucket)) {
+- if (++st->bucket >= nf_conntrack_htable_size)
++ if (++st->bucket >= net->ct.htable_size)
+ return NULL;
+ }
+ head = rcu_dereference(net->ct.hash[st->bucket].first);
+@@ -358,7 +358,7 @@ static ctl_table nf_ct_sysctl_table[] =
+ {
+ .ctl_name = NET_NF_CONNTRACK_BUCKETS,
+ .procname = "nf_conntrack_buckets",
+- .data = &nf_conntrack_htable_size,
++ .data = &init_net.ct.htable_size,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+@@ -429,6 +429,7 @@ static int nf_conntrack_standalone_init_
+ goto out_kmemdup;
+
+ table[1].data = &net->ct.count;
++ table[2].data = &net->ct.htable_size;
+ table[3].data = &net->ct.sysctl_checksum;
+ table[4].data = &net->ct.sysctl_log_invalid;
+
--- /dev/null
+From 9edd7ca0a3e3999c260642c92fa008892d82ca6e Mon Sep 17 00:00:00 2001
+From: Patrick McHardy <kaber@trash.net>
+Date: Mon, 8 Feb 2010 11:16:26 -0800
+Subject: netfilter: nf_conntrack: fix memory corruption with multiple namespaces
+
+From: Patrick McHardy <kaber@trash.net>
+
+commit 9edd7ca0a3e3999c260642c92fa008892d82ca6e upstream.
+
+As discovered by Jon Masters <jonathan@jonmasters.org>, the "untracked"
+conntrack, which is located in the data section, might be accidentally
+freed when a new namespace is instantiated while the untracked conntrack
+is attached to a skb because the reference count is re-initialized.
+
+The best fix would be to use a separate untracked conntrack per
+namespace since it includes a namespace pointer. Unfortunately this is
+not possible without larger changes since the namespace is not easily
+available everywhere we need it. For now move the untracked conntrack
+initialization to the init_net setup function to make sure the reference
+count is not re-initialized and handle cleanup in the init_net cleanup
+function to make sure namespaces can exit properly while the untracked
+conntrack is in use in other namespaces.
+
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/netfilter/nf_conntrack_core.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -1107,6 +1107,10 @@ static void nf_ct_release_dying_list(str
+
+ static void nf_conntrack_cleanup_init_net(void)
+ {
++ /* wait until all references to nf_conntrack_untracked are dropped */
++ while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
++ schedule();
++
+ nf_conntrack_helper_fini();
+ nf_conntrack_proto_fini();
+ kmem_cache_destroy(nf_conntrack_cachep);
+@@ -1121,9 +1125,6 @@ static void nf_conntrack_cleanup_net(str
+ schedule();
+ goto i_see_dead_people;
+ }
+- /* wait until all references to nf_conntrack_untracked are dropped */
+- while (atomic_read(&nf_conntrack_untracked.ct_general.use) > 1)
+- schedule();
+
+ nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
+ nf_conntrack_htable_size);
+@@ -1282,6 +1283,14 @@ static int nf_conntrack_init_init_net(vo
+ if (ret < 0)
+ goto err_helper;
+
++ /* Set up fake conntrack: to never be deleted, not in any hashes */
++#ifdef CONFIG_NET_NS
++ nf_conntrack_untracked.ct_net = &init_net;
++#endif
++ atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
++ /* - and look it like as a confirmed connection */
++ set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
++
+ return 0;
+
+ err_helper:
+@@ -1327,15 +1336,6 @@ static int nf_conntrack_init_net(struct
+ if (ret < 0)
+ goto err_ecache;
+
+- /* Set up fake conntrack:
+- - to never be deleted, not in any hashes */
+-#ifdef CONFIG_NET_NS
+- nf_conntrack_untracked.ct_net = &init_net;
+-#endif
+- atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
+- /* - and look it like as a confirmed connection */
+- set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
+-
+ return 0;
+
+ err_ecache:
--- /dev/null
+From 5b3501faa8741d50617ce4191c20061c6ef36cb3 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <eric.dumazet@gmail.com>
+Date: Mon, 8 Feb 2010 11:16:56 -0800
+Subject: netfilter: nf_conntrack: per netns nf_conntrack_cachep
+
+From: Eric Dumazet <eric.dumazet@gmail.com>
+
+commit 5b3501faa8741d50617ce4191c20061c6ef36cb3 upstream.
+
+nf_conntrack_cachep is currently shared by all netns instances, but
+because of SLAB_DESTROY_BY_RCU special semantics, this is wrong.
+
+If we use a shared slab cache, one object can instantly flight between
+one hash table (netns ONE) to another one (netns TWO), and concurrent
+reader (doing a lookup in netns ONE, 'finding' an object of netns TWO)
+can be fooled without notice, because no RCU grace period has to be
+observed between object freeing and its reuse.
+
+We dont have this problem with UDP/TCP slab caches because TCP/UDP
+hashtables are global to the machine (and each object has a pointer to
+its netns).
+
+If we use per netns conntrack hash tables, we also *must* use per netns
+conntrack slab caches, to guarantee an object can not escape from one
+namespace to another one.
+
+Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
+[Patrick: added unique slab name allocation]
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/net/netns/conntrack.h | 2 +
+ net/netfilter/nf_conntrack_core.c | 39 ++++++++++++++++++++++----------------
+ 2 files changed, 25 insertions(+), 16 deletions(-)
+
+--- a/include/net/netns/conntrack.h
++++ b/include/net/netns/conntrack.h
+@@ -11,6 +11,7 @@ struct nf_conntrack_ecache;
+ struct netns_ct {
+ atomic_t count;
+ unsigned int expect_count;
++ struct kmem_cache *nf_conntrack_cachep;
+ struct hlist_nulls_head *hash;
+ struct hlist_head *expect_hash;
+ struct hlist_nulls_head unconfirmed;
+@@ -28,5 +29,6 @@ struct netns_ct {
+ #endif
+ int hash_vmalloc;
+ int expect_vmalloc;
++ char *slabname;
+ };
+ #endif
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -63,8 +63,6 @@ EXPORT_SYMBOL_GPL(nf_conntrack_max);
+ struct nf_conn nf_conntrack_untracked __read_mostly;
+ EXPORT_SYMBOL_GPL(nf_conntrack_untracked);
+
+-static struct kmem_cache *nf_conntrack_cachep __read_mostly;
+-
+ static int nf_conntrack_hash_rnd_initted;
+ static unsigned int nf_conntrack_hash_rnd;
+
+@@ -566,7 +564,7 @@ struct nf_conn *nf_conntrack_alloc(struc
+ * Do not use kmem_cache_zalloc(), as this cache uses
+ * SLAB_DESTROY_BY_RCU.
+ */
+- ct = kmem_cache_alloc(nf_conntrack_cachep, gfp);
++ ct = kmem_cache_alloc(net->ct.nf_conntrack_cachep, gfp);
+ if (ct == NULL) {
+ pr_debug("nf_conntrack_alloc: Can't alloc conntrack.\n");
+ atomic_dec(&net->ct.count);
+@@ -605,7 +603,7 @@ void nf_conntrack_free(struct nf_conn *c
+ nf_ct_ext_destroy(ct);
+ atomic_dec(&net->ct.count);
+ nf_ct_ext_free(ct);
+- kmem_cache_free(nf_conntrack_cachep, ct);
++ kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
+ }
+ EXPORT_SYMBOL_GPL(nf_conntrack_free);
+
+@@ -1113,7 +1111,6 @@ static void nf_conntrack_cleanup_init_ne
+
+ nf_conntrack_helper_fini();
+ nf_conntrack_proto_fini();
+- kmem_cache_destroy(nf_conntrack_cachep);
+ }
+
+ static void nf_conntrack_cleanup_net(struct net *net)
+@@ -1131,6 +1128,8 @@ static void nf_conntrack_cleanup_net(str
+ nf_conntrack_ecache_fini(net);
+ nf_conntrack_acct_fini(net);
+ nf_conntrack_expect_fini(net);
++ kmem_cache_destroy(net->ct.nf_conntrack_cachep);
++ kfree(net->ct.slabname);
+ free_percpu(net->ct.stat);
+ }
+
+@@ -1266,15 +1265,6 @@ static int nf_conntrack_init_init_net(vo
+ NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
+ nf_conntrack_max);
+
+- nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
+- sizeof(struct nf_conn),
+- 0, SLAB_DESTROY_BY_RCU, NULL);
+- if (!nf_conntrack_cachep) {
+- printk(KERN_ERR "Unable to create nf_conn slab cache\n");
+- ret = -ENOMEM;
+- goto err_cache;
+- }
+-
+ ret = nf_conntrack_proto_init();
+ if (ret < 0)
+ goto err_proto;
+@@ -1296,8 +1286,6 @@ static int nf_conntrack_init_init_net(vo
+ err_helper:
+ nf_conntrack_proto_fini();
+ err_proto:
+- kmem_cache_destroy(nf_conntrack_cachep);
+-err_cache:
+ return ret;
+ }
+
+@@ -1319,6 +1307,21 @@ static int nf_conntrack_init_net(struct
+ ret = -ENOMEM;
+ goto err_stat;
+ }
++
++ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
++ if (!net->ct.slabname) {
++ ret = -ENOMEM;
++ goto err_slabname;
++ }
++
++ net->ct.nf_conntrack_cachep = kmem_cache_create(net->ct.slabname,
++ sizeof(struct nf_conn), 0,
++ SLAB_DESTROY_BY_RCU, NULL);
++ if (!net->ct.nf_conntrack_cachep) {
++ printk(KERN_ERR "Unable to create nf_conn slab cache\n");
++ ret = -ENOMEM;
++ goto err_cache;
++ }
+ net->ct.hash = nf_ct_alloc_hashtable(&nf_conntrack_htable_size,
+ &net->ct.hash_vmalloc, 1);
+ if (!net->ct.hash) {
+@@ -1346,6 +1349,10 @@ err_expect:
+ nf_ct_free_hashtable(net->ct.hash, net->ct.hash_vmalloc,
+ nf_conntrack_htable_size);
+ err_hash:
++ kmem_cache_destroy(net->ct.nf_conntrack_cachep);
++err_cache:
++ kfree(net->ct.slabname);
++err_slabname:
+ free_percpu(net->ct.stat);
+ err_stat:
+ return ret;
--- /dev/null
+From 13ccdfc2af03e09e60791f7d4bc4ccf53398af7c Mon Sep 17 00:00:00 2001
+From: Alexey Dobriyan <adobriyan@gmail.com>
+Date: Mon, 8 Feb 2010 11:17:22 -0800
+Subject: netfilter: nf_conntrack: restrict runtime expect hashsize modifications
+
+From: Alexey Dobriyan <adobriyan@gmail.com>
+
+commit 13ccdfc2af03e09e60791f7d4bc4ccf53398af7c upstream.
+
+Expectation hashtable size was simply glued to a variable with no code
+to rehash expectations, so it was a bug to allow writing to it.
+Make "expect_hashsize" readonly.
+
+Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/netfilter/nf_conntrack_expect.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/netfilter/nf_conntrack_expect.c
++++ b/net/netfilter/nf_conntrack_expect.c
+@@ -569,7 +569,7 @@ static void exp_proc_remove(struct net *
+ #endif /* CONFIG_PROC_FS */
+ }
+
+-module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0600);
++module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);
+
+ int nf_conntrack_expect_init(struct net *net)
+ {
--- /dev/null
+From 14c7dbe043d01a83a30633ab6b109ba2ac61d9f7 Mon Sep 17 00:00:00 2001
+From: Alexey Dobriyan <adobriyan@gmail.com>
+Date: Mon, 8 Feb 2010 11:17:43 -0800
+Subject: netfilter: xtables: compat out of scope fix
+
+From: Alexey Dobriyan <adobriyan@gmail.com>
+
+commit 14c7dbe043d01a83a30633ab6b109ba2ac61d9f7 upstream.
+
+As per C99 6.2.4(2) when temporary table data goes out of scope,
+the behaviour is undefined:
+
+ if (compat) {
+ struct foo tmp;
+ ...
+ private = &tmp;
+ }
+ [dereference private]
+
+Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
+Signed-off-by: Patrick McHardy <kaber@trash.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ net/ipv4/netfilter/arp_tables.c | 4 ++--
+ net/ipv4/netfilter/ip_tables.c | 4 ++--
+ net/ipv6/netfilter/ip6_tables.c | 4 ++--
+ 3 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/net/ipv4/netfilter/arp_tables.c
++++ b/net/ipv4/netfilter/arp_tables.c
+@@ -925,10 +925,10 @@ static int get_info(struct net *net, voi
+ if (t && !IS_ERR(t)) {
+ struct arpt_getinfo info;
+ const struct xt_table_info *private = t->private;
+-
+ #ifdef CONFIG_COMPAT
++ struct xt_table_info tmp;
++
+ if (compat) {
+- struct xt_table_info tmp;
+ ret = compat_table_info(private, &tmp);
+ xt_compat_flush_offsets(NFPROTO_ARP);
+ private = &tmp;
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -1132,10 +1132,10 @@ static int get_info(struct net *net, voi
+ if (t && !IS_ERR(t)) {
+ struct ipt_getinfo info;
+ const struct xt_table_info *private = t->private;
+-
+ #ifdef CONFIG_COMPAT
++ struct xt_table_info tmp;
++
+ if (compat) {
+- struct xt_table_info tmp;
+ ret = compat_table_info(private, &tmp);
+ xt_compat_flush_offsets(AF_INET);
+ private = &tmp;
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -1164,10 +1164,10 @@ static int get_info(struct net *net, voi
+ if (t && !IS_ERR(t)) {
+ struct ip6t_getinfo info;
+ const struct xt_table_info *private = t->private;
+-
+ #ifdef CONFIG_COMPAT
++ struct xt_table_info tmp;
++
+ if (compat) {
+- struct xt_table_info tmp;
+ ret = compat_table_info(private, &tmp);
+ xt_compat_flush_offsets(AF_INET6);
+ private = &tmp;
--- /dev/null
+From 2c1740098c708b465e87637b237feb2fd98f129a Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Mon, 8 Feb 2010 09:32:27 -0500
+Subject: NFS: Fix a bug in nfs_fscache_release_page()
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 2c1740098c708b465e87637b237feb2fd98f129a upstream.
+
+Not having an fscache cookie is perfectly valid if the user didn't mount
+with the fscache option.
+
+This patch fixes http://bugzilla.kernel.org/show_bug.cgi?id=15234
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Acked-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/fscache.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/fs/nfs/fscache.c
++++ b/fs/nfs/fscache.c
+@@ -354,12 +354,11 @@ void nfs_fscache_reset_inode_cookie(stru
+ */
+ int nfs_fscache_release_page(struct page *page, gfp_t gfp)
+ {
+- struct nfs_inode *nfsi = NFS_I(page->mapping->host);
+- struct fscache_cookie *cookie = nfsi->fscache;
+-
+- BUG_ON(!cookie);
+-
+ if (PageFsCache(page)) {
++ struct nfs_inode *nfsi = NFS_I(page->mapping->host);
++ struct fscache_cookie *cookie = nfsi->fscache;
++
++ BUG_ON(!cookie);
+ dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
+ cookie, page, nfsi);
+
--- /dev/null
+From c9edda7140ec6a22accf7f2f86da362dfbfd41fc Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Tue, 26 Jan 2010 15:41:34 -0500
+Subject: NFS: Fix a reference leak in nfs_wb_cancel_page()
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit c9edda7140ec6a22accf7f2f86da362dfbfd41fc upstream.
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/write.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -1542,6 +1542,7 @@ int nfs_wb_page_cancel(struct inode *ino
+ break;
+ }
+ ret = nfs_wait_on_request(req);
++ nfs_release_request(req);
+ if (ret < 0)
+ goto out;
+ }
--- /dev/null
+From 387c149b54b4321cbc790dadbd4f8eedb5a90468 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Wed, 3 Feb 2010 08:27:35 -0500
+Subject: NFS: Fix a umount race
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 387c149b54b4321cbc790dadbd4f8eedb5a90468 upstream.
+
+Ensure that we unregister the bdi before kill_anon_super() calls
+ida_remove() on our device name.
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/super.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -241,6 +241,7 @@ static int nfs_show_stats(struct seq_fi
+ static int nfs_get_sb(struct file_system_type *, int, const char *, void *, struct vfsmount *);
+ static int nfs_xdev_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
++static void nfs_put_super(struct super_block *);
+ static void nfs_kill_super(struct super_block *);
+ static int nfs_remount(struct super_block *sb, int *flags, char *raw_data);
+
+@@ -264,6 +265,7 @@ static const struct super_operations nfs
+ .alloc_inode = nfs_alloc_inode,
+ .destroy_inode = nfs_destroy_inode,
+ .write_inode = nfs_write_inode,
++ .put_super = nfs_put_super,
+ .statfs = nfs_statfs,
+ .clear_inode = nfs_clear_inode,
+ .umount_begin = nfs_umount_begin,
+@@ -333,6 +335,7 @@ static const struct super_operations nfs
+ .alloc_inode = nfs_alloc_inode,
+ .destroy_inode = nfs_destroy_inode,
+ .write_inode = nfs_write_inode,
++ .put_super = nfs_put_super,
+ .statfs = nfs_statfs,
+ .clear_inode = nfs4_clear_inode,
+ .umount_begin = nfs_umount_begin,
+@@ -2196,6 +2199,17 @@ error_splat_super:
+ }
+
+ /*
++ * Ensure that we unregister the bdi before kill_anon_super
++ * releases the device name
++ */
++static void nfs_put_super(struct super_block *s)
++{
++ struct nfs_server *server = NFS_SB(s);
++
++ bdi_unregister(&server->backing_dev_info);
++}
++
++/*
+ * Destroy an NFS2/3 superblock
+ */
+ static void nfs_kill_super(struct super_block *s)
+@@ -2203,7 +2217,6 @@ static void nfs_kill_super(struct super_
+ struct nfs_server *server = NFS_SB(s);
+
+ kill_anon_super(s);
+- bdi_unregister(&server->backing_dev_info);
+ nfs_fscache_release_super_cookie(s);
+ nfs_free_server(server);
+ }
--- /dev/null
+From 9f557cd8073104b39528794d44e129331ded649f Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Wed, 3 Feb 2010 08:27:22 -0500
+Subject: NFS: Fix an Oops when truncating a file
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 9f557cd8073104b39528794d44e129331ded649f upstream.
+
+The VM/VFS does not allow mapping->a_ops->invalidatepage() to fail.
+Unfortunately, nfs_wb_page_cancel() may fail if a fatal signal occurs.
+Since the NFS code assumes that the page stays mapped for as long as the
+writeback is active, we can end up Oopsing (among other things).
+
+The only safe fix here is to convert nfs_wait_on_request(), so as to make
+it uninterruptible (as is already the case with wait_on_page_writeback()).
+
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -176,6 +176,12 @@ void nfs_release_request(struct nfs_page *req)
+ kref_put(&req->wb_kref, nfs_free_request);
+ }
+
++static int nfs_wait_bit_uninterruptible(void *word)
++{
++ io_schedule();
++ return 0;
++}
++
+ /**
+ * nfs_wait_on_request - Wait for a request to complete.
+ * @req: request to wait upon.
+@@ -186,14 +192,9 @@ void nfs_release_request(struct nfs_page *req)
+ int
+ nfs_wait_on_request(struct nfs_page *req)
+ {
+- int ret = 0;
+-
+- if (!test_bit(PG_BUSY, &req->wb_flags))
+- goto out;
+- ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
+- nfs_wait_bit_killable, TASK_KILLABLE);
+-out:
+- return ret;
++ return wait_on_bit(&req->wb_flags, PG_BUSY,
++ nfs_wait_bit_uninterruptible,
++ TASK_UNINTERRUPTIBLE);
+ }
+
+ /**
--- /dev/null
+From fdcb45777a3d1689c5541e1f85ee3ebbd197d2c1 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Mon, 8 Feb 2010 09:32:40 -0500
+Subject: NFS: Fix the mapping of the NFSERR_SERVERFAULT error
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit fdcb45777a3d1689c5541e1f85ee3ebbd197d2c1 upstream.
+
+It was recently pointed out that the NFSERR_SERVERFAULT error, which is
+designed to inform the user of a serious internal error on the server, was
+being mapped to an error value that is internal to the kernel.
+
+This patch maps it to the error EREMOTEIO, which is exported to userland
+through errno.h.
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/mount_clnt.c | 2 +-
+ fs/nfs/nfs2xdr.c | 2 +-
+ fs/nfs/nfs4xdr.c | 6 +++---
+ 3 files changed, 5 insertions(+), 5 deletions(-)
+
+--- a/fs/nfs/mount_clnt.c
++++ b/fs/nfs/mount_clnt.c
+@@ -120,7 +120,7 @@ static struct {
+ { .status = MNT3ERR_INVAL, .errno = -EINVAL, },
+ { .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, },
+ { .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, },
+- { .status = MNT3ERR_SERVERFAULT, .errno = -ESERVERFAULT, },
++ { .status = MNT3ERR_SERVERFAULT, .errno = -EREMOTEIO, },
+ };
+
+ struct mountres {
+--- a/fs/nfs/nfs2xdr.c
++++ b/fs/nfs/nfs2xdr.c
+@@ -699,7 +699,7 @@ static struct {
+ { NFSERR_BAD_COOKIE, -EBADCOOKIE },
+ { NFSERR_NOTSUPP, -ENOTSUPP },
+ { NFSERR_TOOSMALL, -ETOOSMALL },
+- { NFSERR_SERVERFAULT, -ESERVERFAULT },
++ { NFSERR_SERVERFAULT, -EREMOTEIO },
+ { NFSERR_BADTYPE, -EBADTYPE },
+ { NFSERR_JUKEBOX, -EJUKEBOX },
+ { -1, -EIO }
+--- a/fs/nfs/nfs4xdr.c
++++ b/fs/nfs/nfs4xdr.c
+@@ -4554,7 +4554,7 @@ static int decode_sequence(struct xdr_st
+ * If the server returns different values for sessionID, slotID or
+ * sequence number, the server is looney tunes.
+ */
+- status = -ESERVERFAULT;
++ status = -EREMOTEIO;
+
+ if (memcmp(id.data, res->sr_session->sess_id.data,
+ NFS4_MAX_SESSIONID_LEN)) {
+@@ -5678,7 +5678,7 @@ static struct {
+ { NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
+ { NFS4ERR_NOTSUPP, -ENOTSUPP },
+ { NFS4ERR_TOOSMALL, -ETOOSMALL },
+- { NFS4ERR_SERVERFAULT, -ESERVERFAULT },
++ { NFS4ERR_SERVERFAULT, -EREMOTEIO },
+ { NFS4ERR_BADTYPE, -EBADTYPE },
+ { NFS4ERR_LOCKED, -EAGAIN },
+ { NFS4ERR_SYMLINK, -ELOOP },
+@@ -5705,7 +5705,7 @@ nfs4_stat_to_errno(int stat)
+ }
+ if (stat <= 10000 || stat > 10100) {
+ /* The server is looney tunes. */
+- return -ESERVERFAULT;
++ return -EREMOTEIO;
+ }
+ /* If we cannot translate the error, the recovery routines should
+ * handle it.
--- /dev/null
+From 82be934a59ff891cac598727e5a862ba2b9d1fac Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Tue, 26 Jan 2010 15:41:53 -0500
+Subject: NFS: Try to commit unstable writes in nfs_release_page()
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 82be934a59ff891cac598727e5a862ba2b9d1fac upstream.
+
+If someone calls nfs_release_page(), we presumably already know that the
+page is clean, however it may be holding an unstable write.
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/file.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -486,6 +486,8 @@ static int nfs_release_page(struct page
+ {
+ dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
+
++ if (gfp & __GFP_WAIT)
++ nfs_wb_page(page->mapping->host, page);
+ /* If PagePrivate() is set, then the page is not freeable */
+ if (PagePrivate(page))
+ return 0;
--- /dev/null
+From 8e469ebd6dc32cbaf620e134d79f740bf0ebab79 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Tue, 26 Jan 2010 15:42:30 -0500
+Subject: NFSv4: Don't allow posix locking against servers that don't support it
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 8e469ebd6dc32cbaf620e134d79f740bf0ebab79 upstream.
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/nfs4_fs.h | 1 +
+ fs/nfs/nfs4proc.c | 7 ++++++-
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+--- a/fs/nfs/nfs4_fs.h
++++ b/fs/nfs/nfs4_fs.h
+@@ -141,6 +141,7 @@ enum {
+ NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */
+ NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */
+ NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */
++ NFS_STATE_POSIX_LOCKS, /* Posix locks are supported */
+ };
+
+ struct nfs4_state {
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1573,6 +1573,8 @@ static int _nfs4_do_open(struct inode *d
+ status = PTR_ERR(state);
+ if (IS_ERR(state))
+ goto err_opendata_put;
++ if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0)
++ set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
+ nfs4_opendata_put(opendata);
+ nfs4_put_state_owner(sp);
+ *res = state;
+@@ -4060,8 +4062,11 @@ static int _nfs4_proc_setlk(struct nfs4_
+ {
+ struct nfs_inode *nfsi = NFS_I(state->inode);
+ unsigned char fl_flags = request->fl_flags;
+- int status;
++ int status = -ENOLCK;
+
++ if ((fl_flags & FL_POSIX) &&
++ !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
++ goto out;
+ /* Is this a delegated open? */
+ status = nfs4_set_lock_state(state, request);
+ if (status != 0)
--- /dev/null
+From 2bee72a6aa1e6d0a4f5da56217f0d0bbbdd0d9a3 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Tue, 26 Jan 2010 15:42:21 -0500
+Subject: NFSv4: Ensure that the NFSv4 locking can recover from stateid errors
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 2bee72a6aa1e6d0a4f5da56217f0d0bbbdd0d9a3 upstream.
+
+In most cases, we just want to mark the lock_stateid sequence id as being
+uninitialised.
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ fs/nfs/nfs4proc.c | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -3978,6 +3978,22 @@ static const struct rpc_call_ops nfs4_lo
+ .rpc_release = nfs4_lock_release,
+ };
+
++static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
++{
++ struct nfs_client *clp = server->nfs_client;
++ struct nfs4_state *state = lsp->ls_state;
++
++ switch (error) {
++ case -NFS4ERR_ADMIN_REVOKED:
++ case -NFS4ERR_BAD_STATEID:
++ case -NFS4ERR_EXPIRED:
++ if (new_lock_owner != 0 ||
++ (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
++ nfs4_state_mark_reclaim_nograce(clp, state);
++ lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
++ };
++}
++
+ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int reclaim)
+ {
+ struct nfs4_lockdata *data;
+@@ -4013,6 +4029,9 @@ static int _nfs4_do_setlk(struct nfs4_st
+ ret = nfs4_wait_for_completion_rpc_task(task);
+ if (ret == 0) {
+ ret = data->rpc_status;
++ if (ret)
++ nfs4_handle_setlk_error(data->server, data->lsp,
++ data->arg.new_lock_owner, ret);
+ } else
+ data->cancelled = 1;
+ rpc_put_task(task);
--- /dev/null
+From 9858ae38011d699d4c2fa7f3493a47accf43a0f5 Mon Sep 17 00:00:00 2001
+From: Kashyap, Desai <kashyap.desai@lsi.com>
+Date: Mon, 25 Jan 2010 16:20:52 +0530
+Subject: SCSI: mptfusion : mptscsih_abort return value should be SUCCESS instead of value 0.
+
+From: Kashyap, Desai <kashyap.desai@lsi.com>
+
+commit 9858ae38011d699d4c2fa7f3493a47accf43a0f5 upstream.
+
+retval should be SUCCESS/FAILED, which are defined in scsi.h.
+retval = 0 indicates the wrong return value. It must be retval = SUCCESS.
+
+Signed-off-by: Kashyap Desai <kashyap.desai@lsi.com>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/message/fusion/mptscsih.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/message/fusion/mptscsih.c
++++ b/drivers/message/fusion/mptscsih.c
+@@ -1720,7 +1720,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
+ dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "task abort: "
+ "Command not in the active list! (sc=%p)\n", ioc->name,
+ SCpnt));
+- retval = 0;
++ retval = SUCCESS;
+ goto out;
+ }
+
resource-add-helpers-for-fetching-rlimits.patch
fs-exec.c-restrict-initial-stack-space-expansion-to-rlimit.patch
cifs-fix-length-calculation-for-converted-unicode-readdir-names.patch
+nfs-fix-a-reference-leak-in-nfs_wb_cancel_page.patch
+nfs-try-to-commit-unstable-writes-in-nfs_release_page.patch
+nfsv4-don-t-allow-posix-locking-against-servers-that-don-t-support-it.patch
+nfsv4-ensure-that-the-nfsv4-locking-can-recover-from-stateid-errors.patch
+nfs-fix-an-oops-when-truncating-a-file.patch
+nfs-fix-a-umount-race.patch
+nfs-fix-a-bug-in-nfs_fscache_release_page.patch
+nfs-fix-the-mapping-of-the-nfserr_serverfault-error.patch
+md-fix-degraded-calculation-when-starting-a-reshape.patch
+v4l-dvb-dvb-core-fix-initialization-of-feeds-list-in-demux-filter.patch
+export-the-symbol-of-getboottime-and-mmonotonic_to_bootbased.patch
+kvmclock-count-total_sleep_time-when-updating-guest-clock.patch
+kvm-pit-control-word-is-write-only.patch
+tpm_infineon-fix-suspend-resume-handler-for-pnp_driver.patch
+amd64_edac-do-not-falsely-trigger-kerneloops.patch
+netfilter-nf_conntrack-fix-memory-corruption-with-multiple-namespaces.patch
+netfilter-nf_conntrack-per-netns-nf_conntrack_cachep.patch
+netfilter-nf_conntrack-restrict-runtime-expect-hashsize-modifications.patch
+netfilter-xtables-compat-out-of-scope-fix.patch
+netfilter-nf_conntrack-fix-hash-resizing-with-namespaces.patch
+drm-i915-remove-full-registers-dump-debug.patch
+drm-i915-add-i915_lp_ring_sync-helper.patch
+drm-i915-don-t-wait-interruptible-for-possible-plane-buffer-flush.patch
+dasd-remove-strings-from-s390dbf.patch
+crypto-padlock-sha-add-import-export-support.patch
+wmi-free-the-allocated-acpi-objects-through-wmi_get_event_data.patch
+dell-wmi-hp-wmi-msi-wmi-check-wmi_get_event_data-return-value.patch
+dev-mem-introduce-size_inside_page.patch
+devmem-check-vmalloc-address-on-kmem-read-write.patch
+devmem-fix-kmem-write-bug-on-memory-holes.patch
+scsi-mptfusion-mptscsih_abort-return-value-should-be-success-instead-of-value-0.patch
+sh-couple-kernel-and-user-write-page-perm-bits-for-config_x2tlb.patch
--- /dev/null
+From fcb4ebd678858850e8b029909064175cb627868d Mon Sep 17 00:00:00 2001
+From: Matt Fleming <matt@console-pimps.org>
+Date: Fri, 11 Dec 2009 22:58:17 +0000
+Subject: sh: Couple kernel and user write page perm bits for CONFIG_X2TLB
+
+From: Matt Fleming <matt@console-pimps.org>
+
+commit fcb4ebd678858850e8b029909064175cb627868d upstream.
+
+pte_write() should check whether the permissions include either the user
+or kernel write permission bits. Likewise, pte_wrprotect() needs to
+remove both the kernel and user write bits.
+
+Without this patch handle_tlbmiss() doesn't handle faulting in pages
+from the P3 area (our vmalloc space) because of a write. Mappings of the
+P3 space have the _PAGE_EXT_KERN_WRITE bit but not _PAGE_EXT_USER_WRITE.
+
+Signed-off-by: Matt Fleming <matt@console-pimps.org>
+Signed-off-by: Paul Mundt <lethal@linux-sh.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/sh/include/asm/pgtable_32.h | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/sh/include/asm/pgtable_32.h
++++ b/arch/sh/include/asm/pgtable_32.h
+@@ -344,7 +344,8 @@ static inline void set_pte(pte_t *ptep,
+ #define pte_special(pte) ((pte).pte_low & _PAGE_SPECIAL)
+
+ #ifdef CONFIG_X2TLB
+-#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE)
++#define pte_write(pte) \
++ ((pte).pte_high & (_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE))
+ #else
+ #define pte_write(pte) ((pte).pte_low & _PAGE_RW)
+ #endif
+@@ -358,7 +359,7 @@ static inline pte_t pte_##fn(pte_t pte)
+ * individually toggled (and user permissions are entirely decoupled from
+ * kernel permissions), we attempt to couple them a bit more sanely here.
+ */
+-PTE_BIT_FUNC(high, wrprotect, &= ~_PAGE_EXT_USER_WRITE);
++PTE_BIT_FUNC(high, wrprotect, &= ~(_PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE));
+ PTE_BIT_FUNC(high, mkwrite, |= _PAGE_EXT_USER_WRITE | _PAGE_EXT_KERN_WRITE);
+ PTE_BIT_FUNC(high, mkhuge, |= _PAGE_SZHUGE);
+ #else
--- /dev/null
+From 93716b9470fbfd9efdc7d0f2445cb34635de3f6d Mon Sep 17 00:00:00 2001
+From: Marcel Selhorst <m.selhorst@sirrix.com>
+Date: Wed, 10 Feb 2010 13:56:32 -0800
+Subject: tpm_infineon: fix suspend/resume handler for pnp_driver
+
+From: Marcel Selhorst <m.selhorst@sirrix.com>
+
+commit 93716b9470fbfd9efdc7d0f2445cb34635de3f6d upstream.
+
+When suspending, tpm_infineon calls the generic suspend function of the
+TPM framework. However, the TPM framework does not return and the system
+hangs upon suspend. When sending the necessary command "TPM_SaveState"
+directly within the driver, suspending and resuming works fine.
+
+Signed-off-by: Marcel Selhorst <m.selhorst@sirrix.com>
+Cc: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
+Cc: Debora Velarde <debora@linux.vnet.ibm.com>
+Cc: Rajiv Andrade <srajiv@linux.vnet.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/char/tpm/tpm_infineon.c | 79 ++++++++++++++++++++++++++++------------
+ 1 file changed, 57 insertions(+), 22 deletions(-)
+
+--- a/drivers/char/tpm/tpm_infineon.c
++++ b/drivers/char/tpm/tpm_infineon.c
+@@ -39,12 +39,12 @@
+ struct tpm_inf_dev {
+ int iotype;
+
+- void __iomem *mem_base; /* MMIO ioremap'd addr */
+- unsigned long map_base; /* phys MMIO base */
+- unsigned long map_size; /* MMIO region size */
+- unsigned int index_off; /* index register offset */
++ void __iomem *mem_base; /* MMIO ioremap'd addr */
++ unsigned long map_base; /* phys MMIO base */
++ unsigned long map_size; /* MMIO region size */
++ unsigned int index_off; /* index register offset */
+
+- unsigned int data_regs; /* Data registers */
++ unsigned int data_regs; /* Data registers */
+ unsigned int data_size;
+
+ unsigned int config_port; /* IO Port config index reg */
+@@ -406,14 +406,14 @@ static const struct tpm_vendor_specific
+ .miscdev = {.fops = &inf_ops,},
+ };
+
+-static const struct pnp_device_id tpm_pnp_tbl[] = {
++static const struct pnp_device_id tpm_inf_pnp_tbl[] = {
+ /* Infineon TPMs */
+ {"IFX0101", 0},
+ {"IFX0102", 0},
+ {"", 0}
+ };
+
+-MODULE_DEVICE_TABLE(pnp, tpm_pnp_tbl);
++MODULE_DEVICE_TABLE(pnp, tpm_inf_pnp_tbl);
+
+ static int __devinit tpm_inf_pnp_probe(struct pnp_dev *dev,
+ const struct pnp_device_id *dev_id)
+@@ -430,7 +430,7 @@ static int __devinit tpm_inf_pnp_probe(s
+ if (pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) &&
+ !(pnp_port_flags(dev, 0) & IORESOURCE_DISABLED)) {
+
+- tpm_dev.iotype = TPM_INF_IO_PORT;
++ tpm_dev.iotype = TPM_INF_IO_PORT;
+
+ tpm_dev.config_port = pnp_port_start(dev, 0);
+ tpm_dev.config_size = pnp_port_len(dev, 0);
+@@ -459,9 +459,9 @@ static int __devinit tpm_inf_pnp_probe(s
+ goto err_last;
+ }
+ } else if (pnp_mem_valid(dev, 0) &&
+- !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
++ !(pnp_mem_flags(dev, 0) & IORESOURCE_DISABLED)) {
+
+- tpm_dev.iotype = TPM_INF_IO_MEM;
++ tpm_dev.iotype = TPM_INF_IO_MEM;
+
+ tpm_dev.map_base = pnp_mem_start(dev, 0);
+ tpm_dev.map_size = pnp_mem_len(dev, 0);
+@@ -563,11 +563,11 @@ static int __devinit tpm_inf_pnp_probe(s
+ "product id 0x%02x%02x"
+ "%s\n",
+ tpm_dev.iotype == TPM_INF_IO_PORT ?
+- tpm_dev.config_port :
+- tpm_dev.map_base + tpm_dev.index_off,
++ tpm_dev.config_port :
++ tpm_dev.map_base + tpm_dev.index_off,
+ tpm_dev.iotype == TPM_INF_IO_PORT ?
+- tpm_dev.data_regs :
+- tpm_dev.map_base + tpm_dev.data_regs,
++ tpm_dev.data_regs :
++ tpm_dev.map_base + tpm_dev.data_regs,
+ version[0], version[1],
+ vendorid[0], vendorid[1],
+ productid[0], productid[1], chipname);
+@@ -607,20 +607,55 @@ static __devexit void tpm_inf_pnp_remove
+ iounmap(tpm_dev.mem_base);
+ release_mem_region(tpm_dev.map_base, tpm_dev.map_size);
+ }
++ tpm_dev_vendor_release(chip);
+ tpm_remove_hardware(chip->dev);
+ }
+ }
+
++static int tpm_inf_pnp_suspend(struct pnp_dev *dev, pm_message_t pm_state)
++{
++ struct tpm_chip *chip = pnp_get_drvdata(dev);
++ int rc;
++ if (chip) {
++ u8 savestate[] = {
++ 0, 193, /* TPM_TAG_RQU_COMMAND */
++ 0, 0, 0, 10, /* blob length (in bytes) */
++ 0, 0, 0, 152 /* TPM_ORD_SaveState */
++ };
++ dev_info(&dev->dev, "saving TPM state\n");
++ rc = tpm_inf_send(chip, savestate, sizeof(savestate));
++ if (rc < 0) {
++ dev_err(&dev->dev, "error while saving TPM state\n");
++ return rc;
++ }
++ }
++ return 0;
++}
++
++static int tpm_inf_pnp_resume(struct pnp_dev *dev)
++{
++ /* Re-configure TPM after suspending */
++ tpm_config_out(ENABLE_REGISTER_PAIR, TPM_INF_ADDR);
++ tpm_config_out(IOLIMH, TPM_INF_ADDR);
++ tpm_config_out((tpm_dev.data_regs >> 8) & 0xff, TPM_INF_DATA);
++ tpm_config_out(IOLIML, TPM_INF_ADDR);
++ tpm_config_out((tpm_dev.data_regs & 0xff), TPM_INF_DATA);
++ /* activate register */
++ tpm_config_out(TPM_DAR, TPM_INF_ADDR);
++ tpm_config_out(0x01, TPM_INF_DATA);
++ tpm_config_out(DISABLE_REGISTER_PAIR, TPM_INF_ADDR);
++ /* disable RESET, LP and IRQC */
++ tpm_data_out(RESET_LP_IRQC_DISABLE, CMD);
++ return tpm_pm_resume(&dev->dev);
++}
++
+ static struct pnp_driver tpm_inf_pnp_driver = {
+ .name = "tpm_inf_pnp",
+- .driver = {
+- .owner = THIS_MODULE,
+- .suspend = tpm_pm_suspend,
+- .resume = tpm_pm_resume,
+- },
+- .id_table = tpm_pnp_tbl,
++ .id_table = tpm_inf_pnp_tbl,
+ .probe = tpm_inf_pnp_probe,
+- .remove = __devexit_p(tpm_inf_pnp_remove),
++ .suspend = tpm_inf_pnp_suspend,
++ .resume = tpm_inf_pnp_resume,
++ .remove = __devexit_p(tpm_inf_pnp_remove)
+ };
+
+ static int __init init_inf(void)
+@@ -638,5 +673,5 @@ module_exit(cleanup_inf);
+
+ MODULE_AUTHOR("Marcel Selhorst <m.selhorst@sirrix.com>");
+ MODULE_DESCRIPTION("Driver for Infineon TPM SLD 9630 TT 1.1 / SLB 9635 TT 1.2");
+-MODULE_VERSION("1.9");
++MODULE_VERSION("1.9.2");
+ MODULE_LICENSE("GPL");
--- /dev/null
+From 691c9ae099b9bcb5c27125af00a4a90120977458 Mon Sep 17 00:00:00 2001
+From: Francesco Lavra <francescolavra@interfree.it>
+Date: Sun, 7 Feb 2010 09:49:58 -0300
+Subject: V4L/DVB: dvb-core: fix initialization of feeds list in demux filter
+
+From: Francesco Lavra <francescolavra@interfree.it>
+
+commit 691c9ae099b9bcb5c27125af00a4a90120977458 upstream.
+
+A DVB demultiplexer device can be used to set up either a PES filter or
+a section filter. In the former case, the ts field of the feed union of
+struct dmxdev_filter is used, in the latter case the sec field of the
+same union is used.
+The ts field is a struct list_head, and is currently initialized in the
+open() method of the demux device. When for a given demuxer a section
+filter is set up, the sec field is played with, thus if a PES filter
+needs to be set up after that the ts field will be corrupted, causing a
+kernel oops.
+This fix moves the list head initialization to
+dvb_dmxdev_pes_filter_set(), so that the ts field is properly
+initialized every time a PES filter is set up.
+
+Signed-off-by: Francesco Lavra <francescolavra@interfree.it>
+Reviewed-by: Andy Walls <awalls@radix.net>
+Tested-by: hermann pitton <hermann-pitton@arcor.de>
+Signed-off-by: Mauro Carvalho Chehab <mchehab@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/media/dvb/dvb-core/dmxdev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/media/dvb/dvb-core/dmxdev.c
++++ b/drivers/media/dvb/dvb-core/dmxdev.c
+@@ -761,7 +761,6 @@ static int dvb_demux_open(struct inode *
+ dvb_ringbuffer_init(&dmxdevfilter->buffer, NULL, 8192);
+ dmxdevfilter->type = DMXDEV_TYPE_NONE;
+ dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_ALLOCATED);
+- INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
+ init_timer(&dmxdevfilter->timer);
+
+ dvbdev->users++;
+@@ -887,6 +886,7 @@ static int dvb_dmxdev_pes_filter_set(str
+ dmxdevfilter->type = DMXDEV_TYPE_PES;
+ memcpy(&dmxdevfilter->params, params,
+ sizeof(struct dmx_pes_filter_params));
++ INIT_LIST_HEAD(&dmxdevfilter->feed.ts);
+
+ dvb_dmxdev_filter_state_set(dmxdevfilter, DMXDEV_STATE_SET);
+
--- /dev/null
+From cebbert@redhat.com Fri Feb 12 13:29:09 2010
+From: Anisse Astier <anisse@astier.eu>
+Date: Sat, 30 Jan 2010 18:00:17 -0500
+Subject: wmi: Free the allocated acpi objects through wmi_get_event_data
+To: stable@kernel.org
+Cc: Len Brown <len.brown@intel.com>
+Message-ID: <201002030139.o131dtPQ012382@int-mx03.intmail.prod.int.phx2.redhat.com>
+
+
+From: Anisse Astier <anisse@astier.eu>
+
+commit 3e9b988e4edf065d39c1343937f717319b1c1065 upstream
+
+[ backported to 2.6.32 ]
+
+These functions allocate an acpi object by calling wmi_get_event_data, which
+then calls acpi_evaluate_object, and it is not freed afterwards.
+
+And kernel doc is fixed for parameters of wmi_get_event_data.
+
+Signed-off-by: Anisse Astier <anisse@astier.eu>
+Acked-by: Randy Dunlap <randy.dunlap@oracle.com>
+Acked-by: Carlos Corbacho <carlos@strangeworlds.co.uk>
+Signed-off-by: Len Brown <len.brown@intel.com>
+Cc: Chuck Ebbert <cebbert@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/platform/x86/dell-wmi.c | 1 +
+ drivers/platform/x86/hp-wmi.c | 2 ++
+ drivers/platform/x86/wmi.c | 4 ++--
+ 3 files changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/platform/x86/dell-wmi.c
++++ b/drivers/platform/x86/dell-wmi.c
+@@ -180,6 +180,7 @@ static void dell_wmi_notify(u32 value, v
+ printk(KERN_INFO "dell-wmi: Unknown key %x pressed\n",
+ buffer[1] & 0xFFFF);
+ }
++ kfree(obj);
+ }
+
+ static int __init dell_wmi_input_setup(void)
+--- a/drivers/platform/x86/hp-wmi.c
++++ b/drivers/platform/x86/hp-wmi.c
+@@ -377,6 +377,8 @@ static void hp_wmi_notify(u32 value, voi
+ eventcode);
+ } else
+ printk(KERN_INFO "HP WMI: Unknown response received\n");
++
++ kfree(obj);
+ }
+
+ static int __init hp_wmi_input_setup(void)
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -510,8 +510,8 @@ EXPORT_SYMBOL_GPL(wmi_remove_notify_hand
+ /**
+ * wmi_get_event_data - Get WMI data associated with an event
+ *
+- * @event - Event to find
+- * &out - Buffer to hold event data
++ * @event: Event to find
++ * @out: Buffer to hold event data. out->pointer should be freed with kfree()
+ *
+ * Returns extra data associated with an event in WMI.
+ */