]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
start .32 queue
authorGreg Kroah-Hartman <gregkh@suse.de>
Fri, 29 Jan 2010 15:44:15 +0000 (07:44 -0800)
committerGreg Kroah-Hartman <gregkh@suse.de>
Fri, 29 Jan 2010 15:44:15 +0000 (07:44 -0800)
queue-2.6.32/drm-i915-selectively-enable-self-reclaim.patch [new file with mode: 0644]
queue-2.6.32/firewire-ohci-fix-crashes-with-tsb43ab23-on-64bit-systems.patch [new file with mode: 0644]
queue-2.6.32/mm-add-new-read_cache_page_gfp-helper-function.patch [new file with mode: 0644]
queue-2.6.32/mptsas-fix-issue-with-chain-pools-allocation-on-katmai.patch [new file with mode: 0644]
queue-2.6.32/s390-fix-single-stepped-svcs-with-trace_irqflags-y.patch [new file with mode: 0644]
queue-2.6.32/scsi_lib-fix-bug-in-completion-of-bidi-commands.patch [new file with mode: 0644]
queue-2.6.32/series [new file with mode: 0644]
queue-2.6.32/x86-remove-x86-cpu-features-in-debugfs-config_x86_cpu_debug.patch [new file with mode: 0644]
queue-2.6.32/x86-set-hotpluggable-nodes-in-nodes_possible_map.patch [new file with mode: 0644]

diff --git a/queue-2.6.32/drm-i915-selectively-enable-self-reclaim.patch b/queue-2.6.32/drm-i915-selectively-enable-self-reclaim.patch
new file mode 100644 (file)
index 0000000..94f3bb2
--- /dev/null
@@ -0,0 +1,224 @@
+From 4bdadb9785696439c6e2b3efe34aa76df1149c83 Mon Sep 17 00:00:00 2001
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Wed, 27 Jan 2010 13:36:32 +0000
+Subject: drm/i915: Selectively enable self-reclaim
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit 4bdadb9785696439c6e2b3efe34aa76df1149c83 upstream.
+
+Having missed the ENOMEM return via i915_gem_fault(), there are probably
+other paths that I also missed. By not enabling NORETRY by default these
+paths can run the shrinker and take memory from the system (but not from
+our own inactive lists because our shrinker can not run whilst we hold
+the struct mutex) and this may allow the system to survive a little longer
+whilst our drivers consume all available memory.
+
+References:
+  OOM killer unexpectedly called with kernel 2.6.32
+  http://bugzilla.kernel.org/show_bug.cgi?id=14933
+
+v2: Pass gfp into page mapping.
+v3: Use new read_cache_page_gfp() instead of open-coding.
+
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
+Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
+Cc: Eric Anholt <eric@anholt.net>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/gpu/drm/drm_gem.c           |   13 --------
+ drivers/gpu/drm/i915/i915_debugfs.c |    2 -
+ drivers/gpu/drm/i915/i915_drv.h     |    2 -
+ drivers/gpu/drm/i915/i915_gem.c     |   54 +++++++++++-------------------------
+ 4 files changed, 19 insertions(+), 52 deletions(-)
+
+--- a/drivers/gpu/drm/drm_gem.c
++++ b/drivers/gpu/drm/drm_gem.c
+@@ -142,19 +142,6 @@ drm_gem_object_alloc(struct drm_device *
+       if (IS_ERR(obj->filp))
+               goto free;
+-      /* Basically we want to disable the OOM killer and handle ENOMEM
+-       * ourselves by sacrificing pages from cached buffers.
+-       * XXX shmem_file_[gs]et_gfp_mask()
+-       */
+-      mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping,
+-                           GFP_HIGHUSER |
+-                           __GFP_COLD |
+-                           __GFP_FS |
+-                           __GFP_RECLAIMABLE |
+-                           __GFP_NORETRY |
+-                           __GFP_NOWARN |
+-                           __GFP_NOMEMALLOC);
+-
+       kref_init(&obj->refcount);
+       kref_init(&obj->handlecount);
+       obj->size = size;
+--- a/drivers/gpu/drm/i915/i915_debugfs.c
++++ b/drivers/gpu/drm/i915/i915_debugfs.c
+@@ -288,7 +288,7 @@ static int i915_batchbuffer_info(struct 
+       list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+               obj = obj_priv->obj;
+               if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
+-                  ret = i915_gem_object_get_pages(obj);
++                  ret = i915_gem_object_get_pages(obj, 0);
+                   if (ret) {
+                           DRM_ERROR("Failed to get pages: %d\n", ret);
+                           spin_unlock(&dev_priv->mm.active_list_lock);
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -822,7 +822,7 @@ int i915_gem_attach_phys_object(struct d
+ void i915_gem_detach_phys_object(struct drm_device *dev,
+                                struct drm_gem_object *obj);
+ void i915_gem_free_all_phys_object(struct drm_device *dev);
+-int i915_gem_object_get_pages(struct drm_gem_object *obj);
++int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+ void i915_gem_object_put_pages(struct drm_gem_object *obj);
+ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -277,7 +277,7 @@ i915_gem_shmem_pread_fast(struct drm_dev
+       mutex_lock(&dev->struct_mutex);
+-      ret = i915_gem_object_get_pages(obj);
++      ret = i915_gem_object_get_pages(obj, 0);
+       if (ret != 0)
+               goto fail_unlock;
+@@ -321,40 +321,24 @@ fail_unlock:
+       return ret;
+ }
+-static inline gfp_t
+-i915_gem_object_get_page_gfp_mask (struct drm_gem_object *obj)
+-{
+-      return mapping_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping);
+-}
+-
+-static inline void
+-i915_gem_object_set_page_gfp_mask (struct drm_gem_object *obj, gfp_t gfp)
+-{
+-      mapping_set_gfp_mask(obj->filp->f_path.dentry->d_inode->i_mapping, gfp);
+-}
+-
+ static int
+ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
+ {
+       int ret;
+-      ret = i915_gem_object_get_pages(obj);
++      ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
+       /* If we've insufficient memory to map in the pages, attempt
+        * to make some space by throwing out some old buffers.
+        */
+       if (ret == -ENOMEM) {
+               struct drm_device *dev = obj->dev;
+-              gfp_t gfp;
+               ret = i915_gem_evict_something(dev, obj->size);
+               if (ret)
+                       return ret;
+-              gfp = i915_gem_object_get_page_gfp_mask(obj);
+-              i915_gem_object_set_page_gfp_mask(obj, gfp & ~__GFP_NORETRY);
+-              ret = i915_gem_object_get_pages(obj);
+-              i915_gem_object_set_page_gfp_mask (obj, gfp);
++              ret = i915_gem_object_get_pages(obj, 0);
+       }
+       return ret;
+@@ -790,7 +774,7 @@ i915_gem_shmem_pwrite_fast(struct drm_de
+       mutex_lock(&dev->struct_mutex);
+-      ret = i915_gem_object_get_pages(obj);
++      ret = i915_gem_object_get_pages(obj, 0);
+       if (ret != 0)
+               goto fail_unlock;
+@@ -2219,7 +2203,8 @@ i915_gem_evict_something(struct drm_devi
+ }
+ int
+-i915_gem_object_get_pages(struct drm_gem_object *obj)
++i915_gem_object_get_pages(struct drm_gem_object *obj,
++                        gfp_t gfpmask)
+ {
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       int page_count, i;
+@@ -2245,7 +2230,10 @@ i915_gem_object_get_pages(struct drm_gem
+       inode = obj->filp->f_path.dentry->d_inode;
+       mapping = inode->i_mapping;
+       for (i = 0; i < page_count; i++) {
+-              page = read_mapping_page(mapping, i, NULL);
++              page = read_cache_page_gfp(mapping, i,
++                                         mapping_gfp_mask (mapping) |
++                                         __GFP_COLD |
++                                         gfpmask);
+               if (IS_ERR(page)) {
+                       ret = PTR_ERR(page);
+                       i915_gem_object_put_pages(obj);
+@@ -2568,7 +2556,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj_priv = obj->driver_private;
+       struct drm_mm_node *free_space;
+-      bool retry_alloc = false;
++      gfp_t gfpmask =  __GFP_NORETRY | __GFP_NOWARN;
+       int ret;
+       if (obj_priv->madv != I915_MADV_WILLNEED) {
+@@ -2612,15 +2600,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
+       DRM_INFO("Binding object of size %zd at 0x%08x\n",
+                obj->size, obj_priv->gtt_offset);
+ #endif
+-      if (retry_alloc) {
+-              i915_gem_object_set_page_gfp_mask (obj,
+-                                                 i915_gem_object_get_page_gfp_mask (obj) & ~__GFP_NORETRY);
+-      }
+-      ret = i915_gem_object_get_pages(obj);
+-      if (retry_alloc) {
+-              i915_gem_object_set_page_gfp_mask (obj,
+-                                                 i915_gem_object_get_page_gfp_mask (obj) | __GFP_NORETRY);
+-      }
++      ret = i915_gem_object_get_pages(obj, gfpmask);
+       if (ret) {
+               drm_mm_put_block(obj_priv->gtt_space);
+               obj_priv->gtt_space = NULL;
+@@ -2630,9 +2610,9 @@ i915_gem_object_bind_to_gtt(struct drm_g
+                       ret = i915_gem_evict_something(dev, obj->size);
+                       if (ret) {
+                               /* now try to shrink everyone else */
+-                              if (! retry_alloc) {
+-                                  retry_alloc = true;
+-                                  goto search_free;
++                              if (gfpmask) {
++                                      gfpmask = 0;
++                                      goto search_free;
+                               }
+                               return ret;
+@@ -4695,7 +4675,7 @@ void i915_gem_detach_phys_object(struct 
+       if (!obj_priv->phys_obj)
+               return;
+-      ret = i915_gem_object_get_pages(obj);
++      ret = i915_gem_object_get_pages(obj, 0);
+       if (ret)
+               goto out;
+@@ -4753,7 +4733,7 @@ i915_gem_attach_phys_object(struct drm_d
+       obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
+       obj_priv->phys_obj->cur_obj = obj;
+-      ret = i915_gem_object_get_pages(obj);
++      ret = i915_gem_object_get_pages(obj, 0);
+       if (ret) {
+               DRM_ERROR("failed to get page list\n");
+               goto out;
diff --git a/queue-2.6.32/firewire-ohci-fix-crashes-with-tsb43ab23-on-64bit-systems.patch b/queue-2.6.32/firewire-ohci-fix-crashes-with-tsb43ab23-on-64bit-systems.patch
new file mode 100644 (file)
index 0000000..ce06899
--- /dev/null
@@ -0,0 +1,68 @@
+From 7a481436787cbc932af6c407b317ac603969a242 Mon Sep 17 00:00:00 2001
+From: Stefan Richter <stefanr@s5r6.in-berlin.de>
+Date: Tue, 26 Jan 2010 21:39:07 +0100
+Subject: firewire: ohci: fix crashes with TSB43AB23 on 64bit systems
+
+From: Stefan Richter <stefanr@s5r6.in-berlin.de>
+
+commit 7a481436787cbc932af6c407b317ac603969a242 upstream.
+
+Unsurprisingly, Texas Instruments TSB43AB23 exhibits the same behaviour
+as TSB43AB22/A in dual buffer IR DMA mode:  If descriptors are located
+at physical addresses above the 31 bit address range (2 GB), the
+controller will overwrite random memory.  With luck, this merely
+prevents video reception.  With only a little less luck, the machine
+crashes.
+
+We use the same workaround here as with TSB43AB22/A:  Switch off the
+dual buffer capability flag and use packet-per-buffer IR DMA instead.
+Another possible workaround would be to limit the coherent DMA mask to
+31 bits.
+
+In Linux 2.6.33, this change serves effectively only as documentation
+since dual buffer mode is not used for any controller anymore.  But
+somebody might want to re-enable it in the future to make use of
+features of dual buffer DMA that are not available in packet-per-buffer
+mode.
+
+In Linux 2.6.32 and older, this update is vital for anyone with this
+controller, more than 2 GB RAM, a 64 bit kernel, and FireWire video or
+audio applications.
+
+We have at least four reports:
+http://bugzilla.kernel.org/show_bug.cgi?id=13808
+http://marc.info/?l=linux1394-user&m=126154279004083
+https://bugzilla.redhat.com/show_bug.cgi?id=552142
+http://marc.info/?l=linux1394-user&m=126432246128386
+
+Reported-by: Paul Johnson
+Reported-by: Ronneil Camara
+Reported-by: G Zornetzer
+Reported-by: Mark Thompson
+Signed-off-by: Stefan Richter <stefanr@s5r6.in-berlin.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/firewire/ohci.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/firewire/ohci.c
++++ b/drivers/firewire/ohci.c
+@@ -2412,6 +2412,7 @@ static void ohci_pmac_off(struct pci_dev
+ #define PCI_VENDOR_ID_AGERE           PCI_VENDOR_ID_ATT
+ #define PCI_DEVICE_ID_AGERE_FW643     0x5901
++#define PCI_DEVICE_ID_TI_TSB43AB23    0x8024
+ static int __devinit pci_probe(struct pci_dev *dev,
+                              const struct pci_device_id *ent)
+@@ -2477,7 +2478,8 @@ static int __devinit pci_probe(struct pc
+ #if !defined(CONFIG_X86_32)
+       /* dual-buffer mode is broken with descriptor addresses above 2G */
+       if (dev->vendor == PCI_VENDOR_ID_TI &&
+-          dev->device == PCI_DEVICE_ID_TI_TSB43AB22)
++          (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 ||
++           dev->device == PCI_DEVICE_ID_TI_TSB43AB23))
+               ohci->use_dualbuffer = false;
+ #endif
diff --git a/queue-2.6.32/mm-add-new-read_cache_page_gfp-helper-function.patch b/queue-2.6.32/mm-add-new-read_cache_page_gfp-helper-function.patch
new file mode 100644 (file)
index 0000000..ac36aa6
--- /dev/null
@@ -0,0 +1,189 @@
+From 0531b2aac59c2296570ac52bfc032ef2ace7d5e1 Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Wed, 27 Jan 2010 09:20:03 -0800
+Subject: mm: add new 'read_cache_page_gfp()' helper function
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit 0531b2aac59c2296570ac52bfc032ef2ace7d5e1 upstream.
+
+It's a simplified 'read_cache_page()' which takes a page allocation
+flag, so that different paths can control how aggressive the memory
+allocations are that populate a address space.
+
+In particular, the intel GPU object mapping code wants to be able to do
+a certain amount of own internal memory management by automatically
+shrinking the address space when memory starts getting tight.  This
+allows it to dynamically use different memory allocation policies on a
+per-allocation basis, rather than depend on the (static) address space
+gfp policy.
+
+The actual new function is a one-liner, but re-organizing the helper
+functions to the point where you can do this with a single line of code
+is what most of the patch is all about.
+
+Tested-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ include/linux/pagemap.h |    2 
+ mm/filemap.c            |  100 ++++++++++++++++++++++++++++++++----------------
+ 2 files changed, 70 insertions(+), 32 deletions(-)
+
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -253,6 +253,8 @@ extern struct page * read_cache_page_asy
+ extern struct page * read_cache_page(struct address_space *mapping,
+                               pgoff_t index, filler_t *filler,
+                               void *data);
++extern struct page * read_cache_page_gfp(struct address_space *mapping,
++                              pgoff_t index, gfp_t gfp_mask);
+ extern int read_cache_pages(struct address_space *mapping,
+               struct list_head *pages, filler_t *filler, void *data);
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1655,14 +1655,15 @@ EXPORT_SYMBOL(generic_file_readonly_mmap
+ static struct page *__read_cache_page(struct address_space *mapping,
+                               pgoff_t index,
+                               int (*filler)(void *,struct page*),
+-                              void *data)
++                              void *data,
++                              gfp_t gfp)
+ {
+       struct page *page;
+       int err;
+ repeat:
+       page = find_get_page(mapping, index);
+       if (!page) {
+-              page = page_cache_alloc_cold(mapping);
++              page = __page_cache_alloc(gfp | __GFP_COLD);
+               if (!page)
+                       return ERR_PTR(-ENOMEM);
+               err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
+@@ -1682,31 +1683,18 @@ repeat:
+       return page;
+ }
+-/**
+- * read_cache_page_async - read into page cache, fill it if needed
+- * @mapping:  the page's address_space
+- * @index:    the page index
+- * @filler:   function to perform the read
+- * @data:     destination for read data
+- *
+- * Same as read_cache_page, but don't wait for page to become unlocked
+- * after submitting it to the filler.
+- *
+- * Read into the page cache. If a page already exists, and PageUptodate() is
+- * not set, try to fill the page but don't wait for it to become unlocked.
+- *
+- * If the page does not get brought uptodate, return -EIO.
+- */
+-struct page *read_cache_page_async(struct address_space *mapping,
++static struct page *do_read_cache_page(struct address_space *mapping,
+                               pgoff_t index,
+                               int (*filler)(void *,struct page*),
+-                              void *data)
++                              void *data,
++                              gfp_t gfp)
++
+ {
+       struct page *page;
+       int err;
+ retry:
+-      page = __read_cache_page(mapping, index, filler, data);
++      page = __read_cache_page(mapping, index, filler, data, gfp);
+       if (IS_ERR(page))
+               return page;
+       if (PageUptodate(page))
+@@ -1731,8 +1719,67 @@ out:
+       mark_page_accessed(page);
+       return page;
+ }
++
++/**
++ * read_cache_page_async - read into page cache, fill it if needed
++ * @mapping:  the page's address_space
++ * @index:    the page index
++ * @filler:   function to perform the read
++ * @data:     destination for read data
++ *
++ * Same as read_cache_page, but don't wait for page to become unlocked
++ * after submitting it to the filler.
++ *
++ * Read into the page cache. If a page already exists, and PageUptodate() is
++ * not set, try to fill the page but don't wait for it to become unlocked.
++ *
++ * If the page does not get brought uptodate, return -EIO.
++ */
++struct page *read_cache_page_async(struct address_space *mapping,
++                              pgoff_t index,
++                              int (*filler)(void *,struct page*),
++                              void *data)
++{
++      return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping));
++}
+ EXPORT_SYMBOL(read_cache_page_async);
++static struct page *wait_on_page_read(struct page *page)
++{
++      if (!IS_ERR(page)) {
++              wait_on_page_locked(page);
++              if (!PageUptodate(page)) {
++                      page_cache_release(page);
++                      page = ERR_PTR(-EIO);
++              }
++      }
++      return page;
++}
++
++/**
++ * read_cache_page_gfp - read into page cache, using specified page allocation flags.
++ * @mapping:  the page's address_space
++ * @index:    the page index
++ * @gfp:      the page allocator flags to use if allocating
++ *
++ * This is the same as "read_mapping_page(mapping, index, NULL)", but with
++ * any new page allocations done using the specified allocation flags. Note
++ * that the Radix tree operations will still use GFP_KERNEL, so you can't
++ * expect to do this atomically or anything like that - but you can pass in
++ * other page requirements.
++ *
++ * If the page does not get brought uptodate, return -EIO.
++ */
++struct page *read_cache_page_gfp(struct address_space *mapping,
++                              pgoff_t index,
++                              gfp_t gfp)
++{
++      filler_t *filler = (filler_t *)mapping->a_ops->readpage;
++
++      return wait_on_page_read(do_read_cache_page(mapping, index, filler, NULL, gfp));
++}
++EXPORT_SYMBOL(read_cache_page_gfp);
++
+ /**
+  * read_cache_page - read into page cache, fill it if needed
+  * @mapping:  the page's address_space
+@@ -1750,18 +1797,7 @@ struct page *read_cache_page(struct addr
+                               int (*filler)(void *,struct page*),
+                               void *data)
+ {
+-      struct page *page;
+-
+-      page = read_cache_page_async(mapping, index, filler, data);
+-      if (IS_ERR(page))
+-              goto out;
+-      wait_on_page_locked(page);
+-      if (!PageUptodate(page)) {
+-              page_cache_release(page);
+-              page = ERR_PTR(-EIO);
+-      }
+- out:
+-      return page;
++      return wait_on_page_read(read_cache_page_async(mapping, index, filler, data));
+ }
+ EXPORT_SYMBOL(read_cache_page);
diff --git a/queue-2.6.32/mptsas-fix-issue-with-chain-pools-allocation-on-katmai.patch b/queue-2.6.32/mptsas-fix-issue-with-chain-pools-allocation-on-katmai.patch
new file mode 100644 (file)
index 0000000..b33ed30
--- /dev/null
@@ -0,0 +1,47 @@
+From f1053a7ca9ce095d95bcc1cf41684c5e4f3e7751 Mon Sep 17 00:00:00 2001
+From: Anatolij Gustschin <agust@denx.de>
+Date: Sat, 12 Dec 2009 14:52:21 +0100
+Subject: [SCSI] mptsas: Fix issue with chain pools allocation on katmai
+
+From: Anatolij Gustschin <agust@denx.de>
+
+commit f1053a7ca9ce095d95bcc1cf41684c5e4f3e7751 upstream.
+
+Since commit 9d2e9d66a3f032667934144cd61c396ba49f090d
+mptsas driver fails to allocate memory for the MPT chain buffers
+for second LSI adapter on PPC440SPe Katmai platform:
+...
+ioc1: LSISAS1068E B3: Capabilities={Initiator}
+mptbase: ioc1: ERROR - Unable to allocate Reply, Request, Chain Buffers!
+mptbase: ioc1: ERROR - didn't initialize properly! (-3)
+mptsas: probe of 0002:31:00.0 failed with error -3
+
+This commit increased MPT_FC_CAN_QUEUE value but initChainBuffers()
+doesn't differentiate between SAS and FC causing increased allocation
+for SAS case, too. Later pci_alloc_consistent() fails to allocate
+increased chain buffer pool size for SAS case.
+
+Provide a fix by looking at the bus type and using appropriate
+MPT_SAS_CAN_QUEUE value while calculation of the number of chain
+buffers.
+
+Signed-off-by: Anatolij Gustschin <agust@denx.de>
+Acked-by: Kashyap Desai <kashyap.desai@lsi.com>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/message/fusion/mptbase.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/message/fusion/mptbase.c
++++ b/drivers/message/fusion/mptbase.c
+@@ -4330,6 +4330,8 @@ initChainBuffers(MPT_ADAPTER *ioc)
+       if (ioc->bus_type == SPI)
+               num_chain *= MPT_SCSI_CAN_QUEUE;
++      else if (ioc->bus_type == SAS)
++              num_chain *= MPT_SAS_CAN_QUEUE;
+       else
+               num_chain *= MPT_FC_CAN_QUEUE;
diff --git a/queue-2.6.32/s390-fix-single-stepped-svcs-with-trace_irqflags-y.patch b/queue-2.6.32/s390-fix-single-stepped-svcs-with-trace_irqflags-y.patch
new file mode 100644 (file)
index 0000000..57e6288
--- /dev/null
@@ -0,0 +1,43 @@
+From 21ec7f6dbf10492ce9a21718040677d3e68bd57d Mon Sep 17 00:00:00 2001
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Wed, 27 Jan 2010 10:12:40 +0100
+Subject: S390: fix single stepped svcs with TRACE_IRQFLAGS=y
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+commit 21ec7f6dbf10492ce9a21718040677d3e68bd57d upstream.
+
+If irq flags tracing is enabled the TRACE_IRQS_ON macros expands to
+a function call which clobbers registers %r0-%r5. The macro is used
+in the code path for single stepped system calls. The argument
+registers %r2-%r6 need to be restored from the stack before the system
+call function is called.
+
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/s390/kernel/entry.S   |    1 +
+ arch/s390/kernel/entry64.S |    1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/arch/s390/kernel/entry64.S
++++ b/arch/s390/kernel/entry64.S
+@@ -549,6 +549,7 @@ pgm_svcper:
+       mvc     __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
+       oi      __TI_flags+7(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+       TRACE_IRQS_ON
++      lmg     %r2,%r6,SP_R2(%r15)     # load svc arguments
+       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
+       j       sysc_do_svc
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -571,6 +571,7 @@ pgm_svcper:
+       mvc     __THREAD_per+__PER_access_id(1,%r8),__LC_PER_ACCESS_ID
+       oi      __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+       TRACE_IRQS_ON
++      lm      %r2,%r6,SP_R2(%r15)     # load svc arguments
+       stosm   __SF_EMPTY(%r15),0x03   # reenable interrupts
+       b       BASED(sysc_do_svc)
diff --git a/queue-2.6.32/scsi_lib-fix-bug-in-completion-of-bidi-commands.patch b/queue-2.6.32/scsi_lib-fix-bug-in-completion-of-bidi-commands.patch
new file mode 100644 (file)
index 0000000..65007c6
--- /dev/null
@@ -0,0 +1,53 @@
+From 63c43b0ec1765b74c734d465ba6345ef4f434df8 Mon Sep 17 00:00:00 2001
+From: Boaz Harrosh <bharrosh@panasas.com>
+Date: Tue, 15 Dec 2009 17:25:43 +0200
+Subject: [SCSI] scsi_lib: Fix bug in completion of bidi commands
+
+From: Boaz Harrosh <bharrosh@panasas.com>
+
+commit 63c43b0ec1765b74c734d465ba6345ef4f434df8 upstream.
+
+Because of the terrible structuring of scsi-bidi-commands
+it breaks some of the life time rules of a scsi-command.
+It is now not allowed to free up the block-request before
+cleanup and partial deallocation of the scsi-command. (Which
+is not so for none bidi commands)
+
+The right fix to this problem would be to make bidi command
+a first citizen by allocating a scsi_sdb pointer at scsi command
+just like cmd->prot_sdb. The bidi sdb should be allocated/deallocated
+as part of the get/put_command (Again like the prot_sdb) and the
+current decoupling of scsi_cmnd and blk-request should be kept.
+
+For now make sure scsi_release_buffers() is called before the
+call to blk_end_request_all() which might cause the suicide of
+the block requests. At best the leak of bidi buffers, at worse
+a crash, as there is a race between the existence of the bidi_request
+and the free of the associated bidi_sdb.
+
+The reason this was never hit before is because only OSD has the potential
+of doing asynchronous bidi commands. (So does bsg but it is never used)
+And OSD clients just happen to do all their bidi commands synchronously, up
+until recently.
+
+Signed-off-by: Boaz Harrosh <bharrosh@panasas.com>
+Signed-off-by: James Bottomley <James.Bottomley@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ drivers/scsi/scsi_lib.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -749,9 +749,9 @@ void scsi_io_completion(struct scsi_cmnd
+                        */
+                       req->next_rq->resid_len = scsi_in(cmd)->resid;
++                      scsi_release_buffers(cmd);
+                       blk_end_request_all(req, 0);
+-                      scsi_release_buffers(cmd);
+                       scsi_next_command(cmd);
+                       return;
+               }
diff --git a/queue-2.6.32/series b/queue-2.6.32/series
new file mode 100644 (file)
index 0000000..c75b5c9
--- /dev/null
@@ -0,0 +1,8 @@
+scsi_lib-fix-bug-in-completion-of-bidi-commands.patch
+mptsas-fix-issue-with-chain-pools-allocation-on-katmai.patch
+mm-add-new-read_cache_page_gfp-helper-function.patch
+drm-i915-selectively-enable-self-reclaim.patch
+firewire-ohci-fix-crashes-with-tsb43ab23-on-64bit-systems.patch
+s390-fix-single-stepped-svcs-with-trace_irqflags-y.patch
+x86-set-hotpluggable-nodes-in-nodes_possible_map.patch
+x86-remove-x86-cpu-features-in-debugfs-config_x86_cpu_debug.patch
diff --git a/queue-2.6.32/x86-remove-x86-cpu-features-in-debugfs-config_x86_cpu_debug.patch b/queue-2.6.32/x86-remove-x86-cpu-features-in-debugfs-config_x86_cpu_debug.patch
new file mode 100644 (file)
index 0000000..bce6eef
--- /dev/null
@@ -0,0 +1,885 @@
+From b160091802d4a76dd063facb09fcf10bf5d5d747 Mon Sep 17 00:00:00 2001
+From: H. Peter Anvin <hpa@zytor.com>
+Date: Sat, 23 Jan 2010 18:27:47 -0800
+Subject: x86: Remove "x86 CPU features in debugfs" (CONFIG_X86_CPU_DEBUG)
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: H. Peter Anvin <hpa@zytor.com>
+
+commit b160091802d4a76dd063facb09fcf10bf5d5d747 upstream.
+
+CONFIG_X86_CPU_DEBUG, which provides some parsed versions of the x86
+CPU configuration via debugfs, has caused boot failures on real
+hardware.  The value of this feature has been marginal at best, as all
+this information is already available to userspace via generic
+interfaces.
+
+Causes crashes that have not been fixed + minimal utility -> remove.
+
+See the referenced LKML thread for more information.
+
+Reported-by: Ozan Çağlayan <ozan@pardus.org.tr>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+LKML-Reference: <alpine.LFD.2.00.1001221755320.13231@localhost.localdomain>
+Cc: Jaswinder Singh Rajput <jaswinder@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Rafael J. Wysocki <rjw@sisk.pl>
+Cc: Yinghai Lu <yinghai@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/Kconfig                 |    6 
+ arch/x86/include/asm/cpu_debug.h |  127 -------
+ arch/x86/kernel/cpu/Makefile     |    2 
+ arch/x86/kernel/cpu/cpu_debug.c  |  688 ---------------------------------------
+ 4 files changed, 823 deletions(-)
+
+--- a/arch/x86/include/asm/cpu_debug.h
++++ /dev/null
+@@ -1,127 +0,0 @@
+-#ifndef _ASM_X86_CPU_DEBUG_H
+-#define _ASM_X86_CPU_DEBUG_H
+-
+-/*
+- * CPU x86 architecture debug
+- *
+- * Copyright(C) 2009 Jaswinder Singh Rajput
+- */
+-
+-/* Register flags */
+-enum cpu_debug_bit {
+-/* Model Specific Registers (MSRs)                                    */
+-      CPU_MC_BIT,                             /* Machine Check        */
+-      CPU_MONITOR_BIT,                        /* Monitor              */
+-      CPU_TIME_BIT,                           /* Time                 */
+-      CPU_PMC_BIT,                            /* Performance Monitor  */
+-      CPU_PLATFORM_BIT,                       /* Platform             */
+-      CPU_APIC_BIT,                           /* APIC                 */
+-      CPU_POWERON_BIT,                        /* Power-on             */
+-      CPU_CONTROL_BIT,                        /* Control              */
+-      CPU_FEATURES_BIT,                       /* Features control     */
+-      CPU_LBRANCH_BIT,                        /* Last Branch          */
+-      CPU_BIOS_BIT,                           /* BIOS                 */
+-      CPU_FREQ_BIT,                           /* Frequency            */
+-      CPU_MTTR_BIT,                           /* MTRR                 */
+-      CPU_PERF_BIT,                           /* Performance          */
+-      CPU_CACHE_BIT,                          /* Cache                */
+-      CPU_SYSENTER_BIT,                       /* Sysenter             */
+-      CPU_THERM_BIT,                          /* Thermal              */
+-      CPU_MISC_BIT,                           /* Miscellaneous        */
+-      CPU_DEBUG_BIT,                          /* Debug                */
+-      CPU_PAT_BIT,                            /* PAT                  */
+-      CPU_VMX_BIT,                            /* VMX                  */
+-      CPU_CALL_BIT,                           /* System Call          */
+-      CPU_BASE_BIT,                           /* BASE Address         */
+-      CPU_VER_BIT,                            /* Version ID           */
+-      CPU_CONF_BIT,                           /* Configuration        */
+-      CPU_SMM_BIT,                            /* System mgmt mode     */
+-      CPU_SVM_BIT,                            /*Secure Virtual Machine*/
+-      CPU_OSVM_BIT,                           /* OS-Visible Workaround*/
+-/* Standard Registers                                                 */
+-      CPU_TSS_BIT,                            /* Task Stack Segment   */
+-      CPU_CR_BIT,                             /* Control Registers    */
+-      CPU_DT_BIT,                             /* Descriptor Table     */
+-/* End of Registers flags                                             */
+-      CPU_REG_ALL_BIT,                        /* Select all Registers */
+-};
+-
+-#define       CPU_REG_ALL             (~0)            /* Select all Registers */
+-
+-#define       CPU_MC                  (1 << CPU_MC_BIT)
+-#define       CPU_MONITOR             (1 << CPU_MONITOR_BIT)
+-#define       CPU_TIME                (1 << CPU_TIME_BIT)
+-#define       CPU_PMC                 (1 << CPU_PMC_BIT)
+-#define       CPU_PLATFORM            (1 << CPU_PLATFORM_BIT)
+-#define       CPU_APIC                (1 << CPU_APIC_BIT)
+-#define       CPU_POWERON             (1 << CPU_POWERON_BIT)
+-#define       CPU_CONTROL             (1 << CPU_CONTROL_BIT)
+-#define       CPU_FEATURES            (1 << CPU_FEATURES_BIT)
+-#define       CPU_LBRANCH             (1 << CPU_LBRANCH_BIT)
+-#define       CPU_BIOS                (1 << CPU_BIOS_BIT)
+-#define       CPU_FREQ                (1 << CPU_FREQ_BIT)
+-#define       CPU_MTRR                (1 << CPU_MTTR_BIT)
+-#define       CPU_PERF                (1 << CPU_PERF_BIT)
+-#define       CPU_CACHE               (1 << CPU_CACHE_BIT)
+-#define       CPU_SYSENTER            (1 << CPU_SYSENTER_BIT)
+-#define       CPU_THERM               (1 << CPU_THERM_BIT)
+-#define       CPU_MISC                (1 << CPU_MISC_BIT)
+-#define       CPU_DEBUG               (1 << CPU_DEBUG_BIT)
+-#define       CPU_PAT                 (1 << CPU_PAT_BIT)
+-#define       CPU_VMX                 (1 << CPU_VMX_BIT)
+-#define       CPU_CALL                (1 << CPU_CALL_BIT)
+-#define       CPU_BASE                (1 << CPU_BASE_BIT)
+-#define       CPU_VER                 (1 << CPU_VER_BIT)
+-#define       CPU_CONF                (1 << CPU_CONF_BIT)
+-#define       CPU_SMM                 (1 << CPU_SMM_BIT)
+-#define       CPU_SVM                 (1 << CPU_SVM_BIT)
+-#define       CPU_OSVM                (1 << CPU_OSVM_BIT)
+-#define       CPU_TSS                 (1 << CPU_TSS_BIT)
+-#define       CPU_CR                  (1 << CPU_CR_BIT)
+-#define       CPU_DT                  (1 << CPU_DT_BIT)
+-
+-/* Register file flags */
+-enum cpu_file_bit {
+-      CPU_INDEX_BIT,                          /* index                */
+-      CPU_VALUE_BIT,                          /* value                */
+-};
+-
+-#define       CPU_FILE_VALUE          (1 << CPU_VALUE_BIT)
+-
+-#define MAX_CPU_FILES         512
+-
+-struct cpu_private {
+-      unsigned                cpu;
+-      unsigned                type;
+-      unsigned                reg;
+-      unsigned                file;
+-};
+-
+-struct cpu_debug_base {
+-      char                    *name;          /* Register name        */
+-      unsigned                flag;           /* Register flag        */
+-      unsigned                write;          /* Register write flag  */
+-};
+-
+-/*
+- * Currently it looks similar to cpu_debug_base but once we add more files
+- * cpu_file_base will go in different direction
+- */
+-struct cpu_file_base {
+-      char                    *name;          /* Register file name   */
+-      unsigned                flag;           /* Register file flag   */
+-      unsigned                write;          /* Register write flag  */
+-};
+-
+-struct cpu_cpuX_base {
+-      struct dentry           *dentry;        /* Register dentry      */
+-      int                     init;           /* Register index file  */
+-};
+-
+-struct cpu_debug_range {
+-      unsigned                min;            /* Register range min   */
+-      unsigned                max;            /* Register range max   */
+-      unsigned                flag;           /* Supported flags      */
+-};
+-
+-#endif /* _ASM_X86_CPU_DEBUG_H */
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -984,12 +984,6 @@ config X86_CPUID
+         with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
+         /dev/cpu/31/cpuid.
+-config X86_CPU_DEBUG
+-      tristate "/sys/kernel/debug/x86/cpu/* - CPU Debug support"
+-      ---help---
+-        If you select this option, this will provide various x86 CPUs
+-        information through debugfs.
+-
+ choice
+       prompt "High Memory Support"
+       default HIGHMEM4G if !X86_NUMAQ
+--- a/arch/x86/kernel/cpu/cpu_debug.c
++++ /dev/null
+@@ -1,688 +0,0 @@
+-/*
+- * CPU x86 architecture debug code
+- *
+- * Copyright(C) 2009 Jaswinder Singh Rajput
+- *
+- * For licencing details see kernel-base/COPYING
+- */
+-
+-#include <linux/interrupt.h>
+-#include <linux/compiler.h>
+-#include <linux/seq_file.h>
+-#include <linux/debugfs.h>
+-#include <linux/kprobes.h>
+-#include <linux/uaccess.h>
+-#include <linux/kernel.h>
+-#include <linux/module.h>
+-#include <linux/percpu.h>
+-#include <linux/signal.h>
+-#include <linux/errno.h>
+-#include <linux/sched.h>
+-#include <linux/types.h>
+-#include <linux/init.h>
+-#include <linux/slab.h>
+-#include <linux/smp.h>
+-
+-#include <asm/cpu_debug.h>
+-#include <asm/paravirt.h>
+-#include <asm/system.h>
+-#include <asm/traps.h>
+-#include <asm/apic.h>
+-#include <asm/desc.h>
+-
+-static DEFINE_PER_CPU(struct cpu_cpuX_base [CPU_REG_ALL_BIT], cpu_arr);
+-static DEFINE_PER_CPU(struct cpu_private * [MAX_CPU_FILES], priv_arr);
+-static DEFINE_PER_CPU(int, cpu_priv_count);
+-
+-static DEFINE_MUTEX(cpu_debug_lock);
+-
+-static struct dentry *cpu_debugfs_dir;
+-
+-static struct cpu_debug_base cpu_base[] = {
+-      { "mc",         CPU_MC,         0       },
+-      { "monitor",    CPU_MONITOR,    0       },
+-      { "time",       CPU_TIME,       0       },
+-      { "pmc",        CPU_PMC,        1       },
+-      { "platform",   CPU_PLATFORM,   0       },
+-      { "apic",       CPU_APIC,       0       },
+-      { "poweron",    CPU_POWERON,    0       },
+-      { "control",    CPU_CONTROL,    0       },
+-      { "features",   CPU_FEATURES,   0       },
+-      { "lastbranch", CPU_LBRANCH,    0       },
+-      { "bios",       CPU_BIOS,       0       },
+-      { "freq",       CPU_FREQ,       0       },
+-      { "mtrr",       CPU_MTRR,       0       },
+-      { "perf",       CPU_PERF,       0       },
+-      { "cache",      CPU_CACHE,      0       },
+-      { "sysenter",   CPU_SYSENTER,   0       },
+-      { "therm",      CPU_THERM,      0       },
+-      { "misc",       CPU_MISC,       0       },
+-      { "debug",      CPU_DEBUG,      0       },
+-      { "pat",        CPU_PAT,        0       },
+-      { "vmx",        CPU_VMX,        0       },
+-      { "call",       CPU_CALL,       0       },
+-      { "base",       CPU_BASE,       0       },
+-      { "ver",        CPU_VER,        0       },
+-      { "conf",       CPU_CONF,       0       },
+-      { "smm",        CPU_SMM,        0       },
+-      { "svm",        CPU_SVM,        0       },
+-      { "osvm",       CPU_OSVM,       0       },
+-      { "tss",        CPU_TSS,        0       },
+-      { "cr",         CPU_CR,         0       },
+-      { "dt",         CPU_DT,         0       },
+-      { "registers",  CPU_REG_ALL,    0       },
+-};
+-
+-static struct cpu_file_base cpu_file[] = {
+-      { "index",      CPU_REG_ALL,    0       },
+-      { "value",      CPU_REG_ALL,    1       },
+-};
+-
+-/* CPU Registers Range */
+-static struct cpu_debug_range cpu_reg_range[] = {
+-      { 0x00000000, 0x00000001, CPU_MC,       },
+-      { 0x00000006, 0x00000007, CPU_MONITOR,  },
+-      { 0x00000010, 0x00000010, CPU_TIME,     },
+-      { 0x00000011, 0x00000013, CPU_PMC,      },
+-      { 0x00000017, 0x00000017, CPU_PLATFORM, },
+-      { 0x0000001B, 0x0000001B, CPU_APIC,     },
+-      { 0x0000002A, 0x0000002B, CPU_POWERON,  },
+-      { 0x0000002C, 0x0000002C, CPU_FREQ,     },
+-      { 0x0000003A, 0x0000003A, CPU_CONTROL,  },
+-      { 0x00000040, 0x00000047, CPU_LBRANCH,  },
+-      { 0x00000060, 0x00000067, CPU_LBRANCH,  },
+-      { 0x00000079, 0x00000079, CPU_BIOS,     },
+-      { 0x00000088, 0x0000008A, CPU_CACHE,    },
+-      { 0x0000008B, 0x0000008B, CPU_BIOS,     },
+-      { 0x0000009B, 0x0000009B, CPU_MONITOR,  },
+-      { 0x000000C1, 0x000000C4, CPU_PMC,      },
+-      { 0x000000CD, 0x000000CD, CPU_FREQ,     },
+-      { 0x000000E7, 0x000000E8, CPU_PERF,     },
+-      { 0x000000FE, 0x000000FE, CPU_MTRR,     },
+-
+-      { 0x00000116, 0x0000011E, CPU_CACHE,    },
+-      { 0x00000174, 0x00000176, CPU_SYSENTER, },
+-      { 0x00000179, 0x0000017B, CPU_MC,       },
+-      { 0x00000186, 0x00000189, CPU_PMC,      },
+-      { 0x00000198, 0x00000199, CPU_PERF,     },
+-      { 0x0000019A, 0x0000019A, CPU_TIME,     },
+-      { 0x0000019B, 0x0000019D, CPU_THERM,    },
+-      { 0x000001A0, 0x000001A0, CPU_MISC,     },
+-      { 0x000001C9, 0x000001C9, CPU_LBRANCH,  },
+-      { 0x000001D7, 0x000001D8, CPU_LBRANCH,  },
+-      { 0x000001D9, 0x000001D9, CPU_DEBUG,    },
+-      { 0x000001DA, 0x000001E0, CPU_LBRANCH,  },
+-
+-      { 0x00000200, 0x0000020F, CPU_MTRR,     },
+-      { 0x00000250, 0x00000250, CPU_MTRR,     },
+-      { 0x00000258, 0x00000259, CPU_MTRR,     },
+-      { 0x00000268, 0x0000026F, CPU_MTRR,     },
+-      { 0x00000277, 0x00000277, CPU_PAT,      },
+-      { 0x000002FF, 0x000002FF, CPU_MTRR,     },
+-
+-      { 0x00000300, 0x00000311, CPU_PMC,      },
+-      { 0x00000345, 0x00000345, CPU_PMC,      },
+-      { 0x00000360, 0x00000371, CPU_PMC,      },
+-      { 0x0000038D, 0x00000390, CPU_PMC,      },
+-      { 0x000003A0, 0x000003BE, CPU_PMC,      },
+-      { 0x000003C0, 0x000003CD, CPU_PMC,      },
+-      { 0x000003E0, 0x000003E1, CPU_PMC,      },
+-      { 0x000003F0, 0x000003F2, CPU_PMC,      },
+-
+-      { 0x00000400, 0x00000417, CPU_MC,       },
+-      { 0x00000480, 0x0000048B, CPU_VMX,      },
+-
+-      { 0x00000600, 0x00000600, CPU_DEBUG,    },
+-      { 0x00000680, 0x0000068F, CPU_LBRANCH,  },
+-      { 0x000006C0, 0x000006CF, CPU_LBRANCH,  },
+-
+-      { 0x000107CC, 0x000107D3, CPU_PMC,      },
+-
+-      { 0xC0000080, 0xC0000080, CPU_FEATURES, },
+-      { 0xC0000081, 0xC0000084, CPU_CALL,     },
+-      { 0xC0000100, 0xC0000102, CPU_BASE,     },
+-      { 0xC0000103, 0xC0000103, CPU_TIME,     },
+-
+-      { 0xC0010000, 0xC0010007, CPU_PMC,      },
+-      { 0xC0010010, 0xC0010010, CPU_CONF,     },
+-      { 0xC0010015, 0xC0010015, CPU_CONF,     },
+-      { 0xC0010016, 0xC001001A, CPU_MTRR,     },
+-      { 0xC001001D, 0xC001001D, CPU_MTRR,     },
+-      { 0xC001001F, 0xC001001F, CPU_CONF,     },
+-      { 0xC0010030, 0xC0010035, CPU_BIOS,     },
+-      { 0xC0010044, 0xC0010048, CPU_MC,       },
+-      { 0xC0010050, 0xC0010056, CPU_SMM,      },
+-      { 0xC0010058, 0xC0010058, CPU_CONF,     },
+-      { 0xC0010060, 0xC0010060, CPU_CACHE,    },
+-      { 0xC0010061, 0xC0010068, CPU_SMM,      },
+-      { 0xC0010069, 0xC001006B, CPU_SMM,      },
+-      { 0xC0010070, 0xC0010071, CPU_SMM,      },
+-      { 0xC0010111, 0xC0010113, CPU_SMM,      },
+-      { 0xC0010114, 0xC0010118, CPU_SVM,      },
+-      { 0xC0010140, 0xC0010141, CPU_OSVM,     },
+-      { 0xC0011022, 0xC0011023, CPU_CONF,     },
+-};
+-
+-static int is_typeflag_valid(unsigned cpu, unsigned flag)
+-{
+-      int i;
+-
+-      /* Standard Registers should be always valid */
+-      if (flag >= CPU_TSS)
+-              return 1;
+-
+-      for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
+-              if (cpu_reg_range[i].flag == flag)
+-                      return 1;
+-      }
+-
+-      /* Invalid */
+-      return 0;
+-}
+-
+-static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
+-                            int index, unsigned flag)
+-{
+-      if (cpu_reg_range[index].flag == flag) {
+-              *min = cpu_reg_range[index].min;
+-              *max = cpu_reg_range[index].max;
+-      } else
+-              *max = 0;
+-
+-      return *max;
+-}
+-
+-/* This function can also be called with seq = NULL for printk */
+-static void print_cpu_data(struct seq_file *seq, unsigned type,
+-                         u32 low, u32 high)
+-{
+-      struct cpu_private *priv;
+-      u64 val = high;
+-
+-      if (seq) {
+-              priv = seq->private;
+-              if (priv->file) {
+-                      val = (val << 32) | low;
+-                      seq_printf(seq, "0x%llx\n", val);
+-              } else
+-                      seq_printf(seq, " %08x: %08x_%08x\n",
+-                                 type, high, low);
+-      } else
+-              printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
+-}
+-
+-/* This function can also be called with seq = NULL for printk */
+-static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
+-{
+-      unsigned msr, msr_min, msr_max;
+-      struct cpu_private *priv;
+-      u32 low, high;
+-      int i;
+-
+-      if (seq) {
+-              priv = seq->private;
+-              if (priv->file) {
+-                      if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
+-                                             &low, &high))
+-                              print_cpu_data(seq, priv->reg, low, high);
+-                      return;
+-              }
+-      }
+-
+-      for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
+-              if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
+-                      continue;
+-
+-              for (msr = msr_min; msr <= msr_max; msr++) {
+-                      if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))
+-                              continue;
+-                      print_cpu_data(seq, msr, low, high);
+-              }
+-      }
+-}
+-
+-static void print_tss(void *arg)
+-{
+-      struct pt_regs *regs = task_pt_regs(current);
+-      struct seq_file *seq = arg;
+-      unsigned int seg;
+-
+-      seq_printf(seq, " RAX\t: %016lx\n", regs->ax);
+-      seq_printf(seq, " RBX\t: %016lx\n", regs->bx);
+-      seq_printf(seq, " RCX\t: %016lx\n", regs->cx);
+-      seq_printf(seq, " RDX\t: %016lx\n", regs->dx);
+-
+-      seq_printf(seq, " RSI\t: %016lx\n", regs->si);
+-      seq_printf(seq, " RDI\t: %016lx\n", regs->di);
+-      seq_printf(seq, " RBP\t: %016lx\n", regs->bp);
+-      seq_printf(seq, " ESP\t: %016lx\n", regs->sp);
+-
+-#ifdef CONFIG_X86_64
+-      seq_printf(seq, " R08\t: %016lx\n", regs->r8);
+-      seq_printf(seq, " R09\t: %016lx\n", regs->r9);
+-      seq_printf(seq, " R10\t: %016lx\n", regs->r10);
+-      seq_printf(seq, " R11\t: %016lx\n", regs->r11);
+-      seq_printf(seq, " R12\t: %016lx\n", regs->r12);
+-      seq_printf(seq, " R13\t: %016lx\n", regs->r13);
+-      seq_printf(seq, " R14\t: %016lx\n", regs->r14);
+-      seq_printf(seq, " R15\t: %016lx\n", regs->r15);
+-#endif
+-
+-      asm("movl %%cs,%0" : "=r" (seg));
+-      seq_printf(seq, " CS\t:             %04x\n", seg);
+-      asm("movl %%ds,%0" : "=r" (seg));
+-      seq_printf(seq, " DS\t:             %04x\n", seg);
+-      seq_printf(seq, " SS\t:             %04lx\n", regs->ss & 0xffff);
+-      asm("movl %%es,%0" : "=r" (seg));
+-      seq_printf(seq, " ES\t:             %04x\n", seg);
+-      asm("movl %%fs,%0" : "=r" (seg));
+-      seq_printf(seq, " FS\t:             %04x\n", seg);
+-      asm("movl %%gs,%0" : "=r" (seg));
+-      seq_printf(seq, " GS\t:             %04x\n", seg);
+-
+-      seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags);
+-
+-      seq_printf(seq, " EIP\t: %016lx\n", regs->ip);
+-}
+-
+-static void print_cr(void *arg)
+-{
+-      struct seq_file *seq = arg;
+-
+-      seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
+-      seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
+-      seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
+-      seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
+-#ifdef CONFIG_X86_64
+-      seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
+-#endif
+-}
+-
+-static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt)
+-{
+-      seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size));
+-}
+-
+-static void print_dt(void *seq)
+-{
+-      struct desc_ptr dt;
+-      unsigned long ldt;
+-
+-      /* IDT */
+-      store_idt((struct desc_ptr *)&dt);
+-      print_desc_ptr("IDT", seq, dt);
+-
+-      /* GDT */
+-      store_gdt((struct desc_ptr *)&dt);
+-      print_desc_ptr("GDT", seq, dt);
+-
+-      /* LDT */
+-      store_ldt(ldt);
+-      seq_printf(seq, " LDT\t: %016lx\n", ldt);
+-
+-      /* TR */
+-      store_tr(ldt);
+-      seq_printf(seq, " TR\t: %016lx\n", ldt);
+-}
+-
+-static void print_dr(void *arg)
+-{
+-      struct seq_file *seq = arg;
+-      unsigned long dr;
+-      int i;
+-
+-      for (i = 0; i < 8; i++) {
+-              /* Ignore db4, db5 */
+-              if ((i == 4) || (i == 5))
+-                      continue;
+-              get_debugreg(dr, i);
+-              seq_printf(seq, " dr%d\t: %016lx\n", i, dr);
+-      }
+-
+-      seq_printf(seq, "\n MSR\t:\n");
+-}
+-
+-static void print_apic(void *arg)
+-{
+-      struct seq_file *seq = arg;
+-
+-#ifdef CONFIG_X86_LOCAL_APIC
+-      seq_printf(seq, " LAPIC\t:\n");
+-      seq_printf(seq, " ID\t\t: %08x\n",  apic_read(APIC_ID) >> 24);
+-      seq_printf(seq, " LVR\t\t: %08x\n",  apic_read(APIC_LVR));
+-      seq_printf(seq, " TASKPRI\t: %08x\n",  apic_read(APIC_TASKPRI));
+-      seq_printf(seq, " ARBPRI\t\t: %08x\n",  apic_read(APIC_ARBPRI));
+-      seq_printf(seq, " PROCPRI\t: %08x\n",  apic_read(APIC_PROCPRI));
+-      seq_printf(seq, " LDR\t\t: %08x\n",  apic_read(APIC_LDR));
+-      seq_printf(seq, " DFR\t\t: %08x\n",  apic_read(APIC_DFR));
+-      seq_printf(seq, " SPIV\t\t: %08x\n",  apic_read(APIC_SPIV));
+-      seq_printf(seq, " ISR\t\t: %08x\n",  apic_read(APIC_ISR));
+-      seq_printf(seq, " ESR\t\t: %08x\n",  apic_read(APIC_ESR));
+-      seq_printf(seq, " ICR\t\t: %08x\n",  apic_read(APIC_ICR));
+-      seq_printf(seq, " ICR2\t\t: %08x\n",  apic_read(APIC_ICR2));
+-      seq_printf(seq, " LVTT\t\t: %08x\n",  apic_read(APIC_LVTT));
+-      seq_printf(seq, " LVTTHMR\t: %08x\n",  apic_read(APIC_LVTTHMR));
+-      seq_printf(seq, " LVTPC\t\t: %08x\n",  apic_read(APIC_LVTPC));
+-      seq_printf(seq, " LVT0\t\t: %08x\n",  apic_read(APIC_LVT0));
+-      seq_printf(seq, " LVT1\t\t: %08x\n",  apic_read(APIC_LVT1));
+-      seq_printf(seq, " LVTERR\t\t: %08x\n",  apic_read(APIC_LVTERR));
+-      seq_printf(seq, " TMICT\t\t: %08x\n",  apic_read(APIC_TMICT));
+-      seq_printf(seq, " TMCCT\t\t: %08x\n",  apic_read(APIC_TMCCT));
+-      seq_printf(seq, " TDCR\t\t: %08x\n",  apic_read(APIC_TDCR));
+-      if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
+-              unsigned int i, v, maxeilvt;
+-
+-              v = apic_read(APIC_EFEAT);
+-              maxeilvt = (v >> 16) & 0xff;
+-              seq_printf(seq, " EFEAT\t\t: %08x\n", v);
+-              seq_printf(seq, " ECTRL\t\t: %08x\n", apic_read(APIC_ECTRL));
+-
+-              for (i = 0; i < maxeilvt; i++) {
+-                      v = apic_read(APIC_EILVTn(i));
+-                      seq_printf(seq, " EILVT%d\t\t: %08x\n", i, v);
+-              }
+-      }
+-#endif /* CONFIG_X86_LOCAL_APIC */
+-      seq_printf(seq, "\n MSR\t:\n");
+-}
+-
+-static int cpu_seq_show(struct seq_file *seq, void *v)
+-{
+-      struct cpu_private *priv = seq->private;
+-
+-      if (priv == NULL)
+-              return -EINVAL;
+-
+-      switch (cpu_base[priv->type].flag) {
+-      case CPU_TSS:
+-              smp_call_function_single(priv->cpu, print_tss, seq, 1);
+-              break;
+-      case CPU_CR:
+-              smp_call_function_single(priv->cpu, print_cr, seq, 1);
+-              break;
+-      case CPU_DT:
+-              smp_call_function_single(priv->cpu, print_dt, seq, 1);
+-              break;
+-      case CPU_DEBUG:
+-              if (priv->file == CPU_INDEX_BIT)
+-                      smp_call_function_single(priv->cpu, print_dr, seq, 1);
+-              print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
+-              break;
+-      case CPU_APIC:
+-              if (priv->file == CPU_INDEX_BIT)
+-                      smp_call_function_single(priv->cpu, print_apic, seq, 1);
+-              print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
+-              break;
+-
+-      default:
+-              print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
+-              break;
+-      }
+-      seq_printf(seq, "\n");
+-
+-      return 0;
+-}
+-
+-static void *cpu_seq_start(struct seq_file *seq, loff_t *pos)
+-{
+-      if (*pos == 0) /* One time is enough ;-) */
+-              return seq;
+-
+-      return NULL;
+-}
+-
+-static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+-{
+-      (*pos)++;
+-
+-      return cpu_seq_start(seq, pos);
+-}
+-
+-static void cpu_seq_stop(struct seq_file *seq, void *v)
+-{
+-}
+-
+-static const struct seq_operations cpu_seq_ops = {
+-      .start          = cpu_seq_start,
+-      .next           = cpu_seq_next,
+-      .stop           = cpu_seq_stop,
+-      .show           = cpu_seq_show,
+-};
+-
+-static int cpu_seq_open(struct inode *inode, struct file *file)
+-{
+-      struct cpu_private *priv = inode->i_private;
+-      struct seq_file *seq;
+-      int err;
+-
+-      err = seq_open(file, &cpu_seq_ops);
+-      if (!err) {
+-              seq = file->private_data;
+-              seq->private = priv;
+-      }
+-
+-      return err;
+-}
+-
+-static int write_msr(struct cpu_private *priv, u64 val)
+-{
+-      u32 low, high;
+-
+-      high = (val >> 32) & 0xffffffff;
+-      low = val & 0xffffffff;
+-
+-      if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high))
+-              return 0;
+-
+-      return -EPERM;
+-}
+-
+-static int write_cpu_register(struct cpu_private *priv, const char *buf)
+-{
+-      int ret = -EPERM;
+-      u64 val;
+-
+-      ret = strict_strtoull(buf, 0, &val);
+-      if (ret < 0)
+-              return ret;
+-
+-      /* Supporting only MSRs */
+-      if (priv->type < CPU_TSS_BIT)
+-              return write_msr(priv, val);
+-
+-      return ret;
+-}
+-
+-static ssize_t cpu_write(struct file *file, const char __user *ubuf,
+-                           size_t count, loff_t *off)
+-{
+-      struct seq_file *seq = file->private_data;
+-      struct cpu_private *priv = seq->private;
+-      char buf[19];
+-
+-      if ((priv == NULL) || (count >= sizeof(buf)))
+-              return -EINVAL;
+-
+-      if (copy_from_user(&buf, ubuf, count))
+-              return -EFAULT;
+-
+-      buf[count] = 0;
+-
+-      if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write))
+-              if (!write_cpu_register(priv, buf))
+-                      return count;
+-
+-      return -EACCES;
+-}
+-
+-static const struct file_operations cpu_fops = {
+-      .owner          = THIS_MODULE,
+-      .open           = cpu_seq_open,
+-      .read           = seq_read,
+-      .write          = cpu_write,
+-      .llseek         = seq_lseek,
+-      .release        = seq_release,
+-};
+-
+-static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
+-                         unsigned file, struct dentry *dentry)
+-{
+-      struct cpu_private *priv = NULL;
+-
+-      /* Already intialized */
+-      if (file == CPU_INDEX_BIT)
+-              if (per_cpu(cpu_arr[type].init, cpu))
+-                      return 0;
+-
+-      priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+-      if (priv == NULL)
+-              return -ENOMEM;
+-
+-      priv->cpu = cpu;
+-      priv->type = type;
+-      priv->reg = reg;
+-      priv->file = file;
+-      mutex_lock(&cpu_debug_lock);
+-      per_cpu(priv_arr[type], cpu) = priv;
+-      per_cpu(cpu_priv_count, cpu)++;
+-      mutex_unlock(&cpu_debug_lock);
+-
+-      if (file)
+-              debugfs_create_file(cpu_file[file].name, S_IRUGO,
+-                                  dentry, (void *)priv, &cpu_fops);
+-      else {
+-              debugfs_create_file(cpu_base[type].name, S_IRUGO,
+-                                  per_cpu(cpu_arr[type].dentry, cpu),
+-                                  (void *)priv, &cpu_fops);
+-              mutex_lock(&cpu_debug_lock);
+-              per_cpu(cpu_arr[type].init, cpu) = 1;
+-              mutex_unlock(&cpu_debug_lock);
+-      }
+-
+-      return 0;
+-}
+-
+-static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg,
+-                           struct dentry *dentry)
+-{
+-      unsigned file;
+-      int err = 0;
+-
+-      for (file = 0; file <  ARRAY_SIZE(cpu_file); file++) {
+-              err = cpu_create_file(cpu, type, reg, file, dentry);
+-              if (err)
+-                      return err;
+-      }
+-
+-      return err;
+-}
+-
+-static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
+-{
+-      struct dentry *cpu_dentry = NULL;
+-      unsigned reg, reg_min, reg_max;
+-      int i, err = 0;
+-      char reg_dir[12];
+-      u32 low, high;
+-
+-      for (i = 0; i < ARRAY_SIZE(cpu_reg_range); i++) {
+-              if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
+-                                 cpu_base[type].flag))
+-                      continue;
+-
+-              for (reg = reg_min; reg <= reg_max; reg++) {
+-                      if (rdmsr_safe_on_cpu(cpu, reg, &low, &high))
+-                              continue;
+-
+-                      sprintf(reg_dir, "0x%x", reg);
+-                      cpu_dentry = debugfs_create_dir(reg_dir, dentry);
+-                      err = cpu_init_regfiles(cpu, type, reg, cpu_dentry);
+-                      if (err)
+-                              return err;
+-              }
+-      }
+-
+-      return err;
+-}
+-
+-static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
+-{
+-      struct dentry *cpu_dentry = NULL;
+-      unsigned type;
+-      int err = 0;
+-
+-      for (type = 0; type <  ARRAY_SIZE(cpu_base) - 1; type++) {
+-              if (!is_typeflag_valid(cpu, cpu_base[type].flag))
+-                      continue;
+-              cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
+-              per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
+-
+-              if (type < CPU_TSS_BIT)
+-                      err = cpu_init_msr(cpu, type, cpu_dentry);
+-              else
+-                      err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT,
+-                                            cpu_dentry);
+-              if (err)
+-                      return err;
+-      }
+-
+-      return err;
+-}
+-
+-static int cpu_init_cpu(void)
+-{
+-      struct dentry *cpu_dentry = NULL;
+-      struct cpuinfo_x86 *cpui;
+-      char cpu_dir[12];
+-      unsigned cpu;
+-      int err = 0;
+-
+-      for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
+-              cpui = &cpu_data(cpu);
+-              if (!cpu_has(cpui, X86_FEATURE_MSR))
+-                      continue;
+-
+-              sprintf(cpu_dir, "cpu%d", cpu);
+-              cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
+-              err = cpu_init_allreg(cpu, cpu_dentry);
+-
+-              pr_info("cpu%d(%d) debug files %d\n",
+-                      cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
+-              if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
+-                      pr_err("Register files count %d exceeds limit %d\n",
+-                              per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
+-                      per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
+-                      err = -ENFILE;
+-              }
+-              if (err)
+-                      return err;
+-      }
+-
+-      return err;
+-}
+-
+-static int __init cpu_debug_init(void)
+-{
+-      cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir);
+-
+-      return cpu_init_cpu();
+-}
+-
+-static void __exit cpu_debug_exit(void)
+-{
+-      int i, cpu;
+-
+-      if (cpu_debugfs_dir)
+-              debugfs_remove_recursive(cpu_debugfs_dir);
+-
+-      for (cpu = 0; cpu <  nr_cpu_ids; cpu++)
+-              for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
+-                      kfree(per_cpu(priv_arr[i], cpu));
+-}
+-
+-module_init(cpu_debug_init);
+-module_exit(cpu_debug_exit);
+-
+-MODULE_AUTHOR("Jaswinder Singh Rajput");
+-MODULE_DESCRIPTION("CPU Debug module");
+-MODULE_LICENSE("GPL");
+--- a/arch/x86/kernel/cpu/Makefile
++++ b/arch/x86/kernel/cpu/Makefile
+@@ -18,8 +18,6 @@ obj-y                        += vmware.o hypervisor.o sched.o
+ obj-$(CONFIG_X86_32)  += bugs.o cmpxchg.o
+ obj-$(CONFIG_X86_64)  += bugs_64.o
+-obj-$(CONFIG_X86_CPU_DEBUG)           += cpu_debug.o
+-
+ obj-$(CONFIG_CPU_SUP_INTEL)           += intel.o
+ obj-$(CONFIG_CPU_SUP_AMD)             += amd.o
+ obj-$(CONFIG_CPU_SUP_CYRIX_32)                += cyrix.o
diff --git a/queue-2.6.32/x86-set-hotpluggable-nodes-in-nodes_possible_map.patch b/queue-2.6.32/x86-set-hotpluggable-nodes-in-nodes_possible_map.patch
new file mode 100644 (file)
index 0000000..8575289
--- /dev/null
@@ -0,0 +1,53 @@
+From 3a5fc0e40cb467e692737bc798bc99773c81e1e2 Mon Sep 17 00:00:00 2001
+From: David Rientjes <rientjes@google.com>
+Date: Wed, 20 Jan 2010 12:10:47 -0800
+Subject: x86: Set hotpluggable nodes in nodes_possible_map
+
+From: David Rientjes <rientjes@google.com>
+
+commit 3a5fc0e40cb467e692737bc798bc99773c81e1e2 upstream.
+
+nodes_possible_map does not currently include nodes that have SRAT
+entries that are all ACPI_SRAT_MEM_HOT_PLUGGABLE since the bit is
+cleared in nodes_parsed if it does not have an online address range.
+
+Unequivocally setting the bit in nodes_parsed is insufficient since
+existing code, such as acpi_get_nodes(), assumes all nodes in the map
+have online address ranges.  In fact, all code using nodes_parsed
+assumes such nodes represent an address range of online memory.
+
+nodes_possible_map is created by unioning nodes_parsed and
+cpu_nodes_parsed; the former represents nodes with online memory and
+the latter represents memoryless nodes.  We now set the bit for
+hotpluggable nodes in cpu_nodes_parsed so that it also gets set in
+nodes_possible_map.
+
+[ hpa: Haicheng Li points out that this makes the naming of the
+  variable cpu_nodes_parsed somewhat counterintuitive.  However, leave
+  it as is in the interest of keeping the pure bug fix patch small. ]
+
+Signed-off-by: David Rientjes <rientjes@google.com>
+Tested-by: Haicheng Li <haicheng.li@linux.intel.com>
+LKML-Reference: <alpine.DEB.2.00.1001201152040.30528@chino.kir.corp.google.com>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/x86/mm/srat_64.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/mm/srat_64.c
++++ b/arch/x86/mm/srat_64.c
+@@ -229,9 +229,11 @@ update_nodes_add(int node, unsigned long
+                       printk(KERN_ERR "SRAT: Hotplug zone not continuous. Partly ignored\n");
+       }
+-      if (changed)
++      if (changed) {
++              node_set(node, cpu_nodes_parsed);
+               printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n",
+                                nd->start, nd->end);
++      }
+ }
+ /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */