--- /dev/null
+From 5f4513864304672e6ea9eac60583eeac32e679f2 Mon Sep 17 00:00:00 2001
+From: Henrik Rydberg <rydberg@euromail.se>
+Date: Thu, 26 Sep 2013 08:33:16 +0200
+Subject: hwmon: (applesmc) Check key count before proceeding
+
+From: Henrik Rydberg <rydberg@euromail.se>
+
+commit 5f4513864304672e6ea9eac60583eeac32e679f2 upstream.
+
+After reports from Chris and Josh Boyer of a rare crash in applesmc,
+Guenter pointed out the initialization problem fixed below. The patch
+has not been verified to fix the crash, but it should be applied
+regardless.
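+
+For illustration, a sketch of the failure mode this guards against (my
+reading of the hunk below; names follow drivers/hwmon/applesmc.c, and the
+retry scenario is an assumption, not taken from the crash report):
+
+	/*
+	 * applesmc_init_smcreg_try() can run more than once, e.g. on
+	 * probe retries.  Before this patch the cache could outlive a
+	 * retry while the key count was re-read:
+	 *
+	 *   try #1: read_register_count() -> N
+	 *           s->cache = kcalloc(N, sizeof(*s->cache), GFP_KERNEL);
+	 *           ...a later init step fails, init_complete stays false...
+	 *   try #2: read_register_count() -> M, with M > N
+	 *           s->key_count = M;  // cache still has only N slots,
+	 *                              // so key lookups can index entries
+	 *                              // N..M-1 past the allocation
+	 *
+	 * The hunk below frees the stale cache whenever the count changes,
+	 * forcing a re-allocation at the new size.
+	 */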
+
+Reported-by: <jwboyer@fedoraproject.org>
+Suggested-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Henrik Rydberg <rydberg@euromail.se>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hwmon/applesmc.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/hwmon/applesmc.c
++++ b/drivers/hwmon/applesmc.c
+@@ -485,16 +485,25 @@ static int applesmc_init_smcreg_try(void
+ {
+ struct applesmc_registers *s = &smcreg;
+ bool left_light_sensor, right_light_sensor;
++ unsigned int count;
+ u8 tmp[1];
+ int ret;
+
+ if (s->init_complete)
+ return 0;
+
+- ret = read_register_count(&s->key_count);
++ ret = read_register_count(&count);
+ if (ret)
+ return ret;
+
++ if (s->cache && s->key_count != count) {
++ pr_warn("key count changed from %d to %d\n",
++ s->key_count, count);
++ kfree(s->cache);
++ s->cache = NULL;
++ }
++ s->key_count = count;
++
+ if (!s->cache)
+ s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL);
+ if (!s->cache)
--- /dev/null
+From 0fc86eca1b338d06ec500b34ef7def79c32b602b Mon Sep 17 00:00:00 2001
+From: Henrik Rydberg <rydberg@euromail.se>
+Date: Thu, 26 Jan 2012 06:08:41 -0500
+Subject: hwmon: (applesmc) Silence uninitialized warnings
+
+From: Henrik Rydberg <rydberg@euromail.se>
+
+commit 0fc86eca1b338d06ec500b34ef7def79c32b602b upstream.
+
+Some error paths do not set a result, leading the compiler to the
+(false) assumption that the value may be used uninitialized. Set
+results for those paths as well.
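+
+A minimal standalone illustration of the warning pattern (hypothetical
+names, not the driver code; build with gcc -O2 -Wmaybe-uninitialized):
+
+	extern int lookup(int idx);	/* may fail with -errno */
+
+	/* same shape as the driver's applesmc_get_lower_bound() */
+	static int get_bound(unsigned int *lo)
+	{
+		int ret = lookup(0);
+
+		if (ret < 0)
+			return ret;	/* error path leaves *lo untouched */
+		*lo = 42;
+		return 0;
+	}
+
+	int caller(void)
+	{
+		unsigned int lo;
+
+		if (get_bound(&lo))
+			return -1;
+		return lo;	/* gcc may (falsely) warn that 'lo' is used
+				   uninitialized, since its analysis does not
+				   always see that the error path bailed out
+				   before this use */
+	}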
+
+Signed-off-by: Henrik Rydberg <rydberg@euromail.se>
+Signed-off-by: Guenter Roeck <guenter.roeck@ericsson.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hwmon/applesmc.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/hwmon/applesmc.c
++++ b/drivers/hwmon/applesmc.c
+@@ -344,8 +344,10 @@ static int applesmc_get_lower_bound(unsi
+ while (begin != end) {
+ int middle = begin + (end - begin) / 2;
+ entry = applesmc_get_entry_by_index(middle);
+- if (IS_ERR(entry))
++ if (IS_ERR(entry)) {
++ *lo = 0;
+ return PTR_ERR(entry);
++ }
+ if (strcmp(entry->key, key) < 0)
+ begin = middle + 1;
+ else
+@@ -364,8 +366,10 @@ static int applesmc_get_upper_bound(unsi
+ while (begin != end) {
+ int middle = begin + (end - begin) / 2;
+ entry = applesmc_get_entry_by_index(middle);
+- if (IS_ERR(entry))
++ if (IS_ERR(entry)) {
++ *hi = smcreg.key_count;
+ return PTR_ERR(entry);
++ }
+ if (strcmp(key, entry->key) < 0)
+ end = middle;
+ else
--- /dev/null
+From khalid.aziz@oracle.com Wed Oct 2 19:38:33 2013
+From: Khalid Aziz <khalid.aziz@oracle.com>
+Date: Mon, 23 Sep 2013 13:54:09 -0600
+Subject: mm: fix aio performance regression for database caused by THP
+To: bhutchings@solarflare.com, gregkh@linuxfoundation.org
+Cc: stable@vger.kernel.org, pshelar@nicira.com, cl@linux.com, aarcange@redhat.com, hannes@cmpxchg.org, mel@csn.ul.ie, riel@redhat.com, minchan@kernel.org, andi@firstfloor.org, akpm@linux-foundation.org, torvalds@linux-foundation.org
+Message-ID: <1379966049.30551.9.camel@concerto>
+
+From: Khalid Aziz <khalid.aziz@oracle.com>
+
+commit 7cb2ef56e6a8b7b368b2e883a0a47d02fed66911 upstream.
+
+This patch needed to be backported due to changes made to mm/swap.c some
+time after the 3.6 kernel.
+
+I am working with a tool that simulates an Oracle database I/O workload.
+This tool (orion, to be specific -
+<http://docs.oracle.com/cd/E11882_01/server.112/e16638/iodesign.htm#autoId24>)
+allocates hugetlbfs pages using shmget() with the SHM_HUGETLB flag. It then
+does AIO into these pages from flash disks using various block sizes
+commonly used by databases. I am looking at performance with two of the
+most common block sizes - 1M and 64K. AIO performance with these two block
+sizes plunged after Transparent HugePages was introduced in the kernel.
+Here are the performance numbers:
+
+              pre-THP        2.6.39         3.11-rc5
+1M read       8384 MB/s      5629 MB/s      6501 MB/s
+64K read      7867 MB/s      4576 MB/s      4251 MB/s
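+
+For reference, the allocation pattern described above is roughly the
+following (a minimal sketch, not orion itself; the size is arbitrary,
+huge pages must be reserved via /proc/sys/vm/nr_hugepages beforehand, and
+the SHM_HUGETLB fallback define follows Documentation/vm/hugetlbpage.txt):
+
+	#include <stdio.h>
+	#include <sys/ipc.h>
+	#include <sys/shm.h>
+
+	#ifndef SHM_HUGETLB
+	#define SHM_HUGETLB 04000
+	#endif
+
+	#define LENGTH (256UL << 20)	/* multiple of the huge page size */
+
+	int main(void)
+	{
+		/* back the segment with hugetlbfs pages */
+		int shmid = shmget(IPC_PRIVATE, LENGTH,
+				   SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
+		char *addr;
+
+		if (shmid < 0) {
+			perror("shmget");
+			return 1;
+		}
+		addr = shmat(shmid, NULL, 0);
+		if (addr == (char *) -1) {
+			perror("shmat");
+			shmctl(shmid, IPC_RMID, NULL);
+			return 1;
+		}
+		/* aio/direct I/O into addr takes get_page()/put_page()
+		 * references on these hugetlbfs pages -- that is the hot
+		 * path measured above */
+		shmdt(addr);
+		shmctl(shmid, IPC_RMID, NULL);
+		return 0;
+	}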
+
+I have narrowed the performance impact down to the overheads introduced by
+THP in the __get_page_tail() and put_compound_page() routines. perf top
+shows >40% of cycles being spent in these two routines. Every time direct
+I/O to hugetlbfs pages starts, the kernel calls get_page() to grab a
+reference to the pages and calls put_page() when the I/O completes to put
+the reference away. THP introduced a significant amount of locking overhead
+to get_page() and put_page() when dealing with compound pages, because
+hugepages can be split underneath get_page() and put_page(). It added this
+overhead irrespective of whether it is dealing with hugetlbfs pages or
+transparent hugepages. This resulted in a 20%-45% drop in AIO performance
+when using hugetlbfs pages.
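+
+In kernel terms the reference pattern is roughly this (an illustrative
+sketch of the generic pinned-pages flow, not code from this patch):
+
+	/* pin the user buffer before doing direct/async I/O to it */
+	nr = get_user_pages_fast(user_addr, nr_pages, 1 /* write */, pages);
+
+	/* ...build and submit a bio for the pinned pages, wait for it... */
+
+	for (i = 0; i < nr; i++)
+		put_page(pages[i]);	/* for a compound page this enters
+					   put_compound_page(), where THP
+					   added the locking discussed here */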
+
+Since hugetlbfs pages cannot be split, there is, as far as I can see, no
+reason to go through all the locking overhead for these pages. I added
+code to __get_page_tail() and put_compound_page() to bypass all the
+locking code when working with hugetlbfs pages. This improved performance
+significantly. Performance numbers with this patch:
+
+              pre-THP        3.11-rc5       3.11-rc5 + Patch
+1M read       8384 MB/s      6501 MB/s      8371 MB/s
+64K read      7867 MB/s      4251 MB/s      6510 MB/s
+
+Performance with 64K reads is still lower than it was before THP, but it
+is nevertheless a 53% improvement over 3.11-rc5. That does mean there is
+more work to be done, but I will take the 53% improvement for now.
+
+Please take a look at the following patch and let me know if it looks
+reasonable.
+
+[akpm@linux-foundation.org: tweak comments]
+Signed-off-by: Khalid Aziz <khalid.aziz@oracle.com>
+Cc: Pravin B Shelar <pshelar@nicira.com>
+Cc: Christoph Lameter <cl@linux.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Mel Gorman <mel@csn.ul.ie>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Andi Kleen <andi@firstfloor.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/swap.c | 65 ++++++++++++++++++++++++++++++++++++++++++++------------------
+ 1 file changed, 47 insertions(+), 18 deletions(-)
+
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -41,6 +41,8 @@ static DEFINE_PER_CPU(struct pagevec[NR_
+ static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
+ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
+
++int PageHuge(struct page *page);
++
+ /*
+ * This path almost never happens for VM activity - pages are normally
+ * freed via pagevecs. But it gets used by networking.
+@@ -69,13 +71,26 @@ static void __put_compound_page(struct p
+ {
+ compound_page_dtor *dtor;
+
+- __page_cache_release(page);
++ if (!PageHuge(page))
++ __page_cache_release(page);
+ dtor = get_compound_page_dtor(page);
+ (*dtor)(page);
+ }
+
+ static void put_compound_page(struct page *page)
+ {
++ /*
++ * hugetlbfs pages can not be split from under us. So if this
++ * is a hugetlbfs page, check refcount on head page and release
++ * the page if refcount is zero.
++ */
++ if (PageHuge(page)) {
++ page = compound_head(page);
++ if (put_page_testzero(page))
++ __put_compound_page(page);
++ return;
++ }
++
+ if (unlikely(PageTail(page))) {
+ /* __split_huge_page_refcount can run under us */
+ struct page *page_head = compound_trans_head(page);
+@@ -158,26 +173,40 @@ bool __get_page_tail(struct page *page)
+ * proper PT lock that already serializes against
+ * split_huge_page().
+ */
+- unsigned long flags;
+ bool got = false;
+- struct page *page_head = compound_trans_head(page);
+
+- if (likely(page != page_head && get_page_unless_zero(page_head))) {
+- /*
+- * page_head wasn't a dangling pointer but it
+- * may not be a head page anymore by the time
+- * we obtain the lock. That is ok as long as it
+- * can't be freed from under us.
+- */
+- flags = compound_lock_irqsave(page_head);
+- /* here __split_huge_page_refcount won't run anymore */
+- if (likely(PageTail(page))) {
+- __get_page_tail_foll(page, false);
+- got = true;
++ /*
++ * If this is a hugetlbfs page, it can not be split under
++ * us. Simply increment counts for tail page and its head page
++ */
++ if (PageHuge(page)) {
++ struct page *page_head;
++
++ page_head = compound_head(page);
++ atomic_inc(&page_head->_count);
++ got = true;
++ } else {
++ struct page *page_head = compound_trans_head(page);
++ unsigned long flags;
++
++ if (likely(page != page_head &&
++ get_page_unless_zero(page_head))) {
++ /*
++ * page_head wasn't a dangling pointer but it
++ * may not be a head page anymore by the time
++ * we obtain the lock. That is ok as long as it
++ * can't be freed from under us.
++ */
++ flags = compound_lock_irqsave(page_head);
++ /* here __split_huge_page_refcount won't run anymore */
++ if (likely(PageTail(page))) {
++ __get_page_tail_foll(page, false);
++ got = true;
++ }
++ compound_unlock_irqrestore(page_head, flags);
++ if (unlikely(!got))
++ put_page(page_head);
+ }
+- compound_unlock_irqrestore(page_head, flags);
+- if (unlikely(!got))
+- put_page(page_head);
+ }
+ return got;
+ }