--- /dev/null
+From 906fe033145aab7d65a428bfda2cf19c75720894 Mon Sep 17 00:00:00 2001
+From: Ed Swarthout <Ed.Swarthout@freescale.com>
+Date: Thu, 5 Jun 2014 18:56:17 -0500
+Subject: cpufreq: ppc-corenet-cpu-freq: do_div use quotient
+
+From: Ed Swarthout <Ed.Swarthout@freescale.com>
+
+commit 906fe033145aab7d65a428bfda2cf19c75720894 upstream.
+
+Commit 6712d2931933 (cpufreq: ppc-corenet-cpufreq: Fix __udivdi3 modpost
+error) used the remainder from do_div instead of the quotient. Fix that
+and add one to ensure minimum is met.
+
+Fixes: 6712d2931933 (cpufreq: ppc-corenet-cpufreq: Fix __udivdi3 modpost error)
+Signed-off-by: Ed Swarthout <Ed.Swarthout@freescale.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/ppc-corenet-cpufreq.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
++++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
+@@ -138,7 +138,7 @@ static int corenet_cpufreq_cpu_init(stru
+ struct cpufreq_frequency_table *table;
+ struct cpu_data *data;
+ unsigned int cpu = policy->cpu;
+- u64 transition_latency_hz;
++ u64 u64temp;
+
+ np = of_get_cpu_node(cpu, NULL);
+ if (!np)
+@@ -206,9 +206,10 @@ static int corenet_cpufreq_cpu_init(stru
+ for_each_cpu(i, per_cpu(cpu_mask, cpu))
+ per_cpu(cpu_data, i) = data;
+
+- transition_latency_hz = 12ULL * NSEC_PER_SEC;
+- policy->cpuinfo.transition_latency =
+- do_div(transition_latency_hz, fsl_get_sys_freq());
++ /* Minimum transition latency is 12 platform clocks */
++ u64temp = 12ULL * NSEC_PER_SEC;
++ do_div(u64temp, fsl_get_sys_freq());
++ policy->cpuinfo.transition_latency = u64temp + 1;
+
+ of_node_put(np);
+
--- /dev/null
+From fefa8ff810c5ab4c4206aed9d159c4d6fe8d4f1c Mon Sep 17 00:00:00 2001
+From: Aaron Plattner <aplattner@nvidia.com>
+Date: Wed, 18 Jun 2014 11:27:32 -0700
+Subject: cpufreq: unlock when failing cpufreq_update_policy()
+
+From: Aaron Plattner <aplattner@nvidia.com>
+
+commit fefa8ff810c5ab4c4206aed9d159c4d6fe8d4f1c upstream.
+
+Commit bd0fa9bb455d introduced a failure path to cpufreq_update_policy() if
+cpufreq_driver->get(cpu) returns NULL. However, it jumps to the 'no_policy'
+label, which exits without unlocking any of the locks the function acquired
+earlier. This causes later calls into cpufreq to hang.
+
+Fix this by creating a new 'unlock' label and jumping to that instead.
+
+Fixes: bd0fa9bb455d ("cpufreq: Return error if ->get() failed in cpufreq_update_policy()")
+Link: https://devtalk.nvidia.com/default/topic/751903/kernel-3-15-and-nv-drivers-337-340-failed-to-initialize-the-nvidia-kernel-module-gtx-550-ti-/
+Signed-off-by: Aaron Plattner <aplattner@nvidia.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/cpufreq.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -2166,10 +2166,8 @@ int cpufreq_update_policy(unsigned int c
+ struct cpufreq_policy new_policy;
+ int ret;
+
+- if (!policy) {
+- ret = -ENODEV;
+- goto no_policy;
+- }
++ if (!policy)
++ return -ENODEV;
+
+ down_write(&policy->rwsem);
+
+@@ -2188,7 +2186,7 @@ int cpufreq_update_policy(unsigned int c
+ new_policy.cur = cpufreq_driver->get(cpu);
+ if (WARN_ON(!new_policy.cur)) {
+ ret = -EIO;
+- goto no_policy;
++ goto unlock;
+ }
+
+ if (!policy->cur) {
+@@ -2203,10 +2201,10 @@ int cpufreq_update_policy(unsigned int c
+
+ ret = cpufreq_set_policy(policy, &new_policy);
+
++unlock:
+ up_write(&policy->rwsem);
+
+ cpufreq_cpu_put(policy);
+-no_policy:
+ return ret;
+ }
+ EXPORT_SYMBOL(cpufreq_update_policy);
--- /dev/null
+From 22e7478ddbcb670e33fab72d0bbe7c394c3a2c84 Mon Sep 17 00:00:00 2001
+From: Jeff Mahoney <jeffm@suse.com>
+Date: Wed, 21 May 2014 13:28:07 -0400
+Subject: reiserfs: call truncate_setsize under tailpack mutex
+
+From: Jeff Mahoney <jeffm@suse.com>
+
+commit 22e7478ddbcb670e33fab72d0bbe7c394c3a2c84 upstream.
+
+Prior to commit 0e4f6a791b1e (Fix reiserfs_file_release()), reiserfs
+truncates serialized on i_mutex. They mostly still do, with the exception
+of reiserfs_file_release. That blocks out other writers via the tailpack
+mutex and the inode openers counter adjusted in reiserfs_file_open.
+
+However, NFS will call reiserfs_setattr without having called ->open, so
+we end up with a race when nfs is calling ->setattr while another
+process is releasing the file. Ultimately, it triggers the
+BUG_ON(inode->i_size != new_file_size) check in maybe_indirect_to_direct.
+
+The solution is to pull the lock into reiserfs_setattr to encompass the
+truncate_setsize call as well.
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/reiserfs/inode.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -3220,8 +3220,14 @@ int reiserfs_setattr(struct dentry *dent
+ attr->ia_size != i_size_read(inode)) {
+ error = inode_newsize_ok(inode, attr->ia_size);
+ if (!error) {
++ /*
++ * Could race against reiserfs_file_release
++ * if called from NFS, so take tailpack mutex.
++ */
++ mutex_lock(&REISERFS_I(inode)->tailpack);
+ truncate_setsize(inode, attr->ia_size);
+- reiserfs_vfs_truncate_file(inode);
++ reiserfs_truncate_file(inode, 1);
++ mutex_unlock(&REISERFS_I(inode)->tailpack);
+ }
+ }
+
powerpc-don-t-setup-cpus-with-bad-status.patch
powerpc-add-at_hwcap2-to-indicate-v.crypto-category-support.patch
powerpc-don-t-skip-epapr-spin-table-cpus.patch
+xfs-block-allocation-work-needs-to-be-kswapd-aware.patch
+xfs-xfs_readsb-needs-to-check-for-magic-numbers.patch
+reiserfs-call-truncate_setsize-under-tailpack-mutex.patch
+cpufreq-ppc-corenet-cpu-freq-do_div-use-quotient.patch
+cpufreq-unlock-when-failing-cpufreq_update_policy.patch
--- /dev/null
+From 1f6d64829db78a7e1d63e15c9f48f0a5d2b5a679 Mon Sep 17 00:00:00 2001
+From: Dave Chinner <dchinner@redhat.com>
+Date: Fri, 6 Jun 2014 15:59:59 +1000
+Subject: xfs: block allocation work needs to be kswapd aware
+
+From: Dave Chinner <dchinner@redhat.com>
+
+commit 1f6d64829db78a7e1d63e15c9f48f0a5d2b5a679 upstream.
+
+Upon memory pressure, kswapd calls xfs_vm_writepage() from
+shrink_page_list(). This can result in delayed allocation occurring
+and that gets deferred to the allocation workqueue.
+
+The allocation then runs outside kswapd context, which means if it
+needs memory (and it does to demand page metadata from disk) it can
+block in shrink_inactive_list() waiting for IO congestion. These
+blocking waits are normally avoided in kswapd context, so under
+memory pressure writeback from kswapd can be arbitrarily delayed by
+memory reclaim.
+
+To avoid this, pass the kswapd context to the allocation being done
+by the workqueue, so that memory reclaim understands correctly that
+the work is being done for kswapd and therefore it is not blocked
+and does not delay memory reclaim.
+
+To avoid issues with int->char conversion of flag fields (as noticed
+in v1 of this patch) convert the flag fields in the struct
+xfs_bmalloca to bool types. pahole indicates these variables are
+still single byte variables, so no extra space is consumed by this
+change.
+
+Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Signed-off-by: Dave Chinner <dchinner@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Dave Chinner <david@fromorbit.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/xfs/xfs_bmap_util.c | 16 +++++++++++++---
+ fs/xfs/xfs_bmap_util.h | 13 +++++++------
+ 2 files changed, 20 insertions(+), 9 deletions(-)
+
+--- a/fs/xfs/xfs_bmap_util.c
++++ b/fs/xfs/xfs_bmap_util.c
+@@ -258,14 +258,23 @@ xfs_bmapi_allocate_worker(
+ struct xfs_bmalloca *args = container_of(work,
+ struct xfs_bmalloca, work);
+ unsigned long pflags;
++ unsigned long new_pflags = PF_FSTRANS;
+
+- /* we are in a transaction context here */
+- current_set_flags_nested(&pflags, PF_FSTRANS);
++ /*
++ * we are in a transaction context here, but may also be doing work
++ * in kswapd context, and hence we may need to inherit that state
++ * temporarily to ensure that we don't block waiting for memory reclaim
++ * in any way.
++ */
++ if (args->kswapd)
++ new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
++
++ current_set_flags_nested(&pflags, new_pflags);
+
+ args->result = __xfs_bmapi_allocate(args);
+ complete(args->done);
+
+- current_restore_flags_nested(&pflags, PF_FSTRANS);
++ current_restore_flags_nested(&pflags, new_pflags);
+ }
+
+ /*
+@@ -284,6 +293,7 @@ xfs_bmapi_allocate(
+
+
+ args->done = &done;
++ args->kswapd = current_is_kswapd();
+ INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
+ queue_work(xfs_alloc_wq, &args->work);
+ wait_for_completion(&done);
+--- a/fs/xfs/xfs_bmap_util.h
++++ b/fs/xfs/xfs_bmap_util.h
+@@ -50,12 +50,13 @@ struct xfs_bmalloca {
+ xfs_extlen_t total; /* total blocks needed for xaction */
+ xfs_extlen_t minlen; /* minimum allocation size (blocks) */
+ xfs_extlen_t minleft; /* amount must be left after alloc */
+- char eof; /* set if allocating past last extent */
+- char wasdel; /* replacing a delayed allocation */
+- char userdata;/* set if is user data */
+- char aeof; /* allocated space at eof */
+- char conv; /* overwriting unwritten extents */
+- char stack_switch;
++ bool eof; /* set if allocating past last extent */
++ bool wasdel; /* replacing a delayed allocation */
++ bool userdata;/* set if is user data */
++ bool aeof; /* allocated space at eof */
++ bool conv; /* overwriting unwritten extents */
++ bool stack_switch;
++ bool kswapd; /* allocation in kswapd context */
+ int flags;
+ struct completion *done;
+ struct work_struct work;
--- /dev/null
+From 556b8883cfac3d3203557e161ea8005f8b5479b2 Mon Sep 17 00:00:00 2001
+From: Dave Chinner <dchinner@redhat.com>
+Date: Fri, 6 Jun 2014 16:00:43 +1000
+Subject: xfs: xfs_readsb needs to check for magic numbers
+
+From: Dave Chinner <dchinner@redhat.com>
+
+commit 556b8883cfac3d3203557e161ea8005f8b5479b2 upstream.
+
+Commit daba542 ("xfs: skip verification on initial "guess"
+superblock read") dropped the use of a verifier for the initial
+superblock read so we can probe the sector size of the filesystem
+stored in the superblock. It, however, now fails to validate that
+what was read initially is actually an XFS superblock and hence will
+fail the sector size check and return ENOSYS.
+
+This causes probe-based mounts to fail because it expects XFS to
+return EINVAL when it doesn't recognise the superblock format.
+
+Reported-by: Plamen Petrov <plamen.sisi@gmail.com>
+Tested-by: Plamen Petrov <plamen.sisi@gmail.com>
+Signed-off-by: Dave Chinner <dchinner@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Dave Chinner <david@fromorbit.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/xfs/xfs_mount.c | 23 +++++++++++++++++------
+ 1 file changed, 17 insertions(+), 6 deletions(-)
+
+--- a/fs/xfs/xfs_mount.c
++++ b/fs/xfs/xfs_mount.c
+@@ -323,8 +323,19 @@ reread:
+ /*
+ * Initialize the mount structure from the superblock.
+ */
+- xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp));
+- xfs_sb_quota_from_disk(&mp->m_sb);
++ xfs_sb_from_disk(sbp, XFS_BUF_TO_SBP(bp));
++ xfs_sb_quota_from_disk(sbp);
++
++ /*
++ * If we haven't validated the superblock, do so now before we try
++ * to check the sector size and reread the superblock appropriately.
++ */
++ if (sbp->sb_magicnum != XFS_SB_MAGIC) {
++ if (loud)
++ xfs_warn(mp, "Invalid superblock magic number");
++ error = EINVAL;
++ goto release_buf;
++ }
+
+ /*
+ * We must be able to do sector-sized and sector-aligned IO.
+@@ -337,11 +348,11 @@ reread:
+ goto release_buf;
+ }
+
+- /*
+- * Re-read the superblock so the buffer is correctly sized,
+- * and properly verified.
+- */
+ if (buf_ops == NULL) {
++ /*
++ * Re-read the superblock so the buffer is correctly sized,
++ * and properly verified.
++ */
+ xfs_buf_relse(bp);
+ sector_size = sbp->sb_sectsize;
+ buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;