--- /dev/null
+From 7f62656be8a8ef14c168db2d98021fb9c8cc1076 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Wed, 13 Nov 2013 10:49:40 +0300
+Subject: aio: checking for NULL instead of IS_ERR
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit 7f62656be8a8ef14c168db2d98021fb9c8cc1076 upstream.
+
+alloc_anon_inode() returns an ERR_PTR(); it doesn't return NULL.
+
+Fixes: 71ad7490c1f3 ('rework aio migrate pages to use aio fs')
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/aio.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
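+For reference, the ERR_PTR()/IS_ERR() convention the fix switches to looks
+roughly like the sketch below; the function names are illustrative, not
+taken from fs/aio.c:
+
+    static struct inode *alloc_example_inode(struct super_block *sb)
+    {
+            struct inode *inode = new_inode(sb);
+
+            if (!inode)
+                    return ERR_PTR(-ENOMEM);  /* errno encoded in the pointer */
+            return inode;
+    }
+
+    static struct file *example_caller(struct super_block *sb)
+    {
+            struct inode *inode = alloc_example_inode(sb);
+
+            /* a NULL check here would let the ERR_PTR() value escape */
+            if (IS_ERR(inode))
+                    return ERR_CAST(inode);   /* pass the errno on as a struct file * */
+
+            return example_make_file(inode);  /* hypothetical helper */
+    }
+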
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -164,8 +164,8 @@ static struct file *aio_private_file(str
+ struct file *file;
+ struct path path;
+ struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
+- if (!inode)
+- return ERR_PTR(-ENOMEM);
++ if (IS_ERR(inode))
++ return ERR_CAST(inode);
+
+ inode->i_mapping->a_ops = &aio_ctx_aops;
+ inode->i_mapping->private_data = ctx;
--- /dev/null
+From d1b9432712a25eeb06114fb4b587133525a47de5 Mon Sep 17 00:00:00 2001
+From: Gu Zheng <guz.fnst@cn.fujitsu.com>
+Date: Wed, 4 Dec 2013 18:19:06 +0800
+Subject: aio: clean up aio ring in the fail path
+
+From: Gu Zheng <guz.fnst@cn.fujitsu.com>
+
+commit d1b9432712a25eeb06114fb4b587133525a47de5 upstream.
+
+Clean up the aio ring file in the failure paths of aio_setup_ring()
+and ioctx_alloc(). This may also fix the GPF issue reported by
+Dave Jones:
+https://lkml.org/lkml/2013/11/25/898
+
+Signed-off-by: Gu Zheng <guz.fnst@cn.fujitsu.com>
+Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/aio.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
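+The cleanup follows the usual kernel goto-unwind pattern: each failure point
+jumps to a label that releases only what has been set up so far. A minimal
+sketch of the pattern, with invented names:
+
+    static int example_setup(struct example_ctx *ctx)
+    {
+            ctx->a = example_alloc_a();              /* hypothetical helpers */
+            if (!ctx->a)
+                    return -ENOMEM;
+
+            ctx->b = example_alloc_b();
+            if (!ctx->b)
+                    goto err_free_a;                 /* undo only step one */
+
+            return 0;
+
+    err_free_a:
+            example_free_a(ctx->a);
+            return -ENOMEM;
+    }
+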
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -367,8 +367,10 @@ static int aio_setup_ring(struct kioctx
+ if (nr_pages > AIO_RING_PAGES) {
+ ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
+ GFP_KERNEL);
+- if (!ctx->ring_pages)
++ if (!ctx->ring_pages) {
++ put_aio_ring_file(ctx);
+ return -ENOMEM;
++ }
+ }
+
+ ctx->mmap_size = nr_pages * PAGE_SIZE;
+@@ -645,7 +647,7 @@ static struct kioctx *ioctx_alloc(unsign
+ aio_nr + nr_events < aio_nr) {
+ spin_unlock(&aio_nr_lock);
+ err = -EAGAIN;
+- goto err;
++ goto err_ctx;
+ }
+ aio_nr += ctx->max_reqs;
+ spin_unlock(&aio_nr_lock);
+@@ -662,6 +664,8 @@ static struct kioctx *ioctx_alloc(unsign
+
+ err_cleanup:
+ aio_nr_sub(ctx->max_reqs);
++err_ctx:
++ aio_free_ring(ctx);
+ err:
+ free_percpu(ctx->cpu);
+ free_percpu(ctx->reqs.pcpu_count);
--- /dev/null
+From e34ecee2ae791df674dfb466ce40692ca6218e43 Mon Sep 17 00:00:00 2001
+From: Kent Overstreet <kmo@daterainc.com>
+Date: Thu, 10 Oct 2013 19:31:47 -0700
+Subject: aio: Fix a trinity splat
+
+From: Kent Overstreet <kmo@daterainc.com>
+
+commit e34ecee2ae791df674dfb466ce40692ca6218e43 upstream.
+
+aio kiocb refcounting was broken - it was relying on keeping track of
+the number of available ring buffer entries, which it needs to do
+anyway; then at shutdown time it would wait for completions to be
+delivered until the number of available ring buffer entries equalled
+what it was initialized to.
+
+The problem with that is that the ring buffer is mapped writable into
+userspace, so userspace could futz with the head and tail pointers to
+cause the kernel to see extra completions, and cause free_ioctx() to
+return while there were still outstanding kiocbs. Which would be bad.
+
+The fix is just to refcount the kiocbs directly - which is more
+straightforward, and with the new percpu refcounting code doesn't cost
+us any cacheline bouncing, which was the whole point of the original
+scheme.
+
+Also clean up ioctx_alloc()'s error path and fix a bug where it wasn't
+subtracting from aio_nr if ioctx_add_table() failed.
+
+Signed-off-by: Kent Overstreet <kmo@daterainc.com>
+Cc: Benjamin LaHaise <bcrl@kvack.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/aio.c | 129 +++++++++++++++++++++++----------------------------------------
+ 1 file changed, 48 insertions(+), 81 deletions(-)
+
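+The resulting lifecycle stacks two percpu refcounts on the kioctx: ->users
+counts references from submitters, ->reqs counts in-flight kiocbs, and each
+ref's release callback starts the next teardown stage. Condensed from the
+hunks below (a summary sketch, not additional code in the patch):
+
+    /* at ioctx_alloc() time */
+    percpu_ref_init(&ctx->users, free_ioctx_users); /* released -> cancel kiocbs, kill ->reqs */
+    percpu_ref_init(&ctx->reqs,  free_ioctx_reqs);  /* released -> schedule free_ioctx()      */
+
+    /* per request */
+    percpu_ref_get(&ctx->reqs);                     /* in aio_get_req()  */
+    percpu_ref_put(&ctx->reqs);                     /* in aio_complete() */
+
+    /* teardown, once ->users has been released (free_ioctx_users) */
+    percpu_ref_kill(&ctx->reqs);                    /* drop the initial ref */
+    percpu_ref_put(&ctx->reqs);                     /* free_ioctx_reqs runs once the last
+                                                       outstanding kiocb completes */
+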
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -80,6 +80,8 @@ struct kioctx {
+ struct percpu_ref users;
+ atomic_t dead;
+
++ struct percpu_ref reqs;
++
+ unsigned long user_id;
+
+ struct __percpu kioctx_cpu *cpu;
+@@ -107,7 +109,6 @@ struct kioctx {
+ struct page **ring_pages;
+ long nr_pages;
+
+- struct rcu_head rcu_head;
+ struct work_struct free_work;
+
+ struct {
+@@ -412,26 +413,34 @@ static int kiocb_cancel(struct kioctx *c
+ return cancel(kiocb);
+ }
+
+-static void free_ioctx_rcu(struct rcu_head *head)
++static void free_ioctx(struct work_struct *work)
+ {
+- struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
++ struct kioctx *ctx = container_of(work, struct kioctx, free_work);
+
++ pr_debug("freeing %p\n", ctx);
++
++ aio_free_ring(ctx);
+ free_percpu(ctx->cpu);
+ kmem_cache_free(kioctx_cachep, ctx);
+ }
+
++static void free_ioctx_reqs(struct percpu_ref *ref)
++{
++ struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
++
++ INIT_WORK(&ctx->free_work, free_ioctx);
++ schedule_work(&ctx->free_work);
++}
++
+ /*
+ * When this function runs, the kioctx has been removed from the "hash table"
+ * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
+ * now it's safe to cancel any that need to be.
+ */
+-static void free_ioctx(struct work_struct *work)
++static void free_ioctx_users(struct percpu_ref *ref)
+ {
+- struct kioctx *ctx = container_of(work, struct kioctx, free_work);
+- struct aio_ring *ring;
++ struct kioctx *ctx = container_of(ref, struct kioctx, users);
+ struct kiocb *req;
+- unsigned cpu, avail;
+- DEFINE_WAIT(wait);
+
+ spin_lock_irq(&ctx->ctx_lock);
+
+@@ -445,54 +454,8 @@ static void free_ioctx(struct work_struc
+
+ spin_unlock_irq(&ctx->ctx_lock);
+
+- for_each_possible_cpu(cpu) {
+- struct kioctx_cpu *kcpu = per_cpu_ptr(ctx->cpu, cpu);
+-
+- atomic_add(kcpu->reqs_available, &ctx->reqs_available);
+- kcpu->reqs_available = 0;
+- }
+-
+- while (1) {
+- prepare_to_wait(&ctx->wait, &wait, TASK_UNINTERRUPTIBLE);
+-
+- ring = kmap_atomic(ctx->ring_pages[0]);
+- avail = (ring->head <= ring->tail)
+- ? ring->tail - ring->head
+- : ctx->nr_events - ring->head + ring->tail;
+-
+- atomic_add(avail, &ctx->reqs_available);
+- ring->head = ring->tail;
+- kunmap_atomic(ring);
+-
+- if (atomic_read(&ctx->reqs_available) >= ctx->nr_events - 1)
+- break;
+-
+- schedule();
+- }
+- finish_wait(&ctx->wait, &wait);
+-
+- WARN_ON(atomic_read(&ctx->reqs_available) > ctx->nr_events - 1);
+-
+- aio_free_ring(ctx);
+-
+- pr_debug("freeing %p\n", ctx);
+-
+- /*
+- * Here the call_rcu() is between the wait_event() for reqs_active to
+- * hit 0, and freeing the ioctx.
+- *
+- * aio_complete() decrements reqs_active, but it has to touch the ioctx
+- * after to issue a wakeup so we use rcu.
+- */
+- call_rcu(&ctx->rcu_head, free_ioctx_rcu);
+-}
+-
+-static void free_ioctx_ref(struct percpu_ref *ref)
+-{
+- struct kioctx *ctx = container_of(ref, struct kioctx, users);
+-
+- INIT_WORK(&ctx->free_work, free_ioctx);
+- schedule_work(&ctx->free_work);
++ percpu_ref_kill(&ctx->reqs);
++ percpu_ref_put(&ctx->reqs);
+ }
+
+ static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
+@@ -551,6 +514,16 @@ static int ioctx_add_table(struct kioctx
+ }
+ }
+
++static void aio_nr_sub(unsigned nr)
++{
++ spin_lock(&aio_nr_lock);
++ if (WARN_ON(aio_nr - nr > aio_nr))
++ aio_nr = 0;
++ else
++ aio_nr -= nr;
++ spin_unlock(&aio_nr_lock);
++}
++
+ /* ioctx_alloc
+ * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
+ */
+@@ -588,8 +561,11 @@ static struct kioctx *ioctx_alloc(unsign
+
+ ctx->max_reqs = nr_events;
+
+- if (percpu_ref_init(&ctx->users, free_ioctx_ref))
+- goto out_freectx;
++ if (percpu_ref_init(&ctx->users, free_ioctx_users))
++ goto err;
++
++ if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs))
++ goto err;
+
+ spin_lock_init(&ctx->ctx_lock);
+ spin_lock_init(&ctx->completion_lock);
+@@ -600,10 +576,10 @@ static struct kioctx *ioctx_alloc(unsign
+
+ ctx->cpu = alloc_percpu(struct kioctx_cpu);
+ if (!ctx->cpu)
+- goto out_freeref;
++ goto err;
+
+ if (aio_setup_ring(ctx) < 0)
+- goto out_freepcpu;
++ goto err;
+
+ atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
+ ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
+@@ -615,7 +591,8 @@ static struct kioctx *ioctx_alloc(unsign
+ if (aio_nr + nr_events > (aio_max_nr * 2UL) ||
+ aio_nr + nr_events < aio_nr) {
+ spin_unlock(&aio_nr_lock);
+- goto out_cleanup;
++ err = -EAGAIN;
++ goto err;
+ }
+ aio_nr += ctx->max_reqs;
+ spin_unlock(&aio_nr_lock);
+@@ -624,23 +601,19 @@ static struct kioctx *ioctx_alloc(unsign
+
+ err = ioctx_add_table(ctx, mm);
+ if (err)
+- goto out_cleanup_put;
++ goto err_cleanup;
+
+ pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
+ ctx, ctx->user_id, mm, ctx->nr_events);
+ return ctx;
+
+-out_cleanup_put:
+- percpu_ref_put(&ctx->users);
+-out_cleanup:
+- err = -EAGAIN;
++err_cleanup:
++ aio_nr_sub(ctx->max_reqs);
++err:
+ aio_free_ring(ctx);
+-out_freepcpu:
+ free_percpu(ctx->cpu);
+-out_freeref:
++ free_percpu(ctx->reqs.pcpu_count);
+ free_percpu(ctx->users.pcpu_count);
+-out_freectx:
+- put_aio_ring_file(ctx);
+ kmem_cache_free(kioctx_cachep, ctx);
+ pr_debug("error allocating ioctx %d\n", err);
+ return ERR_PTR(err);
+@@ -675,10 +648,7 @@ static void kill_ioctx(struct mm_struct
+ * -EAGAIN with no ioctxs actually in use (as far as userspace
+ * could tell).
+ */
+- spin_lock(&aio_nr_lock);
+- BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
+- aio_nr -= ctx->max_reqs;
+- spin_unlock(&aio_nr_lock);
++ aio_nr_sub(ctx->max_reqs);
+
+ if (ctx->mmap_size)
+ vm_munmap(ctx->mmap_base, ctx->mmap_size);
+@@ -810,6 +780,8 @@ static inline struct kiocb *aio_get_req(
+ if (unlikely(!req))
+ goto out_put;
+
++ percpu_ref_get(&ctx->reqs);
++
+ req->ki_ctx = ctx;
+ return req;
+ out_put:
+@@ -879,12 +851,6 @@ void aio_complete(struct kiocb *iocb, lo
+ return;
+ }
+
+- /*
+- * Take rcu_read_lock() in case the kioctx is being destroyed, as we
+- * need to issue a wakeup after incrementing reqs_available.
+- */
+- rcu_read_lock();
+-
+ if (iocb->ki_list.next) {
+ unsigned long flags;
+
+@@ -959,7 +925,7 @@ void aio_complete(struct kiocb *iocb, lo
+ if (waitqueue_active(&ctx->wait))
+ wake_up(&ctx->wait);
+
+- rcu_read_unlock();
++ percpu_ref_put(&ctx->reqs);
+ }
+ EXPORT_SYMBOL(aio_complete);
+
+@@ -1370,6 +1336,7 @@ static int io_submit_one(struct kioctx *
+ return 0;
+ out_put_req:
+ put_reqs_available(ctx, 1);
++ percpu_ref_put(&ctx->reqs);
+ kiocb_free(req);
+ return ret;
+ }
--- /dev/null
+From ddb8c45ba15149ebd41d7586261c05f7ca37f9a1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sasha.levin@oracle.com>
+Date: Tue, 19 Nov 2013 17:33:03 -0500
+Subject: aio: nullify aio->ring_pages after freeing it
+
+From: Sasha Levin <sasha.levin@oracle.com>
+
+commit ddb8c45ba15149ebd41d7586261c05f7ca37f9a1 upstream.
+
+After freeing ring_pages we leave it as-is, causing a dangling pointer.
+This has already caused an issue, so to help catch any issues in the
+future, NULL it out.
+
+Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
+Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/aio.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -251,8 +251,10 @@ static void aio_free_ring(struct kioctx
+
+ put_aio_ring_file(ctx);
+
+- if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages)
++ if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
+ kfree(ctx->ring_pages);
++ ctx->ring_pages = NULL;
++ }
+ }
+
+ static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
--- /dev/null
+From d558023207e008a4476a3b7bb8706b2a2bf5d84f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sasha.levin@oracle.com>
+Date: Tue, 19 Nov 2013 17:33:02 -0500
+Subject: aio: prevent double free in ioctx_alloc
+
+From: Sasha Levin <sasha.levin@oracle.com>
+
+commit d558023207e008a4476a3b7bb8706b2a2bf5d84f upstream.
+
+ioctx_alloc() calls aio_setup_ring() to allocate a ring. If aio_setup_ring()
+fails to do so, it calls aio_free_ring() before returning, but
+ioctx_alloc() then calls aio_free_ring() again, causing a double free of
+the ring.
+
+This is easily reproducible from userspace.
+
+Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
+Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/aio.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -661,7 +661,6 @@ static struct kioctx *ioctx_alloc(unsign
+ err_cleanup:
+ aio_nr_sub(ctx->max_reqs);
+ err:
+- aio_free_ring(ctx);
+ free_percpu(ctx->cpu);
+ free_percpu(ctx->reqs.pcpu_count);
+ free_percpu(ctx->users.pcpu_count);
--- /dev/null
+From 3e71985f2439d8c4090dc2820e497e6f3d72dcff Mon Sep 17 00:00:00 2001
+From: Pierre Ossman <pierre@ossman.eu>
+Date: Wed, 6 Nov 2013 20:00:32 +0100
+Subject: drm/radeon/audio: correct ACR table
+
+From: Pierre Ossman <pierre@ossman.eu>
+
+commit 3e71985f2439d8c4090dc2820e497e6f3d72dcff upstream.
+
+The values were taken from the HDMI spec, but they assumed
+exact x/1.001 clocks. Since we round the clocks, we also need
+to calculate different N and CTS values.
+
+Note that the N for 25.2/1.001 MHz at 44.1 kHz audio is out of
+spec. Hopefully this mode is rarely used and/or HDMI sinks
+tolerate overly large values of N.
+
+bug:
+https://bugs.freedesktop.org/show_bug.cgi?id=69675
+
+Signed-off-by: Pierre Ossman <pierre@ossman.eu>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: Josh Boyer <jwboyer@fedoraproject.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/r600_hdmi.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
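+The table rows obey the HDMI ACR relation CTS = (TMDS clock * N) / (128 * fs).
+Worked through for the rounded 25.175 MHz clock at 44.1 kHz:
+
+    CTS = 25175000 * 28224 / (128 * 44100) = 125875
+
+which is where the new (28224, 125875) pair comes from. Since 28224 exceeds
+128 * 44100 / 300 = 18816 (the upper bound the ACR-calculation patch warns
+about), this N is indeed out of spec, as the commit message notes.
+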
+--- a/drivers/gpu/drm/radeon/r600_hdmi.c
++++ b/drivers/gpu/drm/radeon/r600_hdmi.c
+@@ -58,15 +58,15 @@ enum r600_hdmi_iec_status_bits {
+ static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
+ /* 32kHz 44.1kHz 48kHz */
+ /* Clock N CTS N CTS N CTS */
+- { 25175, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
++ { 25175, 4096, 25175, 28224, 125875, 6144, 25175 }, /* 25,20/1.001 MHz */
+ { 25200, 4096, 25200, 6272, 28000, 6144, 25200 }, /* 25.20 MHz */
+ { 27000, 4096, 27000, 6272, 30000, 6144, 27000 }, /* 27.00 MHz */
+ { 27027, 4096, 27027, 6272, 30030, 6144, 27027 }, /* 27.00*1.001 MHz */
+ { 54000, 4096, 54000, 6272, 60000, 6144, 54000 }, /* 54.00 MHz */
+ { 54054, 4096, 54054, 6272, 60060, 6144, 54054 }, /* 54.00*1.001 MHz */
+- { 74176, 11648, 210937, 17836, 234375, 11648, 140625 }, /* 74.25/1.001 MHz */
++ { 74176, 4096, 74176, 5733, 75335, 6144, 74176 }, /* 74.25/1.001 MHz */
+ { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
+- { 148352, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
++ { 148352, 4096, 148352, 5733, 150670, 6144, 148352 }, /* 148.50/1.001 MHz */
+ { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
+ };
+
--- /dev/null
+From a2098250fbda149cfad9e626afe80abe3b21e574 Mon Sep 17 00:00:00 2001
+From: Pierre Ossman <pierre@ossman.eu>
+Date: Wed, 6 Nov 2013 20:09:08 +0100
+Subject: drm/radeon/audio: improve ACR calculation
+
+From: Pierre Ossman <pierre@ossman.eu>
+
+commit a2098250fbda149cfad9e626afe80abe3b21e574 upstream.
+
+In order to have any realistic chance of calculating proper
+ACR values, we need to be able to calculate both N and CTS,
+not just CTS. We still aim for the ideal N as specified in
+the HDMI spec though.
+
+bug:
+https://bugs.freedesktop.org/show_bug.cgi?id=69675
+
+Signed-off-by: Pierre Ossman <pierre@ossman.eu>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: Josh Boyer <jwboyer@fedoraproject.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/r600_hdmi.c | 68 +++++++++++++++++++++++++------------
+ 1 file changed, 46 insertions(+), 22 deletions(-)
+
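+As a worked example of the new r600_hdmi_calc_cts(): run on a 25.175 MHz
+clock at 44.1 kHz it reproduces the corrected predefined-table entry. Start
+from the safe values n = 128 * 44100 = 5644800 and cts = 25175000;
+gcd(5644800, 25175000) = 200, so the reduced fraction is n = 28224,
+cts = 125875. The ideal N would be 128 * 44100 / 1000 = 5644, giving
+mul = ceil(5644 / 28224) = 1, so the final result stays N = 28224,
+CTS = 125875 - above the 128 * freq / 300 = 18816 bound, which (as the new
+warning text says) is not always avoidable.
+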
+--- a/drivers/gpu/drm/radeon/r600_hdmi.c
++++ b/drivers/gpu/drm/radeon/r600_hdmi.c
+@@ -24,6 +24,7 @@
+ * Authors: Christian König
+ */
+ #include <linux/hdmi.h>
++#include <linux/gcd.h>
+ #include <drm/drmP.h>
+ #include <drm/radeon_drm.h>
+ #include "radeon.h"
+@@ -67,25 +68,47 @@ static const struct radeon_hdmi_acr r600
+ { 74250, 4096, 74250, 6272, 82500, 6144, 74250 }, /* 74.25 MHz */
+ { 148352, 11648, 421875, 8918, 234375, 5824, 140625 }, /* 148.50/1.001 MHz */
+ { 148500, 4096, 148500, 6272, 165000, 6144, 148500 }, /* 148.50 MHz */
+- { 0, 4096, 0, 6272, 0, 6144, 0 } /* Other */
+ };
+
++
+ /*
+- * calculate CTS value if it's not found in the table
++ * calculate CTS and N values if they are not found in the table
+ */
+-static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
++static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int *N, int freq)
+ {
+- u64 n;
+- u32 d;
++ int n, cts;
++ unsigned long div, mul;
+
+- if (*CTS == 0) {
+- n = (u64)clock * (u64)N * 1000ULL;
+- d = 128 * freq;
+- do_div(n, d);
+- *CTS = n;
+- }
+- DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
+- N, *CTS, freq);
++ /* Safe, but overly large values */
++ n = 128 * freq;
++ cts = clock * 1000;
++
++ /* Smallest valid fraction */
++ div = gcd(n, cts);
++
++ n /= div;
++ cts /= div;
++
++ /*
++ * The optimal N is 128*freq/1000. Calculate the closest larger
++ * value that doesn't truncate any bits.
++ */
++ mul = ((128*freq/1000) + (n-1))/n;
++
++ n *= mul;
++ cts *= mul;
++
++ /* Check that we are in spec (not always possible) */
++ if (n < (128*freq/1500))
++ printk(KERN_WARNING "Calculated ACR N value is too small. You may experience audio problems.\n");
++ if (n > (128*freq/300))
++ printk(KERN_WARNING "Calculated ACR N value is too large. You may experience audio problems.\n");
++
++ *N = n;
++ *CTS = cts;
++
++ DRM_DEBUG("Calculated ACR timing N=%d CTS=%d for frequency %d\n",
++ *N, *CTS, freq);
+ }
+
+ struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
+@@ -93,15 +116,16 @@ struct radeon_hdmi_acr r600_hdmi_acr(uin
+ struct radeon_hdmi_acr res;
+ u8 i;
+
+- for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
+- r600_hdmi_predefined_acr[i].clock != 0; i++)
+- ;
+- res = r600_hdmi_predefined_acr[i];
+-
+- /* In case some CTS are missing */
+- r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
+- r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
+- r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
++ /* Precalculated values for common clocks */
++ for (i = 0; i < ARRAY_SIZE(r600_hdmi_predefined_acr); i++) {
++ if (r600_hdmi_predefined_acr[i].clock == clock)
++ return r600_hdmi_predefined_acr[i];
++ }
++
++ /* And odd clocks get manually calculated */
++ r600_hdmi_calc_cts(clock, &res.cts_32khz, &res.n_32khz, 32000);
++ r600_hdmi_calc_cts(clock, &res.cts_44_1khz, &res.n_44_1khz, 44100);
++ r600_hdmi_calc_cts(clock, &res.cts_48khz, &res.n_48khz, 48000);
+
+ return res;
+ }
--- /dev/null
+From 71ad7490c1f32bd7829df76360f9fa17829868f3 Mon Sep 17 00:00:00 2001
+From: Benjamin LaHaise <bcrl@kvack.org>
+Date: Tue, 17 Sep 2013 10:18:25 -0400
+Subject: rework aio migrate pages to use aio fs
+
+From: Benjamin LaHaise <bcrl@kvack.org>
+
+commit 71ad7490c1f32bd7829df76360f9fa17829868f3 upstream.
+
+Don't abuse anon_inodes.c to host private files needed by aio;
+we can bloody well declare a mini-fs of our own instead of
+patching up what anon_inodes can create for us.
+
+Tested-by: Benjamin LaHaise <bcrl@kvack.org>
+Acked-by: Benjamin LaHaise <bcrl@kvack.org>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/aio.c | 63 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++------
+ 1 file changed, 57 insertions(+), 6 deletions(-)
+
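+The boilerplate added here is the standard pattern for an in-kernel pseudo
+filesystem: a file_system_type whose ->mount is mount_pseudo(), kern_mount()ed
+once at init, with files then assembled by hand (inode -> dentry -> alloc_file()).
+A generic sketch of that skeleton, with invented names and magic number:
+
+    static struct vfsmount *example_mnt;    /* set up once at init */
+
+    static struct dentry *example_mount(struct file_system_type *fs_type,
+                                        int flags, const char *dev_name, void *data)
+    {
+            /* no backing store needed; a pseudo superblock is enough */
+            return mount_pseudo(fs_type, "example:", NULL, NULL, 0x4578616d);
+    }
+
+    static struct file_system_type example_fs = {
+            .name    = "example",
+            .mount   = example_mount,
+            .kill_sb = kill_anon_super,
+    };
+
+    static int __init example_init(void)
+    {
+            example_mnt = kern_mount(&example_fs);
+            if (IS_ERR(example_mnt))
+                    return PTR_ERR(example_mnt);
+            return 0;
+    }
+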
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -36,10 +36,10 @@
+ #include <linux/eventfd.h>
+ #include <linux/blkdev.h>
+ #include <linux/compat.h>
+-#include <linux/anon_inodes.h>
+ #include <linux/migrate.h>
+ #include <linux/ramfs.h>
+ #include <linux/percpu-refcount.h>
++#include <linux/mount.h>
+
+ #include <asm/kmap_types.h>
+ #include <asm/uaccess.h>
+@@ -153,12 +153,67 @@ unsigned long aio_max_nr = 0x10000; /* s
+ static struct kmem_cache *kiocb_cachep;
+ static struct kmem_cache *kioctx_cachep;
+
++static struct vfsmount *aio_mnt;
++
++static const struct file_operations aio_ring_fops;
++static const struct address_space_operations aio_ctx_aops;
++
++static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
++{
++ struct qstr this = QSTR_INIT("[aio]", 5);
++ struct file *file;
++ struct path path;
++ struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
++ if (!inode)
++ return ERR_PTR(-ENOMEM);
++
++ inode->i_mapping->a_ops = &aio_ctx_aops;
++ inode->i_mapping->private_data = ctx;
++ inode->i_size = PAGE_SIZE * nr_pages;
++
++ path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
++ if (!path.dentry) {
++ iput(inode);
++ return ERR_PTR(-ENOMEM);
++ }
++ path.mnt = mntget(aio_mnt);
++
++ d_instantiate(path.dentry, inode);
++ file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &aio_ring_fops);
++ if (IS_ERR(file)) {
++ path_put(&path);
++ return file;
++ }
++
++ file->f_flags = O_RDWR;
++ file->private_data = ctx;
++ return file;
++}
++
++static struct dentry *aio_mount(struct file_system_type *fs_type,
++ int flags, const char *dev_name, void *data)
++{
++ static const struct dentry_operations ops = {
++ .d_dname = simple_dname,
++ };
++ return mount_pseudo(fs_type, "aio:", NULL, &ops, 0xa10a10a1);
++}
++
+ /* aio_setup
+ * Creates the slab caches used by the aio routines, panic on
+ * failure as this is done early during the boot sequence.
+ */
+ static int __init aio_setup(void)
+ {
++ static struct file_system_type aio_fs = {
++ .name = "aio",
++ .mount = aio_mount,
++ .kill_sb = kill_anon_super,
++ };
++ aio_mnt = kern_mount(&aio_fs);
++ if (IS_ERR(aio_mnt))
++ panic("Failed to create aio fs mount.");
++
+ kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+ kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+
+@@ -284,16 +339,12 @@ static int aio_setup_ring(struct kioctx
+ if (nr_pages < 0)
+ return -EINVAL;
+
+- file = anon_inode_getfile_private("[aio]", &aio_ring_fops, ctx, O_RDWR);
++ file = aio_private_file(ctx, nr_pages);
+ if (IS_ERR(file)) {
+ ctx->aio_ring_file = NULL;
+ return -EAGAIN;
+ }
+
+- file->f_inode->i_mapping->a_ops = &aio_ctx_aops;
+- file->f_inode->i_mapping->private_data = ctx;
+- file->f_inode->i_size = PAGE_SIZE * (loff_t)nr_pages;
+-
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page;
+ page = find_or_create_page(file->f_inode->i_mapping,
elevator-fix-a-race-in-elevator-switching-and-md-device-initialization.patch
elevator-acquire-q-sysfs_lock-in-elevator_change.patch
ntp-make-periodic-rtc-update-more-reliable.patch
+aio-fix-a-trinity-splat.patch
+rework-aio-migrate-pages-to-use-aio-fs.patch
+aio-checking-for-null-instead-of-is_err.patch
+aio-prevent-double-free-in-ioctx_alloc.patch
+aio-nullify-aio-ring_pages-after-freeing-it.patch
+aio-clean-up-aio-ring-in-the-fail-path.patch
+drm-radeon-audio-improve-acr-calculation.patch
+drm-radeon-audio-correct-acr-table.patch