From: Greg Kroah-Hartman
Date: Sun, 20 Apr 2014 16:51:21 +0000 (-0700)
Subject: 3.13-stable patches
X-Git-Tag: v3.13.11~15
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=533a0fa6a203a405d669947249265805c2b396cf;p=thirdparty%2Fkernel%2Fstable-queue.git

3.13-stable patches

added patches:
	user-namespace-fix-incorrect-memory-barriers.patch
---

diff --git a/queue-3.13/user-namespace-fix-incorrect-memory-barriers.patch b/queue-3.13/user-namespace-fix-incorrect-memory-barriers.patch
new file mode 100644
index 00000000000..1d2e98bf7d1
--- /dev/null
+++ b/queue-3.13/user-namespace-fix-incorrect-memory-barriers.patch
@@ -0,0 +1,76 @@
+From e79323bd87808fdfbc68ce6c5371bd224d9672ee Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka
+Date: Mon, 14 Apr 2014 16:58:55 -0400
+Subject: user namespace: fix incorrect memory barriers
+
+From: Mikulas Patocka
+
+commit e79323bd87808fdfbc68ce6c5371bd224d9672ee upstream.
+
+smp_read_barrier_depends() can be used if there is a data dependency
+between the reads - i.e. if the read operation after the barrier uses an
+address that was obtained from the read operation before the barrier.
+
+In this file, there is only a control dependency, no data dependency, so
+the use of smp_read_barrier_depends() is incorrect. The code could fail in
+the following way:
+* the cpu predicts that idx < extents is true and starts executing the
+  body of the for loop
+* the cpu fetches map->extent[0].first and map->extent[0].count
+* the cpu fetches map->nr_extents
+* the cpu verifies that idx < extents is true, so it commits the
+  instructions in the body of the for loop
+
+The problem is that in this scenario, the cpu read map->extent[0].first
+and map->nr_extents in the wrong order. We need a full read memory barrier
+to prevent it.
+
+Signed-off-by: Mikulas Patocka
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/user_namespace.c |   11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/kernel/user_namespace.c
++++ b/kernel/user_namespace.c
+@@ -152,7 +152,7 @@ static u32 map_id_range_down(struct uid_
+ 
+ 	/* Find the matching extent */
+ 	extents = map->nr_extents;
+-	smp_read_barrier_depends();
++	smp_rmb();
+ 	for (idx = 0; idx < extents; idx++) {
+ 		first = map->extent[idx].first;
+ 		last = first + map->extent[idx].count - 1;
+@@ -176,7 +176,7 @@ static u32 map_id_down(struct uid_gid_ma
+ 
+ 	/* Find the matching extent */
+ 	extents = map->nr_extents;
+-	smp_read_barrier_depends();
++	smp_rmb();
+ 	for (idx = 0; idx < extents; idx++) {
+ 		first = map->extent[idx].first;
+ 		last = first + map->extent[idx].count - 1;
+@@ -199,7 +199,7 @@ static u32 map_id_up(struct uid_gid_map 
+ 
+ 	/* Find the matching extent */
+ 	extents = map->nr_extents;
+-	smp_read_barrier_depends();
++	smp_rmb();
+ 	for (idx = 0; idx < extents; idx++) {
+ 		first = map->extent[idx].lower_first;
+ 		last = first + map->extent[idx].count - 1;
+@@ -615,9 +615,8 @@ static ssize_t map_write(struct file *fi
+ 	 * were written before the count of the extents.
+ 	 *
+ 	 * To achieve this smp_wmb() is used on guarantee the write
+-	 * order and smp_read_barrier_depends() is guaranteed that we
+-	 * don't have crazy architectures returning stale data.
+-	 *
++	 * order and smp_rmb() is guaranteed that we don't have crazy
++	 * architectures returning stale data.
+ 	 */
+ 	mutex_lock(&id_map_mutex);
+ 
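The fix above turns on the difference between a control dependency (the
loop condition idx < extents guards the extent reads, but no address is
computed from nr_extents) and a data dependency (a later read dereferences
a value produced by an earlier read). To make that distinction concrete,
here is a minimal standalone userspace sketch - not part of the patch, and
all names in it are illustrative - that mirrors the writer/reader pairing
in kernel/user_namespace.c using C11 fences, with
atomic_thread_fence(memory_order_release) standing in for smp_wmb() and
atomic_thread_fence(memory_order_acquire) for smp_rmb():

/* Illustrative userspace analogue of the pattern this patch fixes;
 * C11 fences stand in for the kernel's smp_wmb()/smp_rmb(). All
 * names here are hypothetical, not taken from the kernel sources. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct extent { unsigned first, count; };

static struct extent extent[5];
static atomic_uint nr_extents;	/* plays the role of map->nr_extents */

static void *writer(void *arg)
{
	(void)arg;
	extent[0].first = 100000;
	extent[0].count = 65536;
	/* Like smp_wmb(): order the extent stores before the count store. */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&nr_extents, 1, memory_order_relaxed);
	return NULL;
}

static void *reader(void *arg)
{
	unsigned n, idx;

	(void)arg;
	n = atomic_load_explicit(&nr_extents, memory_order_relaxed);
	/*
	 * Like smp_rmb(). The loop below depends on n only through its
	 * condition (a control dependency); no address is computed from
	 * n (which would be a data dependency), so a barrier that only
	 * honours data dependencies, such as smp_read_barrier_depends(),
	 * would not order these reads.
	 */
	atomic_thread_fence(memory_order_acquire);
	for (idx = 0; idx < n; idx++)
		printf("extent %u: first=%u count=%u\n",
		       idx, extent[idx].first, extent[idx].count);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

Build with something like "cc -pthread sketch.c". If the reader observes
nr_extents == 1, the acquire fence guarantees it also observes the extent
fields stored before the release fence; dropping the fence, or weakening
it to a dependency-only barrier, would allow the reader to see
nr_extents == 1 together with stale extent[0] contents - exactly the
misordering described in the commit message.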