From 43ece75cf384ddeb739036f72a2d9ede1b04d8ca Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Mon, 20 Apr 2020 13:24:15 +0200
Subject: [PATCH] 4.14-stable patches

added patches:
	mm-vmalloc.c-move-area-pages-after-if-statement.patch

---
 ...c-move-area-pages-after-if-statement.patch | 64 +++++++++++++++++++
 queue-4.14/series                             |  1 +
 2 files changed, 65 insertions(+)
 create mode 100644 queue-4.14/mm-vmalloc.c-move-area-pages-after-if-statement.patch

diff --git a/queue-4.14/mm-vmalloc.c-move-area-pages-after-if-statement.patch b/queue-4.14/mm-vmalloc.c-move-area-pages-after-if-statement.patch
new file mode 100644
index 00000000000..f6131f20eab
--- /dev/null
+++ b/queue-4.14/mm-vmalloc.c-move-area-pages-after-if-statement.patch
@@ -0,0 +1,64 @@
+From 7ea362427c170061b8822dd41bafaa72b3bcb9ad Mon Sep 17 00:00:00 2001
+From: Austin Kim
+Date: Mon, 23 Sep 2019 15:36:42 -0700
+Subject: mm/vmalloc.c: move 'area->pages' after if statement
+
+From: Austin Kim
+
+commit 7ea362427c170061b8822dd41bafaa72b3bcb9ad upstream.
+
+If !area->pages statement is true where memory allocation fails, area is
+freed.
+
+In this case 'area->pages = pages' should not executed. So move
+'area->pages = pages' after if statement.
+
+[akpm@linux-foundation.org: give area->pages the same treatment]
+Link: http://lkml.kernel.org/r/20190830035716.GA190684@LGEARND20B15
+Signed-off-by: Austin Kim
+Acked-by: Michal Hocko
+Reviewed-by: Andrew Morton
+Cc: Uladzislau Rezki (Sony)
+Cc: Roman Gushchin
+Cc: Roman Penyaev
+Cc: Rick Edgecombe
+Cc: Mike Rapoport
+Cc: Andrey Ryabinin
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Lee Jones
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/vmalloc.c |    8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1682,7 +1682,6 @@ static void *__vmalloc_area_node(struct
+ 	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
+ 	array_size = (nr_pages * sizeof(struct page *));
+ 
+-	area->nr_pages = nr_pages;
+ 	/* Please note that the recursion is strictly bounded. */
+ 	if (array_size > PAGE_SIZE) {
+ 		pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
+@@ -1690,13 +1689,16 @@ static void *__vmalloc_area_node(struct
+ 	} else {
+ 		pages = kmalloc_node(array_size, nested_gfp, node);
+ 	}
+-	area->pages = pages;
+-	if (!area->pages) {
++
++	if (!pages) {
+ 		remove_vm_area(area->addr);
+ 		kfree(area);
+ 		return NULL;
+ 	}
+ 
++	area->pages = pages;
++	area->nr_pages = nr_pages;
++
+ 	for (i = 0; i < area->nr_pages; i++) {
+ 		struct page *page;
+ 
diff --git a/queue-4.14/series b/queue-4.14/series
index 907a364b378..268461b6a33 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -132,3 +132,4 @@ x86-intel_rdt-add-two-new-resources-for-l2-code-and-data-prioritization-cdp.patc
 x86-intel_rdt-enable-l2-cdp-in-msr-ia32_l2_qos_cfg.patch
 x86-resctrl-preserve-cdp-enable-over-cpu-hotplug.patch
 x86-resctrl-fix-invalid-attempt-at-removing-the-default-resource-group.patch
+mm-vmalloc.c-move-area-pages-after-if-statement.patch
-- 
2.47.3
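
As background for the reordering enforced by the queued patch, the sketch below shows the same error-handling pattern in isolation: allocate, bail out and tear down on failure, and only then publish the results into the surviving structure. This is a minimal userspace C sketch, not the kernel code; the names 'struct area' and 'setup_area' are invented for illustration, and calloc()/free() stand in for the kernel allocators used in mm/vmalloc.c.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in; NOT the kernel's struct vm_struct. */
struct area {
	void **pages;
	unsigned int nr_pages;
};

/*
 * Mirrors the ordering after the fix: the pointer array is allocated first,
 * the failure path frees everything, and only once all allocations have
 * succeeded are 'pages'/'nr_pages' stored into the structure.  Assigning
 * them before the check (as the pre-patch code did) stores values into a
 * structure the failure path is about to free.
 */
static struct area *setup_area(unsigned int nr_pages)
{
	struct area *area = calloc(1, sizeof(*area));
	void **pages;

	if (!area)
		return NULL;

	pages = calloc(nr_pages, sizeof(*pages));
	if (!pages) {
		free(area);		/* error path: 'area' goes away entirely */
		return NULL;
	}

	area->pages = pages;		/* safe: every allocation succeeded */
	area->nr_pages = nr_pages;
	return area;
}

int main(void)
{
	struct area *area = setup_area(8);

	if (!area)
		return 1;

	printf("reserved %u page slots\n", area->nr_pages);
	free(area->pages);
	free(area);
	return 0;
}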