--- /dev/null
+From d72e9a7a93e4f8e9e52491921d99e0c8aa89eb4e Mon Sep 17 00:00:00 2001
+From: Minchan Kim <minchan@kernel.org>
+Date: Thu, 13 Apr 2017 14:56:37 -0700
+Subject: zram: do not use copy_page with non-page aligned address
+
+From: Minchan Kim <minchan@kernel.org>
+
+commit d72e9a7a93e4f8e9e52491921d99e0c8aa89eb4e upstream.
+
+copy_page() is a memcpy() optimized for page-aligned addresses. If it is
+used with a non-page-aligned address, it can corrupt memory, which leads
+to system-wide corruption. With zram, this can happen with
+
+1. 64K architecture
+2. partial IO
+3. slub debug
+
+Partial IO needs to allocate a page, and zram allocates it via kmalloc().
+With slub debug enabled, kmalloc(PAGE_SIZE) doesn't return a page-size
+aligned address, so copy_page(mem, cmem) ends up corrupting memory.
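+
+For illustration only (a hypothetical debug snippet, not part of this
+patch), the broken precondition could be checked with the standard
+IS_ALIGNED() helper:
+
+	/*
+	 * With slub debug (e.g. slub_debug=Z), red-zone/metadata padding
+	 * means kmalloc(PAGE_SIZE) need not be PAGE_SIZE aligned, so
+	 * handing such a buffer to copy_page() is undefined.
+	 */
+	void *mem = kmalloc(PAGE_SIZE, GFP_KERNEL);
+
+	if (mem && !IS_ALIGNED((unsigned long)mem, PAGE_SIZE))
+		pr_info("kmalloc(PAGE_SIZE) returned unaligned %p\n", mem);
+	kfree(mem);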
+
+So this patch changes those copy_page() calls to memcpy().
+
+Actually, we don't need to change the zram_bvec_write part because
+zsmalloc returns a page-aligned address for the PAGE_SIZE size class,
+but it's not good to rely on the internals of zsmalloc.
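+
+As a minimal sketch of the rule the patch follows (zram_copy_obj() is a
+hypothetical helper, not something this patch adds): take the page-copy
+fast path only when it is provably safe, and fall back to memcpy()
+otherwise.
+
+	static void zram_copy_obj(void *dst, const void *src, size_t len)
+	{
+		if (len == PAGE_SIZE &&
+		    IS_ALIGNED((unsigned long)dst, PAGE_SIZE) &&
+		    IS_ALIGNED((unsigned long)src, PAGE_SIZE))
+			copy_page(dst, src);	/* page-aligned fast path */
+		else
+			memcpy(dst, src, len);	/* safe for any alignment */
+	}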
+
+Note:
+ When this patch is merged to stable, clear_page should be fixed, too.
+ Unfortunately, recent zram removed that call as part of the "same page
+ merge" feature, so it's hard to backport this patch to the -stable tree
+ as-is.
+
+I will handle it when I receive the mail from the stable tree maintainer
+about merging this patch for backport.
+
+Fixes: 42e99bd ("zram: optimize memory operations with clear_page()/copy_page()")
+Link: http://lkml.kernel.org/r/1492042622-12074-2-git-send-email-minchan@kernel.org
+Signed-off-by: Minchan Kim <minchan@kernel.org>
+Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/zram/zram_drv.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -574,13 +574,13 @@ static int zram_decompress_page(struct z
+
+ if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+ bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+- clear_page(mem);
++ memset(mem, 0, PAGE_SIZE);
+ return 0;
+ }
+
+ cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
+ if (size == PAGE_SIZE)
+- copy_page(mem, cmem);
++ memcpy(mem, cmem, PAGE_SIZE);
+ else
+ ret = zcomp_decompress(zram->comp, cmem, size, mem);
+ zs_unmap_object(meta->mem_pool, handle);
+@@ -738,7 +738,7 @@ static int zram_bvec_write(struct zram *
+
+ if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
+ src = kmap_atomic(page);
+- copy_page(cmem, src);
++ memcpy(cmem, src, PAGE_SIZE);
+ kunmap_atomic(src);
+ } else {
+ memcpy(cmem, src, clen);