--- /dev/null
+From a45f795c65b479b4ba107b6ccde29b896d51ee98 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov <idryomov@gmail.com>
+Date: Fri, 2 Dec 2016 16:35:07 +0100
+Subject: libceph: introduce ceph_crypt() for in-place en/decryption
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+commit a45f795c65b479b4ba107b6ccde29b896d51ee98 upstream.
+
+Starting with 4.9, kernel stacks may be vmalloced and therefore not
+guaranteed to be physically contiguous; the new CONFIG_VMAP_STACK
+option is enabled by default on x86. This makes it invalid to use
+on-stack buffers with the crypto scatterlist API, as sg_set_buf()
+expects a logical address and won't work with vmalloced addresses.
+
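+For illustration only (not part of the fix), a minimal sketch of the
+now-invalid pattern; the function and buffer names here are
+hypothetical:
+
+    #include <linux/scatterlist.h>
+
+    static void on_stack_sg_example(void)
+    {
+            char buf[64];           /* on-stack, possibly vmalloced */
+            struct scatterlist sg;
+
+            /*
+             * sg_set_buf() calls virt_to_page(), which is only valid
+             * for logical addresses -- with CONFIG_VMAP_STACK the
+             * resulting page pointer is bogus.
+             */
+            sg_set_buf(&sg, buf, sizeof(buf));
+    }
+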
+There isn't a different (e.g. kvec-based) crypto API we could switch
+net/ceph/crypto.c to and the current scatterlist.h API isn't getting
+updated to accommodate this use case. Allocating a new header and
+padding for each operation is a non-starter, so do the en/decryption
+in-place on a single pre-assembled (header + data + padding) heap
+buffer. This is explicitly supported by the crypto API:
+
+ "... the caller may provide the same scatter/gather list for the
+ plaintext and cipher text. After the completion of the cipher
+ operation, the plaintext data is replaced with the ciphertext data
+ in case of an encryption and vice versa for a decryption."
+
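+As a sketch (abridged, hypothetical variable names; see the actual
+ceph_aes_crypt() below), the in-place setup on a single pre-assembled
+heap buffer amounts to:
+
+    struct scatterlist sg;
+
+    /* buf = kmalloc'ed header + data + padding, crypt_len bytes */
+    sg_init_one(&sg, buf, crypt_len);
+    /* src == dst: the result overwrites buf in place */
+    skcipher_request_set_crypt(req, &sg, &sg, crypt_len, iv);
+    ret = crypto_skcipher_encrypt(req);
+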
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Reviewed-by: Sage Weil <sage@redhat.com>
+Cc: Brad Spengler <spender@grsecurity.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/ceph/crypto.c | 87 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ net/ceph/crypto.h | 2 +
+ 2 files changed, 89 insertions(+)
+
+--- a/net/ceph/crypto.c
++++ b/net/ceph/crypto.c
+@@ -526,6 +526,93 @@ int ceph_encrypt2(struct ceph_crypto_key
+ }
+ }
+
++static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
++ void *buf, int buf_len, int in_len, int *pout_len)
++{
++ struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
++ SKCIPHER_REQUEST_ON_STACK(req, tfm);
++ struct sg_table sgt;
++ struct scatterlist prealloc_sg;
++ char iv[AES_BLOCK_SIZE];
++ int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
++ int crypt_len = encrypt ? in_len + pad_byte : in_len;
++ int ret;
++
++ if (IS_ERR(tfm))
++ return PTR_ERR(tfm);
++
++ WARN_ON(crypt_len > buf_len);
++ if (encrypt)
++ memset(buf + in_len, pad_byte, pad_byte);
++ ret = setup_sgtable(&sgt, &prealloc_sg, buf, crypt_len);
++ if (ret)
++ goto out_tfm;
++
++ crypto_skcipher_setkey((void *)tfm, key->key, key->len);
++ memcpy(iv, aes_iv, AES_BLOCK_SIZE);
++
++ skcipher_request_set_tfm(req, tfm);
++ skcipher_request_set_callback(req, 0, NULL, NULL);
++ skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);
++
++ /*
++ print_hex_dump(KERN_ERR, "key: ", DUMP_PREFIX_NONE, 16, 1,
++ key->key, key->len, 1);
++ print_hex_dump(KERN_ERR, " in: ", DUMP_PREFIX_NONE, 16, 1,
++ buf, crypt_len, 1);
++ */
++ if (encrypt)
++ ret = crypto_skcipher_encrypt(req);
++ else
++ ret = crypto_skcipher_decrypt(req);
++ skcipher_request_zero(req);
++ if (ret) {
++ pr_err("%s %scrypt failed: %d\n", __func__,
++ encrypt ? "en" : "de", ret);
++ goto out_sgt;
++ }
++ /*
++ print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1,
++ buf, crypt_len, 1);
++ */
++
++ if (encrypt) {
++ *pout_len = crypt_len;
++ } else {
++ pad_byte = *(char *)(buf + in_len - 1);
++ if (pad_byte > 0 && pad_byte <= AES_BLOCK_SIZE &&
++ in_len >= pad_byte) {
++ *pout_len = in_len - pad_byte;
++ } else {
++ pr_err("%s got bad padding %d on in_len %d\n",
++ __func__, pad_byte, in_len);
++ ret = -EPERM;
++ goto out_sgt;
++ }
++ }
++
++out_sgt:
++ teardown_sgtable(&sgt);
++out_tfm:
++ crypto_free_skcipher(tfm);
++ return ret;
++}
++
++int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
++ void *buf, int buf_len, int in_len, int *pout_len)
++{
++ switch (key->type) {
++ case CEPH_CRYPTO_NONE:
++ *pout_len = in_len;
++ return 0;
++ case CEPH_CRYPTO_AES:
++ return ceph_aes_crypt(key, encrypt, buf, buf_len, in_len,
++ pout_len);
++ default:
++ return -ENOTSUPP;
++ }
++}
++
+ static int ceph_key_preparse(struct key_preparsed_payload *prep)
+ {
+ struct ceph_crypto_key *ckey;
+--- a/net/ceph/crypto.h
++++ b/net/ceph/crypto.h
+@@ -43,6 +43,8 @@ int ceph_encrypt2(struct ceph_crypto_key
+ void *dst, size_t *dst_len,
+ const void *src1, size_t src1_len,
+ const void *src2, size_t src2_len);
++int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
++ void *buf, int buf_len, int in_len, int *pout_len);
+ int ceph_crypto_init(void);
+ void ceph_crypto_shutdown(void);
+
--- /dev/null
+From 497de07d89c1410d76a15bec2bb41f24a2a89f31 Mon Sep 17 00:00:00 2001
+From: Gu Zheng <guzheng1@huawei.com>
+Date: Mon, 9 Jan 2017 09:34:48 +0800
+Subject: tmpfs: clear S_ISGID when setting posix ACLs
+
+From: Gu Zheng <guzheng1@huawei.com>
+
+commit 497de07d89c1410d76a15bec2bb41f24a2a89f31 upstream.
+
+The tmpfs modification was missed in the CVE-2016-7097 fix,
+commit 073931017b49 ("posix_acl: Clear SGID bit when setting
+file permissions").
+This can be tested with xfstest generic/375, which fails to clear
+the setgid bit in the following test case on tmpfs:
+
+ touch $testfile
+ chown 100:100 $testfile
+ chmod 2755 $testfile
+ _runas -u 100 -g 101 -- setfacl -m u::rwx,g::rwx,o::rwx $testfile
+
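+For reference, posix_acl_update_mode() (introduced by commit
+073931017b49) folds the old equiv-mode handling together with the
+SGID clearing; it does roughly:
+
+    int posix_acl_update_mode(struct inode *inode, umode_t *mode_p,
+                              struct posix_acl **acl)
+    {
+            umode_t mode = inode->i_mode;
+            int error;
+
+            error = posix_acl_equiv_mode(*acl, &mode);
+            if (error < 0)
+                    return error;
+            if (error == 0)
+                    *acl = NULL;
+            /* clear S_ISGID unless in group or capable of CAP_FSETID */
+            if (!in_group_p(inode->i_gid) &&
+                !capable_wrt_inode_uidgid(inode, CAP_FSETID))
+                    mode &= ~S_ISGID;
+            *mode_p = mode;
+            return 0;
+    }
+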
+Signed-off-by: Gu Zheng <guzheng1@huawei.com>
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Brad Spengler <spender@grsecurity.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/posix_acl.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+--- a/fs/posix_acl.c
++++ b/fs/posix_acl.c
+@@ -922,11 +922,10 @@ int simple_set_acl(struct inode *inode,
+ int error;
+
+ if (type == ACL_TYPE_ACCESS) {
+- error = posix_acl_equiv_mode(acl, &inode->i_mode);
+- if (error < 0)
+- return 0;
+- if (error == 0)
+- acl = NULL;
++ error = posix_acl_update_mode(inode,
++ &inode->i_mode, &acl);
++ if (error)
++ return error;
+ }
+
+ inode->i_ctime = current_time(inode);