--- /dev/null
+From bf4e6b4e757488dee1b6a581f49c7ac34cd217f8 Mon Sep 17 00:00:00 2001
+From: Hannes Reinecke <hare@suse.de>
+Date: Thu, 26 Nov 2015 08:46:57 +0100
+Subject: block: Always check queue limits for cloned requests
+
+From: Hannes Reinecke <hare@suse.de>
+
+commit bf4e6b4e757488dee1b6a581f49c7ac34cd217f8 upstream.
+
+When a cloned request is retried on other queues it always needs
+to be checked against the queue limits of that queue.
+Otherwise the calculations for nr_phys_segments might be wrong,
+leading to a crash in scsi_init_sgtable().
+
+To clarify this, the patch renames blk_rq_check_limits()
+to blk_cloned_rq_check_limits() and removes the symbol
+export, as the new function should only be used for
+cloned requests and never exported.
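+
+For context, a hedged sketch of a stacking driver's dispatch path (the
+helpers marked "hypothetical" are illustration only, not from this
+patch):
+
+	/* dm-mpath style dispatch of a cloned request; error handling
+	 * trimmed for brevity */
+	struct request *clone = clone_old_rq(rq, md);	/* hypothetical */
+
+	/*
+	 * blk_insert_cloned_request() now always runs
+	 * blk_cloned_rq_check_limits() against the destination queue,
+	 * so a clone that no longer fits q's limits is rejected with
+	 * -EIO instead of crashing later in scsi_init_sgtable().
+	 */
+	if (blk_insert_cloned_request(q, clone) == -EIO)
+		requeue_clone(clone);			/* hypothetical */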
+
+Cc: Mike Snitzer <snitzer@redhat.com>
+Cc: Ewan Milne <emilne@redhat.com>
+Cc: Jeff Moyer <jmoyer@redhat.com>
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+Fixes: e2a60da74 ("block: Clean up special command handling logic")
+Acked-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-core.c | 21 +++++++--------------
+ include/linux/blkdev.h | 1 -
+ 2 files changed, 7 insertions(+), 15 deletions(-)
+
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -2023,7 +2023,8 @@ void submit_bio(int rw, struct bio *bio)
+ EXPORT_SYMBOL(submit_bio);
+
+ /**
+- * blk_rq_check_limits - Helper function to check a request for the queue limit
++ * blk_cloned_rq_check_limits - Helper function to check a cloned request
++ * for the new queue limits
+ * @q: the queue
+ * @rq: the request being checked
+ *
+@@ -2034,20 +2035,13 @@ EXPORT_SYMBOL(submit_bio);
+ * after it is inserted to @q, it should be checked against @q before
+ * the insertion using this generic function.
+ *
+- * This function should also be useful for request stacking drivers
+- * in some cases below, so export this function.
+ * Request stacking drivers like request-based dm may change the queue
+- * limits while requests are in the queue (e.g. dm's table swapping).
+- * Such request stacking drivers should check those requests against
+- * the new queue limits again when they dispatch those requests,
+- * although such checkings are also done against the old queue limits
+- * when submitting requests.
++ * limits when retrying requests on other queues. Those requests need
++ * to be checked against the new queue limits again during dispatch.
+ */
+-int blk_rq_check_limits(struct request_queue *q, struct request *rq)
++static int blk_cloned_rq_check_limits(struct request_queue *q,
++ struct request *rq)
+ {
+- if (!rq_mergeable(rq))
+- return 0;
+-
+ if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
+ printk(KERN_ERR "%s: over max size limit.\n", __func__);
+ return -EIO;
+@@ -2067,7 +2061,6 @@ int blk_rq_check_limits(struct request_q
+
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(blk_rq_check_limits);
+
+ /**
+ * blk_insert_cloned_request - Helper for stacking drivers to submit a request
+@@ -2079,7 +2072,7 @@ int blk_insert_cloned_request(struct req
+ unsigned long flags;
+ int where = ELEVATOR_INSERT_BACK;
+
+- if (blk_rq_check_limits(q, rq))
++ if (blk_cloned_rq_check_limits(q, rq))
+ return -EIO;
+
+ if (rq->rq_disk &&
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -767,7 +767,6 @@ extern void blk_rq_set_block_pc(struct r
+ extern void blk_requeue_request(struct request_queue *, struct request *);
+ extern void blk_add_request_payload(struct request *rq, struct page *page,
+ unsigned int len);
+-extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
+ extern int blk_lld_busy(struct request_queue *q);
+ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+ struct bio_set *bs, gfp_t gfp_mask,
--- /dev/null
+From 23688bf4f830a89866fd0ed3501e342a7360fe4f Mon Sep 17 00:00:00 2001
+From: Junichi Nomura <j-nomura@ce.jp.nec.com>
+Date: Tue, 22 Dec 2015 10:23:44 -0700
+Subject: block: ensure to split after potentially bouncing a bio
+
+From: Junichi Nomura <j-nomura@ce.jp.nec.com>
+
+commit 23688bf4f830a89866fd0ed3501e342a7360fe4f upstream.
+
+blk_queue_bio() does the split before the bounce, which makes the
+segment counting based on pages before bouncing and can go wrong.
+Move the split to after bouncing, like we do for blk-mq, which
+fixes the bio's segment count being wrong.
+
+Fixes: 54efd50bfd87 ("block: make generic_make_request handle arbitrarily sized bios")
+Tested-by: Artem S. Tashkinov <t.artem@lycos.com>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ block/blk-core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -1616,8 +1616,6 @@ static void blk_queue_bio(struct request
+ struct request *req;
+ unsigned int request_count = 0;
+
+- blk_queue_split(q, &bio, q->bio_split);
+-
+ /*
+ * low level driver can indicate that it wants pages above a
+ * certain limit bounced to low memory (ie for highmem, or even
+@@ -1625,6 +1623,8 @@ static void blk_queue_bio(struct request
+ */
+ blk_queue_bounce(q, &bio);
+
++ blk_queue_split(q, &bio, q->bio_split);
++
+ if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+ bio->bi_error = -EIO;
+ bio_endio(bio);
--- /dev/null
+From 4afa5f9617927453ac04b24b584f6c718dfb4f45 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Sun, 1 Nov 2015 17:11:19 +0800
+Subject: crypto: algif_hash - Only export and import on sockets with data
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 4afa5f9617927453ac04b24b584f6c718dfb4f45 upstream.
+
+The hash_accept call fails to work on sockets that have not received
+any data. For some algorithm implementations it may cause crashes.
+
+This patch fixes this by ensuring that we only export and import on
+sockets that have received data.
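+
+For illustration, a minimal userspace sketch of the failing case (AF_ALG
+client, error checks omitted): accept()ing the op socket before any data
+has been sent used to export uninitialized hash state.
+
+	int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
+	struct sockaddr_alg sa = {
+		.salg_family = AF_ALG,
+		.salg_type   = "hash",
+		.salg_name   = "sha1",
+	};
+
+	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
+	int opfd = accept(tfmfd, NULL, 0);
+	/* no send() yet, so there is no partial hash state to export */
+	int opfd2 = accept(opfd, NULL, 0);	/* previously exported anyway */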
+
+Reported-by: Harsh Jain <harshjain.prof@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Tested-by: Stephan Mueller <smueller@chronox.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/algif_hash.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+--- a/crypto/algif_hash.c
++++ b/crypto/algif_hash.c
+@@ -181,9 +181,14 @@ static int hash_accept(struct socket *so
+ struct sock *sk2;
+ struct alg_sock *ask2;
+ struct hash_ctx *ctx2;
++ bool more;
+ int err;
+
+- err = crypto_ahash_export(req, state);
++ lock_sock(sk);
++ more = ctx->more;
++ err = more ? crypto_ahash_export(req, state) : 0;
++ release_sock(sk);
++
+ if (err)
+ return err;
+
+@@ -194,7 +199,10 @@ static int hash_accept(struct socket *so
+ sk2 = newsock->sk;
+ ask2 = alg_sk(sk2);
+ ctx2 = ask2->private;
+- ctx2->more = 1;
++ ctx2->more = more;
++
++ if (!more)
++ return err;
+
+ err = crypto_ahash_import(&ctx2->req, state);
+ if (err) {
--- /dev/null
+From 0d96e4bab2855a030077cc695a3563fd7cb0e7d8 Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Fri, 18 Dec 2015 19:16:57 +0800
+Subject: crypto: algif_skcipher - Use new skcipher interface
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 0d96e4bab2855a030077cc695a3563fd7cb0e7d8 upstream.
+
+This patch replaces uses of ablkcipher with the new skcipher
+interface.
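+
+For context, a rough sketch of the skcipher interface this converts to
+(standard kernel crypto API calls; setup and error handling elided):
+
+	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
+	struct skcipher_request *req;
+
+	crypto_skcipher_setkey(tfm, key, keylen);
+	req = skcipher_request_alloc(tfm, GFP_KERNEL);
+	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				      done_cb, &done);	/* done_cb: yours */
+	skcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
+	err = crypto_skcipher_encrypt(req);	/* or crypto_skcipher_decrypt() */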
+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Tested-by: <smueller@chronox.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/algif_skcipher.c | 61 +++++++++++++++++++++++-------------------------
+ 1 file changed, 30 insertions(+), 31 deletions(-)
+
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -47,7 +47,7 @@ struct skcipher_ctx {
+ bool merge;
+ bool enc;
+
+- struct ablkcipher_request req;
++ struct skcipher_request req;
+ };
+
+ struct skcipher_async_rsgl {
+@@ -64,13 +64,13 @@ struct skcipher_async_req {
+ };
+
+ #define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
+- crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)))
++ crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))
+
+ #define GET_REQ_SIZE(ctx) \
+- crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))
++ crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))
+
+ #define GET_IV_SIZE(ctx) \
+- crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req))
++ crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))
+
+ #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
+ sizeof(struct scatterlist) - 1)
+@@ -302,8 +302,8 @@ static int skcipher_sendmsg(struct socke
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
+- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
+- unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
++ unsigned ivsize = crypto_skcipher_ivsize(tfm);
+ struct skcipher_sg_list *sgl;
+ struct af_alg_control con = {};
+ long copied = 0;
+@@ -507,7 +507,7 @@ static int skcipher_recvmsg_async(struct
+ struct skcipher_sg_list *sgl;
+ struct scatterlist *sg;
+ struct skcipher_async_req *sreq;
+- struct ablkcipher_request *req;
++ struct skcipher_request *req;
+ struct skcipher_async_rsgl *last_rsgl = NULL;
+ unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
+ unsigned int reqlen = sizeof(struct skcipher_async_req) +
+@@ -531,9 +531,9 @@ static int skcipher_recvmsg_async(struct
+ }
+ sg_init_table(sreq->tsg, tx_nents);
+ memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
+- ablkcipher_request_set_tfm(req, crypto_ablkcipher_reqtfm(&ctx->req));
+- ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+- skcipher_async_cb, sk);
++ skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
++ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++ skcipher_async_cb, sk);
+
+ while (iov_iter_count(&msg->msg_iter)) {
+ struct skcipher_async_rsgl *rsgl;
+@@ -608,10 +608,10 @@ static int skcipher_recvmsg_async(struct
+ if (mark)
+ sg_mark_end(sreq->tsg + txbufs - 1);
+
+- ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
+- len, sreq->iv);
+- err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
+- crypto_ablkcipher_decrypt(req);
++ skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
++ len, sreq->iv);
++ err = ctx->enc ? crypto_skcipher_encrypt(req) :
++ crypto_skcipher_decrypt(req);
+ if (err == -EINPROGRESS) {
+ atomic_inc(&ctx->inflight);
+ err = -EIOCBQUEUED;
+@@ -632,7 +632,7 @@ static int skcipher_recvmsg_sync(struct
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
+- unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
++ unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
+ &ctx->req));
+ struct skcipher_sg_list *sgl;
+ struct scatterlist *sg;
+@@ -669,14 +669,13 @@ static int skcipher_recvmsg_sync(struct
+ if (!used)
+ goto free;
+
+- ablkcipher_request_set_crypt(&ctx->req, sg,
+- ctx->rsgl.sg, used,
+- ctx->iv);
++ skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
++ ctx->iv);
+
+ err = af_alg_wait_for_completion(
+ ctx->enc ?
+- crypto_ablkcipher_encrypt(&ctx->req) :
+- crypto_ablkcipher_decrypt(&ctx->req),
++ crypto_skcipher_encrypt(&ctx->req) :
++ crypto_skcipher_decrypt(&ctx->req),
+ &ctx->completion);
+
+ free:
+@@ -751,17 +750,17 @@ static struct proto_ops algif_skcipher_o
+
+ static void *skcipher_bind(const char *name, u32 type, u32 mask)
+ {
+- return crypto_alloc_ablkcipher(name, type, mask);
++ return crypto_alloc_skcipher(name, type, mask);
+ }
+
+ static void skcipher_release(void *private)
+ {
+- crypto_free_ablkcipher(private);
++ crypto_free_skcipher(private);
+ }
+
+ static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
+ {
+- return crypto_ablkcipher_setkey(private, key, keylen);
++ return crypto_skcipher_setkey(private, key, keylen);
+ }
+
+ static void skcipher_wait(struct sock *sk)
+@@ -778,13 +777,13 @@ static void skcipher_sock_destruct(struc
+ {
+ struct alg_sock *ask = alg_sk(sk);
+ struct skcipher_ctx *ctx = ask->private;
+- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
+
+ if (atomic_read(&ctx->inflight))
+ skcipher_wait(sk);
+
+ skcipher_free_sgl(sk);
+- sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
++ sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
+ sock_kfree_s(sk, ctx, ctx->len);
+ af_alg_release_parent(sk);
+ }
+@@ -793,20 +792,20 @@ static int skcipher_accept_parent(void *
+ {
+ struct skcipher_ctx *ctx;
+ struct alg_sock *ask = alg_sk(sk);
+- unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);
++ unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(private);
+
+ ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+- ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
++ ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(private),
+ GFP_KERNEL);
+ if (!ctx->iv) {
+ sock_kfree_s(sk, ctx, len);
+ return -ENOMEM;
+ }
+
+- memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));
++ memset(ctx->iv, 0, crypto_skcipher_ivsize(private));
+
+ INIT_LIST_HEAD(&ctx->tsgl);
+ ctx->len = len;
+@@ -819,9 +818,9 @@ static int skcipher_accept_parent(void *
+
+ ask->private = ctx;
+
+- ablkcipher_request_set_tfm(&ctx->req, private);
+- ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+- af_alg_complete, &ctx->completion);
++ skcipher_request_set_tfm(&ctx->req, private);
++ skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
++ af_alg_complete, &ctx->completion);
+
+ sk->sk_destruct = skcipher_sock_destruct;
+
--- /dev/null
+From c7556ff7e3e4f2747583bcc787f12ec9460ec3a6 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Sun, 18 Oct 2015 17:51:20 +0100
+Subject: crypto: caam - fix non-block aligned hash calculation
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit c7556ff7e3e4f2747583bcc787f12ec9460ec3a6 upstream.
+
+caam does not properly calculate the size of the retained state
+when non-block aligned hashes are requested - it uses the wrong
+buffer sizes, which results in errors such as:
+
+caam_jr 2102000.jr1: 40000501: DECO: desc idx 5: SGT Length Error. The descriptor is trying to read more data than is contained in the SGT table.
+
+We end up here with:
+
+in_len 0x46 blocksize 0x40 last_bufsize 0x0 next_bufsize 0x6
+to_hash 0x40 ctx_len 0x28 nbytes 0x20
+
+which results in a job descriptor of:
+
+jobdesc@889: ed03d918: b0861c08 3daa0080 f1400000 3d03d938
+jobdesc@889: ed03d928: 00000068 f8400000 3cde2a40 00000028
+
+where the word at 0xed03d928 is the expected data size (0x68), and a
+scatterlist containing:
+
+sg@892: ed03d938: 00000000 3cde2a40 00000028 00000000
+sg@892: ed03d948: 00000000 3d03d100 00000006 00000000
+sg@892: ed03d958: 00000000 7e8aa700 40000020 00000000
+
+0x68 comes from 0x28 (the context size) plus the "in_len" rounded down
+to a block size (0x40). in_len comes from 0x26 bytes of unhashed data
+from the previous operation, plus the 0x20 bytes from the latest
+operation.
+
+The fixed version would create:
+
+sg@892: ed03d938: 00000000 3cde2a40 00000028 00000000
+sg@892: ed03d948: 00000000 3d03d100 00000026 00000000
+sg@892: ed03d958: 00000000 7e8aa700 40000020 00000000
+
+which replaces the 0x06 length with the correct 0x26 bytes of previously
+unhashed data.
+
+This fixes a previous commit which erroneously "fixed" this due to a
+DMA-API bug report; that commit indicates that the bug was caused via a
+test_ahash_pnum() function in the tcrypt module. No such function has
+ever existed in the mainline kernel. Given that the change in this
+commit has been tested with DMA API debug enabled and shows no issue,
+I can only conclude that test_ahash_pnum() was triggering that bad
+behaviour by CAAM.
+
+Fixes: 7d5196aba3c8 ("crypto: caam - Correct DMA unmap size in ahash_update_ctx()")
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/caam/caamhash.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -829,7 +829,7 @@ static int ahash_update_ctx(struct ahash
+ state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
+ edesc->sec4_sg + 1,
+ buf, state->buf_dma,
+- *next_buflen, *buflen);
++ *buflen, last_buflen);
+
+ if (src_nents) {
+ src_map_to_sec4_sg(jrdev, req->src, src_nents,
--- /dev/null
+From 97bce7e0b58dfc7d159ded329f57961868fb060b Mon Sep 17 00:00:00 2001
+From: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
+Date: Sun, 20 Sep 2015 16:42:36 +0200
+Subject: crypto: crc32c-pclmul - use .rodata instead of .rotata
+
+From: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
+
+commit 97bce7e0b58dfc7d159ded329f57961868fb060b upstream.
+
+Module crc32c-intel uses a special read-only data section named .rotata.
+This section is defined for K_table, and its name seems to be a spelling
+mistake for .rodata.
+
+Fixes: 473946e674eb ("crypto: crc32c-pclmul - Shrink K_table to 32-bit words")
+Signed-off-by: Nicolas Iooss <nicolas.iooss_linux@m4x.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/crypto/crc32c-pcl-intel-asm_64.S | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
++++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+@@ -330,7 +330,7 @@ ENDPROC(crc_pcl)
+ ## PCLMULQDQ tables
+ ## Table is 128 entries x 2 words (8 bytes) each
+ ################################################################
+-.section .rotata, "a", %progbits
++.section .rodata, "a", %progbits
+ .align 8
+ K_table:
+ .long 0x493c7d27, 0x00000001
--- /dev/null
+From cb8affb55c7e64816f3effcd9b2fc3268c016fac Mon Sep 17 00:00:00 2001
+From: David Gstir <david@sigma-star.at>
+Date: Sun, 15 Nov 2015 17:14:41 +0100
+Subject: crypto: nx - Fix timing leak in GCM and CCM decryption
+
+From: David Gstir <david@sigma-star.at>
+
+commit cb8affb55c7e64816f3effcd9b2fc3268c016fac upstream.
+
+Using non-constant time memcmp() makes the verification of the authentication
+tag in the decrypt path vulnerable to timing attacks. Fix this by using
+crypto_memneq() instead.
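+
+A minimal sketch of the pattern: memcmp() may return as soon as a byte
+differs, so response time reveals how many tag bytes matched, while
+crypto_memneq() touches every byte regardless.
+
+	/* leaky: exits at the first differing byte */
+	rc = memcmp(computed_tag, received_tag, authsize) ? -EBADMSG : 0;
+
+	/* constant-time: examines all authsize bytes */
+	rc = crypto_memneq(computed_tag, received_tag, authsize) ? -EBADMSG : 0;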
+
+Signed-off-by: David Gstir <david@sigma-star.at>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/nx/nx-aes-ccm.c | 2 +-
+ drivers/crypto/nx/nx-aes-gcm.c | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/crypto/nx/nx-aes-ccm.c
++++ b/drivers/crypto/nx/nx-aes-ccm.c
+@@ -409,7 +409,7 @@ static int ccm_nx_decrypt(struct aead_re
+ processed += to_process;
+ } while (processed < nbytes);
+
+- rc = memcmp(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
++ rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag,
+ authsize) ? -EBADMSG : 0;
+ out:
+ spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
+--- a/drivers/crypto/nx/nx-aes-gcm.c
++++ b/drivers/crypto/nx/nx-aes-gcm.c
+@@ -21,6 +21,7 @@
+
+ #include <crypto/internal/aead.h>
+ #include <crypto/aes.h>
++#include <crypto/algapi.h>
+ #include <crypto/scatterwalk.h>
+ #include <linux/module.h>
+ #include <linux/types.h>
+@@ -418,7 +419,7 @@ mac:
+ itag, req->src, req->assoclen + nbytes,
+ crypto_aead_authsize(crypto_aead_reqtfm(req)),
+ SCATTERWALK_FROM_SG);
+- rc = memcmp(itag, otag,
++ rc = crypto_memneq(itag, otag,
+ crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
+ -EBADMSG : 0;
+ }
--- /dev/null
+From 176155dac13f528e0a58c14dc322623219365d91 Mon Sep 17 00:00:00 2001
+From: Tadeusz Struk <tadeusz.struk@intel.com>
+Date: Wed, 21 Oct 2015 14:57:09 -0700
+Subject: crypto: qat - don't use userspace pointer
+
+From: Tadeusz Struk <tadeusz.struk@intel.com>
+
+commit 176155dac13f528e0a58c14dc322623219365d91 upstream.
+
+Bugfix - don't dereference userspace pointer.
+
+Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/qat/qat_common/adf_ctl_drv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
++++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+@@ -198,7 +198,7 @@ static int adf_copy_key_value_data(struc
+ goto out_err;
+ }
+
+- params_head = section_head->params;
++ params_head = section.params;
+
+ while (params_head) {
+ if (copy_from_user(&key_val, (void __user *)params_head,
--- /dev/null
+From 70d906bc17500edfa9bdd8c8b7e59618c7911613 Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Sun, 6 Dec 2015 02:51:37 +0100
+Subject: crypto: skcipher - Copy iv from desc even for 0-len walks
+
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+
+commit 70d906bc17500edfa9bdd8c8b7e59618c7911613 upstream.
+
+Some ciphers actually support encrypting zero length plaintexts. For
+example, many AEAD modes support this. The resulting ciphertext for
+those winds up being only the authentication tag, which is a result of
+the key, the iv, the additional data, and the fact that the plaintext
+had zero length. The blkcipher constructors won't copy the IV to the
+right place, however, when using a zero length input, resulting in
+some significant problems when ciphers call their initialization
+routines, only to find that the ->iv parameter is uninitialized. One
+such example of this would be using chacha20poly1305 with a zero length
+input, which then calls chacha20, which calls the key setup routine,
+which eventually OOPSes due to the uninitialized ->iv member.
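+
+As a hedged illustration of the zero-length case (standard AEAD API
+names assumed): encrypting an empty plaintext still consumes the IV and
+emits only the tag, so ->iv must be valid even when walk->total is 0.
+
+	aead_request_set_ad(req, assoclen);
+	aead_request_set_crypt(req, src_sg, dst_sg, 0, iv); /* cryptlen 0 */
+	err = crypto_aead_encrypt(req);	/* chacha20 key setup reads iv here */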
+
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/ablkcipher.c | 2 +-
+ crypto/blkcipher.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/crypto/ablkcipher.c
++++ b/crypto/ablkcipher.c
+@@ -277,12 +277,12 @@ static int ablkcipher_walk_first(struct
+ if (WARN_ON_ONCE(in_irq()))
+ return -EDEADLK;
+
++ walk->iv = req->info;
+ walk->nbytes = walk->total;
+ if (unlikely(!walk->total))
+ return 0;
+
+ walk->iv_buffer = NULL;
+- walk->iv = req->info;
+ if (unlikely(((unsigned long)walk->iv & alignmask))) {
+ int err = ablkcipher_copy_iv(walk, tfm, alignmask);
+
+--- a/crypto/blkcipher.c
++++ b/crypto/blkcipher.c
+@@ -326,12 +326,12 @@ static int blkcipher_walk_first(struct b
+ if (WARN_ON_ONCE(in_irq()))
+ return -EDEADLK;
+
++ walk->iv = desc->info;
+ walk->nbytes = walk->total;
+ if (unlikely(!walk->total))
+ return 0;
+
+ walk->buffer = NULL;
+- walk->iv = desc->info;
+ if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
+ int err = blkcipher_copy_iv(walk);
+ if (err)
--- /dev/null
+From 4f9ea86604e3ba64edd2817795798168fbb3c1a6 Mon Sep 17 00:00:00 2001
+From: LABBE Corentin <clabbe.montjoie@gmail.com>
+Date: Mon, 16 Nov 2015 09:35:54 +0100
+Subject: crypto: sun4i-ss - add missing statesize
+
+From: LABBE Corentin <clabbe.montjoie@gmail.com>
+
+commit 4f9ea86604e3ba64edd2817795798168fbb3c1a6 upstream.
+
+The sun4i-ss implementation of md5/sha1 is via ahash algorithms.
+Commit 8996eafdcbad ("crypto: ahash - ensure statesize is non-zero")
+made it impossible to load them without giving statesize. This patch
+specifies statesize for sha1 and md5.
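+
+For context, a short sketch of what statesize is for: callers size
+their export/import buffers from it (real ahash helpers, illustrative
+buffer handling):
+
+	unsigned int ssize = crypto_ahash_statesize(tfm);
+	void *state = kmalloc(ssize, GFP_KERNEL);	/* 0 bytes if unset */
+
+	crypto_ahash_export(req, state);  /* save partial md5/sha1 state */
+	crypto_ahash_import(req2, state); /* resume on another request */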
+
+Fixes: 6298e948215f ("crypto: sunxi-ss - Add Allwinner Security System crypto accelerator")
+Tested-by: Chen-Yu Tsai <wens@csie.org>
+Signed-off-by: LABBE Corentin <clabbe.montjoie@gmail.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/sunxi-ss/sun4i-ss-core.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/crypto/sunxi-ss/sun4i-ss-core.c
++++ b/drivers/crypto/sunxi-ss/sun4i-ss-core.c
+@@ -39,6 +39,7 @@ static struct sun4i_ss_alg_template ss_a
+ .import = sun4i_hash_import_md5,
+ .halg = {
+ .digestsize = MD5_DIGEST_SIZE,
++ .statesize = sizeof(struct md5_state),
+ .base = {
+ .cra_name = "md5",
+ .cra_driver_name = "md5-sun4i-ss",
+@@ -66,6 +67,7 @@ static struct sun4i_ss_alg_template ss_a
+ .import = sun4i_hash_import_sha1,
+ .halg = {
+ .digestsize = SHA1_DIGEST_SIZE,
++ .statesize = sizeof(struct sha1_state),
+ .base = {
+ .cra_name = "sha1",
+ .cra_driver_name = "sha1-sun4i-ss",
--- /dev/null
+From 79960943fdc114fd4583c9ab164b5c89da7aa601 Mon Sep 17 00:00:00 2001
+From: David Gstir <david@sigma-star.at>
+Date: Sun, 15 Nov 2015 17:14:42 +0100
+Subject: crypto: talitos - Fix timing leak in ESP ICV verification
+
+From: David Gstir <david@sigma-star.at>
+
+commit 79960943fdc114fd4583c9ab164b5c89da7aa601 upstream.
+
+Using non-constant time memcmp() makes the verification of the authentication
+tag in the decrypt path vulnerable to timing attacks. Fix this by using
+crypto_memneq() instead.
+
+Signed-off-by: David Gstir <david@sigma-star.at>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/talitos.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -1015,7 +1015,7 @@ static void ipsec_esp_decrypt_swauth_don
+ } else
+ oicv = (char *)&edesc->link_tbl[0];
+
+- err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0;
++ err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
+ }
+
+ kfree(edesc);
--- /dev/null
+From 30ce6e1cc5a0f781d60227e9096c86e188d2c2bd Mon Sep 17 00:00:00 2001
+From: Mike Snitzer <snitzer@redhat.com>
+Date: Mon, 23 Nov 2015 16:24:45 -0500
+Subject: dm btree: fix leak of bufio-backed block in btree_split_sibling error path
+
+From: Mike Snitzer <snitzer@redhat.com>
+
+commit 30ce6e1cc5a0f781d60227e9096c86e188d2c2bd upstream.
+
+The block allocated at the start of btree_split_sibling() is never
+released if later insert_at() fails.
+
+Fix this by releasing the previously allocated bufio block using
+unlock_block().
+
+Reported-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/persistent-data/dm-btree.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/persistent-data/dm-btree.c
++++ b/drivers/md/persistent-data/dm-btree.c
+@@ -471,8 +471,10 @@ static int btree_split_sibling(struct sh
+
+ r = insert_at(sizeof(__le64), pn, parent_index + 1,
+ le64_to_cpu(rn->keys[0]), &location);
+- if (r)
++ if (r) {
++ unlock_block(s->info, right);
+ return r;
++ }
+
+ if (key < le64_to_cpu(rn->keys[0])) {
+ unlock_block(s->info, right);
--- /dev/null
+From 26bbe7ef6d5cdc7ec08cba6d433fca4060f258f3 Mon Sep 17 00:00:00 2001
+From: Seth Jennings <sjennings@variantweb.net>
+Date: Fri, 11 Dec 2015 13:40:57 -0800
+Subject: drivers/base/memory.c: prohibit offlining of memory blocks with missing sections
+
+From: Seth Jennings <sjennings@variantweb.net>
+
+commit 26bbe7ef6d5cdc7ec08cba6d433fca4060f258f3 upstream.
+
+Commit bdee237c0343 ("x86: mm: Use 2GB memory block size on large-memory
+x86-64 systems") and 982792c782ef ("x86, mm: probe memory block size for
+generic x86 64bit") introduced large block sizes for x86. This made it
+possible to have multiple sections per memory block where previously
+there was only ever one section per block.
+
+Since blocks consist of contiguous ranges of sections, there can be holes
+in the blocks where sections are not present. If one attempts to
+offline such a block, a crash occurs since the code is not designed to
+deal with this.
+
+This patch is a quick fix to guard against the crash by not allowing
+blocks with non-present sections to be offlined.
+
+Addresses https://bugzilla.kernel.org/show_bug.cgi?id=107781
+
+Signed-off-by: Seth Jennings <sjennings@variantweb.net>
+Reported-by: Andrew Banman <abanman@sgi.com>
+Cc: Daniel J Blueman <daniel@numascale.com>
+Cc: Yinghai Lu <yinghai@kernel.org>
+Cc: Greg KH <greg@kroah.com>
+Cc: Russ Anderson <rja@sgi.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/base/memory.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/base/memory.c
++++ b/drivers/base/memory.c
+@@ -303,6 +303,10 @@ static int memory_subsys_offline(struct
+ if (mem->state == MEM_OFFLINE)
+ return 0;
+
++ /* Can't offline block with non-present sections */
++ if (mem->section_count != sections_per_block)
++ return -EINVAL;
++
+ return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
+ }
+
--- /dev/null
+From 569cf1876a32e574ba8a7fb825cd91bafd003882 Mon Sep 17 00:00:00 2001
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+Date: Thu, 3 Sep 2015 13:38:23 -0700
+Subject: f2fs crypto: allocate buffer for decrypting filename
+
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+
+commit 569cf1876a32e574ba8a7fb825cd91bafd003882 upstream.
+
+We get dentry pages from highmem, and their addresses go directly into
+the decryption path via f2fs_fname_disk_to_usr.
+But sg_init_one assumes the address is not from highmem, so we can hit
+this panic: kmap_high is never called, yet kunmap_high is triggered at
+the end.
+
+kernel BUG at ../../../../../../kernel/mm/highmem.c:290!
+Internal error: Oops - BUG: 0 [#1] PREEMPT SMP ARM
+...
+ (kunmap_high+0xb0/0xb8) from [<c0114534>] (__kunmap_atomic+0xa0/0xa4)
+ (__kunmap_atomic+0xa0/0xa4) from [<c035f028>] (blkcipher_walk_done+0x128/0x1ec)
+ (blkcipher_walk_done+0x128/0x1ec) from [<c0366c24>] (crypto_cbc_decrypt+0xc0/0x170)
+ (crypto_cbc_decrypt+0xc0/0x170) from [<c0367148>] (crypto_cts_decrypt+0xc0/0x114)
+ (crypto_cts_decrypt+0xc0/0x114) from [<c035ea98>] (async_decrypt+0x40/0x48)
+ (async_decrypt+0x40/0x48) from [<c032ca34>] (f2fs_fname_disk_to_usr+0x124/0x304)
+ (f2fs_fname_disk_to_usr+0x124/0x304) from [<c03056fc>] (f2fs_fill_dentries+0xac/0x188)
+ (f2fs_fill_dentries+0xac/0x188) from [<c03059c8>] (f2fs_readdir+0x1f0/0x300)
+ (f2fs_readdir+0x1f0/0x300) from [<c0218054>] (vfs_readdir+0x90/0xb4)
+ (vfs_readdir+0x90/0xb4) from [<c0218418>] (SyS_getdents64+0x64/0xcc)
+ (SyS_getdents64+0x64/0xcc) from [<c0105ba0>] (ret_fast_syscall+0x0/0x30)
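+
+The fix follows the usual bounce pattern, sketched here with illustrative
+variable names: copy the name out of the (possibly highmem) dentry page
+into a kmalloc'd lowmem buffer before it reaches sg_init_one().
+
+	buf = kmalloc(len, GFP_NOFS);		/* lowmem copy */
+	memcpy(buf, name_in_dentry_page, len);
+	/* decryption path may now safely do sg_init_one(&sg, buf, len) */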
+
+Reviewed-by: Chao Yu <chao2.yu@samsung.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/f2fs/dir.c | 13 ++++++++++---
+ fs/f2fs/namei.c | 10 +++++++++-
+ 2 files changed, 19 insertions(+), 4 deletions(-)
+
+--- a/fs/f2fs/dir.c
++++ b/fs/f2fs/dir.c
+@@ -787,7 +787,6 @@ bool f2fs_fill_dentries(struct dir_conte
+ else
+ d_type = DT_UNKNOWN;
+
+- /* encrypted case */
+ de_name.name = d->filename[bit_pos];
+ de_name.len = le16_to_cpu(de->name_len);
+
+@@ -795,12 +794,20 @@ bool f2fs_fill_dentries(struct dir_conte
+ int save_len = fstr->len;
+ int ret;
+
++ de_name.name = kmalloc(de_name.len, GFP_NOFS);
++ if (!de_name.name)
++ return false;
++
++ memcpy(de_name.name, d->filename[bit_pos], de_name.len);
++
+ ret = f2fs_fname_disk_to_usr(d->inode, &de->hash_code,
+ &de_name, fstr);
+- de_name = *fstr;
+- fstr->len = save_len;
++ kfree(de_name.name);
+ if (ret < 0)
+ return true;
++
++ de_name = *fstr;
++ fstr->len = save_len;
+ }
+
+ if (!dir_emit(ctx, de_name.name, de_name.len,
+--- a/fs/f2fs/namei.c
++++ b/fs/f2fs/namei.c
+@@ -947,8 +947,13 @@ static const char *f2fs_encrypted_follow
+
+ /* Symlink is encrypted */
+ sd = (struct f2fs_encrypted_symlink_data *)caddr;
+- cstr.name = sd->encrypted_path;
+ cstr.len = le16_to_cpu(sd->len);
++ cstr.name = kmalloc(cstr.len, GFP_NOFS);
++ if (!cstr.name) {
++ res = -ENOMEM;
++ goto errout;
++ }
++ memcpy(cstr.name, sd->encrypted_path, cstr.len);
+
+ /* this is broken symlink case */
+ if (cstr.name[0] == 0 && cstr.len == 0) {
+@@ -970,6 +975,8 @@ static const char *f2fs_encrypted_follow
+ if (res < 0)
+ goto errout;
+
++ kfree(cstr.name);
++
+ paddr = pstr.name;
+
+ /* Null-terminate the name */
+@@ -979,6 +986,7 @@ static const char *f2fs_encrypted_follow
+ page_cache_release(cpage);
+ return *cookie = paddr;
+ errout:
++ kfree(cstr.name);
+ f2fs_fname_crypto_free_buffer(&pstr);
+ kunmap(cpage);
+ page_cache_release(cpage);
--- /dev/null
+From e470127e9606b1fa151c4184243e61296d1e0c0f Mon Sep 17 00:00:00 2001
+From: Ioan-Adrian Ratiu <adi@adirat.com>
+Date: Fri, 20 Nov 2015 22:19:02 +0200
+Subject: HID: usbhid: fix recursive deadlock
+
+From: Ioan-Adrian Ratiu <adi@adirat.com>
+
+commit e470127e9606b1fa151c4184243e61296d1e0c0f upstream.
+
+The critical section protected by usbhid->lock in hid_ctrl() is too
+big and because of this it causes a recursive deadlock. "Too big" means
+the case statement and the call to hid_input_report() do not need to be
+protected by the spinlock (no URB operations are done inside them).
+
+The deadlock happens because in certain rare cases drivers try to grab
+the lock while handling the ctrl irq which grabs the lock before them
+as described above. For example newer wacom tablets like 056a:033c try
+to reschedule proximity reads from wacom_intuos_schedule_prox_event()
+calling hid_hw_request() -> usbhid_request() -> usbhid_submit_report()
+which tries to grab the usbhid lock already held by hid_ctrl().
+
+There are two ways to get out of this deadlock:
+ 1. Make the drivers work "around" the ctrl critical region, in the
+ wacom case for ex. by delaying the scheduling of the proximity read
+ request itself to a workqueue.
+ 2. Shrink the critical region so the usbhid lock protects only the
+ instructions which modify usbhid state, calling hid_input_report()
+ with the spinlock unlocked, allowing the device driver to grab the
+ lock first, finish and then grab the lock afterwards in hid_ctrl().
+
+This patch implements the 2nd solution.
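+
+The cycle, sketched as a call chain (from the wacom example above):
+
+	hid_ctrl()				/* takes usbhid->lock */
+	  hid_input_report()
+	    wacom_intuos_schedule_prox_event()
+	      hid_hw_request()
+	        usbhid_request()
+	          usbhid_submit_report()	/* wants usbhid->lock: deadlock */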
+
+Signed-off-by: Ioan-Adrian Ratiu <adi@adirat.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Jason Gerecke <jason.gerecke@wacom.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hid/usbhid/hid-core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/hid/usbhid/hid-core.c
++++ b/drivers/hid/usbhid/hid-core.c
+@@ -477,8 +477,6 @@ static void hid_ctrl(struct urb *urb)
+ struct usbhid_device *usbhid = hid->driver_data;
+ int unplug = 0, status = urb->status;
+
+- spin_lock(&usbhid->lock);
+-
+ switch (status) {
+ case 0: /* success */
+ if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_IN)
+@@ -498,6 +496,8 @@ static void hid_ctrl(struct urb *urb)
+ hid_warn(urb->dev, "ctrl urb status %d received\n", status);
+ }
+
++ spin_lock(&usbhid->lock);
++
+ if (unplug) {
+ usbhid->ctrltail = usbhid->ctrlhead;
+ } else {
--- /dev/null
+From 1a093ceb053832c25b92f3cf26b957543c7baf9b Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@primarydata.com>
+Date: Mon, 28 Dec 2015 11:27:15 -0500
+Subject: NFSv4.1/pnfs: Fixup an lo->plh_block_lgets imbalance in layoutreturn
+
+From: Trond Myklebust <trond.myklebust@primarydata.com>
+
+commit 1a093ceb053832c25b92f3cf26b957543c7baf9b upstream.
+
+Since commit 2d8ae84fbc32, nothing is bumping lo->plh_block_lgets in the
+layoutreturn path, so it should not be touched in nfs4_layoutreturn_release
+either.
+
+Fixes: 2d8ae84fbc32 ("NFSv4.1/pnfs: Remove redundant lo->plh_block_lgets...")
+Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/nfs/nfs4proc.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -8060,7 +8060,6 @@ static void nfs4_layoutreturn_release(vo
+ pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
+ pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
+ pnfs_clear_layoutreturn_waitbit(lo);
+- lo->plh_block_lgets--;
+ spin_unlock(&lo->plh_inode->i_lock);
+ pnfs_free_lseg_list(&freeme);
+ pnfs_put_layout_hdr(lrp->args.layout);
--- /dev/null
+From b1b1e15ef6b80facf76d6757649dfd7295eda29f Mon Sep 17 00:00:00 2001
+From: Tariq Saeed <tariq.x.saeed@oracle.com>
+Date: Thu, 21 Jan 2016 16:40:39 -0800
+Subject: ocfs2: NFS hangs in __ocfs2_cluster_lock due to race with ocfs2_unblock_lock
+
+From: Tariq Saeed <tariq.x.saeed@oracle.com>
+
+commit b1b1e15ef6b80facf76d6757649dfd7295eda29f upstream.
+
+NFS hangs on a 2-node ocfs2 cluster with each node exporting a dir. The
+lock causing the hang is the global bitmap inode lock. Node 1 is the
+master and has the lock granted in PR mode; Node 2 is in the converting
+list (PR -> EX).
+There are no holders of the lock on the master node so it should
+downconvert to NL and grant EX to node 2 but that does not happen.
+BLOCKED + QUEUED in lock res are set and it is on osb blocked list.
+Threads are waiting in __ocfs2_cluster_lock on BLOCKED. One thread
+wants EX, the rest want PR. So it is as though the downconvert thread needs
+to be kicked to complete the conv.
+
+The hang is caused by an EX req coming into __ocfs2_cluster_lock on the
+heels of a PR req after it sets BUSY (drops l_lock, releasing EX
+thread), forcing the incoming EX to wait on BUSY without doing anything.
+PR has called ocfs2_dlm_lock, which sets the node 1 lock from NL -> PR,
+queues ast.
+
+At this time, an upconvert (PR -> EX) arrives from node 2 and finds a
+conflict with node 1's lock in PR, so the lock res is put on the dlm
+thread's dirty list.
+
+After returning from ocfs2_dlm_lock, the PR thread now waits behind EX
+on BUSY till awoken by the ast.
+
+Now it is dlm_thread that serially runs dlm_shuffle_lists, ast, bast, in
+that order. dlm_shuffle_lists queues a bast on behalf of node 2 (which
+will be run by dlm_thread right after the ast). ast does its part, sets
+UPCONVERT_FINISHING, clears BUSY and wakes its waiters. Next,
+dlm_thread runs bast. It sets BLOCKED and kicks dc thread. dc thread
+runs ocfs2_unblock_lock, but since UPCONVERT_FINISHING is set, it skips
+doing anything and requeues.
+
+Inside of __ocfs2_cluster_lock, since EX has been waiting on BUSY ahead
+of PR, it wakes up first, finds BLOCKED set and skips doing anything but
+clearing UPCONVERT_FINISHING (which was actually "meant" for the PR
+thread), and this time waits on BLOCKED. Next, the PR thread comes out
+of wait but since UPCONVERT_FINISHING is not set, it skips updating the
+l_ro_holders and goes straight to wait on BLOCKED. So there, we have a
+hang! Threads in __ocfs2_cluster_lock wait on BLOCKED, lock res in osb
+blocked list. Only when dc thread is awoken, it will run
+ocfs2_unblock_lock and things will unhang.
+
+One way to fix this is to wake the dc thread on the flag after clearing
+UPCONVERT_FINISHING.
+
+Orabug: 20933419
+Signed-off-by: Tariq Saeed <tariq.x.saeed@oracle.com>
+Signed-off-by: Santosh Shilimkar <santosh.shilimkar@oracle.com>
+Reviewed-by: Wengang Wang <wen.gang.wang@oracle.com>
+Reviewed-by: Mark Fasheh <mfasheh@suse.de>
+Cc: Joel Becker <jlbec@evilplan.org>
+Cc: Junxiao Bi <junxiao.bi@oracle.com>
+Reviewed-by: Joseph Qi <joseph.qi@huawei.com>
+Cc: Eric Ren <zren@suse.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ocfs2/dlmglue.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/fs/ocfs2/dlmglue.c
++++ b/fs/ocfs2/dlmglue.c
+@@ -1390,6 +1390,7 @@ static int __ocfs2_cluster_lock(struct o
+ unsigned int gen;
+ int noqueue_attempted = 0;
+ int dlm_locked = 0;
++ int kick_dc = 0;
+
+ if (!(lockres->l_flags & OCFS2_LOCK_INITIALIZED)) {
+ mlog_errno(-EINVAL);
+@@ -1524,7 +1525,12 @@ update_holders:
+ unlock:
+ lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+
++ /* ocfs2_unblock_lock requeues on seeing OCFS2_LOCK_UPCONVERT_FINISHING */
++ kick_dc = (lockres->l_flags & OCFS2_LOCK_BLOCKED);
++
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
++ if (kick_dc)
++ ocfs2_wake_downconvert_thread(osb);
+ out:
+ /*
+ * This is helping work around a lock inversion between the page lock
--- /dev/null
+crypto-crc32c-pclmul-use-.rodata-instead-of-.rotata.patch
+crypto-caam-fix-non-block-aligned-hash-calculation.patch
+f2fs-crypto-allocate-buffer-for-decrypting-filename.patch
+crypto-algif_hash-only-export-and-import-on-sockets-with-data.patch
+crypto-qat-don-t-use-userspace-pointer.patch
+crypto-nx-fix-timing-leak-in-gcm-and-ccm-decryption.patch
+crypto-talitos-fix-timing-leak-in-esp-icv-verification.patch
+crypto-skcipher-copy-iv-from-desc-even-for-0-len-walks.patch
+crypto-algif_skcipher-use-new-skcipher-interface.patch
+crypto-sun4i-ss-add-missing-statesize.patch
+block-always-check-queue-limits-for-cloned-requests.patch
+dm-btree-fix-leak-of-bufio-backed-block-in-btree_split_sibling-error-path.patch
+drivers-base-memory.c-prohibit-offlining-of-memory-blocks-with-missing-sections.patch
+block-ensure-to-split-after-potentially-bouncing-a-bio.patch
+nfsv4.1-pnfs-fixup-an-lo-plh_block_lgets-imbalance-in-layoutreturn.patch
+ocfs2-nfs-hangs-in-__ocfs2_cluster_lock-due-to-race-with-ocfs2_unblock_lock.patch
+hid-usbhid-fix-recursive-deadlock.patch