--- /dev/null
+From d31911b9374a76560d2c8ea4aa6ce5781621e81d Mon Sep 17 00:00:00 2001
+From: Haibo Chen <haibo.chen@freescale.com>
+Date: Tue, 25 Aug 2015 10:02:11 +0800
+Subject: mmc: sdhci: fix dma memory leak in sdhci_pre_req()
+
+From: Haibo Chen <haibo.chen@freescale.com>
+
+commit d31911b9374a76560d2c8ea4aa6ce5781621e81d upstream.
+
+Currently one mrq->data may execute dma_map_sg() twice
+when the mmc subsystem prepares more than one new request, and the
+following log shows up:
+ sdhci[sdhci_pre_dma_transfer] invalid cookie: 24, next-cookie 25
+
+In this condition, mrq->data maps a dma-memory(1) in sdhci_pre_req
+for the first time, and maps another dma-memory(2) in sdhci_prepare_data
+for the second time. But the driver only unmaps the dma-memory(2), and
+dma-memory(1) is never unmapped, which causes the dma memory leak issue.
+
+This patch uses another method to map the dma memory for the mrq->data,
+which can fix this dma memory leak issue.
+
+Fixes: 348487cb28e6 ("mmc: sdhci: use pipeline mmc requests to improve performance")
+Reported-and-tested-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Haibo Chen <haibo.chen@freescale.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/host/sdhci.c | 67 +++++++++++++++++------------------------------
+ drivers/mmc/host/sdhci.h | 8 ++---
+ include/linux/mmc/core.h | 1
+ 3 files changed, 30 insertions(+), 46 deletions(-)
+
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -55,8 +55,7 @@ static int sdhci_execute_tuning(struct m
+ static void sdhci_tuning_timer(unsigned long data);
+ static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
+ static int sdhci_pre_dma_transfer(struct sdhci_host *host,
+- struct mmc_data *data,
+- struct sdhci_host_next *next);
++ struct mmc_data *data);
+ static int sdhci_do_get_cd(struct sdhci_host *host);
+
+ #ifdef CONFIG_PM
+@@ -510,7 +509,7 @@ static int sdhci_adma_table_pre(struct s
+ goto fail;
+ BUG_ON(host->align_addr & host->align_mask);
+
+- host->sg_count = sdhci_pre_dma_transfer(host, data, NULL);
++ host->sg_count = sdhci_pre_dma_transfer(host, data);
+ if (host->sg_count < 0)
+ goto unmap_align;
+
+@@ -649,9 +648,11 @@ static void sdhci_adma_table_post(struct
+ }
+ }
+
+- if (!data->host_cookie)
++ if (data->host_cookie == COOKIE_MAPPED) {
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len, direction);
++ data->host_cookie = COOKIE_UNMAPPED;
++ }
+ }
+
+ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
+@@ -847,7 +848,7 @@ static void sdhci_prepare_data(struct sd
+ } else {
+ int sg_cnt;
+
+- sg_cnt = sdhci_pre_dma_transfer(host, data, NULL);
++ sg_cnt = sdhci_pre_dma_transfer(host, data);
+ if (sg_cnt <= 0) {
+ /*
+ * This only happens when someone fed
+@@ -963,11 +964,13 @@ static void sdhci_finish_data(struct sdh
+ if (host->flags & SDHCI_USE_ADMA)
+ sdhci_adma_table_post(host, data);
+ else {
+- if (!data->host_cookie)
++ if (data->host_cookie == COOKIE_MAPPED) {
+ dma_unmap_sg(mmc_dev(host->mmc),
+ data->sg, data->sg_len,
+ (data->flags & MMC_DATA_READ) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
++ data->host_cookie = COOKIE_UNMAPPED;
++ }
+ }
+ }
+
+@@ -2131,49 +2134,36 @@ static void sdhci_post_req(struct mmc_ho
+ struct mmc_data *data = mrq->data;
+
+ if (host->flags & SDHCI_REQ_USE_DMA) {
+- if (data->host_cookie)
++ if (data->host_cookie == COOKIE_GIVEN ||
++ data->host_cookie == COOKIE_MAPPED)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+ data->flags & MMC_DATA_WRITE ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+- mrq->data->host_cookie = 0;
++ data->host_cookie = COOKIE_UNMAPPED;
+ }
+ }
+
+ static int sdhci_pre_dma_transfer(struct sdhci_host *host,
+- struct mmc_data *data,
+- struct sdhci_host_next *next)
++ struct mmc_data *data)
+ {
+ int sg_count;
+
+- if (!next && data->host_cookie &&
+- data->host_cookie != host->next_data.cookie) {
+- pr_debug(DRIVER_NAME "[%s] invalid cookie: %d, next-cookie %d\n",
+- __func__, data->host_cookie, host->next_data.cookie);
+- data->host_cookie = 0;
++ if (data->host_cookie == COOKIE_MAPPED) {
++ data->host_cookie = COOKIE_GIVEN;
++ return data->sg_count;
+ }
+
+- /* Check if next job is already prepared */
+- if (next ||
+- (!next && data->host_cookie != host->next_data.cookie)) {
+- sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg,
+- data->sg_len,
+- data->flags & MMC_DATA_WRITE ?
+- DMA_TO_DEVICE : DMA_FROM_DEVICE);
+-
+- } else {
+- sg_count = host->next_data.sg_count;
+- host->next_data.sg_count = 0;
+- }
++ WARN_ON(data->host_cookie == COOKIE_GIVEN);
+
++ sg_count = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
++ data->flags & MMC_DATA_WRITE ?
++ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ if (sg_count == 0)
+- return -EINVAL;
++ return -ENOSPC;
+
+- if (next) {
+- next->sg_count = sg_count;
+- data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie;
+- } else
+- host->sg_count = sg_count;
++ data->sg_count = sg_count;
++ data->host_cookie = COOKIE_MAPPED;
+
+ return sg_count;
+ }
+@@ -2183,16 +2173,10 @@ static void sdhci_pre_req(struct mmc_hos
+ {
+ struct sdhci_host *host = mmc_priv(mmc);
+
+- if (mrq->data->host_cookie) {
+- mrq->data->host_cookie = 0;
+- return;
+- }
++ mrq->data->host_cookie = COOKIE_UNMAPPED;
+
+ if (host->flags & SDHCI_REQ_USE_DMA)
+- if (sdhci_pre_dma_transfer(host,
+- mrq->data,
+- &host->next_data) < 0)
+- mrq->data->host_cookie = 0;
++ sdhci_pre_dma_transfer(host, mrq->data);
+ }
+
+ static void sdhci_card_event(struct mmc_host *mmc)
+@@ -3090,7 +3074,6 @@ int sdhci_add_host(struct sdhci_host *ho
+ host->max_clk = host->ops->get_max_clock(host);
+ }
+
+- host->next_data.cookie = 1;
+ /*
+ * In case of Host Controller v3.00, find out whether clock
+ * multiplier is supported.
+--- a/drivers/mmc/host/sdhci.h
++++ b/drivers/mmc/host/sdhci.h
+@@ -309,9 +309,10 @@ struct sdhci_adma2_64_desc {
+ */
+ #define SDHCI_MAX_SEGS 128
+
+-struct sdhci_host_next {
+- unsigned int sg_count;
+- s32 cookie;
++enum sdhci_cookie {
++ COOKIE_UNMAPPED,
++ COOKIE_MAPPED,
++ COOKIE_GIVEN,
+ };
+
+ struct sdhci_host {
+@@ -506,7 +507,6 @@ struct sdhci_host {
+ #define SDHCI_TUNING_MODE_1 0
+ struct timer_list tuning_timer; /* Timer for tuning */
+
+- struct sdhci_host_next next_data;
+ unsigned long private[0] ____cacheline_aligned;
+ };
+
+--- a/include/linux/mmc/core.h
++++ b/include/linux/mmc/core.h
+@@ -121,6 +121,7 @@ struct mmc_data {
+ struct mmc_request *mrq; /* associated request */
+
+ unsigned int sg_len; /* size of scatter list */
++ int sg_count; /* mapped sg entries */
+ struct scatterlist *sg; /* I/O scatter list */
+ s32 host_cookie; /* host private data */
+ };
--- /dev/null
+From 96be5f2806cd65a2ebced3bfcdf7df0116e6c4a6 Mon Sep 17 00:00:00 2001
+From: Elad Raz <eladr@mellanox.com>
+Date: Sat, 22 Aug 2015 08:44:11 +0300
+Subject: netfilter: ipset: Fixing unnamed union init
+
+From: Elad Raz <eladr@mellanox.com>
+
+commit 96be5f2806cd65a2ebced3bfcdf7df0116e6c4a6 upstream.
+
+Following up on Vinson Lee's post [1], this patch fixes compilation
+issues found with gcc 4.4.7. The initialization of the .cidr field of
+unnamed unions causes a compilation error in gcc 4.4.x.
+
+References
+
+Visible links
+[1] https://lkml.org/lkml/2015/7/5/74
+
+Signed-off-by: Elad Raz <eladr@mellanox.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Cc: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
+index 3c862c0a76d1..a93dfebffa81 100644
+--- a/net/netfilter/ipset/ip_set_hash_netnet.c
++++ b/net/netfilter/ipset/ip_set_hash_netnet.c
+@@ -131,6 +131,13 @@ hash_netnet4_data_next(struct hash_netnet4_elem *next,
+ #define HOST_MASK 32
+ #include "ip_set_hash_gen.h"
+
++static void
++hash_netnet4_init(struct hash_netnet4_elem *e)
++{
++ e->cidr[0] = HOST_MASK;
++ e->cidr[1] = HOST_MASK;
++}
++
+ static int
+ hash_netnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ const struct xt_action_param *par,
+@@ -160,7 +167,7 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ {
+ const struct hash_netnet *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+- struct hash_netnet4_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
++ struct hash_netnet4_elem e = { };
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ u32 ip = 0, ip_to = 0, last;
+ u32 ip2 = 0, ip2_from = 0, ip2_to = 0, last2;
+@@ -169,6 +176,7 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
++ hash_netnet4_init(&e);
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+ return -IPSET_ERR_PROTOCOL;
+@@ -357,6 +365,13 @@ hash_netnet6_data_next(struct hash_netnet4_elem *next,
+ #define IP_SET_EMIT_CREATE
+ #include "ip_set_hash_gen.h"
+
++static void
++hash_netnet6_init(struct hash_netnet6_elem *e)
++{
++ e->cidr[0] = HOST_MASK;
++ e->cidr[1] = HOST_MASK;
++}
++
+ static int
+ hash_netnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ const struct xt_action_param *par,
+@@ -385,13 +400,14 @@ hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[],
+ enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
+ {
+ ipset_adtfn adtfn = set->variant->adt[adt];
+- struct hash_netnet6_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
++ struct hash_netnet6_elem e = { };
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ int ret;
+
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
++ hash_netnet6_init(&e);
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
+ return -IPSET_ERR_PROTOCOL;
+diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c
+index 0c68734f5cc4..9a14c237830f 100644
+--- a/net/netfilter/ipset/ip_set_hash_netportnet.c
++++ b/net/netfilter/ipset/ip_set_hash_netportnet.c
+@@ -142,6 +142,13 @@ hash_netportnet4_data_next(struct hash_netportnet4_elem *next,
+ #define HOST_MASK 32
+ #include "ip_set_hash_gen.h"
+
++static void
++hash_netportnet4_init(struct hash_netportnet4_elem *e)
++{
++ e->cidr[0] = HOST_MASK;
++ e->cidr[1] = HOST_MASK;
++}
++
+ static int
+ hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
+ const struct xt_action_param *par,
+@@ -175,7 +182,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ {
+ const struct hash_netportnet *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+- struct hash_netportnet4_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
++ struct hash_netportnet4_elem e = { };
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ u32 ip = 0, ip_to = 0, ip_last, p = 0, port, port_to;
+ u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
+@@ -185,6 +192,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
++ hash_netportnet4_init(&e);
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
+@@ -412,6 +420,13 @@ hash_netportnet6_data_next(struct hash_netportnet4_elem *next,
+ #define IP_SET_EMIT_CREATE
+ #include "ip_set_hash_gen.h"
+
++static void
++hash_netportnet6_init(struct hash_netportnet6_elem *e)
++{
++ e->cidr[0] = HOST_MASK;
++ e->cidr[1] = HOST_MASK;
++}
++
+ static int
+ hash_netportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
+ const struct xt_action_param *par,
+@@ -445,7 +460,7 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
+ {
+ const struct hash_netportnet *h = set->data;
+ ipset_adtfn adtfn = set->variant->adt[adt];
+- struct hash_netportnet6_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
++ struct hash_netportnet6_elem e = { };
+ struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
+ u32 port, port_to;
+ bool with_ports = false;
+@@ -454,6 +469,7 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
+ if (tb[IPSET_ATTR_LINENO])
+ *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
+
++ hash_netportnet6_init(&e);
+ if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
+ !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
powerpc-msi-fix-race-condition-in-tearing-down-msi-interrupts.patch
rsi-fix-possible-leak-when-loading-firmware.patch
inet-fix-potential-deadlock-in-reqsk_queue_unlink.patch
+ubifs-kill-unneeded-locking-in-ubifs_init_security.patch
+ubi-validate-data_size.patch
+ubi-return-enospc-if-no-enough-space-available.patch
+mmc-sdhci-fix-dma-memory-leak-in-sdhci_pre_req.patch
+netfilter-ipset-fixing-unnamed-union-init.patch
--- /dev/null
+From 7c7feb2ebfc9c0552c51f0c050db1d1a004faac5 Mon Sep 17 00:00:00 2001
+From: shengyong <shengyong1@huawei.com>
+Date: Mon, 28 Sep 2015 17:57:19 +0000
+Subject: UBI: return ENOSPC if no enough space available
+
+From: shengyong <shengyong1@huawei.com>
+
+commit 7c7feb2ebfc9c0552c51f0c050db1d1a004faac5 upstream.
+
+UBI: attaching mtd1 to ubi0
+UBI: scanning is finished
+UBI error: init_volumes: not enough PEBs, required 706, available 686
+UBI error: ubi_wl_init: no enough physical eraseblocks (-20, need 1)
+UBI error: ubi_attach_mtd_dev: failed to attach mtd1, error -12 <= NOT ENOMEM
+UBI error: ubi_init: cannot attach mtd1
+
+If available PEBs are not enough when initializing volumes, return -ENOSPC
+directly. If available PEBs are not enough when initializing WL, return
+-ENOSPC instead of -ENOMEM.
+
+Signed-off-by: Sheng Yong <shengyong1@huawei.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Reviewed-by: David Gstir <david@sigma-star.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/ubi/vtbl.c | 1 +
+ drivers/mtd/ubi/wl.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+--- a/drivers/mtd/ubi/vtbl.c
++++ b/drivers/mtd/ubi/vtbl.c
+@@ -646,6 +646,7 @@ static int init_volumes(struct ubi_devic
+ if (ubi->corr_peb_count)
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
++ return -ENOSPC;
+ }
+ ubi->rsvd_pebs += reserved_pebs;
+ ubi->avail_pebs -= reserved_pebs;
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -1601,6 +1601,7 @@ int ubi_wl_init(struct ubi_device *ubi,
+ if (ubi->corr_peb_count)
+ ubi_err(ubi, "%d PEBs are corrupted and not used",
+ ubi->corr_peb_count);
++ err = -ENOSPC;
+ goto out_free;
+ }
+ ubi->avail_pebs -= reserved_pebs;
--- /dev/null
+From 281fda27673f833a01d516658a64d22a32c8e072 Mon Sep 17 00:00:00 2001
+From: Richard Weinberger <richard@nod.at>
+Date: Tue, 22 Sep 2015 23:58:07 +0200
+Subject: UBI: Validate data_size
+
+From: Richard Weinberger <richard@nod.at>
+
+commit 281fda27673f833a01d516658a64d22a32c8e072 upstream.
+
+Make sure that data_size is less than LEB size.
+Otherwise a handcrafted UBI image is able to trigger
+an out of bounds memory access in ubi_compare_lebs().
+
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Reviewed-by: David Gstir <david@sigma-star.at>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mtd/ubi/io.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/mtd/ubi/io.c
++++ b/drivers/mtd/ubi/io.c
+@@ -926,6 +926,11 @@ static int validate_vid_hdr(const struct
+ goto bad;
+ }
+
++ if (data_size > ubi->leb_size) {
++ ubi_err(ubi, "bad data_size");
++ goto bad;
++ }
++
+ if (vol_type == UBI_VID_STATIC) {
+ /*
+ * Although from high-level point of view static volumes may
--- /dev/null
+From cf6f54e3f133229f02a90c04fe0ff9dd9d3264b4 Mon Sep 17 00:00:00 2001
+From: Richard Weinberger <richard@nod.at>
+Date: Wed, 8 Jul 2015 11:46:36 +0200
+Subject: UBIFS: Kill unneeded locking in ubifs_init_security
+
+From: Richard Weinberger <richard@nod.at>
+
+commit cf6f54e3f133229f02a90c04fe0ff9dd9d3264b4 upstream.
+
+Fixes the following lockdep splat:
+[ 1.244527] =============================================
+[ 1.245193] [ INFO: possible recursive locking detected ]
+[ 1.245193] 4.2.0-rc1+ #37 Not tainted
+[ 1.245193] ---------------------------------------------
+[ 1.245193] cp/742 is trying to acquire lock:
+[ 1.245193] (&sb->s_type->i_mutex_key#9){+.+.+.}, at: [<ffffffff812b3f69>] ubifs_init_security+0x29/0xb0
+[ 1.245193]
+[ 1.245193] but task is already holding lock:
+[ 1.245193] (&sb->s_type->i_mutex_key#9){+.+.+.}, at: [<ffffffff81198e7f>] path_openat+0x3af/0x1280
+[ 1.245193]
+[ 1.245193] other info that might help us debug this:
+[ 1.245193] Possible unsafe locking scenario:
+[ 1.245193]
+[ 1.245193] CPU0
+[ 1.245193] ----
+[ 1.245193] lock(&sb->s_type->i_mutex_key#9);
+[ 1.245193] lock(&sb->s_type->i_mutex_key#9);
+[ 1.245193]
+[ 1.245193] *** DEADLOCK ***
+[ 1.245193]
+[ 1.245193] May be due to missing lock nesting notation
+[ 1.245193]
+[ 1.245193] 2 locks held by cp/742:
+[ 1.245193] #0: (sb_writers#5){.+.+.+}, at: [<ffffffff811ad37f>] mnt_want_write+0x1f/0x50
+[ 1.245193] #1: (&sb->s_type->i_mutex_key#9){+.+.+.}, at: [<ffffffff81198e7f>] path_openat+0x3af/0x1280
+[ 1.245193]
+[ 1.245193] stack backtrace:
+[ 1.245193] CPU: 2 PID: 742 Comm: cp Not tainted 4.2.0-rc1+ #37
+[ 1.245193] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140816_022509-build35 04/01/2014
+[ 1.245193] ffffffff8252d530 ffff88007b023a38 ffffffff814f6f49 ffffffff810b56c5
+[ 1.245193] ffff88007c30cc80 ffff88007b023af8 ffffffff810a150d ffff88007b023a68
+[ 1.245193] 000000008101302a ffff880000000000 00000008f447e23f ffffffff8252d500
+[ 1.245193] Call Trace:
+[ 1.245193] [<ffffffff814f6f49>] dump_stack+0x4c/0x65
+[ 1.245193] [<ffffffff810b56c5>] ? console_unlock+0x1c5/0x510
+[ 1.245193] [<ffffffff810a150d>] __lock_acquire+0x1a6d/0x1ea0
+[ 1.245193] [<ffffffff8109fa78>] ? __lock_is_held+0x58/0x80
+[ 1.245193] [<ffffffff810a1a93>] lock_acquire+0xd3/0x270
+[ 1.245193] [<ffffffff812b3f69>] ? ubifs_init_security+0x29/0xb0
+[ 1.245193] [<ffffffff814fc83b>] mutex_lock_nested+0x6b/0x3a0
+[ 1.245193] [<ffffffff812b3f69>] ? ubifs_init_security+0x29/0xb0
+[ 1.245193] [<ffffffff812b3f69>] ? ubifs_init_security+0x29/0xb0
+[ 1.245193] [<ffffffff812b3f69>] ubifs_init_security+0x29/0xb0
+[ 1.245193] [<ffffffff8128e286>] ubifs_create+0xa6/0x1f0
+[ 1.245193] [<ffffffff81198e7f>] ? path_openat+0x3af/0x1280
+[ 1.245193] [<ffffffff81195d15>] vfs_create+0x95/0xc0
+[ 1.245193] [<ffffffff8119929c>] path_openat+0x7cc/0x1280
+[ 1.245193] [<ffffffff8109ffe3>] ? __lock_acquire+0x543/0x1ea0
+[ 1.245193] [<ffffffff81088f20>] ? sched_clock_cpu+0x90/0xc0
+[ 1.245193] [<ffffffff81088c00>] ? calc_global_load_tick+0x60/0x90
+[ 1.245193] [<ffffffff81088f20>] ? sched_clock_cpu+0x90/0xc0
+[ 1.245193] [<ffffffff811a9cef>] ? __alloc_fd+0xaf/0x180
+[ 1.245193] [<ffffffff8119ac55>] do_filp_open+0x75/0xd0
+[ 1.245193] [<ffffffff814ffd86>] ? _raw_spin_unlock+0x26/0x40
+[ 1.245193] [<ffffffff811a9cef>] ? __alloc_fd+0xaf/0x180
+[ 1.245193] [<ffffffff81189bd9>] do_sys_open+0x129/0x200
+[ 1.245193] [<ffffffff81189cc9>] SyS_open+0x19/0x20
+[ 1.245193] [<ffffffff81500717>] entry_SYSCALL_64_fastpath+0x12/0x6f
+
+While the lockdep splat is a false positive, because path_openat holds i_mutex
+of the parent directory and ubifs_init_security() tries to acquire i_mutex
+of a new inode, it reveals that taking i_mutex in ubifs_init_security() is
+in vain because it is only being called in the inode allocation path
+and therefore nobody else can see the inode yet.
+
+Reported-and-tested-by: Boris Brezillon <boris.brezillon@free-electrons.com>
+Reviewed-and-tested-by: Dongsheng Yang <yangds.fnst@cn.fujitsu.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: dedekind1@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/ubifs/xattr.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/fs/ubifs/xattr.c
++++ b/fs/ubifs/xattr.c
+@@ -652,11 +652,8 @@ int ubifs_init_security(struct inode *de
+ {
+ int err;
+
+- mutex_lock(&inode->i_mutex);
+ err = security_inode_init_security(inode, dentry, qstr,
+ &init_xattrs, 0);
+- mutex_unlock(&inode->i_mutex);
+-
+ if (err) {
+ struct ubifs_info *c = dentry->i_sb->s_fs_info;
+ ubifs_err(c, "cannot initialize security for inode %lu, error %d",