--- /dev/null
+From bf2afee14e07de16d3cafc67edbfc2a3cc65e4bc Mon Sep 17 00:00:00 2001
+From: Ronnie Sahlberg <lsahlber@redhat.com>
+Date: Fri, 8 Sep 2017 10:37:35 +1000
+Subject: cifs: check rsp for NULL before dereferencing in SMB2_open
+
+From: Ronnie Sahlberg <lsahlber@redhat.com>
+
+commit bf2afee14e07de16d3cafc67edbfc2a3cc65e4bc upstream.
+
+In SMB2_open there are several paths where the SendReceive2
+call will return an error before it sets rsp_iov.iov_base,
+thus leaving iov_base uninitialized.
+
+We therefore need to check rsp before we dereference it in
+the call to get_rfc1002_length().
+
+This issue was previously reported at
+http://www.spinics.net/lists/linux-cifs/msg12846.html
+
+RH-bugzilla: 1476151
+
+Version 2:
+* Let's properly initialize rsp_iov before we use it.
+
+Signed-off-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Reviewed-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Reported-by: Xiaoli Feng <xifeng@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2pdu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -1622,7 +1622,7 @@ SMB2_open(const unsigned int xid, struct
+ struct cifs_tcon *tcon = oparms->tcon;
+ struct cifs_ses *ses = tcon->ses;
+ struct kvec iov[4];
+- struct kvec rsp_iov;
++ struct kvec rsp_iov = {NULL, 0};
+ int resp_buftype;
+ int uni_path_len;
+ __le16 *copy_path = NULL;
+@@ -1751,7 +1751,7 @@ SMB2_open(const unsigned int xid, struct
+
+ if (rc != 0) {
+ cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
+- if (err_buf)
++ if (err_buf && rsp)
+ *err_buf = kmemdup(rsp, get_rfc1002_length(rsp) + 4,
+ GFP_KERNEL);
+ goto creat_exit;
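+
+The defensive pattern the fix applies -- initialize the response vector
+up front and NULL-check it before use -- in a minimal, self-contained
+userspace sketch (hypothetical names, not kernel code):
+
+    #include <stdio.h>
+    #include <stddef.h>
+
+    struct kvec { void *iov_base; size_t iov_len; };
+
+    /* stand-in for a transport call that can fail before filling rsp */
+    static int send_receive(struct kvec *rsp, int fail_early)
+    {
+        if (fail_early)
+            return -1;          /* rsp->iov_base left untouched */
+        rsp->iov_base = "response";
+        rsp->iov_len = 8;
+        return 0;
+    }
+
+    int main(void)
+    {
+        struct kvec rsp_iov = { NULL, 0 };      /* always initialize */
+        int rc = send_receive(&rsp_iov, 1);
+
+        if (rc != 0 && rsp_iov.iov_base)        /* check before dereferencing */
+            printf("error reply: %.*s\n", (int)rsp_iov.iov_len,
+                   (char *)rsp_iov.iov_base);
+        return 0;
+    }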
--- /dev/null
+From f5c4ba816315d3b813af16f5571f86c8d4e897bd Mon Sep 17 00:00:00 2001
+From: Shu Wang <shuwang@redhat.com>
+Date: Fri, 8 Sep 2017 18:48:33 +0800
+Subject: cifs: release auth_key.response for reconnect.
+
+From: Shu Wang <shuwang@redhat.com>
+
+commit f5c4ba816315d3b813af16f5571f86c8d4e897bd upstream.
+
+There is a race that causes a cifs reconnect in cifs_mount:
+- cifs_mount
+ - cifs_get_tcp_session
+ - [ start thread cifs_demultiplex_thread
+ - cifs_read_from_socket: -ECONNABORTED
+ - DELAY_WORK smb2_reconnect_server ]
+ - cifs_setup_session
+ - [ smb2_reconnect_server ]
+
+auth_key.response is allocated in cifs_setup_session and
+released when the session is destroyed. So when the session
+reconnects, auth_key.response should be checked and released.
+
+Tested on my system:
+CIFS VFS: Free previous auth_key.response = ffff8800320bbf80
+
+A simple auth_key.response allocation call trace:
+- cifs_setup_session
+- SMB2_sess_setup
+- SMB2_sess_auth_rawntlmssp_authenticate
+- build_ntlmssp_auth_blob
+- setup_ntlmv2_rsp
+
+Signed-off-by: Shu Wang <shuwang@redhat.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/connect.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -4143,6 +4143,14 @@ cifs_setup_session(const unsigned int xi
+ cifs_dbg(FYI, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d\n",
+ server->sec_mode, server->capabilities, server->timeAdj);
+
++ if (ses->auth_key.response) {
++ cifs_dbg(VFS, "Free previous auth_key.response = %p\n",
++ ses->auth_key.response);
++ kfree(ses->auth_key.response);
++ ses->auth_key.response = NULL;
++ ses->auth_key.len = 0;
++ }
++
+ if (server->ops->sess_setup)
+ rc = server->ops->sess_setup(xid, ses, nls_info);
+
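+
+The free-and-reset pattern applied here, as a minimal userspace sketch
+(hypothetical names): before a path that reallocates a buffer, release
+any previous allocation and zero the bookkeeping so nothing leaks and no
+stale pointer survives a reconnect.
+
+    #include <stdlib.h>
+
+    struct session_key { char *response; unsigned int len; };
+
+    static int setup_session(struct session_key *key)
+    {
+        if (key->response) {            /* leftover from a previous setup */
+            free(key->response);
+            key->response = NULL;
+            key->len = 0;
+        }
+        key->response = calloc(1, 16);  /* fresh key material */
+        if (!key->response)
+            return -1;
+        key->len = 16;
+        return 0;
+    }
+
+    int main(void)
+    {
+        struct session_key key = { NULL, 0 };
+
+        setup_session(&key);    /* initial session setup */
+        setup_session(&key);    /* reconnect: old buffer freed first */
+        free(key.response);
+        return 0;
+    }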
--- /dev/null
+From 94183331e815617246b1baa97e0916f358c794bb Mon Sep 17 00:00:00 2001
+From: Shu Wang <shuwang@redhat.com>
+Date: Thu, 7 Sep 2017 16:03:27 +0800
+Subject: cifs: release cifs root_cred after exit_cifs
+
+From: Shu Wang <shuwang@redhat.com>
+
+commit 94183331e815617246b1baa97e0916f358c794bb upstream.
+
+A memory leak was found by kmemleak. exit_cifs_spnego
+should be called before the cifs module is removed, or
+the cifs root_cred will not be released.
+
+kmemleak report:
+unreferenced object 0xffff880070a3ce40 (size 192):
+ backtrace:
+ kmemleak_alloc+0x4a/0xa0
+ kmem_cache_alloc+0xc7/0x1d0
+ prepare_kernel_cred+0x20/0x120
+ init_cifs_spnego+0x2d/0x170 [cifs]
+ 0xffffffffc07801f3
+ do_one_initcall+0x51/0x1b0
+ do_init_module+0x60/0x1fd
+ load_module+0x161e/0x1b60
+ SYSC_finit_module+0xa9/0x100
+ SyS_finit_module+0xe/0x10
+
+Signed-off-by: Shu Wang <shuwang@redhat.com>
+Signed-off-by: Steve French <smfrench@gmail.com>
+Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/cifsfs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -1447,7 +1447,7 @@ exit_cifs(void)
+ exit_cifs_idmap();
+ #endif
+ #ifdef CONFIG_CIFS_UPCALL
+- unregister_key_type(&cifs_spnego_key_type);
++ exit_cifs_spnego();
+ #endif
+ cifs_destroy_request_bufs();
+ cifs_destroy_mids();
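+
+The pairing rule this restores, sketched in userspace C (hypothetical
+names): whatever init_foo() allocates, only its matching exit_foo()
+undoes in full, so teardown must call the pair rather than hand-undoing
+one of its steps.
+
+    #include <stdlib.h>
+
+    static char *root_cred;
+
+    static int init_spnego(void)
+    {
+        root_cred = malloc(192);        /* cred taken at init time */
+        return root_cred ? 0 : -1;
+    }
+
+    static void exit_spnego(void)
+    {
+        free(root_cred);                /* undoes everything init did */
+        root_cred = NULL;
+    }
+
+    int main(void)
+    {
+        if (init_spnego())
+            return 1;
+        /* ... module lifetime ... */
+        exit_spnego();  /* not just one of its sub-steps */
+        return 0;
+    }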
--- /dev/null
+From c3ca015fab6df124c933b91902f3f2a3473f9da5 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Thu, 31 Aug 2017 21:47:43 -0400
+Subject: dax: remove the pmem_dax_ops->flush abstraction
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit c3ca015fab6df124c933b91902f3f2a3473f9da5 upstream.
+
+Commit abebfbe2f731 ("dm: add ->flush() dax operation support") is
+buggy. A DM device may be composed of multiple underlying devices and
+all of them need to be flushed. That commit just routes the flush
+request to the first device and ignores the other devices.
+
+It could be fixed by adding more complex logic to the device mapper. But
+there is only one implementation of the method pmem_dax_ops->flush - that
+is pmem_dax_flush() - and it calls arch_wb_cache_pmem(). Consequently, we
+don't need the pmem_dax_ops->flush abstraction at all, we can call
+arch_wb_cache_pmem() directly from dax_flush() because dax_dev->ops->flush
+can't ever reach anything different from arch_wb_cache_pmem().
+
+It should also be pointed out that some uses of persistent memory need to
+flush only a very small amount of data (such as one cacheline), and going
+through the device mapper machinery for a single flushed cache line would
+be overkill.
+
+Fix this by removing the pmem_dax_ops->flush abstraction and call
+arch_wb_cache_pmem() directly from dax_flush(). Also, remove the device
+mapper code that forwards the flushes.
+
+Fixes: abebfbe2f731 ("dm: add ->flush() dax operation support")
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+Reviewed-by: Dan Williams <dan.j.williams@intel.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dax/super.c | 21 ++++++++++++++-------
+ drivers/md/dm-linear.c | 15 ---------------
+ drivers/md/dm-stripe.c | 20 --------------------
+ drivers/md/dm.c | 19 -------------------
+ drivers/nvdimm/pmem.c | 7 -------
+ fs/dax.c | 4 ++--
+ include/linux/dax.h | 5 +----
+ include/linux/device-mapper.h | 3 ---
+ 8 files changed, 17 insertions(+), 77 deletions(-)
+
+--- a/drivers/dax/super.c
++++ b/drivers/dax/super.c
+@@ -189,8 +189,10 @@ static umode_t dax_visible(struct kobjec
+ if (!dax_dev)
+ return 0;
+
+- if (a == &dev_attr_write_cache.attr && !dax_dev->ops->flush)
++#ifndef CONFIG_ARCH_HAS_PMEM_API
++ if (a == &dev_attr_write_cache.attr)
+ return 0;
++#endif
+ return a->mode;
+ }
+
+@@ -255,18 +257,23 @@ size_t dax_copy_from_iter(struct dax_dev
+ }
+ EXPORT_SYMBOL_GPL(dax_copy_from_iter);
+
+-void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+- size_t size)
++#ifdef CONFIG_ARCH_HAS_PMEM_API
++void arch_wb_cache_pmem(void *addr, size_t size);
++void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
+ {
+- if (!dax_alive(dax_dev))
++ if (unlikely(!dax_alive(dax_dev)))
+ return;
+
+- if (!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags))
++ if (unlikely(!test_bit(DAXDEV_WRITE_CACHE, &dax_dev->flags)))
+ return;
+
+- if (dax_dev->ops->flush)
+- dax_dev->ops->flush(dax_dev, pgoff, addr, size);
++ arch_wb_cache_pmem(addr, size);
++}
++#else
++void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
++{
+ }
++#endif
+ EXPORT_SYMBOL_GPL(dax_flush);
+
+ void dax_write_cache(struct dax_device *dax_dev, bool wc)
+--- a/drivers/md/dm-linear.c
++++ b/drivers/md/dm-linear.c
+@@ -184,20 +184,6 @@ static size_t linear_dax_copy_from_iter(
+ return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
+ }
+
+-static void linear_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr,
+- size_t size)
+-{
+- struct linear_c *lc = ti->private;
+- struct block_device *bdev = lc->dev->bdev;
+- struct dax_device *dax_dev = lc->dev->dax_dev;
+- sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+-
+- dev_sector = linear_map_sector(ti, sector);
+- if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff))
+- return;
+- dax_flush(dax_dev, pgoff, addr, size);
+-}
+-
+ static struct target_type linear_target = {
+ .name = "linear",
+ .version = {1, 4, 0},
+@@ -212,7 +198,6 @@ static struct target_type linear_target
+ .iterate_devices = linear_iterate_devices,
+ .direct_access = linear_dax_direct_access,
+ .dax_copy_from_iter = linear_dax_copy_from_iter,
+- .dax_flush = linear_dax_flush,
+ };
+
+ int __init dm_linear_init(void)
+--- a/drivers/md/dm-stripe.c
++++ b/drivers/md/dm-stripe.c
+@@ -351,25 +351,6 @@ static size_t stripe_dax_copy_from_iter(
+ return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
+ }
+
+-static void stripe_dax_flush(struct dm_target *ti, pgoff_t pgoff, void *addr,
+- size_t size)
+-{
+- sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+- struct stripe_c *sc = ti->private;
+- struct dax_device *dax_dev;
+- struct block_device *bdev;
+- uint32_t stripe;
+-
+- stripe_map_sector(sc, sector, &stripe, &dev_sector);
+- dev_sector += sc->stripe[stripe].physical_start;
+- dax_dev = sc->stripe[stripe].dev->dax_dev;
+- bdev = sc->stripe[stripe].dev->bdev;
+-
+- if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(size, PAGE_SIZE), &pgoff))
+- return;
+- dax_flush(dax_dev, pgoff, addr, size);
+-}
+-
+ /*
+ * Stripe status:
+ *
+@@ -491,7 +472,6 @@ static struct target_type stripe_target
+ .io_hints = stripe_io_hints,
+ .direct_access = stripe_dax_direct_access,
+ .dax_copy_from_iter = stripe_dax_copy_from_iter,
+- .dax_flush = stripe_dax_flush,
+ };
+
+ int __init dm_stripe_init(void)
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -987,24 +987,6 @@ static size_t dm_dax_copy_from_iter(stru
+ return ret;
+ }
+
+-static void dm_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+- size_t size)
+-{
+- struct mapped_device *md = dax_get_private(dax_dev);
+- sector_t sector = pgoff * PAGE_SECTORS;
+- struct dm_target *ti;
+- int srcu_idx;
+-
+- ti = dm_dax_get_live_target(md, sector, &srcu_idx);
+-
+- if (!ti)
+- goto out;
+- if (ti->type->dax_flush)
+- ti->type->dax_flush(ti, pgoff, addr, size);
+- out:
+- dm_put_live_table(md, srcu_idx);
+-}
+-
+ /*
+ * A target may call dm_accept_partial_bio only from the map routine. It is
+ * allowed for all bio types except REQ_PREFLUSH.
+@@ -2992,7 +2974,6 @@ static const struct block_device_operati
+ static const struct dax_operations dm_dax_ops = {
+ .direct_access = dm_dax_direct_access,
+ .copy_from_iter = dm_dax_copy_from_iter,
+- .flush = dm_dax_flush,
+ };
+
+ /*
+--- a/drivers/nvdimm/pmem.c
++++ b/drivers/nvdimm/pmem.c
+@@ -243,16 +243,9 @@ static size_t pmem_copy_from_iter(struct
+ return copy_from_iter_flushcache(addr, bytes, i);
+ }
+
+-static void pmem_dax_flush(struct dax_device *dax_dev, pgoff_t pgoff,
+- void *addr, size_t size)
+-{
+- arch_wb_cache_pmem(addr, size);
+-}
+-
+ static const struct dax_operations pmem_dax_ops = {
+ .direct_access = pmem_dax_direct_access,
+ .copy_from_iter = pmem_copy_from_iter,
+- .flush = pmem_dax_flush,
+ };
+
+ static const struct attribute_group *pmem_attribute_groups[] = {
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -786,7 +786,7 @@ static int dax_writeback_one(struct bloc
+ }
+
+ dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
+- dax_flush(dax_dev, pgoff, kaddr, size);
++ dax_flush(dax_dev, kaddr, size);
+ /*
+ * After we have flushed the cache, we can clear the dirty tag. There
+ * cannot be new dirty data in the pfn after the flush has completed as
+@@ -981,7 +981,7 @@ int __dax_zero_page_range(struct block_d
+ return rc;
+ }
+ memset(kaddr + offset, 0, size);
+- dax_flush(dax_dev, pgoff, kaddr + offset, size);
++ dax_flush(dax_dev, kaddr + offset, size);
+ dax_read_unlock(id);
+ }
+ return 0;
+--- a/include/linux/dax.h
++++ b/include/linux/dax.h
+@@ -19,8 +19,6 @@ struct dax_operations {
+ /* copy_from_iter: required operation for fs-dax direct-i/o */
+ size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
+ struct iov_iter *);
+- /* flush: optional driver-specific cache management after writes */
+- void (*flush)(struct dax_device *, pgoff_t, void *, size_t);
+ };
+
+ extern struct attribute_group dax_attribute_group;
+@@ -84,8 +82,7 @@ long dax_direct_access(struct dax_device
+ void **kaddr, pfn_t *pfn);
+ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+ size_t bytes, struct iov_iter *i);
+-void dax_flush(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
+- size_t size);
++void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
+ void dax_write_cache(struct dax_device *dax_dev, bool wc);
+ bool dax_write_cache_enabled(struct dax_device *dax_dev);
+
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -134,8 +134,6 @@ typedef long (*dm_dax_direct_access_fn)
+ long nr_pages, void **kaddr, pfn_t *pfn);
+ typedef size_t (*dm_dax_copy_from_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
+ void *addr, size_t bytes, struct iov_iter *i);
+-typedef void (*dm_dax_flush_fn)(struct dm_target *ti, pgoff_t pgoff, void *addr,
+- size_t size);
+ #define PAGE_SECTORS (PAGE_SIZE / 512)
+
+ void dm_error(const char *message);
+@@ -186,7 +184,6 @@ struct target_type {
+ dm_io_hints_fn io_hints;
+ dm_dax_direct_access_fn direct_access;
+ dm_dax_copy_from_iter_fn dax_copy_from_iter;
+- dm_dax_flush_fn dax_flush;
+
+ /* For internal device-mapper use. */
+ struct list_head list;
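+
+The shape dax_flush() takes after this change, as a compile-time sketch
+(stubbed bodies, not the kernel implementation): with exactly one
+possible flush implementation, the ->flush() indirection is replaced by
+a direct call selected by the config option.
+
+    #include <stdio.h>
+    #include <stddef.h>
+
+    #ifdef CONFIG_ARCH_HAS_PMEM_API
+    /* stand-in for the arch cache-writeback primitive */
+    static void arch_wb_cache_pmem(void *addr, size_t size)
+    {
+        printf("writeback %zu bytes at %p\n", size, addr);
+    }
+
+    static void dax_flush(void *addr, size_t size)
+    {
+        arch_wb_cache_pmem(addr, size); /* direct call, no ->flush() hop */
+    }
+    #else
+    static void dax_flush(void *addr, size_t size)
+    {
+        (void)addr;
+        (void)size;                     /* no pmem API: nothing to do */
+    }
+    #endif
+
+    int main(void)
+    {
+        char buf[64];
+        dax_flush(buf, sizeof(buf));    /* build with -DCONFIG_ARCH_HAS_PMEM_API */
+        return 0;
+    }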
--- /dev/null
+From b7e326f7b7375392d06f9cfbc27a7c63181f69d7 Mon Sep 17 00:00:00 2001
+From: Hyunchul Lee <cheol.lee@lge.com>
+Date: Mon, 31 Jul 2017 16:22:20 +0900
+Subject: dm integrity: do not check integrity for failed read operations
+
+From: Hyunchul Lee <cheol.lee@lge.com>
+
+commit b7e326f7b7375392d06f9cfbc27a7c63181f69d7 upstream.
+
+Even when read operations fail, dm_integrity_map_continue() calls
+integrity_metadata() to check integrity. In this case, just complete
+the I/O instead.
+
+This also makes it so read I/O errors do not generate integrity warnings
+in the kernel log.
+
+Signed-off-by: Hyunchul Lee <cheol.lee@lge.com>
+Acked-by: Milan Broz <gmazyland@gmail.com>
+Acked-by: Mikulas Patocka <mpatocka@redhat.com>
+Signed-off-by: Mike Snitzer <snitzer@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/dm-integrity.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -1697,7 +1697,11 @@ sleep:
+
+ if (need_sync_io) {
+ wait_for_completion_io(&read_comp);
+- integrity_metadata(&dio->work);
++ if (likely(!bio->bi_status))
++ integrity_metadata(&dio->work);
++ else
++ dec_in_flight(dio);
++
+ } else {
+ INIT_WORK(&dio->work, integrity_metadata);
+ queue_work(ic->metadata_wq, &dio->work);
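+
+The control-flow change in miniature (userspace sketch, hypothetical
+names): only verify data that was actually read; on a failed read,
+just drop the in-flight count and complete.
+
+    #include <stdio.h>
+
+    static int in_flight = 1;
+
+    static void verify_integrity(void) { printf("verifying metadata\n"); }
+    static void dec_in_flight(void)    { in_flight--; }
+
+    static void complete_read(int status)
+    {
+        if (status == 0)        /* read succeeded: worth checking */
+            verify_integrity();
+        else                    /* failed read: nothing to verify */
+            dec_in_flight();
+    }
+
+    int main(void)
+    {
+        complete_read(-5);      /* e.g. -EIO: no integrity warning logged */
+        return 0;
+    }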
--- /dev/null
+From fd7d56270b526ca3ed0c224362e3c64a0f86687a Mon Sep 17 00:00:00 2001
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 14 Sep 2017 11:42:17 +0200
+Subject: fs/proc: Report eip/esp in /proc/PID/stat for coredumping
+
+From: John Ogness <john.ogness@linutronix.de>
+
+commit fd7d56270b526ca3ed0c224362e3c64a0f86687a upstream.
+
+Commit 0a1eb2d474ed ("fs/proc: Stop reporting eip and esp in
+/proc/PID/stat") stopped reporting eip/esp because it is
+racy and dangerous for executing tasks. The comment adds:
+
+  As far as I know, there are no user programs that make any
+ material use of these fields, so just get rid of them.
+
+However, existing userspace core-dump-handler applications (for
+example, minicoredumper) are using these fields since they
+provide an excellent cross-platform interface to these valuable
+pointers. So that commit introduced a user space visible
+regression.
+
+Partially revert the change and make the readout possible for
+tasks with the proper permissions and only if the target task
+has the PF_DUMPCORE flag set.
+
+Fixes: 0a1eb2d474ed ("fs/proc: Stop reporting eip and esp in /proc/PID/stat")
+Reported-by: Marco Felsch <marco.felsch@preh.de>
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Reviewed-by: Andy Lutomirski <luto@kernel.org>
+Cc: Tycho Andersen <tycho.andersen@canonical.com>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Brian Gerst <brgerst@gmail.com>
+Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Al Viro <viro@zeniv.linux.org.uk>
+Cc: Linux API <linux-api@vger.kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Link: http://lkml.kernel.org/r/87poatfwg6.fsf@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/proc/array.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -62,6 +62,7 @@
+ #include <linux/mman.h>
+ #include <linux/sched/mm.h>
+ #include <linux/sched/numa_balancing.h>
++#include <linux/sched/task_stack.h>
+ #include <linux/sched/task.h>
+ #include <linux/sched/cputime.h>
+ #include <linux/proc_fs.h>
+@@ -421,7 +422,15 @@ static int do_task_stat(struct seq_file
+ * esp and eip are intentionally zeroed out. There is no
+ * non-racy way to read them without freezing the task.
+ * Programs that need reliable values can use ptrace(2).
++ *
++ * The only exception is if the task is core dumping because
++ * a program is not able to use ptrace(2) in that case. It is
++ * safe because the task has stopped executing permanently.
+ */
++ if (permitted && (task->flags & PF_DUMPCORE)) {
++ eip = KSTK_EIP(task);
++ esp = KSTK_ESP(task);
++ }
+ }
+
+ get_task_comm(tcomm, task);
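+
+What a core-dump handler does with these fields, as a small userspace
+sketch: pull kstkesp (field 29) and kstkeip (field 30) out of
+/proc/PID/stat. With this patch they read back non-zero only while the
+target is core dumping and the reader is permitted.
+
+    #include <stdio.h>
+    #include <string.h>
+
+    int main(int argc, char **argv)
+    {
+        char path[64], buf[4096];
+        unsigned long long esp = 0, eip = 0;
+        FILE *f;
+        char *p;
+        int field;
+
+        snprintf(path, sizeof(path), "/proc/%s/stat",
+                 argc > 1 ? argv[1] : "self");
+        f = fopen(path, "r");
+        if (!f || !fgets(buf, sizeof(buf), f))
+            return 1;
+        fclose(f);
+
+        /* comm (field 2) may contain spaces: skip past the closing ')' */
+        p = strrchr(buf, ')');
+        if (!p)
+            return 1;
+        for (field = 3, p = strtok(p + 1, " "); p;
+             p = strtok(NULL, " "), field++) {
+            if (field == 29)
+                sscanf(p, "%llu", &esp);
+            else if (field == 30)
+                sscanf(p, "%llu", &eip);
+        }
+        printf("esp=%#llx eip=%#llx\n", esp, eip);
+        return 0;
+    }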
--- /dev/null
+From 01f5bbd17a8066b58dba9b5049fad504bce67322 Mon Sep 17 00:00:00 2001
+From: Adrian Hunter <adrian.hunter@intel.com>
+Date: Thu, 7 Sep 2017 10:40:35 +0300
+Subject: mmc: block: Fix incorrectly initialized requests
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+commit 01f5bbd17a8066b58dba9b5049fad504bce67322 upstream.
+
+mmc_init_request() depends on card->bouncesz so it must be calculated
+before blk_init_allocated_queue() starts allocating requests.
+
+Reported-by: Seraphime Kirkovski <kirkseraph@gmail.com>
+Fixes: 304419d8a7e9 ("mmc: core: Allocate per-request data using the..")
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Tested-by: Seraphime Kirkovski <kirkseraph@gmail.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Tested-by: Pavel Machek <pavel@ucw.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/core/queue.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/mmc/core/queue.c
++++ b/drivers/mmc/core/queue.c
+@@ -242,6 +242,12 @@ int mmc_init_queue(struct mmc_queue *mq,
+ if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+ limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+
++ /*
++ * mmc_init_request() depends on card->bouncesz so it must be calculated
++ * before blk_init_allocated_queue() starts allocating requests.
++ */
++ card->bouncesz = mmc_queue_calc_bouncesz(host);
++
+ mq->card = card;
+ mq->queue = blk_alloc_queue(GFP_KERNEL);
+ if (!mq->queue)
+@@ -265,7 +271,6 @@ int mmc_init_queue(struct mmc_queue *mq,
+ if (mmc_can_erase(card))
+ mmc_queue_setup_discard(mq->queue, card);
+
+- card->bouncesz = mmc_queue_calc_bouncesz(host);
+ if (card->bouncesz) {
+ blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
+ blk_queue_max_segments(mq->queue, card->bouncesz / 512);
--- /dev/null
+From 92dc689563170b90ba844b8a2eb95e8a5eda2e83 Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Mon, 11 Sep 2017 12:08:43 -0400
+Subject: nvme-pci: fix host memory buffer allocation fallback
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit 92dc689563170b90ba844b8a2eb95e8a5eda2e83 upstream.
+
+nvme_alloc_host_mem currently contains two loops that are intertwined,
+and the outer retry loop turns out to be broken. Fix this by untangling
+the two.
+
+Based on a report and an initial patch from Akinobu Mita.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reported-by: Akinobu Mita <akinobu.mita@gmail.com>
+Tested-by: Akinobu Mita <akinobu.mita@gmail.com>
+Reviewed-by: Keith Busch <keith.busch@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/host/pci.c | 48 ++++++++++++++++++++++++++++++------------------
+ 1 file changed, 30 insertions(+), 18 deletions(-)
+
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1609,18 +1609,16 @@ static void nvme_free_host_mem(struct nv
+ dev->host_mem_descs = NULL;
+ }
+
+-static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
++static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
++ u32 chunk_size)
+ {
+ struct nvme_host_mem_buf_desc *descs;
+- u32 chunk_size, max_entries, len;
++ u32 max_entries, len;
+ dma_addr_t descs_dma;
+ int i = 0;
+ void **bufs;
+ u64 size = 0, tmp;
+
+- /* start big and work our way down */
+- chunk_size = min(preferred, (u64)PAGE_SIZE << MAX_ORDER);
+-retry:
+ tmp = (preferred + chunk_size - 1);
+ do_div(tmp, chunk_size);
+ max_entries = tmp;
+@@ -1647,15 +1645,9 @@ retry:
+ i++;
+ }
+
+- if (!size || (min && size < min)) {
+- dev_warn(dev->ctrl.device,
+- "failed to allocate host memory buffer.\n");
++ if (!size)
+ goto out_free_bufs;
+- }
+
+- dev_info(dev->ctrl.device,
+- "allocated %lld MiB host memory buffer.\n",
+- size >> ilog2(SZ_1M));
+ dev->nr_host_mem_descs = i;
+ dev->host_mem_size = size;
+ dev->host_mem_descs = descs;
+@@ -1676,15 +1668,28 @@ out_free_descs:
+ dma_free_coherent(dev->dev, max_entries * sizeof(*descs), descs,
+ descs_dma);
+ out:
+- /* try a smaller chunk size if we failed early */
+- if (chunk_size >= PAGE_SIZE * 2 && (i == 0 || size < min)) {
+- chunk_size /= 2;
+- goto retry;
+- }
+ dev->host_mem_descs = NULL;
+ return -ENOMEM;
+ }
+
++static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred)
++{
++ u32 chunk_size;
++
++ /* start big and work our way down */
++ for (chunk_size = min_t(u64, preferred, PAGE_SIZE << MAX_ORDER);
++ chunk_size >= PAGE_SIZE * 2;
++ chunk_size /= 2) {
++ if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
++ if (!min || dev->host_mem_size >= min)
++ return 0;
++ nvme_free_host_mem(dev);
++ }
++ }
++
++ return -ENOMEM;
++}
++
+ static void nvme_setup_host_mem(struct nvme_dev *dev)
+ {
+ u64 max = (u64)max_host_mem_size_mb * SZ_1M;
+@@ -1712,8 +1717,15 @@ static void nvme_setup_host_mem(struct n
+ }
+
+ if (!dev->host_mem_descs) {
+- if (nvme_alloc_host_mem(dev, min, preferred))
++ if (nvme_alloc_host_mem(dev, min, preferred)) {
++ dev_warn(dev->ctrl.device,
++ "failed to allocate host memory buffer.\n");
+ return;
++ }
++
++ dev_info(dev->ctrl.device,
++ "allocated %lld MiB host memory buffer.\n",
++ dev->host_mem_size >> ilog2(SZ_1M));
+ }
+
+ if (nvme_set_host_mem(dev, enable_bits))
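+
+The untangled retry loop in miniature (userspace sketch with made-up
+sizes): the inner helper attempts one chunk size, and the outer loop
+halves the chunk size until the result meets the minimum or the sizes
+run out -- the logic the broken goto-based retry was trying to express.
+
+    #include <stdio.h>
+
+    static unsigned long allocated;
+
+    /* pretend only chunks of 64 bytes or smaller can be allocated */
+    static int try_alloc(unsigned long preferred, unsigned long chunk)
+    {
+        if (chunk > 64)
+            return -1;
+        allocated = preferred - preferred % chunk;  /* whole chunks only */
+        return 0;
+    }
+
+    static int alloc_host_mem(unsigned long min, unsigned long preferred)
+    {
+        unsigned long chunk;
+
+        /* start big and work our way down */
+        for (chunk = 256; chunk >= 2; chunk /= 2) {
+            if (!try_alloc(preferred, chunk)) {
+                if (!min || allocated >= min)
+                    return 0;
+                allocated = 0;  /* too small: free and retry smaller */
+            }
+        }
+        return -1;
+    }
+
+    int main(void)
+    {
+        if (!alloc_host_mem(100, 200))
+            printf("allocated %lu bytes\n", allocated);
+        return 0;
+    }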
--- /dev/null
+From 9620cfba97a8b88ae91f0e275e8ff110b578bb6e Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Wed, 6 Sep 2017 12:19:57 +0200
+Subject: nvme-pci: propagate (some) errors from host memory buffer setup
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit 9620cfba97a8b88ae91f0e275e8ff110b578bb6e upstream.
+
+We want to catch command execution errors when resetting the device, so
+propagate errors from the Set Features when setting up the host memory
+buffer. We keep ignoring memory allocation failures, as the spec
+clearly says that the controller must work without a host memory buffer.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Keith Busch <keith.busch@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/host/pci.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1690,12 +1690,13 @@ static int nvme_alloc_host_mem(struct nv
+ return -ENOMEM;
+ }
+
+-static void nvme_setup_host_mem(struct nvme_dev *dev)
++static int nvme_setup_host_mem(struct nvme_dev *dev)
+ {
+ u64 max = (u64)max_host_mem_size_mb * SZ_1M;
+ u64 preferred = (u64)dev->ctrl.hmpre * 4096;
+ u64 min = (u64)dev->ctrl.hmmin * 4096;
+ u32 enable_bits = NVME_HOST_MEM_ENABLE;
++ int ret = 0;
+
+ preferred = min(preferred, max);
+ if (min > max) {
+@@ -1703,7 +1704,7 @@ static void nvme_setup_host_mem(struct n
+ "min host memory (%lld MiB) above limit (%d MiB).\n",
+ min >> ilog2(SZ_1M), max_host_mem_size_mb);
+ nvme_free_host_mem(dev);
+- return;
++ return 0;
+ }
+
+ /*
+@@ -1720,7 +1721,7 @@ static void nvme_setup_host_mem(struct n
+ if (nvme_alloc_host_mem(dev, min, preferred)) {
+ dev_warn(dev->ctrl.device,
+ "failed to allocate host memory buffer.\n");
+- return;
++ return 0; /* controller must work without HMB */
+ }
+
+ dev_info(dev->ctrl.device,
+@@ -1728,8 +1729,10 @@ static void nvme_setup_host_mem(struct n
+ dev->host_mem_size >> ilog2(SZ_1M));
+ }
+
+- if (nvme_set_host_mem(dev, enable_bits))
++ ret = nvme_set_host_mem(dev, enable_bits);
++ if (ret)
+ nvme_free_host_mem(dev);
++ return ret;
+ }
+
+ static int nvme_setup_io_queues(struct nvme_dev *dev)
+@@ -2173,8 +2176,11 @@ static void nvme_reset_work(struct work_
+ "unable to allocate dma for dbbuf\n");
+ }
+
+- if (dev->ctrl.hmpre)
+- nvme_setup_host_mem(dev);
++ if (dev->ctrl.hmpre) {
++ result = nvme_setup_host_mem(dev);
++ if (result < 0)
++ goto out;
++ }
+
+ result = nvme_setup_io_queues(dev);
+ if (result)
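+
+The resulting error policy, reduced to a userspace sketch (hypothetical
+names and return values): allocation failure stays non-fatal, while a
+failed Set Features command is propagated to the reset path.
+
+    #include <stdio.h>
+
+    static int alloc_hmb(void)        { return -12; } /* -ENOMEM: tolerated */
+    static int set_features_hmb(void) { return 0; }
+
+    static int setup_host_mem(void)
+    {
+        if (alloc_hmb()) {
+            printf("failed to allocate host memory buffer\n");
+            return 0;       /* spec: controller must work without HMB */
+        }
+        return set_features_hmb();  /* command errors are propagated */
+    }
+
+    int main(void)
+    {
+        return setup_host_mem() ? 1 : 0;
+    }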
--- /dev/null
+From 30f92d62e5b41a94de2d0bbd677a6ea2fcfed74f Mon Sep 17 00:00:00 2001
+From: Akinobu Mita <akinobu.mita@gmail.com>
+Date: Wed, 6 Sep 2017 12:15:31 +0200
+Subject: nvme-pci: use appropriate initial chunk size for HMB allocation
+
+From: Akinobu Mita <akinobu.mita@gmail.com>
+
+commit 30f92d62e5b41a94de2d0bbd677a6ea2fcfed74f upstream.
+
+The initial chunk size for host memory buffer allocation is currently
+PAGE_SIZE << MAX_ORDER. An allocation of MAX_ORDER order usually fails
+without CONFIG_DMA_CMA, so in general the HMB allocation is retried with
+chunk size PAGE_SIZE << (MAX_ORDER - 1); there is no problem as long as
+the retried allocation works correctly.
+
+Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
+[hch: rebased]
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Keith Busch <keith.busch@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/nvme/host/pci.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -1677,7 +1677,7 @@ static int nvme_alloc_host_mem(struct nv
+ u32 chunk_size;
+
+ /* start big and work our way down */
+- for (chunk_size = min_t(u64, preferred, PAGE_SIZE << MAX_ORDER);
++ for (chunk_size = min_t(u64, preferred, PAGE_SIZE * MAX_ORDER_NR_PAGES);
+ chunk_size >= PAGE_SIZE * 2;
+ chunk_size /= 2) {
+ if (!__nvme_alloc_host_mem(dev, preferred, chunk_size)) {
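+
+The arithmetic behind the one-liner, assuming the usual definitions
+(MAX_ORDER_NR_PAGES == 1UL << (MAX_ORDER - 1)): the new initial chunk is
+half the old one, i.e. the largest size the page allocator can actually
+satisfy, since valid orders run from 0 to MAX_ORDER - 1.
+
+    #include <stdio.h>
+
+    #define PAGE_SIZE           4096UL
+    #define MAX_ORDER           11
+    #define MAX_ORDER_NR_PAGES  (1UL << (MAX_ORDER - 1))
+
+    int main(void)
+    {
+        printf("old: %lu bytes (order %d, beyond the allocator)\n",
+               PAGE_SIZE << MAX_ORDER, MAX_ORDER);
+        printf("new: %lu bytes (order %d, largest valid)\n",
+               PAGE_SIZE * MAX_ORDER_NR_PAGES, MAX_ORDER - 1);
+        return 0;
+    }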
--- /dev/null
+From b468b6a4969f9bdddb31d484f151bfa03fbee767 Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Thu, 7 Sep 2017 13:54:36 +0200
+Subject: scsi: scsi_transport_fc: fix NULL pointer dereference in fc_bsg_job_timeout
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit b468b6a4969f9bdddb31d484f151bfa03fbee767 upstream.
+
+bsg-lib now embeds the job structure into the request, and
+req->special can't be used anymore.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/scsi_transport_fc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/scsi/scsi_transport_fc.c
++++ b/drivers/scsi/scsi_transport_fc.c
+@@ -3550,7 +3550,7 @@ fc_vport_sched_delete(struct work_struct
+ static enum blk_eh_timer_return
+ fc_bsg_job_timeout(struct request *req)
+ {
+- struct bsg_job *job = (void *) req->special;
++ struct bsg_job *job = blk_mq_rq_to_pdu(req);
+ struct Scsi_Host *shost = fc_bsg_to_shost(job);
+ struct fc_rport *rport = fc_bsg_to_rport(job);
+ struct fc_internal *i = to_fc_internal(shost->transportt);
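+
+The accessor pattern behind blk_mq_rq_to_pdu(), in a minimal userspace
+sketch: when the driver-private structure is embedded directly after the
+request in one allocation, its address is computed from the request
+pointer instead of being stashed in a side pointer like req->special.
+
+    #include <stdio.h>
+    #include <stdlib.h>
+
+    struct request { int tag; /* pdu follows in the same allocation */ };
+    struct bsg_job { int timeout; };
+
+    static void *rq_to_pdu(struct request *rq)
+    {
+        return rq + 1;      /* pdu sits right after the request */
+    }
+
+    int main(void)
+    {
+        struct request *rq = malloc(sizeof(struct request) +
+                                    sizeof(struct bsg_job));
+        struct bsg_job *job;
+
+        if (!rq)
+            return 1;
+        job = rq_to_pdu(rq);
+        job->timeout = 30;
+        printf("job timeout: %d\n", job->timeout);
+        free(rq);
+        return 0;
+    }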
--- /dev/null
+From 9764c02fcbad40001fd3f63558d918e4d519bb75 Mon Sep 17 00:00:00 2001
+From: Steve French <smfrench@gmail.com>
+Date: Sun, 17 Sep 2017 10:41:35 -0500
+Subject: SMB3: Add support for multidialect negotiate (SMB2.1 and later)
+
+From: Steve French <smfrench@gmail.com>
+
+commit 9764c02fcbad40001fd3f63558d918e4d519bb75 upstream.
+
+To discourage use of the less secure SMB1 (CIFS) dialect, we
+temporarily upgraded the default dialect to SMB3 in 4.13. But since
+there are various servers which only support SMB2.1 (2.1 is more
+secure than CIFS/SMB1, although not optimal for a default dialect),
+add support for multidialect negotiation. cifs.ko will now request
+SMB2.1 or later (i.e. SMB2.1, SMB3.0, or SMB3.02) and the server
+will pick the latest and most secure one it can support.
+
+In addition, since we are sending a multidialect negotiate, add
+support for secure negotiate to validate that a man in the
+middle didn't downgrade us.
+
+Signed-off-by: Steve French <smfrench@gmail.com>
+Reviewed-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
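+
+The post-negotiate sanity check this adds, in miniature (userspace
+sketch with the SMB2/SMB3 dialect IDs): when several dialects are
+offered, the server's reply must be one of them, otherwise the exchange
+was tampered with. The patch open-codes this per version string.
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        unsigned short offered[] = { 0x0210, 0x0300, 0x0302 }; /* 2.1, 3.0, 3.02 */
+        unsigned short returned = 0x0202;   /* SMB2.0.2: never offered */
+        int i, ok = 0;
+
+        for (i = 0; i < 3; i++)
+            if (offered[i] == returned)
+                ok = 1;
+        if (!ok)
+            printf("dialect 0x%x returned but not requested\n", returned);
+        return 0;
+    }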
+
+---
+ fs/cifs/cifsglob.h | 6 +++
+ fs/cifs/connect.c | 24 ++++++++++----
+ fs/cifs/smb2ops.c | 40 ++++++++++++++++++++++++
+ fs/cifs/smb2pdu.c | 85 ++++++++++++++++++++++++++++++++++++++++++++++-------
+ fs/cifs/smb2pdu.h | 2 -
+ 5 files changed, 139 insertions(+), 18 deletions(-)
+
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -188,6 +188,8 @@ enum smb_version {
+ #ifdef CONFIG_CIFS_SMB311
+ Smb_311,
+ #endif /* SMB311 */
++ Smb_3any,
++ Smb_default,
+ Smb_version_err
+ };
+
+@@ -1701,6 +1703,10 @@ extern struct smb_version_values smb20_v
+ #define SMB21_VERSION_STRING "2.1"
+ extern struct smb_version_operations smb21_operations;
+ extern struct smb_version_values smb21_values;
++#define SMBDEFAULT_VERSION_STRING "default"
++extern struct smb_version_values smbdefault_values;
++#define SMB3ANY_VERSION_STRING "3"
++extern struct smb_version_values smb3any_values;
+ #define SMB30_VERSION_STRING "3.0"
+ extern struct smb_version_operations smb30_operations;
+ extern struct smb_version_values smb30_values;
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -301,6 +301,8 @@ static const match_table_t cifs_smb_vers
+ { Smb_311, SMB311_VERSION_STRING },
+ { Smb_311, ALT_SMB311_VERSION_STRING },
+ #endif /* SMB311 */
++ { Smb_3any, SMB3ANY_VERSION_STRING },
++ { Smb_default, SMBDEFAULT_VERSION_STRING },
+ { Smb_version_err, NULL }
+ };
+
+@@ -1147,6 +1149,14 @@ cifs_parse_smb_version(char *value, stru
+ vol->vals = &smb311_values;
+ break;
+ #endif /* SMB311 */
++ case Smb_3any:
++ vol->ops = &smb30_operations; /* currently identical with 3.0 */
++ vol->vals = &smb3any_values;
++ break;
++ case Smb_default:
++ vol->ops = &smb30_operations; /* currently identical with 3.0 */
++ vol->vals = &smbdefault_values;
++ break;
+ default:
+ cifs_dbg(VFS, "Unknown vers= option specified: %s\n", value);
+ return 1;
+@@ -1273,9 +1283,9 @@ cifs_parse_mount_options(const char *mou
+
+ vol->actimeo = CIFS_DEF_ACTIMEO;
+
+- /* FIXME: add autonegotiation for SMB3 or later rather than just SMB3 */
+- vol->ops = &smb30_operations; /* both secure and accepted widely */
+- vol->vals = &smb30_values;
++ /* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */
++ vol->ops = &smb30_operations;
++ vol->vals = &smbdefault_values;
+
+ vol->echo_interval = SMB_ECHO_INTERVAL_DEFAULT;
+
+@@ -1987,11 +1997,10 @@ cifs_parse_mount_options(const char *mou
+
+ if (got_version == false)
+ pr_warn("No dialect specified on mount. Default has changed to "
+- "a more secure dialect, SMB3 (vers=3.0), from CIFS "
++ "a more secure dialect, SMB2.1 or later (e.g. SMB3), from CIFS "
+ "(SMB1). To use the less secure SMB1 dialect to access "
+- "old servers which do not support SMB3 specify vers=1.0"
+- " on mount. For somewhat newer servers such as Windows "
+- "7 try vers=2.1.\n");
++ "old servers which do not support SMB3 (or SMB2.1) specify vers=1.0"
++ " on mount.\n");
+
+ kfree(mountdata_copy);
+ return 0;
+@@ -2132,6 +2141,7 @@ static int match_server(struct TCP_Serve
+ if (vol->nosharesock)
+ return 0;
+
++ /* BB update this for smb3any and default case */
+ if ((server->vals != vol->vals) || (server->ops != vol->ops))
+ return 0;
+
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -2906,6 +2906,46 @@ struct smb_version_values smb21_values =
+ .create_lease_size = sizeof(struct create_lease),
+ };
+
++struct smb_version_values smb3any_values = {
++ .version_string = SMB3ANY_VERSION_STRING,
++ .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
++ .large_lock_type = 0,
++ .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
++ .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
++ .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
++ .header_size = sizeof(struct smb2_hdr),
++ .max_header_size = MAX_SMB2_HDR_SIZE,
++ .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++ .lock_cmd = SMB2_LOCK,
++ .cap_unix = 0,
++ .cap_nt_find = SMB2_NT_FIND,
++ .cap_large_files = SMB2_LARGE_FILES,
++ .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
++ .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
++ .create_lease_size = sizeof(struct create_lease_v2),
++};
++
++struct smb_version_values smbdefault_values = {
++ .version_string = SMBDEFAULT_VERSION_STRING,
++ .protocol_id = SMB302_PROT_ID, /* doesn't matter, send protocol array */
++ .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
++ .large_lock_type = 0,
++ .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
++ .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
++ .unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
++ .header_size = sizeof(struct smb2_hdr),
++ .max_header_size = MAX_SMB2_HDR_SIZE,
++ .read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
++ .lock_cmd = SMB2_LOCK,
++ .cap_unix = 0,
++ .cap_nt_find = SMB2_NT_FIND,
++ .cap_large_files = SMB2_LARGE_FILES,
++ .signing_enabled = SMB2_NEGOTIATE_SIGNING_ENABLED | SMB2_NEGOTIATE_SIGNING_REQUIRED,
++ .signing_required = SMB2_NEGOTIATE_SIGNING_REQUIRED,
++ .create_lease_size = sizeof(struct create_lease_v2),
++};
++
+ struct smb_version_values smb30_values = {
+ .version_string = SMB30_VERSION_STRING,
+ .protocol_id = SMB30_PROT_ID,
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -479,10 +479,25 @@ SMB2_negotiate(const unsigned int xid, s
+
+ req->hdr.sync_hdr.SessionId = 0;
+
+- req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
+-
+- req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */
+- inc_rfc1001_len(req, 2);
++ if (strcmp(ses->server->vals->version_string,
++ SMB3ANY_VERSION_STRING) == 0) {
++ req->Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
++ req->Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
++ req->DialectCount = cpu_to_le16(2);
++ inc_rfc1001_len(req, 4);
++ } else if (strcmp(ses->server->vals->version_string,
++ SMBDEFAULT_VERSION_STRING) == 0) {
++ req->Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
++ req->Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
++ req->Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
++ req->DialectCount = cpu_to_le16(3);
++ inc_rfc1001_len(req, 6);
++ } else {
++ /* otherwise send specific dialect */
++ req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
++ req->DialectCount = cpu_to_le16(1);
++ inc_rfc1001_len(req, 2);
++ }
+
+ /* only one of SMB2 signing flags may be set in SMB2 request */
+ if (ses->sign)
+@@ -516,16 +531,42 @@ SMB2_negotiate(const unsigned int xid, s
+ */
+ if (rc == -EOPNOTSUPP) {
+ cifs_dbg(VFS, "Dialect not supported by server. Consider "
+- "specifying vers=1.0 or vers=2.1 on mount for accessing"
++ "specifying vers=1.0 or vers=2.0 on mount for accessing"
+ " older servers\n");
+ goto neg_exit;
+ } else if (rc != 0)
+ goto neg_exit;
+
++ if (strcmp(ses->server->vals->version_string,
++ SMB3ANY_VERSION_STRING) == 0) {
++ if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
++ cifs_dbg(VFS,
++ "SMB2 dialect returned but not requested\n");
++ return -EIO;
++ } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
++ cifs_dbg(VFS,
++ "SMB2.1 dialect returned but not requested\n");
++ return -EIO;
++ }
++ } else if (strcmp(ses->server->vals->version_string,
++ SMBDEFAULT_VERSION_STRING) == 0) {
++ if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID)) {
++ cifs_dbg(VFS,
++ "SMB2 dialect returned but not requested\n");
++ return -EIO;
++ } else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID)) {
++ /* ops set to 3.0 by default for default so update */
++ ses->server->ops = &smb21_operations;
++ }
++ } else if (rsp->DialectRevision != ses->server->vals->protocol_id) {
++ /* if requested single dialect ensure returned dialect matched */
++ cifs_dbg(VFS, "Illegal 0x%x dialect returned: not requested\n",
++ cpu_to_le16(rsp->DialectRevision));
++ return -EIO;
++ }
++
+ cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
+
+- /* BB we may eventually want to match the negotiated vs. requested
+- dialect, even though we are only requesting one at a time */
+ if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
+ cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
+ else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
+@@ -546,6 +587,8 @@ SMB2_negotiate(const unsigned int xid, s
+ }
+ server->dialect = le16_to_cpu(rsp->DialectRevision);
+
++ /* BB: add check that dialect was valid given dialect(s) we asked for */
++
+ /* SMB2 only has an extended negflavor */
+ server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
+ /* set it to the maximum buffer size value we can send with 1 credit */
+@@ -594,6 +637,7 @@ int smb3_validate_negotiate(const unsign
+ struct validate_negotiate_info_req vneg_inbuf;
+ struct validate_negotiate_info_rsp *pneg_rsp;
+ u32 rsplen;
++ u32 inbuflen; /* max of 4 dialects */
+
+ cifs_dbg(FYI, "validate negotiate\n");
+
+@@ -622,9 +666,30 @@ int smb3_validate_negotiate(const unsign
+ else
+ vneg_inbuf.SecurityMode = 0;
+
+- vneg_inbuf.DialectCount = cpu_to_le16(1);
+- vneg_inbuf.Dialects[0] =
+- cpu_to_le16(tcon->ses->server->vals->protocol_id);
++
++ if (strcmp(tcon->ses->server->vals->version_string,
++ SMB3ANY_VERSION_STRING) == 0) {
++ vneg_inbuf.Dialects[0] = cpu_to_le16(SMB30_PROT_ID);
++ vneg_inbuf.Dialects[1] = cpu_to_le16(SMB302_PROT_ID);
++ vneg_inbuf.DialectCount = cpu_to_le16(2);
++ /* structure is big enough for 3 dialects, sending only 2 */
++ inbuflen = sizeof(struct validate_negotiate_info_req) - 2;
++ } else if (strcmp(tcon->ses->server->vals->version_string,
++ SMBDEFAULT_VERSION_STRING) == 0) {
++ vneg_inbuf.Dialects[0] = cpu_to_le16(SMB21_PROT_ID);
++ vneg_inbuf.Dialects[1] = cpu_to_le16(SMB30_PROT_ID);
++ vneg_inbuf.Dialects[2] = cpu_to_le16(SMB302_PROT_ID);
++ vneg_inbuf.DialectCount = cpu_to_le16(3);
++ /* structure is big enough for 3 dialects */
++ inbuflen = sizeof(struct validate_negotiate_info_req);
++ } else {
++ /* otherwise specific dialect was requested */
++ vneg_inbuf.Dialects[0] =
++ cpu_to_le16(tcon->ses->server->vals->protocol_id);
++ vneg_inbuf.DialectCount = cpu_to_le16(1);
++ /* structure is big enough for 3 dialects, sending only 1 */
++ inbuflen = sizeof(struct validate_negotiate_info_req) - 4;
++ }
+
+ rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
+ FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
+--- a/fs/cifs/smb2pdu.h
++++ b/fs/cifs/smb2pdu.h
+@@ -716,7 +716,7 @@ struct validate_negotiate_info_req {
+ __u8 Guid[SMB2_CLIENT_GUID_SIZE];
+ __le16 SecurityMode;
+ __le16 DialectCount;
+- __le16 Dialects[1]; /* dialect (someday maybe list) client asked for */
++ __le16 Dialects[3]; /* BB expand this if autonegotiate > 3 dialects */
+ } __packed;
+
+ struct validate_negotiate_info_rsp {