--- /dev/null
+From f16eb8a4b096514ac06fb25bf599dcc792899b3d Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Mon, 11 Mar 2019 18:41:03 +0200
+Subject: ACPI / device_sysfs: Avoid OF modalias creation for removed device
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit f16eb8a4b096514ac06fb25bf599dcc792899b3d upstream.
+
+If an SSDT overlay is loaded via ConfigFS and then unloaded, the device
+we would like to have the OF modalias for is already gone. Thus,
+acpi_get_name() returns no allocated buffer in such a case and the
+kernel crashes afterwards:
+
+ ACPI: Host-directed Dynamic ACPI Table Unload
+ ads7950 spi-PRP0001:00: Dropping the link to regulator.0
+ BUG: unable to handle kernel NULL pointer dereference at 0000000000000000
+ #PF error: [normal kernel read fault]
+ PGD 80000000070d6067 P4D 80000000070d6067 PUD 70d0067 PMD 0
+ Oops: 0000 [#1] SMP PTI
+ CPU: 0 PID: 40 Comm: kworker/u4:2 Not tainted 5.0.0+ #96
+ Hardware name: Intel Corporation Merrifield/BODEGA BAY, BIOS 542 2015.01.21:18.19.48
+ Workqueue: kacpi_hotplug acpi_device_del_work_fn
+ RIP: 0010:create_of_modalias.isra.1+0x4c/0x150
+ Code: 00 00 48 89 44 24 18 31 c0 48 8d 54 24 08 48 c7 44 24 10 00 00 00 00 48 c7 44 24 08 ff ff ff ff e8 7a b0 03 00 48 8b 4c 24 10 <0f> b6 01 84 c0 74 27 48 c7 c7 00 09 f4 a5 0f b6 f0 8d 50 20 f6 04
+ RSP: 0000:ffffa51040297c10 EFLAGS: 00010246
+ RAX: 0000000000001001 RBX: 0000000000000785 RCX: 0000000000000000
+ RDX: 0000000000001001 RSI: 0000000000000286 RDI: ffffa2163dc042e0
+ RBP: ffffa216062b1196 R08: 0000000000001001 R09: ffffa21639873000
+ R10: ffffffffa606761d R11: 0000000000000001 R12: ffffa21639873218
+ R13: ffffa2163deb5060 R14: ffffa216063d1010 R15: 0000000000000000
+ FS: 0000000000000000(0000) GS:ffffa2163e000000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000000000000 CR3: 0000000007114000 CR4: 00000000001006f0
+ Call Trace:
+ __acpi_device_uevent_modalias+0xb0/0x100
+ spi_uevent+0xd/0x40
+
+ ...
+
+In order to fix the above, let create_of_modalias() check the status returned
+by acpi_get_name() and bail out in case of failure.
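+
+A minimal sketch of the failure path, using the names from the hunk
+below (assuming the usual ACPICA convention that an ACPI_ALLOCATE_BUFFER
+buffer is only allocated on success):
+
+  struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+
+  acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
+  /* device already gone: the call fails and buf.pointer stays NULL */
+  for (c = buf.pointer; *c != '\0'; c++)   /* NULL pointer dereference */
+          *c = tolower(*c);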
+
+Fixes: 8765c5ba1949 ("ACPI / scan: Rework modalias creation when "compatible" is present")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=201381
+Reported-by: Ferry Toth <fntoth@gmail.com>
+Tested-by: Ferry Toth <fntoth@gmail.com>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Cc: 4.1+ <stable@vger.kernel.org> # 4.1+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/device_sysfs.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/drivers/acpi/device_sysfs.c
++++ b/drivers/acpi/device_sysfs.c
+@@ -202,11 +202,15 @@ static int create_of_modalias(struct acp
+ {
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+ const union acpi_object *of_compatible, *obj;
++ acpi_status status;
+ int len, count;
+ int i, nval;
+ char *c;
+
+- acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
++ status = acpi_get_name(acpi_dev->handle, ACPI_SINGLE_NAME, &buf);
++ if (ACPI_FAILURE(status))
++ return -ENODEV;
++
+ /* DT strings are all in lower case */
+ for (c = buf.pointer; *c != '\0'; c++)
+ *c = tolower(*c);
--- /dev/null
+From 7b9b9edb49ad377b1e06abf14354c227e9ac4b06 Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <piastryyy@gmail.com>
+Date: Wed, 13 Feb 2019 15:43:08 -0800
+Subject: CIFS: Do not reset lease state to NONE on lease break
+
+From: Pavel Shilovsky <piastryyy@gmail.com>
+
+commit 7b9b9edb49ad377b1e06abf14354c227e9ac4b06 upstream.
+
+Currently on lease break the client sets a caching level twice:
+when oplock is detected and when oplock is processed. While the
+1st attempt sets the level to the value provided by the server,
+the 2nd one resets the level to None unconditionally.
+This happens because the oplock/lease processing code was changed
+to avoid races between page cache flushes and oplock breaks.
+The commit c11f1df5003d534 ("cifs: Wait for writebacks to complete
+before attempting write.") fixed the races for oplocks but didn't
+apply the same changes for leases, resulting in the server-granted
+value being overwritten with None. Fix this by properly processing
+lease breaks.
+
+Signed-off-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+CC: Stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/smb2misc.c | 17 ++++++++++++++---
+ fs/cifs/smb2ops.c | 15 ++++++++++++---
+ 2 files changed, 26 insertions(+), 6 deletions(-)
+
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -517,7 +517,6 @@ smb2_tcon_has_lease(struct cifs_tcon *tc
+ __u8 lease_state;
+ struct list_head *tmp;
+ struct cifsFileInfo *cfile;
+- struct TCP_Server_Info *server = tcon->ses->server;
+ struct cifs_pending_open *open;
+ struct cifsInodeInfo *cinode;
+ int ack_req = le32_to_cpu(rsp->Flags &
+@@ -537,13 +536,25 @@ smb2_tcon_has_lease(struct cifs_tcon *tc
+ cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
+ le32_to_cpu(rsp->NewLeaseState));
+
+- server->ops->set_oplock_level(cinode, lease_state, 0, NULL);
+-
+ if (ack_req)
+ cfile->oplock_break_cancelled = false;
+ else
+ cfile->oplock_break_cancelled = true;
+
++ set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
++
++ /*
++ * Set or clear flags depending on the lease state being READ.
++ * HANDLE caching flag should be added when the client starts
++ * to defer closing remote file handles with HANDLE leases.
++ */
++ if (lease_state & SMB2_LEASE_READ_CACHING_HE)
++ set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
++ &cinode->flags);
++ else
++ clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
++ &cinode->flags);
++
+ queue_work(cifsoplockd_wq, &cfile->oplock_break);
+ kfree(lw);
+ return true;
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -2595,6 +2595,15 @@ smb2_downgrade_oplock(struct TCP_Server_
+ }
+
+ static void
++smb21_downgrade_oplock(struct TCP_Server_Info *server,
++ struct cifsInodeInfo *cinode, bool set_level2)
++{
++ server->ops->set_oplock_level(cinode,
++ set_level2 ? SMB2_LEASE_READ_CACHING_HE :
++ 0, 0, NULL);
++}
++
++static void
+ smb2_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
+ unsigned int epoch, bool *purge_cache)
+ {
+@@ -3646,7 +3655,7 @@ struct smb_version_operations smb21_oper
+ .print_stats = smb2_print_stats,
+ .is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
+- .downgrade_oplock = smb2_downgrade_oplock,
++ .downgrade_oplock = smb21_downgrade_oplock,
+ .need_neg = smb2_need_neg,
+ .negotiate = smb2_negotiate,
+ .negotiate_wsize = smb2_negotiate_wsize,
+@@ -3743,7 +3752,7 @@ struct smb_version_operations smb30_oper
+ .dump_share_caps = smb2_dump_share_caps,
+ .is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
+- .downgrade_oplock = smb2_downgrade_oplock,
++ .downgrade_oplock = smb21_downgrade_oplock,
+ .need_neg = smb2_need_neg,
+ .negotiate = smb2_negotiate,
+ .negotiate_wsize = smb3_negotiate_wsize,
+@@ -3848,7 +3857,7 @@ struct smb_version_operations smb311_ope
+ .dump_share_caps = smb2_dump_share_caps,
+ .is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
+- .downgrade_oplock = smb2_downgrade_oplock,
++ .downgrade_oplock = smb21_downgrade_oplock,
+ .need_neg = smb2_need_neg,
+ .negotiate = smb2_negotiate,
+ .negotiate_wsize = smb3_negotiate_wsize,
--- /dev/null
+From c781af7e0c1fed9f1d0e0ec31b86f5b21a8dca17 Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <piastryyy@gmail.com>
+Date: Mon, 4 Mar 2019 14:02:50 -0800
+Subject: CIFS: Do not skip SMB2 message IDs on send failures
+
+From: Pavel Shilovsky <piastryyy@gmail.com>
+
+commit c781af7e0c1fed9f1d0e0ec31b86f5b21a8dca17 upstream.
+
+When we hit failures during constructing MIDs or sending PDUs
+through the network, we end up not using message IDs assigned
+to the packet. The next SMB packet will skip those message IDs
+and continue with the next one. This behavior may lead to a server
+not granting us credits until we use the skipped IDs. Fix this by
+reverting the current ID to the original value if any errors occur
+before we push the packet through the network stack.
+
+This patch fixes the generic/310 test from xfstests.
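+
+The CreditCharge of a request determines how many message IDs it
+consumes (at least one), so each early error path hands exactly that
+many back before bailing out, e.g. (a sketch using the helpers added
+in the hunks below):
+
+  mid = smb2_mid_entry_alloc(shdr, server);
+  if (mid == NULL) {
+          /* return the reserved IDs so the next request reuses them */
+          revert_current_mid_from_hdr(server, shdr);
+          return ERR_PTR(-ENOMEM);
+  }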
+
+Cc: <stable@vger.kernel.org> # 4.19.x
+Signed-off-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/cifsglob.h | 19 +++++++++++++++++++
+ fs/cifs/smb2ops.c | 13 +++++++++++++
+ fs/cifs/smb2transport.c | 14 ++++++++++++--
+ fs/cifs/transport.c | 6 +++++-
+ 4 files changed, 49 insertions(+), 3 deletions(-)
+
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -236,6 +236,8 @@ struct smb_version_operations {
+ int * (*get_credits_field)(struct TCP_Server_Info *, const int);
+ unsigned int (*get_credits)(struct mid_q_entry *);
+ __u64 (*get_next_mid)(struct TCP_Server_Info *);
++ void (*revert_current_mid)(struct TCP_Server_Info *server,
++ const unsigned int val);
+ /* data offset from read response message */
+ unsigned int (*read_data_offset)(char *);
+ /*
+@@ -770,6 +772,22 @@ get_next_mid(struct TCP_Server_Info *ser
+ return cpu_to_le16(mid);
+ }
+
++static inline void
++revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
++{
++ if (server->ops->revert_current_mid)
++ server->ops->revert_current_mid(server, val);
++}
++
++static inline void
++revert_current_mid_from_hdr(struct TCP_Server_Info *server,
++ const struct smb2_sync_hdr *shdr)
++{
++ unsigned int num = le16_to_cpu(shdr->CreditCharge);
++
++ return revert_current_mid(server, num > 0 ? num : 1);
++}
++
+ static inline __u16
+ get_mid(const struct smb_hdr *smb)
+ {
+@@ -1422,6 +1440,7 @@ struct mid_q_entry {
+ struct kref refcount;
+ struct TCP_Server_Info *server; /* server corresponding to this mid */
+ __u64 mid; /* multiplex id */
++ __u16 credits; /* number of credits consumed by this mid */
+ __u32 pid; /* process id */
+ __u32 sequence_number; /* for CIFS signing */
+ unsigned long when_alloc; /* when mid was created */
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -219,6 +219,15 @@ smb2_get_next_mid(struct TCP_Server_Info
+ return mid;
+ }
+
++static void
++smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
++{
++ spin_lock(&GlobalMid_Lock);
++ if (server->CurrentMid >= val)
++ server->CurrentMid -= val;
++ spin_unlock(&GlobalMid_Lock);
++}
++
+ static struct mid_q_entry *
+ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
+ {
+@@ -3550,6 +3559,7 @@ struct smb_version_operations smb20_oper
+ .get_credits = smb2_get_credits,
+ .wait_mtu_credits = cifs_wait_mtu_credits,
+ .get_next_mid = smb2_get_next_mid,
++ .revert_current_mid = smb2_revert_current_mid,
+ .read_data_offset = smb2_read_data_offset,
+ .read_data_length = smb2_read_data_length,
+ .map_error = map_smb2_to_linux_error,
+@@ -3645,6 +3655,7 @@ struct smb_version_operations smb21_oper
+ .get_credits = smb2_get_credits,
+ .wait_mtu_credits = smb2_wait_mtu_credits,
+ .get_next_mid = smb2_get_next_mid,
++ .revert_current_mid = smb2_revert_current_mid,
+ .read_data_offset = smb2_read_data_offset,
+ .read_data_length = smb2_read_data_length,
+ .map_error = map_smb2_to_linux_error,
+@@ -3741,6 +3752,7 @@ struct smb_version_operations smb30_oper
+ .get_credits = smb2_get_credits,
+ .wait_mtu_credits = smb2_wait_mtu_credits,
+ .get_next_mid = smb2_get_next_mid,
++ .revert_current_mid = smb2_revert_current_mid,
+ .read_data_offset = smb2_read_data_offset,
+ .read_data_length = smb2_read_data_length,
+ .map_error = map_smb2_to_linux_error,
+@@ -3846,6 +3858,7 @@ struct smb_version_operations smb311_ope
+ .get_credits = smb2_get_credits,
+ .wait_mtu_credits = smb2_wait_mtu_credits,
+ .get_next_mid = smb2_get_next_mid,
++ .revert_current_mid = smb2_revert_current_mid,
+ .read_data_offset = smb2_read_data_offset,
+ .read_data_length = smb2_read_data_length,
+ .map_error = map_smb2_to_linux_error,
+--- a/fs/cifs/smb2transport.c
++++ b/fs/cifs/smb2transport.c
+@@ -576,6 +576,7 @@ smb2_mid_entry_alloc(const struct smb2_s
+ struct TCP_Server_Info *server)
+ {
+ struct mid_q_entry *temp;
++ unsigned int credits = le16_to_cpu(shdr->CreditCharge);
+
+ if (server == NULL) {
+ cifs_dbg(VFS, "Null TCP session in smb2_mid_entry_alloc\n");
+@@ -586,6 +587,7 @@ smb2_mid_entry_alloc(const struct smb2_s
+ memset(temp, 0, sizeof(struct mid_q_entry));
+ kref_init(&temp->refcount);
+ temp->mid = le64_to_cpu(shdr->MessageId);
++ temp->credits = credits > 0 ? credits : 1;
+ temp->pid = current->pid;
+ temp->command = shdr->Command; /* Always LE */
+ temp->when_alloc = jiffies;
+@@ -674,13 +676,18 @@ smb2_setup_request(struct cifs_ses *ses,
+ smb2_seq_num_into_buf(ses->server, shdr);
+
+ rc = smb2_get_mid_entry(ses, shdr, &mid);
+- if (rc)
++ if (rc) {
++ revert_current_mid_from_hdr(ses->server, shdr);
+ return ERR_PTR(rc);
++ }
++
+ rc = smb2_sign_rqst(rqst, ses->server);
+ if (rc) {
++ revert_current_mid_from_hdr(ses->server, shdr);
+ cifs_delete_mid(mid);
+ return ERR_PTR(rc);
+ }
++
+ return mid;
+ }
+
+@@ -695,11 +702,14 @@ smb2_setup_async_request(struct TCP_Serv
+ smb2_seq_num_into_buf(server, shdr);
+
+ mid = smb2_mid_entry_alloc(shdr, server);
+- if (mid == NULL)
++ if (mid == NULL) {
++ revert_current_mid_from_hdr(server, shdr);
+ return ERR_PTR(-ENOMEM);
++ }
+
+ rc = smb2_sign_rqst(rqst, server);
+ if (rc) {
++ revert_current_mid_from_hdr(server, shdr);
+ DeleteMidQEntry(mid);
+ return ERR_PTR(rc);
+ }
+--- a/fs/cifs/transport.c
++++ b/fs/cifs/transport.c
+@@ -647,6 +647,7 @@ cifs_call_async(struct TCP_Server_Info *
+ cifs_in_send_dec(server);
+
+ if (rc < 0) {
++ revert_current_mid(server, mid->credits);
+ server->sequence_number -= 2;
+ cifs_delete_mid(mid);
+ }
+@@ -868,6 +869,7 @@ compound_send_recv(const unsigned int xi
+ for (i = 0; i < num_rqst; i++) {
+ midQ[i] = ses->server->ops->setup_request(ses, &rqst[i]);
+ if (IS_ERR(midQ[i])) {
++ revert_current_mid(ses->server, i);
+ for (j = 0; j < i; j++)
+ cifs_delete_mid(midQ[j]);
+ mutex_unlock(&ses->server->srv_mutex);
+@@ -897,8 +899,10 @@ compound_send_recv(const unsigned int xi
+ for (i = 0; i < num_rqst; i++)
+ cifs_save_when_sent(midQ[i]);
+
+- if (rc < 0)
++ if (rc < 0) {
++ revert_current_mid(ses->server, num_rqst);
+ ses->server->sequence_number -= 2;
++ }
+
+ mutex_unlock(&ses->server->srv_mutex);
+
--- /dev/null
+From 165df9a080b6863ae286fa01780c13d87cd81076 Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <pshilov@microsoft.com>
+Date: Tue, 29 Jan 2019 16:40:28 -0800
+Subject: CIFS: Fix leaking locked VFS cache pages in writeback retry
+
+From: Pavel Shilovsky <pshilov@microsoft.com>
+
+commit 165df9a080b6863ae286fa01780c13d87cd81076 upstream.
+
+If we don't find a writable file handle when retrying writepages,
+we break out of the loop and do not unlock and put the pages from
+either wdata2 or the original wdata. Fix this by walking through
+all the remaining pages and cleaning them up properly.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/cifssmb.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -2125,12 +2125,13 @@ cifs_writev_requeue(struct cifs_writedat
+
+ wdata2->cfile = find_writable_file(CIFS_I(inode), false);
+ if (!wdata2->cfile) {
+- cifs_dbg(VFS, "No writable handles for inode\n");
++ cifs_dbg(VFS, "No writable handle to retry writepages\n");
+ rc = -EBADF;
+- break;
++ } else {
++ wdata2->pid = wdata2->cfile->pid;
++ rc = server->ops->async_writev(wdata2,
++ cifs_writedata_release);
+ }
+- wdata2->pid = wdata2->cfile->pid;
+- rc = server->ops->async_writev(wdata2, cifs_writedata_release);
+
+ for (j = 0; j < nr_pages; j++) {
+ unlock_page(wdata2->pages[j]);
+@@ -2145,6 +2146,7 @@ cifs_writev_requeue(struct cifs_writedat
+ kref_put(&wdata2->refcount, cifs_writedata_release);
+ if (is_retryable_error(rc))
+ continue;
++ i += nr_pages;
+ break;
+ }
+
+@@ -2152,6 +2154,13 @@ cifs_writev_requeue(struct cifs_writedat
+ i += nr_pages;
+ } while (i < wdata->nr_pages);
+
++ /* cleanup remaining pages from the original wdata */
++ for (; i < wdata->nr_pages; i++) {
++ SetPageError(wdata->pages[i]);
++ end_page_writeback(wdata->pages[i]);
++ put_page(wdata->pages[i]);
++ }
++
+ if (rc != 0 && !is_retryable_error(rc))
+ mapping_set_error(inode->i_mapping, rc);
+ kref_put(&wdata->refcount, cifs_writedata_release);
--- /dev/null
+From 6dfbd84684700cb58b34e8602c01c12f3d2595c8 Mon Sep 17 00:00:00 2001
+From: Pavel Shilovsky <piastryyy@gmail.com>
+Date: Mon, 4 Mar 2019 17:48:01 -0800
+Subject: CIFS: Fix read after write for files with read caching
+
+From: Pavel Shilovsky <piastryyy@gmail.com>
+
+commit 6dfbd84684700cb58b34e8602c01c12f3d2595c8 upstream.
+
+When we have a READ lease for a file and have just issued a write
+operation to the server we need to purge the cache and set oplock/lease
+level to NONE to avoid reading stale data. Currently we do that
+only if the write operation succeeded, thus not covering cases when
+a request was sent to the server but a negative error code was
+returned later for some other reasons (e.g. -EIOCBQUEUED or -EINTR).
+Fix this by turning off caching regardless of the error code being
+returned.
+
+This patch fixes generic tests 075 and 112 from xfstests.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/file.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -3028,14 +3028,16 @@ cifs_strict_writev(struct kiocb *iocb, s
+ * these pages but not on the region from pos to ppos+len-1.
+ */
+ written = cifs_user_writev(iocb, from);
+- if (written > 0 && CIFS_CACHE_READ(cinode)) {
++ if (CIFS_CACHE_READ(cinode)) {
+ /*
+- * Windows 7 server can delay breaking level2 oplock if a write
+- * request comes - break it on the client to prevent reading
+- * an old data.
++ * We have read level caching and we have just sent a write
++ * request to the server thus making data in the cache stale.
++ * Zap the cache and set oplock/lease level to NONE to avoid
++ * reading stale data from the cache. All subsequent read
++ * operations will read new data from the server.
+ */
+ cifs_zap_mapping(inode);
+- cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
++ cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
+ inode);
+ cinode->oplock = 0;
+ }
--- /dev/null
+From 6ebc97006b196aafa9df0497fdfa866cf26f259b Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Sun, 6 Jan 2019 18:47:44 -0800
+Subject: crypto: aead - set CRYPTO_TFM_NEED_KEY if ->setkey() fails
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 6ebc97006b196aafa9df0497fdfa866cf26f259b upstream.
+
+Some algorithms have a ->setkey() method that is not atomic, in the
+sense that setting a key can fail after changes were already made to the
+tfm context. In this case, if a key was already set the tfm can end up
+in a state that corresponds to neither the old key nor the new key.
+
+For example, in gcm.c, if the kzalloc() fails due to lack of memory,
+then the CTR part of GCM will have the new key but GHASH will not.
+
+It's not feasible to make all ->setkey() methods atomic, especially ones
+that have to key multiple sub-tfms. Therefore, make the crypto API set
+CRYPTO_TFM_NEED_KEY if ->setkey() fails, to prevent the tfm from being
+used until a new key is set.
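+
+The caller-visible effect, sketched here for illustration (not part of
+the patch itself): a tfm whose ->setkey() failed behaves like an
+unkeyed tfm again.
+
+  err = crypto_aead_setkey(tfm, key, keylen);
+  if (err) {
+          /* CRYPTO_TFM_NEED_KEY is now set, so encrypt/decrypt on this
+           * tfm is expected to fail with -ENOKEY until a later setkey
+           * succeeds */
+  }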
+
+[Cc stable mainly because when introducing the NEED_KEY flag I changed
+ AF_ALG to rely on it; and unlike in-kernel crypto API users, AF_ALG
+ previously didn't have this problem. So these "incompletely keyed"
+ states became theoretically accessible via AF_ALG -- though, the
+ opportunities for causing real mischief seem pretty limited.]
+
+Fixes: dc26c17f743a ("crypto: aead - prevent using AEADs without setting key")
+Cc: <stable@vger.kernel.org> # v4.16+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/aead.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/crypto/aead.c
++++ b/crypto/aead.c
+@@ -61,8 +61,10 @@ int crypto_aead_setkey(struct crypto_aea
+ else
+ err = crypto_aead_alg(tfm)->setkey(tfm, key, keylen);
+
+- if (err)
++ if (unlikely(err)) {
++ crypto_aead_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return err;
++ }
+
+ crypto_aead_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
--- /dev/null
+From 0f533e67d26f228ea5dfdacc8a4bdeb487af5208 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 31 Jan 2019 23:51:36 -0800
+Subject: crypto: aegis - fix handling chunked inputs
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 0f533e67d26f228ea5dfdacc8a4bdeb487af5208 upstream.
+
+The generic AEGIS implementations all fail the improved AEAD tests
+because they produce the wrong result with some data layouts. The issue
+is that they assume that if the skcipher_walk API gives 'nbytes' not
+aligned to the walksize (a.k.a. walk.stride), then it is the end of the
+data. In fact, this can happen before the end. Fix them.
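+
+The corrected loop shape, common to all three variants below: process
+only whole strides until the final chunk and hand the unprocessed
+remainder back to the walker.
+
+  while (walk.nbytes) {
+          unsigned int nbytes = walk.nbytes;
+
+          if (nbytes < walk.total)        /* not the last chunk yet */
+                  nbytes = round_down(nbytes, walk.stride);
+
+          ops->crypt_chunk(state, walk.dst.virt.addr,
+                           walk.src.virt.addr, nbytes);
+          skcipher_walk_done(&walk, walk.nbytes - nbytes);
+  }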
+
+Fixes: f606a88e5823 ("crypto: aegis - Add generic AEGIS AEAD implementations")
+Cc: <stable@vger.kernel.org> # v4.18+
+Cc: Ondrej Mosnacek <omosnace@redhat.com>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Reviewed-by: Ondrej Mosnacek <omosnace@redhat.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/aegis128.c | 14 +++++++-------
+ crypto/aegis128l.c | 14 +++++++-------
+ crypto/aegis256.c | 14 +++++++-------
+ 3 files changed, 21 insertions(+), 21 deletions(-)
+
+--- a/crypto/aegis128.c
++++ b/crypto/aegis128.c
+@@ -290,19 +290,19 @@ static void crypto_aegis128_process_cryp
+ const struct aegis128_ops *ops)
+ {
+ struct skcipher_walk walk;
+- u8 *src, *dst;
+- unsigned int chunksize;
+
+ ops->skcipher_walk_init(&walk, req, false);
+
+ while (walk.nbytes) {
+- src = walk.src.virt.addr;
+- dst = walk.dst.virt.addr;
+- chunksize = walk.nbytes;
++ unsigned int nbytes = walk.nbytes;
+
+- ops->crypt_chunk(state, dst, src, chunksize);
++ if (nbytes < walk.total)
++ nbytes = round_down(nbytes, walk.stride);
+
+- skcipher_walk_done(&walk, 0);
++ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
++ nbytes);
++
++ skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+ }
+
+--- a/crypto/aegis128l.c
++++ b/crypto/aegis128l.c
+@@ -353,19 +353,19 @@ static void crypto_aegis128l_process_cry
+ const struct aegis128l_ops *ops)
+ {
+ struct skcipher_walk walk;
+- u8 *src, *dst;
+- unsigned int chunksize;
+
+ ops->skcipher_walk_init(&walk, req, false);
+
+ while (walk.nbytes) {
+- src = walk.src.virt.addr;
+- dst = walk.dst.virt.addr;
+- chunksize = walk.nbytes;
++ unsigned int nbytes = walk.nbytes;
+
+- ops->crypt_chunk(state, dst, src, chunksize);
++ if (nbytes < walk.total)
++ nbytes = round_down(nbytes, walk.stride);
+
+- skcipher_walk_done(&walk, 0);
++ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
++ nbytes);
++
++ skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+ }
+
+--- a/crypto/aegis256.c
++++ b/crypto/aegis256.c
+@@ -303,19 +303,19 @@ static void crypto_aegis256_process_cryp
+ const struct aegis256_ops *ops)
+ {
+ struct skcipher_walk walk;
+- u8 *src, *dst;
+- unsigned int chunksize;
+
+ ops->skcipher_walk_init(&walk, req, false);
+
+ while (walk.nbytes) {
+- src = walk.src.virt.addr;
+- dst = walk.dst.virt.addr;
+- chunksize = walk.nbytes;
++ unsigned int nbytes = walk.nbytes;
+
+- ops->crypt_chunk(state, dst, src, chunksize);
++ if (nbytes < walk.total)
++ nbytes = round_down(nbytes, walk.stride);
+
+- skcipher_walk_done(&walk, 0);
++ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
++ nbytes);
++
++ skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+ }
+
--- /dev/null
+From 62fecf295e3c48be1b5f17c440b93875b9adb4d6 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Sun, 27 Jan 2019 10:16:52 +0100
+Subject: crypto: arm/crct10dif - revert to C code for short inputs
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit 62fecf295e3c48be1b5f17c440b93875b9adb4d6 upstream.
+
+The SIMD routine ported from x86 used to have a special code path
+for inputs < 16 bytes, which got lost somewhere along the way.
+Instead, the current glue code aligns the input pointer to permit
+the NEON routine to use special versions of the vld1 instructions
+that assume 16 byte alignment, but this could result in inputs of
+less than 16 bytes being passed in. This not only fails the new
+extended tests that Eric has implemented, it also results in the
+code reading past the end of the input, which could potentially
+result in crashes when dealing with less than 16 bytes of input
+at the end of a page which is followed by an unmapped page.
+
+So update the glue code to only invoke the NEON routine if the
+input is at least 16 bytes.
+
+Reported-by: Eric Biggers <ebiggers@kernel.org>
+Reviewed-by: Eric Biggers <ebiggers@kernel.org>
+Fixes: 1d481f1cd892 ("crypto: arm/crct10dif - port x86 SSE implementation to ARM")
+Cc: <stable@vger.kernel.org> # v4.10+
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/crypto/crct10dif-ce-core.S | 14 +++++++-------
+ arch/arm/crypto/crct10dif-ce-glue.c | 23 ++++++-----------------
+ 2 files changed, 13 insertions(+), 24 deletions(-)
+
+--- a/arch/arm/crypto/crct10dif-ce-core.S
++++ b/arch/arm/crypto/crct10dif-ce-core.S
+@@ -124,10 +124,10 @@ ENTRY(crc_t10dif_pmull)
+ vext.8 q10, qzr, q0, #4
+
+ // receive the initial 64B data, xor the initial crc value
+- vld1.64 {q0-q1}, [arg2, :128]!
+- vld1.64 {q2-q3}, [arg2, :128]!
+- vld1.64 {q4-q5}, [arg2, :128]!
+- vld1.64 {q6-q7}, [arg2, :128]!
++ vld1.64 {q0-q1}, [arg2]!
++ vld1.64 {q2-q3}, [arg2]!
++ vld1.64 {q4-q5}, [arg2]!
++ vld1.64 {q6-q7}, [arg2]!
+ CPU_LE( vrev64.8 q0, q0 )
+ CPU_LE( vrev64.8 q1, q1 )
+ CPU_LE( vrev64.8 q2, q2 )
+@@ -167,7 +167,7 @@ CPU_LE( vrev64.8 q7, q7 )
+ _fold_64_B_loop:
+
+ .macro fold64, reg1, reg2
+- vld1.64 {q11-q12}, [arg2, :128]!
++ vld1.64 {q11-q12}, [arg2]!
+
+ vmull.p64 q8, \reg1\()h, d21
+ vmull.p64 \reg1, \reg1\()l, d20
+@@ -238,7 +238,7 @@ _16B_reduction_loop:
+ vmull.p64 q7, d15, d21
+ veor.8 q7, q7, q8
+
+- vld1.64 {q0}, [arg2, :128]!
++ vld1.64 {q0}, [arg2]!
+ CPU_LE( vrev64.8 q0, q0 )
+ vswp d0, d1
+ veor.8 q7, q7, q0
+@@ -335,7 +335,7 @@ _less_than_128:
+ vmov.i8 q0, #0
+ vmov s3, arg1_low32 // get the initial crc value
+
+- vld1.64 {q7}, [arg2, :128]!
++ vld1.64 {q7}, [arg2]!
+ CPU_LE( vrev64.8 q7, q7 )
+ vswp d14, d15
+ veor.8 q7, q7, q0
+--- a/arch/arm/crypto/crct10dif-ce-glue.c
++++ b/arch/arm/crypto/crct10dif-ce-glue.c
+@@ -35,26 +35,15 @@ static int crct10dif_update(struct shash
+ unsigned int length)
+ {
+ u16 *crc = shash_desc_ctx(desc);
+- unsigned int l;
+
+- if (!may_use_simd()) {
+- *crc = crc_t10dif_generic(*crc, data, length);
++ if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
++ kernel_neon_begin();
++ *crc = crc_t10dif_pmull(*crc, data, length);
++ kernel_neon_end();
+ } else {
+- if (unlikely((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
+- l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
+- ((u32)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
+-
+- *crc = crc_t10dif_generic(*crc, data, l);
+-
+- length -= l;
+- data += l;
+- }
+- if (length > 0) {
+- kernel_neon_begin();
+- *crc = crc_t10dif_pmull(*crc, data, length);
+- kernel_neon_end();
+- }
++ *crc = crc_t10dif_generic(*crc, data, length);
+ }
++
+ return 0;
+ }
+
--- /dev/null
+From 969e2f59d589c15f6aaf306e590dde16f12ea4b3 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Jan 2019 17:33:46 +0100
+Subject: crypto: arm64/aes-ccm - fix bugs in non-NEON fallback routine
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit 969e2f59d589c15f6aaf306e590dde16f12ea4b3 upstream.
+
+Commit 5092fcf34908 ("crypto: arm64/aes-ce-ccm: add non-SIMD generic
+fallback") introduced C fallback code to replace the NEON routines
+when invoked from a context where the NEON is not available (i.e.,
+from the context of a softirq taken while the NEON is already being
+used in kernel process context).
+
+Fix two logical flaws in the MAC calculation of the associated data.
+
+Reported-by: Eric Biggers <ebiggers@kernel.org>
+Fixes: 5092fcf34908 ("crypto: arm64/aes-ce-ccm: add non-SIMD generic fallback")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/crypto/aes-ce-ccm-glue.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
++++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
+@@ -125,7 +125,7 @@ static void ccm_update_mac(struct crypto
+ abytes -= added;
+ }
+
+- while (abytes > AES_BLOCK_SIZE) {
++ while (abytes >= AES_BLOCK_SIZE) {
+ __aes_arm64_encrypt(key->key_enc, mac, mac,
+ num_rounds(key));
+ crypto_xor(mac, in, AES_BLOCK_SIZE);
+@@ -139,8 +139,6 @@ static void ccm_update_mac(struct crypto
+ num_rounds(key));
+ crypto_xor(mac, in, abytes);
+ *macp = abytes;
+- } else {
+- *macp = 0;
+ }
+ }
+ }
--- /dev/null
+From eaf46edf6ea89675bd36245369c8de5063a0272c Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Thu, 24 Jan 2019 17:33:45 +0100
+Subject: crypto: arm64/aes-ccm - fix logical bug in AAD MAC handling
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit eaf46edf6ea89675bd36245369c8de5063a0272c upstream.
+
+The NEON MAC calculation routine fails to correctly handle the case
+where there is some data in the buffer and the input fills it up
+exactly. In this case, we enter the loop at the end with w8 == 0,
+while a negative value is assumed, and so the loop carries on until
+the increment of the 32-bit counter wraps around, which is quite
+obviously wrong.
+
+So omit the loop altogether in this case, and exit right away.
+
+Reported-by: Eric Biggers <ebiggers@kernel.org>
+Fixes: a3fd82105b9d1 ("arm64/crypto: AES in CCM mode using ARMv8 Crypto ...")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/crypto/aes-ce-ccm-core.S | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/crypto/aes-ce-ccm-core.S
++++ b/arch/arm64/crypto/aes-ce-ccm-core.S
+@@ -74,12 +74,13 @@ ENTRY(ce_aes_ccm_auth_data)
+ beq 10f
+ ext v0.16b, v0.16b, v0.16b, #1 /* rotate out the mac bytes */
+ b 7b
+-8: mov w7, w8
++8: cbz w8, 91f
++ mov w7, w8
+ add w8, w8, #16
+ 9: ext v1.16b, v1.16b, v1.16b, #1
+ adds w7, w7, #1
+ bne 9b
+- eor v0.16b, v0.16b, v1.16b
++91: eor v0.16b, v0.16b, v1.16b
+ st1 {v0.16b}, [x0]
+ 10: str w8, [x3]
+ ret
--- /dev/null
+From 12455e320e19e9cc7ad97f4ab89c280fe297387c Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 31 Jan 2019 23:51:42 -0800
+Subject: crypto: arm64/aes-neonbs - fix returning final keystream block
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 12455e320e19e9cc7ad97f4ab89c280fe297387c upstream.
+
+The arm64 NEON bit-sliced implementation of AES-CTR fails the improved
+skcipher tests because it sometimes produces the wrong ciphertext. The
+bug is that the final keystream block isn't returned from the assembly
+code when the number of non-final blocks is zero. This can happen if
+the input data ends a few bytes after a page boundary. In this case the
+last bytes get "encrypted" by XOR'ing them with uninitialized memory.
+
+Fix the assembly code to return the final keystream block when needed.
+
+Fixes: 88a3f582bea9 ("crypto: arm64/aes - don't use IV buffer to return final keystream block")
+Cc: <stable@vger.kernel.org> # v4.11+
+Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/crypto/aes-neonbs-core.S | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/crypto/aes-neonbs-core.S
++++ b/arch/arm64/crypto/aes-neonbs-core.S
+@@ -971,18 +971,22 @@ CPU_LE( rev x8, x8 )
+
+ 8: next_ctr v0
+ st1 {v0.16b}, [x24]
+- cbz x23, 0f
++ cbz x23, .Lctr_done
+
+ cond_yield_neon 98b
+ b 99b
+
+-0: frame_pop
++.Lctr_done:
++ frame_pop
+ ret
+
+ /*
+ * If we are handling the tail of the input (x6 != NULL), return the
+ * final keystream block back to the caller.
+ */
++0: cbz x25, 8b
++ st1 {v0.16b}, [x25]
++ b 8b
+ 1: cbz x25, 8b
+ st1 {v1.16b}, [x25]
+ b 8b
--- /dev/null
+From d72b9d4acd548251f55b16843fc7a05dc5c80de8 Mon Sep 17 00:00:00 2001
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Date: Sun, 27 Jan 2019 10:16:53 +0100
+Subject: crypto: arm64/crct10dif - revert to C code for short inputs
+
+From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+
+commit d72b9d4acd548251f55b16843fc7a05dc5c80de8 upstream.
+
+The SIMD routine ported from x86 used to have a special code path
+for inputs < 16 bytes, which got lost somewhere along the way.
+Instead, the current glue code aligns the input pointer to 16 bytes,
+which is not really necessary on this architecture (although it
+could be beneficial to performance to expose aligned data to the
+NEON routine), but this could result in inputs of less than
+16 bytes being passed in. This not only fails the new extended
+tests that Eric has implemented, it also results in the code
+reading past the end of the input, which could potentially result
+in crashes when dealing with less than 16 bytes of input at the
+end of a page which is followed by an unmapped page.
+
+So update the glue code to only invoke the NEON routine if the
+input is at least 16 bytes.
+
+Reported-by: Eric Biggers <ebiggers@kernel.org>
+Reviewed-by: Eric Biggers <ebiggers@kernel.org>
+Fixes: 6ef5737f3931 ("crypto: arm64/crct10dif - port x86 SSE implementation to arm64")
+Cc: <stable@vger.kernel.org> # v4.10+
+Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/crypto/crct10dif-ce-glue.c | 25 ++++++-------------------
+ 1 file changed, 6 insertions(+), 19 deletions(-)
+
+--- a/arch/arm64/crypto/crct10dif-ce-glue.c
++++ b/arch/arm64/crypto/crct10dif-ce-glue.c
+@@ -39,26 +39,13 @@ static int crct10dif_update(struct shash
+ unsigned int length)
+ {
+ u16 *crc = shash_desc_ctx(desc);
+- unsigned int l;
+
+- if (unlikely((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE)) {
+- l = min_t(u32, length, CRC_T10DIF_PMULL_CHUNK_SIZE -
+- ((u64)data % CRC_T10DIF_PMULL_CHUNK_SIZE));
+-
+- *crc = crc_t10dif_generic(*crc, data, l);
+-
+- length -= l;
+- data += l;
+- }
+-
+- if (length > 0) {
+- if (may_use_simd()) {
+- kernel_neon_begin();
+- *crc = crc_t10dif_pmull(*crc, data, length);
+- kernel_neon_end();
+- } else {
+- *crc = crc_t10dif_generic(*crc, data, length);
+- }
++ if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && may_use_simd()) {
++ kernel_neon_begin();
++ *crc = crc_t10dif_pmull(*crc, data, length);
++ kernel_neon_end();
++ } else {
++ *crc = crc_t10dif_generic(*crc, data, length);
+ }
+
+ return 0;
--- /dev/null
+From ba7d7433a0e998c902132bd47330e355a1eaa894 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Sun, 6 Jan 2019 18:47:42 -0800
+Subject: crypto: hash - set CRYPTO_TFM_NEED_KEY if ->setkey() fails
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit ba7d7433a0e998c902132bd47330e355a1eaa894 upstream.
+
+Some algorithms have a ->setkey() method that is not atomic, in the
+sense that setting a key can fail after changes were already made to the
+tfm context. In this case, if a key was already set the tfm can end up
+in a state that corresponds to neither the old key nor the new key.
+
+It's not feasible to make all ->setkey() methods atomic, especially ones
+that have to key multiple sub-tfms. Therefore, make the crypto API set
+CRYPTO_TFM_NEED_KEY if ->setkey() fails and the algorithm requires a
+key, to prevent the tfm from being used until a new key is set.
+
+Note: we can't set CRYPTO_TFM_NEED_KEY for OPTIONAL_KEY algorithms, so
+->setkey() for those must nevertheless be atomic. That's fine for now
+since only the crc32 and crc32c algorithms set OPTIONAL_KEY, and it's
+not intended that OPTIONAL_KEY be used much.
+
+[Cc stable mainly because when introducing the NEED_KEY flag I changed
+ AF_ALG to rely on it; and unlike in-kernel crypto API users, AF_ALG
+ previously didn't have this problem. So these "incompletely keyed"
+ states became theoretically accessible via AF_ALG -- though, the
+ opportunities for causing real mischief seem pretty limited.]
+
+Fixes: 9fa68f620041 ("crypto: hash - prevent using keyed hashes without setting key")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/ahash.c | 28 +++++++++++++++++++---------
+ crypto/shash.c | 18 +++++++++++++-----
+ 2 files changed, 32 insertions(+), 14 deletions(-)
+
+--- a/crypto/ahash.c
++++ b/crypto/ahash.c
+@@ -190,6 +190,21 @@ static int ahash_setkey_unaligned(struct
+ return ret;
+ }
+
++static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
++ unsigned int keylen)
++{
++ return -ENOSYS;
++}
++
++static void ahash_set_needkey(struct crypto_ahash *tfm)
++{
++ const struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
++
++ if (tfm->setkey != ahash_nosetkey &&
++ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
++ crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
++}
++
+ int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+ {
+@@ -201,20 +216,16 @@ int crypto_ahash_setkey(struct crypto_ah
+ else
+ err = tfm->setkey(tfm, key, keylen);
+
+- if (err)
++ if (unlikely(err)) {
++ ahash_set_needkey(tfm);
+ return err;
++ }
+
+ crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(crypto_ahash_setkey);
+
+-static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
+- unsigned int keylen)
+-{
+- return -ENOSYS;
+-}
+-
+ static inline unsigned int ahash_align_buffer_size(unsigned len,
+ unsigned long mask)
+ {
+@@ -489,8 +500,7 @@ static int crypto_ahash_init_tfm(struct
+
+ if (alg->setkey) {
+ hash->setkey = alg->setkey;
+- if (!(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+- crypto_ahash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
++ ahash_set_needkey(hash);
+ }
+
+ return 0;
+--- a/crypto/shash.c
++++ b/crypto/shash.c
+@@ -53,6 +53,13 @@ static int shash_setkey_unaligned(struct
+ return err;
+ }
+
++static void shash_set_needkey(struct crypto_shash *tfm, struct shash_alg *alg)
++{
++ if (crypto_shash_alg_has_setkey(alg) &&
++ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
++ crypto_shash_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
++}
++
+ int crypto_shash_setkey(struct crypto_shash *tfm, const u8 *key,
+ unsigned int keylen)
+ {
+@@ -65,8 +72,10 @@ int crypto_shash_setkey(struct crypto_sh
+ else
+ err = shash->setkey(tfm, key, keylen);
+
+- if (err)
++ if (unlikely(err)) {
++ shash_set_needkey(tfm, shash);
+ return err;
++ }
+
+ crypto_shash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
+@@ -373,7 +382,8 @@ int crypto_init_shash_ops_async(struct c
+ crt->final = shash_async_final;
+ crt->finup = shash_async_finup;
+ crt->digest = shash_async_digest;
+- crt->setkey = shash_async_setkey;
++ if (crypto_shash_alg_has_setkey(alg))
++ crt->setkey = shash_async_setkey;
+
+ crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) &
+ CRYPTO_TFM_NEED_KEY);
+@@ -395,9 +405,7 @@ static int crypto_shash_init_tfm(struct
+
+ hash->descsize = alg->descsize;
+
+- if (crypto_shash_alg_has_setkey(alg) &&
+- !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY))
+- crypto_shash_set_flags(hash, CRYPTO_TFM_NEED_KEY);
++ shash_set_needkey(hash, alg);
+
+ return 0;
+ }
--- /dev/null
+From d644f1c8746ed24f81075480f9e9cb3777ae8d65 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 31 Jan 2019 23:51:37 -0800
+Subject: crypto: morus - fix handling chunked inputs
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit d644f1c8746ed24f81075480f9e9cb3777ae8d65 upstream.
+
+The generic MORUS implementations all fail the improved AEAD tests
+because they produce the wrong result with some data layouts. The issue
+is that they assume that if the skcipher_walk API gives 'nbytes' not
+aligned to the walksize (a.k.a. walk.stride), then it is the end of the
+data. In fact, this can happen before the end. Fix them.
+
+Fixes: 396be41f16fd ("crypto: morus - Add generic MORUS AEAD implementations")
+Cc: <stable@vger.kernel.org> # v4.18+
+Cc: Ondrej Mosnacek <omosnace@redhat.com>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Reviewed-by: Ondrej Mosnacek <omosnace@redhat.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/morus1280.c | 13 +++++++------
+ crypto/morus640.c | 13 +++++++------
+ 2 files changed, 14 insertions(+), 12 deletions(-)
+
+--- a/crypto/morus1280.c
++++ b/crypto/morus1280.c
+@@ -366,18 +366,19 @@ static void crypto_morus1280_process_cry
+ const struct morus1280_ops *ops)
+ {
+ struct skcipher_walk walk;
+- u8 *dst;
+- const u8 *src;
+
+ ops->skcipher_walk_init(&walk, req, false);
+
+ while (walk.nbytes) {
+- src = walk.src.virt.addr;
+- dst = walk.dst.virt.addr;
++ unsigned int nbytes = walk.nbytes;
+
+- ops->crypt_chunk(state, dst, src, walk.nbytes);
++ if (nbytes < walk.total)
++ nbytes = round_down(nbytes, walk.stride);
+
+- skcipher_walk_done(&walk, 0);
++ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
++ nbytes);
++
++ skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+ }
+
+--- a/crypto/morus640.c
++++ b/crypto/morus640.c
+@@ -365,18 +365,19 @@ static void crypto_morus640_process_cryp
+ const struct morus640_ops *ops)
+ {
+ struct skcipher_walk walk;
+- u8 *dst;
+- const u8 *src;
+
+ ops->skcipher_walk_init(&walk, req, false);
+
+ while (walk.nbytes) {
+- src = walk.src.virt.addr;
+- dst = walk.dst.virt.addr;
++ unsigned int nbytes = walk.nbytes;
+
+- ops->crypt_chunk(state, dst, src, walk.nbytes);
++ if (nbytes < walk.total)
++ nbytes = round_down(nbytes, walk.stride);
+
+- skcipher_walk_done(&walk, 0);
++ ops->crypt_chunk(state, walk.dst.virt.addr, walk.src.virt.addr,
++ nbytes);
++
++ skcipher_walk_done(&walk, walk.nbytes - nbytes);
+ }
+ }
+
--- /dev/null
+From 251b7aea34ba3c4d4fdfa9447695642eb8b8b098 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 3 Jan 2019 20:16:13 -0800
+Subject: crypto: pcbc - remove bogus memcpy()s with src == dest
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 251b7aea34ba3c4d4fdfa9447695642eb8b8b098 upstream.
+
+The memcpy()s in the PCBC implementation use walk->iv as both the source
+and destination, which has undefined behavior. These memcpy()'s are
+actually unneeded, because walk->iv is already used to hold the previous
+plaintext block XOR'd with the previous ciphertext block. Thus,
+walk->iv is already updated to its final value.
+
+So remove the broken and unnecessary memcpy()s.
+
+Fixes: 91652be5d1b9 ("[CRYPTO] pcbc: Add Propagated CBC template")
+Cc: <stable@vger.kernel.org> # v2.6.21+
+Cc: David Howells <dhowells@redhat.com>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/pcbc.c | 14 ++++----------
+ 1 file changed, 4 insertions(+), 10 deletions(-)
+
+--- a/crypto/pcbc.c
++++ b/crypto/pcbc.c
+@@ -51,7 +51,7 @@ static int crypto_pcbc_encrypt_segment(s
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+- u8 *iv = walk->iv;
++ u8 * const iv = walk->iv;
+
+ do {
+ crypto_xor(iv, src, bsize);
+@@ -72,7 +72,7 @@ static int crypto_pcbc_encrypt_inplace(s
+ int bsize = crypto_cipher_blocksize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+- u8 *iv = walk->iv;
++ u8 * const iv = walk->iv;
+ u8 tmpbuf[MAX_CIPHER_BLOCKSIZE];
+
+ do {
+@@ -84,8 +84,6 @@ static int crypto_pcbc_encrypt_inplace(s
+ src += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+- memcpy(walk->iv, iv, bsize);
+-
+ return nbytes;
+ }
+
+@@ -121,7 +119,7 @@ static int crypto_pcbc_decrypt_segment(s
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
+- u8 *iv = walk->iv;
++ u8 * const iv = walk->iv;
+
+ do {
+ crypto_cipher_decrypt_one(tfm, dst, src);
+@@ -132,8 +130,6 @@ static int crypto_pcbc_decrypt_segment(s
+ dst += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+- memcpy(walk->iv, iv, bsize);
+-
+ return nbytes;
+ }
+
+@@ -144,7 +140,7 @@ static int crypto_pcbc_decrypt_inplace(s
+ int bsize = crypto_cipher_blocksize(tfm);
+ unsigned int nbytes = walk->nbytes;
+ u8 *src = walk->src.virt.addr;
+- u8 *iv = walk->iv;
++ u8 * const iv = walk->iv;
+ u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32));
+
+ do {
+@@ -156,8 +152,6 @@ static int crypto_pcbc_decrypt_inplace(s
+ src += bsize;
+ } while ((nbytes -= bsize) >= bsize);
+
+- memcpy(walk->iv, iv, bsize);
+-
+ return nbytes;
+ }
+
--- /dev/null
+From b1f6b4bf416b49f00f3abc49c639371cdecaaad1 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Sun, 6 Jan 2019 18:47:43 -0800
+Subject: crypto: skcipher - set CRYPTO_TFM_NEED_KEY if ->setkey() fails
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit b1f6b4bf416b49f00f3abc49c639371cdecaaad1 upstream.
+
+Some algorithms have a ->setkey() method that is not atomic, in the
+sense that setting a key can fail after changes were already made to the
+tfm context. In this case, if a key was already set the tfm can end up
+in a state that corresponds to neither the old key nor the new key.
+
+For example, in lrw.c, if gf128mul_init_64k_bbe() fails due to lack of
+memory, then priv::table will be left NULL. After that, encryption with
+that tfm will cause a NULL pointer dereference.
+
+It's not feasible to make all ->setkey() methods atomic, especially ones
+that have to key multiple sub-tfms. Therefore, make the crypto API set
+CRYPTO_TFM_NEED_KEY if ->setkey() fails and the algorithm requires a
+key, to prevent the tfm from being used until a new key is set.
+
+[Cc stable mainly because when introducing the NEED_KEY flag I changed
+ AF_ALG to rely on it; and unlike in-kernel crypto API users, AF_ALG
+ previously didn't have this problem. So these "incompletely keyed"
+ states became theoretically accessible via AF_ALG -- though, the
+ opportunities for causing real mischief seem pretty limited.]
+
+Fixes: f8d33fac8480 ("crypto: skcipher - prevent using skciphers without setting key")
+Cc: <stable@vger.kernel.org> # v4.16+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/skcipher.c | 27 ++++++++++++++++++---------
+ 1 file changed, 18 insertions(+), 9 deletions(-)
+
+--- a/crypto/skcipher.c
++++ b/crypto/skcipher.c
+@@ -585,6 +585,12 @@ static unsigned int crypto_skcipher_exts
+ return crypto_alg_extsize(alg);
+ }
+
++static void skcipher_set_needkey(struct crypto_skcipher *tfm)
++{
++ if (tfm->keysize)
++ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
++}
++
+ static int skcipher_setkey_blkcipher(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+ {
+@@ -598,8 +604,10 @@ static int skcipher_setkey_blkcipher(str
+ err = crypto_blkcipher_setkey(blkcipher, key, keylen);
+ crypto_skcipher_set_flags(tfm, crypto_blkcipher_get_flags(blkcipher) &
+ CRYPTO_TFM_RES_MASK);
+- if (err)
++ if (unlikely(err)) {
++ skcipher_set_needkey(tfm);
+ return err;
++ }
+
+ crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
+@@ -677,8 +685,7 @@ static int crypto_init_skcipher_ops_blkc
+ skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
+ skcipher->keysize = calg->cra_blkcipher.max_keysize;
+
+- if (skcipher->keysize)
+- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
++ skcipher_set_needkey(skcipher);
+
+ return 0;
+ }
+@@ -698,8 +705,10 @@ static int skcipher_setkey_ablkcipher(st
+ crypto_skcipher_set_flags(tfm,
+ crypto_ablkcipher_get_flags(ablkcipher) &
+ CRYPTO_TFM_RES_MASK);
+- if (err)
++ if (unlikely(err)) {
++ skcipher_set_needkey(tfm);
+ return err;
++ }
+
+ crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
+@@ -776,8 +785,7 @@ static int crypto_init_skcipher_ops_ablk
+ sizeof(struct ablkcipher_request);
+ skcipher->keysize = calg->cra_ablkcipher.max_keysize;
+
+- if (skcipher->keysize)
+- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
++ skcipher_set_needkey(skcipher);
+
+ return 0;
+ }
+@@ -820,8 +828,10 @@ static int skcipher_setkey(struct crypto
+ else
+ err = cipher->setkey(tfm, key, keylen);
+
+- if (err)
++ if (unlikely(err)) {
++ skcipher_set_needkey(tfm);
+ return err;
++ }
+
+ crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
+ return 0;
+@@ -852,8 +862,7 @@ static int crypto_skcipher_init_tfm(stru
+ skcipher->ivsize = alg->ivsize;
+ skcipher->keysize = alg->max_keysize;
+
+- if (skcipher->keysize)
+- crypto_skcipher_set_flags(skcipher, CRYPTO_TFM_NEED_KEY);
++ skcipher_set_needkey(skcipher);
+
+ if (alg->exit)
+ skcipher->base.exit = crypto_skcipher_exit_tfm;
--- /dev/null
+From eb5e6730db98fcc4b51148b4a819fa4bf864ae54 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Wed, 23 Jan 2019 20:57:35 -0800
+Subject: crypto: testmgr - skip crc32c context test for ahash algorithms
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit eb5e6730db98fcc4b51148b4a819fa4bf864ae54 upstream.
+
+Instantiating "cryptd(crc32c)" causes a crypto self-test failure because
+the crypto_alloc_shash() in alg_test_crc32c() fails. This is because
+cryptd(crc32c) is an ahash algorithm, not a shash algorithm; so it can
+only be accessed through the ahash API, unlike shash algorithms which
+can be accessed through both the ahash and shash APIs.
+
+As the test is testing the shash descriptor format which is only
+applicable to shash algorithms, skip it for ahash algorithms.
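+
+Illustrative only (not part of the patch): shash implementations are
+reachable through both the shash and ahash APIs, whereas an ahash-only
+instantiation is not, which is why the shash lookup failing with
+-ENOENT is used to tell the two cases apart.
+
+  ahash = crypto_alloc_ahash("cryptd(crc32c)", 0, 0);  /* succeeds */
+  shash = crypto_alloc_shash("cryptd(crc32c)", 0, 0);  /* ERR_PTR(-ENOENT) */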
+
+(Note that it's still important to fix crypto self-test failures even
+ for weird algorithm instantiations like cryptd(crc32c) that no one
+ would really use; in fips_enabled mode unprivileged users can use them
+ to panic the kernel, and also they prevent treating a crypto self-test
+ failure as a bug when fuzzing the kernel.)
+
+Fixes: 8e3ee85e68c5 ("crypto: crc32c - Test descriptor context format")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ crypto/testmgr.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/crypto/testmgr.c
++++ b/crypto/testmgr.c
+@@ -1894,14 +1894,21 @@ static int alg_test_crc32c(const struct
+
+ err = alg_test_hash(desc, driver, type, mask);
+ if (err)
+- goto out;
++ return err;
+
+ tfm = crypto_alloc_shash(driver, type, mask);
+ if (IS_ERR(tfm)) {
++ if (PTR_ERR(tfm) == -ENOENT) {
++ /*
++ * This crc32c implementation is only available through
++ * ahash API, not the shash API, so the remaining part
++ * of the test is not applicable to it.
++ */
++ return 0;
++ }
+ printk(KERN_ERR "alg: crc32c: Failed to load transform for %s: "
+ "%ld\n", driver, PTR_ERR(tfm));
+- err = PTR_ERR(tfm);
+- goto out;
++ return PTR_ERR(tfm);
+ }
+
+ do {
+@@ -1928,7 +1935,6 @@ static int alg_test_crc32c(const struct
+
+ crypto_free_shash(tfm);
+
+-out:
+ return err;
+ }
+
--- /dev/null
+From ba6771c0a0bc2fac9d6a8759bab8493bd1cffe3b Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 31 Jan 2019 23:51:38 -0800
+Subject: crypto: x86/aegis - fix handling chunked inputs and MAY_SLEEP
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit ba6771c0a0bc2fac9d6a8759bab8493bd1cffe3b upstream.
+
+The x86 AEGIS implementations all fail the improved AEAD tests because
+they produce the wrong result with some data layouts. The issue is that
+they assume that if the skcipher_walk API gives 'nbytes' not aligned to
+the walksize (a.k.a. walk.stride), then it is the end of the data. In
+fact, this can happen before the end.
+
+Also, when the CRYPTO_TFM_REQ_MAY_SLEEP flag is given, they can
+incorrectly sleep in the skcipher_walk_*() functions while preemption
+has been disabled by kernel_fpu_begin().
+
+Fix these bugs.
+
+Fixes: 1d373d4e8e15 ("crypto: x86 - Add optimized AEGIS implementations")
+Cc: <stable@vger.kernel.org> # v4.18+
+Cc: Ondrej Mosnacek <omosnace@redhat.com>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Reviewed-by: Ondrej Mosnacek <omosnace@redhat.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/crypto/aegis128-aesni-glue.c | 38 +++++++++++++--------------------
+ arch/x86/crypto/aegis128l-aesni-glue.c | 38 +++++++++++++--------------------
+ arch/x86/crypto/aegis256-aesni-glue.c | 38 +++++++++++++--------------------
+ 3 files changed, 45 insertions(+), 69 deletions(-)
+
+--- a/arch/x86/crypto/aegis128-aesni-glue.c
++++ b/arch/x86/crypto/aegis128-aesni-glue.c
+@@ -119,31 +119,20 @@ static void crypto_aegis128_aesni_proces
+ }
+
+ static void crypto_aegis128_aesni_process_crypt(
+- struct aegis_state *state, struct aead_request *req,
++ struct aegis_state *state, struct skcipher_walk *walk,
+ const struct aegis_crypt_ops *ops)
+ {
+- struct skcipher_walk walk;
+- u8 *src, *dst;
+- unsigned int chunksize, base;
+-
+- ops->skcipher_walk_init(&walk, req, false);
+-
+- while (walk.nbytes) {
+- src = walk.src.virt.addr;
+- dst = walk.dst.virt.addr;
+- chunksize = walk.nbytes;
+-
+- ops->crypt_blocks(state, chunksize, src, dst);
+-
+- base = chunksize & ~(AEGIS128_BLOCK_SIZE - 1);
+- src += base;
+- dst += base;
+- chunksize &= AEGIS128_BLOCK_SIZE - 1;
+-
+- if (chunksize > 0)
+- ops->crypt_tail(state, chunksize, src, dst);
++ while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
++ ops->crypt_blocks(state,
++ round_down(walk->nbytes, AEGIS128_BLOCK_SIZE),
++ walk->src.virt.addr, walk->dst.virt.addr);
++ skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
++ }
+
+- skcipher_walk_done(&walk, 0);
++ if (walk->nbytes) {
++ ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
++ walk->dst.virt.addr);
++ skcipher_walk_done(walk, 0);
+ }
+ }
+
+@@ -186,13 +175,16 @@ static void crypto_aegis128_aesni_crypt(
+ {
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct aegis_ctx *ctx = crypto_aegis128_aesni_ctx(tfm);
++ struct skcipher_walk walk;
+ struct aegis_state state;
+
++ ops->skcipher_walk_init(&walk, req, true);
++
+ kernel_fpu_begin();
+
+ crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv);
+ crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
+- crypto_aegis128_aesni_process_crypt(&state, req, ops);
++ crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
+ crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
+
+ kernel_fpu_end();
+--- a/arch/x86/crypto/aegis128l-aesni-glue.c
++++ b/arch/x86/crypto/aegis128l-aesni-glue.c
+@@ -119,31 +119,20 @@ static void crypto_aegis128l_aesni_proce
+ }
+
+ static void crypto_aegis128l_aesni_process_crypt(
+- struct aegis_state *state, struct aead_request *req,
++ struct aegis_state *state, struct skcipher_walk *walk,
+ const struct aegis_crypt_ops *ops)
+ {
+- struct skcipher_walk walk;
+- u8 *src, *dst;
+- unsigned int chunksize, base;
+-
+- ops->skcipher_walk_init(&walk, req, false);
+-
+- while (walk.nbytes) {
+- src = walk.src.virt.addr;
+- dst = walk.dst.virt.addr;
+- chunksize = walk.nbytes;
+-
+- ops->crypt_blocks(state, chunksize, src, dst);
+-
+- base = chunksize & ~(AEGIS128L_BLOCK_SIZE - 1);
+- src += base;
+- dst += base;
+- chunksize &= AEGIS128L_BLOCK_SIZE - 1;
+-
+- if (chunksize > 0)
+- ops->crypt_tail(state, chunksize, src, dst);
++ while (walk->nbytes >= AEGIS128L_BLOCK_SIZE) {
++ ops->crypt_blocks(state, round_down(walk->nbytes,
++ AEGIS128L_BLOCK_SIZE),
++ walk->src.virt.addr, walk->dst.virt.addr);
++ skcipher_walk_done(walk, walk->nbytes % AEGIS128L_BLOCK_SIZE);
++ }
+
+- skcipher_walk_done(&walk, 0);
++ if (walk->nbytes) {
++ ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
++ walk->dst.virt.addr);
++ skcipher_walk_done(walk, 0);
+ }
+ }
+
+@@ -186,13 +175,16 @@ static void crypto_aegis128l_aesni_crypt
+ {
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct aegis_ctx *ctx = crypto_aegis128l_aesni_ctx(tfm);
++ struct skcipher_walk walk;
+ struct aegis_state state;
+
++ ops->skcipher_walk_init(&walk, req, true);
++
+ kernel_fpu_begin();
+
+ crypto_aegis128l_aesni_init(&state, ctx->key.bytes, req->iv);
+ crypto_aegis128l_aesni_process_ad(&state, req->src, req->assoclen);
+- crypto_aegis128l_aesni_process_crypt(&state, req, ops);
++ crypto_aegis128l_aesni_process_crypt(&state, &walk, ops);
+ crypto_aegis128l_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
+
+ kernel_fpu_end();
+--- a/arch/x86/crypto/aegis256-aesni-glue.c
++++ b/arch/x86/crypto/aegis256-aesni-glue.c
+@@ -119,31 +119,20 @@ static void crypto_aegis256_aesni_proces
+ }
+
+ static void crypto_aegis256_aesni_process_crypt(
+- struct aegis_state *state, struct aead_request *req,
++ struct aegis_state *state, struct skcipher_walk *walk,
+ const struct aegis_crypt_ops *ops)
+ {
+- struct skcipher_walk walk;
+- u8 *src, *dst;
+- unsigned int chunksize, base;
+-
+- ops->skcipher_walk_init(&walk, req, false);
+-
+- while (walk.nbytes) {
+- src = walk.src.virt.addr;
+- dst = walk.dst.virt.addr;
+- chunksize = walk.nbytes;
+-
+- ops->crypt_blocks(state, chunksize, src, dst);
+-
+- base = chunksize & ~(AEGIS256_BLOCK_SIZE - 1);
+- src += base;
+- dst += base;
+- chunksize &= AEGIS256_BLOCK_SIZE - 1;
+-
+- if (chunksize > 0)
+- ops->crypt_tail(state, chunksize, src, dst);
++ while (walk->nbytes >= AEGIS256_BLOCK_SIZE) {
++ ops->crypt_blocks(state,
++ round_down(walk->nbytes, AEGIS256_BLOCK_SIZE),
++ walk->src.virt.addr, walk->dst.virt.addr);
++ skcipher_walk_done(walk, walk->nbytes % AEGIS256_BLOCK_SIZE);
++ }
+
+- skcipher_walk_done(&walk, 0);
++ if (walk->nbytes) {
++ ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
++ walk->dst.virt.addr);
++ skcipher_walk_done(walk, 0);
+ }
+ }
+
+@@ -186,13 +175,16 @@ static void crypto_aegis256_aesni_crypt(
+ {
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct aegis_ctx *ctx = crypto_aegis256_aesni_ctx(tfm);
++ struct skcipher_walk walk;
+ struct aegis_state state;
+
++ ops->skcipher_walk_init(&walk, req, true);
++
+ kernel_fpu_begin();
+
+ crypto_aegis256_aesni_init(&state, ctx->key, req->iv);
+ crypto_aegis256_aesni_process_ad(&state, req->src, req->assoclen);
+- crypto_aegis256_aesni_process_crypt(&state, req, ops);
++ crypto_aegis256_aesni_process_crypt(&state, &walk, ops);
+ crypto_aegis256_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
+
+ kernel_fpu_end();
--- /dev/null
+From 3af349639597fea582a93604734d717e59a0e223 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 31 Jan 2019 23:51:40 -0800
+Subject: crypto: x86/aesni-gcm - fix crash on empty plaintext
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 3af349639597fea582a93604734d717e59a0e223 upstream.
+
+gcmaes_crypt_by_sg() dereferences the NULL pointer returned by
+scatterwalk_ffwd() when encrypting an empty plaintext and the source
+scatterlist ends immediately after the associated data.
+
+Fix it by only fast-forwarding to the src/dst data scatterlists if the
+data length is nonzero.
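+
+A sketch of the guard (here 'left' is the byte count remaining after
+the associated data, as computed earlier in gcmaes_crypt_by_sg()):
+
+    if (left) {
+        src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
+        scatterwalk_start(&src_sg_walk, src_sg);
+        ...
+    }
+
+With zero data bytes there is nothing to walk, so the (possibly NULL)
+fast-forwarded scatterlist is never touched.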
+
+This bug is reproduced by the "rfc4543(gcm(aes))" test vectors when run
+with the new AEAD test manager.
+
+Fixes: e845520707f8 ("crypto: aesni - Update aesni-intel_glue to use scatter/gather")
+Cc: <stable@vger.kernel.org> # v4.17+
+Cc: Dave Watson <davejwatson@fb.com>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/crypto/aesni-intel_glue.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -821,11 +821,14 @@ static int gcmaes_crypt_by_sg(bool enc,
+ scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
+ }
+
+- src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
+- scatterwalk_start(&src_sg_walk, src_sg);
+- if (req->src != req->dst) {
+- dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen);
+- scatterwalk_start(&dst_sg_walk, dst_sg);
++ if (left) {
++ src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
++ scatterwalk_start(&src_sg_walk, src_sg);
++ if (req->src != req->dst) {
++ dst_sg = scatterwalk_ffwd(dst_start, req->dst,
++ req->assoclen);
++ scatterwalk_start(&dst_sg_walk, dst_sg);
++ }
+ }
+
+ kernel_fpu_begin();
--- /dev/null
+From 2060e284e9595fc3baed6e035903c05b93266555 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 31 Jan 2019 23:51:39 -0800
+Subject: crypto: x86/morus - fix handling chunked inputs and MAY_SLEEP
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 2060e284e9595fc3baed6e035903c05b93266555 upstream.
+
+The x86 MORUS implementations all fail the improved AEAD tests because
+they produce the wrong result with some data layouts. The issue is that
+they assume that if the skcipher_walk API gives 'nbytes' not aligned to
+the walksize (a.k.a. walk.stride), then it is the end of the data. In
+fact, this can happen before the end.
+
+Also, when the CRYPTO_TFM_REQ_MAY_SLEEP flag is given, they can
+incorrectly sleep in the skcipher_walk_*() functions while preemption
+has been disabled by kernel_fpu_begin().
+
+Fix these bugs.
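+
+The structural fix mirrors the AEGIS one above: start the walk before
+kernel_fpu_begin() and mark it atomic, then hand the unprocessed
+remainder back to skcipher_walk_done() after each full-block chunk
+(sketch only):
+
+    ops.skcipher_walk_init(&walk, req, true);   /* atomic: must not sleep */
+    kernel_fpu_begin();
+    /* ... full MORUS blocks, then crypt_tail() for any partial tail ... */
+    kernel_fpu_end();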
+
+Fixes: 56e8e57fc3a7 ("crypto: morus - Add common SIMD glue code for MORUS")
+Cc: <stable@vger.kernel.org> # v4.18+
+Cc: Ondrej Mosnacek <omosnace@redhat.com>
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Reviewed-by: Ondrej Mosnacek <omosnace@redhat.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/crypto/morus1280_glue.c | 40 +++++++++++++++------------------------
+ arch/x86/crypto/morus640_glue.c | 39 ++++++++++++++------------------------
+ 2 files changed, 31 insertions(+), 48 deletions(-)
+
+--- a/arch/x86/crypto/morus1280_glue.c
++++ b/arch/x86/crypto/morus1280_glue.c
+@@ -85,31 +85,20 @@ static void crypto_morus1280_glue_proces
+
+ static void crypto_morus1280_glue_process_crypt(struct morus1280_state *state,
+ struct morus1280_ops ops,
+- struct aead_request *req)
++ struct skcipher_walk *walk)
+ {
+- struct skcipher_walk walk;
+- u8 *cursor_src, *cursor_dst;
+- unsigned int chunksize, base;
+-
+- ops.skcipher_walk_init(&walk, req, false);
+-
+- while (walk.nbytes) {
+- cursor_src = walk.src.virt.addr;
+- cursor_dst = walk.dst.virt.addr;
+- chunksize = walk.nbytes;
+-
+- ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);
+-
+- base = chunksize & ~(MORUS1280_BLOCK_SIZE - 1);
+- cursor_src += base;
+- cursor_dst += base;
+- chunksize &= MORUS1280_BLOCK_SIZE - 1;
+-
+- if (chunksize > 0)
+- ops.crypt_tail(state, cursor_src, cursor_dst,
+- chunksize);
++ while (walk->nbytes >= MORUS1280_BLOCK_SIZE) {
++ ops.crypt_blocks(state, walk->src.virt.addr,
++ walk->dst.virt.addr,
++ round_down(walk->nbytes,
++ MORUS1280_BLOCK_SIZE));
++ skcipher_walk_done(walk, walk->nbytes % MORUS1280_BLOCK_SIZE);
++ }
+
+- skcipher_walk_done(&walk, 0);
++ if (walk->nbytes) {
++ ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
++ walk->nbytes);
++ skcipher_walk_done(walk, 0);
+ }
+ }
+
+@@ -147,12 +136,15 @@ static void crypto_morus1280_glue_crypt(
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct morus1280_ctx *ctx = crypto_aead_ctx(tfm);
+ struct morus1280_state state;
++ struct skcipher_walk walk;
++
++ ops.skcipher_walk_init(&walk, req, true);
+
+ kernel_fpu_begin();
+
+ ctx->ops->init(&state, &ctx->key, req->iv);
+ crypto_morus1280_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
+- crypto_morus1280_glue_process_crypt(&state, ops, req);
++ crypto_morus1280_glue_process_crypt(&state, ops, &walk);
+ ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
+
+ kernel_fpu_end();
+--- a/arch/x86/crypto/morus640_glue.c
++++ b/arch/x86/crypto/morus640_glue.c
+@@ -85,31 +85,19 @@ static void crypto_morus640_glue_process
+
+ static void crypto_morus640_glue_process_crypt(struct morus640_state *state,
+ struct morus640_ops ops,
+- struct aead_request *req)
++ struct skcipher_walk *walk)
+ {
+- struct skcipher_walk walk;
+- u8 *cursor_src, *cursor_dst;
+- unsigned int chunksize, base;
+-
+- ops.skcipher_walk_init(&walk, req, false);
+-
+- while (walk.nbytes) {
+- cursor_src = walk.src.virt.addr;
+- cursor_dst = walk.dst.virt.addr;
+- chunksize = walk.nbytes;
+-
+- ops.crypt_blocks(state, cursor_src, cursor_dst, chunksize);
+-
+- base = chunksize & ~(MORUS640_BLOCK_SIZE - 1);
+- cursor_src += base;
+- cursor_dst += base;
+- chunksize &= MORUS640_BLOCK_SIZE - 1;
+-
+- if (chunksize > 0)
+- ops.crypt_tail(state, cursor_src, cursor_dst,
+- chunksize);
++ while (walk->nbytes >= MORUS640_BLOCK_SIZE) {
++ ops.crypt_blocks(state, walk->src.virt.addr,
++ walk->dst.virt.addr,
++ round_down(walk->nbytes, MORUS640_BLOCK_SIZE));
++ skcipher_walk_done(walk, walk->nbytes % MORUS640_BLOCK_SIZE);
++ }
+
+- skcipher_walk_done(&walk, 0);
++ if (walk->nbytes) {
++ ops.crypt_tail(state, walk->src.virt.addr, walk->dst.virt.addr,
++ walk->nbytes);
++ skcipher_walk_done(walk, 0);
+ }
+ }
+
+@@ -143,12 +131,15 @@ static void crypto_morus640_glue_crypt(s
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct morus640_ctx *ctx = crypto_aead_ctx(tfm);
+ struct morus640_state state;
++ struct skcipher_walk walk;
++
++ ops.skcipher_walk_init(&walk, req, true);
+
+ kernel_fpu_begin();
+
+ ctx->ops->init(&state, &ctx->key, req->iv);
+ crypto_morus640_glue_process_ad(&state, ctx->ops, req->src, req->assoclen);
+- crypto_morus640_glue_process_crypt(&state, ops, req);
++ crypto_morus640_glue_process_crypt(&state, ops, &walk);
+ ctx->ops->final(&state, tag_xor, req->assoclen, cryptlen);
+
+ kernel_fpu_end();
libnvdimm-pmem-honor-force_raw-for-legacy-pmem-regions.patch
libnvdimm-fix-altmap-reservation-size-calculation.patch
fix-cgroup_do_mount-handling-of-failure-exits.patch
+crypto-aead-set-crypto_tfm_need_key-if-setkey-fails.patch
+crypto-aegis-fix-handling-chunked-inputs.patch
+crypto-arm-crct10dif-revert-to-c-code-for-short-inputs.patch
+crypto-arm64-aes-neonbs-fix-returning-final-keystream-block.patch
+crypto-arm64-crct10dif-revert-to-c-code-for-short-inputs.patch
+crypto-hash-set-crypto_tfm_need_key-if-setkey-fails.patch
+crypto-morus-fix-handling-chunked-inputs.patch
+crypto-pcbc-remove-bogus-memcpy-s-with-src-dest.patch
+crypto-skcipher-set-crypto_tfm_need_key-if-setkey-fails.patch
+crypto-testmgr-skip-crc32c-context-test-for-ahash-algorithms.patch
+crypto-x86-aegis-fix-handling-chunked-inputs-and-may_sleep.patch
+crypto-x86-aesni-gcm-fix-crash-on-empty-plaintext.patch
+crypto-x86-morus-fix-handling-chunked-inputs-and-may_sleep.patch
+crypto-arm64-aes-ccm-fix-logical-bug-in-aad-mac-handling.patch
+crypto-arm64-aes-ccm-fix-bugs-in-non-neon-fallback-routine.patch
+cifs-fix-leaking-locked-vfs-cache-pages-in-writeback-retry.patch
+cifs-do-not-reset-lease-state-to-none-on-lease-break.patch
+cifs-do-not-skip-smb2-message-ids-on-send-failures.patch
+cifs-fix-read-after-write-for-files-with-read-caching.patch
+smb3-make-default-i-o-size-for-smb3-mounts-larger.patch
+tracing-use-strncpy-instead-of-memcpy-for-string-keys-in-hist-triggers.patch
+tracing-do-not-free-iter-trace-in-fail-path-of-tracing_open_pipe.patch
+tracing-perf-use-strndup_user-instead-of-buggy-open-coded-version.patch
+vmw_balloon-release-lock-on-error-in-vmballoon_reset.patch
+xen-fix-dom0-boot-on-huge-systems.patch
+acpi-device_sysfs-avoid-of-modalias-creation-for-removed-device.patch
--- /dev/null
+From e8506d25f740fd058791cc12a6dfa9386ada6b96 Mon Sep 17 00:00:00 2001
+From: Steve French <stfrench@microsoft.com>
+Date: Thu, 28 Feb 2019 21:32:15 -0600
+Subject: smb3: make default i/o size for smb3 mounts larger
+
+From: Steve French <stfrench@microsoft.com>
+
+commit e8506d25f740fd058791cc12a6dfa9386ada6b96 upstream.
+
+We negotiate rsize (and the user can override it) to typically 4MB,
+so advertising a larger default I/O size to userspace (a 1MB default
+I/O size returned by stat) gives much better performance in most SMB3
+use cases, and not just over long-latency network connections, than
+the old default I/O size (which ends up being 128K for cp and can be
+even smaller). The old default can be 4x slower or worse depending on
+network latency.
+
+By changing the inode blocksize from 32K (which was perhaps ok for
+very old SMB1/CIFS) to a larger value of 1MB (still less than the 4MB
+maximum negotiated with the server, in order to minimize risk), this
+significantly improves performance for the noncached case and
+slightly improves it for the cached case. The value can be changed by
+the user at mount time (bsize= accepts values from 16K to 16MB) to
+tune for applications that depend on the blocksize.
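+
+For illustration only (the mount point and file are hypothetical), the
+value that tools like cp consult is the st_blksize reported by stat(2),
+which this patch raises to 1MB by default:
+
+    #include <stdio.h>
+    #include <sys/stat.h>
+
+    int main(void)
+    {
+        struct stat st;
+
+        if (stat("/mnt/smb3/file", &st) == 0)
+            printf("preferred I/O size: %ld\n", (long)st.st_blksize);
+        return 0;
+    }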
+
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
+CC: Stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/cifs_fs_sb.h | 1 +
+ fs/cifs/cifsfs.c | 1 +
+ fs/cifs/cifsglob.h | 1 +
+ fs/cifs/connect.c | 26 ++++++++++++++++++++++++--
+ fs/cifs/inode.c | 2 +-
+ 5 files changed, 28 insertions(+), 3 deletions(-)
+
+--- a/fs/cifs/cifs_fs_sb.h
++++ b/fs/cifs/cifs_fs_sb.h
+@@ -58,6 +58,7 @@ struct cifs_sb_info {
+ spinlock_t tlink_tree_lock;
+ struct tcon_link *master_tlink;
+ struct nls_table *local_nls;
++ unsigned int bsize;
+ unsigned int rsize;
+ unsigned int wsize;
+ unsigned long actimeo; /* attribute cache timeout (jiffies) */
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -554,6 +554,7 @@ cifs_show_options(struct seq_file *s, st
+
+ seq_printf(s, ",rsize=%u", cifs_sb->rsize);
+ seq_printf(s, ",wsize=%u", cifs_sb->wsize);
++ seq_printf(s, ",bsize=%u", cifs_sb->bsize);
+ seq_printf(s, ",echo_interval=%lu",
+ tcon->ses->server->echo_interval / HZ);
+ if (tcon->snapshot_time)
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -559,6 +559,7 @@ struct smb_vol {
+ bool resilient:1; /* noresilient not required since not fored for CA */
+ bool domainauto:1;
+ bool rdma:1;
++ unsigned int bsize;
+ unsigned int rsize;
+ unsigned int wsize;
+ bool sockopt_tcp_nodelay:1;
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -102,7 +102,7 @@ enum {
+ Opt_backupuid, Opt_backupgid, Opt_uid,
+ Opt_cruid, Opt_gid, Opt_file_mode,
+ Opt_dirmode, Opt_port,
+- Opt_rsize, Opt_wsize, Opt_actimeo,
++ Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo,
+ Opt_echo_interval, Opt_max_credits,
+ Opt_snapshot,
+
+@@ -204,6 +204,7 @@ static const match_table_t cifs_mount_op
+ { Opt_dirmode, "dirmode=%s" },
+ { Opt_dirmode, "dir_mode=%s" },
+ { Opt_port, "port=%s" },
++ { Opt_blocksize, "bsize=%s" },
+ { Opt_rsize, "rsize=%s" },
+ { Opt_wsize, "wsize=%s" },
+ { Opt_actimeo, "actimeo=%s" },
+@@ -1571,7 +1572,7 @@ cifs_parse_mount_options(const char *mou
+ vol->cred_uid = current_uid();
+ vol->linux_uid = current_uid();
+ vol->linux_gid = current_gid();
+-
++ vol->bsize = 1024 * 1024; /* can improve cp performance significantly */
+ /*
+ * default to SFM style remapping of seven reserved characters
+ * unless user overrides it or we negotiate CIFS POSIX where
+@@ -1944,6 +1945,26 @@ cifs_parse_mount_options(const char *mou
+ }
+ port = (unsigned short)option;
+ break;
++ case Opt_blocksize:
++ if (get_option_ul(args, &option)) {
++ cifs_dbg(VFS, "%s: Invalid blocksize value\n",
++ __func__);
++ goto cifs_parse_mount_err;
++ }
++ /*
++ * inode blocksize realistically should never need to be
++ * less than 16K or greater than 16M and default is 1MB.
++ * Note that small inode block sizes (e.g. 64K) can lead
++ * to very poor performance of common tools like cp and scp
++ */
++ if ((option < CIFS_MAX_MSGSIZE) ||
++ (option > (4 * SMB3_DEFAULT_IOSIZE))) {
++ cifs_dbg(VFS, "%s: Invalid blocksize\n",
++ __func__);
++ goto cifs_parse_mount_err;
++ }
++ vol->bsize = option;
++ break;
+ case Opt_rsize:
+ if (get_option_ul(args, &option)) {
+ cifs_dbg(VFS, "%s: Invalid rsize value\n",
+@@ -3839,6 +3860,7 @@ int cifs_setup_cifs_sb(struct smb_vol *p
+ spin_lock_init(&cifs_sb->tlink_tree_lock);
+ cifs_sb->tlink_tree = RB_ROOT;
+
++ cifs_sb->bsize = pvolume_info->bsize;
+ /*
+ * Temporarily set r/wsize for matching superblock. If we end up using
+ * new sb then client will later negotiate it downward if needed.
+--- a/fs/cifs/inode.c
++++ b/fs/cifs/inode.c
+@@ -2080,7 +2080,7 @@ int cifs_getattr(const struct path *path
+ return rc;
+
+ generic_fillattr(inode, stat);
+- stat->blksize = CIFS_MAX_MSGSIZE;
++ stat->blksize = cifs_sb->bsize;
+ stat->ino = CIFS_I(inode)->uniqueid;
+
+ /* old CIFS Unix Extensions doesn't return create time */
--- /dev/null
+From e7f0c424d0806b05d6f47be9f202b037eb701707 Mon Sep 17 00:00:00 2001
+From: "zhangyi (F)" <yi.zhang@huawei.com>
+Date: Wed, 13 Feb 2019 20:29:06 +0800
+Subject: tracing: Do not free iter->trace in fail path of tracing_open_pipe()
+
+From: zhangyi (F) <yi.zhang@huawei.com>
+
+commit e7f0c424d0806b05d6f47be9f202b037eb701707 upstream.
+
+Commit d716ff71dd12 ("tracing: Remove taking of trace_types_lock in
+pipe files") made tracing_open_pipe() use the current tracer instead
+of a private copy, but it forgot to remove the corresponding kfree()
+in the error path.
+
+There's an error path that can call kfree(iter->trace) after
+iter->trace has been set to tr->current_trace, and freeing the
+current tracer would be bad.
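+
+Roughly, the problematic flow looked like this (sketch, not the exact
+code):
+
+    iter->trace = tr->current_trace;   /* shared pointer, not a copy */
+    ...
+fail:
+    kfree(iter->trace);                /* would free the live tracer */
+    kfree(iter);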
+
+Link: http://lkml.kernel.org/r/1550060946-45984-1-git-send-email-yi.zhang@huawei.com
+
+Cc: stable@vger.kernel.org
+Fixes: d716ff71dd12 ("tracing: Remove taking of trace_types_lock in pipe files")
+Signed-off-by: zhangyi (F) <yi.zhang@huawei.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5626,7 +5626,6 @@ out:
+ return ret;
+
+ fail:
+- kfree(iter->trace);
+ kfree(iter);
+ __trace_array_put(tr);
+ mutex_unlock(&trace_types_lock);
--- /dev/null
+From 83540fbc8812a580b6ad8f93f4c29e62e417687e Mon Sep 17 00:00:00 2001
+From: Jann Horn <jannh@google.com>
+Date: Wed, 20 Feb 2019 17:54:43 +0100
+Subject: tracing/perf: Use strndup_user() instead of buggy open-coded version
+
+From: Jann Horn <jannh@google.com>
+
+commit 83540fbc8812a580b6ad8f93f4c29e62e417687e upstream.
+
+The first version of this method was missing the check for
+`ret == PATH_MAX`; then such a check was added, but it didn't call kfree()
+on error, so there was still a small memory leak in the error case.
+Fix it by using strndup_user() instead of open-coding it.
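+
+strndup_user() reports an overlong string as ERR_PTR(-EINVAL), which
+the patch maps back to -E2BIG to preserve the old return value
+(sketch of the resulting pattern):
+
+    path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
+                        PATH_MAX);
+    if (IS_ERR(path)) {
+        ret = PTR_ERR(path);
+        return (ret == -EINVAL) ? -E2BIG : ret;
+    }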
+
+Link: http://lkml.kernel.org/r/20190220165443.152385-1-jannh@google.com
+
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: stable@vger.kernel.org
+Fixes: 0eadcc7a7bc0 ("perf/core: Fix perf_uprobe_init()")
+Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
+Acked-by: Song Liu <songliubraving@fb.com>
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_event_perf.c | 16 +++++++---------
+ 1 file changed, 7 insertions(+), 9 deletions(-)
+
+--- a/kernel/trace/trace_event_perf.c
++++ b/kernel/trace/trace_event_perf.c
+@@ -299,15 +299,13 @@ int perf_uprobe_init(struct perf_event *
+
+ if (!p_event->attr.uprobe_path)
+ return -EINVAL;
+- path = kzalloc(PATH_MAX, GFP_KERNEL);
+- if (!path)
+- return -ENOMEM;
+- ret = strncpy_from_user(
+- path, u64_to_user_ptr(p_event->attr.uprobe_path), PATH_MAX);
+- if (ret == PATH_MAX)
+- return -E2BIG;
+- if (ret < 0)
+- goto out;
++
++ path = strndup_user(u64_to_user_ptr(p_event->attr.uprobe_path),
++ PATH_MAX);
++ if (IS_ERR(path)) {
++ ret = PTR_ERR(path);
++ return (ret == -EINVAL) ? -E2BIG : ret;
++ }
+ if (path[0] == '\0') {
+ ret = -EINVAL;
+ goto out;
--- /dev/null
+From 9f0bbf3115ca9f91f43b7c74e9ac7d79f47fc6c2 Mon Sep 17 00:00:00 2001
+From: Tom Zanussi <tom.zanussi@linux.intel.com>
+Date: Mon, 4 Feb 2019 15:07:24 -0600
+Subject: tracing: Use strncpy instead of memcpy for string keys in hist triggers
+
+From: Tom Zanussi <tom.zanussi@linux.intel.com>
+
+commit 9f0bbf3115ca9f91f43b7c74e9ac7d79f47fc6c2 upstream.
+
+Because there may be random garbage beyond a string's null terminator,
+it's not correct to copy the complete character array for use as a
+hist trigger key. This results in multiple histogram entries for the
+'same' string key.
+
+So, in the case of a string key, use strncpy instead of memcpy to
+avoid copying in the extra bytes.
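+
+The difference in one line each (size is capped at key_field->size - 1,
+as in add_to_key()):
+
+    /* copies whatever garbage follows '\0', up to 'size' bytes: */
+    memcpy(compound_key + key_field->offset, key, size);
+
+    /* stops at '\0' and zero-fills the remainder of 'size': */
+    strncpy(compound_key + key_field->offset, (char *)key, size);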
+
+Before, using the gdbus entries in the following hist trigger as an
+example:
+
+ # echo 'hist:key=comm' > /sys/kernel/debug/tracing/events/sched/sched_waking/trigger
+ # cat /sys/kernel/debug/tracing/events/sched/sched_waking/hist
+
+ ...
+
+ { comm: ImgDecoder #4 } hitcount: 203
+ { comm: gmain } hitcount: 213
+ { comm: gmain } hitcount: 216
+ { comm: StreamTrans #73 } hitcount: 221
+ { comm: mozStorage #3 } hitcount: 230
+ { comm: gdbus } hitcount: 233
+ { comm: StyleThread#5 } hitcount: 253
+ { comm: gdbus } hitcount: 256
+ { comm: gdbus } hitcount: 260
+ { comm: StyleThread#4 } hitcount: 271
+
+ ...
+
+ # cat /sys/kernel/debug/tracing/events/sched/sched_waking/hist | egrep gdbus | wc -l
+ 51
+
+After:
+
+ # cat /sys/kernel/debug/tracing/events/sched/sched_waking/hist | egrep gdbus | wc -l
+ 1
+
+Link: http://lkml.kernel.org/r/50c35ae1267d64eee975b8125e151e600071d4dc.1549309756.git.tom.zanussi@linux.intel.com
+
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: stable@vger.kernel.org
+Fixes: 79e577cbce4c4 ("tracing: Support string type key properly")
+Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_events_hist.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/kernel/trace/trace_events_hist.c
++++ b/kernel/trace/trace_events_hist.c
+@@ -4695,9 +4695,10 @@ static inline void add_to_key(char *comp
+ /* ensure NULL-termination */
+ if (size > key_field->size - 1)
+ size = key_field->size - 1;
+- }
+
+- memcpy(compound_key + key_field->offset, key, size);
++ strncpy(compound_key + key_field->offset, (char *)key, size);
++ } else
++ memcpy(compound_key + key_field->offset, key, size);
+ }
+
+ static void
--- /dev/null
+From d04071a5d6413b65f17f7bd6e2bdb22e22e4ace7 Mon Sep 17 00:00:00 2001
+From: Dan Carpenter <dan.carpenter@oracle.com>
+Date: Mon, 11 Feb 2019 21:45:45 +0300
+Subject: vmw_balloon: release lock on error in vmballoon_reset()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+commit d04071a5d6413b65f17f7bd6e2bdb22e22e4ace7 upstream.
+
+We added some locking to this function but forgot to drop the lock on
+these two error paths. This bug would lead to an immediate deadlock.
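+
+Sketch of the fix (vmballoon_reset() runs with conf_sem held for
+write):
+
+    if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
+        goto unlock;            /* was: return, leaving conf_sem held */
+    ...
+unlock:
+    up_write(&b->conf_sem);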
+
+Fixes: c7b3690fb152 ("vmw_balloon: stats rework")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Nadav Amit <namit@vmware.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/misc/vmw_balloon.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/misc/vmw_balloon.c
++++ b/drivers/misc/vmw_balloon.c
+@@ -1287,7 +1287,7 @@ static void vmballoon_reset(struct vmbal
+ vmballoon_pop(b);
+
+ if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
+- return;
++ goto unlock;
+
+ if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
+ if (vmballoon_init_batching(b)) {
+@@ -1298,7 +1298,7 @@ static void vmballoon_reset(struct vmbal
+ * The guest will retry in one second.
+ */
+ vmballoon_send_start(b, 0);
+- return;
++ goto unlock;
+ }
+ } else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
+ vmballoon_deinit_batching(b);
+@@ -1314,6 +1314,7 @@ static void vmballoon_reset(struct vmbal
+ if (vmballoon_send_guest_id(b))
+ pr_err("failed to send guest ID to the host\n");
+
++unlock:
+ up_write(&b->conf_sem);
+ }
+
--- /dev/null
+From 01bd2ac2f55a1916d81dace12fa8d7ae1c79b5ea Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Thu, 7 Mar 2019 10:11:19 +0100
+Subject: xen: fix dom0 boot on huge systems
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 01bd2ac2f55a1916d81dace12fa8d7ae1c79b5ea upstream.
+
+Commit f7c90c2aa40048 ("x86/xen: don't write ptes directly in 32-bit
+PV guests") introduced a regression for booting dom0 on huge systems
+with lots of RAM (in the TB range).
+
+Reason is that on those hosts the p2m list needs to be moved early in
+the boot process and this requires temporary page tables to be created.
+Said commit modified xen_set_pte_init() to use a hypercall for writing
+a PTE, but this requires the page table being in the direct mapped
+area, which is not the case for the temporary page tables used in
+xen_relocate_p2m().
+
+As the page tables are completely written before being linked into
+the actual address space, a plain write to memory can be used in
+xen_relocate_p2m() instead of set_pte().
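+
+Concretely, the temporary tables are now filled with plain stores,
+e.g. (sketch):
+
+    pt[idx_pte] = pfn_pte(p2m_pfn, PAGE_KERNEL);   /* no hypercall needed */
+
+rather than set_pte(), which after the commit named above goes through
+a hypercall that requires the PTE to be in the direct-mapped area.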
+
+Fixes: f7c90c2aa40048 ("x86/xen: don't write ptes directly in 32-bit PV guests")
+Cc: stable@vger.kernel.org
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/xen/mmu_pv.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/xen/mmu_pv.c
++++ b/arch/x86/xen/mmu_pv.c
+@@ -2114,10 +2114,10 @@ void __init xen_relocate_p2m(void)
+ pt = early_memremap(pt_phys, PAGE_SIZE);
+ clear_page(pt);
+ for (idx_pte = 0;
+- idx_pte < min(n_pte, PTRS_PER_PTE);
+- idx_pte++) {
+- set_pte(pt + idx_pte,
+- pfn_pte(p2m_pfn, PAGE_KERNEL));
++ idx_pte < min(n_pte, PTRS_PER_PTE);
++ idx_pte++) {
++ pt[idx_pte] = pfn_pte(p2m_pfn,
++ PAGE_KERNEL);
+ p2m_pfn++;
+ }
+ n_pte -= PTRS_PER_PTE;
+@@ -2125,8 +2125,7 @@ void __init xen_relocate_p2m(void)
+ make_lowmem_page_readonly(__va(pt_phys));
+ pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
+ PFN_DOWN(pt_phys));
+- set_pmd(pmd + idx_pt,
+- __pmd(_PAGE_TABLE | pt_phys));
++ pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys);
+ pt_phys += PAGE_SIZE;
+ }
+ n_pt -= PTRS_PER_PMD;
+@@ -2134,7 +2133,7 @@ void __init xen_relocate_p2m(void)
+ make_lowmem_page_readonly(__va(pmd_phys));
+ pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
+ PFN_DOWN(pmd_phys));
+- set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
++ pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys);
+ pmd_phys += PAGE_SIZE;
+ }
+ n_pmd -= PTRS_PER_PUD;