git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.11
author    Sasha Levin <sashal@kernel.org>
Thu, 18 Mar 2021 22:10:43 +0000 (18:10 -0400)
committer Sasha Levin <sashal@kernel.org>
Thu, 18 Mar 2021 22:10:43 +0000 (18:10 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
20 files changed:
queue-5.11/gfs2-add-common-helper-for-holding-and-releasing-the.patch [new file with mode: 0644]
queue-5.11/gfs2-bypass-signal_our_withdraw-if-no-journal.patch [new file with mode: 0644]
queue-5.11/gfs2-move-freeze-glock-outside-the-make_fs_rw-and-_r.patch [new file with mode: 0644]
queue-5.11/gpiolib-read-gpio-line-names-from-a-firmware-node.patch [new file with mode: 0644]
queue-5.11/io_uring-clear-iocb_waitq-for-non-eiocbqueued-return.patch [new file with mode: 0644]
queue-5.11/io_uring-don-t-attempt-io-reissue-from-the-ring-exit.patch [new file with mode: 0644]
queue-5.11/io_uring-don-t-keep-looping-for-more-events-if-we-ca.patch [new file with mode: 0644]
queue-5.11/io_uring-refactor-io_cqring_wait.patch [new file with mode: 0644]
queue-5.11/io_uring-refactor-scheduling-in-io_cqring_wait.patch [new file with mode: 0644]
queue-5.11/io_uring-simplify-do_read-return-parsing.patch [new file with mode: 0644]
queue-5.11/kvm-x86-mmu-expand-on-the-comment-in-kvm_vcpu_ad_nee.patch [new file with mode: 0644]
queue-5.11/kvm-x86-mmu-set-spte_ad_wrprot_only_mask-if-and-only.patch [new file with mode: 0644]
queue-5.11/mptcp-dispose-initial-struct-socket-when-its-subflow.patch [new file with mode: 0644]
queue-5.11/mptcp-pm-add-lockdep-assertions.patch [new file with mode: 0644]
queue-5.11/mptcp-send-ack-for-every-add_addr.patch [new file with mode: 0644]
queue-5.11/net-bonding-fix-error-return-code-of-bond_neigh_init.patch [new file with mode: 0644]
queue-5.11/regulator-pca9450-add-sd_vsel-gpio-for-ldo5.patch [new file with mode: 0644]
queue-5.11/regulator-pca9450-clear-preset_en-bit-to-fix-buck1-2.patch [new file with mode: 0644]
queue-5.11/regulator-pca9450-enable-system-reset-on-wdog_b-asse.patch [new file with mode: 0644]
queue-5.11/series [new file with mode: 0644]

diff --git a/queue-5.11/gfs2-add-common-helper-for-holding-and-releasing-the.patch b/queue-5.11/gfs2-add-common-helper-for-holding-and-releasing-the.patch
new file mode 100644 (file)
index 0000000..4369c1e
--- /dev/null
@@ -0,0 +1,241 @@
+From a65c0f19dafdedad807cef03d5ad0c2445bc986a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Dec 2020 14:43:27 -0600
+Subject: gfs2: Add common helper for holding and releasing the freeze glock
+
+From: Bob Peterson <rpeterso@redhat.com>
+
+[ Upstream commit c77b52c0a137994ad796f44544c802b0b766e496 ]
+
+Many places in the gfs2 code queued and dequeued the freeze glock.
+Almost all of them acquire it in SHARED mode, and need to specify the
+same LM_FLAG_NOEXP and GL_EXACT flags.
+
+This patch adds common helper functions gfs2_freeze_lock and gfs2_freeze_unlock
+to make the code more readable, and to prepare for the next patch.
+
+Signed-off-by: Bob Peterson <rpeterso@redhat.com>
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/gfs2/ops_fstype.c |  6 ++----
+ fs/gfs2/recovery.c   |  8 +++-----
+ fs/gfs2/super.c      | 42 ++++++++++++++----------------------------
+ fs/gfs2/util.c       | 25 +++++++++++++++++++++++++
+ fs/gfs2/util.h       |  3 +++
+ 5 files changed, 47 insertions(+), 37 deletions(-)
+
+diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
+index 61fce59cb4d3..4ee56f5e93cb 100644
+--- a/fs/gfs2/ops_fstype.c
++++ b/fs/gfs2/ops_fstype.c
+@@ -1198,14 +1198,12 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
+       if (sb_rdonly(sb)) {
+               struct gfs2_holder freeze_gh;
+-              error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+-                                         LM_FLAG_NOEXP | GL_EXACT,
+-                                         &freeze_gh);
++              error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
+               if (error) {
+                       fs_err(sdp, "can't make FS RO: %d\n", error);
+                       goto fail_per_node;
+               }
+-              gfs2_glock_dq_uninit(&freeze_gh);
++              gfs2_freeze_unlock(&freeze_gh);
+       } else {
+               error = gfs2_make_fs_rw(sdp);
+               if (error) {
+diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
+index a3c1911862f0..8f9c6480a5df 100644
+--- a/fs/gfs2/recovery.c
++++ b/fs/gfs2/recovery.c
+@@ -470,9 +470,7 @@ void gfs2_recover_func(struct work_struct *work)
+               /* Acquire a shared hold on the freeze lock */
+-              error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+-                                         LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
+-                                         GL_EXACT, &thaw_gh);
++              error = gfs2_freeze_lock(sdp, &thaw_gh, LM_FLAG_PRIORITY);
+               if (error)
+                       goto fail_gunlock_ji;
+@@ -524,7 +522,7 @@ void gfs2_recover_func(struct work_struct *work)
+               clean_journal(jd, &head);
+               up_read(&sdp->sd_log_flush_lock);
+-              gfs2_glock_dq_uninit(&thaw_gh);
++              gfs2_freeze_unlock(&thaw_gh);
+               t_rep = ktime_get();
+               fs_info(sdp, "jid=%u: Journal replayed in %lldms [jlck:%lldms, "
+                       "jhead:%lldms, tlck:%lldms, replay:%lldms]\n",
+@@ -546,7 +544,7 @@ void gfs2_recover_func(struct work_struct *work)
+       goto done;
+ fail_gunlock_thaw:
+-      gfs2_glock_dq_uninit(&thaw_gh);
++      gfs2_freeze_unlock(&thaw_gh);
+ fail_gunlock_ji:
+       if (jlocked) {
+               gfs2_glock_dq_uninit(&ji_gh);
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 2f56acc41c04..ea312a94ce69 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -173,9 +173,7 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
+       if (error)
+               return error;
+-      error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+-                                 LM_FLAG_NOEXP | GL_EXACT,
+-                                 &freeze_gh);
++      error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
+       if (error)
+               goto fail_threads;
+@@ -205,12 +203,12 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
+       set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+-      gfs2_glock_dq_uninit(&freeze_gh);
++      gfs2_freeze_unlock(&freeze_gh);
+       return 0;
+ fail:
+-      gfs2_glock_dq_uninit(&freeze_gh);
++      gfs2_freeze_unlock(&freeze_gh);
+ fail_threads:
+       if (sdp->sd_quotad_process)
+               kthread_stop(sdp->sd_quotad_process);
+@@ -452,7 +450,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
+       }
+       if (error)
+-              gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
++              gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+ out:
+       while (!list_empty(&list)) {
+@@ -616,21 +614,12 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
+       gfs2_holder_mark_uninitialized(&freeze_gh);
+       if (sdp->sd_freeze_gl &&
+           !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
+-              if (!log_write_allowed) {
+-                      error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
+-                                                 LM_ST_SHARED, LM_FLAG_TRY |
+-                                                 LM_FLAG_NOEXP | GL_EXACT,
+-                                                 &freeze_gh);
+-                      if (error == GLR_TRYFAILED)
+-                              error = 0;
+-              } else {
+-                      error = gfs2_glock_nq_init(sdp->sd_freeze_gl,
+-                                                 LM_ST_SHARED,
+-                                                 LM_FLAG_NOEXP | GL_EXACT,
+-                                                 &freeze_gh);
+-                      if (error && !gfs2_withdrawn(sdp))
+-                              return error;
+-              }
++              error = gfs2_freeze_lock(sdp, &freeze_gh,
++                                       log_write_allowed ? 0 : LM_FLAG_TRY);
++              if (error == GLR_TRYFAILED)
++                      error = 0;
++              if (error && !gfs2_withdrawn(sdp))
++                      return error;
+       }
+       gfs2_flush_delete_work(sdp);
+@@ -661,8 +650,7 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
+                                  atomic_read(&sdp->sd_reserving_log) == 0,
+                                  HZ * 5);
+       }
+-      if (gfs2_holder_initialized(&freeze_gh))
+-              gfs2_glock_dq_uninit(&freeze_gh);
++      gfs2_freeze_unlock(&freeze_gh);
+       gfs2_quota_cleanup(sdp);
+@@ -772,10 +760,8 @@ void gfs2_freeze_func(struct work_struct *work)
+       struct super_block *sb = sdp->sd_vfs;
+       atomic_inc(&sb->s_active);
+-      error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED,
+-                                 LM_FLAG_NOEXP | GL_EXACT, &freeze_gh);
++      error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
+       if (error) {
+-              fs_info(sdp, "GFS2: couldn't get freeze lock : %d\n", error);
+               gfs2_assert_withdraw(sdp, 0);
+       } else {
+               atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
+@@ -785,7 +771,7 @@ void gfs2_freeze_func(struct work_struct *work)
+                               error);
+                       gfs2_assert_withdraw(sdp, 0);
+               }
+-              gfs2_glock_dq_uninit(&freeze_gh);
++              gfs2_freeze_unlock(&freeze_gh);
+       }
+       deactivate_super(sb);
+       clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
+@@ -853,7 +839,7 @@ static int gfs2_unfreeze(struct super_block *sb)
+                 return 0;
+       }
+-      gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
++      gfs2_freeze_unlock(&sdp->sd_freeze_gh);
+       mutex_unlock(&sdp->sd_freeze_mutex);
+       return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
+ }
+diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
+index 574bea29f21e..e6c93e811c3e 100644
+--- a/fs/gfs2/util.c
++++ b/fs/gfs2/util.c
+@@ -91,6 +91,31 @@ int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+       return error;
+ }
++/**
++ * gfs2_freeze_lock - hold the freeze glock
++ * @sdp: the superblock
++ * @freeze_gh: pointer to the requested holder
++ * @caller_flags: any additional flags needed by the caller
++ */
++int gfs2_freeze_lock(struct gfs2_sbd *sdp, struct gfs2_holder *freeze_gh,
++                   int caller_flags)
++{
++      int flags = LM_FLAG_NOEXP | GL_EXACT | caller_flags;
++      int error;
++
++      error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags,
++                                 freeze_gh);
++      if (error && error != GLR_TRYFAILED)
++              fs_err(sdp, "can't lock the freeze lock: %d\n", error);
++      return error;
++}
++
++void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh)
++{
++      if (gfs2_holder_initialized(freeze_gh))
++              gfs2_glock_dq_uninit(freeze_gh);
++}
++
+ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+ {
+       struct gfs2_glock *live_gl = sdp->sd_live_gh.gh_gl;
+diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h
+index a4443dd8a94b..69e1a0ae5a4d 100644
+--- a/fs/gfs2/util.h
++++ b/fs/gfs2/util.h
+@@ -149,6 +149,9 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
+ extern int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
+                              bool verbose);
++extern int gfs2_freeze_lock(struct gfs2_sbd *sdp,
++                          struct gfs2_holder *freeze_gh, int caller_flags);
++extern void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh);
+ #define gfs2_io_error(sdp) \
+ gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__)
+-- 
+2.30.1
+
diff --git a/queue-5.11/gfs2-bypass-signal_our_withdraw-if-no-journal.patch b/queue-5.11/gfs2-bypass-signal_our_withdraw-if-no-journal.patch
new file mode 100644 (file)
index 0000000..f85a8de
--- /dev/null
@@ -0,0 +1,64 @@
+From cf0f4ecdb8efff3a6ce51555938dd737c02f2222 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Mar 2021 07:58:54 -0500
+Subject: gfs2: bypass signal_our_withdraw if no journal
+
+From: Bob Peterson <rpeterso@redhat.com>
+
+[ Upstream commit d5bf630f355d8c532bef2347cf90e8ae60a5f1bd ]
+
+Before this patch, function signal_our_withdraw referenced the journal
+inode immediately. But corrupt file systems may have some invalid
+journals, in which case our attempt to read it in will withdraw and the
+resulting signal_our_withdraw would dereference the NULL value.
+
+This patch adds a check to signal_our_withdraw so that if the journal
+has not yet been initialized, it simply returns and does the old-style
+withdraw.
+
+Thanks, Andy Price, for his analysis.
+
+Reported-by: syzbot+50a8a9cf8127f2c6f5df@syzkaller.appspotmail.com
+Fixes: 601ef0d52e96 ("gfs2: Force withdraw to replay journals and wait for it to finish")
+Signed-off-by: Bob Peterson <rpeterso@redhat.com>
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/gfs2/util.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
+index 8d3c670c990f..dc4985429cf2 100644
+--- a/fs/gfs2/util.c
++++ b/fs/gfs2/util.c
+@@ -119,17 +119,22 @@ void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh)
+ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+ {
+       struct gfs2_glock *live_gl = sdp->sd_live_gh.gh_gl;
+-      struct inode *inode = sdp->sd_jdesc->jd_inode;
+-      struct gfs2_inode *ip = GFS2_I(inode);
+-      struct gfs2_glock *i_gl = ip->i_gl;
+-      u64 no_formal_ino = ip->i_no_formal_ino;
++      struct inode *inode;
++      struct gfs2_inode *ip;
++      struct gfs2_glock *i_gl;
++      u64 no_formal_ino;
+       int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+       int ret = 0;
+       int tries;
+-      if (test_bit(SDF_NORECOVERY, &sdp->sd_flags))
++      if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || !sdp->sd_jdesc)
+               return;
++      inode = sdp->sd_jdesc->jd_inode;
++      ip = GFS2_I(inode);
++      i_gl = ip->i_gl;
++      no_formal_ino = ip->i_no_formal_ino;
++
+       /* Prevent any glock dq until withdraw recovery is complete */
+       set_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
+       /*
+-- 
+2.30.1
+
diff --git a/queue-5.11/gfs2-move-freeze-glock-outside-the-make_fs_rw-and-_r.patch b/queue-5.11/gfs2-move-freeze-glock-outside-the-make_fs_rw-and-_r.patch
new file mode 100644 (file)
index 0000000..7d5bc3f
--- /dev/null
@@ -0,0 +1,216 @@
+From 1979502c47e0319f763f1fe474b19205f139d8ef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Dec 2020 14:43:28 -0600
+Subject: gfs2: move freeze glock outside the make_fs_rw and _ro functions
+
+From: Bob Peterson <rpeterso@redhat.com>
+
+[ Upstream commit 96b1454f2e8ede4c619fde405a1bb4e9ba8d218e ]
+
+Before this patch, sister functions gfs2_make_fs_rw and gfs2_make_fs_ro locked
+(held) the freeze glock by calling gfs2_freeze_lock and gfs2_freeze_unlock.
+The problem is, not all the callers of gfs2_make_fs_ro should be doing this.
+The three callers of gfs2_make_fs_ro are: remount (gfs2_reconfigure),
+signal_our_withdraw, and unmount (gfs2_put_super). But when unmounting the
+file system we can get into the following circular lock dependency:
+
+deactivate_super
+   down_write(&s->s_umount); <-------------------------------------- s_umount
+   deactivate_locked_super
+      gfs2_kill_sb
+         kill_block_super
+            generic_shutdown_super
+               gfs2_put_super
+                  gfs2_make_fs_ro
+                     gfs2_glock_nq_init sd_freeze_gl
+                        freeze_go_sync
+                           if (freeze glock in SH)
+                              freeze_super (vfs)
+                                 down_write(&sb->s_umount); <------- s_umount
+
+This patch moves the hold of the freeze glock outside the two sister rw/ro
+functions to their callers, but it doesn't request the glock from
+gfs2_put_super, thus eliminating the circular dependency.
+
+Signed-off-by: Bob Peterson <rpeterso@redhat.com>
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/gfs2/ops_fstype.c | 31 +++++++++++++++++--------------
+ fs/gfs2/super.c      | 23 -----------------------
+ fs/gfs2/util.c       | 18 ++++++++++++++++--
+ 3 files changed, 33 insertions(+), 39 deletions(-)
+
+diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
+index 4ee56f5e93cb..f2c6bbe5cdb8 100644
+--- a/fs/gfs2/ops_fstype.c
++++ b/fs/gfs2/ops_fstype.c
+@@ -1084,6 +1084,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
+       int silent = fc->sb_flags & SB_SILENT;
+       struct gfs2_sbd *sdp;
+       struct gfs2_holder mount_gh;
++      struct gfs2_holder freeze_gh;
+       int error;
+       sdp = init_sbd(sb);
+@@ -1195,23 +1196,18 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
+               goto fail_per_node;
+       }
+-      if (sb_rdonly(sb)) {
+-              struct gfs2_holder freeze_gh;
++      error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
++      if (error)
++              goto fail_per_node;
+-              error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
+-              if (error) {
+-                      fs_err(sdp, "can't make FS RO: %d\n", error);
+-                      goto fail_per_node;
+-              }
+-              gfs2_freeze_unlock(&freeze_gh);
+-      } else {
++      if (!sb_rdonly(sb))
+               error = gfs2_make_fs_rw(sdp);
+-              if (error) {
+-                      fs_err(sdp, "can't make FS RW: %d\n", error);
+-                      goto fail_per_node;
+-              }
+-      }
++      gfs2_freeze_unlock(&freeze_gh);
++      if (error) {
++              fs_err(sdp, "can't make FS RW: %d\n", error);
++              goto fail_per_node;
++      }
+       gfs2_glock_dq_uninit(&mount_gh);
+       gfs2_online_uevent(sdp);
+       return 0;
+@@ -1512,6 +1508,12 @@ static int gfs2_reconfigure(struct fs_context *fc)
+               fc->sb_flags |= SB_RDONLY;
+       if ((sb->s_flags ^ fc->sb_flags) & SB_RDONLY) {
++              struct gfs2_holder freeze_gh;
++
++              error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
++              if (error)
++                      return -EINVAL;
++
+               if (fc->sb_flags & SB_RDONLY) {
+                       error = gfs2_make_fs_ro(sdp);
+                       if (error)
+@@ -1521,6 +1523,7 @@ static int gfs2_reconfigure(struct fs_context *fc)
+                       if (error)
+                               errorfc(fc, "unable to remount read-write");
+               }
++              gfs2_freeze_unlock(&freeze_gh);
+       }
+       sdp->sd_args = *newargs;
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index ea312a94ce69..754ea2a137b4 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -165,7 +165,6 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
+ {
+       struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
+       struct gfs2_glock *j_gl = ip->i_gl;
+-      struct gfs2_holder freeze_gh;
+       struct gfs2_log_header_host head;
+       int error;
+@@ -173,10 +172,6 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
+       if (error)
+               return error;
+-      error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
+-      if (error)
+-              goto fail_threads;
+-
+       j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
+       if (gfs2_withdrawn(sdp)) {
+               error = -EIO;
+@@ -203,13 +198,9 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
+       set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+-      gfs2_freeze_unlock(&freeze_gh);
+-
+       return 0;
+ fail:
+-      gfs2_freeze_unlock(&freeze_gh);
+-fail_threads:
+       if (sdp->sd_quotad_process)
+               kthread_stop(sdp->sd_quotad_process);
+       sdp->sd_quotad_process = NULL;
+@@ -607,21 +598,9 @@ static void gfs2_dirty_inode(struct inode *inode, int flags)
+ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
+ {
+-      struct gfs2_holder freeze_gh;
+       int error = 0;
+       int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+-      gfs2_holder_mark_uninitialized(&freeze_gh);
+-      if (sdp->sd_freeze_gl &&
+-          !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
+-              error = gfs2_freeze_lock(sdp, &freeze_gh,
+-                                       log_write_allowed ? 0 : LM_FLAG_TRY);
+-              if (error == GLR_TRYFAILED)
+-                      error = 0;
+-              if (error && !gfs2_withdrawn(sdp))
+-                      return error;
+-      }
+-
+       gfs2_flush_delete_work(sdp);
+       if (!log_write_allowed && current == sdp->sd_quotad_process)
+               fs_warn(sdp, "The quotad daemon is withdrawing.\n");
+@@ -650,8 +629,6 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
+                                  atomic_read(&sdp->sd_reserving_log) == 0,
+                                  HZ * 5);
+       }
+-      gfs2_freeze_unlock(&freeze_gh);
+-
+       gfs2_quota_cleanup(sdp);
+       if (!log_write_allowed)
+diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
+index e6c93e811c3e..8d3c670c990f 100644
+--- a/fs/gfs2/util.c
++++ b/fs/gfs2/util.c
+@@ -123,6 +123,7 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+       struct gfs2_inode *ip = GFS2_I(inode);
+       struct gfs2_glock *i_gl = ip->i_gl;
+       u64 no_formal_ino = ip->i_no_formal_ino;
++      int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+       int ret = 0;
+       int tries;
+@@ -143,8 +144,21 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
+        * therefore we need to clear SDF_JOURNAL_LIVE manually.
+        */
+       clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
+-      if (!sb_rdonly(sdp->sd_vfs))
+-              ret = gfs2_make_fs_ro(sdp);
++      if (!sb_rdonly(sdp->sd_vfs)) {
++              struct gfs2_holder freeze_gh;
++
++              gfs2_holder_mark_uninitialized(&freeze_gh);
++              if (sdp->sd_freeze_gl &&
++                  !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) {
++                      ret = gfs2_freeze_lock(sdp, &freeze_gh,
++                                     log_write_allowed ? 0 : LM_FLAG_TRY);
++                      if (ret == GLR_TRYFAILED)
++                              ret = 0;
++              }
++              if (!ret)
++                      ret = gfs2_make_fs_ro(sdp);
++              gfs2_freeze_unlock(&freeze_gh);
++      }
+       if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */
+               if (!ret)
+-- 
+2.30.1
+
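The lock trace quoted in the patch above marks down_write(&s->s_umount) twice on a single call path, so the unmount task ends up waiting on an rwsem it already holds. Below is a minimal userspace sketch of that pattern, assuming nothing about GFS2 or the VFS beyond the shape of the trace: an error-checking pthread mutex stands in for s_umount, and the second acquisition by the same owner is reported as EDEADLK instead of hanging (a plain mutex would simply block, which is the hang the patch avoids by not requesting the freeze glock from gfs2_put_super).

/* Userspace analogue only, not kernel code.  "m" stands in for s_umount;
 * the second lock call models the freeze path re-taking the rwsem that
 * deactivate_super() already holds. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        pthread_mutexattr_t attr;
        pthread_mutex_t m;
        int ret;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&m, &attr);

        pthread_mutex_lock(&m);           /* deactivate_super(): down_write(&s->s_umount) */
        ret = pthread_mutex_lock(&m);     /* freeze_super(): down_write(&sb->s_umount) again */
        printf("second acquisition: %s\n", strerror(ret));  /* typically "Resource deadlock avoided" */

        pthread_mutex_unlock(&m);
        pthread_mutex_destroy(&m);
        pthread_mutexattr_destroy(&attr);
        return 0;
}

Build with "cc deadlock.c -pthread". With the default mutex type the second call would block forever, which mirrors why the real fix moves the freeze-glock request out to callers that are not already under s_umount.
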
diff --git a/queue-5.11/gpiolib-read-gpio-line-names-from-a-firmware-node.patch b/queue-5.11/gpiolib-read-gpio-line-names-from-a-firmware-node.patch
new file mode 100644 (file)
index 0000000..2eb33b9
--- /dev/null
@@ -0,0 +1,82 @@
+From 5dbb9ded0c59063e40c6fc3ef03a134b31e02931 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Mar 2021 14:02:40 +0200
+Subject: gpiolib: Read "gpio-line-names" from a firmware node
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit b41ba2ec54a70908067034f139aa23d0dd2985ce ]
+
+On STM32MP1, the GPIO banks are subnodes of pin-controller@50002000,
+see arch/arm/boot/dts/stm32mp151.dtsi. The driver for
+pin-controller@50002000 is in drivers/pinctrl/stm32/pinctrl-stm32.c
+and iterates over all of its DT subnodes when registering each GPIO
+bank gpiochip. Each gpiochip has:
+
+  - gpio_chip.parent = dev,
+    where dev is the device node of the pin controller
+  - gpio_chip.of_node = np,
+    which is the OF node of the GPIO bank
+
+Therefore, dev_fwnode(chip->parent) != of_fwnode_handle(chip.of_node),
+i.e. pin-controller@50002000 != pin-controller@50002000/gpio@5000*000.
+
+The original code behaved correctly, as it extracted the "gpio-line-names"
+from of_fwnode_handle(chip.of_node) = pin-controller@50002000/gpio@5000*000.
+
+To achieve the same behaviour, read property from the firmware node.
+
+Fixes: 7cba1a4d5e162 ("gpiolib: generalize devprop_gpiochip_set_names() for device properties")
+Reported-by: Marek Vasut <marex@denx.de>
+Reported-by: Roman Guskov <rguskov@dh-electronics.com>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Tested-by: Marek Vasut <marex@denx.de>
+Reviewed-by: Marek Vasut <marex@denx.de>
+Signed-off-by: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpio/gpiolib.c | 12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index e4cfa27f6893..3451572166f2 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -365,22 +365,18 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc)
+  *
+  * Looks for device property "gpio-line-names" and if it exists assigns
+  * GPIO line names for the chip. The memory allocated for the assigned
+- * names belong to the underlying software node and should not be released
++ * names belong to the underlying firmware node and should not be released
+  * by the caller.
+  */
+ static int devprop_gpiochip_set_names(struct gpio_chip *chip)
+ {
+       struct gpio_device *gdev = chip->gpiodev;
+-      struct device *dev = chip->parent;
++      struct fwnode_handle *fwnode = dev_fwnode(&gdev->dev);
+       const char **names;
+       int ret, i;
+       int count;
+-      /* GPIO chip may not have a parent device whose properties we inspect. */
+-      if (!dev)
+-              return 0;
+-
+-      count = device_property_string_array_count(dev, "gpio-line-names");
++      count = fwnode_property_string_array_count(fwnode, "gpio-line-names");
+       if (count < 0)
+               return 0;
+@@ -394,7 +390,7 @@ static int devprop_gpiochip_set_names(struct gpio_chip *chip)
+       if (!names)
+               return -ENOMEM;
+-      ret = device_property_read_string_array(dev, "gpio-line-names",
++      ret = fwnode_property_read_string_array(fwnode, "gpio-line-names",
+                                               names, count);
+       if (ret < 0) {
+               dev_warn(&gdev->dev, "failed to read GPIO line names\n");
+-- 
+2.30.1
+
diff --git a/queue-5.11/io_uring-clear-iocb_waitq-for-non-eiocbqueued-return.patch b/queue-5.11/io_uring-clear-iocb_waitq-for-non-eiocbqueued-return.patch
new file mode 100644 (file)
index 0000000..8fdbbe7
--- /dev/null
@@ -0,0 +1,36 @@
+From b79ee7004e598a51a2994375bdafc6ab7fc5c8ea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Mar 2021 21:02:58 -0700
+Subject: io_uring: clear IOCB_WAITQ for non -EIOCBQUEUED return
+
+From: Jens Axboe <axboe@kernel.dk>
+
+[ Upstream commit b5b0ecb736f1ce1e68eb50613c0cfecff10198eb ]
+
+The callback can only be armed, if we get -EIOCBQUEUED returned. It's
+important that we clear the WAITQ bit for other cases, otherwise we can
+queue for async retry and filemap will assume that we're armed and
+return -EAGAIN instead of just blocking for the IO.
+
+Cc: stable@vger.kernel.org # 5.9+
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index c18e4a334614..262fd4cfd3ad 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -3587,6 +3587,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
+               goto out_free;
+       } else if (ret > 0 && ret < io_size) {
+               /* we got some bytes, but not all. retry. */
++              kiocb->ki_flags &= ~IOCB_WAITQ;
+               goto retry;
+       }
+ done:
+-- 
+2.30.1
+
diff --git a/queue-5.11/io_uring-don-t-attempt-io-reissue-from-the-ring-exit.patch b/queue-5.11/io_uring-don-t-attempt-io-reissue-from-the-ring-exit.patch
new file mode 100644 (file)
index 0000000..ed008d9
--- /dev/null
@@ -0,0 +1,40 @@
+From 96581afe03e8fabc780fdf9684f61ce58b41e9d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 23 Feb 2021 19:17:35 -0700
+Subject: io_uring: don't attempt IO reissue from the ring exit path
+
+From: Jens Axboe <axboe@kernel.dk>
+
+[ Upstream commit 7c977a58dc83366e488c217fd88b1469d242bee5 ]
+
+If we're exiting the ring, just let the IO fail with -EAGAIN as nobody
+will care anyway. It's not the right context to reissue from.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 00ef0b90d149..68508f010b90 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -2717,6 +2717,13 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
+               return false;
+       if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
+               return false;
++      /*
++       * If ref is dying, we might be running poll reap from the exit work.
++       * Don't attempt to reissue from that path, just let it fail with
++       * -EAGAIN.
++       */
++      if (percpu_ref_is_dying(&req->ctx->refs))
++              return false;
+       lockdep_assert_held(&req->ctx->uring_lock);
+-- 
+2.30.1
+
diff --git a/queue-5.11/io_uring-don-t-keep-looping-for-more-events-if-we-ca.patch b/queue-5.11/io_uring-don-t-keep-looping-for-more-events-if-we-ca.patch
new file mode 100644 (file)
index 0000000..20840d4
--- /dev/null
@@ -0,0 +1,72 @@
+From ad742409d9a626f899fea0760032e99191c3af8c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Mar 2021 17:15:48 -0700
+Subject: io_uring: don't keep looping for more events if we can't flush
+ overflow
+
+From: Jens Axboe <axboe@kernel.dk>
+
+[ Upstream commit ca0a26511c679a797f86589894a4523db36d833e ]
+
+It doesn't make sense to wait for more events to come in, if we can't
+even flush the overflow we already have to the ring. Return -EBUSY for
+that condition, just like we do for attempts to submit with overflow
+pending.
+
+Cc: stable@vger.kernel.org # 5.11
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 7621978e9fc8..cab380a337e4 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -1823,18 +1823,22 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+       return all_flushed;
+ }
+-static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
++static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
+                                    struct task_struct *tsk,
+                                    struct files_struct *files)
+ {
++      bool ret = true;
++
+       if (test_bit(0, &ctx->cq_check_overflow)) {
+               /* iopoll syncs against uring_lock, not completion_lock */
+               if (ctx->flags & IORING_SETUP_IOPOLL)
+                       mutex_lock(&ctx->uring_lock);
+-              __io_cqring_overflow_flush(ctx, force, tsk, files);
++              ret = __io_cqring_overflow_flush(ctx, force, tsk, files);
+               if (ctx->flags & IORING_SETUP_IOPOLL)
+                       mutex_unlock(&ctx->uring_lock);
+       }
++
++      return ret;
+ }
+ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags)
+@@ -7280,11 +7284,16 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+       iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
+       trace_io_uring_cqring_wait(ctx, min_events);
+       do {
+-              io_cqring_overflow_flush(ctx, false, NULL, NULL);
++              /* if we can't even flush overflow, don't wait for more */
++              if (!io_cqring_overflow_flush(ctx, false, NULL, NULL)) {
++                      ret = -EBUSY;
++                      break;
++              }
+               prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
+                                               TASK_INTERRUPTIBLE);
+               ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
+               finish_wait(&ctx->wait, &iowq.wq);
++              cond_resched();
+       } while (ret > 0);
+       restore_saved_sigmask_unless(ret == -EINTR);
+-- 
+2.30.1
+
diff --git a/queue-5.11/io_uring-refactor-io_cqring_wait.patch b/queue-5.11/io_uring-refactor-io_cqring_wait.patch
new file mode 100644 (file)
index 0000000..3169fd2
--- /dev/null
@@ -0,0 +1,86 @@
+From 610f9da5c12fa2d798bff9544c0cf8c34bf7ef28 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Feb 2021 13:51:58 +0000
+Subject: io_uring: refactor io_cqring_wait
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit eeb60b9ab4000d20261973642dfc9fb0e4b5d073 ]
+
+It's easy to make a mistake in io_cqring_wait() because for all
+break/continue clauses we need to watch for prepare/finish_wait to be
+used correctly. Extract all those into a new helper
+io_cqring_wait_schedule(), and transforming the loop into simple series
+of func calls: prepare(); check_and_schedule(); finish();
+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 43 ++++++++++++++++++++++---------------------
+ 1 file changed, 22 insertions(+), 21 deletions(-)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 3e610ac062a3..7621978e9fc8 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -7208,6 +7208,25 @@ static int io_run_task_work_sig(void)
+       return -EINTR;
+ }
++/* when returns >0, the caller should retry */
++static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
++                                        struct io_wait_queue *iowq,
++                                        signed long *timeout)
++{
++      int ret;
++
++      /* make sure we run task_work before checking for signals */
++      ret = io_run_task_work_sig();
++      if (ret || io_should_wake(iowq))
++              return ret;
++      /* let the caller flush overflows, retry */
++      if (test_bit(0, &ctx->cq_check_overflow))
++              return 1;
++
++      *timeout = schedule_timeout(*timeout);
++      return !*timeout ? -ETIME : 1;
++}
++
+ /*
+  * Wait until events become available, if we don't already have some. The
+  * application must reap them itself, as they reside on the shared cq ring.
+@@ -7264,27 +7283,9 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+               io_cqring_overflow_flush(ctx, false, NULL, NULL);
+               prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
+                                               TASK_INTERRUPTIBLE);
+-              /* make sure we run task_work before checking for signals */
+-              ret = io_run_task_work_sig();
+-              if (ret > 0) {
+-                      finish_wait(&ctx->wait, &iowq.wq);
+-                      continue;
+-              }
+-              else if (ret < 0)
+-                      break;
+-              if (io_should_wake(&iowq))
+-                      break;
+-              if (test_bit(0, &ctx->cq_check_overflow)) {
+-                      finish_wait(&ctx->wait, &iowq.wq);
+-                      continue;
+-              }
+-              timeout = schedule_timeout(timeout);
+-              if (timeout == 0) {
+-                      ret = -ETIME;
+-                      break;
+-              }
+-      } while (1);
+-      finish_wait(&ctx->wait, &iowq.wq);
++              ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
++              finish_wait(&ctx->wait, &iowq.wq);
++      } while (ret > 0);
+       restore_saved_sigmask_unless(ret == -EINTR);
+-- 
+2.30.1
+
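The refactor above reduces the wait loop to a plain prepare(); step(); finish() sequence, with the helper's return value deciding whether to retry (the patch's own comment: "when returns >0, the caller should retry"). The sketch below is a rough userspace analogue of that contract only, not io_uring code; the names wait_step, done and producer are invented for illustration, and pthread condition variables fold the prepare/finish bookkeeping into the mutex, so the mapping is loose.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool done;

/* when this returns >0, the caller should retry (same contract as the
 * io_cqring_wait_schedule() helper introduced above) */
static int wait_step(void)
{
        if (done)                         /* stand-in for io_should_wake() */
                return 0;
        pthread_cond_wait(&cond, &lock);  /* stand-in for schedule_timeout() */
        return 1;
}

static void *producer(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        done = true;                      /* stand-in for a completion arriving */
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t thr;
        int ret;

        pthread_create(&thr, NULL, producer, NULL);

        pthread_mutex_lock(&lock);        /* loosely, prepare_to_wait_exclusive() */
        do {
                ret = wait_step();
        } while (ret > 0);
        pthread_mutex_unlock(&lock);      /* loosely, finish_wait() */

        pthread_join(thr, NULL);
        printf("done waiting, ret=%d\n", ret);
        return 0;
}

Because every break/continue decision is expressed through the helper's return value, the caller's loop has exactly one exit and never owes a dangling finish step, which is the readability point the commit message makes.
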
diff --git a/queue-5.11/io_uring-refactor-scheduling-in-io_cqring_wait.patch b/queue-5.11/io_uring-refactor-scheduling-in-io_cqring_wait.patch
new file mode 100644 (file)
index 0000000..28d97ed
--- /dev/null
@@ -0,0 +1,67 @@
+From 03fc298d186de3b2ce9936c6112c21f03ddd76f2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Feb 2021 13:51:57 +0000
+Subject: io_uring: refactor scheduling in io_cqring_wait
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit c1d5a224683b333ddbe278e455d639ccd4f5ca2b ]
+
+schedule_timeout() with timeout=MAX_SCHEDULE_TIMEOUT is guaranteed to
+work just as schedule(), so instead of hand-coding it based on arguments
+always use the timeout version and simplify code.
+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 19 ++++++++-----------
+ 1 file changed, 8 insertions(+), 11 deletions(-)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 68508f010b90..3e610ac062a3 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -7226,9 +7226,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+               .to_wait        = min_events,
+       };
+       struct io_rings *rings = ctx->rings;
+-      struct timespec64 ts;
+-      signed long timeout = 0;
+-      int ret = 0;
++      signed long timeout = MAX_SCHEDULE_TIMEOUT;
++      int ret;
+       do {
+               io_cqring_overflow_flush(ctx, false, NULL, NULL);
+@@ -7252,6 +7251,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+       }
+       if (uts) {
++              struct timespec64 ts;
++
+               if (get_timespec64(&ts, uts))
+                       return -EFAULT;
+               timeout = timespec64_to_jiffies(&ts);
+@@ -7277,14 +7278,10 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
+                       finish_wait(&ctx->wait, &iowq.wq);
+                       continue;
+               }
+-              if (uts) {
+-                      timeout = schedule_timeout(timeout);
+-                      if (timeout == 0) {
+-                              ret = -ETIME;
+-                              break;
+-                      }
+-              } else {
+-                      schedule();
++              timeout = schedule_timeout(timeout);
++              if (timeout == 0) {
++                      ret = -ETIME;
++                      break;
+               }
+       } while (1);
+       finish_wait(&ctx->wait, &iowq.wq);
+-- 
+2.30.1
+
diff --git a/queue-5.11/io_uring-simplify-do_read-return-parsing.patch b/queue-5.11/io_uring-simplify-do_read-return-parsing.patch
new file mode 100644 (file)
index 0000000..eab80a1
--- /dev/null
@@ -0,0 +1,55 @@
+From 5dcd1a4c57f4b7346dfee295fa579ba022743bd8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Feb 2021 18:59:56 +0000
+Subject: io_uring: simplify do_read return parsing
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit 57cd657b8272a66277c139e7bbdc8b86057cb415 ]
+
+do_read() returning 0 bytes read (not -EAGAIN/etc.) is not an important
+enough of a case to prioritise it. Fold it into ret < 0 check, so we get
+rid of an extra if and make it a bit more readable.
+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index cab380a337e4..c18e4a334614 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -3518,7 +3518,6 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
+       else
+               kiocb->ki_flags |= IOCB_NOWAIT;
+-
+       /* If the file doesn't support async, just async punt */
+       no_async = force_nonblock && !io_file_supports_async(req->file, READ);
+       if (no_async)
+@@ -3530,9 +3529,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
+       ret = io_iter_do_read(req, iter);
+-      if (!ret) {
+-              goto done;
+-      } else if (ret == -EIOCBQUEUED) {
++      if (ret == -EIOCBQUEUED) {
+               ret = 0;
+               goto out_free;
+       } else if (ret == -EAGAIN) {
+@@ -3546,7 +3543,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
+               iov_iter_revert(iter, io_size - iov_iter_count(iter));
+               ret = 0;
+               goto copy_iov;
+-      } else if (ret < 0) {
++      } else if (ret <= 0) {
+               /* make sure -ERESTARTSYS -> -EINTR is done */
+               goto done;
+       }
+-- 
+2.30.1
+
diff --git a/queue-5.11/kvm-x86-mmu-expand-on-the-comment-in-kvm_vcpu_ad_nee.patch b/queue-5.11/kvm-x86-mmu-expand-on-the-comment-in-kvm_vcpu_ad_nee.patch
new file mode 100644 (file)
index 0000000..72b6906
--- /dev/null
@@ -0,0 +1,42 @@
+From a4d2c639de2fbf92345a35a8e411a103bdcf71d8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Feb 2021 16:50:08 -0800
+Subject: KVM: x86/mmu: Expand on the comment in
+ kvm_vcpu_ad_need_write_protect()
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 2855f98265dc579bd2becb79ce0156d08e0df813 ]
+
+Expand the comment about need to use write-protection for nested EPT
+when PML is enabled to clarify that the tagging is a nop when PML is
+_not_ enabled.  Without the clarification, omitting the PML check looks
+wrong at first^Wfifth glance.
+
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210213005015.1651772-8-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/mmu/mmu_internal.h | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
+index bfc6389edc28..8404145fb179 100644
+--- a/arch/x86/kvm/mmu/mmu_internal.h
++++ b/arch/x86/kvm/mmu/mmu_internal.h
+@@ -79,7 +79,10 @@ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
+        * When using the EPT page-modification log, the GPAs in the log
+        * would come from L2 rather than L1.  Therefore, we need to rely
+        * on write protection to record dirty pages.  This also bypasses
+-       * PML, since writes now result in a vmexit.
++       * PML, since writes now result in a vmexit.  Note, this helper will
++       * tag SPTEs as needing write-protection even if PML is disabled or
++       * unsupported, but that's ok because the tag is consumed if and only
++       * if PML is enabled.  Omit the PML check to save a few uops.
+        */
+       return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
+ }
+-- 
+2.30.1
+
diff --git a/queue-5.11/kvm-x86-mmu-set-spte_ad_wrprot_only_mask-if-and-only.patch b/queue-5.11/kvm-x86-mmu-set-spte_ad_wrprot_only_mask-if-and-only.patch
new file mode 100644 (file)
index 0000000..d2cf756
--- /dev/null
@@ -0,0 +1,57 @@
+From 57c967f19f85f45564612d893b95c94023643538 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 25 Feb 2021 12:47:26 -0800
+Subject: KVM: x86/mmu: Set SPTE_AD_WRPROT_ONLY_MASK if and only if PML is
+ enabled
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 44ac5958a6c1fd91ac8810fbb37194e377d78db5 ]
+
+Check that PML is actually enabled before setting the mask to force a
+SPTE to be write-protected.  The bits used for the !AD_ENABLED case are
+in the upper half of the SPTE.  With 64-bit paging and EPT, these bits
+are ignored, but with 32-bit PAE paging they are reserved.  Setting them
+for L2 SPTEs without checking PML breaks NPT on 32-bit KVM.
+
+Fixes: 1f4e5fc83a42 ("KVM: x86: fix nested guest live migration with PML")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210225204749.1512652-2-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/mmu/mmu_internal.h | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
+index 8404145fb179..cf101b73a360 100644
+--- a/arch/x86/kvm/mmu/mmu_internal.h
++++ b/arch/x86/kvm/mmu/mmu_internal.h
+@@ -76,15 +76,15 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
+ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
+ {
+       /*
+-       * When using the EPT page-modification log, the GPAs in the log
+-       * would come from L2 rather than L1.  Therefore, we need to rely
+-       * on write protection to record dirty pages.  This also bypasses
+-       * PML, since writes now result in a vmexit.  Note, this helper will
+-       * tag SPTEs as needing write-protection even if PML is disabled or
+-       * unsupported, but that's ok because the tag is consumed if and only
+-       * if PML is enabled.  Omit the PML check to save a few uops.
++       * When using the EPT page-modification log, the GPAs in the CPU dirty
++       * log would come from L2 rather than L1.  Therefore, we need to rely
++       * on write protection to record dirty pages, which bypasses PML, since
++       * writes now result in a vmexit.  Note, the check on CPU dirty logging
++       * being enabled is mandatory as the bits used to denote WP-only SPTEs
++       * are reserved for NPT w/ PAE (32-bit KVM).
+        */
+-      return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
++      return vcpu->arch.mmu == &vcpu->arch.guest_mmu &&
++             kvm_x86_ops.cpu_dirty_log_size;
+ }
+ bool is_nx_huge_page_enabled(void);
+-- 
+2.30.1
+
diff --git a/queue-5.11/mptcp-dispose-initial-struct-socket-when-its-subflow.patch b/queue-5.11/mptcp-dispose-initial-struct-socket-when-its-subflow.patch
new file mode 100644 (file)
index 0000000..156f906
--- /dev/null
@@ -0,0 +1,88 @@
+From c89c754d4d6b941c7c6bf4621cdac12ce68b3bcc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Mar 2021 13:32:11 -0800
+Subject: mptcp: dispose initial struct socket when its subflow is closed
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 17aee05dc8822e354f5ad2d68ee39e3ba4b6acf2 ]
+
+Christoph Paasch reported following crash:
+dst_release underflow
+WARNING: CPU: 0 PID: 1319 at net/core/dst.c:175 dst_release+0xc1/0xd0 net/core/dst.c:175
+CPU: 0 PID: 1319 Comm: syz-executor217 Not tainted 5.11.0-rc6af8e85128b4d0d24083c5cac646e891227052e0c #70
+Call Trace:
+ rt_cache_route+0x12e/0x140 net/ipv4/route.c:1503
+ rt_set_nexthop.constprop.0+0x1fc/0x590 net/ipv4/route.c:1612
+ __mkroute_output net/ipv4/route.c:2484 [inline]
+...
+
+The worker leaves msk->subflow alone even when it
+happened to close the subflow ssk associated with it.
+
+Fixes: 866f26f2a9c33b ("mptcp: always graft subflow socket to parent")
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/157
+Reported-by: Christoph Paasch <cpaasch@apple.com>
+Suggested-by: Paolo Abeni <pabeni@redhat.com>
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/protocol.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 64b8a49652ae..7345df40385a 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2100,6 +2100,14 @@ static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
+       return backup;
+ }
++static void mptcp_dispose_initial_subflow(struct mptcp_sock *msk)
++{
++      if (msk->subflow) {
++              iput(SOCK_INODE(msk->subflow));
++              msk->subflow = NULL;
++      }
++}
++
+ /* subflow sockets can be either outgoing (connect) or incoming
+  * (accept).
+  *
+@@ -2144,6 +2152,9 @@ void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+       if (ssk == msk->last_snd)
+               msk->last_snd = NULL;
++
++      if (msk->subflow && ssk == msk->subflow->sk)
++              mptcp_dispose_initial_subflow(msk);
+ }
+ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
+@@ -2533,12 +2544,6 @@ static void __mptcp_destroy_sock(struct sock *sk)
+       might_sleep();
+-      /* dispose the ancillatory tcp socket, if any */
+-      if (msk->subflow) {
+-              iput(SOCK_INODE(msk->subflow));
+-              msk->subflow = NULL;
+-      }
+-
+       /* be sure to always acquire the join list lock, to sync vs
+        * mptcp_finish_join().
+        */
+@@ -2563,6 +2568,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
+       sk_stream_kill_queues(sk);
+       xfrm_sk_free_policy(sk);
+       sk_refcnt_debug_release(sk);
++      mptcp_dispose_initial_subflow(msk);
+       sock_put(sk);
+ }
+-- 
+2.30.1
+
diff --git a/queue-5.11/mptcp-pm-add-lockdep-assertions.patch b/queue-5.11/mptcp-pm-add-lockdep-assertions.patch
new file mode 100644 (file)
index 0000000..2572818
--- /dev/null
@@ -0,0 +1,138 @@
+From 7e8da9ee4757582e00d482c92dd6481a29c8d41a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Feb 2021 15:23:30 -0800
+Subject: mptcp: pm: add lockdep assertions
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 3abc05d9ef6fe989706b679e1e6371d6360d3db4 ]
+
+Add a few assertions to make sure functions are called with the needed
+locks held.
+Two functions gain might_sleep annotations because they contain
+conditional calls to functions that sleep.
+
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/pm.c         |  2 ++
+ net/mptcp/pm_netlink.c | 13 +++++++++++++
+ net/mptcp/protocol.c   |  4 ++++
+ net/mptcp/protocol.h   |  5 +++++
+ 4 files changed, 24 insertions(+)
+
+diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
+index 5463d7c8c931..1c01c3bcbf5a 100644
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -20,6 +20,8 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk,
+       pr_debug("msk=%p, local_id=%d", msk, addr->id);
++      lockdep_assert_held(&msk->pm.lock);
++
+       if (add_addr) {
+               pr_warn("addr_signal error, add_addr=%d", add_addr);
+               return -EINVAL;
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index b81ce0ea1f8b..71c41b948861 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -134,6 +134,8 @@ select_local_address(const struct pm_nl_pernet *pernet,
+ {
+       struct mptcp_pm_addr_entry *entry, *ret = NULL;
++      msk_owned_by_me(msk);
++
+       rcu_read_lock();
+       __mptcp_flush_join_list(msk);
+       list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
+@@ -191,6 +193,8 @@ lookup_anno_list_by_saddr(struct mptcp_sock *msk,
+ {
+       struct mptcp_pm_add_entry *entry;
++      lockdep_assert_held(&msk->pm.lock);
++
+       list_for_each_entry(entry, &msk->pm.anno_list, list) {
+               if (addresses_equal(&entry->addr, addr, false))
+                       return entry;
+@@ -266,6 +270,8 @@ static bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
+       struct sock *sk = (struct sock *)msk;
+       struct net *net = sock_net(sk);
++      lockdep_assert_held(&msk->pm.lock);
++
+       if (lookup_anno_list_by_saddr(msk, &entry->addr))
+               return false;
+@@ -408,6 +414,9 @@ void mptcp_pm_nl_add_addr_send_ack(struct mptcp_sock *msk)
+ {
+       struct mptcp_subflow_context *subflow;
++      msk_owned_by_me(msk);
++      lockdep_assert_held(&msk->pm.lock);
++
+       if (!mptcp_pm_should_add_signal(msk))
+               return;
+@@ -443,6 +452,8 @@ void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
+       pr_debug("address rm_id %d", msk->pm.rm_id);
++      msk_owned_by_me(msk);
++
+       if (!msk->pm.rm_id)
+               return;
+@@ -478,6 +489,8 @@ void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, u8 rm_id)
+       pr_debug("subflow rm_id %d", rm_id);
++      msk_owned_by_me(msk);
++
+       if (!rm_id)
+               return;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 056846eb2e5b..64b8a49652ae 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2186,6 +2186,8 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk)
+ {
+       struct mptcp_subflow_context *subflow, *tmp;
++      might_sleep();
++
+       list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
+               struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+@@ -2529,6 +2531,8 @@ static void __mptcp_destroy_sock(struct sock *sk)
+       pr_debug("msk=%p", msk);
++      might_sleep();
++
+       /* dispose the ancillatory tcp socket, if any */
+       if (msk->subflow) {
+               iput(SOCK_INODE(msk->subflow));
+diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
+index 18fef4273bdc..c374345ad134 100644
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -286,6 +286,11 @@ struct mptcp_sock {
+ #define mptcp_for_each_subflow(__msk, __subflow)                      \
+       list_for_each_entry(__subflow, &((__msk)->conn_list), node)
++static inline void msk_owned_by_me(const struct mptcp_sock *msk)
++{
++      sock_owned_by_me((const struct sock *)msk);
++}
++
+ static inline struct mptcp_sock *mptcp_sk(const struct sock *sk)
+ {
+       return (struct mptcp_sock *)sk;
+-- 
+2.30.1
+
diff --git a/queue-5.11/mptcp-send-ack-for-every-add_addr.patch b/queue-5.11/mptcp-send-ack-for-every-add_addr.patch
new file mode 100644 (file)
index 0000000..7af3d17
--- /dev/null
@@ -0,0 +1,69 @@
+From ab45a34b091517bf13dc1a98dd56d8b13dcce544 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Feb 2021 15:09:09 -0800
+Subject: mptcp: send ack for every add_addr
+
+From: Geliang Tang <geliangtang@gmail.com>
+
+[ Upstream commit b5a7acd3bd63c7430c98d7f66d0aa457c9ccde30 ]
+
+This patch changes the sending ACK conditions for the ADD_ADDR, send an
+ACK packet for any ADD_ADDR, not just when ipv6 addresses or port
+numbers are included.
+
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/139
+Acked-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Geliang Tang <geliangtang@gmail.com>
+Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/pm.c         |  3 +--
+ net/mptcp/pm_netlink.c | 10 ++++------
+ 2 files changed, 5 insertions(+), 8 deletions(-)
+
+diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
+index da2ed576f289..5463d7c8c931 100644
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -188,8 +188,7 @@ void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
+ void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk)
+ {
+-      if (!mptcp_pm_should_add_signal_ipv6(msk) &&
+-          !mptcp_pm_should_add_signal_port(msk))
++      if (!mptcp_pm_should_add_signal(msk))
+               return;
+       mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK);
+diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
+index a6d983d80576..b81ce0ea1f8b 100644
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -408,8 +408,7 @@ void mptcp_pm_nl_add_addr_send_ack(struct mptcp_sock *msk)
+ {
+       struct mptcp_subflow_context *subflow;
+-      if (!mptcp_pm_should_add_signal_ipv6(msk) &&
+-          !mptcp_pm_should_add_signal_port(msk))
++      if (!mptcp_pm_should_add_signal(msk))
+               return;
+       __mptcp_flush_join_list(msk);
+@@ -419,10 +418,9 @@ void mptcp_pm_nl_add_addr_send_ack(struct mptcp_sock *msk)
+               u8 add_addr;
+               spin_unlock_bh(&msk->pm.lock);
+-              if (mptcp_pm_should_add_signal_ipv6(msk))
+-                      pr_debug("send ack for add_addr6");
+-              if (mptcp_pm_should_add_signal_port(msk))
+-                      pr_debug("send ack for add_addr_port");
++              pr_debug("send ack for add_addr%s%s",
++                       mptcp_pm_should_add_signal_ipv6(msk) ? " [ipv6]" : "",
++                       mptcp_pm_should_add_signal_port(msk) ? " [port]" : "");
+               lock_sock(ssk);
+               tcp_send_ack(ssk);
+-- 
+2.30.1
+
diff --git a/queue-5.11/net-bonding-fix-error-return-code-of-bond_neigh_init.patch b/queue-5.11/net-bonding-fix-error-return-code-of-bond_neigh_init.patch
new file mode 100644 (file)
index 0000000..1652fb1
--- /dev/null
@@ -0,0 +1,47 @@
+From 3d4ed58e5471feb504a2eb05432ad23b95ff655e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 7 Mar 2021 19:11:02 -0800
+Subject: net: bonding: fix error return code of bond_neigh_init()
+
+From: Jia-Ju Bai <baijiaju1990@gmail.com>
+
+[ Upstream commit 2055a99da8a253a357bdfd359b3338ef3375a26c ]
+
+When slave is NULL or slave_ops->ndo_neigh_setup is NULL,
+bond_neigh_init() returns without assigning an error return code.
+To fix this bug, assign -EINVAL to ret in these cases.
+
+Fixes: 9e99bfefdbce ("bonding: fix bond_neigh_init()")
+Reported-by: TOTE Robot <oslab@tsinghua.edu.cn>
+Signed-off-by: Jia-Ju Bai <baijiaju1990@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 5fe5232cc3f3..fba6b6d1b430 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3917,11 +3917,15 @@ static int bond_neigh_init(struct neighbour *n)
+       rcu_read_lock();
+       slave = bond_first_slave_rcu(bond);
+-      if (!slave)
++      if (!slave) {
++              ret = -EINVAL;
+               goto out;
++      }
+       slave_ops = slave->dev->netdev_ops;
+-      if (!slave_ops->ndo_neigh_setup)
++      if (!slave_ops->ndo_neigh_setup) {
++              ret = -EINVAL;
+               goto out;
++      }
+       /* TODO: find another way [1] to implement this.
+        * Passing a zeroed structure is fragile,
+-- 
+2.30.1
+
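A compressed sketch of the fixed control flow, assuming ret is initialised to 0 earlier in bond_neigh_init() (the initialisation sits outside the hunk), which is what made the early exits look like success before this change:

/* Sketch of the two fixed paths only; surrounding code is elided. */
rcu_read_lock();
slave = bond_first_slave_rcu(bond);
if (!slave) {
	ret = -EINVAL;	/* previously fell through with the stale 0 */
	goto out;
}

slave_ops = slave->dev->netdev_ops;
if (!slave_ops->ndo_neigh_setup) {
	ret = -EINVAL;	/* callers now see the failure */
	goto out;
}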
diff --git a/queue-5.11/regulator-pca9450-add-sd_vsel-gpio-for-ldo5.patch b/queue-5.11/regulator-pca9450-add-sd_vsel-gpio-for-ldo5.patch
new file mode 100644 (file)
index 0000000..a1285e7
--- /dev/null
@@ -0,0 +1,69 @@
+From 4e620727ba444ef5ccf38d5012b6a0a4eaea039c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Feb 2021 11:55:28 +0100
+Subject: regulator: pca9450: Add SD_VSEL GPIO for LDO5
+
+From: Frieder Schrempf <frieder.schrempf@kontron.de>
+
+[ Upstream commit 8c67a11bae889f51fe5054364c3c789dfae3ad73 ]
+
+LDO5 has two separate control registers. LDO5CTRL_L is used if the
+input signal SD_VSEL is low and LDO5CTRL_H if it is high.
+The current driver implementation only uses LDO5CTRL_H. To make this
+work on boards that have SD_VSEL connected to a GPIO, we add support
+for specifying an optional GPIO and setting it to high at probe time.
+
+In the future we might also want to add support for boards that have
+SD_VSEL set to a fixed low level. In this case we need to change the
+driver to be able to use the LDO5CTRL_L register.
+
+Signed-off-by: Frieder Schrempf <frieder.schrempf@kontron.de>
+Link: https://lore.kernel.org/r/20210211105534.38972-1-frieder.schrempf@kontron.de
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/pca9450-regulator.c | 14 ++++++++++++++
+ 1 file changed, 14 insertions(+)
+
+diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
+index cb29421d745a..1bba8fdcb7b7 100644
+--- a/drivers/regulator/pca9450-regulator.c
++++ b/drivers/regulator/pca9450-regulator.c
+@@ -5,6 +5,7 @@
+  */
+ #include <linux/err.h>
++#include <linux/gpio/consumer.h>
+ #include <linux/i2c.h>
+ #include <linux/interrupt.h>
+ #include <linux/kernel.h>
+@@ -32,6 +33,7 @@ struct pca9450_regulator_desc {
+ struct pca9450 {
+       struct device *dev;
+       struct regmap *regmap;
++      struct gpio_desc *sd_vsel_gpio;
+       enum pca9450_chip_type type;
+       unsigned int rcnt;
+       int irq;
+@@ -795,6 +797,18 @@ static int pca9450_i2c_probe(struct i2c_client *i2c,
+               return ret;
+       }
++      /*
++       * The driver uses the LDO5CTRL_H register to control the LDO5 regulator.
++       * This is only valid if the SD_VSEL input of the PMIC is high. Let's
++       * check if the pin is available as GPIO and set it to high.
++       */
++      pca9450->sd_vsel_gpio = gpiod_get_optional(pca9450->dev, "sd-vsel", GPIOD_OUT_HIGH);
++
++      if (IS_ERR(pca9450->sd_vsel_gpio)) {
++              dev_err(&i2c->dev, "Failed to get SD_VSEL GPIO\n");
++              return ret;
++      }
++
+       dev_info(&i2c->dev, "%s probed.\n",
+               type == PCA9450_TYPE_PCA9450A ? "pca9450a" : "pca9450bc");
+-- 
+2.30.1
+
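The probe hunk leans on the gpiod_get_optional() contract: it returns NULL rather than an error when firmware describes no "sd-vsel" GPIO, so boards without the line keep their existing behaviour, while GPIOD_OUT_HIGH requests the pin as an output already driven high. A sketch of that contract; the lookup mirrors the call in pca9450_i2c_probe(), but the error handling here is illustrative only:

/* Sketch, not part of the patch. */
pca9450->sd_vsel_gpio = gpiod_get_optional(pca9450->dev, "sd-vsel",
					   GPIOD_OUT_HIGH);
if (IS_ERR(pca9450->sd_vsel_gpio)) {
	/* A real lookup/request failure, e.g. -EPROBE_DEFER. */
	return PTR_ERR(pca9450->sd_vsel_gpio);
}
if (!pca9450->sd_vsel_gpio) {
	/* No GPIO described: the driver keeps using LDO5CTRL_H and
	 * relies on the board wiring SD_VSEL high.
	 */
	dev_dbg(&i2c->dev, "no SD_VSEL GPIO described by firmware\n");
}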
diff --git a/queue-5.11/regulator-pca9450-clear-preset_en-bit-to-fix-buck1-2.patch b/queue-5.11/regulator-pca9450-clear-preset_en-bit-to-fix-buck1-2.patch
new file mode 100644 (file)
index 0000000..87b26f2
--- /dev/null
@@ -0,0 +1,64 @@
+From 60267fb07f807a21708d21943bb5e8beffe23d8a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Feb 2021 12:52:20 +0100
+Subject: regulator: pca9450: Clear PRESET_EN bit to fix BUCK1/2/3 voltage
+ setting
+
+From: Frieder Schrempf <frieder.schrempf@kontron.de>
+
+[ Upstream commit 98b94b6e38ca0c4eeb29949c656f6a315000c23e ]
+
+The driver uses the DVS registers PCA9450_REG_BUCKxOUT_DVS0 to set the
+voltage for the buck regulators 1, 2 and 3. This has no effect as the
+PRESET_EN bit is set by default and therefore the preset values are used
+instead, which are set to 850 mV.
+
+To fix this we clear the PRESET_EN bit at time of initialization.
+
+Fixes: 0935ff5f1f0a ("regulator: pca9450: add pca9450 pmic driver")
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Frieder Schrempf <frieder.schrempf@kontron.de>
+Link: https://lore.kernel.org/r/20210222115229.166620-1-frieder.schrempf@kontron.de
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/pca9450-regulator.c | 8 ++++++++
+ include/linux/regulator/pca9450.h     | 3 +++
+ 2 files changed, 11 insertions(+)
+
+diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
+index 833d398c6aa2..d38109cc3a01 100644
+--- a/drivers/regulator/pca9450-regulator.c
++++ b/drivers/regulator/pca9450-regulator.c
+@@ -797,6 +797,14 @@ static int pca9450_i2c_probe(struct i2c_client *i2c,
+               return ret;
+       }
++      /* Clear PRESET_EN bit in BUCK123_DVS to use DVS registers */
++      ret = regmap_clear_bits(pca9450->regmap, PCA9450_REG_BUCK123_DVS,
++                              BUCK123_PRESET_EN);
++      if (ret) {
++              dev_err(&i2c->dev, "Failed to clear PRESET_EN bit: %d\n", ret);
++              return ret;
++      }
++
+       /* Set reset behavior on assertion of WDOG_B signal */
+       ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_RESET_CTRL,
+                               WDOG_B_CFG_MASK, WDOG_B_CFG_COLD_LDO12);
+diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h
+index ccdb5320a240..71902f41c919 100644
+--- a/include/linux/regulator/pca9450.h
++++ b/include/linux/regulator/pca9450.h
+@@ -147,6 +147,9 @@ enum {
+ #define BUCK6_FPWM                    0x04
+ #define BUCK6_ENMODE_MASK             0x03
++/* PCA9450_REG_BUCK123_PRESET_EN bit */
++#define BUCK123_PRESET_EN             0x80
++
+ /* PCA9450_BUCK1OUT_DVS0 bits */
+ #define BUCK1OUT_DVS0_MASK            0x7F
+ #define BUCK1OUT_DVS0_DEFAULT         0x14
+-- 
+2.30.1
+
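For reference, regmap_clear_bits() used above is a read-modify-write helper: it clears only the bits in the supplied mask and leaves the rest of the register untouched. The call in this patch is therefore equivalent to the open-coded form below:

/* Equivalent to the regmap_clear_bits() call in the hunk above:
 * clear only PRESET_EN (bit 7) in BUCK123_DVS, keep the other bits.
 */
ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_BUCK123_DVS,
			 BUCK123_PRESET_EN, 0);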
diff --git a/queue-5.11/regulator-pca9450-enable-system-reset-on-wdog_b-asse.patch b/queue-5.11/regulator-pca9450-enable-system-reset-on-wdog_b-asse.patch
new file mode 100644 (file)
index 0000000..2102248
--- /dev/null
@@ -0,0 +1,64 @@
+From 80239285281f0bfea926caaaa9227c2b4d680974 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Feb 2021 11:55:30 +0100
+Subject: regulator: pca9450: Enable system reset on WDOG_B assertion
+
+From: Frieder Schrempf <frieder.schrempf@kontron.de>
+
+[ Upstream commit f7684f5a048febd2a7bc98ee81d6dce52f7268b8 ]
+
+By default the PCA9450 doesn't handle the assertion of the WDOG_B
+signal, but this is required to guarantee that things like software
+resets triggered by the watchdog work reliably.
+
+As we don't want to rely on the bootloader to enable this, we tell
+the PMIC to issue a cold reset in case the WDOG_B signal is
+asserted (WDOG_B_CFG = 10), just as the NXP U-Boot code does.
+
+Signed-off-by: Frieder Schrempf <frieder.schrempf@kontron.de>
+Link: https://lore.kernel.org/r/20210211105534.38972-3-frieder.schrempf@kontron.de
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/pca9450-regulator.c | 8 ++++++++
+ include/linux/regulator/pca9450.h     | 7 +++++++
+ 2 files changed, 15 insertions(+)
+
+diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
+index 1bba8fdcb7b7..833d398c6aa2 100644
+--- a/drivers/regulator/pca9450-regulator.c
++++ b/drivers/regulator/pca9450-regulator.c
+@@ -797,6 +797,14 @@ static int pca9450_i2c_probe(struct i2c_client *i2c,
+               return ret;
+       }
++      /* Set reset behavior on assertion of WDOG_B signal */
++      ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_RESET_CTRL,
++                              WDOG_B_CFG_MASK, WDOG_B_CFG_COLD_LDO12);
++      if (ret) {
++              dev_err(&i2c->dev, "Failed to set WDOG_B reset behavior\n");
++              return ret;
++      }
++
+       /*
+        * The driver uses the LDO5CTRL_H register to control the LDO5 regulator.
+        * This is only valid if the SD_VSEL input of the PMIC is high. Let's
+diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h
+index 1bbd3014f906..ccdb5320a240 100644
+--- a/include/linux/regulator/pca9450.h
++++ b/include/linux/regulator/pca9450.h
+@@ -216,4 +216,11 @@ enum {
+ #define IRQ_THERM_105                 0x02
+ #define IRQ_THERM_125                 0x01
++/* PCA9450_REG_RESET_CTRL bits */
++#define WDOG_B_CFG_MASK                       0xC0
++#define WDOG_B_CFG_NONE                       0x00
++#define WDOG_B_CFG_WARM                       0x40
++#define WDOG_B_CFG_COLD_LDO12         0x80
++#define WDOG_B_CFG_COLD                       0xC0
++
+ #endif /* __LINUX_REG_PCA9450_H__ */
+-- 
+2.30.1
+
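The new WDOG_B_CFG_* definitions describe a two-bit field: the mask 0xC0 covers bits 7:6 of PCA9450_REG_RESET_CTRL, and WDOG_B_CFG_COLD_LDO12 (0x80) is the 0b10 field value the commit message refers to. Purely as an illustration of how the values line up:

/* WDOG_B_CFG field, bits 7:6 of PCA9450_REG_RESET_CTRL:
 *   0b00 = 0x00  WDOG_B_CFG_NONE
 *   0b01 = 0x40  WDOG_B_CFG_WARM
 *   0b10 = 0x80  WDOG_B_CFG_COLD_LDO12   <-- cold reset, programmed by this patch
 *   0b11 = 0xC0  WDOG_B_CFG_COLD
 */
ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_RESET_CTRL,
			 WDOG_B_CFG_MASK, WDOG_B_CFG_COLD_LDO12);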
diff --git a/queue-5.11/series b/queue-5.11/series
new file mode 100644 (file)
index 0000000..7c57b6f
--- /dev/null
@@ -0,0 +1,19 @@
+io_uring-don-t-attempt-io-reissue-from-the-ring-exit.patch
+kvm-x86-mmu-expand-on-the-comment-in-kvm_vcpu_ad_nee.patch
+kvm-x86-mmu-set-spte_ad_wrprot_only_mask-if-and-only.patch
+mptcp-send-ack-for-every-add_addr.patch
+mptcp-pm-add-lockdep-assertions.patch
+mptcp-dispose-initial-struct-socket-when-its-subflow.patch
+io_uring-refactor-scheduling-in-io_cqring_wait.patch
+io_uring-refactor-io_cqring_wait.patch
+io_uring-don-t-keep-looping-for-more-events-if-we-ca.patch
+io_uring-simplify-do_read-return-parsing.patch
+io_uring-clear-iocb_waitq-for-non-eiocbqueued-return.patch
+gpiolib-read-gpio-line-names-from-a-firmware-node.patch
+net-bonding-fix-error-return-code-of-bond_neigh_init.patch
+regulator-pca9450-add-sd_vsel-gpio-for-ldo5.patch
+regulator-pca9450-enable-system-reset-on-wdog_b-asse.patch
+regulator-pca9450-clear-preset_en-bit-to-fix-buck1-2.patch
+gfs2-add-common-helper-for-holding-and-releasing-the.patch
+gfs2-move-freeze-glock-outside-the-make_fs_rw-and-_r.patch
+gfs2-bypass-signal_our_withdraw-if-no-journal.patch