--- /dev/null
+From e1bc2d640c9853d3034a10777eb87a1089ad914c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Nov 2022 21:01:59 +0800
+Subject: ceph: fix NULL pointer dereference for req->r_session
+
+From: Xiubo Li <xiubli@redhat.com>
+
+[ Upstream commit 5bd76b8de5b74fa941a6eafee87728a0fe072267 ]
+
+The request's r_session maybe changed when it was forwarded or
+resent. Both the forwarding and resending cases the requests will
+be protected by the mdsc->mutex.
+
+Cc: stable@vger.kernel.org
+Link: https://bugzilla.redhat.com/show_bug.cgi?id=2137955
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/caps.c | 48 ++++++++++++------------------------------------
+ 1 file changed, 12 insertions(+), 36 deletions(-)
+
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 668be87ffee6..51562d36fa83 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -2297,7 +2297,6 @@ static int unsafe_request_wait(struct inode *inode)
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mds_request *req1 = NULL, *req2 = NULL;
+- unsigned int max_sessions;
+ int ret, err = 0;
+
+ spin_lock(&ci->i_unsafe_lock);
+@@ -2315,28 +2314,24 @@ static int unsafe_request_wait(struct inode *inode)
+ }
+ spin_unlock(&ci->i_unsafe_lock);
+
+- /*
+- * The mdsc->max_sessions is unlikely to be changed
+- * mostly, here we will retry it by reallocating the
+- * sessions array memory to get rid of the mdsc->mutex
+- * lock.
+- */
+-retry:
+- max_sessions = mdsc->max_sessions;
+-
+ /*
+ * Trigger to flush the journal logs in all the relevant MDSes
+ * manually, or in the worst case we must wait at most 5 seconds
+ * to wait the journal logs to be flushed by the MDSes periodically.
+ */
+- if ((req1 || req2) && likely(max_sessions)) {
+- struct ceph_mds_session **sessions = NULL;
+- struct ceph_mds_session *s;
++ if (req1 || req2) {
+ struct ceph_mds_request *req;
++ struct ceph_mds_session **sessions;
++ struct ceph_mds_session *s;
++ unsigned int max_sessions;
+ int i;
+
++ mutex_lock(&mdsc->mutex);
++ max_sessions = mdsc->max_sessions;
++
+ sessions = kcalloc(max_sessions, sizeof(s), GFP_KERNEL);
+ if (!sessions) {
++ mutex_unlock(&mdsc->mutex);
+ err = -ENOMEM;
+ goto out;
+ }
+@@ -2348,16 +2343,6 @@ static int unsafe_request_wait(struct inode *inode)
+ s = req->r_session;
+ if (!s)
+ continue;
+- if (unlikely(s->s_mds >= max_sessions)) {
+- spin_unlock(&ci->i_unsafe_lock);
+- for (i = 0; i < max_sessions; i++) {
+- s = sessions[i];
+- if (s)
+- ceph_put_mds_session(s);
+- }
+- kfree(sessions);
+- goto retry;
+- }
+ if (!sessions[s->s_mds]) {
+ s = ceph_get_mds_session(s);
+ sessions[s->s_mds] = s;
+@@ -2370,16 +2355,6 @@ static int unsafe_request_wait(struct inode *inode)
+ s = req->r_session;
+ if (!s)
+ continue;
+- if (unlikely(s->s_mds >= max_sessions)) {
+- spin_unlock(&ci->i_unsafe_lock);
+- for (i = 0; i < max_sessions; i++) {
+- s = sessions[i];
+- if (s)
+- ceph_put_mds_session(s);
+- }
+- kfree(sessions);
+- goto retry;
+- }
+ if (!sessions[s->s_mds]) {
+ s = ceph_get_mds_session(s);
+ sessions[s->s_mds] = s;
+@@ -2391,11 +2366,12 @@ static int unsafe_request_wait(struct inode *inode)
+ /* the auth MDS */
+ spin_lock(&ci->i_ceph_lock);
+ if (ci->i_auth_cap) {
+- s = ci->i_auth_cap->session;
+- if (!sessions[s->s_mds])
+- sessions[s->s_mds] = ceph_get_mds_session(s);
++ s = ci->i_auth_cap->session;
++ if (!sessions[s->s_mds])
++ sessions[s->s_mds] = ceph_get_mds_session(s);
+ }
+ spin_unlock(&ci->i_ceph_lock);
++ mutex_unlock(&mdsc->mutex);
+
+ /* send flush mdlog request to MDSes */
+ for (i = 0; i < max_sessions; i++) {
+--
+2.35.1
+
--- /dev/null
+From 8c1dd05613372e51a6bac73aa98dbce4e0bb94cc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 6 Sep 2021 12:43:01 +0300
+Subject: ceph: fix off by one bugs in unsafe_request_wait()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit 708c87168b6121abc74b2a57d0c498baaf70cbea ]
+
+The "> max" tests should be ">= max" to prevent an out of bounds access
+on the next lines.
+
+Fixes: e1a4541ec0b9 ("ceph: flush the mdlog before waiting on unsafe reqs")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Stable-dep-of: 5bd76b8de5b7 ("ceph: fix NULL pointer dereference for req->r_session")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/caps.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 2fa6b7cc0cc4..f14d52848b91 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -2343,7 +2343,7 @@ static int unsafe_request_wait(struct inode *inode)
+ list_for_each_entry(req, &ci->i_unsafe_dirops,
+ r_unsafe_dir_item) {
+ s = req->r_session;
+- if (unlikely(s->s_mds > max)) {
++ if (unlikely(s->s_mds >= max)) {
+ spin_unlock(&ci->i_unsafe_lock);
+ goto retry;
+ }
+@@ -2357,7 +2357,7 @@ static int unsafe_request_wait(struct inode *inode)
+ list_for_each_entry(req, &ci->i_unsafe_iops,
+ r_unsafe_target_item) {
+ s = req->r_session;
+- if (unlikely(s->s_mds > max)) {
++ if (unlikely(s->s_mds >= max)) {
+ spin_unlock(&ci->i_unsafe_lock);
+ goto retry;
+ }
+--
+2.35.1
+
--- /dev/null
+From 70f0650f5769ca26a04f040dab2ae74a89d6f10b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Apr 2022 09:07:21 +0800
+Subject: ceph: fix possible NULL pointer dereference for req->r_session
+
+From: Xiubo Li <xiubli@redhat.com>
+
+[ Upstream commit 7acae6183cf37c48b8da48bbbdb78820fb3913f3 ]
+
+The request will be inserted into the ci->i_unsafe_dirops before
+assigning the req->r_session, so it's possible that we will hit
+NULL pointer dereference bug here.
+
+Cc: stable@vger.kernel.org
+URL: https://tracker.ceph.com/issues/55327
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Tested-by: Aaron Tomlin <atomlin@redhat.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Stable-dep-of: 5bd76b8de5b7 ("ceph: fix NULL pointer dereference for req->r_session")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/caps.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 4e2fada35808..ce6a858e765a 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -2346,6 +2346,8 @@ static int unsafe_request_wait(struct inode *inode)
+ list_for_each_entry(req, &ci->i_unsafe_dirops,
+ r_unsafe_dir_item) {
+ s = req->r_session;
++ if (!s)
++ continue;
+ if (unlikely(s->s_mds >= max_sessions)) {
+ spin_unlock(&ci->i_unsafe_lock);
+ for (i = 0; i < max_sessions; i++) {
+@@ -2366,6 +2368,8 @@ static int unsafe_request_wait(struct inode *inode)
+ list_for_each_entry(req, &ci->i_unsafe_iops,
+ r_unsafe_target_item) {
+ s = req->r_session;
++ if (!s)
++ continue;
+ if (unlikely(s->s_mds >= max_sessions)) {
+ spin_unlock(&ci->i_unsafe_lock);
+ for (i = 0; i < max_sessions; i++) {
+--
+2.35.1
+
--- /dev/null
+From 4b1453e83a8c84f67403e980b7f26bbbf41bb5fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jul 2021 09:22:56 +0800
+Subject: ceph: flush mdlog before umounting
+
+From: Xiubo Li <xiubli@redhat.com>
+
+[ Upstream commit d095559ce4100f0c02aea229705230deac329c97 ]
+
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Stable-dep-of: 5bd76b8de5b7 ("ceph: fix NULL pointer dereference for req->r_session")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/mds_client.c | 25 +++++++++++++++++++++++++
+ fs/ceph/mds_client.h | 1 +
+ fs/ceph/strings.c | 1 +
+ include/linux/ceph/ceph_fs.h | 1 +
+ 4 files changed, 28 insertions(+)
+
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 45587b3025e4..fa51872ff850 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -4664,6 +4664,30 @@ static void wait_requests(struct ceph_mds_client *mdsc)
+ dout("wait_requests done\n");
+ }
+
++void send_flush_mdlog(struct ceph_mds_session *s)
++{
++ struct ceph_msg *msg;
++
++ /*
++ * Pre-luminous MDS crashes when it sees an unknown session request
++ */
++ if (!CEPH_HAVE_FEATURE(s->s_con.peer_features, SERVER_LUMINOUS))
++ return;
++
++ mutex_lock(&s->s_mutex);
++ dout("request mdlog flush to mds%d (%s)s seq %lld\n", s->s_mds,
++ ceph_session_state_name(s->s_state), s->s_seq);
++ msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_FLUSH_MDLOG,
++ s->s_seq);
++ if (!msg) {
++ pr_err("failed to request mdlog flush to mds%d (%s) seq %lld\n",
++ s->s_mds, ceph_session_state_name(s->s_state), s->s_seq);
++ } else {
++ ceph_con_send(&s->s_con, msg);
++ }
++ mutex_unlock(&s->s_mutex);
++}
++
+ /*
+ * called before mount is ro, and before dentries are torn down.
+ * (hmm, does this still race with new lookups?)
+@@ -4673,6 +4697,7 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
+ dout("pre_umount\n");
+ mdsc->stopping = 1;
+
++ ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true);
+ ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false);
+ ceph_flush_dirty_caps(mdsc);
+ wait_requests(mdsc);
+diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
+index 88fc80832016..a92e42e8a9f8 100644
+--- a/fs/ceph/mds_client.h
++++ b/fs/ceph/mds_client.h
+@@ -518,6 +518,7 @@ static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
+ kref_put(&req->r_kref, ceph_mdsc_release_request);
+ }
+
++extern void send_flush_mdlog(struct ceph_mds_session *s);
+ extern void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
+ void (*cb)(struct ceph_mds_session *),
+ bool check_state);
+diff --git a/fs/ceph/strings.c b/fs/ceph/strings.c
+index 4a79f3632260..573bb9556fb5 100644
+--- a/fs/ceph/strings.c
++++ b/fs/ceph/strings.c
+@@ -46,6 +46,7 @@ const char *ceph_session_op_name(int op)
+ case CEPH_SESSION_FLUSHMSG_ACK: return "flushmsg_ack";
+ case CEPH_SESSION_FORCE_RO: return "force_ro";
+ case CEPH_SESSION_REJECT: return "reject";
++ case CEPH_SESSION_REQUEST_FLUSH_MDLOG: return "flush_mdlog";
+ }
+ return "???";
+ }
+diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
+index 455e9b9e2adf..8287382d3d1d 100644
+--- a/include/linux/ceph/ceph_fs.h
++++ b/include/linux/ceph/ceph_fs.h
+@@ -288,6 +288,7 @@ enum {
+ CEPH_SESSION_FLUSHMSG_ACK,
+ CEPH_SESSION_FORCE_RO,
+ CEPH_SESSION_REJECT,
++ CEPH_SESSION_REQUEST_FLUSH_MDLOG,
+ };
+
+ extern const char *ceph_session_op_name(int op);
+--
+2.35.1
+
--- /dev/null
+From f8cdd34ab2e440220cabbe9bed7a4cad936379b7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jul 2021 09:22:57 +0800
+Subject: ceph: flush the mdlog before waiting on unsafe reqs
+
+From: Xiubo Li <xiubli@redhat.com>
+
+[ Upstream commit e1a4541ec0b951685a49d1f72d183681e6433a45 ]
+
+For the client requests who will have unsafe and safe replies from
+MDS daemons, in the MDS side the MDS daemons won't flush the mdlog
+(journal log) immediatelly, because they think it's unnecessary.
+That's true for most cases but not all, likes the fsync request.
+The fsync will wait until all the unsafe replied requests to be
+safely replied.
+
+Normally if there have multiple threads or clients are running, the
+whole mdlog in MDS daemons could be flushed in time if any request
+will trigger the mdlog submit thread. So usually we won't experience
+the normal operations will stuck for a long time. But in case there
+has only one client with only thread is running, the stuck phenomenon
+maybe obvious and the worst case it must wait at most 5 seconds to
+wait the mdlog to be flushed by the MDS's tick thread periodically.
+
+This patch will trigger to flush the mdlog in the relevant and auth
+MDSes to which the in-flight requests are sent just before waiting
+the unsafe requests to finish.
+
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Stable-dep-of: 5bd76b8de5b7 ("ceph: fix NULL pointer dereference for req->r_session")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/caps.c | 76 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 76 insertions(+)
+
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 7ae27a18cf18..2fa6b7cc0cc4 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -2294,6 +2294,7 @@ static int caps_are_flushed(struct inode *inode, u64 flush_tid)
+ */
+ static int unsafe_request_wait(struct inode *inode)
+ {
++ struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mds_request *req1 = NULL, *req2 = NULL;
+ int ret, err = 0;
+@@ -2313,6 +2314,81 @@ static int unsafe_request_wait(struct inode *inode)
+ }
+ spin_unlock(&ci->i_unsafe_lock);
+
++ /*
++ * Trigger to flush the journal logs in all the relevant MDSes
++ * manually, or in the worst case we must wait at most 5 seconds
++ * to wait the journal logs to be flushed by the MDSes periodically.
++ */
++ if (req1 || req2) {
++ struct ceph_mds_session **sessions = NULL;
++ struct ceph_mds_session *s;
++ struct ceph_mds_request *req;
++ unsigned int max;
++ int i;
++
++ /*
++ * The mdsc->max_sessions is unlikely to be changed
++ * mostly, here we will retry it by reallocating the
++ * sessions arrary memory to get rid of the mdsc->mutex
++ * lock.
++ */
++retry:
++ max = mdsc->max_sessions;
++ sessions = krealloc(sessions, max * sizeof(s), __GFP_ZERO);
++ if (!sessions)
++ return -ENOMEM;
++
++ spin_lock(&ci->i_unsafe_lock);
++ if (req1) {
++ list_for_each_entry(req, &ci->i_unsafe_dirops,
++ r_unsafe_dir_item) {
++ s = req->r_session;
++ if (unlikely(s->s_mds > max)) {
++ spin_unlock(&ci->i_unsafe_lock);
++ goto retry;
++ }
++ if (!sessions[s->s_mds]) {
++ s = ceph_get_mds_session(s);
++ sessions[s->s_mds] = s;
++ }
++ }
++ }
++ if (req2) {
++ list_for_each_entry(req, &ci->i_unsafe_iops,
++ r_unsafe_target_item) {
++ s = req->r_session;
++ if (unlikely(s->s_mds > max)) {
++ spin_unlock(&ci->i_unsafe_lock);
++ goto retry;
++ }
++ if (!sessions[s->s_mds]) {
++ s = ceph_get_mds_session(s);
++ sessions[s->s_mds] = s;
++ }
++ }
++ }
++ spin_unlock(&ci->i_unsafe_lock);
++
++ /* the auth MDS */
++ spin_lock(&ci->i_ceph_lock);
++ if (ci->i_auth_cap) {
++ s = ci->i_auth_cap->session;
++ if (!sessions[s->s_mds])
++ sessions[s->s_mds] = ceph_get_mds_session(s);
++ }
++ spin_unlock(&ci->i_ceph_lock);
++
++ /* send flush mdlog request to MDSes */
++ for (i = 0; i < max; i++) {
++ s = sessions[i];
++ if (s) {
++ send_flush_mdlog(s);
++ ceph_put_mds_session(s);
++ }
++ }
++ kfree(sessions);
++ }
++
+ dout("unsafe_request_wait %p wait on tid %llu %llu\n",
+ inode, req1 ? req1->r_tid : 0ULL, req2 ? req2->r_tid : 0ULL);
+ if (req1) {
+--
+2.35.1
+
--- /dev/null
+From b48510fa4ce81db970f757cc3af8ad384b9c978d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jul 2021 09:22:54 +0800
+Subject: ceph: make ceph_create_session_msg a global symbol
+
+From: Xiubo Li <xiubli@redhat.com>
+
+[ Upstream commit fba97e8025015b63b1bdb73cd868c8ea832a1620 ]
+
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Stable-dep-of: 5bd76b8de5b7 ("ceph: fix NULL pointer dereference for req->r_session")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/mds_client.c | 16 +++++++++-------
+ fs/ceph/mds_client.h | 1 +
+ 2 files changed, 10 insertions(+), 7 deletions(-)
+
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 6859967df2b1..36cf3638f501 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -1157,7 +1157,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
+ /*
+ * session messages
+ */
+-static struct ceph_msg *create_session_msg(u32 op, u64 seq)
++struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq)
+ {
+ struct ceph_msg *msg;
+ struct ceph_mds_session_head *h;
+@@ -1165,7 +1165,8 @@ static struct ceph_msg *create_session_msg(u32 op, u64 seq)
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
+ false);
+ if (!msg) {
+- pr_err("create_session_msg ENOMEM creating msg\n");
++ pr_err("ENOMEM creating session %s msg\n",
++ ceph_session_op_name(op));
+ return NULL;
+ }
+ h = msg->front.iov_base;
+@@ -1299,7 +1300,7 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
+ msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
+ GFP_NOFS, false);
+ if (!msg) {
+- pr_err("create_session_msg ENOMEM creating msg\n");
++ pr_err("ENOMEM creating session open msg\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ p = msg->front.iov_base;
+@@ -1833,8 +1834,8 @@ static int send_renew_caps(struct ceph_mds_client *mdsc,
+
+ dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
+ ceph_mds_state_name(state));
+- msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
+- ++session->s_renew_seq);
++ msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
++ ++session->s_renew_seq);
+ if (!msg)
+ return -ENOMEM;
+ ceph_con_send(&session->s_con, msg);
+@@ -1848,7 +1849,7 @@ static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
+
+ dout("send_flushmsg_ack to mds%d (%s)s seq %lld\n",
+ session->s_mds, ceph_session_state_name(session->s_state), seq);
+- msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
++ msg = ceph_create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
+ if (!msg)
+ return -ENOMEM;
+ ceph_con_send(&session->s_con, msg);
+@@ -1900,7 +1901,8 @@ static int request_close_session(struct ceph_mds_session *session)
+ dout("request_close_session mds%d state %s seq %lld\n",
+ session->s_mds, ceph_session_state_name(session->s_state),
+ session->s_seq);
+- msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
++ msg = ceph_create_session_msg(CEPH_SESSION_REQUEST_CLOSE,
++ session->s_seq);
+ if (!msg)
+ return -ENOMEM;
+ ceph_con_send(&session->s_con, msg);
+diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
+index acf33d7192bb..c0cff765cbf5 100644
+--- a/fs/ceph/mds_client.h
++++ b/fs/ceph/mds_client.h
+@@ -518,6 +518,7 @@ static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
+ kref_put(&req->r_kref, ceph_mdsc_release_request);
+ }
+
++extern struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq);
+ extern void __ceph_queue_cap_release(struct ceph_mds_session *session,
+ struct ceph_cap *cap);
+ extern void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
+--
+2.35.1
+
--- /dev/null
+From 2a971077b3e5a10cb4f7fb2fb75c09f43dd68951 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jul 2021 09:22:55 +0800
+Subject: ceph: make iterate_sessions a global symbol
+
+From: Xiubo Li <xiubli@redhat.com>
+
+[ Upstream commit 59b312f36230ea91ebb6ce1b11f2781604495d30 ]
+
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Stable-dep-of: 5bd76b8de5b7 ("ceph: fix NULL pointer dereference for req->r_session")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/caps.c | 26 +----------------------
+ fs/ceph/mds_client.c | 49 +++++++++++++++++++++++++++++---------------
+ fs/ceph/mds_client.h | 3 +++
+ 3 files changed, 36 insertions(+), 42 deletions(-)
+
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index 76e43a487bc6..7ae27a18cf18 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -4310,33 +4310,9 @@ static void flush_dirty_session_caps(struct ceph_mds_session *s)
+ dout("flush_dirty_caps done\n");
+ }
+
+-static void iterate_sessions(struct ceph_mds_client *mdsc,
+- void (*cb)(struct ceph_mds_session *))
+-{
+- int mds;
+-
+- mutex_lock(&mdsc->mutex);
+- for (mds = 0; mds < mdsc->max_sessions; ++mds) {
+- struct ceph_mds_session *s;
+-
+- if (!mdsc->sessions[mds])
+- continue;
+-
+- s = ceph_get_mds_session(mdsc->sessions[mds]);
+- if (!s)
+- continue;
+-
+- mutex_unlock(&mdsc->mutex);
+- cb(s);
+- ceph_put_mds_session(s);
+- mutex_lock(&mdsc->mutex);
+- }
+- mutex_unlock(&mdsc->mutex);
+-}
+-
+ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
+ {
+- iterate_sessions(mdsc, flush_dirty_session_caps);
++ ceph_mdsc_iterate_sessions(mdsc, flush_dirty_session_caps, true);
+ }
+
+ void __ceph_touch_fmode(struct ceph_inode_info *ci,
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 36cf3638f501..45587b3025e4 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -809,6 +809,33 @@ static void put_request_session(struct ceph_mds_request *req)
+ }
+ }
+
++void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
++ void (*cb)(struct ceph_mds_session *),
++ bool check_state)
++{
++ int mds;
++
++ mutex_lock(&mdsc->mutex);
++ for (mds = 0; mds < mdsc->max_sessions; ++mds) {
++ struct ceph_mds_session *s;
++
++ s = __ceph_lookup_mds_session(mdsc, mds);
++ if (!s)
++ continue;
++
++ if (check_state && !check_session_state(s)) {
++ ceph_put_mds_session(s);
++ continue;
++ }
++
++ mutex_unlock(&mdsc->mutex);
++ cb(s);
++ ceph_put_mds_session(s);
++ mutex_lock(&mdsc->mutex);
++ }
++ mutex_unlock(&mdsc->mutex);
++}
++
+ void ceph_mdsc_release_request(struct kref *kref)
+ {
+ struct ceph_mds_request *req = container_of(kref,
+@@ -4377,24 +4404,12 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
+ }
+
+ /*
+- * lock unlock sessions, to wait ongoing session activities
++ * lock unlock the session, to wait ongoing session activities
+ */
+-static void lock_unlock_sessions(struct ceph_mds_client *mdsc)
++static void lock_unlock_session(struct ceph_mds_session *s)
+ {
+- int i;
+-
+- mutex_lock(&mdsc->mutex);
+- for (i = 0; i < mdsc->max_sessions; i++) {
+- struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
+- if (!s)
+- continue;
+- mutex_unlock(&mdsc->mutex);
+- mutex_lock(&s->s_mutex);
+- mutex_unlock(&s->s_mutex);
+- ceph_put_mds_session(s);
+- mutex_lock(&mdsc->mutex);
+- }
+- mutex_unlock(&mdsc->mutex);
++ mutex_lock(&s->s_mutex);
++ mutex_unlock(&s->s_mutex);
+ }
+
+ static void maybe_recover_session(struct ceph_mds_client *mdsc)
+@@ -4658,7 +4673,7 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
+ dout("pre_umount\n");
+ mdsc->stopping = 1;
+
+- lock_unlock_sessions(mdsc);
++ ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false);
+ ceph_flush_dirty_caps(mdsc);
+ wait_requests(mdsc);
+
+diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
+index c0cff765cbf5..88fc80832016 100644
+--- a/fs/ceph/mds_client.h
++++ b/fs/ceph/mds_client.h
+@@ -518,6 +518,9 @@ static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
+ kref_put(&req->r_kref, ceph_mdsc_release_request);
+ }
+
++extern void ceph_mdsc_iterate_sessions(struct ceph_mds_client *mdsc,
++ void (*cb)(struct ceph_mds_session *),
++ bool check_state);
+ extern struct ceph_msg *ceph_create_session_msg(u32 op, u64 seq);
+ extern void __ceph_queue_cap_release(struct ceph_mds_session *session,
+ struct ceph_cap *cap);
+--
+2.35.1
+
--- /dev/null
+From 3d5b879f514bbe2b7e382e2adf571bcbcaa147a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Jan 2022 12:29:04 +0800
+Subject: ceph: put the requests/sessions when it fails to alloc memory
+
+From: Xiubo Li <xiubli@redhat.com>
+
+[ Upstream commit 89d43d0551a848e70e63d9ba11534aaeabc82443 ]
+
+When failing to allocate the sessions memory we should make sure
+the req1 and req2 and the sessions get put. And also in case the
+max_sessions decreased so when kreallocate the new memory some
+sessions maybe missed being put.
+
+And if the max_sessions is 0 krealloc will return ZERO_SIZE_PTR,
+which will lead to a distinct access fault.
+
+URL: https://tracker.ceph.com/issues/53819
+Fixes: e1a4541ec0b9 ("ceph: flush the mdlog before waiting on unsafe reqs")
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Venky Shankar <vshankar@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Stable-dep-of: 5bd76b8de5b7 ("ceph: fix NULL pointer dereference for req->r_session")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/caps.c | 55 +++++++++++++++++++++++++++++++++-----------------
+ 1 file changed, 37 insertions(+), 18 deletions(-)
+
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index f14d52848b91..4e2fada35808 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -2297,6 +2297,7 @@ static int unsafe_request_wait(struct inode *inode)
+ struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mds_request *req1 = NULL, *req2 = NULL;
++ unsigned int max_sessions;
+ int ret, err = 0;
+
+ spin_lock(&ci->i_unsafe_lock);
+@@ -2314,37 +2315,45 @@ static int unsafe_request_wait(struct inode *inode)
+ }
+ spin_unlock(&ci->i_unsafe_lock);
+
++ /*
++ * The mdsc->max_sessions is unlikely to be changed
++ * mostly, here we will retry it by reallocating the
++ * sessions array memory to get rid of the mdsc->mutex
++ * lock.
++ */
++retry:
++ max_sessions = mdsc->max_sessions;
++
+ /*
+ * Trigger to flush the journal logs in all the relevant MDSes
+ * manually, or in the worst case we must wait at most 5 seconds
+ * to wait the journal logs to be flushed by the MDSes periodically.
+ */
+- if (req1 || req2) {
++ if ((req1 || req2) && likely(max_sessions)) {
+ struct ceph_mds_session **sessions = NULL;
+ struct ceph_mds_session *s;
+ struct ceph_mds_request *req;
+- unsigned int max;
+ int i;
+
+- /*
+- * The mdsc->max_sessions is unlikely to be changed
+- * mostly, here we will retry it by reallocating the
+- * sessions arrary memory to get rid of the mdsc->mutex
+- * lock.
+- */
+-retry:
+- max = mdsc->max_sessions;
+- sessions = krealloc(sessions, max * sizeof(s), __GFP_ZERO);
+- if (!sessions)
+- return -ENOMEM;
++ sessions = kzalloc(max_sessions * sizeof(s), GFP_KERNEL);
++ if (!sessions) {
++ err = -ENOMEM;
++ goto out;
++ }
+
+ spin_lock(&ci->i_unsafe_lock);
+ if (req1) {
+ list_for_each_entry(req, &ci->i_unsafe_dirops,
+ r_unsafe_dir_item) {
+ s = req->r_session;
+- if (unlikely(s->s_mds >= max)) {
++ if (unlikely(s->s_mds >= max_sessions)) {
+ spin_unlock(&ci->i_unsafe_lock);
++ for (i = 0; i < max_sessions; i++) {
++ s = sessions[i];
++ if (s)
++ ceph_put_mds_session(s);
++ }
++ kfree(sessions);
+ goto retry;
+ }
+ if (!sessions[s->s_mds]) {
+@@ -2357,8 +2366,14 @@ static int unsafe_request_wait(struct inode *inode)
+ list_for_each_entry(req, &ci->i_unsafe_iops,
+ r_unsafe_target_item) {
+ s = req->r_session;
+- if (unlikely(s->s_mds >= max)) {
++ if (unlikely(s->s_mds >= max_sessions)) {
+ spin_unlock(&ci->i_unsafe_lock);
++ for (i = 0; i < max_sessions; i++) {
++ s = sessions[i];
++ if (s)
++ ceph_put_mds_session(s);
++ }
++ kfree(sessions);
+ goto retry;
+ }
+ if (!sessions[s->s_mds]) {
+@@ -2379,7 +2394,7 @@ static int unsafe_request_wait(struct inode *inode)
+ spin_unlock(&ci->i_ceph_lock);
+
+ /* send flush mdlog request to MDSes */
+- for (i = 0; i < max; i++) {
++ for (i = 0; i < max_sessions; i++) {
+ s = sessions[i];
+ if (s) {
+ send_flush_mdlog(s);
+@@ -2396,15 +2411,19 @@ static int unsafe_request_wait(struct inode *inode)
+ ceph_timeout_jiffies(req1->r_timeout));
+ if (ret)
+ err = -EIO;
+- ceph_mdsc_put_request(req1);
+ }
+ if (req2) {
+ ret = !wait_for_completion_timeout(&req2->r_safe_completion,
+ ceph_timeout_jiffies(req2->r_timeout));
+ if (ret)
+ err = -EIO;
+- ceph_mdsc_put_request(req2);
+ }
++
++out:
++ if (req1)
++ ceph_mdsc_put_request(req1);
++ if (req2)
++ ceph_mdsc_put_request(req2);
+ return err;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 87fca23d63b33ae3f71101bb5c9da3c1b8a1aad5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Aug 2022 22:42:55 -0700
+Subject: ceph: Use kcalloc for allocating multiple elements
+
+From: Kenneth Lee <klee33@uw.edu>
+
+[ Upstream commit aa1d627207cace003163dee24d1c06fa4e910c6b ]
+
+Prefer using kcalloc(a, b) over kzalloc(a * b) as this improves
+semantics since kcalloc is intended for allocating an array of memory.
+
+Signed-off-by: Kenneth Lee <klee33@uw.edu>
+Reviewed-by: Xiubo Li <xiubli@redhat.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Stable-dep-of: 5bd76b8de5b7 ("ceph: fix NULL pointer dereference for req->r_session")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/caps.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index ce6a858e765a..668be87ffee6 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -2335,7 +2335,7 @@ static int unsafe_request_wait(struct inode *inode)
+ struct ceph_mds_request *req;
+ int i;
+
+- sessions = kzalloc(max_sessions * sizeof(s), GFP_KERNEL);
++ sessions = kcalloc(max_sessions, sizeof(s), GFP_KERNEL);
+ if (!sessions) {
+ err = -ENOMEM;
+ goto out;
+--
+2.35.1
+
--- /dev/null
+From 880750ab053e6202f4d9ce3a8174a0e9dda1b96d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 28 Aug 2022 11:39:50 +0900
+Subject: kbuild: refactor single builds of *.ko
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+[ Upstream commit f110e5a250e3c5db417e094b3dd86f1c135291ca ]
+
+Remove the potentially invalid modules.order instead of using
+the temporary file.
+
+Also, KBUILD_MODULES is don't care for single builds. No need to
+cancel it.
+
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Stable-dep-of: 055f37f84e30 ("KVM: x86: emulator: update the emulation mode after rsm")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Makefile | 16 ++++------------
+ 1 file changed, 4 insertions(+), 12 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index 8994b6f13b13..f737f1c4e2b8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1766,6 +1766,8 @@ modules modules_install:
+ @echo >&2 '***'
+ @exit 1
+
++KBUILD_MODULES :=
++
+ endif # CONFIG_MODULES
+
+ # Single targets
+@@ -1791,18 +1793,12 @@ $(single-ko): single_modpost
+ $(single-no-ko): descend
+ @:
+
+-ifeq ($(KBUILD_EXTMOD),)
+-# For the single build of in-tree modules, use a temporary file to avoid
+-# the situation of modules_install installing an invalid modules.order.
+-MODORDER := .modules.tmp
+-endif
+-
++# Remove MODORDER when done because it is not the real one.
+ PHONY += single_modpost
+ single_modpost: $(single-no-ko) modules_prepare
+ $(Q){ $(foreach m, $(single-ko), echo $(extmod-prefix)$m;) } > $(MODORDER)
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
+-
+-KBUILD_MODULES := 1
++ $(Q)rm -f $(MODORDER)
+
+ export KBUILD_SINGLE_TARGETS := $(addprefix $(extmod-prefix), $(single-no-ko))
+
+@@ -1812,10 +1808,6 @@ build-dirs := $(foreach d, $(build-dirs), \
+
+ endif
+
+-ifndef CONFIG_MODULES
+-KBUILD_MODULES :=
+-endif
+-
+ # Handle descending into subdirectories listed in $(build-dirs)
+ # Preset locale variables to speed up the build process. Limit locale
+ # tweaks to this spot to avoid wrong language settings when running
+--
+2.35.1
+
--- /dev/null
+From 1f2fcfb7aa5bfc58a673ceda542b47b14ba6c838 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Mar 2021 22:38:02 +0900
+Subject: kbuild: remove unneeded mkdir for external modules_install
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+[ Upstream commit 4b97ec0e9cfd5995f41b9726c88566a31f4625cc ]
+
+scripts/Makefile.modinst creates directories as needed.
+
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Stable-dep-of: 055f37f84e30 ("KVM: x86: emulator: update the emulation mode after rsm")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Makefile | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index 166f87bdc190..9eacc623642c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1746,10 +1746,8 @@ $(MODORDER): descend
+ PHONY += modules_install
+ modules_install: _emodinst_ _emodinst_post
+
+-install-dir := $(if $(INSTALL_MOD_DIR),$(INSTALL_MOD_DIR),extra)
+ PHONY += _emodinst_
+ _emodinst_:
+- $(Q)mkdir -p $(MODLIB)/$(install-dir)
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
+
+ PHONY += _emodinst_post
+--
+2.35.1
+
--- /dev/null
+From 521e0b2271424bc36bdd788b98b4b9d2850d7056 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 31 Mar 2021 22:38:03 +0900
+Subject: kbuild: unify modules(_install) for in-tree and external modules
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+[ Upstream commit 3e3005df73b535cb849cf4ec8075d6aa3c460f68 ]
+
+If you attempt to build or install modules ('make modules(_install)')
+with CONFIG_MODULES disabled, you will get a clear error message, but
+nothing for external module builds.
+
+Factor out the modules and modules_install rules into the common part,
+so you will get the same error message when you try to build external
+modules with CONFIG_MODULES=n.
+
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Stable-dep-of: 055f37f84e30 ("KVM: x86: emulator: update the emulation mode after rsm")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Makefile | 85 ++++++++++++++++++++++++--------------------------------
+ 1 file changed, 36 insertions(+), 49 deletions(-)
+
+diff --git a/Makefile b/Makefile
+index 9eacc623642c..8994b6f13b13 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1425,7 +1425,6 @@ endif
+
+ PHONY += modules
+ modules: $(if $(KBUILD_BUILTIN),vmlinux) modules_check modules_prepare
+- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
+
+ PHONY += modules_check
+ modules_check: modules.order
+@@ -1443,12 +1442,9 @@ PHONY += modules_prepare
+ modules_prepare: prepare
+ $(Q)$(MAKE) $(build)=scripts scripts/module.lds
+
+-# Target to install modules
+-PHONY += modules_install
+-modules_install: _modinst_ _modinst_post
+-
+-PHONY += _modinst_
+-_modinst_:
++modules_install: __modinst_pre
++PHONY += __modinst_pre
++__modinst_pre:
+ @rm -rf $(MODLIB)/kernel
+ @rm -f $(MODLIB)/source
+ @mkdir -p $(MODLIB)/kernel
+@@ -1460,14 +1456,6 @@ _modinst_:
+ @sed 's:^:kernel/:' modules.order > $(MODLIB)/modules.order
+ @cp -f modules.builtin $(MODLIB)/
+ @cp -f $(objtree)/modules.builtin.modinfo $(MODLIB)/
+- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
+-
+-# This depmod is only for convenience to give the initial
+-# boot a modules.dep even before / is mounted read-write. However the
+-# boot script depmod is the master version.
+-PHONY += _modinst_post
+-_modinst_post: _modinst_
+- $(call cmd,depmod)
+
+ ifeq ($(CONFIG_MODULE_SIG), y)
+ PHONY += modules_sign
+@@ -1475,20 +1463,6 @@ modules_sign:
+ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modsign
+ endif
+
+-else # CONFIG_MODULES
+-
+-# Modules not configured
+-# ---------------------------------------------------------------------------
+-
+-PHONY += modules modules_install
+-modules modules_install:
+- @echo >&2
+- @echo >&2 "The present kernel configuration has modules disabled."
+- @echo >&2 "Type 'make config' and enable loadable module support."
+- @echo >&2 "Then build a kernel with module support enabled."
+- @echo >&2
+- @exit 1
+-
+ endif # CONFIG_MODULES
+
+ ###
+@@ -1736,24 +1710,9 @@ KBUILD_BUILTIN :=
+ KBUILD_MODULES := 1
+
+ build-dirs := $(KBUILD_EXTMOD)
+-PHONY += modules
+-modules: $(MODORDER)
+- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
+-
+ $(MODORDER): descend
+ @:
+
+-PHONY += modules_install
+-modules_install: _emodinst_ _emodinst_post
+-
+-PHONY += _emodinst_
+-_emodinst_:
+- $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
+-
+-PHONY += _emodinst_post
+-_emodinst_post: _emodinst_
+- $(call cmd,depmod)
+-
+ compile_commands.json: $(extmod-prefix)compile_commands.json
+ PHONY += compile_commands.json
+
+@@ -1776,6 +1735,39 @@ PHONY += prepare modules_prepare
+
+ endif # KBUILD_EXTMOD
+
++# ---------------------------------------------------------------------------
++# Modules
++
++PHONY += modules modules_install
++
++ifdef CONFIG_MODULES
++
++modules: $(MODORDER)
++ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
++
++quiet_cmd_depmod = DEPMOD $(KERNELRELEASE)
++ cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \
++ $(KERNELRELEASE)
++
++modules_install:
++ $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modinst
++ $(call cmd,depmod)
++
++else # CONFIG_MODULES
++
++# Modules not configured
++# ---------------------------------------------------------------------------
++
++modules modules_install:
++ @echo >&2 '***'
++ @echo >&2 '*** The present kernel configuration has modules disabled.'
++ @echo >&2 '*** To use the module feature, please run "make menuconfig" etc.'
++ @echo >&2 '*** to enable CONFIG_MODULES.'
++ @echo >&2 '***'
++ @exit 1
++
++endif # CONFIG_MODULES
++
+ # Single targets
+ # ---------------------------------------------------------------------------
+ # To build individual files in subdirectories, you can do like this:
+@@ -1963,11 +1955,6 @@ tools/%: FORCE
+ quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN $(wildcard $(rm-files)))
+ cmd_rmfiles = rm -rf $(rm-files)
+
+-# Run depmod only if we have System.map and depmod is executable
+-quiet_cmd_depmod = DEPMOD $(KERNELRELEASE)
+- cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \
+- $(KERNELRELEASE)
+-
+ # read saved command lines for existing targets
+ existing-targets := $(wildcard $(sort $(targets)))
+
+--
+2.35.1
+
--- /dev/null
+From 44547012febf343ca7bf53890bbe2a42c22aaa0d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Jun 2021 11:56:18 -0700
+Subject: KVM: x86: Drop .post_leave_smm(), i.e. the manual post-RSM MMU reset
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 0128116550acf52043a0aa5cca3caa85e3853aca ]
+
+Drop the .post_leave_smm() emulator callback, which at this point is just
+a wrapper to kvm_mmu_reset_context(). The manual context reset is
+unnecessary, because unlike enter_smm() which calls vendor MSR/CR helpers
+directly, em_rsm() bounces through the KVM helpers, e.g. kvm_set_cr4(),
+which are responsible for processing side effects. em_rsm() is already
+subtly relying on this behavior as it doesn't manually do
+kvm_update_cpuid_runtime(), e.g. to recognize CR4.OSXSAVE changes.
+
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210609185619.992058-9-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: 055f37f84e30 ("KVM: x86: emulator: update the emulation mode after rsm")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/emulate.c | 10 ++++++++--
+ arch/x86/kvm/kvm_emulate.h | 1 -
+ arch/x86/kvm/x86.c | 6 ------
+ 3 files changed, 8 insertions(+), 9 deletions(-)
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 2e6d7640c4ed..d5da89271318 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2741,8 +2741,14 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
+ if (ret != X86EMUL_CONTINUE)
+ goto emulate_shutdown;
+
+- ctxt->ops->post_leave_smm(ctxt);
+-
++ /*
++ * Note, the ctxt->ops callbacks are responsible for handling side
++ * effects when writing MSRs and CRs, e.g. MMU context resets, CPUID
++ * runtime updates, etc... If that changes, e.g. this flow is moved
++ * out of the emulator to make it look more like enter_smm(), then
++ * those side effects need to be explicitly handled for both success
++ * and shutdown.
++ */
+ return X86EMUL_CONTINUE;
+
+ emulate_shutdown:
+diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
+index 148ba21d0646..3650db9e5ed5 100644
+--- a/arch/x86/kvm/kvm_emulate.h
++++ b/arch/x86/kvm/kvm_emulate.h
+@@ -233,7 +233,6 @@ struct x86_emulate_ops {
+ void (*exiting_smm)(struct x86_emulate_ctxt *ctxt);
+ int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
+ const char *smstate);
+- void (*post_leave_smm)(struct x86_emulate_ctxt *ctxt);
+ void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
+ int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
+ };
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index ef43bfa1b3a8..e04e97b0661c 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7013,11 +7013,6 @@ static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+ return kvm_x86_ops.pre_leave_smm(emul_to_vcpu(ctxt), smstate);
+ }
+
+-static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
+-{
+- kvm_mmu_reset_context(emul_to_vcpu(ctxt));
+-}
+-
+ static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
+ {
+ kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt));
+@@ -7072,7 +7067,6 @@ static const struct x86_emulate_ops emulate_ops = {
+ .get_hflags = emulator_get_hflags,
+ .exiting_smm = emulator_exiting_smm,
+ .pre_leave_smm = emulator_pre_leave_smm,
+- .post_leave_smm = emulator_post_leave_smm,
+ .triple_fault = emulator_triple_fault,
+ .set_xcr = emulator_set_xcr,
+ };
+--
+2.35.1
+
--- /dev/null
+From 9d1b2beac4b61cf1f30aa0dddc85547ed61eb2cb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Jun 2021 11:56:12 -0700
+Subject: KVM: x86: Emulate triple fault shutdown if RSM emulation fails
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit 25b17226cd9a77982fc8c915d4118d7238a0f079 ]
+
+Use the recently introduced KVM_REQ_TRIPLE_FAULT to properly emulate
+shutdown if RSM from SMM fails.
+
+Note, entering shutdown after clearing the SMM flag and restoring NMI
+blocking is architecturally correct with respect to AMD's APM, which KVM
+also uses for SMRAM layout and RSM NMI blocking behavior. The APM says:
+
+ An RSM causes a processor shutdown if an invalid-state condition is
+ found in the SMRAM state-save area. Only an external reset, external
+ processor-initialization, or non-maskable external interrupt (NMI) can
+ cause the processor to leave the shutdown state.
+
+Of note is processor-initialization (INIT) as a valid shutdown wake
+event, as INIT is blocked by SMM, implying that entering shutdown also
+forces the CPU out of SMM.
+
+For recent Intel CPUs, restoring NMI blocking is technically wrong, but
+so is restoring NMI blocking in the first place, and Intel's RSM
+"architecture" is such a mess that just about anything is allowed and can
+be justified as micro-architectural behavior.
+
+Per the SDM:
+
+ On Pentium 4 and later processors, shutdown will inhibit INTR and A20M
+ but will not change any of the other inhibits. On these processors,
+ NMIs will be inhibited if no action is taken in the SMI handler to
+ uninhibit them (see Section 34.8).
+
+where Section 34.8 says:
+
+ When the processor enters SMM while executing an NMI handler, the
+ processor saves the SMRAM state save map but does not save the
+ attribute to keep NMI interrupts disabled. Potentially, an NMI could be
+ latched (while in SMM or upon exit) and serviced upon exit of SMM even
+ though the previous NMI handler has still not completed.
+
+I.e. RSM unconditionally unblocks NMI, but shutdown on RSM does not,
+which is in direct contradiction of KVM's behavior. But, as mentioned
+above, KVM follows AMD architecture and restores NMI blocking on RSM, so
+that micro-architectural detail is already lost.
+
+And for Pentium era CPUs, SMI# can break shutdown, meaning that at least
+some Intel CPUs fully leave SMM when entering shutdown:
+
+ In the shutdown state, Intel processors stop executing instructions
+ until a RESET#, INIT# or NMI# is asserted. While Pentium family
+ processors recognize the SMI# signal in shutdown state, P6 family and
+ Intel486 processors do not.
+
+In other words, the fact that Intel CPUs have implemented the two
+extremes gives KVM carte blanche when it comes to honoring Intel's
+architecture for handling shutdown during RSM.
+
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210609185619.992058-3-seanjc@google.com>
+[Return X86EMUL_CONTINUE after triple fault. - Paolo]
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: 055f37f84e30 ("KVM: x86: emulator: update the emulation mode after rsm")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/emulate.c | 12 +++++++-----
+ arch/x86/kvm/kvm_emulate.h | 1 +
+ arch/x86/kvm/x86.c | 6 ++++++
+ 3 files changed, 14 insertions(+), 5 deletions(-)
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 63efccc8f429..89ad10261d90 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2730,7 +2730,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
+ * state-save area.
+ */
+ if (ctxt->ops->pre_leave_smm(ctxt, buf))
+- return X86EMUL_UNHANDLEABLE;
++ goto emulate_shutdown;
+
+ #ifdef CONFIG_X86_64
+ if (emulator_has_longmode(ctxt))
+@@ -2739,14 +2739,16 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
+ #endif
+ ret = rsm_load_state_32(ctxt, buf);
+
+- if (ret != X86EMUL_CONTINUE) {
+- /* FIXME: should triple fault */
+- return X86EMUL_UNHANDLEABLE;
+- }
++ if (ret != X86EMUL_CONTINUE)
++ goto emulate_shutdown;
+
+ ctxt->ops->post_leave_smm(ctxt);
+
+ return X86EMUL_CONTINUE;
++
++emulate_shutdown:
++ ctxt->ops->triple_fault(ctxt);
++ return X86EMUL_CONTINUE;
+ }
+
+ static void
+diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
+index aeed6da60e0c..1da3f77a8728 100644
+--- a/arch/x86/kvm/kvm_emulate.h
++++ b/arch/x86/kvm/kvm_emulate.h
+@@ -234,6 +234,7 @@ struct x86_emulate_ops {
+ int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
+ const char *smstate);
+ void (*post_leave_smm)(struct x86_emulate_ctxt *ctxt);
++ void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
+ int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
+ };
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 23d7c563e012..20dc108f2c4c 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7018,6 +7018,11 @@ static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
+ kvm_smm_changed(emul_to_vcpu(ctxt));
+ }
+
++static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
++{
++ kvm_make_request(KVM_REQ_TRIPLE_FAULT, emul_to_vcpu(ctxt));
++}
++
+ static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
+ {
+ return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr);
+@@ -7068,6 +7073,7 @@ static const struct x86_emulate_ops emulate_ops = {
+ .set_hflags = emulator_set_hflags,
+ .pre_leave_smm = emulator_pre_leave_smm,
+ .post_leave_smm = emulator_post_leave_smm,
++ .triple_fault = emulator_triple_fault,
+ .set_xcr = emulator_set_xcr,
+ };
+
+--
+2.35.1
+
--- /dev/null
+From 74c87e6e5b654a2e15ca1ec3a9b9818a6499c979 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Oct 2022 15:47:30 +0300
+Subject: KVM: x86: emulator: update the emulation mode after rsm
+
+From: Maxim Levitsky <mlevitsk@redhat.com>
+
+[ Upstream commit 055f37f84e304e59c046d1accfd8f08462f52c4c ]
+
+Update the emulation mode after RSM so that RIP will be correctly
+written back, because the RSM instruction can switch the CPU mode from
+32 bit (or less) to 64 bit.
+
+This fixes a guest crash in case the #SMI is received while the guest
+runs a code from an address > 32 bit.
+
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Message-Id: <20221025124741.228045-13-mlevitsk@redhat.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/emulate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index d5da89271318..2022a27f0c26 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2749,7 +2749,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
+ * those side effects need to be explicitly handled for both success
+ * and shutdown.
+ */
+- return X86EMUL_CONTINUE;
++ return emulator_recalc_and_set_mode(ctxt);
+
+ emulate_shutdown:
+ ctxt->ops->triple_fault(ctxt);
+--
+2.35.1
+
--- /dev/null
+From a8b62eccd42f0f4ba325d420ba74865e870b3903 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Jun 2021 11:56:14 -0700
+Subject: KVM: x86: Invoke kvm_smm_changed() immediately after clearing SMM
+ flag
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit fa75e08bbe4f8ea609f61bbb6c04b3bb2b38c793 ]
+
+Move RSM emulation's call to kvm_smm_changed() from .post_leave_smm() to
+.exiting_smm(), leaving behind the MMU context reset. The primary
+motivation is to allow for future cleanup, but this also fixes a bug of
+sorts by queueing KVM_REQ_EVENT even if RSM causes shutdown, e.g. to let
+an INIT wake the vCPU from shutdown. Of course, KVM doesn't properly
+emulate a shutdown state, e.g. KVM doesn't block SMIs after shutdown, and
+immediately exits to userspace, so the event request is a moot point in
+practice.
+
+Moving kvm_smm_changed() also moves the RSM tracepoint. This isn't
+strictly necessary, but will allow consolidating the SMI and RSM
+tracepoints in a future commit (by also moving the SMI tracepoint).
+Invoking the tracepoint before loading SMRAM state also means the SMBASE
+reported in the tracepoint will point to the state that will be
+used for RSM, as opposed to the SMBASE _after_ RSM completes, which is
+arguably a good thing if the tracepoint is being used to debug a RSM/SMM
+issue.
+
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210609185619.992058-5-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: 055f37f84e30 ("KVM: x86: emulator: update the emulation mode after rsm")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/x86.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 0afae6755868..ef43bfa1b3a8 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -7004,7 +7004,7 @@ static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt)
+ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+ vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
+- kvm_mmu_reset_context(vcpu);
++ kvm_smm_changed(vcpu);
+ }
+
+ static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+@@ -7015,7 +7015,7 @@ static int emulator_pre_leave_smm(struct x86_emulate_ctxt *ctxt,
+
+ static void emulator_post_leave_smm(struct x86_emulate_ctxt *ctxt)
+ {
+- kvm_smm_changed(emul_to_vcpu(ctxt));
++ kvm_mmu_reset_context(emul_to_vcpu(ctxt));
+ }
+
+ static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
+--
+2.35.1
+
--- /dev/null
+From 39c833834eb63d070825844a7673887405e77f61 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Jun 2021 11:56:13 -0700
+Subject: KVM: x86: Replace .set_hflags() with dedicated .exiting_smm() helper
+
+From: Sean Christopherson <seanjc@google.com>
+
+[ Upstream commit edce46548b70b8637694d96122447662ff35af0c ]
+
+Replace the .set_hflags() emulator hook with a dedicated .exiting_smm(),
+moving the SMM and SMM_INSIDE_NMI flag handling out of the emulator in
+the process. This is a step towards consolidating much of the logic in
+kvm_smm_changed(), including the SMM hflags updates.
+
+No functional change intended.
+
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Message-Id: <20210609185619.992058-4-seanjc@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Stable-dep-of: 055f37f84e30 ("KVM: x86: emulator: update the emulation mode after rsm")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/emulate.c | 3 +--
+ arch/x86/kvm/kvm_emulate.h | 2 +-
+ arch/x86/kvm/x86.c | 6 +++---
+ 3 files changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 89ad10261d90..2e6d7640c4ed 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2685,8 +2685,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
+ if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+ ctxt->ops->set_nmi_mask(ctxt, false);
+
+- ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+- ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
++ ctxt->ops->exiting_smm(ctxt);
+
+ /*
+ * Get back to real mode, to prepare a safe state in which to load
+diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
+index 1da3f77a8728..148ba21d0646 100644
+--- a/arch/x86/kvm/kvm_emulate.h
++++ b/arch/x86/kvm/kvm_emulate.h
+@@ -230,7 +230,7 @@ struct x86_emulate_ops {
+ void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
+
+ unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
+- void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
++ void (*exiting_smm)(struct x86_emulate_ctxt *ctxt);
+ int (*pre_leave_smm)(struct x86_emulate_ctxt *ctxt,
+ const char *smstate);
+ void (*post_leave_smm)(struct x86_emulate_ctxt *ctxt);
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 20dc108f2c4c..0afae6755868 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -6999,11 +6999,11 @@ static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
+ return emul_to_vcpu(ctxt)->arch.hflags;
+ }
+
+-static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
++static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt)
+ {
+ struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+
+- vcpu->arch.hflags = emul_flags;
++ vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
+ kvm_mmu_reset_context(vcpu);
+ }
+
+@@ -7070,7 +7070,7 @@ static const struct x86_emulate_ops emulate_ops = {
+ .guest_has_rdpid = emulator_guest_has_rdpid,
+ .set_nmi_mask = emulator_set_nmi_mask,
+ .get_hflags = emulator_get_hflags,
+- .set_hflags = emulator_set_hflags,
++ .exiting_smm = emulator_exiting_smm,
+ .pre_leave_smm = emulator_pre_leave_smm,
+ .post_leave_smm = emulator_post_leave_smm,
+ .triple_fault = emulator_triple_fault,
+--
+2.35.1
+
--- /dev/null
+From d29b6a78a2e92ee7227f0a3db0ad552d0738f641 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 27 Apr 2022 14:08:51 -0400
+Subject: mmc: sdhci-brcmstb: Enable Clock Gating to save power
+
+From: Al Cooper <alcooperx@gmail.com>
+
+[ Upstream commit 6bcc55fe648b860ef0c2b8dc23adc05bcddb93c2 ]
+
+Enabling this feature will allow the controller to stop the bus
+clock when the bus is idle. The feature is not part of the standard
+and is unique to newer Arasan cores and is enabled with a bit in a
+vendor specific register. This feature will only be enabled for
+non-removable devices because they don't switch the voltage and
+clock gating breaks SD Card voltage switching.
+
+Signed-off-by: Al Cooper <alcooperx@gmail.com>
+Signed-off-by: Kamal Dasu <kdasu.kdev@gmail.com>
+Acked-by: Florian Fainelli <f.fainelli@gmail.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/20220427180853.35970-3-kdasu.kdev@gmail.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Stable-dep-of: 56baa208f910 ("mmc: sdhci-brcmstb: Fix SDHCI_RESET_ALL for CQHCI")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/sdhci-brcmstb.c | 35 +++++++++++++++++++++++++++++++-
+ 1 file changed, 34 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
+index 244780481193..683d0c685748 100644
+--- a/drivers/mmc/host/sdhci-brcmstb.c
++++ b/drivers/mmc/host/sdhci-brcmstb.c
+@@ -17,11 +17,14 @@
+
+ #define SDHCI_VENDOR 0x78
+ #define SDHCI_VENDOR_ENHANCED_STRB 0x1
++#define SDHCI_VENDOR_GATE_SDCLK_EN 0x2
+
+ #define BRCMSTB_MATCH_FLAGS_NO_64BIT BIT(0)
+ #define BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT BIT(1)
++#define BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE BIT(2)
+
+ #define BRCMSTB_PRIV_FLAGS_HAS_CQE BIT(0)
++#define BRCMSTB_PRIV_FLAGS_GATE_CLOCK BIT(1)
+
+ #define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
+
+@@ -36,6 +39,27 @@ struct brcmstb_match_priv {
+ const unsigned int flags;
+ };
+
++static inline void enable_clock_gating(struct sdhci_host *host)
++{
++ u32 reg;
++
++ reg = sdhci_readl(host, SDHCI_VENDOR);
++ reg |= SDHCI_VENDOR_GATE_SDCLK_EN;
++ sdhci_writel(host, reg, SDHCI_VENDOR);
++}
++
++void brcmstb_reset(struct sdhci_host *host, u8 mask)
++{
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++ struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
++
++ sdhci_reset(host, mask);
++
++ /* Reset will clear this, so re-enable it */
++ if (priv->flags & BRCMSTB_PRIV_FLAGS_GATE_CLOCK)
++ enable_clock_gating(host);
++}
++
+ static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios)
+ {
+ struct sdhci_host *host = mmc_priv(mmc);
+@@ -131,7 +155,7 @@ static struct sdhci_ops sdhci_brcmstb_ops = {
+ static struct sdhci_ops sdhci_brcmstb_ops_7216 = {
+ .set_clock = sdhci_brcmstb_set_clock,
+ .set_bus_width = sdhci_set_bus_width,
+- .reset = sdhci_reset,
++ .reset = brcmstb_reset,
+ .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling,
+ };
+
+@@ -147,6 +171,7 @@ static struct brcmstb_match_priv match_priv_7445 = {
+ };
+
+ static const struct brcmstb_match_priv match_priv_7216 = {
++ .flags = BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE,
+ .hs400es = sdhci_brcmstb_hs400es,
+ .ops = &sdhci_brcmstb_ops_7216,
+ };
+@@ -273,6 +298,14 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
+ if (res)
+ goto err;
+
++ /*
++ * Automatic clock gating does not work for SD cards that may
++ * voltage switch so only enable it for non-removable devices.
++ */
++ if ((match_priv->flags & BRCMSTB_MATCH_FLAGS_HAS_CLOCK_GATE) &&
++ (host->mmc->caps & MMC_CAP_NONREMOVABLE))
++ priv->flags |= BRCMSTB_PRIV_FLAGS_GATE_CLOCK;
++
+ /*
+ * If the chip has enhanced strobe and it's enabled, add
+ * callback
+--
+2.35.1
+
--- /dev/null
+From e0785d1afaf9f0ffed294b05eb117e078e0b9e9f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Oct 2022 12:42:05 -0700
+Subject: mmc: sdhci-brcmstb: Fix SDHCI_RESET_ALL for CQHCI
+
+From: Brian Norris <briannorris@chromium.org>
+
+[ Upstream commit 56baa208f91061ff27ec2d93fbc483f624d373b4 ]
+
+[[ NOTE: this is completely untested by the author, but included solely
+ because, as noted in commit df57d73276b8 ("mmc: sdhci-pci: Fix
+ SDHCI_RESET_ALL for CQHCI for Intel GLK-based controllers"), "other
+ drivers using CQHCI might benefit from a similar change, if they
+ also have CQHCI reset by SDHCI_RESET_ALL." We've now seen the same
+ bug on at least MSM, Arasan, and Intel hardware. ]]
+
+SDHCI_RESET_ALL resets will reset the hardware CQE state, but we aren't
+tracking that properly in software. When out of sync, we may trigger
+various timeouts.
+
+It's not typical to perform resets while CQE is enabled, but this may
+occur in some suspend or error recovery scenarios.
+
+Include this fix by way of the new sdhci_and_cqhci_reset() helper.
+
+I only patch the bcm7216 variant even though others potentially *could*
+provide the 'supports-cqe' property (and thus enable CQHCI), because
+d46ba2d17f90 ("mmc: sdhci-brcmstb: Add support for Command Queuing
+(CQE)") and some Broadcom folks confirm that only the 7216 variant
+actually supports it.
+
+This patch depends on (and should not compile without) the patch
+entitled "mmc: cqhci: Provide helper for resetting both SDHCI and
+CQHCI".
+
+Fixes: d46ba2d17f90 ("mmc: sdhci-brcmstb: Add support for Command Queuing (CQE)")
+Signed-off-by: Brian Norris <briannorris@chromium.org>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20221026124150.v4.3.I6a715feab6d01f760455865e968ecf0d85036018@changeid
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/sdhci-brcmstb.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
+index 683d0c685748..4d42b1810ace 100644
+--- a/drivers/mmc/host/sdhci-brcmstb.c
++++ b/drivers/mmc/host/sdhci-brcmstb.c
+@@ -12,6 +12,7 @@
+ #include <linux/bitops.h>
+ #include <linux/delay.h>
+
++#include "sdhci-cqhci.h"
+ #include "sdhci-pltfm.h"
+ #include "cqhci.h"
+
+@@ -53,7 +54,7 @@ void brcmstb_reset(struct sdhci_host *host, u8 mask)
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host);
+
+- sdhci_reset(host, mask);
++ sdhci_and_cqhci_reset(host, mask);
+
+ /* Reset will clear this, so re-enable it */
+ if (priv->flags & BRCMSTB_PRIV_FLAGS_GATE_CLOCK)
+--
+2.35.1
+
--- /dev/null
+From 251b1a3f63cfc83802d33c97a9fa5782f7d506c8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 27 Apr 2022 14:08:50 -0400
+Subject: mmc: sdhci-brcmstb: Re-organize flags
+
+From: Al Cooper <alcooperx@gmail.com>
+
+[ Upstream commit f3a70f991dd07330225ea11e158e1d07ad5733fb ]
+
+Re-organize the flags by basing the bit names on the flag that they
+apply to. Also change the "flags" member in the "brcmstb_match_priv"
+struct to const.
+
+Signed-off-by: Al Cooper <alcooperx@gmail.com>
+Signed-off-by: Kamal Dasu <kdasu.kdev@gmail.com>
+Acked-by: Florian Fainelli <f.fainelli@gmail.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/20220427180853.35970-2-kdasu.kdev@gmail.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Stable-dep-of: 56baa208f910 ("mmc: sdhci-brcmstb: Fix SDHCI_RESET_ALL for CQHCI")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/sdhci-brcmstb.c | 32 ++++++++++++++++----------------
+ 1 file changed, 16 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
+index f24623aac2db..244780481193 100644
+--- a/drivers/mmc/host/sdhci-brcmstb.c
++++ b/drivers/mmc/host/sdhci-brcmstb.c
+@@ -18,20 +18,22 @@
+ #define SDHCI_VENDOR 0x78
+ #define SDHCI_VENDOR_ENHANCED_STRB 0x1
+
+-#define BRCMSTB_PRIV_FLAGS_NO_64BIT BIT(0)
+-#define BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT BIT(1)
++#define BRCMSTB_MATCH_FLAGS_NO_64BIT BIT(0)
++#define BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT BIT(1)
++
++#define BRCMSTB_PRIV_FLAGS_HAS_CQE BIT(0)
+
+ #define SDHCI_ARASAN_CQE_BASE_ADDR 0x200
+
+ struct sdhci_brcmstb_priv {
+ void __iomem *cfg_regs;
+- bool has_cqe;
++ unsigned int flags;
+ };
+
+ struct brcmstb_match_priv {
+ void (*hs400es)(struct mmc_host *mmc, struct mmc_ios *ios);
+ struct sdhci_ops *ops;
+- unsigned int flags;
++ const unsigned int flags;
+ };
+
+ static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios)
+@@ -134,13 +136,13 @@ static struct sdhci_ops sdhci_brcmstb_ops_7216 = {
+ };
+
+ static struct brcmstb_match_priv match_priv_7425 = {
+- .flags = BRCMSTB_PRIV_FLAGS_NO_64BIT |
+- BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT,
++ .flags = BRCMSTB_MATCH_FLAGS_NO_64BIT |
++ BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT,
+ .ops = &sdhci_brcmstb_ops,
+ };
+
+ static struct brcmstb_match_priv match_priv_7445 = {
+- .flags = BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT,
++ .flags = BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT,
+ .ops = &sdhci_brcmstb_ops,
+ };
+
+@@ -176,7 +178,7 @@ static int sdhci_brcmstb_add_host(struct sdhci_host *host,
+ bool dma64;
+ int ret;
+
+- if (!priv->has_cqe)
++ if ((priv->flags & BRCMSTB_PRIV_FLAGS_HAS_CQE) == 0)
+ return sdhci_add_host(host);
+
+ dev_dbg(mmc_dev(host->mmc), "CQE is enabled\n");
+@@ -225,7 +227,6 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
+ struct sdhci_brcmstb_priv *priv;
+ struct sdhci_host *host;
+ struct resource *iomem;
+- bool has_cqe = false;
+ struct clk *clk;
+ int res;
+
+@@ -244,10 +245,6 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
+ return res;
+
+ memset(&brcmstb_pdata, 0, sizeof(brcmstb_pdata));
+- if (device_property_read_bool(&pdev->dev, "supports-cqe")) {
+- has_cqe = true;
+- match_priv->ops->irq = sdhci_brcmstb_cqhci_irq;
+- }
+ brcmstb_pdata.ops = match_priv->ops;
+ host = sdhci_pltfm_init(pdev, &brcmstb_pdata,
+ sizeof(struct sdhci_brcmstb_priv));
+@@ -258,7 +255,10 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
+
+ pltfm_host = sdhci_priv(host);
+ priv = sdhci_pltfm_priv(pltfm_host);
+- priv->has_cqe = has_cqe;
++ if (device_property_read_bool(&pdev->dev, "supports-cqe")) {
++ priv->flags |= BRCMSTB_PRIV_FLAGS_HAS_CQE;
++ match_priv->ops->irq = sdhci_brcmstb_cqhci_irq;
++ }
+
+ /* Map in the non-standard CFG registers */
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+@@ -287,14 +287,14 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
+ * properties through mmc_of_parse().
+ */
+ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+- if (match_priv->flags & BRCMSTB_PRIV_FLAGS_NO_64BIT)
++ if (match_priv->flags & BRCMSTB_MATCH_FLAGS_NO_64BIT)
+ host->caps &= ~SDHCI_CAN_64BIT;
+ host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
+ host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 |
+ SDHCI_SUPPORT_DDR50);
+ host->quirks |= SDHCI_QUIRK_MISSING_CAPS;
+
+- if (match_priv->flags & BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT)
++ if (match_priv->flags & BRCMSTB_MATCH_FLAGS_BROKEN_TIMEOUT)
+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+
+ res = sdhci_brcmstb_add_host(host, priv);
+--
+2.35.1
+
lib-vdso-use-grep-e-instead-of-egrep.patch
init-kconfig-fix-cc_has_asm_goto_tied_output-test-wi.patch
nios2-add-force-for-vmlinuz.gz.patch
+kbuild-remove-unneeded-mkdir-for-external-modules_in.patch
+kbuild-unify-modules-_install-for-in-tree-and-extern.patch
+kvm-x86-emulate-triple-fault-shutdown-if-rsm-emulati.patch
+kvm-x86-replace-.set_hflags-with-dedicated-.exiting_.patch
+kvm-x86-invoke-kvm_smm_changed-immediately-after-cle.patch
+kvm-x86-drop-.post_leave_smm-i.e.-the-manual-post-rs.patch
+kbuild-refactor-single-builds-of-.ko.patch
+kvm-x86-emulator-update-the-emulation-mode-after-rsm.patch
+mmc-sdhci-brcmstb-re-organize-flags.patch
+mmc-sdhci-brcmstb-enable-clock-gating-to-save-power.patch
+mmc-sdhci-brcmstb-fix-sdhci_reset_all-for-cqhci.patch
+usb-cdns3-add-support-for-drd-cdnsp.patch
+usb-cdnsp-device-side-header-file-for-cdnsp-driver.patch
+ceph-make-ceph_create_session_msg-a-global-symbol.patch
+ceph-make-iterate_sessions-a-global-symbol.patch
+ceph-flush-mdlog-before-umounting.patch
+ceph-flush-the-mdlog-before-waiting-on-unsafe-reqs.patch
+ceph-fix-off-by-one-bugs-in-unsafe_request_wait.patch
+ceph-put-the-requests-sessions-when-it-fails-to-allo.patch
+ceph-fix-possible-null-pointer-dereference-for-req-r.patch
+ceph-use-kcalloc-for-allocating-multiple-elements.patch
+ceph-fix-null-pointer-dereference-for-req-r_session.patch
--- /dev/null
+From 044b5bf55207978e8b0078fb4b1ffc0813d73c70 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Dec 2020 11:32:18 +0100
+Subject: usb: cdns3: Add support for DRD CDNSP
+
+From: Pawel Laszczak <pawell@cadence.com>
+
+[ Upstream commit db8892bb1bb64b6e3d1381ac342a2ee31e1b76b6 ]
+
+Patch adds support for Cadence DRD Super Speed Plus controller(CDNSP).
+CDNSP DRD is a part of Cadence CDNSP controller.
+The DRD CDNSP controller has a lot of difference on hardware level but on
+software level is quite compatible with CDNS3 DRD. For this reason
+CDNS3 DRD part of CDNS3 driver was reused for CDNSP driver.
+
+Signed-off-by: Pawel Laszczak <pawell@cadence.com>
+Tested-by: Aswath Govindraju <a-govindraju@ti.com>
+Signed-off-by: Peter Chen <peter.chen@nxp.com>
+Stable-dep-of: 9d5333c93134 ("usb: cdns3: host: fix endless superspeed hub port reset")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/cdns3/core.c | 24 +++++++---
+ drivers/usb/cdns3/core.h | 5 ++
+ drivers/usb/cdns3/drd.c | 101 +++++++++++++++++++++++++++------------
+ drivers/usb/cdns3/drd.h | 67 +++++++++++++++++++++-----
+ 4 files changed, 148 insertions(+), 49 deletions(-)
+
+diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
+index 6eeb7ed8e91f..8fe7420de033 100644
+--- a/drivers/usb/cdns3/core.c
++++ b/drivers/usb/cdns3/core.c
+@@ -97,13 +97,23 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
+ * can be restricted later depending on strap pin configuration.
+ */
+ if (dr_mode == USB_DR_MODE_UNKNOWN) {
+- if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) &&
+- IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
+- dr_mode = USB_DR_MODE_OTG;
+- else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST))
+- dr_mode = USB_DR_MODE_HOST;
+- else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
+- dr_mode = USB_DR_MODE_PERIPHERAL;
++ if (cdns->version == CDNSP_CONTROLLER_V2) {
++ if (IS_ENABLED(CONFIG_USB_CDNSP_HOST) &&
++ IS_ENABLED(CONFIG_USB_CDNSP_GADGET))
++ dr_mode = USB_DR_MODE_OTG;
++ else if (IS_ENABLED(CONFIG_USB_CDNSP_HOST))
++ dr_mode = USB_DR_MODE_HOST;
++ else if (IS_ENABLED(CONFIG_USB_CDNSP_GADGET))
++ dr_mode = USB_DR_MODE_PERIPHERAL;
++ } else {
++ if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) &&
++ IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
++ dr_mode = USB_DR_MODE_OTG;
++ else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST))
++ dr_mode = USB_DR_MODE_HOST;
++ else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
++ dr_mode = USB_DR_MODE_PERIPHERAL;
++ }
+ }
+
+ /*
+diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
+index 3176f924293a..0d87871499ea 100644
+--- a/drivers/usb/cdns3/core.h
++++ b/drivers/usb/cdns3/core.h
+@@ -55,7 +55,9 @@ struct cdns3_platform_data {
+ * @otg_res: the resource for otg
+ * @otg_v0_regs: pointer to base of v0 otg registers
+ * @otg_v1_regs: pointer to base of v1 otg registers
++ * @otg_cdnsp_regs: pointer to base of CDNSP otg registers
+ * @otg_regs: pointer to base of otg registers
++ * @otg_irq_regs: pointer to interrupt registers
+ * @otg_irq: irq number for otg controller
+ * @dev_irq: irq number for device controller
+ * @wakeup_irq: irq number for wakeup event, it is optional
+@@ -86,9 +88,12 @@ struct cdns3 {
+ struct resource otg_res;
+ struct cdns3_otg_legacy_regs *otg_v0_regs;
+ struct cdns3_otg_regs *otg_v1_regs;
++ struct cdnsp_otg_regs *otg_cdnsp_regs;
+ struct cdns3_otg_common_regs *otg_regs;
++ struct cdns3_otg_irq_regs *otg_irq_regs;
+ #define CDNS3_CONTROLLER_V0 0
+ #define CDNS3_CONTROLLER_V1 1
++#define CDNSP_CONTROLLER_V2 2
+ u32 version;
+ bool phyrst_a_enable;
+
+diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
+index 38ccd29e4cde..95863d44e3e0 100644
+--- a/drivers/usb/cdns3/drd.c
++++ b/drivers/usb/cdns3/drd.c
+@@ -2,13 +2,12 @@
+ /*
+ * Cadence USBSS DRD Driver.
+ *
+- * Copyright (C) 2018-2019 Cadence.
++ * Copyright (C) 2018-2020 Cadence.
+ * Copyright (C) 2019 Texas Instruments
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ * Roger Quadros <rogerq@ti.com>
+ *
+- *
+ */
+ #include <linux/kernel.h>
+ #include <linux/interrupt.h>
+@@ -28,8 +27,9 @@
+ *
+ * Returns 0 on success otherwise negative errno
+ */
+-int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
++static int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
+ {
++ u32 __iomem *override_reg;
+ u32 reg;
+
+ switch (mode) {
+@@ -39,11 +39,24 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
+ break;
+ case USB_DR_MODE_OTG:
+ dev_dbg(cdns->dev, "Set controller to OTG mode\n");
+- if (cdns->version == CDNS3_CONTROLLER_V1) {
+- reg = readl(&cdns->otg_v1_regs->override);
++
++ if (cdns->version == CDNSP_CONTROLLER_V2)
++ override_reg = &cdns->otg_cdnsp_regs->override;
++ else if (cdns->version == CDNS3_CONTROLLER_V1)
++ override_reg = &cdns->otg_v1_regs->override;
++ else
++ override_reg = &cdns->otg_v0_regs->ctrl1;
++
++ reg = readl(override_reg);
++
++ if (cdns->version != CDNS3_CONTROLLER_V0)
+ reg |= OVERRIDE_IDPULLUP;
+- writel(reg, &cdns->otg_v1_regs->override);
++ else
++ reg |= OVERRIDE_IDPULLUP_V0;
+
++ writel(reg, override_reg);
++
++ if (cdns->version == CDNS3_CONTROLLER_V1) {
+ /*
+ * Enable work around feature built into the
+ * controller to address issue with RX Sensitivity
+@@ -55,10 +68,6 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
+ reg |= PHYRST_CFG_PHYRST_A_ENABLE;
+ writel(reg, &cdns->otg_v1_regs->phyrst_cfg);
+ }
+- } else {
+- reg = readl(&cdns->otg_v0_regs->ctrl1);
+- reg |= OVERRIDE_IDPULLUP_V0;
+- writel(reg, &cdns->otg_v0_regs->ctrl1);
+ }
+
+ /*
+@@ -123,7 +132,7 @@ bool cdns3_is_device(struct cdns3 *cdns)
+ */
+ static void cdns3_otg_disable_irq(struct cdns3 *cdns)
+ {
+- writel(0, &cdns->otg_regs->ien);
++ writel(0, &cdns->otg_irq_regs->ien);
+ }
+
+ /**
+@@ -133,7 +142,7 @@ static void cdns3_otg_disable_irq(struct cdns3 *cdns)
+ static void cdns3_otg_enable_irq(struct cdns3 *cdns)
+ {
+ writel(OTGIEN_ID_CHANGE_INT | OTGIEN_VBUSVALID_RISE_INT |
+- OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_regs->ien);
++ OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_irq_regs->ien);
+ }
+
+ /**
+@@ -144,16 +153,21 @@ static void cdns3_otg_enable_irq(struct cdns3 *cdns)
+ */
+ int cdns3_drd_host_on(struct cdns3 *cdns)
+ {
+- u32 val;
++ u32 val, ready_bit;
+ int ret;
+
+ /* Enable host mode. */
+ writel(OTGCMD_HOST_BUS_REQ | OTGCMD_OTG_DIS,
+ &cdns->otg_regs->cmd);
+
++ if (cdns->version == CDNSP_CONTROLLER_V2)
++ ready_bit = OTGSTS_CDNSP_XHCI_READY;
++ else
++ ready_bit = OTGSTS_CDNS3_XHCI_READY;
++
+ dev_dbg(cdns->dev, "Waiting till Host mode is turned on\n");
+ ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val,
+- val & OTGSTS_XHCI_READY, 1, 100000);
++ val & ready_bit, 1, 100000);
+
+ if (ret)
+ dev_err(cdns->dev, "timeout waiting for xhci_ready\n");
+@@ -189,17 +203,22 @@ void cdns3_drd_host_off(struct cdns3 *cdns)
+ */
+ int cdns3_drd_gadget_on(struct cdns3 *cdns)
+ {
+- int ret, val;
+ u32 reg = OTGCMD_OTG_DIS;
++ u32 ready_bit;
++ int ret, val;
+
+ /* switch OTG core */
+ writel(OTGCMD_DEV_BUS_REQ | reg, &cdns->otg_regs->cmd);
+
+ dev_dbg(cdns->dev, "Waiting till Device mode is turned on\n");
+
++ if (cdns->version == CDNSP_CONTROLLER_V2)
++ ready_bit = OTGSTS_CDNSP_DEV_READY;
++ else
++ ready_bit = OTGSTS_CDNS3_DEV_READY;
++
+ ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val,
+- val & OTGSTS_DEV_READY,
+- 1, 100000);
++ val & ready_bit, 1, 100000);
+ if (ret) {
+ dev_err(cdns->dev, "timeout waiting for dev_ready\n");
+ return ret;
+@@ -244,7 +263,7 @@ static int cdns3_init_otg_mode(struct cdns3 *cdns)
+
+ cdns3_otg_disable_irq(cdns);
+ /* clear all interrupts */
+- writel(~0, &cdns->otg_regs->ivect);
++ writel(~0, &cdns->otg_irq_regs->ivect);
+
+ ret = cdns3_set_mode(cdns, USB_DR_MODE_OTG);
+ if (ret)
+@@ -313,7 +332,7 @@ static irqreturn_t cdns3_drd_irq(int irq, void *data)
+ if (cdns->in_lpm)
+ return ret;
+
+- reg = readl(&cdns->otg_regs->ivect);
++ reg = readl(&cdns->otg_irq_regs->ivect);
+
+ if (!reg)
+ return IRQ_NONE;
+@@ -332,7 +351,7 @@ static irqreturn_t cdns3_drd_irq(int irq, void *data)
+ ret = IRQ_WAKE_THREAD;
+ }
+
+- writel(~0, &cdns->otg_regs->ivect);
++ writel(~0, &cdns->otg_irq_regs->ivect);
+ return ret;
+ }
+
+@@ -347,28 +366,43 @@ int cdns3_drd_init(struct cdns3 *cdns)
+ return PTR_ERR(regs);
+
+ /* Detection of DRD version. Controller has been released
+- * in two versions. Both are similar, but they have same changes
+- * in register maps.
+- * The first register in old version is command register and it's read
+- * only, so driver should read 0 from it. On the other hand, in v1
+- * the first register contains device ID number which is not set to 0.
+- * Driver uses this fact to detect the proper version of
++ * in three versions. All are very similar and are software compatible,
++ * but they have same changes in register maps.
++ * The first register in oldest version is command register and it's
++ * read only. Driver should read 0 from it. On the other hand, in v1
++ * and v2 the first register contains device ID number which is not
++ * set to 0. Driver uses this fact to detect the proper version of
+ * controller.
+ */
+ cdns->otg_v0_regs = regs;
+ if (!readl(&cdns->otg_v0_regs->cmd)) {
+ cdns->version = CDNS3_CONTROLLER_V0;
+ cdns->otg_v1_regs = NULL;
++ cdns->otg_cdnsp_regs = NULL;
+ cdns->otg_regs = regs;
++ cdns->otg_irq_regs = (struct cdns3_otg_irq_regs *)
++ &cdns->otg_v0_regs->ien;
+ writel(1, &cdns->otg_v0_regs->simulate);
+ dev_dbg(cdns->dev, "DRD version v0 (%08x)\n",
+ readl(&cdns->otg_v0_regs->version));
+ } else {
+ cdns->otg_v0_regs = NULL;
+ cdns->otg_v1_regs = regs;
++ cdns->otg_cdnsp_regs = regs;
++
+ cdns->otg_regs = (void *)&cdns->otg_v1_regs->cmd;
+- cdns->version = CDNS3_CONTROLLER_V1;
+- writel(1, &cdns->otg_v1_regs->simulate);
++
++ if (cdns->otg_cdnsp_regs->did == OTG_CDNSP_DID) {
++ cdns->otg_irq_regs = (struct cdns3_otg_irq_regs *)
++ &cdns->otg_cdnsp_regs->ien;
++ cdns->version = CDNSP_CONTROLLER_V2;
++ } else {
++ cdns->otg_irq_regs = (struct cdns3_otg_irq_regs *)
++ &cdns->otg_v1_regs->ien;
++ writel(1, &cdns->otg_v1_regs->simulate);
++ cdns->version = CDNS3_CONTROLLER_V1;
++ }
++
+ dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
+ readl(&cdns->otg_v1_regs->did),
+ readl(&cdns->otg_v1_regs->rid));
+@@ -378,10 +412,17 @@ int cdns3_drd_init(struct cdns3 *cdns)
+
+ /* Update dr_mode according to STRAP configuration. */
+ cdns->dr_mode = USB_DR_MODE_OTG;
+- if (state == OTGSTS_STRAP_HOST) {
++
++ if ((cdns->version == CDNSP_CONTROLLER_V2 &&
++ state == OTGSTS_CDNSP_STRAP_HOST) ||
++ (cdns->version != CDNSP_CONTROLLER_V2 &&
++ state == OTGSTS_STRAP_HOST)) {
+ dev_dbg(cdns->dev, "Controller strapped to HOST\n");
+ cdns->dr_mode = USB_DR_MODE_HOST;
+- } else if (state == OTGSTS_STRAP_GADGET) {
++ } else if ((cdns->version == CDNSP_CONTROLLER_V2 &&
++ state == OTGSTS_CDNSP_STRAP_GADGET) ||
++ (cdns->version != CDNSP_CONTROLLER_V2 &&
++ state == OTGSTS_STRAP_GADGET)) {
+ dev_dbg(cdns->dev, "Controller strapped to PERIPHERAL\n");
+ cdns->dr_mode = USB_DR_MODE_PERIPHERAL;
+ }
+diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h
+index f1ccae285a16..a767b6893938 100644
+--- a/drivers/usb/cdns3/drd.h
++++ b/drivers/usb/cdns3/drd.h
+@@ -1,8 +1,8 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ /*
+- * Cadence USB3 DRD header file.
++ * Cadence USB3 and USBSSP DRD header file.
+ *
+- * Copyright (C) 2018-2019 Cadence.
++ * Copyright (C) 2018-2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ */
+@@ -13,7 +13,7 @@
+ #include <linux/phy/phy.h>
+ #include "core.h"
+
+-/* DRD register interface for version v1. */
++/* DRD register interface for version v1 of cdns3 driver. */
+ struct cdns3_otg_regs {
+ __le32 did;
+ __le32 rid;
+@@ -38,7 +38,7 @@ struct cdns3_otg_regs {
+ __le32 ctrl2;
+ };
+
+-/* DRD register interface for version v0. */
++/* DRD register interface for version v0 of cdns3 driver. */
+ struct cdns3_otg_legacy_regs {
+ __le32 cmd;
+ __le32 sts;
+@@ -57,14 +57,45 @@ struct cdns3_otg_legacy_regs {
+ __le32 ctrl1;
+ };
+
++/* DRD register interface for cdnsp driver */
++struct cdnsp_otg_regs {
++ __le32 did;
++ __le32 rid;
++ __le32 cfgs1;
++ __le32 cfgs2;
++ __le32 cmd;
++ __le32 sts;
++ __le32 state;
++ __le32 ien;
++ __le32 ivect;
++ __le32 tmr;
++ __le32 simulate;
++ __le32 adpbc_sts;
++ __le32 adp_ramp_time;
++ __le32 adpbc_ctrl1;
++ __le32 adpbc_ctrl2;
++ __le32 override;
++ __le32 vbusvalid_dbnc_cfg;
++ __le32 sessvalid_dbnc_cfg;
++ __le32 susp_timing_ctrl;
++};
++
++#define OTG_CDNSP_DID 0x0004034E
++
+ /*
+- * Common registers interface for both version of DRD.
++ * Common registers interface for both CDNS3 and CDNSP version of DRD.
+ */
+ struct cdns3_otg_common_regs {
+ __le32 cmd;
+ __le32 sts;
+ __le32 state;
+- __le32 different1;
++};
++
++/*
++ * Interrupt related registers. This registers are mapped in different
++ * location for CDNSP controller.
++ */
++struct cdns3_otg_irq_regs {
+ __le32 ien;
+ __le32 ivect;
+ };
+@@ -92,9 +123,9 @@ struct cdns3_otg_common_regs {
+ #define OTGCMD_DEV_BUS_DROP BIT(8)
+ /* Drop the bus for Host mode*/
+ #define OTGCMD_HOST_BUS_DROP BIT(9)
+-/* Power Down USBSS-DEV. */
++/* Power Down USBSS-DEV - only for CDNS3.*/
+ #define OTGCMD_DEV_POWER_OFF BIT(11)
+-/* Power Down CDNSXHCI. */
++/* Power Down CDNSXHCI - only for CDNS3. */
+ #define OTGCMD_HOST_POWER_OFF BIT(12)
+
+ /* OTGIEN - bitmasks */
+@@ -123,20 +154,31 @@ struct cdns3_otg_common_regs {
+ #define OTGSTS_OTG_NRDY_MASK BIT(11)
+ #define OTGSTS_OTG_NRDY(p) ((p) & OTGSTS_OTG_NRDY_MASK)
+ /*
+- * Value of the strap pins.
++ * Value of the strap pins for:
++ * CDNS3:
+ * 000 - no default configuration
+ * 010 - Controller initiall configured as Host
+ * 100 - Controller initially configured as Device
++ * CDNSP:
++ * 000 - No default configuration.
++ * 010 - Controller initiall configured as Host.
++ * 100 - Controller initially configured as Device.
+ */
+ #define OTGSTS_STRAP(p) (((p) & GENMASK(14, 12)) >> 12)
+ #define OTGSTS_STRAP_NO_DEFAULT_CFG 0x00
+ #define OTGSTS_STRAP_HOST_OTG 0x01
+ #define OTGSTS_STRAP_HOST 0x02
+ #define OTGSTS_STRAP_GADGET 0x04
++#define OTGSTS_CDNSP_STRAP_HOST 0x01
++#define OTGSTS_CDNSP_STRAP_GADGET 0x02
++
+ /* Host mode is turned on. */
+-#define OTGSTS_XHCI_READY BIT(26)
++#define OTGSTS_CDNS3_XHCI_READY BIT(26)
++#define OTGSTS_CDNSP_XHCI_READY BIT(27)
++
+ /* "Device mode is turned on .*/
+-#define OTGSTS_DEV_READY BIT(27)
++#define OTGSTS_CDNS3_DEV_READY BIT(27)
++#define OTGSTS_CDNSP_DEV_READY BIT(26)
+
+ /* OTGSTATE- bitmasks */
+ #define OTGSTATE_DEV_STATE_MASK GENMASK(2, 0)
+@@ -152,6 +194,8 @@ struct cdns3_otg_common_regs {
+ #define OVERRIDE_IDPULLUP BIT(0)
+ /* Only for CDNS3_CONTROLLER_V0 version */
+ #define OVERRIDE_IDPULLUP_V0 BIT(24)
++/* Vbusvalid/Sesvalid override select. */
++#define OVERRIDE_SESS_VLD_SEL BIT(10)
+
+ /* PHYRST_CFG - bitmasks */
+ #define PHYRST_CFG_PHYRST_A_ENABLE BIT(0)
+@@ -170,6 +214,5 @@ int cdns3_drd_gadget_on(struct cdns3 *cdns);
+ void cdns3_drd_gadget_off(struct cdns3 *cdns);
+ int cdns3_drd_host_on(struct cdns3 *cdns);
+ void cdns3_drd_host_off(struct cdns3 *cdns);
+-int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode);
+
+ #endif /* __LINUX_CDNS3_DRD */
+--
+2.35.1
+
--- /dev/null
+From 0fcaec3f0b643e256ace8b35919163bb08d91579 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Dec 2020 11:32:23 +0100
+Subject: usb: cdnsp: Device side header file for CDNSP driver
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Pawel Laszczak <pawell@cadence.com>
+
+[ Upstream commit e93e58d2740282d32c0278fab283eb0ae158bb59 ]
+
+Patch defines macros, registers and structures used by
+Device side driver.
+
+Because the size of main patch is very big, I’ve decided to create
+separate patch for cdnsp-gadget.h. It should simplify reviewing the code.
+
+Signed-off-by: Pawel Laszczak <pawell@cadence.com>
+Signed-off-by: Peter Chen <peter.chen@nxp.com>
+Stable-dep-of: 9d5333c93134 ("usb: cdns3: host: fix endless superspeed hub port reset")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/cdns3/cdnsp-gadget.h | 1463 ++++++++++++++++++++++++++++++
+ 1 file changed, 1463 insertions(+)
+ create mode 100644 drivers/usb/cdns3/cdnsp-gadget.h
+
+diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
+new file mode 100644
+index 000000000000..93da1dcdad60
+--- /dev/null
++++ b/drivers/usb/cdns3/cdnsp-gadget.h
+@@ -0,0 +1,1463 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++/*
++ * Cadence CDNSP DRD Driver.
++ *
++ * Copyright (C) 2020 Cadence.
++ *
++ * Author: Pawel Laszczak <pawell@cadence.com>
++ *
++ * Code based on Linux XHCI driver.
++ * Origin: Copyright (C) 2008 Intel Corp.
++ */
++#ifndef __LINUX_CDNSP_GADGET_H
++#define __LINUX_CDNSP_GADGET_H
++
++#include <linux/io-64-nonatomic-lo-hi.h>
++#include <linux/usb/gadget.h>
++#include <linux/irq.h>
++
++/* Max number slots - only 1 is allowed. */
++#define CDNSP_DEV_MAX_SLOTS 1
++
++#define CDNSP_EP0_SETUP_SIZE 512
++
++/* One control and 15 for in and 15 for out endpoints. */
++#define CDNSP_ENDPOINTS_NUM 31
++
++/* Best Effort Service Latency. */
++#define CDNSP_DEFAULT_BESL 0
++
++/* Device Controller command default timeout value in us */
++#define CDNSP_CMD_TIMEOUT (15 * 1000)
++
++/* Up to 16 ms to halt an device controller */
++#define CDNSP_MAX_HALT_USEC (16 * 1000)
++
++#define CDNSP_CTX_SIZE 2112
++
++/*
++ * Controller register interface.
++ */
++
++/**
++ * struct cdnsp_cap_regs - CDNSP Registers.
++ * @hc_capbase: Length of the capabilities register and controller
++ * version number
++ * @hcs_params1: HCSPARAMS1 - Structural Parameters 1
++ * @hcs_params2: HCSPARAMS2 - Structural Parameters 2
++ * @hcs_params3: HCSPARAMS3 - Structural Parameters 3
++ * @hcc_params: HCCPARAMS - Capability Parameters
++ * @db_off: DBOFF - Doorbell array offset
++ * @run_regs_off: RTSOFF - Runtime register space offset
++ * @hcc_params2: HCCPARAMS2 Capability Parameters 2,
++ */
++struct cdnsp_cap_regs {
++ __le32 hc_capbase;
++ __le32 hcs_params1;
++ __le32 hcs_params2;
++ __le32 hcs_params3;
++ __le32 hcc_params;
++ __le32 db_off;
++ __le32 run_regs_off;
++ __le32 hcc_params2;
++ /* Reserved up to (CAPLENGTH - 0x1C) */
++};
++
++/* hc_capbase bitmasks. */
++/* bits 7:0 - how long is the Capabilities register. */
++#define HC_LENGTH(p) (((p) >> 00) & GENMASK(7, 0))
++/* bits 31:16 */
++#define HC_VERSION(p) (((p) >> 16) & GENMASK(15, 1))
++
++/* HCSPARAMS1 - hcs_params1 - bitmasks */
++/* bits 0:7, Max Device Endpoints */
++#define HCS_ENDPOINTS_MASK GENMASK(7, 0)
++#define HCS_ENDPOINTS(p) (((p) & HCS_ENDPOINTS_MASK) >> 0)
++
++/* HCCPARAMS offset from PCI base address */
++#define HCC_PARAMS_OFFSET 0x10
++
++/* HCCPARAMS - hcc_params - bitmasks */
++/* 1: device controller can use 64-bit address pointers. */
++#define HCC_64BIT_ADDR(p) ((p) & BIT(0))
++/* 1: device controller uses 64-byte Device Context structures. */
++#define HCC_64BYTE_CONTEXT(p) ((p) & BIT(2))
++/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15. */
++#define HCC_MAX_PSA(p) ((((p) >> 12) & 0xf) + 1)
++/* Extended Capabilities pointer from PCI base. */
++#define HCC_EXT_CAPS(p) (((p) & GENMASK(31, 16)) >> 16)
++
++#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
++
++/* db_off bitmask - bits 0:1 reserved. */
++#define DBOFF_MASK GENMASK(31, 2)
++
++/* run_regs_off bitmask - bits 0:4 reserved. */
++#define RTSOFF_MASK GENMASK(31, 5)
++
++/**
++ * struct cdnsp_op_regs - Device Controller Operational Registers.
++ * @command: USBCMD - Controller command register.
++ * @status: USBSTS - Controller status register.
++ * @page_size: This indicates the page size that the device controller supports.
++ * If bit n is set, the controller supports a page size of 2^(n+12),
++ * up to a 128MB page size. 4K is the minimum page size.
++ * @dnctrl: DNCTRL - Device notification control register.
++ * @cmd_ring: CRP - 64-bit Command Ring Pointer.
++ * @dcbaa_ptr: DCBAAP - 64-bit Device Context Base Address Array Pointer.
++ * @config_reg: CONFIG - Configure Register
++ * @port_reg_base: PORTSCn - base address for Port Status and Control
++ * Each port has a Port Status and Control register,
++ * followed by a Port Power Management Status and Control
++ * register, a Port Link Info register, and a reserved
++ * register.
++ */
++struct cdnsp_op_regs {
++ __le32 command;
++ __le32 status;
++ __le32 page_size;
++ __le32 reserved1;
++ __le32 reserved2;
++ __le32 dnctrl;
++ __le64 cmd_ring;
++ /* rsvd: offset 0x20-2F. */
++ __le32 reserved3[4];
++ __le64 dcbaa_ptr;
++ __le32 config_reg;
++ /* rsvd: offset 0x3C-3FF. */
++ __le32 reserved4[241];
++ /* port 1 registers, which serve as a base address for other ports. */
++ __le32 port_reg_base;
++};
++
++/* Number of registers per port. */
++#define NUM_PORT_REGS 4
++
++/**
++ * struct cdnsp_port_regs - Port Registers.
++ * @portsc: PORTSC - Port Status and Control Register.
++ * @portpmsc: PORTPMSC - Port Power Managements Status and Control Register.
++ * @portli: PORTLI - Port Link Info register.
++ */
++struct cdnsp_port_regs {
++ __le32 portsc;
++ __le32 portpmsc;
++ __le32 portli;
++ __le32 reserved;
++};
++
++/*
++ * These bits are Read Only (RO) and should be saved and written to the
++ * registers: 0 (connect status) and 10:13 (port speed).
++ * These bits are also sticky - meaning they're in the AUX well and they aren't
++ * changed by a hot and warm.
++ */
++#define CDNSP_PORT_RO (PORT_CONNECT | DEV_SPEED_MASK)
++
++/*
++ * These bits are RW; writing a 0 clears the bit, writing a 1 sets the bit:
++ * bits 5:8 (link state), 25:26 ("wake on" enable state)
++ */
++#define CDNSP_PORT_RWS (PORT_PLS_MASK | PORT_WKCONN_E | PORT_WKDISC_E)
++
++/*
++ * These bits are RW; writing a 1 clears the bit, writing a 0 has no effect:
++ * bits 1 (port enable/disable), 17 ( connect changed),
++ * 21 (port reset changed) , 22 (Port Link State Change),
++ */
++#define CDNSP_PORT_RW1CS (PORT_PED | PORT_CSC | PORT_RC | PORT_PLC)
++
++/* USBCMD - USB command - bitmasks. */
++/* Run/Stop, controller execution - do not write unless controller is halted.*/
++#define CMD_R_S BIT(0)
++/*
++ * Reset device controller - resets internal controller state machine and all
++ * registers (except PCI config regs).
++ */
++#define CMD_RESET BIT(1)
++/* Event Interrupt Enable - a '1' allows interrupts from the controller. */
++#define CMD_INTE BIT(2)
++/*
++ * Device System Error Interrupt Enable - get out-of-band signal for
++ * controller errors.
++ */
++#define CMD_DSEIE BIT(3)
++/* device controller save/restore state. */
++#define CMD_CSS BIT(8)
++#define CMD_CRS BIT(9)
++/*
++ * Enable Wrap Event - '1' means device controller generates an event
++ * when MFINDEX wraps.
++ */
++#define CMD_EWE BIT(10)
++/* 1: device enabled */
++#define CMD_DEVEN BIT(17)
++/* bits 18:31 are reserved (and should be preserved on writes). */
++
++/* Command register values to disable interrupts. */
++#define CDNSP_IRQS (CMD_INTE | CMD_DSEIE | CMD_EWE)
++
++/* USBSTS - USB status - bitmasks */
++/* controller not running - set to 1 when run/stop bit is cleared. */
++#define STS_HALT BIT(0)
++/*
++ * serious error, e.g. PCI parity error. The controller will clear
++ * the run/stop bit.
++ */
++#define STS_FATAL BIT(2)
++/* event interrupt - clear this prior to clearing any IP flags in IR set.*/
++#define STS_EINT BIT(3)
++/* port change detect */
++#define STS_PCD BIT(4)
++/* save state status - '1' means device controller is saving state. */
++#define STS_SSS BIT(8)
++/* restore state status - '1' means controllers is restoring state. */
++#define STS_RSS BIT(9)
++/* 1: save or restore error */
++#define STS_SRE BIT(10)
++/* 1: device Not Ready to accept doorbell or op reg writes after reset. */
++#define STS_CNR BIT(11)
++/* 1: internal Device Controller Error.*/
++#define STS_HCE BIT(12)
++
++/* CRCR - Command Ring Control Register - cmd_ring bitmasks. */
++/* bit 0 is the command ring cycle state. */
++#define CMD_RING_CS BIT(0)
++/* stop ring immediately - abort the currently executing command. */
++#define CMD_RING_ABORT BIT(2)
++/*
++ * Command Ring Busy.
++ * Set when Doorbell register is written with DB for command and cleared when
++ * the controller reached end of CR.
++ */
++#define CMD_RING_BUSY(p) ((p) & BIT(4))
++/* 1: command ring is running */
++#define CMD_RING_RUNNING BIT(3)
++/* Command Ring pointer - bit mask for the lower 32 bits. */
++#define CMD_RING_RSVD_BITS GENMASK(5, 0)
++
++/* CONFIG - Configure Register - config_reg bitmasks. */
++/* bits 0:7 - maximum number of device slots enabled. */
++#define MAX_DEVS GENMASK(7, 0)
++/* bit 8: U3 Entry Enabled, assert PLC when controller enters U3. */
++#define CONFIG_U3E BIT(8)
++
++/* PORTSC - Port Status and Control Register - port_reg_base bitmasks */
++/* 1: device connected. */
++#define PORT_CONNECT BIT(0)
++/* 1: port enabled. */
++#define PORT_PED BIT(1)
++/* 1: port reset signaling asserted. */
++#define PORT_RESET BIT(4)
++/*
++ * Port Link State - bits 5:8
++ * A read gives the current link PM state of the port,
++ * a write with Link State Write Strobe sets the link state.
++ */
++#define PORT_PLS_MASK GENMASK(8, 5)
++#define XDEV_U0 (0x0 << 5)
++#define XDEV_U1 (0x1 << 5)
++#define XDEV_U2 (0x2 << 5)
++#define XDEV_U3 (0x3 << 5)
++#define XDEV_DISABLED (0x4 << 5)
++#define XDEV_RXDETECT (0x5 << 5)
++#define XDEV_INACTIVE (0x6 << 5)
++#define XDEV_POLLING (0x7 << 5)
++#define XDEV_RECOVERY (0x8 << 5)
++#define XDEV_HOT_RESET (0x9 << 5)
++#define XDEV_COMP_MODE (0xa << 5)
++#define XDEV_TEST_MODE (0xb << 5)
++#define XDEV_RESUME (0xf << 5)
++/* 1: port has power. */
++#define PORT_POWER BIT(9)
++/*
++ * bits 10:13 indicate device speed:
++ * 0 - undefined speed - port hasn't be initialized by a reset yet
++ * 1 - full speed
++ * 2 - Reserved (Low Speed not supported
++ * 3 - high speed
++ * 4 - super speed
++ * 5 - super speed
++ * 6-15 reserved
++ */
++#define DEV_SPEED_MASK GENMASK(13, 10)
++#define XDEV_FS (0x1 << 10)
++#define XDEV_HS (0x3 << 10)
++#define XDEV_SS (0x4 << 10)
++#define XDEV_SSP (0x5 << 10)
++#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0 << 10))
++#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
++#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
++#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
++#define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP)
++#define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS)
++#define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f)
++/* Port Link State Write Strobe - set this when changing link state */
++#define PORT_LINK_STROBE BIT(16)
++/* 1: connect status change */
++#define PORT_CSC BIT(17)
++/* 1: warm reset for a USB 3.0 device is done. */
++#define PORT_WRC BIT(19)
++/* 1: reset change - 1 to 0 transition of PORT_RESET */
++#define PORT_RC BIT(21)
++/*
++ * port link status change - set on some port link state transitions:
++ * Transition Reason
++ * ----------------------------------------------------------------------------
++ * - U3 to Resume Wakeup signaling from a device
++ * - Resume to Recovery to U0 USB 3.0 device resume
++ * - Resume to U0 USB 2.0 device resume
++ * - U3 to Recovery to U0 Software resume of USB 3.0 device complete
++ * - U3 to U0 Software resume of USB 2.0 device complete
++ * - U2 to U0 L1 resume of USB 2.1 device complete
++ * - U0 to U0 L1 entry rejection by USB 2.1 device
++ * - U0 to disabled L1 entry error with USB 2.1 device
++ * - Any state to inactive Error on USB 3.0 port
++ */
++#define PORT_PLC BIT(22)
++/* Port configure error change - port failed to configure its link partner. */
++#define PORT_CEC BIT(23)
++/* Wake on connect (enable). */
++#define PORT_WKCONN_E BIT(25)
++/* Wake on disconnect (enable). */
++#define PORT_WKDISC_E BIT(26)
++/* Indicates if Warm Reset is being received. */
++#define PORT_WR BIT(31)
++
++#define PORT_CHANGE_BITS (PORT_CSC | PORT_WRC | PORT_RC | PORT_PLC | PORT_CEC)
++
++/* PORTPMSCUSB3 - Port Power Management Status and Control - bitmasks. */
++/* Enables U1 entry. */
++#define PORT_U1_TIMEOUT_MASK GENMASK(7, 0)
++#define PORT_U1_TIMEOUT(p) ((p) & PORT_U1_TIMEOUT_MASK)
++/* Enables U2 entry .*/
++#define PORT_U2_TIMEOUT_MASK GENMASK(14, 8)
++#define PORT_U2_TIMEOUT(p) (((p) << 8) & PORT_U2_TIMEOUT_MASK)
++
++/* PORTPMSCUSB2 - Port Power Management Status and Control - bitmasks. */
++#define PORT_L1S_MASK GENMASK(2, 0)
++#define PORT_L1S(p) ((p) & PORT_L1S_MASK)
++#define PORT_L1S_ACK PORT_L1S(1)
++#define PORT_L1S_NYET PORT_L1S(2)
++#define PORT_L1S_STALL PORT_L1S(3)
++#define PORT_L1S_TIMEOUT PORT_L1S(4)
++/* Remote Wake Enable. */
++#define PORT_RWE BIT(3)
++/* Best Effort Service Latency (BESL). */
++#define PORT_BESL(p) (((p) << 4) & GENMASK(7, 4))
++/* Hardware LPM Enable (HLE). */
++#define PORT_HLE BIT(16)
++/* Received Best Effort Service Latency (BESL). */
++#define PORT_RRBESL(p) (((p) & GENMASK(20, 17)) >> 17)
++/* Port Test Control. */
++#define PORT_TEST_MODE_MASK GENMASK(31, 28)
++#define PORT_TEST_MODE(p) (((p) << 28) & PORT_TEST_MODE_MASK)
++
++/**
++ * struct cdnsp_intr_reg - Interrupt Register Set.
++ * @irq_pending: IMAN - Interrupt Management Register. Used to enable
++ * interrupts and check for pending interrupts.
++ * @irq_control: IMOD - Interrupt Moderation Register.
++ * Used to throttle interrupts.
++ * @erst_size: Number of segments in the Event Ring Segment Table (ERST).
++ * @erst_base: ERST base address.
++ * @erst_dequeue: Event ring dequeue pointer.
++ *
++ * Each interrupter (defined by a MSI-X vector) has an event ring and an Event
++ * Ring Segment Table (ERST) associated with it. The event ring is comprised of
++ * multiple segments of the same size. The controller places events on the ring
++ * and "updates the Cycle bit in the TRBs to indicate to software the current
++ * position of the Enqueue Pointer." The driver processes those events and
++ * updates the dequeue pointer.
++ */
++struct cdnsp_intr_reg {
++ __le32 irq_pending;
++ __le32 irq_control;
++ __le32 erst_size;
++ __le32 rsvd;
++ __le64 erst_base;
++ __le64 erst_dequeue;
++};
++
++/* IMAN - Interrupt Management Register - irq_pending bitmasks. */
++#define IMAN_IE BIT(1)
++#define IMAN_IP BIT(0)
++/* bits 2:31 need to be preserved */
++#define IMAN_IE_SET(p) (((p) & IMAN_IE) | 0x2)
++#define IMAN_IE_CLEAR(p) (((p) & IMAN_IE) & ~(0x2))
++
++/* IMOD - Interrupter Moderation Register - irq_control bitmasks. */
++/*
++ * Minimum interval between interrupts (in 250ns intervals). The interval
++ * between interrupts will be longer if there are no events on the event ring.
++ * Default is 4000 (1 ms).
++ */
++#define IMOD_INTERVAL_MASK GENMASK(15, 0)
++/* Counter used to count down the time to the next interrupt - HW use only */
++#define IMOD_COUNTER_MASK GENMASK(31, 16)
++#define IMOD_DEFAULT_INTERVAL 0
++
++/* erst_size bitmasks. */
++/* Preserve bits 16:31 of erst_size. */
++#define ERST_SIZE_MASK GENMASK(31, 16)
++
++/* erst_dequeue bitmasks. */
++/*
++ * Dequeue ERST Segment Index (DESI) - Segment number (or alias)
++ * where the current dequeue pointer lies. This is an optional HW hint.
++ */
++#define ERST_DESI_MASK GENMASK(2, 0)
++/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced. */
++#define ERST_EHB BIT(3)
++#define ERST_PTR_MASK GENMASK(3, 0)
++
++/**
++ * struct cdnsp_run_regs
++ * @microframe_index: MFINDEX - current microframe number.
++ * @ir_set: Array of Interrupter registers.
++ *
++ * Device Controller Runtime Registers:
++ * "Software should read and write these registers using only Dword (32 bit)
++ * or larger accesses"
++ */
++struct cdnsp_run_regs {
++ __le32 microframe_index;
++ __le32 rsvd[7];
++ struct cdnsp_intr_reg ir_set[128];
++};
++
++/**
++ * USB2.0 Port Peripheral Configuration Registers.
++ * @ext_cap: Header register for Extended Capability.
++ * @port_reg1: Timer Configuration Register.
++ * @port_reg2: Timer Configuration Register.
++ * @port_reg3: Timer Configuration Register.
++ * @port_reg4: Timer Configuration Register.
++ * @port_reg5: Timer Configuration Register.
++ * @port_reg6: Chicken bits for USB20PPP.
++ */
++struct cdnsp_20port_cap {
++ __le32 ext_cap;
++ __le32 port_reg1;
++ __le32 port_reg2;
++ __le32 port_reg3;
++ __le32 port_reg4;
++ __le32 port_reg5;
++ __le32 port_reg6;
++};
++
++/* Extended capability register fields */
++#define EXT_CAPS_ID(p) (((p) >> 0) & GENMASK(7, 0))
++#define EXT_CAPS_NEXT(p) (((p) >> 8) & GENMASK(7, 0))
++/* Extended capability IDs - ID 0 reserved */
++#define EXT_CAPS_PROTOCOL 2
++
++/* USB 2.0 Port Peripheral Configuration Extended Capability */
++#define EXT_CAP_CFG_DEV_20PORT_CAP_ID 0xC1
++/*
++ * Setting this bit to '1' enables automatic wakeup from L1 state on transfer
++ * TRB prepared when USBSSP operates in USB2.0 mode.
++ */
++#define PORT_REG6_L1_L0_HW_EN BIT(1)
++/*
++ * Setting this bit to '1' forces Full Speed when USBSSP operates in USB2.0
++ * mode (disables High Speed).
++ */
++#define PORT_REG6_FORCE_FS BIT(0)
++
++/**
++ * USB3.x Port Peripheral Configuration Registers.
++ * @ext_cap: Header register for Extended Capability.
++ * @mode_addr: Miscellaneous 3xPORT operation mode configuration register.
++ * @mode_2: 3x Port Control Register 2.
++ */
++struct cdnsp_3xport_cap {
++ __le32 ext_cap;
++ __le32 mode_addr;
++ __le32 reserved[52];
++ __le32 mode_2;
++};
++
++/* Extended Capability Header for 3XPort Configuration Registers. */
++#define D_XEC_CFG_3XPORT_CAP 0xC0
++#define CFG_3XPORT_SSP_SUPPORT BIT(31)
++#define CFG_3XPORT_U1_PIPE_CLK_GATE_EN BIT(0)
++
++/* Revision Extended Capability ID */
++#define RTL_REV_CAP 0xC4
++#define RTL_REV_CAP_RX_BUFF_CMD_SIZE GENMASK(31, 24)
++#define RTL_REV_CAP_RX_BUFF_SIZE GENMASK(15, 0)
++#define RTL_REV_CAP_TX_BUFF_CMD_SIZE GENMASK(31, 24)
++#define RTL_REV_CAP_TX_BUFF_SIZE GENMASK(15, 0)
++
++#define CDNSP_VER_1 0x00000000
++#define CDNSP_VER_2 0x10000000
++
++#define CDNSP_IF_EP_EXIST(pdev, ep_num, dir) ((pdev)->rev_cap.ep_supported & \
++ (BIT(ep_num) << ((dir) ? 0 : 16)))
++
++/**
++ * struct cdnsp_rev_cap - controller capabilities .
++ * @ext_cap: Header for RTL Revision Extended Capability.
++ * @rtl_revision: RTL revision.
++ * @rx_buff_size: Rx buffer sizes.
++ * @tx_buff_size: Tx buffer sizes.
++ * @ep_supported: Supported endpoints.
++ * @ctrl_revision: Controller revision ID.
++ */
++struct cdnsp_rev_cap {
++ __le32 ext_cap;
++ __le32 rtl_revision;
++ __le32 rx_buff_size;
++ __le32 tx_buff_size;
++ __le32 ep_supported;
++ __le32 ctrl_revision;
++};
++
++/* USB2.0 Port Peripheral Configuration Registers. */
++#define D_XEC_PRE_REGS_CAP 0xC8
++#define REG_CHICKEN_BITS_2_OFFSET 0x48
++#define CHICKEN_XDMA_2_TP_CACHE_DIS BIT(28)
++
++/* XBUF Extended Capability ID. */
++#define XBUF_CAP_ID 0xCB
++#define XBUF_RX_TAG_MASK_0_OFFSET 0x1C
++#define XBUF_RX_TAG_MASK_1_OFFSET 0x24
++#define XBUF_TX_CMD_OFFSET 0x2C
++
++/**
++ * struct cdnsp_doorbell_array.
++ * @cmd_db: Command ring doorbell register.
++ * @ep_db: Endpoint ring doorbell register.
++ * Bits 0 - 7: Endpoint target.
++ * Bits 8 - 15: RsvdZ.
++ * Bits 16 - 31: Stream ID.
++ */
++struct cdnsp_doorbell_array {
++ __le32 cmd_db;
++ __le32 ep_db;
++};
++
++#define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16))
++#define DB_VALUE_EP0_OUT(ep, stream) ((ep) & 0xff)
++#define DB_VALUE_CMD 0x00000000
++
++/**
++ * struct cdnsp_container_ctx.
++ * @type: Type of context. Used to calculated offsets to contained contexts.
++ * @size: Size of the context data.
++ * @ctx_size: context data structure size - 64 or 32 bits.
++ * @dma: dma address of the bytes.
++ * @bytes: The raw context data given to HW.
++ *
++ * Represents either a Device or Input context. Holds a pointer to the raw
++ * memory used for the context (bytes) and dma address of it (dma).
++ */
++struct cdnsp_container_ctx {
++ unsigned int type;
++#define CDNSP_CTX_TYPE_DEVICE 0x1
++#define CDNSP_CTX_TYPE_INPUT 0x2
++ int size;
++ int ctx_size;
++ dma_addr_t dma;
++ u8 *bytes;
++};
++
++/**
++ * struct cdnsp_slot_ctx
++ * @dev_info: Device speed, and last valid endpoint.
++ * @dev_port: Device port number that is needed to access the USB device.
++ * @int_target: Interrupter target number.
++ * @dev_state: Slot state and device address.
++ *
++ * Slot Context - This assumes the controller uses 32-byte context
++ * structures. If the controller uses 64-byte contexts, there is an additional
++ * 32 bytes reserved at the end of the slot context for controller internal use.
++ */
++struct cdnsp_slot_ctx {
++ __le32 dev_info;
++ __le32 dev_port;
++ __le32 int_target;
++ __le32 dev_state;
++ /* offset 0x10 to 0x1f reserved for controller internal use. */
++ __le32 reserved[4];
++};
++
++/* Bits 20:23 in the Slot Context are the speed for the device. */
++#define SLOT_SPEED_FS (XDEV_FS << 10)
++#define SLOT_SPEED_HS (XDEV_HS << 10)
++#define SLOT_SPEED_SS (XDEV_SS << 10)
++#define SLOT_SPEED_SSP (XDEV_SSP << 10)
++
++/* dev_info bitmasks. */
++/* Device speed - values defined by PORTSC Device Speed field - 20:23. */
++#define DEV_SPEED GENMASK(23, 20)
++#define GET_DEV_SPEED(n) (((n) & DEV_SPEED) >> 20)
++/* Index of the last valid endpoint context in this device context - 27:31. */
++#define LAST_CTX_MASK GENMASK(31, 27)
++#define LAST_CTX(p) ((p) << 27)
++#define LAST_CTX_TO_EP_NUM(p) (((p) >> 27) - 1)
++#define SLOT_FLAG BIT(0)
++#define EP0_FLAG BIT(1)
++
++/* dev_port bitmasks */
++/* Device port number that is needed to access the USB device. */
++#define DEV_PORT(p) (((p) & 0xff) << 16)
++
++/* dev_state bitmasks */
++/* USB device address - assigned by the controller. */
++#define DEV_ADDR_MASK GENMASK(7, 0)
++/* Slot state */
++#define SLOT_STATE GENMASK(31, 27)
++#define GET_SLOT_STATE(p) (((p) & SLOT_STATE) >> 27)
++
++#define SLOT_STATE_DISABLED 0
++#define SLOT_STATE_ENABLED SLOT_STATE_DISABLED
++#define SLOT_STATE_DEFAULT 1
++#define SLOT_STATE_ADDRESSED 2
++#define SLOT_STATE_CONFIGURED 3
++
++/**
++ * struct cdnsp_ep_ctx.
++ * @ep_info: Endpoint state, streams, mult, and interval information.
++ * @ep_info2: Information on endpoint type, max packet size, max burst size,
++ * error count, and whether the controller will force an event for
++ * all transactions.
++ * @deq: 64-bit ring dequeue pointer address. If the endpoint only
++ * defines one stream, this points to the endpoint transfer ring.
++ * Otherwise, it points to a stream context array, which has a
++ * ring pointer for each flow.
++ * @tx_info: Average TRB lengths for the endpoint ring and
++ * max payload within an Endpoint Service Interval Time (ESIT).
++ *
++ * Endpoint Context - This assumes the controller uses 32-byte context
++ * structures. If the controller uses 64-byte contexts, there is an additional
++ * 32 bytes reserved at the end of the endpoint context for controller internal
++ * use.
++ */
++struct cdnsp_ep_ctx {
++ __le32 ep_info;
++ __le32 ep_info2;
++ __le64 deq;
++ __le32 tx_info;
++ /* offset 0x14 - 0x1f reserved for controller internal use. */
++ __le32 reserved[3];
++};
++
++/* ep_info bitmasks. */
++/*
++ * Endpoint State - bits 0:2:
++ * 0 - disabled
++ * 1 - running
++ * 2 - halted due to halt condition
++ * 3 - stopped
++ * 4 - TRB error
++ * 5-7 - reserved
++ */
++#define EP_STATE_MASK GENMASK(3, 0)
++#define EP_STATE_DISABLED 0
++#define EP_STATE_RUNNING 1
++#define EP_STATE_HALTED 2
++#define EP_STATE_STOPPED 3
++#define EP_STATE_ERROR 4
++#define GET_EP_CTX_STATE(ctx) (le32_to_cpu((ctx)->ep_info) & EP_STATE_MASK)
++
++/* Mult - Max number of burst within an interval, in EP companion desc. */
++#define EP_MULT(p) (((p) << 8) & GENMASK(9, 8))
++#define CTX_TO_EP_MULT(p) (((p) & GENMASK(9, 8)) >> 8)
++/* bits 10:14 are Max Primary Streams. */
++/* bit 15 is Linear Stream Array. */
++/* Interval - period between requests to an endpoint - 125u increments. */
++#define EP_INTERVAL(p) (((p) << 16) & GENMASK(23, 16))
++#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) & GENMASK(23, 16)) >> 16))
++#define CTX_TO_EP_INTERVAL(p) (((p) & GENMASK(23, 16)) >> 16)
++#define EP_MAXPSTREAMS_MASK GENMASK(14, 10)
++#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
++#define CTX_TO_EP_MAXPSTREAMS(p) (((p) & EP_MAXPSTREAMS_MASK) >> 10)
++/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
++#define EP_HAS_LSA BIT(15)
++
++/* ep_info2 bitmasks */
++#define ERROR_COUNT(p) (((p) & 0x3) << 1)
++#define CTX_TO_EP_TYPE(p) (((p) >> 3) & 0x7)
++#define EP_TYPE(p) ((p) << 3)
++#define ISOC_OUT_EP 1
++#define BULK_OUT_EP 2
++#define INT_OUT_EP 3
++#define CTRL_EP 4
++#define ISOC_IN_EP 5
++#define BULK_IN_EP 6
++#define INT_IN_EP 7
++/* bit 6 reserved. */
++/* bit 7 is Device Initiate Disable - for disabling stream selection. */
++#define MAX_BURST(p) (((p) << 8) & GENMASK(15, 8))
++#define CTX_TO_MAX_BURST(p) (((p) & GENMASK(15, 8)) >> 8)
++#define MAX_PACKET(p) (((p) << 16) & GENMASK(31, 16))
++#define MAX_PACKET_MASK GENMASK(31, 16)
++#define MAX_PACKET_DECODED(p) (((p) & GENMASK(31, 16)) >> 16)
++
++/* tx_info bitmasks. */
++#define EP_AVG_TRB_LENGTH(p) ((p) & GENMASK(15, 0))
++#define EP_MAX_ESIT_PAYLOAD_LO(p) (((p) << 16) & GENMASK(31, 16))
++#define EP_MAX_ESIT_PAYLOAD_HI(p) ((((p) & GENMASK(23, 16)) >> 16) << 24)
++#define CTX_TO_MAX_ESIT_PAYLOAD_LO(p) (((p) & GENMASK(31, 16)) >> 16)
++#define CTX_TO_MAX_ESIT_PAYLOAD_HI(p) (((p) & GENMASK(31, 24)) >> 24)
++
++/* deq bitmasks. */
++#define EP_CTX_CYCLE_MASK BIT(0)
++#define CTX_DEQ_MASK (~0xfL)
++
++/**
++ * struct cdnsp_input_control_context
++ * Input control context;
++ *
++ * @drop_flags: Set the bit of the endpoint context you want to disable.
++ * @add_flags: Set the bit of the endpoint context you want to enable.
++ */
++struct cdnsp_input_control_ctx {
++ __le32 drop_flags;
++ __le32 add_flags;
++ __le32 rsvd2[6];
++};
++
++/**
++ * Represents everything that is needed to issue a command on the command ring.
++ *
++ * @in_ctx: Pointer to input context structure.
++ * @status: Command Completion Code for last command.
++ * @command_trb: Pointer to command TRB.
++ */
++struct cdnsp_command {
++ /* Input context for changing device state. */
++ struct cdnsp_container_ctx *in_ctx;
++ u32 status;
++ union cdnsp_trb *command_trb;
++};
++
++/**
++ * Stream context structure.
++ *
++ * @stream_ring: 64-bit stream ring address, cycle state, and stream type.
++ * @reserved: offset 0x14 - 0x1f reserved for controller internal use.
++ */
++struct cdnsp_stream_ctx {
++ __le64 stream_ring;
++ __le32 reserved[2];
++};
++
++/* Stream Context Types - bits 3:1 of stream ctx deq ptr. */
++#define SCT_FOR_CTX(p) (((p) << 1) & GENMASK(3, 1))
++/* Secondary stream array type, dequeue pointer is to a transfer ring. */
++#define SCT_SEC_TR 0
++/* Primary stream array type, dequeue pointer is to a transfer ring. */
++#define SCT_PRI_TR 1
++
++/**
++ * struct cdnsp_stream_info: Representing everything that is needed to
++ * support stream capable endpoints.
++ * @stream_rings: Array of pointers containing Transfer rings for all
++ * supported streams.
++ * @num_streams: Number of streams, including stream 0.
++ * @stream_ctx_array: The stream context array may be bigger than the number
++ * of streams the driver asked for.
++ * @num_stream_ctxs: Number of streams.
++ * @ctx_array_dma: Dma address of Context Stream Array.
++ * @trb_address_map: For mapping physical TRB addresses to segments in
++ * stream rings.
++ * @td_count: Number of TDs associated with endpoint.
++ * @first_prime_det: First PRIME packet detected.
++ * @drbls_count: Number of allowed doorbells.
++ */
++struct cdnsp_stream_info {
++ struct cdnsp_ring **stream_rings;
++ unsigned int num_streams;
++ struct cdnsp_stream_ctx *stream_ctx_array;
++ unsigned int num_stream_ctxs;
++ dma_addr_t ctx_array_dma;
++ struct radix_tree_root trb_address_map;
++ int td_count;
++ u8 first_prime_det;
++#define STREAM_DRBL_FIFO_DEPTH 2
++ u8 drbls_count;
++};
++
++#define STREAM_LOG_STREAMS 4
++#define STREAM_NUM_STREAMS BIT(STREAM_LOG_STREAMS)
++
++#if STREAM_LOG_STREAMS > 16 && STREAM_LOG_STREAMS < 1
++#error "Not suupported stream value"
++#endif
++
++/**
++ * struct cdnsp_ep - extended device side representation of USB endpoint.
++ * @endpoint: usb endpoint
++ * @pending_req_list: List of requests queuing on transfer ring.
++ * @pdev: Device associated with this endpoint.
++ * @number: Endpoint number (1 - 15).
++ * @idx: The device context index (DCI).
++ * @interval: Interval between packets used for ISOC endpoint.
++ * @name: A human readable name e.g. ep1out.
++ * @direction: Endpoint direction.
++ * @buffering: Number of on-chip buffers related to endpoint.
++ * @buffering_period: Number of on-chip buffers related to periodic endpoint.
++ * @in_ctx: Pointer to input endpoint context structure.
++ * @out_ctx: Pointer to output endpoint context structure.
++ * @ring: Pointer to transfer ring.
++ * @stream_info: Hold stream information.
++ * @ep_state: Current state of endpoint.
++ * @skip: Sometimes the controller can not process isochronous endpoint ring
++ * quickly enough, and it will miss some isoc tds on the ring and
++ * generate Missed Service Error Event.
++ * Set skip flag when receive a Missed Service Error Event and
++ * process the missed tds on the endpoint ring.
++ */
++struct cdnsp_ep {
++ struct usb_ep endpoint;
++ struct list_head pending_list;
++ struct cdnsp_device *pdev;
++ u8 number;
++ u8 idx;
++ u32 interval;
++ char name[20];
++ u8 direction;
++ u8 buffering;
++ u8 buffering_period;
++ struct cdnsp_ep_ctx *in_ctx;
++ struct cdnsp_ep_ctx *out_ctx;
++ struct cdnsp_ring *ring;
++ struct cdnsp_stream_info stream_info;
++ unsigned int ep_state;
++#define EP_ENABLED BIT(0)
++#define EP_DIS_IN_RROGRESS BIT(1)
++#define EP_HALTED BIT(2)
++#define EP_STOPPED BIT(3)
++#define EP_WEDGE BIT(4)
++#define EP0_HALTED_STATUS BIT(5)
++#define EP_HAS_STREAMS BIT(6)
++
++ bool skip;
++};
++
++/**
++ * struct cdnsp_device_context_array
++ * @dev_context_ptrs: Array of 64-bit DMA addresses for device contexts.
++ * @dma: DMA address for device contexts structure.
++ */
++struct cdnsp_device_context_array {
++ __le64 dev_context_ptrs[CDNSP_DEV_MAX_SLOTS + 1];
++ dma_addr_t dma;
++};
++
++/**
++ * struct cdnsp_transfer_event.
++ * @buffer: 64-bit buffer address, or immediate data.
++ * @transfer_len: Data length transferred.
++ * @flags: Field is interpreted differently based on the type of TRB.
++ */
++struct cdnsp_transfer_event {
++ __le64 buffer;
++ __le32 transfer_len;
++ __le32 flags;
++};
++
++/* Invalidate event after disabling endpoint. */
++#define TRB_EVENT_INVALIDATE 8
++
++/* Transfer event TRB length bit mask. */
++/* bits 0:23 */
++#define EVENT_TRB_LEN(p) ((p) & GENMASK(23, 0))
++/* Completion Code - only applicable for some types of TRBs */
++#define COMP_CODE_MASK (0xff << 24)
++#define GET_COMP_CODE(p) (((p) & COMP_CODE_MASK) >> 24)
++#define COMP_INVALID 0
++#define COMP_SUCCESS 1
++#define COMP_DATA_BUFFER_ERROR 2
++#define COMP_BABBLE_DETECTED_ERROR 3
++#define COMP_TRB_ERROR 5
++#define COMP_RESOURCE_ERROR 7
++#define COMP_NO_SLOTS_AVAILABLE_ERROR 9
++#define COMP_INVALID_STREAM_TYPE_ERROR 10
++#define COMP_SLOT_NOT_ENABLED_ERROR 11
++#define COMP_ENDPOINT_NOT_ENABLED_ERROR 12
++#define COMP_SHORT_PACKET 13
++#define COMP_RING_UNDERRUN 14
++#define COMP_RING_OVERRUN 15
++#define COMP_VF_EVENT_RING_FULL_ERROR 16
++#define COMP_PARAMETER_ERROR 17
++#define COMP_CONTEXT_STATE_ERROR 19
++#define COMP_EVENT_RING_FULL_ERROR 21
++#define COMP_INCOMPATIBLE_DEVICE_ERROR 22
++#define COMP_MISSED_SERVICE_ERROR 23
++#define COMP_COMMAND_RING_STOPPED 24
++#define COMP_COMMAND_ABORTED 25
++#define COMP_STOPPED 26
++#define COMP_STOPPED_LENGTH_INVALID 27
++#define COMP_STOPPED_SHORT_PACKET 28
++#define COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR 29
++#define COMP_ISOCH_BUFFER_OVERRUN 31
++#define COMP_EVENT_LOST_ERROR 32
++#define COMP_UNDEFINED_ERROR 33
++#define COMP_INVALID_STREAM_ID_ERROR 34
++
++/*Transfer Event NRDY bit fields */
++#define TRB_TO_DEV_STREAM(p) ((p) & GENMASK(16, 0))
++#define TRB_TO_HOST_STREAM(p) ((p) & GENMASK(16, 0))
++#define STREAM_PRIME_ACK 0xFFFE
++#define STREAM_REJECTED 0xFFFF
++
++/** Transfer Event bit fields **/
++#define TRB_TO_EP_ID(p) (((p) & GENMASK(20, 16)) >> 16)
++
++/**
++ * struct cdnsp_link_trb
++ * @segment_ptr: 64-bit segment pointer.
++ * @intr_target: Interrupter target.
++ * @control: Flags.
++ */
++struct cdnsp_link_trb {
++ __le64 segment_ptr;
++ __le32 intr_target;
++ __le32 control;
++};
++
++/* control bitfields */
++#define LINK_TOGGLE BIT(1)
++
++/**
++ * struct cdnsp_event_cmd - Command completion event TRB.
++ * @cmd_trb: Pointer to command TRB, or the value passed by the event data trb
++ * @status: Command completion parameters and error code.
++ * @flags: Flags.
++ */
++struct cdnsp_event_cmd {
++ __le64 cmd_trb;
++ __le32 status;
++ __le32 flags;
++};
++
++/* flags bitmasks */
++
++/* Address device - disable SetAddress. */
++#define TRB_BSR BIT(9)
++
++/* Configure Endpoint - Deconfigure. */
++#define TRB_DC BIT(9)
++
++/* Force Header */
++#define TRB_FH_TO_PACKET_TYPE(p) ((p) & GENMASK(4, 0))
++#define TRB_FH_TR_PACKET 0x4
++#define TRB_FH_TO_DEVICE_ADDRESS(p) (((p) << 25) & GENMASK(31, 25))
++#define TRB_FH_TR_PACKET_DEV_NOT 0x6
++#define TRB_FH_TO_NOT_TYPE(p) (((p) << 4) & GENMASK(7, 4))
++#define TRB_FH_TR_PACKET_FUNCTION_WAKE 0x1
++#define TRB_FH_TO_INTERFACE(p) (((p) << 8) & GENMASK(15, 8))
++
++enum cdnsp_setup_dev {
++ SETUP_CONTEXT_ONLY,
++ SETUP_CONTEXT_ADDRESS,
++};
++
++/* bits 24:31 are the slot ID. */
++#define TRB_TO_SLOT_ID(p) (((p) & GENMASK(31, 24)) >> 24)
++#define SLOT_ID_FOR_TRB(p) (((p) << 24) & GENMASK(31, 24))
++
++/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB. */
++#define TRB_TO_EP_INDEX(p) (((p) >> 16) & 0x1f)
++
++#define EP_ID_FOR_TRB(p) ((((p) + 1) << 16) & GENMASK(20, 16))
++
++#define SUSPEND_PORT_FOR_TRB(p) (((p) & 1) << 23)
++#define TRB_TO_SUSPEND_PORT(p) (((p) >> 23) & 0x1)
++#define LAST_EP_INDEX 30
++
++/* Set TR Dequeue Pointer command TRB fields. */
++#define TRB_TO_STREAM_ID(p) ((((p) & GENMASK(31, 16)) >> 16))
++#define STREAM_ID_FOR_TRB(p) ((((p)) << 16) & GENMASK(31, 16))
++#define SCT_FOR_TRB(p) (((p) << 1) & 0x7)
++
++/* Link TRB specific fields. */
++#define TRB_TC BIT(1)
++
++/* Port Status Change Event TRB fields. */
++/* Port ID - bits 31:24. */
++#define GET_PORT_ID(p) (((p) & GENMASK(31, 24)) >> 24)
++#define SET_PORT_ID(p) (((p) << 24) & GENMASK(31, 24))
++#define EVENT_DATA BIT(2)
++
++/* Normal TRB fields. */
++/* transfer_len bitmasks - bits 0:16. */
++#define TRB_LEN(p) ((p) & GENMASK(16, 0))
++/* TD Size, packets remaining in this TD, bits 21:17 (5 bits, so max 31). */
++#define TRB_TD_SIZE(p) (min((p), (u32)31) << 17)
++#define GET_TD_SIZE(p) (((p) & GENMASK(21, 17)) >> 17)
++/*
++ * Controller uses the TD_SIZE field for TBC if Extended TBC
++ * is enabled (ETE).
++ */
++#define TRB_TD_SIZE_TBC(p) (min((p), (u32)31) << 17)
++/* Interrupter Target - which MSI-X vector to target the completion event at. */
++#define TRB_INTR_TARGET(p) (((p) << 22) & GENMASK(31, 22))
++#define GET_INTR_TARGET(p) (((p) & GENMASK(31, 22)) >> 22)
++/*
++ * Total burst count field, Rsvdz on controller with Extended TBC
++ * enabled (ETE).
++ */
++#define TRB_TBC(p) (((p) & 0x3) << 7)
++#define TRB_TLBPC(p) (((p) & 0xf) << 16)
++
++/* Cycle bit - indicates TRB ownership by the controller or the driver. */
++#define TRB_CYCLE BIT(0)
++/*
++ * Force next event data TRB to be evaluated before task switch.
++ * Used to pass OS data back after a TD completes.
++ */
++#define TRB_ENT BIT(1)
++/* Interrupt on short packet. */
++#define TRB_ISP BIT(2)
++/* Set PCIe no snoop attribute. */
++#define TRB_NO_SNOOP BIT(3)
++/* Chain multiple TRBs into a TD. */
++#define TRB_CHAIN BIT(4)
++/* Interrupt on completion. */
++#define TRB_IOC BIT(5)
++/* The buffer pointer contains immediate data. */
++#define TRB_IDT BIT(6)
++/* 0 - NRDY during data stage, 1 - NRDY during status stage (only control). */
++#define TRB_STAT BIT(7)
++/* Block Event Interrupt. */
++#define TRB_BEI BIT(9)
++
++/* Control transfer TRB specific fields. */
++#define TRB_DIR_IN BIT(16)
++
++/* TRB bit mask in Data Stage TRB */
++#define TRB_SETUPID_BITMASK GENMASK(9, 8)
++#define TRB_SETUPID(p) ((p) << 8)
++#define TRB_SETUPID_TO_TYPE(p) (((p) & TRB_SETUPID_BITMASK) >> 8)
++
++#define TRB_SETUP_SPEEDID_USB3 0x1
++#define TRB_SETUP_SPEEDID_USB2 0x0
++#define TRB_SETUP_SPEEDID(p) ((p) & (1 << 7))
++
++#define TRB_SETUPSTAT_ACK 0x1
++#define TRB_SETUPSTAT_STALL 0x0
++#define TRB_SETUPSTAT(p) ((p) << 6)
++
++/* Isochronous TRB specific fields */
++#define TRB_SIA BIT(31)
++#define TRB_FRAME_ID(p) (((p) << 20) & GENMASK(30, 20))
++
++struct cdnsp_generic_trb {
++ __le32 field[4];
++};
++
++union cdnsp_trb {
++ struct cdnsp_link_trb link;
++ struct cdnsp_transfer_event trans_event;
++ struct cdnsp_event_cmd event_cmd;
++ struct cdnsp_generic_trb generic;
++};
++
++/* TRB bit mask. */
++#define TRB_TYPE_BITMASK GENMASK(15, 10)
++#define TRB_TYPE(p) ((p) << 10)
++#define TRB_FIELD_TO_TYPE(p) (((p) & TRB_TYPE_BITMASK) >> 10)
++
++/* TRB type IDs. */
++/* bulk, interrupt, isoc scatter/gather, and control data stage. */
++#define TRB_NORMAL 1
++/* Setup Stage for control transfers. */
++#define TRB_SETUP 2
++/* Data Stage for control transfers. */
++#define TRB_DATA 3
++/* Status Stage for control transfers. */
++#define TRB_STATUS 4
++/* ISOC transfers. */
++#define TRB_ISOC 5
++/* TRB for linking ring segments. */
++#define TRB_LINK 6
++#define TRB_EVENT_DATA 7
++/* Transfer Ring No-op (not for the command ring). */
++#define TRB_TR_NOOP 8
++
++/* Command TRBs */
++/* Enable Slot Command. */
++#define TRB_ENABLE_SLOT 9
++/* Disable Slot Command. */
++#define TRB_DISABLE_SLOT 10
++/* Address Device Command. */
++#define TRB_ADDR_DEV 11
++/* Configure Endpoint Command. */
++#define TRB_CONFIG_EP 12
++/* Evaluate Context Command. */
++#define TRB_EVAL_CONTEXT 13
++/* Reset Endpoint Command. */
++#define TRB_RESET_EP 14
++/* Stop Transfer Ring Command. */
++#define TRB_STOP_RING 15
++/* Set Transfer Ring Dequeue Pointer Command. */
++#define TRB_SET_DEQ 16
++/* Reset Device Command. */
++#define TRB_RESET_DEV 17
++/* Force Event Command (opt). */
++#define TRB_FORCE_EVENT 18
++/* Force Header Command - generate a transaction or link management packet. */
++#define TRB_FORCE_HEADER 22
++/* No-op Command - not for transfer rings. */
++#define TRB_CMD_NOOP 23
++/* TRB IDs 24-31 reserved. */
++
++/* Event TRBS. */
++/* Transfer Event. */
++#define TRB_TRANSFER 32
++/* Command Completion Event. */
++#define TRB_COMPLETION 33
++/* Port Status Change Event. */
++#define TRB_PORT_STATUS 34
++/* Device Controller Event. */
++#define TRB_HC_EVENT 37
++/* MFINDEX Wrap Event - microframe counter wrapped. */
++#define TRB_MFINDEX_WRAP 39
++/* TRB IDs 40-47 reserved. */
++/* Endpoint Not Ready Event. */
++#define TRB_ENDPOINT_NRDY 48
++/* TRB IDs 49-53 reserved. */
++/* Halt Endpoint Command. */
++#define TRB_HALT_ENDPOINT 54
++/* Doorbell Overflow Event. */
++#define TRB_DRB_OVERFLOW 57
++/* Flush Endpoint Command. */
++#define TRB_FLUSH_ENDPOINT 58
++
++#define TRB_TYPE_LINK(x) (((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
++#define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
++ cpu_to_le32(TRB_TYPE(TRB_LINK)))
++#define TRB_TYPE_NOOP_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
++ cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)))
++
++/*
++ * TRBS_PER_SEGMENT must be a multiple of 4.
++ * The command ring is 64-byte aligned, so it must also be greater than 16.
++ */
++#define TRBS_PER_SEGMENT 256
++#define TRBS_PER_EVENT_SEGMENT 256
++#define TRBS_PER_EV_DEQ_UPDATE 100
++#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT * 16)
++#define TRB_SEGMENT_SHIFT (ilog2(TRB_SEGMENT_SIZE))
++/* TRB buffer pointers can't cross 64KB boundaries. */
++#define TRB_MAX_BUFF_SHIFT 16
++#define TRB_MAX_BUFF_SIZE BIT(TRB_MAX_BUFF_SHIFT)
++/* How much data is left before the 64KB boundary? */
++#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr) (TRB_MAX_BUFF_SIZE - \
++ ((addr) & (TRB_MAX_BUFF_SIZE - 1)))
++
++/**
++ * struct cdnsp_segment - segment related data.
++ * @trbs: Array of Transfer Request Blocks.
++ * @next: Pointer to the next segment.
++ * @dma: DMA address of current segment.
++ * @bounce_dma: Bounce buffer DMA address .
++ * @bounce_buf: Bounce buffer virtual address.
++ * @bounce_offs: Bounce buffer offset.
++ * @bounce_len: Bounce buffer length.
++ */
++struct cdnsp_segment {
++ union cdnsp_trb *trbs;
++ struct cdnsp_segment *next;
++ dma_addr_t dma;
++ /* Max packet sized bounce buffer for td-fragment alignment */
++ dma_addr_t bounce_dma;
++ void *bounce_buf;
++ unsigned int bounce_offs;
++ unsigned int bounce_len;
++};
++
++/**
++ * struct cdnsp_td - Transfer Descriptor object.
++ * @td_list: Used for binding TD with ep_ring->td_list.
++ * @preq: Request associated with this TD
++ * @start_seg: Segment containing the first_trb in TD.
++ * @first_trb: First TRB for this TD.
++ * @last_trb: Last TRB related with TD.
++ * @bounce_seg: Bounce segment for this TD.
++ * @request_length_set: actual_length of the request has already been set.
++ * @drbl - TD has been added to HW scheduler - only for stream capable
++ * endpoints.
++ */
++struct cdnsp_td {
++ struct list_head td_list;
++ struct cdnsp_request *preq;
++ struct cdnsp_segment *start_seg;
++ union cdnsp_trb *first_trb;
++ union cdnsp_trb *last_trb;
++ struct cdnsp_segment *bounce_seg;
++ bool request_length_set;
++ bool drbl;
++};
++
++/**
++ * struct cdnsp_dequeue_state - New dequeue pointer for Transfer Ring.
++ * @new_deq_seg: New dequeue segment.
++ * @new_deq_ptr: New dequeue pointer.
++ * @new_cycle_state: New cycle state.
++ * @stream_id: stream id for which new dequeue pointer has been selected.
++ */
++struct cdnsp_dequeue_state {
++ struct cdnsp_segment *new_deq_seg;
++ union cdnsp_trb *new_deq_ptr;
++ int new_cycle_state;
++ unsigned int stream_id;
++};
++
++enum cdnsp_ring_type {
++ TYPE_CTRL = 0,
++ TYPE_ISOC,
++ TYPE_BULK,
++ TYPE_INTR,
++ TYPE_STREAM,
++ TYPE_COMMAND,
++ TYPE_EVENT,
++};
++
++/**
++ * struct cdnsp_ring - information describing transfer, command or event ring.
++ * @first_seg: First segment on transfer ring.
++ * @last_seg: Last segment on transfer ring.
++ * @enqueue: SW enqueue pointer address.
++ * @enq_seg: SW enqueue segment address.
++ * @dequeue: SW dequeue pointer address.
++ * @deq_seg: SW dequeue segment address.
++ * @td_list: transfer descriptor list associated with this ring.
++ * @cycle_state: Current cycle bit. Write the cycle state into the TRB cycle
++ * field to give ownership of the TRB to the device controller
++ * (if we are the producer) or to check if we own the TRB
++ * (if we are the consumer).
++ * @stream_id: Stream id
++ * @stream_active: Stream is active - PRIME packet has been detected.
++ * @stream_rejected: This ring has been rejected by host.
++ * @num_tds: Number of TDs associated with ring.
++ * @num_segs: Number of segments.
++ * @num_trbs_free: Number of free TRBs on the ring.
++ * @bounce_buf_len: Length of bounce buffer.
++ * @type: Ring type - event, transfer, or command ring.
++ * @last_td_was_short - TD is short TD.
++ * @trb_address_map: For mapping physical TRB addresses to segments in
++ * stream rings.
++ */
++struct cdnsp_ring {
++ struct cdnsp_segment *first_seg;
++ struct cdnsp_segment *last_seg;
++ union cdnsp_trb *enqueue;
++ struct cdnsp_segment *enq_seg;
++ union cdnsp_trb *dequeue;
++ struct cdnsp_segment *deq_seg;
++ struct list_head td_list;
++ u32 cycle_state;
++ unsigned int stream_id;
++ unsigned int stream_active;
++ unsigned int stream_rejected;
++ int num_tds;
++ unsigned int num_segs;
++ unsigned int num_trbs_free;
++ unsigned int bounce_buf_len;
++ enum cdnsp_ring_type type;
++ bool last_td_was_short;
++ struct radix_tree_root *trb_address_map;
++};
++
++/**
++ * struct cdnsp_erst_entry - event ring segment table entry object.
++ * @seg_addr: 64-bit event ring segment address.
++ * @seg_size: Number of TRBs in the segment.
++ */
++struct cdnsp_erst_entry {
++ __le64 seg_addr;
++ __le32 seg_size;
++ /* Set to zero */
++ __le32 rsvd;
++};
++
++/**
++ * struct cdnsp_erst - event ring segment table for event ring.
++ * @entries: Array of event ring segments
++ * @num_entries: Number of segments in entries array.
++ * @erst_dma_addr: DMA address for entries array.
++ */
++struct cdnsp_erst {
++ struct cdnsp_erst_entry *entries;
++ unsigned int num_entries;
++ dma_addr_t erst_dma_addr;
++};
++
++/**
++ * struct cdnsp_request - extended device side representation of usb_request
++ * object .
++ * @td: Transfer descriptor associated with this request.
++ * @request: Generic usb_request object describing single I/O request.
++ * @list: Used to adding request to endpoint pending_list.
++ * @pep: Extended representation of usb_ep object
++ * @epnum: Endpoint number associated with usb request.
++ * @direction: Endpoint direction for usb request.
++ */
++struct cdnsp_request {
++ struct cdnsp_td td;
++ struct usb_request request;
++ struct list_head list;
++ struct cdnsp_ep *pep;
++ u8 epnum;
++ unsigned direction:1;
++};
++
++#define ERST_NUM_SEGS 1
++
++/* Stages used during enumeration process.*/
++enum cdnsp_ep0_stage {
++ CDNSP_SETUP_STAGE,
++ CDNSP_DATA_STAGE,
++ CDNSP_STATUS_STAGE,
++};
++
++/**
++ * struct cdnsp_port - holds information about detected ports.
++ * @port_num: Port number.
++ * @exist: Indicate if port exist.
++ * maj_rev: Major revision.
++ * min_rev: Minor revision.
++ */
++struct cdnsp_port {
++ struct cdnsp_port_regs __iomem *regs;
++ u8 port_num;
++ u8 exist;
++ u8 maj_rev;
++ u8 min_rev;
++};
++
++#define CDNSP_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff)
++#define CDNSP_EXT_PORT_MINOR(x) (((x) >> 16) & 0xff)
++#define CDNSP_EXT_PORT_OFF(x) ((x) & 0xff)
++#define CDNSP_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff)
++
++/**
++ * struct cdnsp_device - represent USB device.
++ * @dev: Pointer to device structure associated whit this controller.
++ * @gadget: Device side representation of the peripheral controller.
++ * @gadget_driver: Pointer to the gadget driver.
++ * @irq: IRQ line number used by device side.
++ * @regs:IO device memory.
++ * @cap_regs: Capability registers.
++ * @op_regs: Operational registers.
++ * @run_regs: Runtime registers.
++ * @dba: Device base address register.
++ * @ir_set: Current interrupter register set.
++ * @port20_regs: Port 2.0 Peripheral Configuration Registers.
++ * @port3x_regs: USB3.x Port Peripheral Configuration Registers.
++ * @hcs_params1: Cached register copies of read-only HCSPARAMS1
++ * @hcc_params: Cached register copies of read-only HCCPARAMS1
++ * @rev_cap: Controller capability.
++ * @setup: Temporary buffer for setup packet.
++ * @ep0_preq: Internal allocated request used during enumeration.
++ * @ep0_stage: ep0 stage during enumeration process.
++ * @three_stage_setup: Three state or two state setup.
++ * @ep0_expect_in: Data IN expected for control transfer.
++ * @setup_id: Setup identifier.
++ * @setup_speed - Speed detected for current SETUP packet.
++ * @setup_buf: Buffer for SETUP packet.
++ * @device_address: Current device address.
++ * @may_wakeup: remote wakeup enabled/disabled.
++ * @lock: Lock used in interrupt thread context.
++ * @hci_version: device controller version.
++ * @dcbaa: Device context base address array.
++ * @cmd_ring: Command ring.
++ * @cmd: Represent all what is needed to issue command on Command Ring.
++ * @event_ring: Event ring.
++ * @erst: Event Ring Segment table
++ * @slot_id: Current Slot ID. Should be 0 or 1.
++ * @out_ctx: Output context.
++ * @in_ctx: Input context.
++ * @eps: array of endpoints object associated with device.
++ * @usb2_hw_lpm_capable: hardware lpm is enabled;
++ * @u1_allowed: Allow device transition to U1 state.
++ * @u2_allowed: Allow device transition to U2 state
++ * @device_pool: DMA pool for allocating input and output context.
++ * @segment_pool: DMA pool for allocating new segments.
++ * @cdnsp_state: Current state of controller.
++ * @link_state: Current link state.
++ * @usb2_port - Port USB 2.0.
++ * @usb3_port - Port USB 3.0.
++ * @active_port - Current selected Port.
++ * @test_mode: selected Test Mode.
++ */
++struct cdnsp_device {
++ struct device *dev;
++ struct usb_gadget gadget;
++ struct usb_gadget_driver *gadget_driver;
++ unsigned int irq;
++ void __iomem *regs;
++
++ /* Registers map */
++ struct cdnsp_cap_regs __iomem *cap_regs;
++ struct cdnsp_op_regs __iomem *op_regs;
++ struct cdnsp_run_regs __iomem *run_regs;
++ struct cdnsp_doorbell_array __iomem *dba;
++ struct cdnsp_intr_reg __iomem *ir_set;
++ struct cdnsp_20port_cap __iomem *port20_regs;
++ struct cdnsp_3xport_cap __iomem *port3x_regs;
++
++ /* Cached register copies of read-only CDNSP data */
++ __u32 hcs_params1;
++ __u32 hcs_params3;
++ __u32 hcc_params;
++ struct cdnsp_rev_cap rev_cap;
++ /* Lock used in interrupt thread context. */
++ spinlock_t lock;
++ struct usb_ctrlrequest setup;
++ struct cdnsp_request ep0_preq;
++ enum cdnsp_ep0_stage ep0_stage;
++ u8 three_stage_setup;
++ u8 ep0_expect_in;
++ u8 setup_id;
++ u8 setup_speed;
++ void *setup_buf;
++ u8 device_address;
++ int may_wakeup;
++ u16 hci_version;
++
++ /* data structures */
++ struct cdnsp_device_context_array *dcbaa;
++ struct cdnsp_ring *cmd_ring;
++ struct cdnsp_command cmd;
++ struct cdnsp_ring *event_ring;
++ struct cdnsp_erst erst;
++ int slot_id;
++
++ /*
++ * Commands to the hardware are passed an "input context" that
++ * tells the hardware what to change in its data structures.
++ * The hardware will return changes in an "output context" that
++ * software must allocate for the hardware. .
++ */
++ struct cdnsp_container_ctx out_ctx;
++ struct cdnsp_container_ctx in_ctx;
++ struct cdnsp_ep eps[CDNSP_ENDPOINTS_NUM];
++ u8 usb2_hw_lpm_capable:1;
++ u8 u1_allowed:1;
++ u8 u2_allowed:1;
++
++ /* DMA pools */
++ struct dma_pool *device_pool;
++ struct dma_pool *segment_pool;
++
++#define CDNSP_STATE_HALTED BIT(1)
++#define CDNSP_STATE_DYING BIT(2)
++#define CDNSP_STATE_DISCONNECT_PENDING BIT(3)
++#define CDNSP_WAKEUP_PENDING BIT(4)
++ unsigned int cdnsp_state;
++ unsigned int link_state;
++
++ struct cdnsp_port usb2_port;
++ struct cdnsp_port usb3_port;
++ struct cdnsp_port *active_port;
++ u16 test_mode;
++};
++
++#endif /* __LINUX_CDNSP_GADGET_H */
+--
+2.35.1
+