--- /dev/null
+From 04eebeff407c947ab5ec0bb33d81fed129e872e8 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 11 Apr 2019 15:24:30 +1200
+Subject: fanotify: Release SRCU lock when waiting for userspace response
+
+commit 05f0e38724e8449184acd8fbf0473ee5a07adc6c upstream.
+
+When userspace task processing fanotify permission events screws up and
+does not respond, fsnotify_mark_srcu SRCU is held indefinitely which
+causes further hangs in the whole notification subsystem. Although we
+cannot easily solve the problem of operations blocked waiting for
+response from userspace, we can at least somewhat localize the damage by
+dropping SRCU lock before waiting for userspace response and reacquiring
+it when userspace responds.
+
+Reviewed-by: Miklos Szeredi <mszeredi@redhat.com>
+Reviewed-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+[mruffell: cherry picked]
+Signed-off-by: Matthew Ruffell <matthew.ruffell@canonical.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/notify/fanotify/fanotify.c | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
+index 4944956cdbd9..eeb5cc1f6978 100644
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -61,14 +61,26 @@ static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)
+
+ #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ static int fanotify_get_response(struct fsnotify_group *group,
+- struct fanotify_perm_event_info *event)
++ struct fanotify_perm_event_info *event,
++ struct fsnotify_iter_info *iter_info)
+ {
+ int ret;
+
+ pr_debug("%s: group=%p event=%p\n", __func__, group, event);
+
++ /*
++ * fsnotify_prepare_user_wait() fails if we race with mark deletion.
++ * Just let the operation pass in that case.
++ */
++ if (!fsnotify_prepare_user_wait(iter_info)) {
++ event->response = FAN_ALLOW;
++ goto out;
++ }
++
+ wait_event(group->fanotify_data.access_waitq, event->response);
+
++ fsnotify_finish_user_wait(iter_info);
++out:
+ /* userspace responded, convert to something usable */
+ switch (event->response) {
+ case FAN_ALLOW:
+@@ -216,7 +228,8 @@ static int fanotify_handle_event(struct fsnotify_group *group,
+
+ #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ if (mask & FAN_ALL_PERM_EVENTS) {
+- ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event));
++ ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event),
++ iter_info);
+ fsnotify_destroy_event(group, fsn_event);
+ }
+ #endif
+--
+2.19.1
+
--- /dev/null
+From 121bef4ce6afb094d2f504dc08dd7e55817d50bf Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 11 Apr 2019 15:24:29 +1200
+Subject: fsnotify: Pass fsnotify_iter_info into handle_event handler
+
+commit 9385a84d7e1f658bb2d96ab798393e4b16268aaa upstream.
+
+Pass fsnotify_iter_info into ->handle_event() handler so that it can
+release and reacquire SRCU lock via fsnotify_prepare_user_wait() and
+fsnotify_finish_user_wait() functions. These functions also make sure
+current marks are appropriately pinned so that iteration protected by
+srcu in fsnotify() stays safe.
+
+Reviewed-by: Miklos Szeredi <mszeredi@redhat.com>
+Reviewed-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+[mruffell: backport: removing const keyword and minor realignment]
+Signed-off-by: Matthew Ruffell <matthew.ruffell@canonical.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/notify/dnotify/dnotify.c | 3 ++-
+ fs/notify/fanotify/fanotify.c | 3 ++-
+ fs/notify/fsnotify.c | 19 +++++++++++++------
+ fs/notify/inotify/inotify.h | 3 ++-
+ fs/notify/inotify/inotify_fsnotify.c | 3 ++-
+ fs/notify/inotify/inotify_user.c | 2 +-
+ include/linux/fsnotify_backend.h | 3 ++-
+ kernel/audit_fsnotify.c | 3 ++-
+ kernel/audit_tree.c | 3 ++-
+ kernel/audit_watch.c | 3 ++-
+ 10 files changed, 30 insertions(+), 15 deletions(-)
+
+diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
+index 6faaf710e563..264bfd99a694 100644
+--- a/fs/notify/dnotify/dnotify.c
++++ b/fs/notify/dnotify/dnotify.c
+@@ -86,7 +86,8 @@ static int dnotify_handle_event(struct fsnotify_group *group,
+ struct fsnotify_mark *inode_mark,
+ struct fsnotify_mark *vfsmount_mark,
+ u32 mask, void *data, int data_type,
+- const unsigned char *file_name, u32 cookie)
++ const unsigned char *file_name, u32 cookie,
++ struct fsnotify_iter_info *iter_info)
+ {
+ struct dnotify_mark *dn_mark;
+ struct dnotify_struct *dn;
+diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
+index 8a459b179183..4944956cdbd9 100644
+--- a/fs/notify/fanotify/fanotify.c
++++ b/fs/notify/fanotify/fanotify.c
+@@ -174,7 +174,8 @@ static int fanotify_handle_event(struct fsnotify_group *group,
+ struct fsnotify_mark *inode_mark,
+ struct fsnotify_mark *fanotify_mark,
+ u32 mask, void *data, int data_type,
+- const unsigned char *file_name, u32 cookie)
++ const unsigned char *file_name, u32 cookie,
++ struct fsnotify_iter_info *iter_info)
+ {
+ int ret = 0;
+ struct fanotify_event_info *event;
+diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
+index 56b4f855fa9b..da8c6674990d 100644
+--- a/fs/notify/fsnotify.c
++++ b/fs/notify/fsnotify.c
+@@ -131,7 +131,8 @@ static int send_to_group(struct inode *to_tell,
+ struct fsnotify_mark *vfsmount_mark,
+ __u32 mask, void *data,
+ int data_is, u32 cookie,
+- const unsigned char *file_name)
++ const unsigned char *file_name,
++ struct fsnotify_iter_info *iter_info)
+ {
+ struct fsnotify_group *group = NULL;
+ __u32 inode_test_mask = 0;
+@@ -182,7 +183,7 @@ static int send_to_group(struct inode *to_tell,
+
+ return group->ops->handle_event(group, to_tell, inode_mark,
+ vfsmount_mark, mask, data, data_is,
+- file_name, cookie);
++ file_name, cookie, iter_info);
+ }
+
+ /*
+@@ -197,8 +198,9 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+ struct hlist_node *inode_node = NULL, *vfsmount_node = NULL;
+ struct fsnotify_mark *inode_mark = NULL, *vfsmount_mark = NULL;
+ struct fsnotify_group *inode_group, *vfsmount_group;
++ struct fsnotify_iter_info iter_info;
+ struct mount *mnt;
+- int idx, ret = 0;
++ int ret = 0;
+ /* global tests shouldn't care about events on child only the specific event */
+ __u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);
+
+@@ -231,7 +233,7 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+ !(mnt && test_mask & mnt->mnt_fsnotify_mask))
+ return 0;
+
+- idx = srcu_read_lock(&fsnotify_mark_srcu);
++ iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
+
+ if ((mask & FS_MODIFY) ||
+ (test_mask & to_tell->i_fsnotify_mask))
+@@ -280,8 +282,13 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+ vfsmount_mark = NULL;
+ }
+ }
++
++ iter_info.inode_mark = inode_mark;
++ iter_info.vfsmount_mark = vfsmount_mark;
++
+ ret = send_to_group(to_tell, inode_mark, vfsmount_mark, mask,
+- data, data_is, cookie, file_name);
++ data, data_is, cookie, file_name,
++ &iter_info);
+
+ if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS))
+ goto out;
+@@ -295,7 +302,7 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+ }
+ ret = 0;
+ out:
+- srcu_read_unlock(&fsnotify_mark_srcu, idx);
++ srcu_read_unlock(&fsnotify_mark_srcu, iter_info.srcu_idx);
+
+ return ret;
+ }
+diff --git a/fs/notify/inotify/inotify.h b/fs/notify/inotify/inotify.h
+index ed855ef6f077..726b06b303b8 100644
+--- a/fs/notify/inotify/inotify.h
++++ b/fs/notify/inotify/inotify.h
+@@ -27,6 +27,7 @@ extern int inotify_handle_event(struct fsnotify_group *group,
+ struct fsnotify_mark *inode_mark,
+ struct fsnotify_mark *vfsmount_mark,
+ u32 mask, void *data, int data_type,
+- const unsigned char *file_name, u32 cookie);
++ const unsigned char *file_name, u32 cookie,
++ struct fsnotify_iter_info *iter_info);
+
+ extern const struct fsnotify_ops inotify_fsnotify_ops;
+diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
+index 2cd900c2c737..79a5f06b9100 100644
+--- a/fs/notify/inotify/inotify_fsnotify.c
++++ b/fs/notify/inotify/inotify_fsnotify.c
+@@ -67,7 +67,8 @@ int inotify_handle_event(struct fsnotify_group *group,
+ struct fsnotify_mark *inode_mark,
+ struct fsnotify_mark *vfsmount_mark,
+ u32 mask, void *data, int data_type,
+- const unsigned char *file_name, u32 cookie)
++ const unsigned char *file_name, u32 cookie,
++ struct fsnotify_iter_info *iter_info)
+ {
+ struct inotify_inode_mark *i_mark;
+ struct inotify_event_info *event;
+diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
+index 69d1ea3d292a..25336bd8d9aa 100644
+--- a/fs/notify/inotify/inotify_user.c
++++ b/fs/notify/inotify/inotify_user.c
+@@ -494,7 +494,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
+
+ /* Queue ignore event for the watch */
+ inotify_handle_event(group, NULL, fsn_mark, NULL, FS_IN_IGNORED,
+- NULL, FSNOTIFY_EVENT_NONE, NULL, 0);
++ NULL, FSNOTIFY_EVENT_NONE, NULL, 0, NULL);
+
+ i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
+ /* remove this mark from the idr */
+diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
+index 3de53a2b8944..4174d7e2898a 100644
+--- a/include/linux/fsnotify_backend.h
++++ b/include/linux/fsnotify_backend.h
+@@ -98,7 +98,8 @@ struct fsnotify_ops {
+ struct fsnotify_mark *inode_mark,
+ struct fsnotify_mark *vfsmount_mark,
+ u32 mask, void *data, int data_type,
+- const unsigned char *file_name, u32 cookie);
++ const unsigned char *file_name, u32 cookie,
++ struct fsnotify_iter_info *iter_info);
+ void (*free_group_priv)(struct fsnotify_group *group);
+ void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
+ void (*free_event)(struct fsnotify_event *event);
+diff --git a/kernel/audit_fsnotify.c b/kernel/audit_fsnotify.c
+index f84f8d06e1f6..231c0f3cbf2b 100644
+--- a/kernel/audit_fsnotify.c
++++ b/kernel/audit_fsnotify.c
+@@ -169,7 +169,8 @@ static int audit_mark_handle_event(struct fsnotify_group *group,
+ struct fsnotify_mark *inode_mark,
+ struct fsnotify_mark *vfsmount_mark,
+ u32 mask, void *data, int data_type,
+- const unsigned char *dname, u32 cookie)
++ const unsigned char *dname, u32 cookie,
++ struct fsnotify_iter_info *iter_info)
+ {
+ struct audit_fsnotify_mark *audit_mark;
+ struct inode *inode = NULL;
+diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
+index 25772476fa4a..b0bdebaf811d 100644
+--- a/kernel/audit_tree.c
++++ b/kernel/audit_tree.c
+@@ -949,7 +949,8 @@ static int audit_tree_handle_event(struct fsnotify_group *group,
+ struct fsnotify_mark *inode_mark,
+ struct fsnotify_mark *vfsmount_mark,
+ u32 mask, void *data, int data_type,
+- const unsigned char *file_name, u32 cookie)
++ const unsigned char *file_name, u32 cookie,
++ struct fsnotify_iter_info *iter_info)
+ {
+ return 0;
+ }
+diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
+index f036b6ada6ef..160d52b662e2 100644
+--- a/kernel/audit_watch.c
++++ b/kernel/audit_watch.c
+@@ -485,7 +485,8 @@ static int audit_watch_handle_event(struct fsnotify_group *group,
+ struct fsnotify_mark *inode_mark,
+ struct fsnotify_mark *vfsmount_mark,
+ u32 mask, void *data, int data_type,
+- const unsigned char *dname, u32 cookie)
++ const unsigned char *dname, u32 cookie,
++ struct fsnotify_iter_info *iter_info)
+ {
+ struct inode *inode;
+ struct audit_parent *parent;
+--
+2.19.1
+
--- /dev/null
+From a3ec50d0cb65c4d1f5a512500cedf44024dce3d6 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Thu, 11 Apr 2019 15:24:28 +1200
+Subject: fsnotify: Provide framework for dropping SRCU lock in ->handle_event
+
+commit abc77577a669f424c5d0c185b9994f2621c52aa4 upstream.
+
+fanotify wants to drop fsnotify_mark_srcu lock when waiting for response
+from userspace so that the whole notification subsystem is not blocked
+during that time. This patch provides a framework for safely getting
+mark reference for a mark found in the object list which pins the mark
+in that list. We can then drop fsnotify_mark_srcu, wait for userspace
+response and then safely continue iteration of the object list once we
+reacquire fsnotify_mark_srcu.
+
+Reviewed-by: Miklos Szeredi <mszeredi@redhat.com>
+Reviewed-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+[mruffell: backport: realign file fs/notify/mark.c]
+Signed-off-by: Matthew Ruffell <matthew.ruffell@canonical.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/notify/fsnotify.h | 6 +++
+ fs/notify/group.c | 1 +
+ fs/notify/mark.c | 83 +++++++++++++++++++++++++++++++-
+ include/linux/fsnotify_backend.h | 5 ++
+ 4 files changed, 94 insertions(+), 1 deletion(-)
+
+diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
+index 0a3bc2cf192c..0ad0eb9f2e14 100644
+--- a/fs/notify/fsnotify.h
++++ b/fs/notify/fsnotify.h
+@@ -8,6 +8,12 @@
+
+ #include "../mount.h"
+
++struct fsnotify_iter_info {
++ struct fsnotify_mark *inode_mark;
++ struct fsnotify_mark *vfsmount_mark;
++ int srcu_idx;
++};
++
+ /* destroy all events sitting in this groups notification queue */
+ extern void fsnotify_flush_notify(struct fsnotify_group *group);
+
+diff --git a/fs/notify/group.c b/fs/notify/group.c
+index fbe3cbebec16..864103b707f4 100644
+--- a/fs/notify/group.c
++++ b/fs/notify/group.c
+@@ -124,6 +124,7 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
+ /* set to 0 when there a no external references to this group */
+ atomic_set(&group->refcnt, 1);
+ atomic_set(&group->num_marks, 0);
++ atomic_set(&group->user_waits, 0);
+
+ spin_lock_init(&group->notification_lock);
+ INIT_LIST_HEAD(&group->notification_list);
+diff --git a/fs/notify/mark.c b/fs/notify/mark.c
+index d3fea0bd89e2..d3005d95d530 100644
+--- a/fs/notify/mark.c
++++ b/fs/notify/mark.c
+@@ -105,6 +105,16 @@ void fsnotify_get_mark(struct fsnotify_mark *mark)
+ atomic_inc(&mark->refcnt);
+ }
+
++/*
++ * Get mark reference when we found the mark via lockless traversal of object
++ * list. Mark can be already removed from the list by now and on its way to be
++ * destroyed once SRCU period ends.
++ */
++static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
++{
++ return atomic_inc_not_zero(&mark->refcnt);
++}
++
+ void fsnotify_put_mark(struct fsnotify_mark *mark)
+ {
+ if (atomic_dec_and_test(&mark->refcnt)) {
+@@ -125,6 +135,72 @@ u32 fsnotify_recalc_mask(struct hlist_head *head)
+ return new_mask;
+ }
+
++bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
++{
++ struct fsnotify_group *group;
++
++ if (WARN_ON_ONCE(!iter_info->inode_mark && !iter_info->vfsmount_mark))
++ return false;
++
++ if (iter_info->inode_mark)
++ group = iter_info->inode_mark->group;
++ else
++ group = iter_info->vfsmount_mark->group;
++
++ /*
++ * Since acquisition of mark reference is an atomic op as well, we can
++ * be sure this inc is seen before any effect of refcount increment.
++ */
++ atomic_inc(&group->user_waits);
++
++ if (iter_info->inode_mark) {
++ /* This can fail if mark is being removed */
++ if (!fsnotify_get_mark_safe(iter_info->inode_mark))
++ goto out_wait;
++ }
++ if (iter_info->vfsmount_mark) {
++ if (!fsnotify_get_mark_safe(iter_info->vfsmount_mark))
++ goto out_inode;
++ }
++
++ /*
++ * Now that both marks are pinned by refcount in the inode / vfsmount
++ * lists, we can drop SRCU lock, and safely resume the list iteration
++ * once userspace returns.
++ */
++ srcu_read_unlock(&fsnotify_mark_srcu, iter_info->srcu_idx);
++
++ return true;
++out_inode:
++ if (iter_info->inode_mark)
++ fsnotify_put_mark(iter_info->inode_mark);
++out_wait:
++ if (atomic_dec_and_test(&group->user_waits) && group->shutdown)
++ wake_up(&group->notification_waitq);
++ return false;
++}
++
++void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info)
++{
++ struct fsnotify_group *group = NULL;
++
++ iter_info->srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
++ if (iter_info->inode_mark) {
++ group = iter_info->inode_mark->group;
++ fsnotify_put_mark(iter_info->inode_mark);
++ }
++ if (iter_info->vfsmount_mark) {
++ group = iter_info->vfsmount_mark->group;
++ fsnotify_put_mark(iter_info->vfsmount_mark);
++ }
++ /*
++ * We abuse notification_waitq on group shutdown for waiting for all
++ * marks pinned when waiting for userspace.
++ */
++ if (atomic_dec_and_test(&group->user_waits) && group->shutdown)
++ wake_up(&group->notification_waitq);
++}
++
+ /*
+ * Remove mark from inode / vfsmount list, group list, drop inode reference
+ * if we got one.
+@@ -161,7 +237,6 @@ void fsnotify_detach_mark(struct fsnotify_mark *mark)
+ * __fsnotify_parent() lazily when next event happens on one of our
+ * children.
+ */
+-
+ list_del_init(&mark->g_list);
+
+ spin_unlock(&mark->lock);
+@@ -508,6 +583,12 @@ void fsnotify_detach_group_marks(struct fsnotify_group *group)
+ __fsnotify_free_mark(mark);
+ fsnotify_put_mark(mark);
+ }
++ /*
++ * Some marks can still be pinned when waiting for response from
++ * userspace. Wait for those now. fsnotify_prepare_user_wait() will
++ * not succeed now so this wait is race-free.
++ */
++ wait_event(group->notification_waitq, !atomic_read(&group->user_waits));
+ }
+
+ void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old)
+diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
+index 79467b239fcf..3de53a2b8944 100644
+--- a/include/linux/fsnotify_backend.h
++++ b/include/linux/fsnotify_backend.h
+@@ -79,6 +79,7 @@ struct fsnotify_event;
+ struct fsnotify_mark;
+ struct fsnotify_event_private_data;
+ struct fsnotify_fname;
++struct fsnotify_iter_info;
+
+ /*
+ * Each group much define these ops. The fsnotify infrastructure will call
+@@ -162,6 +163,8 @@ struct fsnotify_group {
+ struct fsnotify_event *overflow_event; /* Event we queue when the
+ * notification list is too
+ * full */
++ atomic_t user_waits; /* Number of tasks waiting for user
++ * response */
+
+ /* groups can define private fields here or use the void *private */
+ union {
+@@ -350,6 +353,8 @@ extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, un
+ extern void fsnotify_get_mark(struct fsnotify_mark *mark);
+ extern void fsnotify_put_mark(struct fsnotify_mark *mark);
+ extern void fsnotify_unmount_inodes(struct super_block *sb);
++extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
++extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
+
+ /* put here because inotify does some weird stuff when destroying watches */
+ extern void fsnotify_init_event(struct fsnotify_event *event,
+--
+2.19.1
+
--- /dev/null
+From 774c019e6d55abff5222350393febef71752289f Mon Sep 17 00:00:00 2001
+From: Nick Desaulniers <ndesaulniers@google.com>
+Date: Mon, 11 Feb 2019 11:30:04 -0800
+Subject: kbuild: clang: choose GCC_TOOLCHAIN_DIR not on LD
+
+commit ad15006cc78459d059af56729c4d9bed7c7fd860 upstream.
+
+This causes an issue when trying to build with `make LD=ld.lld` if
+ld.lld and the rest of your cross tools aren't in the same directory
+(ex. /usr/local/bin) (as is the case for Android's build system), as the
+GCC_TOOLCHAIN_DIR then gets set based on `which $(LD)` which will point
+where LLVM tools are, not GCC/binutils tools are located.
+
+Instead, select the GCC_TOOLCHAIN_DIR based on another tool provided by
+binutils for which LLVM does not provide a substitute for, such as
+elfedit.
+
+Fixes: 785f11aa595b ("kbuild: Add better clang cross build support")
+Link: https://github.com/ClangBuiltLinux/linux/issues/341
+Suggested-by: Nathan Chancellor <natechancellor@gmail.com>
+Reviewed-by: Nathan Chancellor <natechancellor@gmail.com>
+Tested-by: Nathan Chancellor <natechancellor@gmail.com>
+Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Makefile | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/Makefile b/Makefile
+index f44094d2b147..ba3c48809f44 100644
+--- a/Makefile
++++ b/Makefile
+@@ -507,7 +507,7 @@ endif
+ ifeq ($(cc-name),clang)
+ ifneq ($(CROSS_COMPILE),)
+ CLANG_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%))
+-GCC_TOOLCHAIN_DIR := $(dir $(shell which $(LD)))
++GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
+ CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
+ GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
+ endif
+--
+2.19.1
+
--- /dev/null
+From ea40995258750d1f15b8a1b00a3c9110cddff230 Mon Sep 17 00:00:00 2001
+From: Nick Desaulniers <ndesaulniers@google.com>
+Date: Fri, 5 Apr 2019 18:38:45 -0700
+Subject: lib/string.c: implement a basic bcmp
+
+[ Upstream commit 5f074f3e192f10c9fade898b9b3b8812e3d83342 ]
+
+A recent optimization in Clang (r355672) lowers comparisons of the
+return value of memcmp against zero to comparisons of the return value
+of bcmp against zero. This helps some platforms that implement bcmp
+more efficiently than memcmp. glibc simply aliases bcmp to memcmp, but
+an optimized implementation is in the works.
+
+This results in linkage failures for all targets with Clang due to the
+undefined symbol. For now, just implement bcmp as a tailcall to memcmp
+to unbreak the build. This routine can be further optimized in the
+future.
+
+Other ideas discussed:
+
+ * A weak alias was discussed, but breaks for architectures that define
+ their own implementations of memcmp since aliases to declarations are
+ not permitted (only definitions). Arch-specific memcmp
+ implementations typically declare memcmp in C headers, but implement
+ them in assembly.
+
+ * -ffreestanding also is used sporadically throughout the kernel.
+
+ * -fno-builtin-bcmp doesn't work when doing LTO.
+
+Link: https://bugs.llvm.org/show_bug.cgi?id=41035
+Link: https://code.woboq.org/userspace/glibc/string/memcmp.c.html#bcmp
+Link: https://github.com/llvm/llvm-project/commit/8e16d73346f8091461319a7dfc4ddd18eedcff13
+Link: https://github.com/ClangBuiltLinux/linux/issues/416
+Link: http://lkml.kernel.org/r/20190313211335.165605-1-ndesaulniers@google.com
+Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
+Reported-by: Nathan Chancellor <natechancellor@gmail.com>
+Reported-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+Suggested-by: Arnd Bergmann <arnd@arndb.de>
+Suggested-by: James Y Knight <jyknight@google.com>
+Suggested-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+Suggested-by: Nathan Chancellor <natechancellor@gmail.com>
+Suggested-by: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Reviewed-by: Nathan Chancellor <natechancellor@gmail.com>
+Tested-by: Nathan Chancellor <natechancellor@gmail.com>
+Reviewed-by: Masahiro Yamada <yamada.masahiro@socionext.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Cc: David Laight <David.Laight@ACULAB.COM>
+Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/string.h | 3 +++
+ lib/string.c | 20 ++++++++++++++++++++
+ 2 files changed, 23 insertions(+)
+
+diff --git a/include/linux/string.h b/include/linux/string.h
+index 60042e5e88ff..42eed573ebb6 100644
+--- a/include/linux/string.h
++++ b/include/linux/string.h
+@@ -111,6 +111,9 @@ extern void * memscan(void *,int,__kernel_size_t);
+ #ifndef __HAVE_ARCH_MEMCMP
+ extern int memcmp(const void *,const void *,__kernel_size_t);
+ #endif
++#ifndef __HAVE_ARCH_BCMP
++extern int bcmp(const void *,const void *,__kernel_size_t);
++#endif
+ #ifndef __HAVE_ARCH_MEMCHR
+ extern void * memchr(const void *,int,__kernel_size_t);
+ #endif
+diff --git a/lib/string.c b/lib/string.c
+index ed83562a53ae..1cd9757291b1 100644
+--- a/lib/string.c
++++ b/lib/string.c
+@@ -772,6 +772,26 @@ __visible int memcmp(const void *cs, const void *ct, size_t count)
+ EXPORT_SYMBOL(memcmp);
+ #endif
+
++#ifndef __HAVE_ARCH_BCMP
++/**
++ * bcmp - returns 0 if and only if the buffers have identical contents.
++ * @a: pointer to first buffer.
++ * @b: pointer to second buffer.
++ * @len: size of buffers.
++ *
++ * The sign or magnitude of a non-zero return value has no particular
++ * meaning, and architectures may implement their own more efficient bcmp(). So
++ * while this particular implementation is a simple (tail) call to memcmp, do
++ * not rely on anything but whether the return value is zero or non-zero.
++ */
++#undef bcmp
++int bcmp(const void *a, const void *b, size_t len)
++{
++ return memcmp(a, b, len);
++}
++EXPORT_SYMBOL(bcmp);
++#endif
++
+ #ifndef __HAVE_ARCH_MEMSCAN
+ /**
+ * memscan - Find a character in an area of memory.
+--
+2.19.1
+
--- /dev/null
+From 383013ba2fae87077b34ae8ba5e627d7a8a48f6f Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 11 Apr 2019 21:46:08 +1000
+Subject: powerpc/64: Add CONFIG_PPC_BARRIER_NOSPEC
+
+commit 179ab1cbf883575c3a585bcfc0f2160f1d22a149 upstream.
+
+Add a config symbol to encode which platforms support the
+barrier_nospec speculation barrier. Currently this is just Book3S 64
+but we will add Book3E in a future patch.
+
+Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/Kconfig | 7 ++++++-
+ arch/powerpc/include/asm/barrier.h | 6 +++---
+ arch/powerpc/include/asm/setup.h | 2 +-
+ arch/powerpc/kernel/Makefile | 3 ++-
+ arch/powerpc/kernel/module.c | 4 +++-
+ arch/powerpc/kernel/vmlinux.lds.S | 4 +++-
+ arch/powerpc/lib/feature-fixups.c | 6 ++++--
+ 7 files changed, 22 insertions(+), 10 deletions(-)
+
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index 0a6bb48854e3..a238698178fc 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -128,7 +128,7 @@ config PPC
+ select ARCH_HAS_GCOV_PROFILE_ALL
+ select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_CMOS_UPDATE
+- select GENERIC_CPU_VULNERABILITIES if PPC_BOOK3S_64
++ select GENERIC_CPU_VULNERABILITIES if PPC_BARRIER_NOSPEC
+ select GENERIC_TIME_VSYSCALL_OLD
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_CLOCKEVENTS_BROADCAST if SMP
+@@ -164,6 +164,11 @@ config PPC
+ select HAVE_ARCH_HARDENED_USERCOPY
+ select HAVE_KERNEL_GZIP
+
++config PPC_BARRIER_NOSPEC
++ bool
++ default y
++ depends on PPC_BOOK3S_64
++
+ config GENERIC_CSUM
+ def_bool CPU_LITTLE_ENDIAN
+
+diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
+index a8131162104f..465a64316897 100644
+--- a/arch/powerpc/include/asm/barrier.h
++++ b/arch/powerpc/include/asm/barrier.h
+@@ -77,7 +77,7 @@ do { \
+
+ #define smp_mb__before_spinlock() smp_mb()
+
+-#ifdef CONFIG_PPC_BOOK3S_64
++#ifdef CONFIG_PPC_BARRIER_NOSPEC
+ /*
+ * Prevent execution of subsequent instructions until preceding branches have
+ * been fully resolved and are no longer executing speculatively.
+@@ -87,10 +87,10 @@ do { \
+ // This also acts as a compiler barrier due to the memory clobber.
+ #define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
+
+-#else /* !CONFIG_PPC_BOOK3S_64 */
++#else /* !CONFIG_PPC_BARRIER_NOSPEC */
+ #define barrier_nospec_asm
+ #define barrier_nospec()
+-#endif
++#endif /* CONFIG_PPC_BARRIER_NOSPEC */
+
+ #include <asm-generic/barrier.h>
+
+diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
+index 84ae150ce6a6..38525bd2ed65 100644
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -55,7 +55,7 @@ void setup_barrier_nospec(void);
+ void do_barrier_nospec_fixups(bool enable);
+ extern bool barrier_nospec_enabled;
+
+-#ifdef CONFIG_PPC_BOOK3S_64
++#ifdef CONFIG_PPC_BARRIER_NOSPEC
+ void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
+ #else
+ static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
+diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
+index 13885786282b..d80fbf0884ff 100644
+--- a/arch/powerpc/kernel/Makefile
++++ b/arch/powerpc/kernel/Makefile
+@@ -44,9 +44,10 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
+ obj-$(CONFIG_VDSO32) += vdso32/
+ obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+ obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o
+-obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o security.o
++obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
+ obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
+ obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
++obj-$(CONFIG_PPC_BARRIER_NOSPEC) += security.o
+ obj-$(CONFIG_PPC64) += vdso64/
+ obj-$(CONFIG_ALTIVEC) += vecemu.o
+ obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
+diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
+index d30f0626dcd0..3b1c3bb91025 100644
+--- a/arch/powerpc/kernel/module.c
++++ b/arch/powerpc/kernel/module.c
+@@ -72,13 +72,15 @@ int module_finalize(const Elf_Ehdr *hdr,
+ do_feature_fixups(powerpc_firmware_features,
+ (void *)sect->sh_addr,
+ (void *)sect->sh_addr + sect->sh_size);
++#endif /* CONFIG_PPC64 */
+
++#ifdef CONFIG_PPC_BARRIER_NOSPEC
+ sect = find_section(hdr, sechdrs, "__spec_barrier_fixup");
+ if (sect != NULL)
+ do_barrier_nospec_fixups_range(barrier_nospec_enabled,
+ (void *)sect->sh_addr,
+ (void *)sect->sh_addr + sect->sh_size);
+-#endif
++#endif /* CONFIG_PPC_BARRIER_NOSPEC */
+
+ sect = find_section(hdr, sechdrs, "__lwsync_fixup");
+ if (sect != NULL)
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index 61def0be6914..5c6cf58943b9 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -153,14 +153,16 @@ SECTIONS
+ *(__rfi_flush_fixup)
+ __stop___rfi_flush_fixup = .;
+ }
++#endif /* CONFIG_PPC64 */
+
++#ifdef CONFIG_PPC_BARRIER_NOSPEC
+ . = ALIGN(8);
+ __spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
+ __start___barrier_nospec_fixup = .;
+ *(__barrier_nospec_fixup)
+ __stop___barrier_nospec_fixup = .;
+ }
+-#endif
++#endif /* CONFIG_PPC_BARRIER_NOSPEC */
+
+ EXCEPTION_TABLE(0)
+
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index a1222c441df5..5df57f7bae0a 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -304,6 +304,9 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
+ printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
+ }
+
++#endif /* CONFIG_PPC_BOOK3S_64 */
++
++#ifdef CONFIG_PPC_BARRIER_NOSPEC
+ void do_barrier_nospec_fixups(bool enable)
+ {
+ void *start, *end;
+@@ -313,8 +316,7 @@ void do_barrier_nospec_fixups(bool enable)
+
+ do_barrier_nospec_fixups_range(enable, start, end);
+ }
+-
+-#endif /* CONFIG_PPC_BOOK3S_64 */
++#endif /* CONFIG_PPC_BARRIER_NOSPEC */
+
+ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+ {
+--
+2.19.1
+
--- /dev/null
+From 775d4400ef158a74116b6eff9a2e73dc54f41098 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 11 Apr 2019 21:46:09 +1000
+Subject: powerpc/64: Call setup_barrier_nospec() from setup_arch()
+
+commit af375eefbfb27cbb5b831984e66d724a40d26b5c upstream.
+
+Currently we require platform code to call setup_barrier_nospec(). But
+if we add an empty definition for the !CONFIG_PPC_BARRIER_NOSPEC case
+then we can call it in setup_arch().
+
+Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/setup.h | 4 ++++
+ arch/powerpc/kernel/setup-common.c | 2 ++
+ arch/powerpc/platforms/powernv/setup.c | 1 -
+ arch/powerpc/platforms/pseries/setup.c | 1 -
+ 4 files changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
+index 38525bd2ed65..d3e9da62d029 100644
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -51,7 +51,11 @@ enum l1d_flush_type {
+
+ void setup_rfi_flush(enum l1d_flush_type, bool enable);
+ void do_rfi_flush_fixups(enum l1d_flush_type types);
++#ifdef CONFIG_PPC_BARRIER_NOSPEC
+ void setup_barrier_nospec(void);
++#else
++static inline void setup_barrier_nospec(void) { };
++#endif
+ void do_barrier_nospec_fixups(bool enable);
+ extern bool barrier_nospec_enabled;
+
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index bf0f712ac0e0..d5a128f54537 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -918,6 +918,8 @@ void __init setup_arch(char **cmdline_p)
+ if (ppc_md.setup_arch)
+ ppc_md.setup_arch();
+
++ setup_barrier_nospec();
++
+ paging_init();
+
+ /* Initialize the MMU context management stuff. */
+diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
+index eb5464648810..17203abf38e8 100644
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -123,7 +123,6 @@ static void pnv_setup_rfi_flush(void)
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
+
+ setup_rfi_flush(type, enable);
+- setup_barrier_nospec();
+ }
+
+ static void __init pnv_setup_arch(void)
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index 2b2759c98c7e..91ade7755823 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -525,7 +525,6 @@ void pseries_setup_rfi_flush(void)
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
+
+ setup_rfi_flush(types, enable);
+- setup_barrier_nospec();
+ }
+
+ static void __init pSeries_setup_arch(void)
+--
+2.19.1
+
--- /dev/null
+From 1741f81ec0112742e7c2d56111c0fb37fa27ae2d Mon Sep 17 00:00:00 2001
+From: Diana Craciun <diana.craciun@nxp.com>
+Date: Thu, 11 Apr 2019 21:46:06 +1000
+Subject: powerpc/64: Disable the speculation barrier from the command line
+
+commit cf175dc315f90185128fb061dc05b6fbb211aa2f upstream.
+
+The speculation barrier can be disabled from the command line
+with the parameter: "nospectre_v1".
+
+Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/security.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index bf298d0c475f..813e38ff81ce 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -17,6 +17,7 @@
+ unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
+
+ bool barrier_nospec_enabled;
++static bool no_nospec;
+
+ static void enable_barrier_nospec(bool enable)
+ {
+@@ -43,9 +44,18 @@ void setup_barrier_nospec(void)
+ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+ security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
+
+- enable_barrier_nospec(enable);
++ if (!no_nospec)
++ enable_barrier_nospec(enable);
+ }
+
++static int __init handle_nospectre_v1(char *p)
++{
++ no_nospec = true;
++
++ return 0;
++}
++early_param("nospectre_v1", handle_nospectre_v1);
++
+ #ifdef CONFIG_DEBUG_FS
+ static int barrier_nospec_set(void *data, u64 val)
+ {
+--
+2.19.1
+
--- /dev/null
+From 1ec953386c7c23f6699e325970b30a555f563646 Mon Sep 17 00:00:00 2001
+From: Diana Craciun <diana.craciun@nxp.com>
+Date: Thu, 11 Apr 2019 21:46:10 +1000
+Subject: powerpc/64: Make meltdown reporting Book3S 64 specific
+
+commit 406d2b6ae3420f5bb2b3db6986dc6f0b6dbb637b upstream.
+
+In a subsequent patch we will enable building security.c for Book3E.
+However the NXP platforms are not vulnerable to Meltdown, so make the
+Meltdown vulnerability reporting PPC_BOOK3S_64 specific.
+
+Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
+[mpe: Split out of larger patch]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/security.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index 926ed3c38741..2f30fc8ed0a8 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -93,6 +93,7 @@ static __init int barrier_nospec_debugfs_init(void)
+ device_initcall(barrier_nospec_debugfs_init);
+ #endif /* CONFIG_DEBUG_FS */
+
++#ifdef CONFIG_PPC_BOOK3S_64
+ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+ bool thread_priv;
+@@ -125,6 +126,7 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha
+
+ return sprintf(buf, "Vulnerable\n");
+ }
++#endif
+
+ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+--
+2.19.1
+
--- /dev/null
+From e459c26b740a2b619098ce3da2f060005d6700c0 Mon Sep 17 00:00:00 2001
+From: Diana Craciun <diana.craciun@nxp.com>
+Date: Thu, 11 Apr 2019 21:46:07 +1000
+Subject: powerpc/64: Make stf barrier PPC_BOOK3S_64 specific.
+
+commit 6453b532f2c8856a80381e6b9a1f5ea2f12294df upstream.
+
+NXP Book3E platforms are not vulnerable to speculative store
+bypass, so make the mitigations PPC_BOOK3S_64 specific.
+
+Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/security.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index 813e38ff81ce..926ed3c38741 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -177,6 +177,7 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
+ return s.len;
+ }
+
++#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * Store-forwarding barrier support.
+ */
+@@ -324,3 +325,4 @@ static __init int stf_barrier_debugfs_init(void)
+ }
+ device_initcall(stf_barrier_debugfs_init);
+ #endif /* CONFIG_DEBUG_FS */
++#endif /* CONFIG_PPC_BOOK3S_64 */
+--
+2.19.1
+
--- /dev/null
+From 0e943bc9866e81fd598e0ab35bb670fd2cd676ad Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 11 Apr 2019 21:46:03 +1000
+Subject: powerpc/64: Use barrier_nospec in syscall entry
+
+commit 51973a815c6b46d7b23b68d6af371ad1c9d503ca upstream.
+
+Our syscall entry is done in assembly so patch in an explicit
+barrier_nospec.
+
+Based on a patch by Michal Suchanek.
+
+Signed-off-by: Michal Suchanek <msuchanek@suse.de>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/entry_64.S | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index e24ae0fa80ed..11e390662384 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -38,6 +38,7 @@
+ #include <asm/context_tracking.h>
+ #include <asm/tm.h>
+ #include <asm/ppc-opcode.h>
++#include <asm/barrier.h>
+ #include <asm/export.h>
+ #ifdef CONFIG_PPC_BOOK3S
+ #include <asm/exception-64s.h>
+@@ -180,6 +181,15 @@ system_call: /* label this so stack traces look sane */
+ clrldi r8,r8,32
+ 15:
+ slwi r0,r0,4
++
++ barrier_nospec_asm
++ /*
++ * Prevent the load of the handler below (based on the user-passed
++ * system call number) being speculatively executed until the test
++ * against NR_syscalls and branch to .Lsyscall_enosys above has
++ * committed.
++ */
++
+ ldx r12,r11,r0 /* Fetch system call handler [ptr] */
+ mtctr r12
+ bctrl /* Call handler */
+--
+2.19.1
+
--- /dev/null
+From ae3def2b67ef8b1755e9f1249b95e4561191007d Mon Sep 17 00:00:00 2001
+From: Michal Suchanek <msuchanek@suse.de>
+Date: Thu, 11 Apr 2019 21:45:57 +1000
+Subject: powerpc/64s: Add barrier_nospec
+
+commit a6b3964ad71a61bb7c61d80a60bea7d42187b2eb upstream.
+
+A no-op form of ori (or immediate of 0 into r31 and the result stored
+in r31) has been re-tasked as a speculation barrier. The instruction
+only acts as a barrier on newer machines with appropriate firmware
+support. On older CPUs it remains a harmless no-op.
+
+Implement barrier_nospec using this instruction.
+
+mpe: The semantics of the instruction are believed to be that it
+prevents execution of subsequent instructions until preceding branches
+have been fully resolved and are no longer executing speculatively.
+There is no further documentation available at this time.
+
+Signed-off-by: Michal Suchanek <msuchanek@suse.de>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/barrier.h | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
+index 798ab37c9930..352ea3e3cc05 100644
+--- a/arch/powerpc/include/asm/barrier.h
++++ b/arch/powerpc/include/asm/barrier.h
+@@ -77,6 +77,21 @@ do { \
+
+ #define smp_mb__before_spinlock() smp_mb()
+
++#ifdef CONFIG_PPC_BOOK3S_64
++/*
++ * Prevent execution of subsequent instructions until preceding branches have
++ * been fully resolved and are no longer executing speculatively.
++ */
++#define barrier_nospec_asm ori 31,31,0
++
++// This also acts as a compiler barrier due to the memory clobber.
++#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
++
++#else /* !CONFIG_PPC_BOOK3S_64 */
++#define barrier_nospec_asm
++#define barrier_nospec()
++#endif
++
+ #include <asm-generic/barrier.h>
+
+ #endif /* _ASM_POWERPC_BARRIER_H */
+--
+2.19.1
+
--- /dev/null
+From 0d410a2b0af293b32f2a960ada1310bb1df5cd19 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 11 Apr 2019 21:46:14 +1000
+Subject: powerpc/64s: Add new security feature flags for count cache flush
+
+commit dc8c6cce9a26a51fc19961accb978217a3ba8c75 upstream.
+
+Add security feature flags to indicate the need for software to flush
+the count cache on context switch, and for the presence of a hardware
+assisted count cache flush.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/security_features.h | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
+index 44989b22383c..a0d47bc18a5c 100644
+--- a/arch/powerpc/include/asm/security_features.h
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -59,6 +59,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
+ // Indirect branch prediction cache disabled
+ #define SEC_FTR_COUNT_CACHE_DISABLED 0x0000000000000020ull
+
++// bcctr 2,0,0 triggers a hardware assisted count cache flush
++#define SEC_FTR_BCCTR_FLUSH_ASSIST 0x0000000000000800ull
++
+
+ // Features indicating need for Spectre/Meltdown mitigations
+
+@@ -74,6 +77,9 @@ static inline bool security_ftr_enabled(unsigned long feature)
+ // Firmware configuration indicates user favours security over performance
+ #define SEC_FTR_FAVOUR_SECURITY 0x0000000000000200ull
+
++// Software required to flush count cache on context switch
++#define SEC_FTR_FLUSH_COUNT_CACHE 0x0000000000000400ull
++
+
+ // Features enabled by default
+ #define SEC_FTR_DEFAULT \
+--
+2.19.1
+
--- /dev/null
+From 4e7c5f670b90566d2a24e0d694c7f8b2a25afc61 Mon Sep 17 00:00:00 2001
+From: Michal Suchanek <msuchanek@suse.de>
+Date: Thu, 11 Apr 2019 21:45:58 +1000
+Subject: powerpc/64s: Add support for ori barrier_nospec patching
+
+commit 2eea7f067f495e33b8b116b35b5988ab2b8aec55 upstream.
+
+Based on the RFI patching. This is required to be able to disable the
+speculation barrier.
+
+Only one barrier type is supported and it does nothing when the
+firmware does not enable it. Also re-patching modules is not supported
+So the only meaningful thing that can be done is patching out the
+speculation barrier at boot when the user says it is not wanted.
+
+Signed-off-by: Michal Suchanek <msuchanek@suse.de>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/barrier.h | 2 +-
+ arch/powerpc/include/asm/feature-fixups.h | 9 ++++++++
+ arch/powerpc/include/asm/setup.h | 1 +
+ arch/powerpc/kernel/security.c | 9 ++++++++
+ arch/powerpc/kernel/vmlinux.lds.S | 7 ++++++
+ arch/powerpc/lib/feature-fixups.c | 27 +++++++++++++++++++++++
+ 6 files changed, 54 insertions(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
+index 352ea3e3cc05..a8131162104f 100644
+--- a/arch/powerpc/include/asm/barrier.h
++++ b/arch/powerpc/include/asm/barrier.h
+@@ -82,7 +82,7 @@ do { \
+ * Prevent execution of subsequent instructions until preceding branches have
+ * been fully resolved and are no longer executing speculatively.
+ */
+-#define barrier_nospec_asm ori 31,31,0
++#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; nop
+
+ // This also acts as a compiler barrier due to the memory clobber.
+ #define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
+diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
+index 0bf8202feca6..afd3efd38938 100644
+--- a/arch/powerpc/include/asm/feature-fixups.h
++++ b/arch/powerpc/include/asm/feature-fixups.h
+@@ -213,6 +213,14 @@ void setup_feature_keys(void);
+ FTR_ENTRY_OFFSET 951b-952b; \
+ .popsection;
+
++#define NOSPEC_BARRIER_FIXUP_SECTION \
++953: \
++ .pushsection __barrier_nospec_fixup,"a"; \
++ .align 2; \
++954: \
++ FTR_ENTRY_OFFSET 953b-954b; \
++ .popsection;
++
+
+ #ifndef __ASSEMBLY__
+
+@@ -220,6 +228,7 @@ extern long stf_barrier_fallback;
+ extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
+ extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
+ extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
++extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
+
+ #endif
+
+diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
+index 3f160cd20107..703ddf752516 100644
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -50,6 +50,7 @@ enum l1d_flush_type {
+
+ void setup_rfi_flush(enum l1d_flush_type, bool enable);
+ void do_rfi_flush_fixups(enum l1d_flush_type types);
++void do_barrier_nospec_fixups(bool enable);
+
+ #endif /* !__ASSEMBLY__ */
+
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index 2277df84ef6e..8b1cf9c81b82 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -10,10 +10,19 @@
+ #include <linux/seq_buf.h>
+
+ #include <asm/security_features.h>
++#include <asm/setup.h>
+
+
+ unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
+
++static bool barrier_nospec_enabled;
++
++static void enable_barrier_nospec(bool enable)
++{
++ barrier_nospec_enabled = enable;
++ do_barrier_nospec_fixups(enable);
++}
++
+ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+ bool thread_priv;
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index c16fddbb6ab8..61def0be6914 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -153,6 +153,13 @@ SECTIONS
+ *(__rfi_flush_fixup)
+ __stop___rfi_flush_fixup = .;
+ }
++
++ . = ALIGN(8);
++ __spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
++ __start___barrier_nospec_fixup = .;
++ *(__barrier_nospec_fixup)
++ __stop___barrier_nospec_fixup = .;
++ }
+ #endif
+
+ EXCEPTION_TABLE(0)
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index cf1398e3c2e0..f82ae6bb2365 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -277,6 +277,33 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
+ (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
+ : "unknown");
+ }
++
++void do_barrier_nospec_fixups(bool enable)
++{
++ unsigned int instr, *dest;
++ long *start, *end;
++ int i;
++
++ start = PTRRELOC(&__start___barrier_nospec_fixup),
++ end = PTRRELOC(&__stop___barrier_nospec_fixup);
++
++ instr = 0x60000000; /* nop */
++
++ if (enable) {
++ pr_info("barrier-nospec: using ORI speculation barrier\n");
++ instr = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++ }
++
++ for (i = 0; start < end; start++, i++) {
++ dest = (void *)start + *start;
++
++ pr_devel("patching dest %lx\n", (unsigned long)dest);
++ patch_instruction(dest, instr);
++ }
++
++ printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
++}
++
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+
+ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+--
+2.19.1
+
--- /dev/null
+From 3fb8b0198c782bd80b6e8ea17578952fe7366d02 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 11 Apr 2019 21:46:15 +1000
+Subject: powerpc/64s: Add support for software count cache flush
+
+commit ee13cb249fabdff8b90aaff61add347749280087 upstream.
+
+Some CPU revisions support a mode where the count cache needs to be
+flushed by software on context switch. Additionally some revisions may
+have a hardware accelerated flush, in which case the software flush
+sequence can be shortened.
+
+If we detect the appropriate flag from firmware we patch a branch
+into _switch() which takes us to a count cache flush sequence.
+
+That sequence in turn may be patched to return early if we detect that
+the CPU supports accelerating the flush sequence in hardware.
+
+Add debugfs support for reporting the state of the flush, as well as
+runtime disabling it.
+
+And modify the spectre_v2 sysfs file to report the state of the
+software flush.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/asm-prototypes.h | 6 ++
+ arch/powerpc/include/asm/security_features.h | 1 +
+ arch/powerpc/kernel/entry_64.S | 54 +++++++++++
+ arch/powerpc/kernel/security.c | 98 +++++++++++++++++++-
+ 4 files changed, 154 insertions(+), 5 deletions(-)
+
+diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
+index e0baba1535e6..f3daa175f86c 100644
+--- a/arch/powerpc/include/asm/asm-prototypes.h
++++ b/arch/powerpc/include/asm/asm-prototypes.h
+@@ -121,4 +121,10 @@ extern s64 __ashrdi3(s64, int);
+ extern int __cmpdi2(s64, s64);
+ extern int __ucmpdi2(u64, u64);
+
++/* Patch sites */
++extern s32 patch__call_flush_count_cache;
++extern s32 patch__flush_count_cache_return;
++
++extern long flush_count_cache;
++
+ #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */
+diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
+index a0d47bc18a5c..759597bf0fd8 100644
+--- a/arch/powerpc/include/asm/security_features.h
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -22,6 +22,7 @@ enum stf_barrier_type {
+
+ void setup_stf_barrier(void);
+ void do_stf_barrier_fixups(enum stf_barrier_type types);
++void setup_count_cache_flush(void);
+
+ static inline void security_ftr_set(unsigned long feature)
+ {
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index 11e390662384..6625cec9e7c0 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -26,6 +26,7 @@
+ #include <asm/page.h>
+ #include <asm/mmu.h>
+ #include <asm/thread_info.h>
++#include <asm/code-patching-asm.h>
+ #include <asm/ppc_asm.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/cputable.h>
+@@ -483,6 +484,57 @@ _GLOBAL(ret_from_kernel_thread)
+ li r3,0
+ b .Lsyscall_exit
+
++#ifdef CONFIG_PPC_BOOK3S_64
++
++#define FLUSH_COUNT_CACHE \
++1: nop; \
++ patch_site 1b, patch__call_flush_count_cache
++
++
++#define BCCTR_FLUSH .long 0x4c400420
++
++.macro nops number
++ .rept \number
++ nop
++ .endr
++.endm
++
++.balign 32
++.global flush_count_cache
++flush_count_cache:
++ /* Save LR into r9 */
++ mflr r9
++
++ .rept 64
++ bl .+4
++ .endr
++ b 1f
++ nops 6
++
++ .balign 32
++ /* Restore LR */
++1: mtlr r9
++ li r9,0x7fff
++ mtctr r9
++
++ BCCTR_FLUSH
++
++2: nop
++ patch_site 2b patch__flush_count_cache_return
++
++ nops 3
++
++ .rept 278
++ .balign 32
++ BCCTR_FLUSH
++ nops 7
++ .endr
++
++ blr
++#else
++#define FLUSH_COUNT_CACHE
++#endif /* CONFIG_PPC_BOOK3S_64 */
++
+ /*
+ * This routine switches between two different tasks. The process
+ * state of one is saved on its kernel stack. Then the state
+@@ -514,6 +566,8 @@ _GLOBAL(_switch)
+ std r23,_CCR(r1)
+ std r1,KSP(r3) /* Set old stack pointer */
+
++ FLUSH_COUNT_CACHE
++
+ #ifdef CONFIG_SMP
+ /* We need a sync somewhere here to make sure that if the
+ * previous task gets rescheduled on another CPU, it sees all
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index 2f30fc8ed0a8..fd4703b6ddc0 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -9,6 +9,8 @@
+ #include <linux/device.h>
+ #include <linux/seq_buf.h>
+
++#include <asm/asm-prototypes.h>
++#include <asm/code-patching.h>
+ #include <asm/debug.h>
+ #include <asm/security_features.h>
+ #include <asm/setup.h>
+@@ -16,6 +18,13 @@
+
+ unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
+
++enum count_cache_flush_type {
++ COUNT_CACHE_FLUSH_NONE = 0x1,
++ COUNT_CACHE_FLUSH_SW = 0x2,
++ COUNT_CACHE_FLUSH_HW = 0x4,
++};
++static enum count_cache_flush_type count_cache_flush_type;
++
+ bool barrier_nospec_enabled;
+ static bool no_nospec;
+
+@@ -160,17 +169,29 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
+ bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
+ ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
+
+- if (bcs || ccd) {
++ if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
++ bool comma = false;
+ seq_buf_printf(&s, "Mitigation: ");
+
+- if (bcs)
++ if (bcs) {
+ seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
++ comma = true;
++ }
++
++ if (ccd) {
++ if (comma)
++ seq_buf_printf(&s, ", ");
++ seq_buf_printf(&s, "Indirect branch cache disabled");
++ comma = true;
++ }
+
+- if (bcs && ccd)
++ if (comma)
+ seq_buf_printf(&s, ", ");
+
+- if (ccd)
+- seq_buf_printf(&s, "Indirect branch cache disabled");
++ seq_buf_printf(&s, "Software count cache flush");
++
++ if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
++ seq_buf_printf(&s, "(hardware accelerated)");
+ } else
+ seq_buf_printf(&s, "Vulnerable");
+
+@@ -327,4 +348,71 @@ static __init int stf_barrier_debugfs_init(void)
+ }
+ device_initcall(stf_barrier_debugfs_init);
+ #endif /* CONFIG_DEBUG_FS */
++
++static void toggle_count_cache_flush(bool enable)
++{
++ if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) {
++ patch_instruction_site(&patch__call_flush_count_cache, PPC_INST_NOP);
++ count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
++ pr_info("count-cache-flush: software flush disabled.\n");
++ return;
++ }
++
++ patch_branch_site(&patch__call_flush_count_cache,
++ (u64)&flush_count_cache, BRANCH_SET_LINK);
++
++ if (!security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) {
++ count_cache_flush_type = COUNT_CACHE_FLUSH_SW;
++ pr_info("count-cache-flush: full software flush sequence enabled.\n");
++ return;
++ }
++
++ patch_instruction_site(&patch__flush_count_cache_return, PPC_INST_BLR);
++ count_cache_flush_type = COUNT_CACHE_FLUSH_HW;
++ pr_info("count-cache-flush: hardware assisted flush sequence enabled\n");
++}
++
++void setup_count_cache_flush(void)
++{
++ toggle_count_cache_flush(true);
++}
++
++#ifdef CONFIG_DEBUG_FS
++static int count_cache_flush_set(void *data, u64 val)
++{
++ bool enable;
++
++ if (val == 1)
++ enable = true;
++ else if (val == 0)
++ enable = false;
++ else
++ return -EINVAL;
++
++ toggle_count_cache_flush(enable);
++
++ return 0;
++}
++
++static int count_cache_flush_get(void *data, u64 *val)
++{
++ if (count_cache_flush_type == COUNT_CACHE_FLUSH_NONE)
++ *val = 0;
++ else
++ *val = 1;
++
++ return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get,
++ count_cache_flush_set, "%llu\n");
++
++static __init int count_cache_flush_debugfs_init(void)
++{
++ debugfs_create_file("count_cache_flush", 0600, powerpc_debugfs_root,
++ NULL, &fops_count_cache_flush);
++ return 0;
++}
++device_initcall(count_cache_flush_debugfs_init);
++#endif /* CONFIG_DEBUG_FS */
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+--
+2.19.1
+
--- /dev/null
+From 919e979557d238b97468b72de9ca82d10f279a93 Mon Sep 17 00:00:00 2001
+From: Michal Suchanek <msuchanek@suse.de>
+Date: Thu, 11 Apr 2019 21:46:01 +1000
+Subject: powerpc/64s: Enable barrier_nospec based on firmware settings
+
+commit cb3d6759a93c6d0aea1c10deb6d00e111c29c19c upstream.
+
+Check what firmware told us and enable/disable the barrier_nospec as
+appropriate.
+
+We err on the side of enabling the barrier, as it's a no-op on older
+systems; see the comment for more detail.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/setup.h | 1 +
+ arch/powerpc/kernel/security.c | 60 ++++++++++++++++++++++++++
+ arch/powerpc/platforms/powernv/setup.c | 1 +
+ arch/powerpc/platforms/pseries/setup.c | 1 +
+ 4 files changed, 63 insertions(+)
+
+diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
+index a225b5c42e76..84ae150ce6a6 100644
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -51,6 +51,7 @@ enum l1d_flush_type {
+
+ void setup_rfi_flush(enum l1d_flush_type, bool enable);
+ void do_rfi_flush_fixups(enum l1d_flush_type types);
++void setup_barrier_nospec(void);
+ void do_barrier_nospec_fixups(bool enable);
+ extern bool barrier_nospec_enabled;
+
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index 34d436fe2498..d0e974da4918 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -9,6 +9,7 @@
+ #include <linux/device.h>
+ #include <linux/seq_buf.h>
+
++#include <asm/debug.h>
+ #include <asm/security_features.h>
+ #include <asm/setup.h>
+
+@@ -23,6 +24,65 @@ static void enable_barrier_nospec(bool enable)
+ do_barrier_nospec_fixups(enable);
+ }
+
++void setup_barrier_nospec(void)
++{
++ bool enable;
++
++ /*
++ * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well.
++ * But there's a good reason not to. The two flags we check below are
++ * both are enabled by default in the kernel, so if the hcall is not
++ * functional they will be enabled.
++ * On a system where the host firmware has been updated (so the ori
++ * functions as a barrier), but on which the hypervisor (KVM/Qemu) has
++ * not been updated, we would like to enable the barrier. Dropping the
++ * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is
++ * we potentially enable the barrier on systems where the host firmware
++ * is not updated, but that's harmless as it's a no-op.
++ */
++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++ security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR);
++
++ enable_barrier_nospec(enable);
++}
++
++#ifdef CONFIG_DEBUG_FS
++static int barrier_nospec_set(void *data, u64 val)
++{
++ switch (val) {
++ case 0:
++ case 1:
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ if (!!val == !!barrier_nospec_enabled)
++ return 0;
++
++ enable_barrier_nospec(!!val);
++
++ return 0;
++}
++
++static int barrier_nospec_get(void *data, u64 *val)
++{
++ *val = barrier_nospec_enabled ? 1 : 0;
++ return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(fops_barrier_nospec,
++ barrier_nospec_get, barrier_nospec_set, "%llu\n");
++
++static __init int barrier_nospec_debugfs_init(void)
++{
++ debugfs_create_file("barrier_nospec", 0600, powerpc_debugfs_root, NULL,
++ &fops_barrier_nospec);
++ return 0;
++}
++device_initcall(barrier_nospec_debugfs_init);
++#endif /* CONFIG_DEBUG_FS */
++
+ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+ bool thread_priv;
+diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
+index 17203abf38e8..eb5464648810 100644
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -123,6 +123,7 @@ static void pnv_setup_rfi_flush(void)
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
+
+ setup_rfi_flush(type, enable);
++ setup_barrier_nospec();
+ }
+
+ static void __init pnv_setup_arch(void)
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index 91ade7755823..2b2759c98c7e 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -525,6 +525,7 @@ void pseries_setup_rfi_flush(void)
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
+
+ setup_rfi_flush(types, enable);
++ setup_barrier_nospec();
+ }
+
+ static void __init pSeries_setup_arch(void)
+--
+2.19.1
+
--- /dev/null
+From 403103d2fab584f18fbb402a7d0f2bdc81774844 Mon Sep 17 00:00:00 2001
+From: Michal Suchanek <msuchanek@suse.de>
+Date: Thu, 11 Apr 2019 21:46:04 +1000
+Subject: powerpc/64s: Enhance the information in cpu_show_spectre_v1()
+
+commit a377514519b9a20fa1ea9adddbb4129573129cef upstream.
+
+We now have barrier_nospec as mitigation so print it in
+cpu_show_spectre_v1() when enabled.
+
+Signed-off-by: Michal Suchanek <msuchanek@suse.de>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/security.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index d0e974da4918..f189f946d935 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -121,6 +121,9 @@ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, c
+ if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR))
+ return sprintf(buf, "Not affected\n");
+
++ if (barrier_nospec_enabled)
++ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++
+ return sprintf(buf, "Vulnerable\n");
+ }
+
+--
+2.19.1
+
--- /dev/null
+From 00486d04e0f6e1f6acb008568c29df4cc0e93eb3 Mon Sep 17 00:00:00 2001
+From: Michal Suchanek <msuchanek@suse.de>
+Date: Thu, 11 Apr 2019 21:46:00 +1000
+Subject: powerpc/64s: Patch barrier_nospec in modules
+
+commit 815069ca57c142eb71d27439bc27f41a433a67b3 upstream.
+
+Note that, unlike RFI, which is patched only in the kernel, the nospec
+state reflects the settings at the time the module was loaded.
+
+Iterating all modules and re-patching every time the settings change
+is not implemented.
+
+Based on lwsync patching.
+
+Signed-off-by: Michal Suchanek <msuchanek@suse.de>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/setup.h | 7 +++++++
+ arch/powerpc/kernel/module.c | 6 ++++++
+ arch/powerpc/kernel/security.c | 2 +-
+ arch/powerpc/lib/feature-fixups.c | 16 +++++++++++++---
+ 4 files changed, 27 insertions(+), 4 deletions(-)
+
+diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
+index 709f4e739ae8..a225b5c42e76 100644
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -52,6 +52,13 @@ enum l1d_flush_type {
+ void setup_rfi_flush(enum l1d_flush_type, bool enable);
+ void do_rfi_flush_fixups(enum l1d_flush_type types);
+ void do_barrier_nospec_fixups(bool enable);
++extern bool barrier_nospec_enabled;
++
++#ifdef CONFIG_PPC_BOOK3S_64
++void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
++#else
++static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
++#endif
+
+ #endif /* !__ASSEMBLY__ */
+
+diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
+index 30b89d5cbb03..d30f0626dcd0 100644
+--- a/arch/powerpc/kernel/module.c
++++ b/arch/powerpc/kernel/module.c
+@@ -72,6 +72,12 @@ int module_finalize(const Elf_Ehdr *hdr,
+ do_feature_fixups(powerpc_firmware_features,
+ (void *)sect->sh_addr,
+ (void *)sect->sh_addr + sect->sh_size);
++
++ sect = find_section(hdr, sechdrs, "__spec_barrier_fixup");
++ if (sect != NULL)
++ do_barrier_nospec_fixups_range(barrier_nospec_enabled,
++ (void *)sect->sh_addr,
++ (void *)sect->sh_addr + sect->sh_size);
+ #endif
+
+ sect = find_section(hdr, sechdrs, "__lwsync_fixup");
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index 8b1cf9c81b82..34d436fe2498 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -15,7 +15,7 @@
+
+ unsigned long powerpc_security_features __read_mostly = SEC_FTR_DEFAULT;
+
+-static bool barrier_nospec_enabled;
++bool barrier_nospec_enabled;
+
+ static void enable_barrier_nospec(bool enable)
+ {
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index f82ae6bb2365..a1222c441df5 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -278,14 +278,14 @@ void do_rfi_flush_fixups(enum l1d_flush_type types)
+ : "unknown");
+ }
+
+-void do_barrier_nospec_fixups(bool enable)
++void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
+ {
+ unsigned int instr, *dest;
+ long *start, *end;
+ int i;
+
+- start = PTRRELOC(&__start___barrier_nospec_fixup),
+- end = PTRRELOC(&__stop___barrier_nospec_fixup);
++ start = fixup_start;
++ end = fixup_end;
+
+ instr = 0x60000000; /* nop */
+
+@@ -304,6 +304,16 @@ void do_barrier_nospec_fixups(bool enable)
+ printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
+ }
+
++void do_barrier_nospec_fixups(bool enable)
++{
++ void *start, *end;
++
++ start = PTRRELOC(&__start___barrier_nospec_fixup),
++ end = PTRRELOC(&__stop___barrier_nospec_fixup);
++
++ do_barrier_nospec_fixups_range(enable, start, end);
++}
++
+ #endif /* CONFIG_PPC_BOOK3S_64 */
+
+ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+--
+2.19.1
+
--- /dev/null
+From 1d66f83aebd639a61d5432a5f5411dc5924ca240 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 11 Apr 2019 21:46:13 +1000
+Subject: powerpc/asm: Add a patch_site macro & helpers for patching
+ instructions
+
+commit 06d0bbc6d0f56dacac3a79900e9a9a0d5972d818 upstream.
+
+Add a macro and some helper C functions for patching single asm
+instructions.
+
+The gas macro means we can do something like:
+
+ 1: nop
+ patch_site 1b, patch__foo
+
+Which is less visually distracting than defining a GLOBAL symbol at 1,
+and also doesn't pollute the symbol table which can confuse eg. perf.
+
+These are obviously similar to our existing feature sections, but are
+not automatically patched based on CPU/MMU features, rather they are
+designed to be manually patched by C code at some arbitrary point.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/code-patching-asm.h | 18 ++++++++++++++++++
+ arch/powerpc/include/asm/code-patching.h | 2 ++
+ arch/powerpc/lib/code-patching.c | 16 ++++++++++++++++
+ 3 files changed, 36 insertions(+)
+ create mode 100644 arch/powerpc/include/asm/code-patching-asm.h
+
+diff --git a/arch/powerpc/include/asm/code-patching-asm.h b/arch/powerpc/include/asm/code-patching-asm.h
+new file mode 100644
+index 000000000000..ed7b1448493a
+--- /dev/null
++++ b/arch/powerpc/include/asm/code-patching-asm.h
+@@ -0,0 +1,18 @@
++/* SPDX-License-Identifier: GPL-2.0+ */
++/*
++ * Copyright 2018, Michael Ellerman, IBM Corporation.
++ */
++#ifndef _ASM_POWERPC_CODE_PATCHING_ASM_H
++#define _ASM_POWERPC_CODE_PATCHING_ASM_H
++
++/* Define a "site" that can be patched */
++.macro patch_site label name
++ .pushsection ".rodata"
++ .balign 4
++ .global \name
++\name:
++ .4byte \label - .
++ .popsection
++.endm
++
++#endif /* _ASM_POWERPC_CODE_PATCHING_ASM_H */
+diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
+index b4ab1f497335..ab934f8232bd 100644
+--- a/arch/powerpc/include/asm/code-patching.h
++++ b/arch/powerpc/include/asm/code-patching.h
+@@ -28,6 +28,8 @@ unsigned int create_cond_branch(const unsigned int *addr,
+ unsigned long target, int flags);
+ int patch_branch(unsigned int *addr, unsigned long target, int flags);
+ int patch_instruction(unsigned int *addr, unsigned int instr);
++int patch_instruction_site(s32 *addr, unsigned int instr);
++int patch_branch_site(s32 *site, unsigned long target, int flags);
+
+ int instr_is_relative_branch(unsigned int instr);
+ int instr_is_relative_link_branch(unsigned int instr);
+diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
+index c77c486fbf24..14535ad4cdd1 100644
+--- a/arch/powerpc/lib/code-patching.c
++++ b/arch/powerpc/lib/code-patching.c
+@@ -40,6 +40,22 @@ int patch_branch(unsigned int *addr, unsigned long target, int flags)
+ return patch_instruction(addr, create_branch(addr, target, flags));
+ }
+
++int patch_branch_site(s32 *site, unsigned long target, int flags)
++{
++ unsigned int *addr;
++
++ addr = (unsigned int *)((unsigned long)site + *site);
++ return patch_instruction(addr, create_branch(addr, target, flags));
++}
++
++int patch_instruction_site(s32 *site, unsigned int instr)
++{
++ unsigned int *addr;
++
++ addr = (unsigned int *)((unsigned long)site + *site);
++ return patch_instruction(addr, instr);
++}
++
+ unsigned int create_branch(const unsigned int *addr,
+ unsigned long target, int flags)
+ {
+--
+2.19.1
+
--- /dev/null
+From ae5e264f2b1fb1e4687636e0c0e0c4720ec6e6af Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Thu, 11 Apr 2019 21:45:59 +1000
+Subject: powerpc: Avoid code patching freed init sections
+
+commit 51c3c62b58b357e8d35e4cc32f7b4ec907426fe3 upstream.
+
+This stops us from doing code patching in init sections after they've
+been freed.
+
+In this chain:
+ kvm_guest_init() ->
+ kvm_use_magic_page() ->
+ fault_in_pages_readable() ->
+ __get_user() ->
+ __get_user_nocheck() ->
+ barrier_nospec();
+
+We have a code patching location at barrier_nospec() and
+kvm_guest_init() is an init function. This whole chain gets inlined,
+so when we free the init section (hence kvm_guest_init()), this code
+goes away and hence should no longer be patched.
+
+We saw this as userspace memory corruption when using a memory
+checker while doing partition migration testing on powervm (this
+starts the code patching post migration via
+/sys/kernel/mobility/migration). In theory, it could also happen when
+using /sys/kernel/debug/powerpc/barrier_nospec.
+
+Cc: stable@vger.kernel.org # 4.13+
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Reviewed-by: Nicholas Piggin <npiggin@gmail.com>
+Reviewed-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/setup.h | 1 +
+ arch/powerpc/lib/code-patching.c | 8 ++++++++
+ arch/powerpc/mm/mem.c | 2 ++
+ 3 files changed, 11 insertions(+)
+
+diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
+index 703ddf752516..709f4e739ae8 100644
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -8,6 +8,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
+
+ extern unsigned int rtas_data;
+ extern unsigned long long memory_limit;
++extern bool init_mem_is_free;
+ extern unsigned long klimit;
+ extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
+
+diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
+index 753d591f1b52..c77c486fbf24 100644
+--- a/arch/powerpc/lib/code-patching.c
++++ b/arch/powerpc/lib/code-patching.c
+@@ -14,12 +14,20 @@
+ #include <asm/page.h>
+ #include <asm/code-patching.h>
+ #include <asm/uaccess.h>
++#include <asm/setup.h>
++#include <asm/sections.h>
+
+
+ int patch_instruction(unsigned int *addr, unsigned int instr)
+ {
+ int err;
+
++ /* Make sure we aren't patching a freed init section */
++ if (init_mem_is_free && init_section_contains(addr, 4)) {
++ pr_debug("Skipping init section patching addr: 0x%px\n", addr);
++ return 0;
++ }
++
+ __put_user_size(instr, addr, 4, err);
+ if (err)
+ return err;
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index 5f844337de21..1e93dbc88e80 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -62,6 +62,7 @@
+ #endif
+
+ unsigned long long memory_limit;
++bool init_mem_is_free;
+
+ #ifdef CONFIG_HIGHMEM
+ pte_t *kmap_pte;
+@@ -396,6 +397,7 @@ void __init mem_init(void)
+ void free_initmem(void)
+ {
+ ppc_md.progress = ppc_printk_progress;
++ init_mem_is_free = true;
+ free_initmem_default(POISON_FREE_INITMEM);
+ }
+
+--
+2.19.1
+
--- /dev/null
+From 1007bd3bb65d9e6e15a5917c16052248443f9be6 Mon Sep 17 00:00:00 2001
+From: Andreas Schwab <schwab@linux-m68k.org>
+Date: Thu, 11 Apr 2019 21:45:56 +1000
+Subject: powerpc: Fix invalid use of register expressions
+
+commit 8a583c0a8d316d8ea52ea78491174ab1a3e9ef9d upstream.
+
+binutils >= 2.26 now warns about misuse of register expressions in
+assembler operands that are actually literals, for example:
+
+ arch/powerpc/kernel/entry_64.S:535: Warning: invalid register expression
+
+In practice these are almost all uses of r0 that should just be a
+literal 0.
+
+Signed-off-by: Andreas Schwab <schwab@linux-m68k.org>
+[mpe: Mention r0 is almost always the culprit, fold in purgatory change]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/ppc_asm.h | 2 +-
+ arch/powerpc/kernel/swsusp_asm64.S | 2 +-
+ arch/powerpc/lib/copypage_power7.S | 14 +++----
+ arch/powerpc/lib/copyuser_power7.S | 66 +++++++++++++++---------------
+ arch/powerpc/lib/memcpy_power7.S | 66 +++++++++++++++---------------
+ arch/powerpc/lib/string_64.S | 2 +-
+ 6 files changed, 76 insertions(+), 76 deletions(-)
+
+diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
+index c73750b0d9fa..24e95be3bfaf 100644
+--- a/arch/powerpc/include/asm/ppc_asm.h
++++ b/arch/powerpc/include/asm/ppc_asm.h
+@@ -437,7 +437,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
+ .machine push ; \
+ .machine "power4" ; \
+ lis scratch,0x60000000@h; \
+- dcbt r0,scratch,0b01010; \
++ dcbt 0,scratch,0b01010; \
+ .machine pop
+
+ /*
+diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S
+index 988f38dced0f..82d8aae81c6a 100644
+--- a/arch/powerpc/kernel/swsusp_asm64.S
++++ b/arch/powerpc/kernel/swsusp_asm64.S
+@@ -179,7 +179,7 @@ nothing_to_copy:
+ sld r3, r3, r0
+ li r0, 0
+ 1:
+- dcbf r0,r3
++ dcbf 0,r3
+ addi r3,r3,0x20
+ bdnz 1b
+
+diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
+index a84d333ecb09..ca5fc8fa7efc 100644
+--- a/arch/powerpc/lib/copypage_power7.S
++++ b/arch/powerpc/lib/copypage_power7.S
+@@ -45,13 +45,13 @@ _GLOBAL(copypage_power7)
+ .machine push
+ .machine "power4"
+ /* setup read stream 0 */
+- dcbt r0,r4,0b01000 /* addr from */
+- dcbt r0,r7,0b01010 /* length and depth from */
++ dcbt 0,r4,0b01000 /* addr from */
++ dcbt 0,r7,0b01010 /* length and depth from */
+ /* setup write stream 1 */
+- dcbtst r0,r9,0b01000 /* addr to */
+- dcbtst r0,r10,0b01010 /* length and depth to */
++ dcbtst 0,r9,0b01000 /* addr to */
++ dcbtst 0,r10,0b01010 /* length and depth to */
+ eieio
+- dcbt r0,r8,0b01010 /* all streams GO */
++ dcbt 0,r8,0b01010 /* all streams GO */
+ .machine pop
+
+ #ifdef CONFIG_ALTIVEC
+@@ -83,7 +83,7 @@ _GLOBAL(copypage_power7)
+ li r12,112
+
+ .align 5
+-1: lvx v7,r0,r4
++1: lvx v7,0,r4
+ lvx v6,r4,r6
+ lvx v5,r4,r7
+ lvx v4,r4,r8
+@@ -92,7 +92,7 @@ _GLOBAL(copypage_power7)
+ lvx v1,r4,r11
+ lvx v0,r4,r12
+ addi r4,r4,128
+- stvx v7,r0,r3
++ stvx v7,0,r3
+ stvx v6,r3,r6
+ stvx v5,r3,r7
+ stvx v4,r3,r8
+diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
+index da0c568d18c4..391694814691 100644
+--- a/arch/powerpc/lib/copyuser_power7.S
++++ b/arch/powerpc/lib/copyuser_power7.S
+@@ -327,13 +327,13 @@ err1; stb r0,0(r3)
+ .machine push
+ .machine "power4"
+ /* setup read stream 0 */
+- dcbt r0,r6,0b01000 /* addr from */
+- dcbt r0,r7,0b01010 /* length and depth from */
++ dcbt 0,r6,0b01000 /* addr from */
++ dcbt 0,r7,0b01010 /* length and depth from */
+ /* setup write stream 1 */
+- dcbtst r0,r9,0b01000 /* addr to */
+- dcbtst r0,r10,0b01010 /* length and depth to */
++ dcbtst 0,r9,0b01000 /* addr to */
++ dcbtst 0,r10,0b01010 /* length and depth to */
+ eieio
+- dcbt r0,r8,0b01010 /* all streams GO */
++ dcbt 0,r8,0b01010 /* all streams GO */
+ .machine pop
+
+ beq cr1,.Lunwind_stack_nonvmx_copy
+@@ -388,26 +388,26 @@ err3; std r0,0(r3)
+ li r11,48
+
+ bf cr7*4+3,5f
+-err3; lvx v1,r0,r4
++err3; lvx v1,0,r4
+ addi r4,r4,16
+-err3; stvx v1,r0,r3
++err3; stvx v1,0,r3
+ addi r3,r3,16
+
+ 5: bf cr7*4+2,6f
+-err3; lvx v1,r0,r4
++err3; lvx v1,0,r4
+ err3; lvx v0,r4,r9
+ addi r4,r4,32
+-err3; stvx v1,r0,r3
++err3; stvx v1,0,r3
+ err3; stvx v0,r3,r9
+ addi r3,r3,32
+
+ 6: bf cr7*4+1,7f
+-err3; lvx v3,r0,r4
++err3; lvx v3,0,r4
+ err3; lvx v2,r4,r9
+ err3; lvx v1,r4,r10
+ err3; lvx v0,r4,r11
+ addi r4,r4,64
+-err3; stvx v3,r0,r3
++err3; stvx v3,0,r3
+ err3; stvx v2,r3,r9
+ err3; stvx v1,r3,r10
+ err3; stvx v0,r3,r11
+@@ -433,7 +433,7 @@ err3; stvx v0,r3,r11
+ */
+ .align 5
+ 8:
+-err4; lvx v7,r0,r4
++err4; lvx v7,0,r4
+ err4; lvx v6,r4,r9
+ err4; lvx v5,r4,r10
+ err4; lvx v4,r4,r11
+@@ -442,7 +442,7 @@ err4; lvx v2,r4,r14
+ err4; lvx v1,r4,r15
+ err4; lvx v0,r4,r16
+ addi r4,r4,128
+-err4; stvx v7,r0,r3
++err4; stvx v7,0,r3
+ err4; stvx v6,r3,r9
+ err4; stvx v5,r3,r10
+ err4; stvx v4,r3,r11
+@@ -463,29 +463,29 @@ err4; stvx v0,r3,r16
+ mtocrf 0x01,r6
+
+ bf cr7*4+1,9f
+-err3; lvx v3,r0,r4
++err3; lvx v3,0,r4
+ err3; lvx v2,r4,r9
+ err3; lvx v1,r4,r10
+ err3; lvx v0,r4,r11
+ addi r4,r4,64
+-err3; stvx v3,r0,r3
++err3; stvx v3,0,r3
+ err3; stvx v2,r3,r9
+ err3; stvx v1,r3,r10
+ err3; stvx v0,r3,r11
+ addi r3,r3,64
+
+ 9: bf cr7*4+2,10f
+-err3; lvx v1,r0,r4
++err3; lvx v1,0,r4
+ err3; lvx v0,r4,r9
+ addi r4,r4,32
+-err3; stvx v1,r0,r3
++err3; stvx v1,0,r3
+ err3; stvx v0,r3,r9
+ addi r3,r3,32
+
+ 10: bf cr7*4+3,11f
+-err3; lvx v1,r0,r4
++err3; lvx v1,0,r4
+ addi r4,r4,16
+-err3; stvx v1,r0,r3
++err3; stvx v1,0,r3
+ addi r3,r3,16
+
+ /* Up to 15B to go */
+@@ -565,25 +565,25 @@ err3; lvx v0,0,r4
+ addi r4,r4,16
+
+ bf cr7*4+3,5f
+-err3; lvx v1,r0,r4
++err3; lvx v1,0,r4
+ VPERM(v8,v0,v1,v16)
+ addi r4,r4,16
+-err3; stvx v8,r0,r3
++err3; stvx v8,0,r3
+ addi r3,r3,16
+ vor v0,v1,v1
+
+ 5: bf cr7*4+2,6f
+-err3; lvx v1,r0,r4
++err3; lvx v1,0,r4
+ VPERM(v8,v0,v1,v16)
+ err3; lvx v0,r4,r9
+ VPERM(v9,v1,v0,v16)
+ addi r4,r4,32
+-err3; stvx v8,r0,r3
++err3; stvx v8,0,r3
+ err3; stvx v9,r3,r9
+ addi r3,r3,32
+
+ 6: bf cr7*4+1,7f
+-err3; lvx v3,r0,r4
++err3; lvx v3,0,r4
+ VPERM(v8,v0,v3,v16)
+ err3; lvx v2,r4,r9
+ VPERM(v9,v3,v2,v16)
+@@ -592,7 +592,7 @@ err3; lvx v1,r4,r10
+ err3; lvx v0,r4,r11
+ VPERM(v11,v1,v0,v16)
+ addi r4,r4,64
+-err3; stvx v8,r0,r3
++err3; stvx v8,0,r3
+ err3; stvx v9,r3,r9
+ err3; stvx v10,r3,r10
+ err3; stvx v11,r3,r11
+@@ -618,7 +618,7 @@ err3; stvx v11,r3,r11
+ */
+ .align 5
+ 8:
+-err4; lvx v7,r0,r4
++err4; lvx v7,0,r4
+ VPERM(v8,v0,v7,v16)
+ err4; lvx v6,r4,r9
+ VPERM(v9,v7,v6,v16)
+@@ -635,7 +635,7 @@ err4; lvx v1,r4,r15
+ err4; lvx v0,r4,r16
+ VPERM(v15,v1,v0,v16)
+ addi r4,r4,128
+-err4; stvx v8,r0,r3
++err4; stvx v8,0,r3
+ err4; stvx v9,r3,r9
+ err4; stvx v10,r3,r10
+ err4; stvx v11,r3,r11
+@@ -656,7 +656,7 @@ err4; stvx v15,r3,r16
+ mtocrf 0x01,r6
+
+ bf cr7*4+1,9f
+-err3; lvx v3,r0,r4
++err3; lvx v3,0,r4
+ VPERM(v8,v0,v3,v16)
+ err3; lvx v2,r4,r9
+ VPERM(v9,v3,v2,v16)
+@@ -665,27 +665,27 @@ err3; lvx v1,r4,r10
+ err3; lvx v0,r4,r11
+ VPERM(v11,v1,v0,v16)
+ addi r4,r4,64
+-err3; stvx v8,r0,r3
++err3; stvx v8,0,r3
+ err3; stvx v9,r3,r9
+ err3; stvx v10,r3,r10
+ err3; stvx v11,r3,r11
+ addi r3,r3,64
+
+ 9: bf cr7*4+2,10f
+-err3; lvx v1,r0,r4
++err3; lvx v1,0,r4
+ VPERM(v8,v0,v1,v16)
+ err3; lvx v0,r4,r9
+ VPERM(v9,v1,v0,v16)
+ addi r4,r4,32
+-err3; stvx v8,r0,r3
++err3; stvx v8,0,r3
+ err3; stvx v9,r3,r9
+ addi r3,r3,32
+
+ 10: bf cr7*4+3,11f
+-err3; lvx v1,r0,r4
++err3; lvx v1,0,r4
+ VPERM(v8,v0,v1,v16)
+ addi r4,r4,16
+-err3; stvx v8,r0,r3
++err3; stvx v8,0,r3
+ addi r3,r3,16
+
+ /* Up to 15B to go */
+diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
+index 786234fd4e91..193909abd18b 100644
+--- a/arch/powerpc/lib/memcpy_power7.S
++++ b/arch/powerpc/lib/memcpy_power7.S
+@@ -261,12 +261,12 @@ _GLOBAL(memcpy_power7)
+
+ .machine push
+ .machine "power4"
+- dcbt r0,r6,0b01000
+- dcbt r0,r7,0b01010
+- dcbtst r0,r9,0b01000
+- dcbtst r0,r10,0b01010
++ dcbt 0,r6,0b01000
++ dcbt 0,r7,0b01010
++ dcbtst 0,r9,0b01000
++ dcbtst 0,r10,0b01010
+ eieio
+- dcbt r0,r8,0b01010 /* GO */
++ dcbt 0,r8,0b01010 /* GO */
+ .machine pop
+
+ beq cr1,.Lunwind_stack_nonvmx_copy
+@@ -321,26 +321,26 @@ _GLOBAL(memcpy_power7)
+ li r11,48
+
+ bf cr7*4+3,5f
+- lvx v1,r0,r4
++ lvx v1,0,r4
+ addi r4,r4,16
+- stvx v1,r0,r3
++ stvx v1,0,r3
+ addi r3,r3,16
+
+ 5: bf cr7*4+2,6f
+- lvx v1,r0,r4
++ lvx v1,0,r4
+ lvx v0,r4,r9
+ addi r4,r4,32
+- stvx v1,r0,r3
++ stvx v1,0,r3
+ stvx v0,r3,r9
+ addi r3,r3,32
+
+ 6: bf cr7*4+1,7f
+- lvx v3,r0,r4
++ lvx v3,0,r4
+ lvx v2,r4,r9
+ lvx v1,r4,r10
+ lvx v0,r4,r11
+ addi r4,r4,64
+- stvx v3,r0,r3
++ stvx v3,0,r3
+ stvx v2,r3,r9
+ stvx v1,r3,r10
+ stvx v0,r3,r11
+@@ -366,7 +366,7 @@ _GLOBAL(memcpy_power7)
+ */
+ .align 5
+ 8:
+- lvx v7,r0,r4
++ lvx v7,0,r4
+ lvx v6,r4,r9
+ lvx v5,r4,r10
+ lvx v4,r4,r11
+@@ -375,7 +375,7 @@ _GLOBAL(memcpy_power7)
+ lvx v1,r4,r15
+ lvx v0,r4,r16
+ addi r4,r4,128
+- stvx v7,r0,r3
++ stvx v7,0,r3
+ stvx v6,r3,r9
+ stvx v5,r3,r10
+ stvx v4,r3,r11
+@@ -396,29 +396,29 @@ _GLOBAL(memcpy_power7)
+ mtocrf 0x01,r6
+
+ bf cr7*4+1,9f
+- lvx v3,r0,r4
++ lvx v3,0,r4
+ lvx v2,r4,r9
+ lvx v1,r4,r10
+ lvx v0,r4,r11
+ addi r4,r4,64
+- stvx v3,r0,r3
++ stvx v3,0,r3
+ stvx v2,r3,r9
+ stvx v1,r3,r10
+ stvx v0,r3,r11
+ addi r3,r3,64
+
+ 9: bf cr7*4+2,10f
+- lvx v1,r0,r4
++ lvx v1,0,r4
+ lvx v0,r4,r9
+ addi r4,r4,32
+- stvx v1,r0,r3
++ stvx v1,0,r3
+ stvx v0,r3,r9
+ addi r3,r3,32
+
+ 10: bf cr7*4+3,11f
+- lvx v1,r0,r4
++ lvx v1,0,r4
+ addi r4,r4,16
+- stvx v1,r0,r3
++ stvx v1,0,r3
+ addi r3,r3,16
+
+ /* Up to 15B to go */
+@@ -499,25 +499,25 @@ _GLOBAL(memcpy_power7)
+ addi r4,r4,16
+
+ bf cr7*4+3,5f
+- lvx v1,r0,r4
++ lvx v1,0,r4
+ VPERM(v8,v0,v1,v16)
+ addi r4,r4,16
+- stvx v8,r0,r3
++ stvx v8,0,r3
+ addi r3,r3,16
+ vor v0,v1,v1
+
+ 5: bf cr7*4+2,6f
+- lvx v1,r0,r4
++ lvx v1,0,r4
+ VPERM(v8,v0,v1,v16)
+ lvx v0,r4,r9
+ VPERM(v9,v1,v0,v16)
+ addi r4,r4,32
+- stvx v8,r0,r3
++ stvx v8,0,r3
+ stvx v9,r3,r9
+ addi r3,r3,32
+
+ 6: bf cr7*4+1,7f
+- lvx v3,r0,r4
++ lvx v3,0,r4
+ VPERM(v8,v0,v3,v16)
+ lvx v2,r4,r9
+ VPERM(v9,v3,v2,v16)
+@@ -526,7 +526,7 @@ _GLOBAL(memcpy_power7)
+ lvx v0,r4,r11
+ VPERM(v11,v1,v0,v16)
+ addi r4,r4,64
+- stvx v8,r0,r3
++ stvx v8,0,r3
+ stvx v9,r3,r9
+ stvx v10,r3,r10
+ stvx v11,r3,r11
+@@ -552,7 +552,7 @@ _GLOBAL(memcpy_power7)
+ */
+ .align 5
+ 8:
+- lvx v7,r0,r4
++ lvx v7,0,r4
+ VPERM(v8,v0,v7,v16)
+ lvx v6,r4,r9
+ VPERM(v9,v7,v6,v16)
+@@ -569,7 +569,7 @@ _GLOBAL(memcpy_power7)
+ lvx v0,r4,r16
+ VPERM(v15,v1,v0,v16)
+ addi r4,r4,128
+- stvx v8,r0,r3
++ stvx v8,0,r3
+ stvx v9,r3,r9
+ stvx v10,r3,r10
+ stvx v11,r3,r11
+@@ -590,7 +590,7 @@ _GLOBAL(memcpy_power7)
+ mtocrf 0x01,r6
+
+ bf cr7*4+1,9f
+- lvx v3,r0,r4
++ lvx v3,0,r4
+ VPERM(v8,v0,v3,v16)
+ lvx v2,r4,r9
+ VPERM(v9,v3,v2,v16)
+@@ -599,27 +599,27 @@ _GLOBAL(memcpy_power7)
+ lvx v0,r4,r11
+ VPERM(v11,v1,v0,v16)
+ addi r4,r4,64
+- stvx v8,r0,r3
++ stvx v8,0,r3
+ stvx v9,r3,r9
+ stvx v10,r3,r10
+ stvx v11,r3,r11
+ addi r3,r3,64
+
+ 9: bf cr7*4+2,10f
+- lvx v1,r0,r4
++ lvx v1,0,r4
+ VPERM(v8,v0,v1,v16)
+ lvx v0,r4,r9
+ VPERM(v9,v1,v0,v16)
+ addi r4,r4,32
+- stvx v8,r0,r3
++ stvx v8,0,r3
+ stvx v9,r3,r9
+ addi r3,r3,32
+
+ 10: bf cr7*4+3,11f
+- lvx v1,r0,r4
++ lvx v1,0,r4
+ VPERM(v8,v0,v1,v16)
+ addi r4,r4,16
+- stvx v8,r0,r3
++ stvx v8,0,r3
+ addi r3,r3,16
+
+ /* Up to 15B to go */
+diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S
+index 57ace356c949..11e6372537fd 100644
+--- a/arch/powerpc/lib/string_64.S
++++ b/arch/powerpc/lib/string_64.S
+@@ -192,7 +192,7 @@ err1; std r0,8(r3)
+ mtctr r6
+ mr r8,r3
+ 14:
+-err1; dcbz r0,r3
++err1; dcbz 0,r3
+ add r3,r3,r9
+ bdnz 14b
+
+--
+2.19.1
+
--- /dev/null
+From 6b1e590ea5b5811ba5bc4ff745f5bd7f75b0aa70 Mon Sep 17 00:00:00 2001
+From: Diana Craciun <diana.craciun@nxp.com>
+Date: Thu, 11 Apr 2019 21:46:11 +1000
+Subject: powerpc/fsl: Add barrier_nospec implementation for NXP PowerPC Book3E
+
+commit ebcd1bfc33c7a90df941df68a6e5d4018c022fba upstream.
+
+Implement the barrier_nospec as a isync;sync instruction sequence.
+The implementation uses the infrastructure built for BOOK3S 64.
+
+Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
+[mpe: Split out of larger patch]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/Kconfig | 2 +-
+ arch/powerpc/include/asm/barrier.h | 8 +++++++-
+ arch/powerpc/lib/feature-fixups.c | 31 ++++++++++++++++++++++++++++++
+ 3 files changed, 39 insertions(+), 2 deletions(-)
+
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index a238698178fc..fa8f2aa88189 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -167,7 +167,7 @@ config PPC
+ config PPC_BARRIER_NOSPEC
+ bool
+ default y
+- depends on PPC_BOOK3S_64
++ depends on PPC_BOOK3S_64 || PPC_FSL_BOOK3E
+
+ config GENERIC_CSUM
+ def_bool CPU_LITTLE_ENDIAN
+diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
+index 465a64316897..80024c4f2093 100644
+--- a/arch/powerpc/include/asm/barrier.h
++++ b/arch/powerpc/include/asm/barrier.h
+@@ -77,12 +77,18 @@ do { \
+
+ #define smp_mb__before_spinlock() smp_mb()
+
++#ifdef CONFIG_PPC_BOOK3S_64
++#define NOSPEC_BARRIER_SLOT nop
++#elif defined(CONFIG_PPC_FSL_BOOK3E)
++#define NOSPEC_BARRIER_SLOT nop; nop
++#endif
++
+ #ifdef CONFIG_PPC_BARRIER_NOSPEC
+ /*
+ * Prevent execution of subsequent instructions until preceding branches have
+ * been fully resolved and are no longer executing speculatively.
+ */
+-#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; nop
++#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT
+
+ // This also acts as a compiler barrier due to the memory clobber.
+ #define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index 5df57f7bae0a..b3e362437ec4 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -318,6 +318,37 @@ void do_barrier_nospec_fixups(bool enable)
+ }
+ #endif /* CONFIG_PPC_BARRIER_NOSPEC */
+
++#ifdef CONFIG_PPC_FSL_BOOK3E
++void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_end)
++{
++ unsigned int instr[2], *dest;
++ long *start, *end;
++ int i;
++
++ start = fixup_start;
++ end = fixup_end;
++
++ instr[0] = PPC_INST_NOP;
++ instr[1] = PPC_INST_NOP;
++
++ if (enable) {
++ pr_info("barrier-nospec: using isync; sync as speculation barrier\n");
++ instr[0] = PPC_INST_ISYNC;
++ instr[1] = PPC_INST_SYNC;
++ }
++
++ for (i = 0; start < end; start++, i++) {
++ dest = (void *)start + *start;
++
++ pr_devel("patching dest %lx\n", (unsigned long)dest);
++ patch_instruction(dest, instr[0]);
++ patch_instruction(dest + 1, instr[1]);
++ }
++
++ printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
++}
++#endif /* CONFIG_PPC_FSL_BOOK3E */
++
+ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+ {
+ long *start, *end;
+--
+2.19.1
+
--- /dev/null
+From 331d7fa8c50f1a30d0a1eb925aa94396e036a0fd Mon Sep 17 00:00:00 2001
+From: Diana Craciun <diana.craciun@nxp.com>
+Date: Thu, 11 Apr 2019 21:46:18 +1000
+Subject: powerpc/fsl: Add infrastructure to fixup branch predictor flush
+
+commit 76a5eaa38b15dda92cd6964248c39b5a6f3a4e9d upstream.
+
+In order to protect against speculation attacks (Spectre
+variant 2) on NXP PowerPC platforms, the branch predictor
+should be flushed when the privillege level is changed.
+This patch is adding the infrastructure to fixup at runtime
+the code sections that are performing the branch predictor flush
+depending on a boot arg parameter which is added later in a
+separate patch.
+
+Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/feature-fixups.h | 12 ++++++++++++
+ arch/powerpc/include/asm/setup.h | 2 ++
+ arch/powerpc/kernel/vmlinux.lds.S | 8 ++++++++
+ arch/powerpc/lib/feature-fixups.c | 23 +++++++++++++++++++++++
+ 4 files changed, 45 insertions(+)
+
+diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
+index afd3efd38938..175128e19025 100644
+--- a/arch/powerpc/include/asm/feature-fixups.h
++++ b/arch/powerpc/include/asm/feature-fixups.h
+@@ -221,6 +221,17 @@ void setup_feature_keys(void);
+ FTR_ENTRY_OFFSET 953b-954b; \
+ .popsection;
+
++#define START_BTB_FLUSH_SECTION \
++955: \
++
++#define END_BTB_FLUSH_SECTION \
++956: \
++ .pushsection __btb_flush_fixup,"a"; \
++ .align 2; \
++957: \
++ FTR_ENTRY_OFFSET 955b-957b; \
++ FTR_ENTRY_OFFSET 956b-957b; \
++ .popsection;
+
+ #ifndef __ASSEMBLY__
+
+@@ -229,6 +240,7 @@ extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
+ extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
+ extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+ extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
++extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
+
+ #endif
+
+diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
+index d3e9da62d029..23ee67e279ae 100644
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -65,6 +65,8 @@ void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
+ static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
+ #endif
+
++void do_btb_flush_fixups(void);
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif /* _ASM_POWERPC_SETUP_H */
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index 5c6cf58943b9..50d365060855 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -164,6 +164,14 @@ SECTIONS
+ }
+ #endif /* CONFIG_PPC_BARRIER_NOSPEC */
+
++#ifdef CONFIG_PPC_FSL_BOOK3E
++ . = ALIGN(8);
++ __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
++ __start__btb_flush_fixup = .;
++ *(__btb_flush_fixup)
++ __stop__btb_flush_fixup = .;
++ }
++#endif
+ EXCEPTION_TABLE(0)
+
+ NOTES :kernel :notes
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index b3e362437ec4..e6ed0ec94bc8 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -347,6 +347,29 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
+
+ printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
+ }
++
++static void patch_btb_flush_section(long *curr)
++{
++ unsigned int *start, *end;
++
++ start = (void *)curr + *curr;
++ end = (void *)curr + *(curr + 1);
++ for (; start < end; start++) {
++ pr_devel("patching dest %lx\n", (unsigned long)start);
++ patch_instruction(start, PPC_INST_NOP);
++ }
++}
++
++void do_btb_flush_fixups(void)
++{
++ long *start, *end;
++
++ start = PTRRELOC(&__start__btb_flush_fixup);
++ end = PTRRELOC(&__stop__btb_flush_fixup);
++
++ for (; start < end; start += 2)
++ patch_btb_flush_section(start);
++}
+ #endif /* CONFIG_PPC_FSL_BOOK3E */
+
+ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+--
+2.19.1
+
--- /dev/null
+From 7703a4ca2dedd8373956ec2758c36f9811dc74a7 Mon Sep 17 00:00:00 2001
+From: Diana Craciun <diana.craciun@nxp.com>
+Date: Thu, 11 Apr 2019 21:46:19 +1000
+Subject: powerpc/fsl: Add macro to flush the branch predictor
+
+commit 1cbf8990d79ff69da8ad09e8a3df014e1494462b upstream.
+
+The BUCSR register can be used to invalidate the entries in the
+branch prediction mechanisms.
+
+Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/ppc_asm.h | 21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
+index 24e95be3bfaf..bbd35ba36a22 100644
+--- a/arch/powerpc/include/asm/ppc_asm.h
++++ b/arch/powerpc/include/asm/ppc_asm.h
+@@ -780,4 +780,25 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
+ .long 0x2400004c /* rfid */
+ #endif /* !CONFIG_PPC_BOOK3E */
+ #endif /* __ASSEMBLY__ */
++
++/*
++ * Helper macro for exception table entries
++ */
++#define EX_TABLE(_fault, _target) \
++ stringify_in_c(.section __ex_table,"a";)\
++ stringify_in_c(.balign 4;) \
++ stringify_in_c(.long (_fault) - . ;) \
++ stringify_in_c(.long (_target) - . ;) \
++ stringify_in_c(.previous)
++
++#ifdef CONFIG_PPC_FSL_BOOK3E
++#define BTB_FLUSH(reg) \
++ lis reg,BUCSR_INIT@h; \
++ ori reg,reg,BUCSR_INIT@l; \
++ mtspr SPRN_BUCSR,reg; \
++ isync;
++#else
++#define BTB_FLUSH(reg)
++#endif /* CONFIG_PPC_FSL_BOOK3E */
++
+ #endif /* _ASM_POWERPC_PPC_ASM_H */
+--
+2.19.1
+
--- /dev/null
+From 60410df51b980e8706054b9ac33c7f22987301b0 Mon Sep 17 00:00:00 2001
+From: Diana Craciun <diana.craciun@nxp.com>
+Date: Thu, 11 Apr 2019 21:46:22 +1000
+Subject: powerpc/fsl: Add nospectre_v2 command line argument
+
+commit f633a8ad636efb5d4bba1a047d4a0f1ef719aa06 upstream.
+
+When the command line argument is present, the Spectre variant 2
+mitigations are disabled.
+
+Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/setup.h | 5 +++++
+ arch/powerpc/kernel/security.c | 21 +++++++++++++++++++++
+ 2 files changed, 26 insertions(+)
+
+diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
+index 23ee67e279ae..862ebce3ae54 100644
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -65,6 +65,11 @@ void do_barrier_nospec_fixups_range(bool enable, void *start, void *end);
+ static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void *end) { };
+ #endif
+
++#ifdef CONFIG_PPC_FSL_BOOK3E
++void setup_spectre_v2(void);
++#else
++static inline void setup_spectre_v2(void) {};
++#endif
+ void do_btb_flush_fixups(void);
+
+ #endif /* !__ASSEMBLY__ */
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index fc41bccd9ab6..6dc5cdc2b87c 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -27,6 +27,10 @@ static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NO
+
+ bool barrier_nospec_enabled;
+ static bool no_nospec;
++static bool btb_flush_enabled;
++#ifdef CONFIG_PPC_FSL_BOOK3E
++static bool no_spectrev2;
++#endif
+
+ static void enable_barrier_nospec(bool enable)
+ {
+@@ -102,6 +106,23 @@ static __init int barrier_nospec_debugfs_init(void)
+ device_initcall(barrier_nospec_debugfs_init);
+ #endif /* CONFIG_DEBUG_FS */
+
++#ifdef CONFIG_PPC_FSL_BOOK3E
++static int __init handle_nospectre_v2(char *p)
++{
++ no_spectrev2 = true;
++
++ return 0;
++}
++early_param("nospectre_v2", handle_nospectre_v2);
++void setup_spectre_v2(void)
++{
++ if (no_spectrev2)
++ do_btb_flush_fixups();
++ else
++ btb_flush_enabled = true;
++}
++#endif /* CONFIG_PPC_FSL_BOOK3E */
++
+ #ifdef CONFIG_PPC_BOOK3S_64
+ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+--
+2.19.1
+
--- /dev/null
+From 512465f78c9e1a6ee0a66b4cfdb30b77936ba6db Mon Sep 17 00:00:00 2001
+From: Diana Craciun <diana.craciun@nxp.com>
+Date: Thu, 11 Apr 2019 21:46:21 +1000
+Subject: powerpc/fsl: Emulate SPRN_BUCSR register
+
+commit 98518c4d8728656db349f875fcbbc7c126d4c973 upstream.
+
+In order to flush the branch predictor the guest kernel performs
+writes to the BUCSR register which is hypervisor privileged. However,
+the branch predictor is flushed at each KVM entry, so the branch
+predictor has been already flushed, so just return as soon as possible
+to guest.
+
+Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
+[mpe: Tweak comment formatting]
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kvm/e500_emulate.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
+index 990db69a1d0b..fa88f641ac03 100644
+--- a/arch/powerpc/kvm/e500_emulate.c
++++ b/arch/powerpc/kvm/e500_emulate.c
+@@ -277,6 +277,13 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
+ vcpu->arch.pwrmgtcr0 = spr_val;
+ break;
+
++ case SPRN_BUCSR:
++ /*
++ * If we are here, it means that we have already flushed the
++ * branch predictor, so just return to guest.
++ */
++ break;
++
+ /* extra exceptions */
+ #ifdef CONFIG_SPE_POSSIBLE
+ case SPRN_IVOR32:
+--
+2.19.1
+
--- /dev/null
+From bda110d185f3d87ed8fd9339e7c12e34a5381eb2 Mon Sep 17 00:00:00 2001
+From: Diana Craciun <diana.craciun@nxp.com>
+Date: Thu, 11 Apr 2019 21:46:26 +1000
+Subject: powerpc/fsl: Enable runtime patching if nospectre_v2 boot arg is used
+
+commit 3bc8ea8603ae4c1e09aca8de229ad38b8091fcb3 upstream.
+
+If the user chooses not to use the mitigations, replace
+the code sequence with nops.
+
+Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/setup-common.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index d5a128f54537..5e7d70c5d065 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -919,6 +919,7 @@ void __init setup_arch(char **cmdline_p)
+ ppc_md.setup_arch();
+
+ setup_barrier_nospec();
++ setup_spectre_v2();
+
+ paging_init();
+
+--
+2.19.1
+
--- /dev/null
+From c4831f3c68fea75cbefe6a5233e262a74317abc3 Mon Sep 17 00:00:00 2001
+From: Diana Craciun <diana.craciun@nxp.com>
+Date: Thu, 11 Apr 2019 21:46:20 +1000
+Subject: powerpc/fsl: Fix spectre_v2 mitigations reporting
+
+commit 7d8bad99ba5a22892f0cad6881289fdc3875a930 upstream.
+
+Currently for CONFIG_PPC_FSL_BOOK3E the spectre_v2 file is incorrect:
+
+ $ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
+ "Mitigation: Software count cache flush"
+
+Which is wrong. Fix it to report vulnerable for now.
+
+Fixes: ee13cb249fab ("powerpc/64s: Add support for software count cache flush")
+Cc: stable@vger.kernel.org # v4.19+
+Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/security.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index fd4703b6ddc0..fc41bccd9ab6 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -23,7 +23,7 @@ enum count_cache_flush_type {
+ COUNT_CACHE_FLUSH_SW = 0x2,
+ COUNT_CACHE_FLUSH_HW = 0x4,
+ };
+-static enum count_cache_flush_type count_cache_flush_type;
++static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
+
+ bool barrier_nospec_enabled;
+ static bool no_nospec;
+--
+2.19.1
+
--- /dev/null
+From 42fc63db4483b3fdab780d0f617668bba671a4aa Mon Sep 17 00:00:00 2001
+From: Christophe Leroy <christophe.leroy@c-s.fr>
+Date: Thu, 11 Apr 2019 21:46:29 +1000
+Subject: powerpc/fsl: Fix the flush of branch predictor.
+
+commit 27da80719ef132cf8c80eb406d5aeb37dddf78cc upstream.
+
+The commit identified below adds MC_BTB_FLUSH macro only when
+CONFIG_PPC_FSL_BOOK3E is defined. This results in the following error
+on some configs (seen several times with kisskb randconfig_defconfig)
+
+arch/powerpc/kernel/exceptions-64e.S:576: Error: Unrecognized opcode: `mc_btb_flush'
+make[3]: *** [scripts/Makefile.build:367: arch/powerpc/kernel/exceptions-64e.o] Error 1
+make[2]: *** [scripts/Makefile.build:492: arch/powerpc/kernel] Error 2
+make[1]: *** [Makefile:1043: arch/powerpc] Error 2
+make: *** [Makefile:152: sub-make] Error 2
+
+This patch adds a blank definition of MC_BTB_FLUSH for other cases.
+
+Fixes: 10c5e83afd4a ("powerpc/fsl: Flush the branch predictor at each kernel entry (64bit)")
+Cc: Diana Craciun <diana.craciun@nxp.com>
+Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Reviewed-by: Daniel Axtens <dja@axtens.net>
+Reviewed-by: Diana Craciun <diana.craciun@nxp.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/exceptions-64e.S | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
+index 79c6fee6368d..423b5257d3a1 100644
+--- a/arch/powerpc/kernel/exceptions-64e.S
++++ b/arch/powerpc/kernel/exceptions-64e.S
+@@ -348,6 +348,7 @@ ret_from_mc_except:
+ #define GEN_BTB_FLUSH
+ #define CRIT_BTB_FLUSH
+ #define DBG_BTB_FLUSH
++#define MC_BTB_FLUSH
+ #define GDBELL_BTB_FLUSH
+ #endif
+
+--
+2.19.1
+
--- /dev/null
+From 525c880276b6d4349d0eac9ecc2fa70f35094a7f Mon Sep 17 00:00:00 2001
+From: Diana Craciun <diana.craciun@nxp.com>
+Date: Thu, 11 Apr 2019 21:46:28 +1000
+Subject: powerpc/fsl: Fixed warning: orphan section `__btb_flush_fixup'
+
+commit 039daac5526932ec731e4499613018d263af8b3e upstream.
+
+Fixed the following build warning:
+powerpc-linux-gnu-ld: warning: orphan section `__btb_flush_fixup' from
+`arch/powerpc/kernel/head_44x.o' being placed in section
+`__btb_flush_fixup'.
+
+Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/head_booke.h | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
+index 384bb4d80520..7b98c7351f6c 100644
+--- a/arch/powerpc/kernel/head_booke.h
++++ b/arch/powerpc/kernel/head_booke.h
+@@ -31,6 +31,16 @@
+ */
+ #define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4))
+
++#ifdef CONFIG_PPC_FSL_BOOK3E
++#define BOOKE_CLEAR_BTB(reg) \
++START_BTB_FLUSH_SECTION \
++ BTB_FLUSH(reg) \
++END_BTB_FLUSH_SECTION
++#else
++#define BOOKE_CLEAR_BTB(reg)
++#endif
++
++
+ #define NORMAL_EXCEPTION_PROLOG(intno) \
+ mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \
+ mfspr r10, SPRN_SPRG_THREAD; \
+@@ -42,9 +52,7 @@
+ andi. r11, r11, MSR_PR; /* check whether user or kernel */\
+ mr r11, r1; \
+ beq 1f; \
+-START_BTB_FLUSH_SECTION \
+- BTB_FLUSH(r11) \
+-END_BTB_FLUSH_SECTION \
++ BOOKE_CLEAR_BTB(r11) \
+ /* if from user, start at top of this thread's kernel stack */ \
+ lwz r11, THREAD_INFO-THREAD(r10); \
+ ALLOC_STACK_FRAME(r11, THREAD_SIZE); \
+@@ -130,9 +138,7 @@ END_BTB_FLUSH_SECTION \
+ stw r9,_CCR(r8); /* save CR on stack */\
+ mfspr r11,exc_level_srr1; /* check whether user or kernel */\
+ DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \
+-START_BTB_FLUSH_SECTION \
+- BTB_FLUSH(r10) \
+-END_BTB_FLUSH_SECTION \
++ BOOKE_CLEAR_BTB(r10) \
+ andi. r11,r11,MSR_PR; \
+ mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
+ lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
+--
+2.19.1
+
--- /dev/null
+From 93d143d2fdba4f9cc44f4b4dcc91fc8265ed4563 Mon Sep 17 00:00:00 2001
+From: Diana Craciun <diana.craciun@nxp.com>
+Date: Thu, 11 Apr 2019 21:46:25 +1000
+Subject: powerpc/fsl: Flush branch predictor when entering KVM
+
+commit e7aa61f47b23afbec41031bc47ca8d6cb6516abc upstream.
+
+Switching from the guest to host is another place
+where the speculative accesses can be exploited.
+Flush the branch predictor when entering KVM.
+
+Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kvm/bookehv_interrupts.S | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
+index 81bd8a07aa51..612b7f6a887f 100644
+--- a/arch/powerpc/kvm/bookehv_interrupts.S
++++ b/arch/powerpc/kvm/bookehv_interrupts.S
+@@ -75,6 +75,10 @@
+ PPC_LL r1, VCPU_HOST_STACK(r4)
+ PPC_LL r2, HOST_R2(r1)
+
++START_BTB_FLUSH_SECTION
++ BTB_FLUSH(r10)
++END_BTB_FLUSH_SECTION
++
+ mfspr r10, SPRN_PID
+ lwz r8, VCPU_HOST_PID(r4)
+ PPC_LL r11, VCPU_SHARED(r4)
+--
+2.19.1
+
--- /dev/null
+From dacb2a9aa38fd5f0e5facd87c6f58e3c550732c2 Mon Sep 17 00:00:00 2001
+From: Diana Craciun <diana.craciun@nxp.com>
+Date: Thu, 11 Apr 2019 21:46:23 +1000
+Subject: powerpc/fsl: Flush the branch predictor at each kernel entry (64bit)
+
+commit 10c5e83afd4a3f01712d97d3bb1ae34d5b74a185 upstream.
+
+In order to protect against speculation attacks on
+indirect branches, the branch predictor is flushed at
+kernel entry to protect for the following situations:
+- userspace process attacking another userspace process
+- userspace process attacking the kernel
+Basically when the privilege level changes (i.e. the
+kernel is entered), the branch predictor state is flushed.
+
+Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/entry_64.S | 5 +++++
+ arch/powerpc/kernel/exceptions-64e.S | 26 +++++++++++++++++++++++++-
+ arch/powerpc/mm/tlb_low_64e.S | 7 +++++++
+ 3 files changed, 37 insertions(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index 6625cec9e7c0..390ebf4ef384 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -80,6 +80,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
+ std r0,GPR0(r1)
+ std r10,GPR1(r1)
+ beq 2f /* if from kernel mode */
++#ifdef CONFIG_PPC_FSL_BOOK3E
++START_BTB_FLUSH_SECTION
++ BTB_FLUSH(r10)
++END_BTB_FLUSH_SECTION
++#endif
+ ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
+ 2: std r2,GPR2(r1)
+ std r3,GPR3(r1)
+diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
+index ca03eb229a9a..79c6fee6368d 100644
+--- a/arch/powerpc/kernel/exceptions-64e.S
++++ b/arch/powerpc/kernel/exceptions-64e.S
+@@ -295,7 +295,8 @@ ret_from_mc_except:
+ andi. r10,r11,MSR_PR; /* save stack pointer */ \
+ beq 1f; /* branch around if supervisor */ \
+ ld r1,PACAKSAVE(r13); /* get kernel stack coming from usr */\
+-1: cmpdi cr1,r1,0; /* check if SP makes sense */ \
++1: type##_BTB_FLUSH \
++ cmpdi cr1,r1,0; /* check if SP makes sense */ \
+ bge- cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
+ mfspr r10,SPRN_##type##_SRR0; /* read SRR0 before touching stack */
+
+@@ -327,6 +328,29 @@ ret_from_mc_except:
+ #define SPRN_MC_SRR0 SPRN_MCSRR0
+ #define SPRN_MC_SRR1 SPRN_MCSRR1
+
++#ifdef CONFIG_PPC_FSL_BOOK3E
++#define GEN_BTB_FLUSH \
++ START_BTB_FLUSH_SECTION \
++ beq 1f; \
++ BTB_FLUSH(r10) \
++ 1: \
++ END_BTB_FLUSH_SECTION
++
++#define CRIT_BTB_FLUSH \
++ START_BTB_FLUSH_SECTION \
++ BTB_FLUSH(r10) \
++ END_BTB_FLUSH_SECTION
++
++#define DBG_BTB_FLUSH CRIT_BTB_FLUSH
++#define MC_BTB_FLUSH CRIT_BTB_FLUSH
++#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH
++#else
++#define GEN_BTB_FLUSH
++#define CRIT_BTB_FLUSH
++#define DBG_BTB_FLUSH
++#define GDBELL_BTB_FLUSH
++#endif
++
+ #define NORMAL_EXCEPTION_PROLOG(n, intnum, addition) \
+ EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
+
+diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
+index eb82d787d99a..b7e9c09dfe19 100644
+--- a/arch/powerpc/mm/tlb_low_64e.S
++++ b/arch/powerpc/mm/tlb_low_64e.S
+@@ -69,6 +69,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+ std r15,EX_TLB_R15(r12)
+ std r10,EX_TLB_CR(r12)
+ #ifdef CONFIG_PPC_FSL_BOOK3E
++START_BTB_FLUSH_SECTION
++ mfspr r11, SPRN_SRR1
++ andi. r10,r11,MSR_PR
++ beq 1f
++ BTB_FLUSH(r10)
++1:
++END_BTB_FLUSH_SECTION
+ std r7,EX_TLB_R7(r12)
+ #endif
+ TLB_MISS_PROLOG_STATS
+--
+2.19.1
+
--- /dev/null
+From d9ea046322ccfb2d0b1db769295b837b959b6d70 Mon Sep 17 00:00:00 2001
+From: Diana Craciun <diana.craciun@nxp.com>
+Date: Thu, 11 Apr 2019 21:46:24 +1000
+Subject: powerpc/fsl: Flush the branch predictor at each kernel entry (32 bit)
+
+commit 7fef436295bf6c05effe682c8797dfcb0deb112a upstream.
+
+In order to protect against speculation attacks on
+indirect branches, the branch predictor is flushed at
+kernel entry to protect for the following situations:
+- userspace process attacking another userspace process
+- userspace process attacking the kernel
+Basically when the privilege level changes (i.e. the kernel
+is entered), the branch predictor state is flushed.
+
+Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/head_booke.h | 6 ++++++
+ arch/powerpc/kernel/head_fsl_booke.S | 15 +++++++++++++++
+ 2 files changed, 21 insertions(+)
+
+diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
+index a620203f7de3..384bb4d80520 100644
+--- a/arch/powerpc/kernel/head_booke.h
++++ b/arch/powerpc/kernel/head_booke.h
+@@ -42,6 +42,9 @@
+ andi. r11, r11, MSR_PR; /* check whether user or kernel */\
+ mr r11, r1; \
+ beq 1f; \
++START_BTB_FLUSH_SECTION \
++ BTB_FLUSH(r11) \
++END_BTB_FLUSH_SECTION \
+ /* if from user, start at top of this thread's kernel stack */ \
+ lwz r11, THREAD_INFO-THREAD(r10); \
+ ALLOC_STACK_FRAME(r11, THREAD_SIZE); \
+@@ -127,6 +130,9 @@
+ stw r9,_CCR(r8); /* save CR on stack */\
+ mfspr r11,exc_level_srr1; /* check whether user or kernel */\
+ DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \
++START_BTB_FLUSH_SECTION \
++ BTB_FLUSH(r10) \
++END_BTB_FLUSH_SECTION \
+ andi. r11,r11,MSR_PR; \
+ mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
+ lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
+diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
+index bf4c6021515f..60a0aeefc4a7 100644
+--- a/arch/powerpc/kernel/head_fsl_booke.S
++++ b/arch/powerpc/kernel/head_fsl_booke.S
+@@ -452,6 +452,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+ mfcr r13
+ stw r13, THREAD_NORMSAVE(3)(r10)
+ DO_KVM BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
++START_BTB_FLUSH_SECTION
++ mfspr r11, SPRN_SRR1
++ andi. r10,r11,MSR_PR
++ beq 1f
++ BTB_FLUSH(r10)
++1:
++END_BTB_FLUSH_SECTION
+ mfspr r10, SPRN_DEAR /* Get faulting address */
+
+ /* If we are faulting a kernel address, we have to use the
+@@ -546,6 +553,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+ mfcr r13
+ stw r13, THREAD_NORMSAVE(3)(r10)
+ DO_KVM BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
++START_BTB_FLUSH_SECTION
++ mfspr r11, SPRN_SRR1
++ andi. r10,r11,MSR_PR
++ beq 1f
++ BTB_FLUSH(r10)
++1:
++END_BTB_FLUSH_SECTION
++
+ mfspr r10, SPRN_SRR0 /* Get faulting address */
+
+ /* If we are faulting a kernel address, we have to use the
+--
+2.19.1
+
--- /dev/null
+From d09d592edd12ca4390f43e5d0195ea810b709b79 Mon Sep 17 00:00:00 2001
+From: Diana Craciun <diana.craciun@nxp.com>
+Date: Thu, 11 Apr 2019 21:46:12 +1000
+Subject: powerpc/fsl: Sanitize the syscall table for NXP PowerPC 32 bit
+ platforms
+
+commit c28218d4abbf4f2035495334d8bfcba64bda4787 upstream.
+
+Used barrier_nospec to sanitize the syscall table.
+
+Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/entry_32.S | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index 370645687cc7..bdd88f9d7926 100644
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -34,6 +34,7 @@
+ #include <asm/ftrace.h>
+ #include <asm/ptrace.h>
+ #include <asm/export.h>
++#include <asm/barrier.h>
+
+ /*
+ * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
+@@ -347,6 +348,15 @@ syscall_dotrace_cont:
+ ori r10,r10,sys_call_table@l
+ slwi r0,r0,2
+ bge- 66f
++
++ barrier_nospec_asm
++ /*
++ * Prevent the load of the handler below (based on the user-passed
++ * system call number) being speculatively executed until the test
++ * against NR_syscalls and branch to .66f above has
++ * committed.
++ */
++
+ lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
+ mtlr r10
+ addi r9,r1,STACK_FRAME_OVERHEAD
+--
+2.19.1
+
--- /dev/null
+From 32fb2f8c5f3bd536bfd650cde52536ebf3553b0e Mon Sep 17 00:00:00 2001
+From: Diana Craciun <diana.craciun@nxp.com>
+Date: Thu, 11 Apr 2019 21:46:27 +1000
+Subject: powerpc/fsl: Update Spectre v2 reporting
+
+commit dfa88658fb0583abb92e062c7a9cd5a5b94f2a46 upstream.
+
+Report branch predictor state flush as a mitigation for
+Spectre variant 2.
+
+Signed-off-by: Diana Craciun <diana.craciun@nxp.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/security.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index 6dc5cdc2b87c..43ce800e73bf 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -213,8 +213,11 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
+
+ if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
+ seq_buf_printf(&s, "(hardware accelerated)");
+- } else
++ } else if (btb_flush_enabled) {
++ seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
++ } else {
+ seq_buf_printf(&s, "Vulnerable");
++ }
+
+ seq_buf_printf(&s, "\n");
+
+--
+2.19.1
+
--- /dev/null
+From 5c69946ec8b82c41b29031839de5362faa0e1e34 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 11 Apr 2019 21:46:17 +1000
+Subject: powerpc/powernv: Query firmware for count cache flush settings
+
+commit 99d54754d3d5f896a8f616b0b6520662bc99d66b upstream.
+
+Look for fw-features properties to determine the appropriate settings
+for the count cache flush, and then call the generic powerpc code to
+set it up based on the security feature flags.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/platforms/powernv/setup.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
+index 17203abf38e8..365e2b620201 100644
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -77,6 +77,12 @@ static void init_fw_feat_flags(struct device_node *np)
+ if (fw_feature_is("enabled", "fw-count-cache-disabled", np))
+ security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
+
++ if (fw_feature_is("enabled", "fw-count-cache-flush-bcctr2,0,0", np))
++ security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
++
++ if (fw_feature_is("enabled", "needs-count-cache-flush-on-context-switch", np))
++ security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
++
+ /*
+ * The features below are enabled by default, so we instead look to see
+ * if firmware has *disabled* them, and clear them if so.
+@@ -123,6 +129,7 @@ static void pnv_setup_rfi_flush(void)
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
+
+ setup_rfi_flush(type, enable);
++ setup_count_cache_flush();
+ }
+
+ static void __init pnv_setup_arch(void)
+--
+2.19.1
+
--- /dev/null
+From 9ebac65e5fbd657e77979e2416b19cb21ddcf804 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 11 Apr 2019 21:46:16 +1000
+Subject: powerpc/pseries: Query hypervisor for count cache flush settings
+
+commit ba72dc171954b782a79d25e0f4b3ed91090c3b1e upstream.
+
+Use the existing hypercall to determine the appropriate settings for
+the count cache flush, and then call the generic powerpc code to set
+it up based on the security feature flags.
+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/hvcall.h | 2 ++
+ arch/powerpc/platforms/pseries/setup.c | 7 +++++++
+ 2 files changed, 9 insertions(+)
+
+diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
+index 9d978102bf0d..9587d301db55 100644
+--- a/arch/powerpc/include/asm/hvcall.h
++++ b/arch/powerpc/include/asm/hvcall.h
+@@ -316,10 +316,12 @@
+ #define H_CPU_CHAR_BRANCH_HINTS_HONORED (1ull << 58) // IBM bit 5
+ #define H_CPU_CHAR_THREAD_RECONFIG_CTRL (1ull << 57) // IBM bit 6
+ #define H_CPU_CHAR_COUNT_CACHE_DISABLED (1ull << 56) // IBM bit 7
++#define H_CPU_CHAR_BCCTR_FLUSH_ASSIST (1ull << 54) // IBM bit 9
+
+ #define H_CPU_BEHAV_FAVOUR_SECURITY (1ull << 63) // IBM bit 0
+ #define H_CPU_BEHAV_L1D_FLUSH_PR (1ull << 62) // IBM bit 1
+ #define H_CPU_BEHAV_BNDS_CHK_SPEC_BAR (1ull << 61) // IBM bit 2
++#define H_CPU_BEHAV_FLUSH_COUNT_CACHE (1ull << 58) // IBM bit 5
+
+ #ifndef __ASSEMBLY__
+ #include <linux/types.h>
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index 91ade7755823..adb09ab87f7c 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -475,6 +475,12 @@ static void init_cpu_char_feature_flags(struct h_cpu_char_result *result)
+ if (result->character & H_CPU_CHAR_COUNT_CACHE_DISABLED)
+ security_ftr_set(SEC_FTR_COUNT_CACHE_DISABLED);
+
++ if (result->character & H_CPU_CHAR_BCCTR_FLUSH_ASSIST)
++ security_ftr_set(SEC_FTR_BCCTR_FLUSH_ASSIST);
++
++ if (result->behaviour & H_CPU_BEHAV_FLUSH_COUNT_CACHE)
++ security_ftr_set(SEC_FTR_FLUSH_COUNT_CACHE);
++
+ /*
+ * The features below are enabled by default, so we instead look to see
+ * if firmware has *disabled* them, and clear them if so.
+@@ -525,6 +531,7 @@ void pseries_setup_rfi_flush(void)
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR);
+
+ setup_rfi_flush(types, enable);
++ setup_count_cache_flush();
+ }
+
+ static void __init pSeries_setup_arch(void)
+--
+2.19.1
+
--- /dev/null
+From 435c43a7aaa2eb50996391fa7ec11945c341d71d Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 11 Apr 2019 21:46:30 +1000
+Subject: powerpc/security: Fix spectre_v2 reporting
+
+commit 92edf8df0ff2ae86cc632eeca0e651fd8431d40d upstream.
+
+When I updated the spectre_v2 reporting to handle software count cache
+flush I got the logic wrong when there's no software count cache
+enabled at all.
+
+The result is that on systems with the software count cache flush
+disabled we print:
+
+ Mitigation: Indirect branch cache disabled, Software count cache flush
+
+Which correctly indicates that the count cache is disabled, but
+incorrectly says the software count cache flush is enabled.
+
+The root of the problem is that we are trying to handle all
+combinations of options. But we know now that we only expect to see
+the software count cache flush enabled if the other options are false.
+
+So split the two cases, which simplifies the logic and fixes the bug.
+We were also missing a space before "(hardware accelerated)".
+
+The result is we see one of:
+
+ Mitigation: Indirect branch serialisation (kernel only)
+ Mitigation: Indirect branch cache disabled
+ Mitigation: Software count cache flush
+ Mitigation: Software count cache flush (hardware accelerated)
+
+Fixes: ee13cb249fab ("powerpc/64s: Add support for software count cache flush")
+Cc: stable@vger.kernel.org # v4.19+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Reviewed-by: Michael Neuling <mikey@neuling.org>
+Reviewed-by: Diana Craciun <diana.craciun@nxp.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/security.c | 23 ++++++++---------------
+ 1 file changed, 8 insertions(+), 15 deletions(-)
+
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index 43ce800e73bf..30542e833ebe 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -190,29 +190,22 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
+ bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
+ ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
+
+- if (bcs || ccd || count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
+- bool comma = false;
++ if (bcs || ccd) {
+ seq_buf_printf(&s, "Mitigation: ");
+
+- if (bcs) {
++ if (bcs)
+ seq_buf_printf(&s, "Indirect branch serialisation (kernel only)");
+- comma = true;
+- }
+
+- if (ccd) {
+- if (comma)
+- seq_buf_printf(&s, ", ");
+- seq_buf_printf(&s, "Indirect branch cache disabled");
+- comma = true;
+- }
+-
+- if (comma)
++ if (bcs && ccd)
+ seq_buf_printf(&s, ", ");
+
+- seq_buf_printf(&s, "Software count cache flush");
++ if (ccd)
++ seq_buf_printf(&s, "Indirect branch cache disabled");
++ } else if (count_cache_flush_type != COUNT_CACHE_FLUSH_NONE) {
++ seq_buf_printf(&s, "Mitigation: Software count cache flush");
+
+ if (count_cache_flush_type == COUNT_CACHE_FLUSH_HW)
+- seq_buf_printf(&s, "(hardware accelerated)");
++ seq_buf_printf(&s, " (hardware accelerated)");
+ } else if (btb_flush_enabled) {
+ seq_buf_printf(&s, "Mitigation: Branch predictor state flush");
+ } else {
+--
+2.19.1
+
--- /dev/null
+From eeb003b800dd8bf95ebfedeb32a388b46974fe16 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 11 Apr 2019 21:46:02 +1000
+Subject: powerpc: Use barrier_nospec in copy_from_user()
+
+commit ddf35cf3764b5a182b178105f57515b42e2634f8 upstream.
+
+Based on the x86 commit doing the same.
+
+See commit 304ec1b05031 ("x86/uaccess: Use __uaccess_begin_nospec()
+and uaccess_try_nospec") and b3bbfb3fb5d2 ("x86: Introduce
+__uaccess_begin_nospec() and uaccess_try_nospec") for more detail.
+
+In all cases we are ordering the load from the potentially
+user-controlled pointer vs a previous branch based on an access_ok()
+check or similar.
+
+Base on a patch from Michal Suchanek.
+
+Signed-off-by: Michal Suchanek <msuchanek@suse.de>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/uaccess.h | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index 31913b3ac7ab..da852153c1f8 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -269,6 +269,7 @@ do { \
+ __chk_user_ptr(ptr); \
+ if (!is_kernel_addr((unsigned long)__gu_addr)) \
+ might_fault(); \
++ barrier_nospec(); \
+ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+@@ -280,8 +281,10 @@ do { \
+ unsigned long __gu_val = 0; \
+ __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ might_fault(); \
+- if (access_ok(VERIFY_READ, __gu_addr, (size))) \
++ if (access_ok(VERIFY_READ, __gu_addr, (size))) { \
++ barrier_nospec(); \
+ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
++ } \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+ })
+@@ -292,6 +295,7 @@ do { \
+ unsigned long __gu_val; \
+ __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+ __chk_user_ptr(ptr); \
++ barrier_nospec(); \
+ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
+ __gu_err; \
+@@ -348,15 +352,19 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
+
+ switch (n) {
+ case 1:
++ barrier_nospec();
+ __get_user_size(*(u8 *)to, from, 1, ret);
+ break;
+ case 2:
++ barrier_nospec();
+ __get_user_size(*(u16 *)to, from, 2, ret);
+ break;
+ case 4:
++ barrier_nospec();
+ __get_user_size(*(u32 *)to, from, 4, ret);
+ break;
+ case 8:
++ barrier_nospec();
+ __get_user_size(*(u64 *)to, from, 8, ret);
+ break;
+ }
+@@ -366,6 +374,7 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
+
+ check_object_size(to, n, false);
+
++ barrier_nospec();
+ return __copy_tofrom_user((__force void __user *)to, from, n);
+ }
+
+--
+2.19.1
+
--- /dev/null
+From 5e93a05855b2096d85c265b379898b444b273cb9 Mon Sep 17 00:00:00 2001
+From: Michael Ellerman <mpe@ellerman.id.au>
+Date: Thu, 11 Apr 2019 21:46:05 +1000
+Subject: powerpc64s: Show ori31 availability in spectre_v1 sysfs file not v2
+
+commit 6d44acae1937b81cf8115ada8958e04f601f3f2e upstream.
+
+When I added the spectre_v2 information in sysfs, I included the
+availability of the ori31 speculation barrier.
+
+Although the ori31 barrier can be used to mitigate v2, it's primarily
+intended as a spectre v1 mitigation. Spectre v2 is mitigated by
+hardware changes.
+
+So rework the sysfs files to show the ori31 information in the
+spectre_v1 file, rather than v2.
+
+Currently we display eg:
+
+ $ grep . spectre_v*
+ spectre_v1:Mitigation: __user pointer sanitization
+ spectre_v2:Mitigation: Indirect branch cache disabled, ori31 speculation barrier enabled
+
+After:
+
+ $ grep . spectre_v*
+ spectre_v1:Mitigation: __user pointer sanitization, ori31 speculation barrier enabled
+ spectre_v2:Mitigation: Indirect branch cache disabled
+
+Fixes: d6fbe1c55c55 ("powerpc/64s: Wire up cpu_show_spectre_v2()")
+Cc: stable@vger.kernel.org # v4.17+
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/security.c | 27 +++++++++++++++++----------
+ 1 file changed, 17 insertions(+), 10 deletions(-)
+
+diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
+index f189f946d935..bf298d0c475f 100644
+--- a/arch/powerpc/kernel/security.c
++++ b/arch/powerpc/kernel/security.c
+@@ -118,25 +118,35 @@ ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, cha
+
+ ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+- if (!security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR))
+- return sprintf(buf, "Not affected\n");
++ struct seq_buf s;
++
++ seq_buf_init(&s, buf, PAGE_SIZE - 1);
+
+- if (barrier_nospec_enabled)
+- return sprintf(buf, "Mitigation: __user pointer sanitization\n");
++ if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) {
++ if (barrier_nospec_enabled)
++ seq_buf_printf(&s, "Mitigation: __user pointer sanitization");
++ else
++ seq_buf_printf(&s, "Vulnerable");
+
+- return sprintf(buf, "Vulnerable\n");
++ if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31))
++ seq_buf_printf(&s, ", ori31 speculation barrier enabled");
++
++ seq_buf_printf(&s, "\n");
++ } else
++ seq_buf_printf(&s, "Not affected\n");
++
++ return s.len;
+ }
+
+ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
+ {
+- bool bcs, ccd, ori;
+ struct seq_buf s;
++ bool bcs, ccd;
+
+ seq_buf_init(&s, buf, PAGE_SIZE - 1);
+
+ bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED);
+ ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED);
+- ori = security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31);
+
+ if (bcs || ccd) {
+ seq_buf_printf(&s, "Mitigation: ");
+@@ -152,9 +162,6 @@ ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, c
+ } else
+ seq_buf_printf(&s, "Vulnerable");
+
+- if (ori)
+- seq_buf_printf(&s, ", ori31 speculation barrier enabled");
+-
+ seq_buf_printf(&s, "\n");
+
+ return s.len;
+--
+2.19.1
+
x86-power-32-move-sysenter-msr-restoration-to-fix_pr.patch
x86-power-make-restore_processor_context-sane.patch
powerpc-tm-limit-tm-code-inside-ppc_transactional_me.patch
+kbuild-clang-choose-gcc_toolchain_dir-not-on-ld.patch
+x86-vdso-use-ld-instead-of-cc-to-link.patch
+x86-vdso-drop-implicit-common-page-size-linker-flag.patch
+lib-string.c-implement-a-basic-bcmp.patch
+fsnotify-provide-framework-for-dropping-srcu-lock-in.patch
+fsnotify-pass-fsnotify_iter_info-into-handle_event-h.patch
+fanotify-release-srcu-lock-when-waiting-for-userspac.patch
+powerpc-fix-invalid-use-of-register-expressions.patch
+powerpc-64s-add-barrier_nospec.patch
+powerpc-64s-add-support-for-ori-barrier_nospec-patch.patch
+powerpc-avoid-code-patching-freed-init-sections.patch
+powerpc-64s-patch-barrier_nospec-in-modules.patch
+powerpc-64s-enable-barrier_nospec-based-on-firmware-.patch
+powerpc-use-barrier_nospec-in-copy_from_user.patch
+powerpc-64-use-barrier_nospec-in-syscall-entry.patch
+powerpc-64s-enhance-the-information-in-cpu_show_spec.patch
+powerpc64s-show-ori31-availability-in-spectre_v1-sys.patch
+powerpc-64-disable-the-speculation-barrier-from-the-.patch
+powerpc-64-make-stf-barrier-ppc_book3s_64-specific.patch
+powerpc-64-add-config_ppc_barrier_nospec.patch
+powerpc-64-call-setup_barrier_nospec-from-setup_arch.patch
+powerpc-64-make-meltdown-reporting-book3s-64-specifi.patch
+powerpc-fsl-add-barrier_nospec-implementation-for-nx.patch
+powerpc-fsl-sanitize-the-syscall-table-for-nxp-power.patch
+powerpc-asm-add-a-patch_site-macro-helpers-for-patch.patch
+powerpc-64s-add-new-security-feature-flags-for-count.patch
+powerpc-64s-add-support-for-software-count-cache-flu.patch
+powerpc-pseries-query-hypervisor-for-count-cache-flu.patch
+powerpc-powernv-query-firmware-for-count-cache-flush.patch
+powerpc-fsl-add-infrastructure-to-fixup-branch-predi.patch
+powerpc-fsl-add-macro-to-flush-the-branch-predictor.patch
+powerpc-fsl-fix-spectre_v2-mitigations-reporting.patch
+powerpc-fsl-emulate-sprn_bucsr-register.patch
+powerpc-fsl-add-nospectre_v2-command-line-argument.patch
+powerpc-fsl-flush-the-branch-predictor-at-each-kerne.patch
+powerpc-fsl-flush-the-branch-predictor-at-each-kerne.patch-25870
+powerpc-fsl-flush-branch-predictor-when-entering-kvm.patch
+powerpc-fsl-enable-runtime-patching-if-nospectre_v2-.patch
+powerpc-fsl-update-spectre-v2-reporting.patch
+powerpc-fsl-fixed-warning-orphan-section-__btb_flush.patch
+powerpc-fsl-fix-the-flush-of-branch-predictor.patch
+powerpc-security-fix-spectre_v2-reporting.patch
--- /dev/null
+From 2e75212a5370ef38178eeb09e8bf3a2c513dbda6 Mon Sep 17 00:00:00 2001
+From: Nick Desaulniers <ndesaulniers@google.com>
+Date: Thu, 6 Dec 2018 11:12:31 -0800
+Subject: x86/vdso: Drop implicit common-page-size linker flag
+
+GNU linker's -z common-page-size's default value is based on the target
+architecture. arch/x86/entry/vdso/Makefile sets it to the architecture
+default, which is implicit and redundant. Drop it.
+
+Fixes: 2aae950b21e4 ("x86_64: Add vDSO for x86-64 with gettimeofday/clock_gettime/getcpu")
+Reported-by: Dmitry Golovin <dima@golovin.in>
+Reported-by: Bill Wendling <morbo@google.com>
+Suggested-by: Dmitry Golovin <dima@golovin.in>
+Suggested-by: Rui Ueyama <ruiu@google.com>
+Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Andy Lutomirski <luto@kernel.org>
+Cc: Andi Kleen <andi@firstfloor.org>
+Cc: Fangrui Song <maskray@google.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: x86-ml <x86@kernel.org>
+Link: https://lkml.kernel.org/r/20181206191231.192355-1-ndesaulniers@google.com
+Link: https://bugs.llvm.org/show_bug.cgi?id=38774
+Link: https://github.com/ClangBuiltLinux/linux/issues/31
+Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/entry/vdso/Makefile | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
+index 2ae92c6b1de6..756dc9432d15 100644
+--- a/arch/x86/entry/vdso/Makefile
++++ b/arch/x86/entry/vdso/Makefile
+@@ -48,7 +48,7 @@ targets += $(vdso_img_sodbg)
+ export CPPFLAGS_vdso.lds += -P -C
+
+ VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
+- -z max-page-size=4096 -z common-page-size=4096
++ -z max-page-size=4096
+
+ $(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+ $(call if_changed,vdso)
+@@ -95,7 +95,7 @@ CFLAGS_REMOVE_vvar.o = -pg
+
+ CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
+ VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
+- -z max-page-size=4096 -z common-page-size=4096
++ -z max-page-size=4096
+
+ # 64-bit objects to re-brand as x32
+ vobjs64-for-x32 := $(filter-out $(vobjs-nox32),$(vobjs-y))
+--
+2.19.1
+
--- /dev/null
+From a1da6d1e5f14f5e40065d031a53ced0395d1b4b0 Mon Sep 17 00:00:00 2001
+From: Alistair Strachan <astrachan@google.com>
+Date: Fri, 3 Aug 2018 10:39:31 -0700
+Subject: x86: vdso: Use $LD instead of $CC to link
+
+The vdso{32,64}.so can fail to link with CC=clang when clang tries to find
+a suitable GCC toolchain to link these libraries with.
+
+/usr/bin/ld: arch/x86/entry/vdso/vclock_gettime.o:
+ access beyond end of merged section (782)
+
+This happens because the host environment leaked into the cross compiler
+environment due to the way clang searches for suitable GCC toolchains.
+
+Clang is a retargetable compiler, and each invocation of it must provide
+--target=<something> --gcc-toolchain=<something> to allow it to find the
+correct binutils for cross compilation. These flags had been added to
+KBUILD_CFLAGS, but the vdso code uses CC and not KBUILD_CFLAGS (for various
+reasons) which breaks clang's ability to find the correct linker when cross
+compiling.
+
+Most of the time this goes unnoticed because the host linker is new enough
+to work anyway, or is incompatible and skipped, but this cannot be reliably
+assumed.
+
+This change alters the vdso makefile to just use LD directly, which
+bypasses clang and thus the searching problem. The makefile will just use
+${CROSS_COMPILE}ld instead, which is always what we want. This matches the
+method used to link vmlinux.
+
+This drops references to DISABLE_LTO; this option doesn't seem to be set
+anywhere, and not knowing what its possible values are, it's not clear how
+to convert it from CC to LD flag.
+
+Signed-off-by: Alistair Strachan <astrachan@google.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Andy Lutomirski <luto@kernel.org>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: kernel-team@android.com
+Cc: joel@joelfernandes.org
+Cc: Andi Kleen <andi.kleen@intel.com>
+Link: https://lkml.kernel.org/r/20180803173931.117515-1-astrachan@google.com
+Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/entry/vdso/Makefile | 22 +++++++++-------------
+ 1 file changed, 9 insertions(+), 13 deletions(-)
+
+diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
+index d5409660f5de..2ae92c6b1de6 100644
+--- a/arch/x86/entry/vdso/Makefile
++++ b/arch/x86/entry/vdso/Makefile
+@@ -47,10 +47,8 @@ targets += $(vdso_img_sodbg)
+
+ export CPPFLAGS_vdso.lds += -P -C
+
+-VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
+- -Wl,--no-undefined \
+- -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 \
+- $(DISABLE_LTO)
++VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -soname linux-vdso.so.1 --no-undefined \
++ -z max-page-size=4096 -z common-page-size=4096
+
+ $(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+ $(call if_changed,vdso)
+@@ -96,10 +94,8 @@ CFLAGS_REMOVE_vvar.o = -pg
+ #
+
+ CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
+-VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
+- -Wl,-soname=linux-vdso.so.1 \
+- -Wl,-z,max-page-size=4096 \
+- -Wl,-z,common-page-size=4096
++VDSO_LDFLAGS_vdsox32.lds = -m elf32_x86_64 -soname linux-vdso.so.1 \
++ -z max-page-size=4096 -z common-page-size=4096
+
+ # 64-bit objects to re-brand as x32
+ vobjs64-for-x32 := $(filter-out $(vobjs-nox32),$(vobjs-y))
+@@ -127,7 +123,7 @@ $(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE
+ $(call if_changed,vdso)
+
+ CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
+-VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
++VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -soname linux-gate.so.1
+
+ # This makes sure the $(obj) subdirectory exists even though vdso32/
+ # is not a kbuild sub-make subdirectory.
+@@ -165,13 +161,13 @@ $(obj)/vdso32.so.dbg: FORCE \
+ # The DSO images are built using a special linker script.
+ #
+ quiet_cmd_vdso = VDSO $@
+- cmd_vdso = $(CC) -nostdlib -o $@ \
++ cmd_vdso = $(LD) -nostdlib -o $@ \
+ $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
+- -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
++ -T $(filter %.lds,$^) $(filter %.o,$^) && \
+ sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
+
+-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=both) \
+- $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
++VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
++ $(call ld-option, --build-id) -Bsymbolic
+ GCOV_PROFILE := n
+
+ #
+--
+2.19.1
+