git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 21 Nov 2019 22:38:20 +0000 (23:38 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 21 Nov 2019 22:38:20 +0000 (23:38 +0100)
added patches:
block-introduce-blk_rq_is_passthrough.patch
fbdev-ditch-fb_edid_add_monspecs.patch
kprobes-x86-prohibit-probing-on-exception-masking-instructions.patch
libata-have-ata_scsi_rw_xlat-fail-invalid-passthrough-requests.patch
uprobes-x86-prohibit-probing-on-mov-ss-instruction.patch
x86-atomic-fix-smp_mb__-before-after-_atomic.patch

queue-4.9/block-introduce-blk_rq_is_passthrough.patch [new file with mode: 0644]
queue-4.9/fbdev-ditch-fb_edid_add_monspecs.patch [new file with mode: 0644]
queue-4.9/kprobes-x86-prohibit-probing-on-exception-masking-instructions.patch [new file with mode: 0644]
queue-4.9/libata-have-ata_scsi_rw_xlat-fail-invalid-passthrough-requests.patch [new file with mode: 0644]
queue-4.9/series
queue-4.9/uprobes-x86-prohibit-probing-on-mov-ss-instruction.patch [new file with mode: 0644]
queue-4.9/x86-atomic-fix-smp_mb__-before-after-_atomic.patch [new file with mode: 0644]

diff --git a/queue-4.9/block-introduce-blk_rq_is_passthrough.patch b/queue-4.9/block-introduce-blk_rq_is_passthrough.patch
new file mode 100644 (file)
index 0000000..0daf0ae
--- /dev/null
@@ -0,0 +1,55 @@
+From 57292b58ddb58689e8c3b4c6eadbef10d9ca44dd Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Tue, 31 Jan 2017 16:57:29 +0100
+Subject: block: introduce blk_rq_is_passthrough
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit 57292b58ddb58689e8c3b4c6eadbef10d9ca44dd upstream.
+
+This can be used to check for fs vs non-fs requests and basically
+removes all knowledge of BLOCK_PC specifics from the block layer,
+as well as preparing for the removal of the cmd_type field in struct request.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+[only take the blkdev.h changes as we only want the function for backported
+patches - gregkh]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/blkdev.h |    9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -212,6 +212,11 @@ struct request {
+       (req)->cmd_flags |= flags;              \
+ } while (0)
++static inline bool blk_rq_is_passthrough(struct request *rq)
++{
++      return rq->cmd_type != REQ_TYPE_FS;
++}
++
+ static inline unsigned short req_get_ioprio(struct request *req)
+ {
+       return req->ioprio;
+@@ -663,7 +668,7 @@ static inline void blk_clear_rl_full(str
+ static inline bool rq_mergeable(struct request *rq)
+ {
+-      if (rq->cmd_type != REQ_TYPE_FS)
++      if (blk_rq_is_passthrough(rq))
+               return false;
+       if (req_op(rq) == REQ_OP_FLUSH)
+@@ -910,7 +915,7 @@ static inline unsigned int blk_rq_get_ma
+ {
+       struct request_queue *q = rq->q;
+-      if (unlikely(rq->cmd_type != REQ_TYPE_FS))
++      if (blk_rq_is_passthrough(rq))
+               return q->limits.max_hw_sectors;
+       if (!q->limits.chunk_sectors ||
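
For readers skimming the queue: the helper taken above only centralizes the rq->cmd_type != REQ_TYPE_FS test that callers used to open-code, and is backported solely so the later libata fix can use it. A minimal user-space sketch of the idea follows; the enum values, the cut-down struct request and the main() driver are illustrative stand-ins, not the 4.9 kernel definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the 4.9-era request type field; the enum
 * values here are arbitrary, unlike the kernel's. */
enum req_type { REQ_TYPE_FS, REQ_TYPE_BLOCK_PC, REQ_TYPE_DRV_PRIV };

struct request { enum req_type cmd_type; };

/* Mirrors the backported helper: anything that is not a regular
 * filesystem request is treated as a passthrough request. */
static bool blk_rq_is_passthrough(const struct request *rq)
{
        return rq->cmd_type != REQ_TYPE_FS;
}

int main(void)
{
        struct request fs = { .cmd_type = REQ_TYPE_FS };
        struct request pc = { .cmd_type = REQ_TYPE_BLOCK_PC };

        printf("fs request passthrough?       %d\n", blk_rq_is_passthrough(&fs));
        printf("BLOCK_PC request passthrough? %d\n", blk_rq_is_passthrough(&pc));
        return 0;
}
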
diff --git a/queue-4.9/fbdev-ditch-fb_edid_add_monspecs.patch b/queue-4.9/fbdev-ditch-fb_edid_add_monspecs.patch
new file mode 100644 (file)
index 0000000..46a475f
--- /dev/null
@@ -0,0 +1,237 @@
+From 3b8720e63f4a1fc6f422a49ecbaa3b59c86d5aaf Mon Sep 17 00:00:00 2001
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+Date: Sun, 21 Jul 2019 22:19:56 +0200
+Subject: fbdev: Ditch fb_edid_add_monspecs
+
+From: Daniel Vetter <daniel.vetter@ffwll.ch>
+
+commit 3b8720e63f4a1fc6f422a49ecbaa3b59c86d5aaf upstream.
+
+It's dead code ever since
+
+commit 34280340b1dc74c521e636f45cd728f9abf56ee2
+Author: Geert Uytterhoeven <geert+renesas@glider.be>
+Date:   Fri Dec 4 17:01:43 2015 +0100
+
+    fbdev: Remove unused SH-Mobile HDMI driver
+
+Also with this gone we can remove the cea_modes db. This entire thing
+is massively incomplete anyway, compared to the CEA parsing that
+drm_edid.c does.
+
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Tavis Ormandy <taviso@gmail.com>
+Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
+Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20190721201956.941-1-daniel.vetter@ffwll.ch
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/video/fbdev/core/fbmon.c  |   95 --------------------------------------
+ drivers/video/fbdev/core/modedb.c |   57 ----------------------
+ include/linux/fb.h                |    3 -
+ 3 files changed, 155 deletions(-)
+
+--- a/drivers/video/fbdev/core/fbmon.c
++++ b/drivers/video/fbdev/core/fbmon.c
+@@ -997,97 +997,6 @@ void fb_edid_to_monspecs(unsigned char *
+       DPRINTK("========================================\n");
+ }
+-/**
+- * fb_edid_add_monspecs() - add monitor video modes from E-EDID data
+- * @edid:     128 byte array with an E-EDID block
+- * @spacs:    monitor specs to be extended
+- */
+-void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
+-{
+-      unsigned char *block;
+-      struct fb_videomode *m;
+-      int num = 0, i;
+-      u8 svd[64], edt[(128 - 4) / DETAILED_TIMING_DESCRIPTION_SIZE];
+-      u8 pos = 4, svd_n = 0;
+-
+-      if (!edid)
+-              return;
+-
+-      if (!edid_checksum(edid))
+-              return;
+-
+-      if (edid[0] != 0x2 ||
+-          edid[2] < 4 || edid[2] > 128 - DETAILED_TIMING_DESCRIPTION_SIZE)
+-              return;
+-
+-      DPRINTK("  Short Video Descriptors\n");
+-
+-      while (pos < edid[2]) {
+-              u8 len = edid[pos] & 0x1f, type = (edid[pos] >> 5) & 7;
+-              pr_debug("Data block %u of %u bytes\n", type, len);
+-              if (type == 2) {
+-                      for (i = pos; i < pos + len; i++) {
+-                              u8 idx = edid[pos + i] & 0x7f;
+-                              svd[svd_n++] = idx;
+-                              pr_debug("N%sative mode #%d\n",
+-                                       edid[pos + i] & 0x80 ? "" : "on-n", idx);
+-                      }
+-              } else if (type == 3 && len >= 3) {
+-                      /* Check Vendor Specific Data Block.  For HDMI,
+-                         it is always 00-0C-03 for HDMI Licensing, LLC. */
+-                      if (edid[pos + 1] == 3 && edid[pos + 2] == 0xc &&
+-                          edid[pos + 3] == 0)
+-                              specs->misc |= FB_MISC_HDMI;
+-              }
+-              pos += len + 1;
+-      }
+-
+-      block = edid + edid[2];
+-
+-      DPRINTK("  Extended Detailed Timings\n");
+-
+-      for (i = 0; i < (128 - edid[2]) / DETAILED_TIMING_DESCRIPTION_SIZE;
+-           i++, block += DETAILED_TIMING_DESCRIPTION_SIZE)
+-              if (PIXEL_CLOCK)
+-                      edt[num++] = block - edid;
+-
+-      /* Yikes, EDID data is totally useless */
+-      if (!(num + svd_n))
+-              return;
+-
+-      m = kzalloc((specs->modedb_len + num + svd_n) *
+-                     sizeof(struct fb_videomode), GFP_KERNEL);
+-
+-      if (!m)
+-              return;
+-
+-      memcpy(m, specs->modedb, specs->modedb_len * sizeof(struct fb_videomode));
+-
+-      for (i = specs->modedb_len; i < specs->modedb_len + num; i++) {
+-              get_detailed_timing(edid + edt[i - specs->modedb_len], &m[i]);
+-              if (i == specs->modedb_len)
+-                      m[i].flag |= FB_MODE_IS_FIRST;
+-              pr_debug("Adding %ux%u@%u\n", m[i].xres, m[i].yres, m[i].refresh);
+-      }
+-
+-      for (i = specs->modedb_len + num; i < specs->modedb_len + num + svd_n; i++) {
+-              int idx = svd[i - specs->modedb_len - num];
+-              if (!idx || idx >= ARRAY_SIZE(cea_modes)) {
+-                      pr_warning("Reserved SVD code %d\n", idx);
+-              } else if (!cea_modes[idx].xres) {
+-                      pr_warning("Unimplemented SVD code %d\n", idx);
+-              } else {
+-                      memcpy(&m[i], cea_modes + idx, sizeof(m[i]));
+-                      pr_debug("Adding SVD #%d: %ux%u@%u\n", idx,
+-                               m[i].xres, m[i].yres, m[i].refresh);
+-              }
+-      }
+-
+-      kfree(specs->modedb);
+-      specs->modedb = m;
+-      specs->modedb_len = specs->modedb_len + num + svd_n;
+-}
+-
+ /*
+  * VESA Generalized Timing Formula (GTF)
+  */
+@@ -1497,9 +1406,6 @@ int fb_parse_edid(unsigned char *edid, s
+ void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs)
+ {
+ }
+-void fb_edid_add_monspecs(unsigned char *edid, struct fb_monspecs *specs)
+-{
+-}
+ void fb_destroy_modedb(struct fb_videomode *modedb)
+ {
+ }
+@@ -1607,7 +1513,6 @@ EXPORT_SYMBOL(fb_firmware_edid);
+ EXPORT_SYMBOL(fb_parse_edid);
+ EXPORT_SYMBOL(fb_edid_to_monspecs);
+-EXPORT_SYMBOL(fb_edid_add_monspecs);
+ EXPORT_SYMBOL(fb_get_mode);
+ EXPORT_SYMBOL(fb_validate_mode);
+ EXPORT_SYMBOL(fb_destroy_modedb);
+--- a/drivers/video/fbdev/core/modedb.c
++++ b/drivers/video/fbdev/core/modedb.c
+@@ -289,63 +289,6 @@ static const struct fb_videomode modedb[
+ };
+ #ifdef CONFIG_FB_MODE_HELPERS
+-const struct fb_videomode cea_modes[65] = {
+-      /* #1: 640x480p@59.94/60Hz */
+-      [1] = {
+-              NULL, 60, 640, 480, 39722, 48, 16, 33, 10, 96, 2, 0,
+-              FB_VMODE_NONINTERLACED, 0,
+-      },
+-      /* #3: 720x480p@59.94/60Hz */
+-      [3] = {
+-              NULL, 60, 720, 480, 37037, 60, 16, 30, 9, 62, 6, 0,
+-              FB_VMODE_NONINTERLACED, 0,
+-      },
+-      /* #5: 1920x1080i@59.94/60Hz */
+-      [5] = {
+-              NULL, 60, 1920, 1080, 13763, 148, 88, 15, 2, 44, 5,
+-              FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+-              FB_VMODE_INTERLACED, 0,
+-      },
+-      /* #7: 720(1440)x480iH@59.94/60Hz */
+-      [7] = {
+-              NULL, 60, 1440, 480, 18554/*37108*/, 114, 38, 15, 4, 124, 3, 0,
+-              FB_VMODE_INTERLACED, 0,
+-      },
+-      /* #9: 720(1440)x240pH@59.94/60Hz */
+-      [9] = {
+-              NULL, 60, 1440, 240, 18554, 114, 38, 16, 4, 124, 3, 0,
+-              FB_VMODE_NONINTERLACED, 0,
+-      },
+-      /* #18: 720x576pH@50Hz */
+-      [18] = {
+-              NULL, 50, 720, 576, 37037, 68, 12, 39, 5, 64, 5, 0,
+-              FB_VMODE_NONINTERLACED, 0,
+-      },
+-      /* #19: 1280x720p@50Hz */
+-      [19] = {
+-              NULL, 50, 1280, 720, 13468, 220, 440, 20, 5, 40, 5,
+-              FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+-              FB_VMODE_NONINTERLACED, 0,
+-      },
+-      /* #20: 1920x1080i@50Hz */
+-      [20] = {
+-              NULL, 50, 1920, 1080, 13480, 148, 528, 15, 5, 528, 5,
+-              FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+-              FB_VMODE_INTERLACED, 0,
+-      },
+-      /* #32: 1920x1080p@23.98/24Hz */
+-      [32] = {
+-              NULL, 24, 1920, 1080, 13468, 148, 638, 36, 4, 44, 5,
+-              FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+-              FB_VMODE_NONINTERLACED, 0,
+-      },
+-      /* #35: (2880)x480p4x@59.94/60Hz */
+-      [35] = {
+-              NULL, 60, 2880, 480, 9250, 240, 64, 30, 9, 248, 6, 0,
+-              FB_VMODE_NONINTERLACED, 0,
+-      },
+-};
+-
+ const struct fb_videomode vesa_modes[] = {
+       /* 0 640x350-85 VESA */
+       { NULL, 85, 640, 350, 31746,  96, 32, 60, 32, 64, 3,
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -732,8 +732,6 @@ extern int fb_parse_edid(unsigned char *
+ extern const unsigned char *fb_firmware_edid(struct device *device);
+ extern void fb_edid_to_monspecs(unsigned char *edid,
+                               struct fb_monspecs *specs);
+-extern void fb_edid_add_monspecs(unsigned char *edid,
+-                               struct fb_monspecs *specs);
+ extern void fb_destroy_modedb(struct fb_videomode *modedb);
+ extern int fb_find_mode_cvt(struct fb_videomode *mode, int margins, int rb);
+ extern unsigned char *fb_ddc_read(struct i2c_adapter *adapter);
+@@ -807,7 +805,6 @@ struct dmt_videomode {
+ extern const char *fb_mode_option;
+ extern const struct fb_videomode vesa_modes[];
+-extern const struct fb_videomode cea_modes[65];
+ extern const struct dmt_videomode dmt_modes[];
+ struct fb_modelist {
diff --git a/queue-4.9/kprobes-x86-prohibit-probing-on-exception-masking-instructions.patch b/queue-4.9/kprobes-x86-prohibit-probing-on-exception-masking-instructions.patch
new file mode 100644 (file)
index 0000000..ac16bfc
--- /dev/null
@@ -0,0 +1,83 @@
+From ee6a7354a3629f9b65bc18dbe393503e9440d6f5 Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Wed, 9 May 2018 21:58:15 +0900
+Subject: kprobes/x86: Prohibit probing on exception masking instructions
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit ee6a7354a3629f9b65bc18dbe393503e9440d6f5 upstream.
+
+Since MOV SS and POP SS instructions will delay the exceptions until the
+next instruction is executed, single-stepping on them by kprobes must be
+prohibited.
+
+However, kprobes usually executes those instructions directly on the
+trampoline buffer (a.k.a. kprobe-booster), except for kprobes which have a
+post_handler. Thus if a kprobe user probes MOV SS with a post_handler, it
+will single-step on the MOV SS.
+
+This means it is safe if it is used via ftrace or perf/bpf, since those
+don't use the post_handler.
+
+Anyway, since stack switching is a rare case, it is safer to just
+reject kprobes on such instructions.
+
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+Cc: Francis Deslauriers <francis.deslauriers@efficios.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: "H . Peter Anvin" <hpa@zytor.com>
+Cc: Yonghong Song <yhs@fb.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: "David S . Miller" <davem@davemloft.net>
+Link: https://lkml.kernel.org/r/152587069574.17316.3311695234863248641.stgit@devbox
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/insn.h    |   18 ++++++++++++++++++
+ arch/x86/kernel/kprobes/core.c |    4 ++++
+ 2 files changed, 22 insertions(+)
+
+--- a/arch/x86/include/asm/insn.h
++++ b/arch/x86/include/asm/insn.h
+@@ -208,4 +208,22 @@ static inline int insn_offset_immediate(
+       return insn_offset_displacement(insn) + insn->displacement.nbytes;
+ }
++#define POP_SS_OPCODE 0x1f
++#define MOV_SREG_OPCODE 0x8e
++
++/*
++ * Intel SDM Vol.3A 6.8.3 states;
++ * "Any single-step trap that would be delivered following the MOV to SS
++ * instruction or POP to SS instruction (because EFLAGS.TF is 1) is
++ * suppressed."
++ * This function returns true if @insn is MOV SS or POP SS. On these
++ * instructions, single stepping is suppressed.
++ */
++static inline int insn_masking_exception(struct insn *insn)
++{
++      return insn->opcode.bytes[0] == POP_SS_OPCODE ||
++              (insn->opcode.bytes[0] == MOV_SREG_OPCODE &&
++               X86_MODRM_REG(insn->modrm.bytes[0]) == 2);
++}
++
+ #endif /* _ASM_X86_INSN_H */
+--- a/arch/x86/kernel/kprobes/core.c
++++ b/arch/x86/kernel/kprobes/core.c
+@@ -376,6 +376,10 @@ int __copy_instruction(u8 *dest, u8 *src
+               return 0;
+       memcpy(dest, insn.kaddr, length);
++      /* We should not singlestep on the exception masking instructions */
++      if (insn_masking_exception(&insn))
++              return 0;
++
+ #ifdef CONFIG_X86_64
+       if (insn_rip_relative(&insn)) {
+               s64 newdisp;
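
To make the new decode check concrete: insn_masking_exception() looks only at the first opcode byte and, for MOV to a segment register (0x8e), at the ModRM reg field. The stand-alone sketch below models that logic on raw bytes instead of the kernel's struct insn decoder; the helper names (masks_exceptions, modrm_reg) are made up for the example.

#include <stdbool.h>
#include <stdio.h>

#define POP_SS_OPCODE   0x1f    /* single-byte opcode the patch rejects */
#define MOV_SREG_OPCODE 0x8e    /* MOV to segment register */

/* Extract the reg field (bits 5:3) of a ModRM byte, as X86_MODRM_REG() does. */
static unsigned int modrm_reg(unsigned char modrm)
{
        return (modrm >> 3) & 0x7;
}

/* Models insn_masking_exception(): true for the POP_SS_OPCODE byte, or for
 * MOV to a segment register when the destination (ModRM.reg) is SS (2). */
static bool masks_exceptions(unsigned char opcode, unsigned char modrm)
{
        return opcode == POP_SS_OPCODE ||
               (opcode == MOV_SREG_OPCODE && modrm_reg(modrm) == 2);
}

int main(void)
{
        /* 8e d0 encodes mov %eax,%ss: ModRM 0xd0 has reg field 2 (SS). */
        printf("mov %%eax,%%ss rejected: %d\n", masks_exceptions(0x8e, 0xd0));
        /* 8e d8 encodes mov %eax,%ds: reg field 3, so probing stays allowed. */
        printf("mov %%eax,%%ds rejected: %d\n", masks_exceptions(0x8e, 0xd8));
        printf("POP_SS_OPCODE rejected:  %d\n", masks_exceptions(POP_SS_OPCODE, 0));
        return 0;
}
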
diff --git a/queue-4.9/libata-have-ata_scsi_rw_xlat-fail-invalid-passthrough-requests.patch b/queue-4.9/libata-have-ata_scsi_rw_xlat-fail-invalid-passthrough-requests.patch
new file mode 100644 (file)
index 0000000..e8d0078
--- /dev/null
@@ -0,0 +1,80 @@
+From 2d7271501720038381d45fb3dcbe4831228fc8cc Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Wed, 7 Aug 2019 12:20:52 -0600
+Subject: libata: have ata_scsi_rw_xlat() fail invalid passthrough requests
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 2d7271501720038381d45fb3dcbe4831228fc8cc upstream.
+
+For passthrough requests, libata-scsi takes what the user passes in
+as gospel. This can be problematic if the user fills in the CDB
+incorrectly. One example of that is in request sizes. For read/write
+commands, the CDB contains fields describing the transfer length of
+the request. These should match with the SG_IO header fields, but
+libata-scsi currently does no validation of that.
+
+Check that the number of blocks in the CDB for passthrough requests
+matches what was mapped into the request. If the CDB asks for more
+data than the validated SG_IO header fields allow, error out.
+
+Reported-by: Krishna Ram Prakash R <krp@gtux.in>
+Reviewed-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/ata/libata-scsi.c |   21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1734,6 +1734,21 @@ nothing_to_do:
+       return 1;
+ }
++static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
++{
++      struct request *rq = scmd->request;
++      u32 req_blocks;
++
++      if (!blk_rq_is_passthrough(rq))
++              return true;
++
++      req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
++      if (n_blocks > req_blocks)
++              return false;
++
++      return true;
++}
++
+ /**
+  *    ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
+  *    @qc: Storage for translated ATA taskfile
+@@ -1776,6 +1791,8 @@ static unsigned int ata_scsi_rw_xlat(str
+               scsi_10_lba_len(cdb, &block, &n_block);
+               if (cdb[1] & (1 << 3))
+                       tf_flags |= ATA_TFLAG_FUA;
++              if (!ata_check_nblocks(scmd, n_block))
++                      goto invalid_fld;
+               break;
+       case READ_6:
+       case WRITE_6:
+@@ -1790,6 +1807,8 @@ static unsigned int ata_scsi_rw_xlat(str
+                */
+               if (!n_block)
+                       n_block = 256;
++              if (!ata_check_nblocks(scmd, n_block))
++                      goto invalid_fld;
+               break;
+       case READ_16:
+       case WRITE_16:
+@@ -1800,6 +1819,8 @@ static unsigned int ata_scsi_rw_xlat(str
+               scsi_16_lba_len(cdb, &block, &n_block);
+               if (cdb[1] & (1 << 3))
+                       tf_flags |= ATA_TFLAG_FUA;
++              if (!ata_check_nblocks(scmd, n_block))
++                      goto invalid_fld;
+               break;
+       default:
+               DPRINTK("no-byte command\n");
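
The new ata_check_nblocks() helper is simple arithmetic: for a passthrough (SG_IO) request, the CDB's block count may not exceed the number of sectors actually mapped into the request. The user-space model below illustrates that check; the fake_request/fake_scmd structures and check_nblocks() are simplified stand-ins for the libata and SCSI types, not the real ones.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the request/scsi_cmnd fields the check reads. */
struct fake_request { uint32_t mapped_bytes; bool passthrough; };
struct fake_scmd    { struct fake_request *rq; uint32_t sector_size; };

/* Models ata_check_nblocks(): for passthrough (SG_IO) requests, reject a
 * CDB that asks for more blocks than were mapped into the request. */
static bool check_nblocks(const struct fake_scmd *scmd, uint32_t n_blocks)
{
        uint32_t req_blocks;

        if (!scmd->rq->passthrough)
                return true;    /* regular fs requests are built by the kernel */

        req_blocks = scmd->rq->mapped_bytes / scmd->sector_size;
        return n_blocks <= req_blocks;
}

int main(void)
{
        struct fake_request rq = { .mapped_bytes = 8 * 512, .passthrough = true };
        struct fake_scmd scmd  = { .rq = &rq, .sector_size = 512 };

        printf("CDB asks for 8 blocks:  %s\n", check_nblocks(&scmd, 8)  ? "ok" : "rejected");
        printf("CDB asks for 16 blocks: %s\n", check_nblocks(&scmd, 16) ? "ok" : "rejected");
        return 0;
}
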
diff --git a/queue-4.9/series b/queue-4.9/series
index fff5ff10b251849241f4f58531e2a98e18fd17e2..b3e75e716e083ad476570ad3eb6f162051148526 100644 (file)
@@ -146,3 +146,9 @@ ib-iser-fix-possible-null-deref-at-iser_inv_desc.patch
 memfd-use-radix_tree_deref_slot_protected-to-avoid-the-warning.patch
 slcan-fix-memory-leak-in-error-path.patch
 net-cdc_ncm-signedness-bug-in-cdc_ncm_set_dgram_size.patch
+x86-atomic-fix-smp_mb__-before-after-_atomic.patch
+kprobes-x86-prohibit-probing-on-exception-masking-instructions.patch
+uprobes-x86-prohibit-probing-on-mov-ss-instruction.patch
+fbdev-ditch-fb_edid_add_monspecs.patch
+block-introduce-blk_rq_is_passthrough.patch
+libata-have-ata_scsi_rw_xlat-fail-invalid-passthrough-requests.patch
diff --git a/queue-4.9/uprobes-x86-prohibit-probing-on-mov-ss-instruction.patch b/queue-4.9/uprobes-x86-prohibit-probing-on-mov-ss-instruction.patch
new file mode 100644 (file)
index 0000000..e1e1073
--- /dev/null
@@ -0,0 +1,50 @@
+From 13ebe18c94f5b0665c01ae7fad2717ae959f4212 Mon Sep 17 00:00:00 2001
+From: Masami Hiramatsu <mhiramat@kernel.org>
+Date: Wed, 9 May 2018 21:58:45 +0900
+Subject: uprobes/x86: Prohibit probing on MOV SS instruction
+
+From: Masami Hiramatsu <mhiramat@kernel.org>
+
+commit 13ebe18c94f5b0665c01ae7fad2717ae959f4212 upstream.
+
+Since MOV SS and POP SS instructions will delay the exceptions until the
+next instruction is executed, single-stepping on them by uprobes must be
+prohibited.
+
+uprobes already rejects probing on POP SS (0x1f), but allows probing on MOV
+SS (0x8e and reg == 2).  This patch checks the target instruction and, if it
+is MOV SS or POP SS, returns -ENOTSUPP to reject probing.
+
+Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Oleg Nesterov <oleg@redhat.com>
+Cc: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+Cc: Francis Deslauriers <francis.deslauriers@efficios.com>
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: "H . Peter Anvin" <hpa@zytor.com>
+Cc: Yonghong Song <yhs@fb.com>
+Cc: Borislav Petkov <bp@suse.de>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: "David S . Miller" <davem@davemloft.net>
+Link: https://lkml.kernel.org/r/152587072544.17316.5950935243917346341.stgit@devbox
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/uprobes.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/x86/kernel/uprobes.c
++++ b/arch/x86/kernel/uprobes.c
+@@ -296,6 +296,10 @@ static int uprobe_init_insn(struct arch_
+       if (is_prefix_bad(insn))
+               return -ENOTSUPP;
++      /* We should not singlestep on the exception masking instructions */
++      if (insn_masking_exception(insn))
++              return -ENOTSUPP;
++
+       if (x86_64)
+               good_insns = good_insns_64;
+       else
diff --git a/queue-4.9/x86-atomic-fix-smp_mb__-before-after-_atomic.patch b/queue-4.9/x86-atomic-fix-smp_mb__-before-after-_atomic.patch
new file mode 100644 (file)
index 0000000..c0d21b3
--- /dev/null
@@ -0,0 +1,144 @@
+From 69d927bba39517d0980462efc051875b7f4db185 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 24 Apr 2019 13:38:23 +0200
+Subject: x86/atomic: Fix smp_mb__{before,after}_atomic()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 69d927bba39517d0980462efc051875b7f4db185 upstream.
+
+Recent probing at the Linux Kernel Memory Model uncovered a
+'surprise'. Strongly ordered architectures where the atomic RmW
+primitive implies full memory ordering and
+smp_mb__{before,after}_atomic() are a simple barrier() (such as x86)
+fail for:
+
+       *x = 1;
+       atomic_inc(u);
+       smp_mb__after_atomic();
+       r0 = *y;
+
+Because, while the atomic_inc() implies memory order, it
+(surprisingly) does not provide a compiler barrier. This then allows
+the compiler to re-order like so:
+
+       atomic_inc(u);
+       *x = 1;
+       smp_mb__after_atomic();
+       r0 = *y;
+
+Which the CPU is then allowed to re-order (under TSO rules) like:
+
+       atomic_inc(u);
+       r0 = *y;
+       *x = 1;
+
+And this very much was not intended. Therefore strengthen the atomic
+RmW ops to include a compiler barrier.
+
+NOTE: atomic_{or,and,xor} and the bitops already had the compiler
+barrier.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Jari Ruusu <jari.ruusu@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/atomic.h      |    8 ++++----
+ arch/x86/include/asm/atomic64_64.h |    8 ++++----
+ arch/x86/include/asm/barrier.h     |    4 ++--
+ 3 files changed, 10 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/include/asm/atomic.h
++++ b/arch/x86/include/asm/atomic.h
+@@ -49,7 +49,7 @@ static __always_inline void atomic_add(i
+ {
+       asm volatile(LOCK_PREFIX "addl %1,%0"
+                    : "+m" (v->counter)
+-                   : "ir" (i));
++                   : "ir" (i) : "memory");
+ }
+ /**
+@@ -63,7 +63,7 @@ static __always_inline void atomic_sub(i
+ {
+       asm volatile(LOCK_PREFIX "subl %1,%0"
+                    : "+m" (v->counter)
+-                   : "ir" (i));
++                   : "ir" (i) : "memory");
+ }
+ /**
+@@ -89,7 +89,7 @@ static __always_inline bool atomic_sub_a
+ static __always_inline void atomic_inc(atomic_t *v)
+ {
+       asm volatile(LOCK_PREFIX "incl %0"
+-                   : "+m" (v->counter));
++                   : "+m" (v->counter) :: "memory");
+ }
+ /**
+@@ -101,7 +101,7 @@ static __always_inline void atomic_inc(a
+ static __always_inline void atomic_dec(atomic_t *v)
+ {
+       asm volatile(LOCK_PREFIX "decl %0"
+-                   : "+m" (v->counter));
++                   : "+m" (v->counter) :: "memory");
+ }
+ /**
+--- a/arch/x86/include/asm/atomic64_64.h
++++ b/arch/x86/include/asm/atomic64_64.h
+@@ -44,7 +44,7 @@ static __always_inline void atomic64_add
+ {
+       asm volatile(LOCK_PREFIX "addq %1,%0"
+                    : "=m" (v->counter)
+-                   : "er" (i), "m" (v->counter));
++                   : "er" (i), "m" (v->counter) : "memory");
+ }
+ /**
+@@ -58,7 +58,7 @@ static inline void atomic64_sub(long i,
+ {
+       asm volatile(LOCK_PREFIX "subq %1,%0"
+                    : "=m" (v->counter)
+-                   : "er" (i), "m" (v->counter));
++                   : "er" (i), "m" (v->counter) : "memory");
+ }
+ /**
+@@ -85,7 +85,7 @@ static __always_inline void atomic64_inc
+ {
+       asm volatile(LOCK_PREFIX "incq %0"
+                    : "=m" (v->counter)
+-                   : "m" (v->counter));
++                   : "m" (v->counter) : "memory");
+ }
+ /**
+@@ -98,7 +98,7 @@ static __always_inline void atomic64_dec
+ {
+       asm volatile(LOCK_PREFIX "decq %0"
+                    : "=m" (v->counter)
+-                   : "m" (v->counter));
++                   : "m" (v->counter) : "memory");
+ }
+ /**
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -105,8 +105,8 @@ do {                                                                       \
+ #endif
+ /* Atomic operations are already serializing on x86 */
+-#define __smp_mb__before_atomic()     barrier()
+-#define __smp_mb__after_atomic()      barrier()
++#define __smp_mb__before_atomic()     do { } while (0)
++#define __smp_mb__after_atomic()      do { } while (0)
+ #include <asm-generic/barrier.h>
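
The substance of the change above is the added "memory" clobber, which makes each LOCK-prefixed asm statement a compiler barrier as well as an atomic RmW. The short x86-only illustration below contrasts the two forms under GCC/Clang inline asm; inc_no_barrier() and inc_with_barrier() are made-up names for the example and are not the kernel's atomic_inc().

#include <stdio.h>

/* Without the "memory" clobber the compiler may move surrounding loads and
 * stores across the asm statement; with it, the statement is also a compiler
 * barrier, which is what smp_mb__{before,after}_atomic() relies on. */
static inline void inc_no_barrier(int *v)
{
        asm volatile("lock incl %0" : "+m" (*v));
}

static inline void inc_with_barrier(int *v)
{
        asm volatile("lock incl %0" : "+m" (*v) : : "memory");
}

int main(void)
{
        int counter = 0;

        inc_no_barrier(&counter);
        inc_with_barrier(&counter);
        printf("counter = %d\n", counter);
        return 0;
}
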