git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 12 Nov 2019 18:10:01 +0000 (19:10 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 12 Nov 2019 18:10:01 +0000 (19:10 +0100)
added patches:
drm-i915-add-gen9-bcs-cmdparsing.patch
drm-i915-add-support-for-mandatory-cmdparsing.patch
drm-i915-allow-parsing-of-unsized-batches.patch
drm-i915-cmdparser-add-support-for-backward-jumps.patch
drm-i915-cmdparser-fix-jump-whitelist-clearing.patch
drm-i915-cmdparser-ignore-length-operands-during-command-matching.patch
drm-i915-disable-secure-batches-for-gen6.patch
drm-i915-gen8-add-rc6-ctx-corruption-wa.patch
drm-i915-gtt-add-read-only-pages-to-gen8_pte_encode.patch
drm-i915-gtt-disable-read-only-support-under-gvt.patch
drm-i915-gtt-read-only-pages-for-insert_entries-on-bdw.patch
drm-i915-lower-rm-timeout-to-avoid-dsi-hard-hangs.patch
drm-i915-remove-master-tables-from-cmdparser.patch
drm-i915-rename-gen7-cmdparser-tables.patch
drm-i915-support-ro-ppgtt-mapped-cmdparser-shadow-buffers.patch

16 files changed:
queue-4.4/drm-i915-add-gen9-bcs-cmdparsing.patch [new file with mode: 0644]
queue-4.4/drm-i915-add-support-for-mandatory-cmdparsing.patch [new file with mode: 0644]
queue-4.4/drm-i915-allow-parsing-of-unsized-batches.patch [new file with mode: 0644]
queue-4.4/drm-i915-cmdparser-add-support-for-backward-jumps.patch [new file with mode: 0644]
queue-4.4/drm-i915-cmdparser-fix-jump-whitelist-clearing.patch [new file with mode: 0644]
queue-4.4/drm-i915-cmdparser-ignore-length-operands-during-command-matching.patch [new file with mode: 0644]
queue-4.4/drm-i915-disable-secure-batches-for-gen6.patch [new file with mode: 0644]
queue-4.4/drm-i915-gen8-add-rc6-ctx-corruption-wa.patch [new file with mode: 0644]
queue-4.4/drm-i915-gtt-add-read-only-pages-to-gen8_pte_encode.patch [new file with mode: 0644]
queue-4.4/drm-i915-gtt-disable-read-only-support-under-gvt.patch [new file with mode: 0644]
queue-4.4/drm-i915-gtt-read-only-pages-for-insert_entries-on-bdw.patch [new file with mode: 0644]
queue-4.4/drm-i915-lower-rm-timeout-to-avoid-dsi-hard-hangs.patch [new file with mode: 0644]
queue-4.4/drm-i915-remove-master-tables-from-cmdparser.patch [new file with mode: 0644]
queue-4.4/drm-i915-rename-gen7-cmdparser-tables.patch [new file with mode: 0644]
queue-4.4/drm-i915-support-ro-ppgtt-mapped-cmdparser-shadow-buffers.patch [new file with mode: 0644]
queue-4.4/series

diff --git a/queue-4.4/drm-i915-add-gen9-bcs-cmdparsing.patch b/queue-4.4/drm-i915-add-gen9-bcs-cmdparsing.patch
new file mode 100644 (file)
index 0000000..37b2d02
--- /dev/null
@@ -0,0 +1,303 @@
+From foo@baz Tue 12 Nov 2019 04:09:39 PM CET
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Mon, 23 Apr 2018 11:12:15 -0700
+Subject: drm/i915: Add gen9 BCS cmdparsing
+
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+
+commit 0f2f39758341df70202ae1c42d5a1e4ee392b6d3 upstream.
+
+For gen9 we enable cmdparsing on the BCS ring, specifically
+to catch inadvertent accesses to sensitive registers.
+
+Unlike gen7/hsw, we use the parser only to block certain
+registers. We can rely on h/w to block restricted commands,
+so the command tables only provide enough info to allow the
+parser to delineate each command, and identify commands that
+access registers.
+
+Note: This patch deliberately ignores checkpatch issues in
+favour of matching the style of the surrounding code. We'll
+correct the entire file in one go in a later patch.
+
+v3: rebase (Mika)
+v4: Add RING_TIMESTAMP registers to whitelist (Jon)
+
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_cmd_parser.c |  121 ++++++++++++++++++++++++++++++---
+ drivers/gpu/drm/i915/i915_dma.c        |    2 
+ drivers/gpu/drm/i915/i915_drv.h        |    2 
+ drivers/gpu/drm/i915/i915_gem_gtt.c    |    3 
+ drivers/gpu/drm/i915/i915_reg.h        |    5 +
+ 5 files changed, 119 insertions(+), 14 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
+@@ -346,6 +346,47 @@ static const struct drm_i915_cmd_descrip
+       CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
+ };
++/*
++ * For Gen9 we can still rely on the h/w to enforce cmd security, and only
++ * need to re-enforce the register access checks. We therefore only need to
++ * teach the cmdparser how to find the end of each command, and identify
++ * register accesses. The table doesn't need to reject any commands, and so
++ * the only commands listed here are:
++ *   1) Those that touch registers
++ *   2) Those that do not have the default 8-bit length
++ *
++ * Note that the default MI length mask chosen for this table is 0xFF, not
++ * the 0x3F used on older devices. This is because the vast majority of MI
++ * cmds on Gen9 use a standard 8-bit Length field.
++ * All the Gen9 blitter instructions are standard 0xFF length mask, and
++ * none allow access to non-general registers, so in fact no BLT cmds are
++ * included in the table at all.
++ *
++ */
++static const struct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
++      CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
++      CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      S  ),
++      CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      S  ),
++      CMD(  MI_FLUSH,                         SMI,    F,  1,      S  ),
++      CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
++      CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
++      CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      S  ),
++      CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
++      CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   S  ),
++      CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   S  ),
++      CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3FF,  S  ),
++      CMD(  MI_LOAD_REGISTER_IMM(1),          SMI,   !F,  0xFF,   W,
++            .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 }    ),
++      CMD(  MI_UPDATE_GTT,                    SMI,   !F,  0x3FF,  S  ),
++      CMD(  MI_STORE_REGISTER_MEM_GEN8,       SMI,    F,  4,      W,
++            .reg = { .offset = 1, .mask = 0x007FFFFC }               ),
++      CMD(  MI_FLUSH_DW,                      SMI,   !F,  0x3F,   S  ),
++      CMD(  MI_LOAD_REGISTER_MEM_GEN8,        SMI,    F,  4,      W,
++            .reg = { .offset = 1, .mask = 0x007FFFFC }               ),
++      CMD(  MI_LOAD_REGISTER_REG,             SMI,    !F,  0xFF,  W,
++            .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 }    ),
++};
++
+ #undef CMD
+ #undef SMI
+ #undef S3D
+@@ -389,6 +430,11 @@ static const struct drm_i915_cmd_table h
+       { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
+ };
++static const struct drm_i915_cmd_table gen9_blt_cmd_table[] = {
++      { gen9_blt_cmds, ARRAY_SIZE(gen9_blt_cmds) },
++};
++
++
+ /*
+  * Register whitelists, sorted by increasing register offset.
+  */
+@@ -422,6 +468,10 @@ struct drm_i915_reg_descriptor {
+ #define REG64(addr)                                     \
+       REG32(addr), REG32(addr + sizeof(u32))
++#define REG64_IDX(_reg, idx) \
++      { .addr = _reg(idx) }, \
++      { .addr = _reg ## _UDW(idx) }
++
+ static const struct drm_i915_reg_descriptor gen7_render_regs[] = {
+       REG64(GPGPU_THREADS_DISPATCHED),
+       REG64(HS_INVOCATION_COUNT),
+@@ -475,6 +525,29 @@ static const struct drm_i915_reg_descrip
+       REG32(BCS_SWCTRL),
+ };
++static const struct drm_i915_reg_descriptor gen9_blt_regs[] = {
++      REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE),
++      REG64_IDX(RING_TIMESTAMP, BSD_RING_BASE),
++      REG32(BCS_SWCTRL),
++      REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE),
++      REG64_IDX(BCS_GPR, 0),
++      REG64_IDX(BCS_GPR, 1),
++      REG64_IDX(BCS_GPR, 2),
++      REG64_IDX(BCS_GPR, 3),
++      REG64_IDX(BCS_GPR, 4),
++      REG64_IDX(BCS_GPR, 5),
++      REG64_IDX(BCS_GPR, 6),
++      REG64_IDX(BCS_GPR, 7),
++      REG64_IDX(BCS_GPR, 8),
++      REG64_IDX(BCS_GPR, 9),
++      REG64_IDX(BCS_GPR, 10),
++      REG64_IDX(BCS_GPR, 11),
++      REG64_IDX(BCS_GPR, 12),
++      REG64_IDX(BCS_GPR, 13),
++      REG64_IDX(BCS_GPR, 14),
++      REG64_IDX(BCS_GPR, 15),
++};
++
+ #undef REG64
+ #undef REG32
+@@ -533,6 +606,17 @@ static u32 gen7_blt_get_cmd_length_mask(
+       return 0;
+ }
++static u32 gen9_blt_get_cmd_length_mask(u32 cmd_header)
++{
++      u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;
++
++      if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
++              return 0xFF;
++
++      DRM_DEBUG_DRIVER("CMD: Abnormal blt cmd length! 0x%08X\n", cmd_header);
++      return 0;
++}
++
+ static bool validate_cmds_sorted(struct intel_engine_cs *ring,
+                                const struct drm_i915_cmd_table *cmd_tables,
+                                int cmd_table_count)
+@@ -672,7 +756,7 @@ int i915_cmd_parser_init_ring(struct int
+       int cmd_table_count;
+       int ret;
+-      if (!IS_GEN7(ring->dev))
++      if (!IS_GEN7(ring->dev) && !(IS_GEN9(ring->dev) && ring->id == BCS))
+               return 0;
+       switch (ring->id) {
+@@ -697,7 +781,17 @@ int i915_cmd_parser_init_ring(struct int
+               ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+               break;
+       case BCS:
+-              if (IS_HASWELL(ring->dev)) {
++              ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
++              if (IS_GEN9(ring->dev)) {
++                      cmd_tables = gen9_blt_cmd_table;
++                      cmd_table_count = ARRAY_SIZE(gen9_blt_cmd_table);
++                      ring->get_cmd_length_mask =
++                              gen9_blt_get_cmd_length_mask;
++
++                      /* BCS Engine unsafe without parser */
++                      ring->requires_cmd_parser = 1;
++              }
++              else if (IS_HASWELL(ring->dev)) {
+                       cmd_tables = hsw_blt_ring_cmd_table;
+                       cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table);
+               } else {
+@@ -705,10 +799,14 @@ int i915_cmd_parser_init_ring(struct int
+                       cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
+               }
+-              ring->reg_table = gen7_blt_regs;
+-              ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
++              if (IS_GEN9(ring->dev)) {
++                      ring->reg_table = gen9_blt_regs;
++                      ring->reg_count = ARRAY_SIZE(gen9_blt_regs);
++              } else {
++                      ring->reg_table = gen7_blt_regs;
++                      ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
++              }
+-              ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+               break;
+       case VECS:
+               cmd_tables = hsw_vebox_cmd_table;
+@@ -1082,9 +1180,9 @@ int i915_parse_cmds(struct intel_engine_
+               }
+               /*
+-               * If the batch buffer contains a chained batch, return an
+-               * error that tells the caller to abort and dispatch the
+-               * workload as a non-secure batch.
++               * We don't try to handle BATCH_BUFFER_START because it adds
++               * non-trivial complexity. Instead we abort the scan and return
++               * an error to indicate that the batch is unsafe.
+                */
+               if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+                       ret = -EACCES;
+@@ -1106,7 +1204,7 @@ int i915_parse_cmds(struct intel_engine_
+               }
+               if (!check_cmd(ring, desc, cmd, length, &oacontrol_set)) {
+-                      ret = -EINVAL;
++                      ret = CMDPARSER_USES_GGTT(ring->dev) ? -EINVAL : -EACCES;
+                       break;
+               }
+@@ -1136,7 +1234,7 @@ int i915_parse_cmds(struct intel_engine_
+  *
+  * Return: the current version number of the cmd parser
+  */
+-int i915_cmd_parser_get_version(void)
++int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
+ {
+       /*
+        * Command parser version history
+@@ -1148,6 +1246,7 @@ int i915_cmd_parser_get_version(void)
+        * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
+        * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
+        * 5. GPGPU dispatch compute indirect registers.
++       * 10. Gen9 only - Supports the new ppgtt based BLIT parser
+        */
+-      return 5;
++      return CMDPARSER_USES_GGTT(dev_priv) ? 5 : 10;
+ }
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -145,7 +145,7 @@ static int i915_getparam(struct drm_devi
+               value = 1;
+               break;
+       case I915_PARAM_CMD_PARSER_VERSION:
+-              value = i915_cmd_parser_get_version();
++              value = i915_cmd_parser_get_version(dev_priv);
+               break;
+       case I915_PARAM_HAS_COHERENT_PHYS_GTT:
+               value = 1;
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -3285,7 +3285,7 @@ void i915_get_extra_instdone(struct drm_
+ const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
+ /* i915_cmd_parser.c */
+-int i915_cmd_parser_get_version(void);
++int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
+ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
+ void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
+ bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
+--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
+@@ -119,7 +119,8 @@ static int sanitize_enable_ppgtt(struct
+           (enable_ppgtt == 0 || !has_aliasing_ppgtt))
+               return 0;
+-      if (enable_ppgtt == 1)
++      /* Full PPGTT is required by the Gen9 cmdparser */
++      if (enable_ppgtt == 1 && INTEL_INFO(dev)->gen != 9)
+               return 1;
+       if (enable_ppgtt == 2 && has_full_ppgtt)
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -511,6 +511,10 @@
+  */
+ #define BCS_SWCTRL 0x22200
++/* There are 16 GPR registers */
++#define BCS_GPR(n)    (0x22600 + (n) * 8)
++#define BCS_GPR_UDW(n)        (0x22600 + (n) * 8 + 4)
++
+ #define GPGPU_THREADS_DISPATCHED        0x2290
+ #define HS_INVOCATION_COUNT             0x2300
+ #define DS_INVOCATION_COUNT             0x2308
+@@ -1567,6 +1571,7 @@ enum skl_disp_power_wells {
+ #define RING_IMR(base)                ((base)+0xa8)
+ #define RING_HWSTAM(base)     ((base)+0x98)
+ #define RING_TIMESTAMP(base)  ((base)+0x358)
++#define RING_TIMESTAMP_UDW(base) ((base) + 0x358 + 4)
+ #define   TAIL_ADDR           0x001FFFF8
+ #define   HEAD_WRAP_COUNT     0xFFE00000
+ #define   HEAD_WRAP_ONE               0x00200000
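
The decode at the heart of the new gen9_blt_get_cmd_length_mask() can be
modelled outside the kernel: only MI and blitter client commands are
accepted, and both are treated as having a standard 8-bit length field.
The sketch below is a minimal userspace rendering of that logic; the
INSTR_* values are assumed to match the i915 encoding (client field in
bits 31:29 of the command header) and are not taken from this patch.

#include <stdint.h>
#include <stdio.h>

#define INSTR_CLIENT_SHIFT 29
#define INSTR_CLIENT_MASK  (0x7u << INSTR_CLIENT_SHIFT)
#define INSTR_MI_CLIENT    0x0
#define INSTR_BC_CLIENT    0x2

static uint32_t gen9_blt_length_mask(uint32_t cmd_header)
{
	uint32_t client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT;

	/* MI and blitter commands use a standard 8-bit length field on gen9 */
	if (client == INSTR_MI_CLIENT || client == INSTR_BC_CLIENT)
		return 0xFF;
	return 0;	/* any other client is abnormal: the batch is rejected */
}

int main(void)
{
	printf("MI cmd mask: 0x%02X\n", gen9_blt_length_mask(0x0));
	printf("RC cmd mask: 0x%02X\n",
	       gen9_blt_length_mask(0x3u << INSTR_CLIENT_SHIFT));
	return 0;
}
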
diff --git a/queue-4.4/drm-i915-add-support-for-mandatory-cmdparsing.patch b/queue-4.4/drm-i915-add-support-for-mandatory-cmdparsing.patch
new file mode 100644 (file)
index 0000000..1f5d48b
--- /dev/null
@@ -0,0 +1,122 @@
+From foo@baz Tue 12 Nov 2019 04:09:39 PM CET
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Wed, 1 Aug 2018 09:33:59 -0700
+Subject: drm/i915: Add support for mandatory cmdparsing
+
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+
+commit 311a50e76a33d1e029563c24b2ff6db0c02b5afe upstream.
+
+The existing cmdparser for gen7 can be bypassed by specifying
+batch_len=0 in the execbuf call. This is safe because bypassing
+simply reduces the cmd-set available.
+
+In a later patch we will introduce cmdparsing for gen9, as a
+security measure, which must be strictly enforced since without
+it we are vulnerable to DoS attacks.
+
+Introduce the concept of 'required' cmd parsing that cannot be
+bypassed by submitting zero-length bb's.
+
+v2: rebase (Mika)
+v3: fix conflict on engine flags (Mika)
+
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_cmd_parser.c     |   24 ++----------------------
+ drivers/gpu/drm/i915/i915_gem_execbuffer.c |    9 ++++++++-
+ drivers/gpu/drm/i915/intel_ringbuffer.h    |    3 ++-
+ 3 files changed, 12 insertions(+), 24 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
+@@ -734,7 +734,7 @@ int i915_cmd_parser_init_ring(struct int
+               return ret;
+       }
+-      ring->needs_cmd_parser = true;
++      ring->using_cmd_parser = true;
+       return 0;
+ }
+@@ -748,7 +748,7 @@ int i915_cmd_parser_init_ring(struct int
+  */
+ void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring)
+ {
+-      if (!ring->needs_cmd_parser)
++      if (!ring->using_cmd_parser)
+               return;
+       fini_hash_table(ring);
+@@ -914,26 +914,6 @@ unpin_src:
+       return ret ? ERR_PTR(ret) : dst;
+ }
+-/**
+- * i915_needs_cmd_parser() - should a given ring use software command parsing?
+- * @ring: the ring in question
+- *
+- * Only certain platforms require software batch buffer command parsing, and
+- * only when enabled via module parameter.
+- *
+- * Return: true if the ring requires software command parsing
+- */
+-bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
+-{
+-      if (!ring->needs_cmd_parser)
+-              return false;
+-
+-      if (!USES_PPGTT(ring->dev))
+-              return false;
+-
+-      return (i915.enable_cmd_parser == 1);
+-}
+-
+ static bool check_cmd(const struct intel_engine_cs *ring,
+                     const struct drm_i915_cmd_descriptor *desc,
+                     const u32 *cmd, u32 length,
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -1320,6 +1320,13 @@ eb_get_batch(struct eb_vmas *eb)
+       return vma->obj;
+ }
++static inline bool use_cmdparser(const struct intel_engine_cs *ring,
++                               u32 batch_len)
++{
++      return ring->requires_cmd_parser ||
++              (ring->using_cmd_parser && batch_len && USES_PPGTT(ring->dev));
++}
++
+ static int
+ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+                      struct drm_file *file,
+@@ -1491,7 +1498,7 @@ i915_gem_do_execbuffer(struct drm_device
+       }
+       params->args_batch_start_offset = args->batch_start_offset;
+-      if (i915_needs_cmd_parser(ring) && args->batch_len) {
++      if (use_cmdparser(ring, args->batch_len)) {
+               struct drm_i915_gem_object *parsed_batch_obj;
+               parsed_batch_obj = i915_gem_execbuffer_parse(ring,
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
+@@ -314,7 +314,8 @@ struct  intel_engine_cs {
+               volatile u32 *cpu_page;
+       } scratch;
+-      bool needs_cmd_parser;
++      bool using_cmd_parser;
++      bool requires_cmd_parser;
+       /*
+        * Table of commands the command parser needs to know about
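
The decision the two new flags feed can be reduced to a few lines. This
is a sketch of use_cmdparser() from the hunk above with the ring and
device state collapsed into one struct; uses_ppgtt stands in for
USES_PPGTT(ring->dev) and is an assumption of the model, not a driver
field.

#include <stdbool.h>
#include <stdint.h>

struct ring_model {
	bool requires_cmd_parser;	/* mandatory parsing (gen9 BCS) */
	bool using_cmd_parser;		/* optional parsing (gen7/hsw) */
	bool uses_ppgtt;		/* stands in for USES_PPGTT() */
};

static bool use_cmdparser(const struct ring_model *ring, uint32_t batch_len)
{
	/* a ring that *requires* parsing can never be bypassed; one that
	 * merely supports it is used only for sized batches on ppgtt */
	return ring->requires_cmd_parser ||
	       (ring->using_cmd_parser && batch_len && ring->uses_ppgtt);
}
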
diff --git a/queue-4.4/drm-i915-allow-parsing-of-unsized-batches.patch b/queue-4.4/drm-i915-allow-parsing-of-unsized-batches.patch
new file mode 100644 (file)
index 0000000..303e497
--- /dev/null
@@ -0,0 +1,50 @@
+From foo@baz Tue 12 Nov 2019 04:09:39 PM CET
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Wed, 1 Aug 2018 09:45:50 -0700
+Subject: drm/i915: Allow parsing of unsized batches
+
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+
+commit 435e8fc059dbe0eec823a75c22da2972390ba9e0 upstream.
+
+In "drm/i915: Add support for mandatory cmdparsing" we introduced the
+concept of mandatory parsing. This allows the cmdparser to be invoked
+even when the user passes batch_len=0 to the execbuf ioctl.
+
+However, the cmdparser needs to know the extents of the buffer being
+scanned. Refactor the code to ensure the cmdparser uses the actual
+object size, instead of the incoming length, if the user passes 0.
+
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_gem_execbuffer.c |    9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -1540,12 +1540,17 @@ i915_gem_do_execbuffer(struct drm_device
+       if (use_cmdparser(ring, args->batch_len)) {
+               struct drm_i915_gem_object *parsed_batch_obj;
++              u32 batch_off = args->batch_start_offset;
++              u32 batch_len = args->batch_len;
++              if (batch_len == 0)
++                      batch_len = batch_obj->base.size - batch_off;
++
+               parsed_batch_obj = i915_gem_execbuffer_parse(ring,
+                                                     &shadow_exec_entry,
+                                                     eb, vm,
+                                                     batch_obj,
+-                                                    args->batch_start_offset,
+-                                                    args->batch_len);
++                                                    batch_off,
++                                                    batch_len);
+               if (IS_ERR(parsed_batch_obj)) {
+                       ret = PTR_ERR(parsed_batch_obj);
+                       goto err;
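
The fallback length computation is small enough to state on its own: when
userspace passes batch_len=0, the parser scans from the start offset to
the end of the batch object. A hedged sketch, assuming batch_off has
already been validated to lie within the object:

#include <stdint.h>

static uint32_t effective_batch_len(uint32_t obj_size, uint32_t batch_off,
				    uint32_t batch_len)
{
	if (batch_len == 0)			/* "unsized" execbuf call */
		batch_len = obj_size - batch_off;
	return batch_len;
}
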
diff --git a/queue-4.4/drm-i915-cmdparser-add-support-for-backward-jumps.patch b/queue-4.4/drm-i915-cmdparser-add-support-for-backward-jumps.patch
new file mode 100644 (file)
index 0000000..a4a6831
--- /dev/null
@@ -0,0 +1,403 @@
+From foo@baz Tue 12 Nov 2019 04:09:39 PM CET
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Fri, 21 Sep 2018 13:18:09 -0700
+Subject: drm/i915/cmdparser: Add support for backward jumps
+
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+
+commit f8c08d8faee5567803c8c533865296ca30286bbf upstream.
+
+To keep things manageable, the pre-gen9 cmdparser does not
+attempt to track any form of nested BB_STARTs. This did not
+prevent usermode from using nested starts, or even chained
+batches because the cmdparser is not strictly enforced pre gen9.
+
+Instead, the existence of a nested BB_START would cause the batch
+to be emitted in insecure mode, and any privileged capabilities
+would not be available.
+
+For Gen9, the cmdparser becomes mandatory (for BCS at least), and
+so not providing any form of nested BB_START support becomes
+overly restrictive. Any such batch will simply not run.
+
+We make heavy use of backward jumps in igt, and it is much easier
+to add support for this restricted subset of nested jumps, than to
+rewrite the whole of our test suite to avoid them.
+
+Add the required logic to support limited backward jumps, to
+instructions that have already been validated by the parser.
+
+Note that it's not sufficient to simply approve any BB_START
+that jumps backwards in the buffer because this would allow an
+attacker to embed a rogue instruction sequence within the
+operand words of a harmless instruction (say LRI) and jump to
+that.
+
+We introduce a bit array to track every instr offset successfully
+validated, and test the target of BB_START against this. If the
+target offset hits, it is re-written to the same offset in the
+shadow buffer and the BB_START cmd is allowed.
+
+Note: This patch deliberately ignores checkpatch issues in the
+cmdtables, in order to match the style of the surrounding code.
+We'll correct the entire file in one go in a later patch.
+
+v2: set dispatch secure late (Mika)
+v3: rebase (Mika)
+v4: Clear whitelist on each parse
+Minor review updates (Chris)
+v5: Correct backward jump batching
+v6: fix compilation error due to struct eb shuffle (Mika)
+
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_cmd_parser.c     |  148 +++++++++++++++++++++++++----
+ drivers/gpu/drm/i915/i915_drv.h            |   16 ++-
+ drivers/gpu/drm/i915/i915_gem_context.c    |    5 
+ drivers/gpu/drm/i915/i915_gem_execbuffer.c |   35 ++++--
+ 4 files changed, 175 insertions(+), 29 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
+@@ -385,6 +385,17 @@ static const struct drm_i915_cmd_descrip
+             .reg = { .offset = 1, .mask = 0x007FFFFC }               ),
+       CMD(  MI_LOAD_REGISTER_REG,             SMI,    !F,  0xFF,  W,
+             .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 }    ),
++
++      /*
++       * We allow BB_START but apply further checks. We just sanitize the
++       * basic fields here.
++       */
++      CMD( MI_BATCH_BUFFER_START_GEN8,       SMI,    !F,  0xFF,  B,
++           .bits = {{
++                      .offset = 0,
++                      .mask = ~SMI,
++                      .expected = (MI_BATCH_PPGTT_HSW | 1),
++            }},                                            ),
+ };
+ #undef CMD
+@@ -1012,7 +1023,7 @@ unpin_src:
+       return ret ? ERR_PTR(ret) : dst;
+ }
+-static bool check_cmd(const struct intel_engine_cs *ring,
++static int check_cmd(const struct intel_engine_cs *ring,
+                     const struct drm_i915_cmd_descriptor *desc,
+                     const u32 *cmd, u32 length,
+                     bool *oacontrol_set)
+@@ -1122,15 +1133,114 @@ static bool check_cmd(const struct intel
+       return true;
+ }
++static int check_bbstart(struct intel_context *ctx,
++                       u32 *cmd, u64 offset, u32 length,
++                       u32 batch_len,
++                       u64 batch_start,
++                       u64 shadow_batch_start)
++{
++
++      u64 jump_offset, jump_target;
++      u32 target_cmd_offset, target_cmd_index;
++
++      /* For igt compatibility on older platforms */
++      if (CMDPARSER_USES_GGTT(ctx->i915)) {
++              DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n");
++              return -EACCES;
++      }
++
++      if (length != 3) {
++              DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
++                               length);
++              return -EINVAL;
++      }
++
++      jump_target = *(u64*)(cmd+1);
++      jump_offset = jump_target - batch_start;
++
++      /*
++       * Any underflow of jump_target is guaranteed to be outside the range
++       * of a u32, so >= test catches both too large and too small
++       */
++      if (jump_offset >= batch_len) {
++              DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
++                        jump_target);
++              return -EINVAL;
++      }
++
++      /*
++       * This cannot overflow a u32 because we already checked jump_offset
++       * is within the BB, and the batch_len is a u32
++       */
++      target_cmd_offset = lower_32_bits(jump_offset);
++      target_cmd_index = target_cmd_offset / sizeof(u32);
++
++      *(u64*)(cmd + 1) = shadow_batch_start + target_cmd_offset;
++
++      if (target_cmd_index == offset)
++              return 0;
++
++      if (ctx->jump_whitelist_cmds <= target_cmd_index) {
++              DRM_DEBUG("CMD: Rejecting BB_START - truncated whitelist array\n");
++              return -EINVAL;
++      } else if (!test_bit(target_cmd_index, ctx->jump_whitelist)) {
++              DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
++                        jump_target);
++              return -EINVAL;
++      }
++
++      return 0;
++}
++
++static void init_whitelist(struct intel_context *ctx, u32 batch_len)
++{
++      const u32 batch_cmds = DIV_ROUND_UP(batch_len, sizeof(u32));
++      const u32 exact_size = BITS_TO_LONGS(batch_cmds);
++      u32 next_size = BITS_TO_LONGS(roundup_pow_of_two(batch_cmds));
++      unsigned long *next_whitelist;
++
++      if (CMDPARSER_USES_GGTT(ctx->i915))
++              return;
++
++      if (batch_cmds <= ctx->jump_whitelist_cmds) {
++              memset(ctx->jump_whitelist, 0, exact_size * sizeof(u32));
++              return;
++      }
++
++again:
++      next_whitelist = kcalloc(next_size, sizeof(long), GFP_KERNEL);
++      if (next_whitelist) {
++              kfree(ctx->jump_whitelist);
++              ctx->jump_whitelist = next_whitelist;
++              ctx->jump_whitelist_cmds =
++                      next_size * BITS_PER_BYTE * sizeof(long);
++              return;
++      }
++
++      if (next_size > exact_size) {
++              next_size = exact_size;
++              goto again;
++      }
++
++      DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
++      memset(ctx->jump_whitelist, 0,
++              BITS_TO_LONGS(ctx->jump_whitelist_cmds) * sizeof(u32));
++
++      return;
++}
++
+ #define LENGTH_BIAS 2
+ /**
+  * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
++ * @ctx: the context in which the batch is to execute
+  * @ring: the ring on which the batch is to execute
+  * @batch_obj: the batch buffer in question
+- * @shadow_batch_obj: copy of the batch buffer in question
++ * @user_batch_start: Canonical base address of original user batch
+  * @batch_start_offset: byte offset in the batch at which execution starts
+  * @batch_len: length of the commands in batch_obj
++ * @shadow_batch_obj: copy of the batch buffer in question
++ * @shadow_batch_start: Canonical base address of shadow_batch_obj
+  *
+  * Parses the specified batch buffer looking for privilege violations as
+  * described in the overview.
+@@ -1138,13 +1248,16 @@ static bool check_cmd(const struct intel
+  * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
+  * if the batch appears legal but should use hardware parsing
+  */
+-int i915_parse_cmds(struct intel_engine_cs *ring,
++int i915_parse_cmds(struct intel_context *ctx,
++                  struct intel_engine_cs *ring,
+                   struct drm_i915_gem_object *batch_obj,
+-                  struct drm_i915_gem_object *shadow_batch_obj,
++                  u64 user_batch_start,
+                   u32 batch_start_offset,
+-                  u32 batch_len)
++                  u32 batch_len,
++                  struct drm_i915_gem_object *shadow_batch_obj,
++                  u64 shadow_batch_start)
+ {
+-      u32 *cmd, *batch_base, *batch_end;
++      u32 *cmd, *batch_base, *batch_end, offset = 0;
+       struct drm_i915_cmd_descriptor default_desc = { 0 };
+       bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
+       int ret = 0;
+@@ -1156,6 +1269,8 @@ int i915_parse_cmds(struct intel_engine_
+               return PTR_ERR(batch_base);
+       }
++      init_whitelist(ctx, batch_len);
++
+       /*
+        * We use the batch length as size because the shadow object is as
+        * large or larger and copy_batch() will write MI_NOPs to the extra
+@@ -1179,16 +1294,6 @@ int i915_parse_cmds(struct intel_engine_
+                       break;
+               }
+-              /*
+-               * We don't try to handle BATCH_BUFFER_START because it adds
+-               * non-trivial complexity. Instead we abort the scan and return
+-               * an error to indicate that the batch is unsafe.
+-               */
+-              if (desc->cmd.value == MI_BATCH_BUFFER_START) {
+-                      ret = -EACCES;
+-                      break;
+-              }
+-
+               if (desc->flags & CMD_DESC_FIXED)
+                       length = desc->length.fixed;
+               else
+@@ -1208,7 +1313,18 @@ int i915_parse_cmds(struct intel_engine_
+                       break;
+               }
++              if (desc->cmd.value == MI_BATCH_BUFFER_START) {
++                      ret = check_bbstart(ctx, cmd, offset, length,
++                                          batch_len, user_batch_start,
++                                          shadow_batch_start);
++                      break;
++              }
++
++              if (ctx->jump_whitelist_cmds > offset)
++                      set_bit(offset, ctx->jump_whitelist);
++
+               cmd += length;
++              offset += length;
+       }
+       if (oacontrol_set) {
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -891,6 +891,12 @@ struct intel_context {
+               int pin_count;
+       } engine[I915_NUM_RINGS];
++      /* jump_whitelist: Bit array for tracking cmds during cmdparsing */
++      unsigned long *jump_whitelist;
++
++      /* jump_whitelist_cmds: No of cmd slots available */
++      uint32_t jump_whitelist_cmds;
++
+       struct list_head link;
+ };
+@@ -3289,11 +3295,15 @@ int i915_cmd_parser_get_version(struct d
+ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
+ void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
+ bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
+-int i915_parse_cmds(struct intel_engine_cs *ring,
++int i915_parse_cmds(struct intel_context *cxt,
++                  struct intel_engine_cs *ring,
+                   struct drm_i915_gem_object *batch_obj,
+-                  struct drm_i915_gem_object *shadow_batch_obj,
++                  u64 user_batch_start,
+                   u32 batch_start_offset,
+-                  u32 batch_len);
++                  u32 batch_len,
++                  struct drm_i915_gem_object *shadow_batch_obj,
++                  u64 shadow_batch_start);
++
+ /* i915_suspend.c */
+ extern int i915_save_state(struct drm_device *dev);
+--- a/drivers/gpu/drm/i915/i915_gem_context.c
++++ b/drivers/gpu/drm/i915/i915_gem_context.c
+@@ -157,6 +157,8 @@ void i915_gem_context_free(struct kref *
+       if (i915.enable_execlists)
+               intel_lr_context_free(ctx);
++      kfree(ctx->jump_whitelist);
++
+       /*
+        * This context is going away and we need to remove all VMAs still
+        * around. This is to handle imported shared objects for which
+@@ -246,6 +248,9 @@ __create_hw_context(struct drm_device *d
+       ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
++      ctx->jump_whitelist = NULL;
++      ctx->jump_whitelist_cmds = 0;
++
+       return ctx;
+ err_out:
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -1154,7 +1154,8 @@ shadow_batch_pin(struct drm_i915_gem_obj
+ }
+ static struct drm_i915_gem_object*
+-i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
++i915_gem_execbuffer_parse(struct intel_context *ctx,
++                        struct intel_engine_cs *ring,
+                         struct drm_i915_gem_exec_object2 *shadow_exec_entry,
+                         struct eb_vmas *eb,
+                         struct i915_address_space *vm,
+@@ -1164,6 +1165,10 @@ i915_gem_execbuffer_parse(struct intel_e
+ {
+       struct drm_i915_gem_object *shadow_batch_obj;
+       struct i915_vma *vma;
++      struct i915_vma *user_vma = list_entry(eb->vmas.prev,
++                                      typeof(*user_vma), exec_list);
++      u64 batch_start;
++      u64 shadow_batch_start;
+       int ret;
+       shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
+@@ -1171,20 +1176,30 @@ i915_gem_execbuffer_parse(struct intel_e
+       if (IS_ERR(shadow_batch_obj))
+               return shadow_batch_obj;
+-      ret = i915_parse_cmds(ring,
+-                            batch_obj,
+-                            shadow_batch_obj,
+-                            batch_start_offset,
+-                            batch_len);
+-      if (ret)
+-              goto err;
+-
+       vma = shadow_batch_pin(shadow_batch_obj, vm);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto err;
+       }
++      batch_start = user_vma->node.start + batch_start_offset;
++
++      shadow_batch_start = vma->node.start;
++
++      ret = i915_parse_cmds(ctx,
++                            ring,
++                            batch_obj,
++                            batch_start,
++                            batch_start_offset,
++                            batch_len,
++                            shadow_batch_obj,
++                            shadow_batch_start);
++      if (ret) {
++              WARN_ON(vma->pin_count == 0);
++              vma->pin_count--;
++              goto err;
++      }
++
+       i915_gem_object_unpin_pages(shadow_batch_obj);
+       memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
+@@ -1545,7 +1560,7 @@ i915_gem_do_execbuffer(struct drm_device
+               if (batch_len == 0)
+                       batch_len = batch_obj->base.size - batch_off;
+-              parsed_batch_obj = i915_gem_execbuffer_parse(ring,
++              parsed_batch_obj = i915_gem_execbuffer_parse(ctx, ring,
+                                                     &shadow_exec_entry,
+                                                     eb, vm,
+                                                     batch_obj,
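
The two safety conditions check_bbstart() enforces -- the jump must land
inside the batch, and on a dword the parser has already validated -- can
be modelled in userspace. The bitmap helper below is a simplified
stand-in for the kernel's test_bit(), and the fixed MAX_CMDS bound
replaces the dynamically grown per-context whitelist; both are
assumptions of the sketch.

#include <stdbool.h>
#include <stdint.h>

#define MAX_CMDS	1024
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

struct jump_ctx {
	unsigned long whitelist[MAX_CMDS / BITS_PER_LONG];
	uint32_t whitelist_cmds;	/* number of tracked dword slots */
};

static bool bit_test(const unsigned long *map, uint32_t n)
{
	return map[n / BITS_PER_LONG] & (1ul << (n % BITS_PER_LONG));
}

/* 0 if the BB_START target was previously validated, -1 otherwise */
static int check_bbstart_model(const struct jump_ctx *ctx,
			       uint64_t jump_target, uint64_t batch_start,
			       uint32_t batch_len)
{
	uint64_t jump_offset = jump_target - batch_start;

	/* an underflow wraps far above any u32 batch_len, so one >= test
	 * catches targets both below and beyond the batch */
	if (jump_offset >= batch_len)
		return -1;

	uint32_t index = (uint32_t)jump_offset / sizeof(uint32_t);
	if (index >= ctx->whitelist_cmds || !bit_test(ctx->whitelist, index))
		return -1;
	return 0;
}
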
diff --git a/queue-4.4/drm-i915-cmdparser-fix-jump-whitelist-clearing.patch b/queue-4.4/drm-i915-cmdparser-fix-jump-whitelist-clearing.patch
new file mode 100644 (file)
index 0000000..07eb773
--- /dev/null
@@ -0,0 +1,49 @@
+From foo@baz Tue 12 Nov 2019 04:09:39 PM CET
+From: Ben Hutchings <ben@decadent.org.uk>
+Date: Mon, 11 Nov 2019 08:13:24 -0800
+Subject: drm/i915/cmdparser: Fix jump whitelist clearing
+
+From: Ben Hutchings <ben@decadent.org.uk>
+
+commit ea0b163b13ffc52818c079adb00d55e227a6da6f upstream.
+
+When a jump_whitelist bitmap is reused, it needs to be cleared.
+Currently this is done with memset() and the size calculation assumes
+bitmaps are made of 32-bit words, not longs.  So on 64-bit
+architectures, only the first half of the bitmap is cleared.
+
+If some whitelist bits are carried over between successive batches
+submitted on the same context, this will presumably allow embedding
+the rogue instructions that we're trying to reject.
+
+Use bitmap_zero() instead, which gets the calculation right.
+
+Fixes: f8c08d8faee5 ("drm/i915/cmdparser: Add support for backward jumps")
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_cmd_parser.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
+@@ -1203,7 +1203,7 @@ static void init_whitelist(struct intel_
+               return;
+       if (batch_cmds <= ctx->jump_whitelist_cmds) {
+-              memset(ctx->jump_whitelist, 0, exact_size * sizeof(u32));
++              bitmap_zero(ctx->jump_whitelist, batch_cmds);
+               return;
+       }
+@@ -1223,8 +1223,7 @@ again:
+       }
+       DRM_DEBUG("CMD: Failed to extend whitelist. BB_START may be disallowed\n");
+-      memset(ctx->jump_whitelist, 0,
+-              BITS_TO_LONGS(ctx->jump_whitelist_cmds) * sizeof(u32));
++      bitmap_zero(ctx->jump_whitelist, ctx->jump_whitelist_cmds);
+       return;
+ }
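
The sizing mistake this fixes is easy to reproduce: the whitelist is an
array of unsigned long words, but the byte count handed to memset() was
computed from sizeof(u32), so on LP64 only the first half of the words
was cleared. A standalone contrast of the two calculations, with
BITS_TO_LONGS mirroring the kernel macro:

#include <stdint.h>
#include <string.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* what bitmap_zero() does: size the memset from the bit count */
static void bitmap_zero_model(unsigned long *map, unsigned int nbits)
{
	memset(map, 0, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
}

/* the broken variant: with 64-bit longs this zeroes only half the words,
 * leaving stale whitelist bits from the previous batch in the top half */
static void broken_clear(unsigned long *map, unsigned int nbits)
{
	memset(map, 0, BITS_TO_LONGS(nbits) * sizeof(uint32_t));
}
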
diff --git a/queue-4.4/drm-i915-cmdparser-ignore-length-operands-during-command-matching.patch b/queue-4.4/drm-i915-cmdparser-ignore-length-operands-during-command-matching.patch
new file mode 100644 (file)
index 0000000..6f1922b
--- /dev/null
@@ -0,0 +1,40 @@
+From foo@baz Tue 12 Nov 2019 04:09:39 PM CET
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Thu, 20 Sep 2018 09:45:10 -0700
+Subject: drm/i915/cmdparser: Ignore Length operands during command matching
+
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+
+commit 926abff21a8f29ef159a3ac893b05c6e50e043c3 upstream.
+
+Some of the gen instruction macros (e.g. MI_DISPLAY_FLIP) have the
+length directly encoded in them. Since these are used directly in
+the tables, the Length becomes part of the comparison used for
+matching during parsing. Thus, if the cmd being parsed has a
+different length to that in the table, it is not matched and the
+cmd is accepted via the default variable length path.
+
+Fix by masking out everything except the Opcode in the cmd tables.
+
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_cmd_parser.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
+@@ -92,7 +92,7 @@
+ #define CMD(op, opm, f, lm, fl, ...)                          \
+       {                                                       \
+               .flags = (fl) | ((f) ? CMD_DESC_FIXED : 0),     \
+-              .cmd = { (op), (opm) },                         \
++              .cmd = { (op) & (opm), (opm) },                 \
+               .length = { (lm) },                             \
+               __VA_ARGS__                                     \
+       }
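
A worked example of why the mask matters: if a table macro bakes a length
into its low bits, comparing the masked command header against the
unmasked table value only matches commands submitted with that exact
length. The encoding below is invented for illustration (opcode field
assumed in bits 31:23), not copied from the i915 headers.

#include <stdbool.h>
#include <stdint.h>

#define OPCODE_MASK	0xFF800000u		/* assumed opcode field */
#define FLIP_LEN1	((0x14u << 23) | 1)	/* macro with builtin length 1 */
#define FLIP_LEN2	((0x14u << 23) | 2)	/* same command, length 2 */

static bool matches(uint32_t table_value, uint32_t cmd_header)
{
	return (cmd_header & OPCODE_MASK) == table_value;
}

/* before: the table stored FLIP_LEN1 verbatim, so matches(FLIP_LEN1,
 * FLIP_LEN2) is false and the length-2 flip escapes to the default
 * variable-length path.
 * after:  the table stores (FLIP_LEN1 & OPCODE_MASK), so both match. */
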
diff --git a/queue-4.4/drm-i915-disable-secure-batches-for-gen6.patch b/queue-4.4/drm-i915-disable-secure-batches-for-gen6.patch
new file mode 100644 (file)
index 0000000..e8f9fea
--- /dev/null
@@ -0,0 +1,74 @@
+From foo@baz Tue 12 Nov 2019 04:09:39 PM CET
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Fri, 8 Jun 2018 08:53:46 -0700
+Subject: drm/i915: Disable Secure Batches for gen6+
+
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+
+commit 44157641d448cbc0c4b73c5231d2b911f0cb0427 upstream.
+
+Retroactively stop reporting support for secure batches
+through the api for gen6+ so that older binaries trigger
+the fallback path instead.
+
+Older binaries use secure batches pre gen6 to access resources
+that are not available to normal usermode processes. However,
+all known userspace explicitly checks for HAS_SECURE_BATCHES
+before relying on the secure batch feature.
+
+Since there are no known binaries relying on this for newer gens
+we can kill secure batches from gen6, via I915_PARAM_HAS_SECURE_BATCHES.
+
+v2: rebase (Mika)
+v3: rebase (Mika)
+
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_dma.c            |    2 +-
+ drivers/gpu/drm/i915/i915_drv.h            |    3 +++
+ drivers/gpu/drm/i915/i915_gem_execbuffer.c |    4 ++++
+ 3 files changed, 8 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/i915_dma.c
++++ b/drivers/gpu/drm/i915/i915_dma.c
+@@ -133,7 +133,7 @@ static int i915_getparam(struct drm_devi
+               value = 1;
+               break;
+       case I915_PARAM_HAS_SECURE_BATCHES:
+-              value = capable(CAP_SYS_ADMIN);
++              value = HAS_SECURE_BATCHES(dev_priv) && capable(CAP_SYS_ADMIN);
+               break;
+       case I915_PARAM_HAS_PINNED_BATCHES:
+               value = 1;
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -2539,6 +2539,9 @@ struct drm_i915_cmd_table {
+ #define HAS_BSD2(dev)         (INTEL_INFO(dev)->ring_mask & BSD2_RING)
+ #define HAS_BLT(dev)          (INTEL_INFO(dev)->ring_mask & BLT_RING)
+ #define HAS_VEBOX(dev)                (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
++
++#define HAS_SECURE_BATCHES(dev_priv) (INTEL_INFO(dev_priv)->gen < 6)
++
+ #define HAS_LLC(dev)          (INTEL_INFO(dev)->has_llc)
+ #define HAS_WT(dev)           ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
+                                __I915__(dev)->ellc_size)
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -1351,6 +1351,10 @@ i915_gem_do_execbuffer(struct drm_device
+       dispatch_flags = 0;
+       if (args->flags & I915_EXEC_SECURE) {
++              /* Return -EPERM to trigger fallback code on old binaries. */
++              if (!HAS_SECURE_BATCHES(dev_priv))
++                      return -EPERM;
++
+               if (!file->is_master || !capable(CAP_SYS_ADMIN))
+                   return -EPERM;
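
The two gates work together: the getparam path stops advertising the
feature, and the execbuf path returns -EPERM so that an old binary which
skips the HAS_SECURE_BATCHES check still falls back to non-secure
submission. A compressed model of the two paths, with the gen and
privilege state folded into plain parameters as an assumption of the
sketch:

#include <stdbool.h>
#include <errno.h>

static bool has_secure_batches(int gen)
{
	return gen < 6;		/* retroactively withdrawn from gen6+ */
}

static int exec_secure(int gen, bool is_master, bool is_admin)
{
	if (!has_secure_batches(gen))
		return -EPERM;	/* old binaries see this and retry non-secure */
	if (!is_master || !is_admin)
		return -EPERM;
	return 0;		/* dispatch with I915_EXEC_SECURE */
}
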
diff --git a/queue-4.4/drm-i915-gen8-add-rc6-ctx-corruption-wa.patch b/queue-4.4/drm-i915-gen8-add-rc6-ctx-corruption-wa.patch
new file mode 100644 (file)
index 0000000..72f5a0b
--- /dev/null
@@ -0,0 +1,374 @@
+From foo@baz Tue 12 Nov 2019 04:09:39 PM CET
+From: Imre Deak <imre.deak@intel.com>
+Date: Mon, 9 Jul 2018 18:24:27 +0300
+Subject: drm/i915/gen8+: Add RC6 CTX corruption WA
+
+From: Imre Deak <imre.deak@intel.com>
+
+commit 7e34f4e4aad3fd34c02b294a3cf2321adf5b4438 upstream.
+
+In some circumstances the RC6 context can get corrupted. We can detect
+this and take the required action, that is, disable RC6 and runtime PM.
+The HW recovers from the corrupted state after a system suspend/resume
+cycle, so detect the recovery and re-enable RC6 and runtime PM.
+
+v2: rebase (Mika)
+v3:
+- Move intel_suspend_gt_powersave() to the end of the GEM suspend
+  sequence.
+- Add commit message.
+v4:
+- Rebased on intel_uncore_forcewake_put(i915->uncore, ...) API
+  change.
+v5: rebased on gem/gt split (Mika)
+
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_drv.c      |    4 
+ drivers/gpu/drm/i915/i915_drv.h      |    5 +
+ drivers/gpu/drm/i915/i915_reg.h      |    2 
+ drivers/gpu/drm/i915/intel_display.c |    9 +
+ drivers/gpu/drm/i915/intel_drv.h     |    3 
+ drivers/gpu/drm/i915/intel_pm.c      |  165 +++++++++++++++++++++++++++++++----
+ 6 files changed, 173 insertions(+), 15 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_drv.c
++++ b/drivers/gpu/drm/i915/i915_drv.c
+@@ -698,6 +698,8 @@ static int i915_drm_suspend_late(struct
+               return ret;
+       }
++      i915_rc6_ctx_wa_suspend(dev_priv);
++
+       pci_disable_device(drm_dev->pdev);
+       /*
+        * During hibernation on some platforms the BIOS may try to access
+@@ -849,6 +851,8 @@ static int i915_drm_resume_early(struct
+       intel_uncore_sanitize(dev);
+       intel_power_domains_init_hw(dev_priv);
++      i915_rc6_ctx_wa_resume(dev_priv);
++
+       return ret;
+ }
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -1159,6 +1159,7 @@ struct intel_gen6_power_mgmt {
+       bool client_boost;
+       bool enabled;
++      bool ctx_corrupted;
+       struct delayed_work delayed_resume_work;
+       unsigned boosts;
+@@ -2570,6 +2571,10 @@ struct drm_i915_cmd_table {
+ /* Early gen2 have a totally busted CS tlb and require pinned batches. */
+ #define HAS_BROKEN_CS_TLB(dev)                (IS_I830(dev) || IS_845G(dev))
++
++#define NEEDS_RC6_CTX_CORRUPTION_WA(dev)      \
++      (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen == 9)
++
+ /*
+  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
+  * even when in MSI mode. This results in spurious interrupt warnings if the
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -170,6 +170,8 @@
+ #define   ECOCHK_PPGTT_WT_HSW         (0x2<<3)
+ #define   ECOCHK_PPGTT_WB_HSW         (0x3<<3)
++#define GEN8_RC6_CTX_INFO             0x8504
++
+ #define GAC_ECO_BITS                  0x14090
+ #define   ECOBITS_SNB_BIT             (1<<13)
+ #define   ECOBITS_PPGTT_CACHE64B      (3<<8)
+--- a/drivers/gpu/drm/i915/intel_display.c
++++ b/drivers/gpu/drm/i915/intel_display.c
+@@ -10747,6 +10747,10 @@ void intel_mark_busy(struct drm_device *
+               return;
+       intel_runtime_pm_get(dev_priv);
++
++      if (NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv))
++              intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
++
+       i915_update_gfx_val(dev_priv);
+       if (INTEL_INFO(dev)->gen >= 6)
+               gen6_rps_busy(dev_priv);
+@@ -10765,6 +10769,11 @@ void intel_mark_idle(struct drm_device *
+       if (INTEL_INFO(dev)->gen >= 6)
+               gen6_rps_idle(dev->dev_private);
++      if (NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)) {
++              i915_rc6_ctx_wa_check(dev_priv);
++              intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
++      }
++
+       intel_runtime_pm_put(dev_priv);
+ }
+--- a/drivers/gpu/drm/i915/intel_drv.h
++++ b/drivers/gpu/drm/i915/intel_drv.h
+@@ -1410,6 +1410,9 @@ void intel_enable_gt_powersave(struct dr
+ void intel_disable_gt_powersave(struct drm_device *dev);
+ void intel_suspend_gt_powersave(struct drm_device *dev);
+ void intel_reset_gt_powersave(struct drm_device *dev);
++bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915);
++void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915);
++void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915);
+ void gen6_update_ring_freq(struct drm_device *dev);
+ void gen6_rps_busy(struct drm_i915_private *dev_priv);
+ void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -4599,30 +4599,42 @@ void intel_set_rps(struct drm_device *de
+               gen6_set_rps(dev, val);
+ }
+-static void gen9_disable_rps(struct drm_device *dev)
++static void gen9_disable_rc6(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       I915_WRITE(GEN6_RC_CONTROL, 0);
++}
++
++static void gen9_disable_rps(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++
+       I915_WRITE(GEN9_PG_ENABLE, 0);
+ }
+-static void gen6_disable_rps(struct drm_device *dev)
++static void gen6_disable_rc6(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       I915_WRITE(GEN6_RC_CONTROL, 0);
++}
++
++static void gen6_disable_rps(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = dev->dev_private;
++
+       I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+ }
+-static void cherryview_disable_rps(struct drm_device *dev)
++static void cherryview_disable_rc6(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       I915_WRITE(GEN6_RC_CONTROL, 0);
+ }
+-static void valleyview_disable_rps(struct drm_device *dev)
++static void valleyview_disable_rc6(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -4826,7 +4838,8 @@ static void gen9_enable_rc6(struct drm_d
+       I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
+       /* 3a: Enable RC6 */
+-      if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
++      if (!dev_priv->rps.ctx_corrupted &&
++          intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+               rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+       DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
+                       "on" : "off");
+@@ -4849,7 +4862,7 @@ static void gen9_enable_rc6(struct drm_d
+        * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
+        */
+       if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
+-          ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0)))
++          INTEL_INFO(dev)->gen == 9)
+               I915_WRITE(GEN9_PG_ENABLE, 0);
+       else
+               I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
+@@ -4892,7 +4905,8 @@ static void gen8_enable_rps(struct drm_d
+               I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+       /* 3: Enable RC6 */
+-      if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
++      if (!dev_priv->rps.ctx_corrupted &&
++          intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+               rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+       intel_print_rc6_info(dev, rc6_mask);
+       if (IS_BROADWELL(dev))
+@@ -6136,10 +6150,101 @@ static void intel_init_emon(struct drm_d
+       dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
+ }
++static bool i915_rc6_ctx_corrupted(struct drm_i915_private *dev_priv)
++{
++      return !I915_READ(GEN8_RC6_CTX_INFO);
++}
++
++static void i915_rc6_ctx_wa_init(struct drm_i915_private *i915)
++{
++      if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
++              return;
++
++      if (i915_rc6_ctx_corrupted(i915)) {
++              DRM_INFO("RC6 context corrupted, disabling runtime power management\n");
++              i915->rps.ctx_corrupted = true;
++              intel_runtime_pm_get(i915);
++      }
++}
++
++static void i915_rc6_ctx_wa_cleanup(struct drm_i915_private *i915)
++{
++      if (i915->rps.ctx_corrupted) {
++              intel_runtime_pm_put(i915);
++              i915->rps.ctx_corrupted = false;
++      }
++}
++
++/**
++ * i915_rc6_ctx_wa_suspend - system suspend sequence for the RC6 CTX WA
++ * @i915: i915 device
++ *
++ * Perform any steps needed to clean up the RC6 CTX WA before system suspend.
++ */
++void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915)
++{
++      if (i915->rps.ctx_corrupted)
++              intel_runtime_pm_put(i915);
++}
++
++/**
++ * i915_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA
++ * @i915: i915 device
++ *
++ * Perform any steps needed to re-init the RC6 CTX WA after system resume.
++ */
++void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915)
++{
++      if (!i915->rps.ctx_corrupted)
++              return;
++
++      if (i915_rc6_ctx_corrupted(i915)) {
++              intel_runtime_pm_get(i915);
++              return;
++      }
++
++      DRM_INFO("RC6 context restored, re-enabling runtime power management\n");
++      i915->rps.ctx_corrupted = false;
++}
++
++static void intel_disable_rc6(struct drm_device *dev);
++
++/**
++ * i915_rc6_ctx_wa_check - check for a new RC6 CTX corruption
++ * @i915: i915 device
++ *
++ * Check if an RC6 CTX corruption has happened since the last check and if so
++ * disable RC6 and runtime power management.
++ *
++ * Return false if no context corruption has happened since the last call of
++ * this function, true otherwise.
++*/
++bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915)
++{
++      if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915))
++              return false;
++
++      if (i915->rps.ctx_corrupted)
++              return false;
++
++      if (!i915_rc6_ctx_corrupted(i915))
++              return false;
++
++      DRM_NOTE("RC6 context corruption, disabling runtime power management\n");
++
++      intel_disable_rc6(i915->dev);
++      i915->rps.ctx_corrupted = true;
++      intel_runtime_pm_get_noresume(i915);
++
++      return true;
++}
++
+ void intel_init_gt_powersave(struct drm_device *dev)
+ {
+       i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
++      i915_rc6_ctx_wa_init(to_i915(dev));
++
+       if (IS_CHERRYVIEW(dev))
+               cherryview_init_gt_powersave(dev);
+       else if (IS_VALLEYVIEW(dev))
+@@ -6152,6 +6257,8 @@ void intel_cleanup_gt_powersave(struct d
+               return;
+       else if (IS_VALLEYVIEW(dev))
+               valleyview_cleanup_gt_powersave(dev);
++
++      i915_rc6_ctx_wa_cleanup(to_i915(dev));
+ }
+ static void gen6_suspend_rps(struct drm_device *dev)
+@@ -6184,6 +6291,38 @@ void intel_suspend_gt_powersave(struct d
+       gen6_rps_idle(dev_priv);
+ }
++static void __intel_disable_rc6(struct drm_device *dev)
++{
++      if (INTEL_INFO(dev)->gen >= 9)
++              gen9_disable_rc6(dev);
++      else if (IS_CHERRYVIEW(dev))
++              cherryview_disable_rc6(dev);
++      else if (IS_VALLEYVIEW(dev))
++              valleyview_disable_rc6(dev);
++      else
++              gen6_disable_rc6(dev);
++}
++
++static void intel_disable_rc6(struct drm_device *dev)
++{
++      struct drm_i915_private *dev_priv = to_i915(dev);
++
++      mutex_lock(&dev_priv->rps.hw_lock);
++      __intel_disable_rc6(dev);
++      mutex_unlock(&dev_priv->rps.hw_lock);
++}
++
++static void intel_disable_rps(struct drm_device *dev)
++{
++      if (IS_CHERRYVIEW(dev) || IS_VALLEYVIEW(dev))
++              return;
++
++      if (INTEL_INFO(dev)->gen >= 9)
++              gen9_disable_rps(dev);
++      else
++              gen6_disable_rps(dev);
++}
++
+ void intel_disable_gt_powersave(struct drm_device *dev)
+ {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+@@ -6194,16 +6333,12 @@ void intel_disable_gt_powersave(struct d
+               intel_suspend_gt_powersave(dev);
+               mutex_lock(&dev_priv->rps.hw_lock);
+-              if (INTEL_INFO(dev)->gen >= 9)
+-                      gen9_disable_rps(dev);
+-              else if (IS_CHERRYVIEW(dev))
+-                      cherryview_disable_rps(dev);
+-              else if (IS_VALLEYVIEW(dev))
+-                      valleyview_disable_rps(dev);
+-              else
+-                      gen6_disable_rps(dev);
++
++              __intel_disable_rc6(dev);
++              intel_disable_rps(dev);
+               dev_priv->rps.enabled = false;
++
+               mutex_unlock(&dev_priv->rps.hw_lock);
+       }
+ }
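
Taken together, the hunks above latch a one-way state: once GEN8_RC6_CTX_INFO reads back as zero, RC6 is disabled and a runtime-PM reference is held for the rest of the session. Below is a minimal standalone sketch of that latch, with the register modelled as a plain variable and the disable path reduced to a print; all names here are illustrative, not the driver's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical register model: GEN8_RC6_CTX_INFO reads back as zero
 * once the RC6 context image has been corrupted. */
static uint32_t rc6_ctx_info = 1;

struct i915_sketch { bool ctx_corrupted; };

static bool rc6_ctx_corrupted(void)
{
	return rc6_ctx_info == 0;	/* !I915_READ(GEN8_RC6_CTX_INFO) */
}

/* Once corruption is seen, disable RC6 and pin runtime PM so the
 * device can no longer enter RC6 with a corrupt context image. */
static bool rc6_ctx_wa_check(struct i915_sketch *i915)
{
	if (i915->ctx_corrupted || !rc6_ctx_corrupted())
		return false;

	printf("RC6 context corruption, disabling RC6 + runtime PM\n");
	i915->ctx_corrupted = true;	/* stands in for the disable path */
	return true;
}

int main(void)
{
	struct i915_sketch i915 = { .ctx_corrupted = false };

	rc6_ctx_wa_check(&i915);	/* no-op: register still reads 1 */
	rc6_ctx_info = 0;		/* simulate a corruption event */
	rc6_ctx_wa_check(&i915);	/* trips the workaround once */
	rc6_ctx_wa_check(&i915);	/* already latched: returns false */
	return 0;
}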
diff --git a/queue-4.4/drm-i915-gtt-add-read-only-pages-to-gen8_pte_encode.patch b/queue-4.4/drm-i915-gtt-add-read-only-pages-to-gen8_pte_encode.patch
new file mode 100644 (file)
index 0000000..070da64
--- /dev/null
@@ -0,0 +1,110 @@
+From foo@baz Tue 12 Nov 2019 04:09:39 PM CET
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Thu, 12 Jul 2018 19:53:10 +0100
+Subject: drm/i915/gtt: Add read only pages to gen8_pte_encode
+
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+
+commit 25dda4dabeeb12af5209b0183c788ef2a88dabbe upstream.
+
+We can set a bit inside the ppGTT PTE to indicate a page is read-only;
+writes from the GPU will be discarded. We can use this to protect pages
+and in particular support read-only userptr mappings (necessary for
+importing a PROT_READ vma).
+
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: Matthew Auld <matthew.william.auld@gmail.com>
+Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20180712185315.3288-1-chris@chris-wilson.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_gem_gtt.c |   22 +++++++++++++---------
+ 1 file changed, 13 insertions(+), 9 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
+@@ -172,11 +172,14 @@ static void ppgtt_unbind_vma(struct i915
+ static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
+                                 enum i915_cache_level level,
+-                                bool valid)
++                                bool valid, u32 flags)
+ {
+       gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
+       pte |= addr;
++      if (unlikely(flags & PTE_READ_ONLY))
++              pte &= ~_PAGE_RW;
++
+       switch (level) {
+       case I915_CACHE_NONE:
+               pte |= PPAT_UNCACHED_INDEX;
+@@ -460,7 +463,7 @@ static void gen8_initialize_pt(struct i9
+       gen8_pte_t scratch_pte;
+       scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+-                                    I915_CACHE_LLC, true);
++                                    I915_CACHE_LLC, true, 0);
+       fill_px(vm->dev, pt, scratch_pte);
+ }
+@@ -757,8 +760,9 @@ static void gen8_ppgtt_clear_range(struc
+ {
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(vm, struct i915_hw_ppgtt, base);
+-      gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+-                                               I915_CACHE_LLC, use_scratch);
++      gen8_pte_t scratch_pte =
++              gen8_pte_encode(px_dma(vm->scratch_page),
++                              I915_CACHE_LLC, use_scratch, 0);
+       if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+               gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
+@@ -799,7 +803,7 @@ gen8_ppgtt_insert_pte_entries(struct i91
+               pt_vaddr[pte] =
+                       gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
+-                                      cache_level, true);
++                                      cache_level, true, 0);
+               if (++pte == GEN8_PTES) {
+                       kunmap_px(ppgtt, pt_vaddr);
+                       pt_vaddr = NULL;
+@@ -1447,7 +1451,7 @@ static void gen8_dump_ppgtt(struct i915_
+       uint64_t start = ppgtt->base.start;
+       uint64_t length = ppgtt->base.total;
+       gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+-                                               I915_CACHE_LLC, true);
++                                               I915_CACHE_LLC, true, 0);
+       if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+               gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
+@@ -2357,7 +2361,7 @@ static void gen8_ggtt_insert_entries(str
+               addr = sg_dma_address(sg_iter.sg) +
+                       (sg_iter.sg_pgoffset << PAGE_SHIFT);
+               gen8_set_pte(&gtt_entries[i],
+-                           gen8_pte_encode(addr, level, true));
++                           gen8_pte_encode(addr, level, true, 0));
+               i++;
+       }
+@@ -2370,7 +2374,7 @@ static void gen8_ggtt_insert_entries(str
+        */
+       if (i != 0)
+               WARN_ON(readq(&gtt_entries[i-1])
+-                      != gen8_pte_encode(addr, level, true));
++                      != gen8_pte_encode(addr, level, true, 0));
+       /* This next bit makes the above posting read even more important. We
+        * want to flush the TLBs only after we're certain all the PTE updates
+@@ -2444,7 +2448,7 @@ static void gen8_ggtt_clear_range(struct
+       scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
+                                     I915_CACHE_LLC,
+-                                    use_scratch);
++                                    use_scratch, 0);
+       for (i = 0; i < num_entries; i++)
+               gen8_set_pte(&gtt_base[i], scratch_pte);
+       readl(gtt_base);
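
The whole patch reduces to one new flags parameter on the PTE encoder that can strip the RW bit. A minimal sketch of that encoding follows, with the caching-level bits omitted and a hypothetical PTE_READ_ONLY value; _PAGE_PRESENT and _PAGE_RW follow the x86-style bit layout the gen8 page tables reuse.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define _PAGE_PRESENT	(1ull << 0)
#define _PAGE_RW	(1ull << 1)
#define PTE_READ_ONLY	(1u << 0)	/* hypothetical flag bit */

typedef uint64_t gen8_pte_t;

static gen8_pte_t pte_encode(uint64_t addr, bool valid, uint32_t flags)
{
	gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;

	pte |= addr;			/* page-aligned DMA address */
	if (flags & PTE_READ_ONLY)
		pte &= ~_PAGE_RW;	/* GPU writes are discarded */
	return pte;
}

int main(void)
{
	gen8_pte_t ro = pte_encode(0x1000, true, PTE_READ_ONLY);
	gen8_pte_t rw = pte_encode(0x1000, true, 0);

	assert(!(ro & _PAGE_RW) && (rw & _PAGE_RW));
	return 0;
}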
diff --git a/queue-4.4/drm-i915-gtt-disable-read-only-support-under-gvt.patch b/queue-4.4/drm-i915-gtt-disable-read-only-support-under-gvt.patch
new file mode 100644 (file)
index 0000000..cad1df9
--- /dev/null
@@ -0,0 +1,42 @@
+From foo@baz Tue 12 Nov 2019 04:09:39 PM CET
+From: Chris Wilson <chris@chris-wilson.co.uk>
+Date: Thu, 12 Jul 2018 19:53:12 +0100
+Subject: drm/i915/gtt: Disable read-only support under GVT
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+commit c9e666880de5a1fed04dc412b046916d542b72dd upstream.
+
+GVT is not propagating the PTE bits, and is always setting the
+read-write bit, thus breaking read-only support.
+
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Zhenyu Wang <zhenyuw@linux.intel.com>
+Cc: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: Matthew Auld <matthew.william.auld@gmail.com>
+Reviewed-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20180712185315.3288-3-chris@chris-wilson.co.uk
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_gem_gtt.c |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
+@@ -1522,8 +1522,12 @@ static int gen8_ppgtt_init(struct i915_h
+       ppgtt->base.unbind_vma = ppgtt_unbind_vma;
+       ppgtt->base.bind_vma = ppgtt_bind_vma;
+-      /* From bdw, there is support for read-only pages in the PPGTT */
+-      ppgtt->base.has_read_only = true;
++      /*
++       * From bdw, there is support for read-only pages in the PPGTT.
++       *
++       * XXX GVT is not honouring the lack of RW in the PTE bits.
++       */
++      ppgtt->base.has_read_only = !intel_vgpu_active(ppgtt->base.dev);
+       ppgtt->debug_dump = gen8_dump_ppgtt;
diff --git a/queue-4.4/drm-i915-gtt-read-only-pages-for-insert_entries-on-bdw.patch b/queue-4.4/drm-i915-gtt-read-only-pages-for-insert_entries-on-bdw.patch
new file mode 100644 (file)
index 0000000..9f4aef3
--- /dev/null
@@ -0,0 +1,185 @@
+From foo@baz Tue 12 Nov 2019 04:09:39 PM CET
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Mon, 6 Aug 2018 14:10:48 -0700
+Subject: drm/i915/gtt: Read-only pages for insert_entries on bdw+
+
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+
+commit 250f8c8140ac0a5e5acb91891d6813f12778b224 upstream.
+
+Hook up the flags to allow read-only ppGTT mappings for gen8+
+
+v2: Include a selftest to check that writes to a readonly PTE are
+dropped
+v3: Don't duplicate cpu_check() as we can just reuse it, and even worse
+don't wholesale copy the theory-of-operation comment from igt_ctx_exec
+without changing it to explain the intention behind the new test!
+v4: Joonas really likes magic mystery values
+
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: Matthew Auld <matthew.william.auld@gmail.com>
+Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20180712185315.3288-2-chris@chris-wilson.co.uk
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_gem_gtt.c     |   30 ++++++++++++++++++++----------
+ drivers/gpu/drm/i915/i915_gem_gtt.h     |    3 +++
+ drivers/gpu/drm/i915/intel_ringbuffer.c |   10 ++++++++--
+ 3 files changed, 31 insertions(+), 12 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
+@@ -152,7 +152,8 @@ static int ppgtt_bind_vma(struct i915_vm
+ {
+       u32 pte_flags = 0;
+-      /* Currently applicable only to VLV */
++      /* Applicable to VLV, and gen8+ */
++      pte_flags = 0;
+       if (vma->obj->gt_ro)
+               pte_flags |= PTE_READ_ONLY;
+@@ -783,7 +784,8 @@ gen8_ppgtt_insert_pte_entries(struct i91
+                             struct i915_page_directory_pointer *pdp,
+                             struct sg_page_iter *sg_iter,
+                             uint64_t start,
+-                            enum i915_cache_level cache_level)
++                            enum i915_cache_level cache_level,
++                            u32 flags)
+ {
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(vm, struct i915_hw_ppgtt, base);
+@@ -803,7 +805,7 @@ gen8_ppgtt_insert_pte_entries(struct i91
+               pt_vaddr[pte] =
+                       gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
+-                                      cache_level, true, 0);
++                                      cache_level, true, flags);
+               if (++pte == GEN8_PTES) {
+                       kunmap_px(ppgtt, pt_vaddr);
+                       pt_vaddr = NULL;
+@@ -824,7 +826,7 @@ static void gen8_ppgtt_insert_entries(st
+                                     struct sg_table *pages,
+                                     uint64_t start,
+                                     enum i915_cache_level cache_level,
+-                                    u32 unused)
++                                    u32 flags)
+ {
+       struct i915_hw_ppgtt *ppgtt =
+               container_of(vm, struct i915_hw_ppgtt, base);
+@@ -834,7 +836,7 @@ static void gen8_ppgtt_insert_entries(st
+       if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+               gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
+-                                            cache_level);
++                                            cache_level, flags);
+       } else {
+               struct i915_page_directory_pointer *pdp;
+               uint64_t templ4, pml4e;
+@@ -842,7 +844,7 @@ static void gen8_ppgtt_insert_entries(st
+               gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
+                       gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
+-                                                    start, cache_level);
++                                                    start, cache_level, flags);
+               }
+       }
+ }
+@@ -1519,6 +1521,10 @@ static int gen8_ppgtt_init(struct i915_h
+       ppgtt->base.clear_range = gen8_ppgtt_clear_range;
+       ppgtt->base.unbind_vma = ppgtt_unbind_vma;
+       ppgtt->base.bind_vma = ppgtt_bind_vma;
++
++      /* From bdw, there is support for read-only pages in the PPGTT */
++      ppgtt->base.has_read_only = true;
++
+       ppgtt->debug_dump = gen8_dump_ppgtt;
+       if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+@@ -2347,7 +2353,7 @@ static void gen8_set_pte(void __iomem *a
+ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
+                                    struct sg_table *st,
+                                    uint64_t start,
+-                                   enum i915_cache_level level, u32 unused)
++                                   enum i915_cache_level level, u32 flags)
+ {
+       struct drm_i915_private *dev_priv = vm->dev->dev_private;
+       unsigned first_entry = start >> PAGE_SHIFT;
+@@ -2361,7 +2367,7 @@ static void gen8_ggtt_insert_entries(str
+               addr = sg_dma_address(sg_iter.sg) +
+                       (sg_iter.sg_pgoffset << PAGE_SHIFT);
+               gen8_set_pte(&gtt_entries[i],
+-                           gen8_pte_encode(addr, level, true, 0));
++                           gen8_pte_encode(addr, level, true, flags));
+               i++;
+       }
+@@ -2374,7 +2380,7 @@ static void gen8_ggtt_insert_entries(str
+        */
+       if (i != 0)
+               WARN_ON(readq(&gtt_entries[i-1])
+-                      != gen8_pte_encode(addr, level, true, 0));
++                      != gen8_pte_encode(addr, level, true, flags));
+       /* This next bit makes the above posting read even more important. We
+        * want to flush the TLBs only after we're certain all the PTE updates
+@@ -2514,7 +2520,8 @@ static int ggtt_bind_vma(struct i915_vma
+       if (ret)
+               return ret;
+-      /* Currently applicable only to VLV */
++      /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
++      pte_flags = 0;
+       if (obj->gt_ro)
+               pte_flags |= PTE_READ_ONLY;
+@@ -2657,6 +2664,9 @@ static int i915_gem_setup_global_gtt(str
+       i915_address_space_init(ggtt_vm, dev_priv);
+       ggtt_vm->total += PAGE_SIZE;
++      /* Only VLV supports read-only GGTT mappings */
++      ggtt_vm->has_read_only = IS_VALLEYVIEW(dev_priv);
++
+       if (intel_vgpu_active(dev)) {
+               ret = intel_vgt_balloon(dev);
+               if (ret)
+--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
++++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
+@@ -307,6 +307,9 @@ struct i915_address_space {
+        */
+       struct list_head inactive_list;
++      /* Some systems support read-only mappings for GGTT and/or PPGTT */
++      bool has_read_only:1;
++
+       /* FIXME: Need a more generic return type */
+       gen6_pte_t (*pte_encode)(dma_addr_t addr,
+                                enum i915_cache_level level,
+--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
++++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
+@@ -2058,6 +2058,8 @@ static void intel_destroy_ringbuffer_obj
+ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
+                                     struct intel_ringbuffer *ringbuf)
+ {
++      struct drm_i915_private *dev_priv = to_i915(dev);
++      struct i915_address_space *vm = &dev_priv->gtt.base;
+       struct drm_i915_gem_object *obj;
+       obj = NULL;
+@@ -2068,8 +2070,12 @@ static int intel_alloc_ringbuffer_obj(st
+       if (obj == NULL)
+               return -ENOMEM;
+-      /* mark ring buffers as read-only from GPU side by default */
+-      obj->gt_ro = 1;
++      /*
++       * Mark ring buffers as read-only from GPU side (so no stray overwrites)
++       * if supported by the platform's GGTT.
++       */
++      if (vm->has_read_only)
++              obj->gt_ro = 1;
+       ringbuf->obj = obj;
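
The plumbing above is flag propagation: at bind time a read-only object sets PTE_READ_ONLY, insert_entries hands that flag through to the PTE encoder, and ring buffers opt in only when the backing VM advertises read-only support. A compressed sketch of that decision, with stub types standing in for the driver structures (the flag value is illustrative):

#include <stdbool.h>
#include <stdint.h>

#define PTE_READ_ONLY	(1u << 0)	/* hypothetical flag bit */

struct address_space_sketch {
	bool has_read_only;	/* ppGTT on bdw+, GGTT only on VLV */
};

struct object_sketch {
	bool gt_ro;		/* GPU-read-only requested */
};

/* At bind time the object's RO request becomes a PTE flag, which the
 * insert_entries path passes through unchanged to the PTE encoder. */
static uint32_t bind_pte_flags(const struct object_sketch *obj)
{
	uint32_t pte_flags = 0;

	if (obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;
	return pte_flags;
}

/* Ring buffers only ask for RO when the backing VM can honour it. */
static void mark_ringbuffer(struct object_sketch *obj,
			    const struct address_space_sketch *vm)
{
	if (vm->has_read_only)
		obj->gt_ro = true;
}

int main(void)
{
	struct address_space_sketch ggtt = { .has_read_only = false };
	struct object_sketch ring = { 0 };

	mark_ringbuffer(&ring, &ggtt);	/* stays RW on a non-VLV GGTT */
	return (int)bind_pte_flags(&ring);	/* 0: no PTE_READ_ONLY */
}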
diff --git a/queue-4.4/drm-i915-lower-rm-timeout-to-avoid-dsi-hard-hangs.patch b/queue-4.4/drm-i915-lower-rm-timeout-to-avoid-dsi-hard-hangs.patch
new file mode 100644 (file)
index 0000000..8781634
--- /dev/null
@@ -0,0 +1,75 @@
+From foo@baz Tue 12 Nov 2019 04:09:39 PM CET
+From: Uma Shankar <uma.shankar@intel.com>
+Date: Tue, 7 Aug 2018 21:15:35 +0530
+Subject: drm/i915: Lower RM timeout to avoid DSI hard hangs
+
+From: Uma Shankar <uma.shankar@intel.com>
+
+commit 1d85a299c4db57c55e0229615132c964d17aa765 upstream.
+
+In BXT/APL, device 2 MMIO reads from the MIPI controller require its
+PLL to be turned ON. If the MIPI PLL is off (MIPI display not active
+or connected) and anyone (host or GT engine) reads MIPI registers,
+the system hard hangs. This is a hardware restriction/limitation.
+
+The driver by itself doesn't read MIPI registers while the MIPI
+display is off, but any userspace application can submit an
+unprivileged batch buffer for execution, and that batch buffer may
+contain MMIO reads, which are allowed even for unprivileged
+applications. If such reads target the MIPI DSI controller while the
+MIPI display is inactive, the MMIO read hard hangs the system, and
+the only way to recover is a hard reboot. A genuine
+process/application won't submit a batch buffer like this and causes
+no issue, but on a compromised system a malicious userspace process
+can craft such a batch buffer and trigger a system hard hang (a
+denial-of-service attack).
+
+The fix is to lower the internal MMIO timeout to an optimum value of
+950us, as recommended by the hardware team. If the timeout exceeds
+1ms (which any MMIO READ of a DSI-specific register will hit when the
+PLL is off, whatever value we choose), the system hangs. If the
+timeout value is lower, it stays below that threshold even when a
+timeout does occur, and the system does not enter a hung state. This
+avoids the hang without losing any programming or GT interrupts,
+taking the worst case of the lowest CDCLK frequency and an early DC5
+abort into account.
+
+Signed-off-by: Uma Shankar <uma.shankar@intel.com>
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_reg.h |    4 ++++
+ drivers/gpu/drm/i915/intel_pm.c |    8 ++++++++
+ 2 files changed, 12 insertions(+)
+
+--- a/drivers/gpu/drm/i915/i915_reg.h
++++ b/drivers/gpu/drm/i915/i915_reg.h
+@@ -5709,6 +5709,10 @@ enum skl_disp_power_wells {
+ #define GAMMA_MODE_MODE_12BIT (2 << 0)
+ #define GAMMA_MODE_MODE_SPLIT (3 << 0)
++/* Display Internal Timeout Register */
++#define RM_TIMEOUT            0x42060
++#define  MMIO_TIMEOUT_US(us)  ((us) << 0)
++
+ /* interrupts */
+ #define DE_MASTER_IRQ_CONTROL   (1 << 31)
+ #define DE_SPRITEB_FLIP_DONE    (1 << 29)
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -66,6 +66,14 @@ static void bxt_init_clock_gating(struct
+        */
+       I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+                  GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);
++
++      /*
++       * Lower the display internal timeout.
++       * This is needed to avoid any hard hangs when DSI port PLL
++       * is off and a MMIO access is attempted by any privilege
++       * application, using batch buffers or any other means.
++       */
++      I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950));
+ }
+ static void i915_pineview_get_mem_freq(struct drm_device *dev)
diff --git a/queue-4.4/drm-i915-remove-master-tables-from-cmdparser.patch b/queue-4.4/drm-i915-remove-master-tables-from-cmdparser.patch
new file mode 100644 (file)
index 0000000..eb66ccd
--- /dev/null
@@ -0,0 +1,265 @@
+From foo@baz Tue 12 Nov 2019 04:09:39 PM CET
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Fri, 8 Jun 2018 10:05:26 -0700
+Subject: drm/i915: Remove Master tables from cmdparser
+
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+
+commit 66d8aba1cd6db34af10de465c0d52af679288cb6 upstream.
+
+The previous patch killed support for secure batches
+on gen6+, and hence the cmdparser's master tables are
+now dead code. Remove them.
+
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_cmd_parser.c     |   72 ++++-------------------------
+ drivers/gpu/drm/i915/i915_drv.h            |    3 -
+ drivers/gpu/drm/i915/i915_gem_execbuffer.c |    9 +--
+ 3 files changed, 15 insertions(+), 69 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
+@@ -50,13 +50,11 @@
+  * granting userspace undue privileges. There are three categories of privilege.
+  *
+  * First, commands which are explicitly defined as privileged or which should
+- * only be used by the kernel driver. The parser generally rejects such
+- * commands, though it may allow some from the drm master process.
++ * only be used by the kernel driver. The parser rejects such commands
+  *
+  * Second, commands which access registers. To support correct/enhanced
+  * userspace functionality, particularly certain OpenGL extensions, the parser
+- * provides a whitelist of registers which userspace may safely access (for both
+- * normal and drm master processes).
++ * provides a whitelist of registers which userspace may safely access
+  *
+  * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc).
+  * The parser always rejects such commands.
+@@ -81,9 +79,9 @@
+  * in the per-ring command tables.
+  *
+  * Other command table entries map fairly directly to high level categories
+- * mentioned above: rejected, master-only, register whitelist. The parser
+- * implements a number of checks, including the privileged memory checks, via a
+- * general bitmasking mechanism.
++ * mentioned above: rejected, register whitelist. The parser implements a number
++ * of checks, including the privileged memory checks, via a general bitmasking
++ * mechanism.
+  */
+ #define STD_MI_OPCODE_MASK  0xFF800000
+@@ -109,14 +107,13 @@
+ #define R CMD_DESC_REJECT
+ #define W CMD_DESC_REGISTER
+ #define B CMD_DESC_BITMASK
+-#define M CMD_DESC_MASTER
+ /*            Command                          Mask   Fixed Len   Action
+             ---------------------------------------------------------- */
+ static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
+       CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
+       CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      R  ),
+-      CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      M  ),
++      CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      R  ),
+       CMD(  MI_ARB_CHECK,                     SMI,    F,  1,      S  ),
+       CMD(  MI_REPORT_HEAD,                   SMI,    F,  1,      S  ),
+       CMD(  MI_SUSPEND_FLUSH,                 SMI,    F,  1,      S  ),
+@@ -213,7 +210,7 @@ static const struct drm_i915_cmd_descrip
+       CMD(  MI_URB_ATOMIC_ALLOC,              SMI,    F,  1,      S  ),
+       CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
+       CMD(  MI_RS_CONTEXT,                    SMI,    F,  1,      S  ),
+-      CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
++      CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   R  ),
+       CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
+       CMD(  MI_LOAD_REGISTER_REG,             SMI,   !F,  0xFF,   R  ),
+       CMD(  MI_RS_STORE_DATA_IMM,             SMI,   !F,  0xFF,   S  ),
+@@ -345,7 +342,7 @@ static const struct drm_i915_cmd_descrip
+ };
+ static const struct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
+-      CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   M  ),
++      CMD(  MI_LOAD_SCAN_LINES_INCL,          SMI,   !F,  0x3F,   R  ),
+       CMD(  MI_LOAD_SCAN_LINES_EXCL,          SMI,   !F,  0x3F,   R  ),
+ };
+@@ -359,7 +356,6 @@ static const struct drm_i915_cmd_descrip
+ #undef R
+ #undef W
+ #undef B
+-#undef M
+ static const struct drm_i915_cmd_table gen7_render_cmd_table[] = {
+       { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
+@@ -479,19 +475,6 @@ static const struct drm_i915_reg_descrip
+       REG32(BCS_SWCTRL),
+ };
+-static const struct drm_i915_reg_descriptor ivb_master_regs[] = {
+-      REG32(FORCEWAKE_MT),
+-      REG32(DERRMR),
+-      REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_A)),
+-      REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_B)),
+-      REG32(GEN7_PIPE_DE_LOAD_SL(PIPE_C)),
+-};
+-
+-static const struct drm_i915_reg_descriptor hsw_master_regs[] = {
+-      REG32(FORCEWAKE_MT),
+-      REG32(DERRMR),
+-};
+-
+ #undef REG64
+ #undef REG32
+@@ -608,9 +591,7 @@ static bool check_sorted(int ring_id,
+ static bool validate_regs_sorted(struct intel_engine_cs *ring)
+ {
+-      return check_sorted(ring->id, ring->reg_table, ring->reg_count) &&
+-              check_sorted(ring->id, ring->master_reg_table,
+-                           ring->master_reg_count);
++      return check_sorted(ring->id, ring->reg_table, ring->reg_count);
+ }
+ struct cmd_node {
+@@ -708,14 +689,6 @@ int i915_cmd_parser_init_ring(struct int
+               ring->reg_table = gen7_render_regs;
+               ring->reg_count = ARRAY_SIZE(gen7_render_regs);
+-              if (IS_HASWELL(ring->dev)) {
+-                      ring->master_reg_table = hsw_master_regs;
+-                      ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+-              } else {
+-                      ring->master_reg_table = ivb_master_regs;
+-                      ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+-              }
+-
+               ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
+               break;
+       case VCS:
+@@ -735,14 +708,6 @@ int i915_cmd_parser_init_ring(struct int
+               ring->reg_table = gen7_blt_regs;
+               ring->reg_count = ARRAY_SIZE(gen7_blt_regs);
+-              if (IS_HASWELL(ring->dev)) {
+-                      ring->master_reg_table = hsw_master_regs;
+-                      ring->master_reg_count = ARRAY_SIZE(hsw_master_regs);
+-              } else {
+-                      ring->master_reg_table = ivb_master_regs;
+-                      ring->master_reg_count = ARRAY_SIZE(ivb_master_regs);
+-              }
+-
+               ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+               break;
+       case VECS:
+@@ -972,7 +937,6 @@ bool i915_needs_cmd_parser(struct intel_
+ static bool check_cmd(const struct intel_engine_cs *ring,
+                     const struct drm_i915_cmd_descriptor *desc,
+                     const u32 *cmd, u32 length,
+-                    const bool is_master,
+                     bool *oacontrol_set)
+ {
+       if (desc->flags & CMD_DESC_REJECT) {
+@@ -980,12 +944,6 @@ static bool check_cmd(const struct intel
+               return false;
+       }
+-      if ((desc->flags & CMD_DESC_MASTER) && !is_master) {
+-              DRM_DEBUG_DRIVER("CMD: Rejected master-only command: 0x%08X\n",
+-                               *cmd);
+-              return false;
+-      }
+-
+       if (desc->flags & CMD_DESC_REGISTER) {
+               /*
+                * Get the distance between individual register offset
+@@ -1002,11 +960,6 @@ static bool check_cmd(const struct intel
+                               find_reg(ring->reg_table, ring->reg_count,
+                                        reg_addr);
+-                      if (!reg && is_master)
+-                              reg = find_reg(ring->master_reg_table,
+-                                             ring->master_reg_count,
+-                                             reg_addr);
+-
+                       if (!reg) {
+                               DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n",
+                                                reg_addr, *cmd, ring->id);
+@@ -1100,7 +1053,6 @@ static bool check_cmd(const struct intel
+  * @shadow_batch_obj: copy of the batch buffer in question
+  * @batch_start_offset: byte offset in the batch at which execution starts
+  * @batch_len: length of the commands in batch_obj
+- * @is_master: is the submitting process the drm master?
+  *
+  * Parses the specified batch buffer looking for privilege violations as
+  * described in the overview.
+@@ -1112,8 +1064,7 @@ int i915_parse_cmds(struct intel_engine_
+                   struct drm_i915_gem_object *batch_obj,
+                   struct drm_i915_gem_object *shadow_batch_obj,
+                   u32 batch_start_offset,
+-                  u32 batch_len,
+-                  bool is_master)
++                  u32 batch_len)
+ {
+       u32 *cmd, *batch_base, *batch_end;
+       struct drm_i915_cmd_descriptor default_desc = { 0 };
+@@ -1174,8 +1125,7 @@ int i915_parse_cmds(struct intel_engine_
+                       break;
+               }
+-              if (!check_cmd(ring, desc, cmd, length, is_master,
+-                             &oacontrol_set)) {
++              if (!check_cmd(ring, desc, cmd, length, &oacontrol_set)) {
+                       ret = -EINVAL;
+                       break;
+               }
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -3287,8 +3287,7 @@ int i915_parse_cmds(struct intel_engine_
+                   struct drm_i915_gem_object *batch_obj,
+                   struct drm_i915_gem_object *shadow_batch_obj,
+                   u32 batch_start_offset,
+-                  u32 batch_len,
+-                  bool is_master);
++                  u32 batch_len);
+ /* i915_suspend.c */
+ extern int i915_save_state(struct drm_device *dev);
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -1129,8 +1129,7 @@ i915_gem_execbuffer_parse(struct intel_e
+                         struct eb_vmas *eb,
+                         struct drm_i915_gem_object *batch_obj,
+                         u32 batch_start_offset,
+-                        u32 batch_len,
+-                        bool is_master)
++                        u32 batch_len)
+ {
+       struct drm_i915_gem_object *shadow_batch_obj;
+       struct i915_vma *vma;
+@@ -1145,8 +1144,7 @@ i915_gem_execbuffer_parse(struct intel_e
+                             batch_obj,
+                             shadow_batch_obj,
+                             batch_start_offset,
+-                            batch_len,
+-                            is_master);
++                            batch_len);
+       if (ret)
+               goto err;
+@@ -1501,8 +1499,7 @@ i915_gem_do_execbuffer(struct drm_device
+                                                     eb,
+                                                     batch_obj,
+                                                     args->batch_start_offset,
+-                                                    args->batch_len,
+-                                                    file->is_master);
++                                                    args->batch_len);
+               if (IS_ERR(parsed_batch_obj)) {
+                       ret = PTR_ERR(parsed_batch_obj);
+                       goto err;
diff --git a/queue-4.4/drm-i915-rename-gen7-cmdparser-tables.patch b/queue-4.4/drm-i915-rename-gen7-cmdparser-tables.patch
new file mode 100644 (file)
index 0000000..e062e40
--- /dev/null
@@ -0,0 +1,179 @@
+From foo@baz Tue 12 Nov 2019 04:09:39 PM CET
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Fri, 20 Apr 2018 14:26:01 -0700
+Subject: drm/i915: Rename gen7 cmdparser tables
+
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+
+commit 0a2f661b6c21815a7fa60e30babe975fee8e73c6 upstream.
+
+We're about to introduce some new tables for later gens, and the
+current naming for the gen7 tables will no longer make sense.
+
+v2: rebase
+
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_cmd_parser.c |   70 ++++++++++++++++-----------------
+ 1 file changed, 35 insertions(+), 35 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
++++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
+@@ -113,7 +113,7 @@
+ /*            Command                          Mask   Fixed Len   Action
+             ---------------------------------------------------------- */
+-static const struct drm_i915_cmd_descriptor common_cmds[] = {
++static const struct drm_i915_cmd_descriptor gen7_common_cmds[] = {
+       CMD(  MI_NOOP,                          SMI,    F,  1,      S  ),
+       CMD(  MI_USER_INTERRUPT,                SMI,    F,  1,      R  ),
+       CMD(  MI_WAIT_FOR_EVENT,                SMI,    F,  1,      M  ),
+@@ -146,7 +146,7 @@ static const struct drm_i915_cmd_descrip
+       CMD(  MI_BATCH_BUFFER_START,            SMI,   !F,  0xFF,   S  ),
+ };
+-static const struct drm_i915_cmd_descriptor render_cmds[] = {
++static const struct drm_i915_cmd_descriptor gen7_render_cmds[] = {
+       CMD(  MI_FLUSH,                         SMI,    F,  1,      S  ),
+       CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
+       CMD(  MI_PREDICATE,                     SMI,    F,  1,      S  ),
+@@ -229,7 +229,7 @@ static const struct drm_i915_cmd_descrip
+       CMD(  GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS,  S3D,   !F,  0x1FF,  S  ),
+ };
+-static const struct drm_i915_cmd_descriptor video_cmds[] = {
++static const struct drm_i915_cmd_descriptor gen7_video_cmds[] = {
+       CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
+       CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
+       CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
+@@ -273,7 +273,7 @@ static const struct drm_i915_cmd_descrip
+       CMD(  MFX_WAIT,                         SMFX,   F,  1,      S  ),
+ };
+-static const struct drm_i915_cmd_descriptor vecs_cmds[] = {
++static const struct drm_i915_cmd_descriptor gen7_vecs_cmds[] = {
+       CMD(  MI_ARB_ON_OFF,                    SMI,    F,  1,      R  ),
+       CMD(  MI_SET_APPID,                     SMI,    F,  1,      S  ),
+       CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0xFF,   B,
+@@ -311,7 +311,7 @@ static const struct drm_i915_cmd_descrip
+             }},                                                      ),
+ };
+-static const struct drm_i915_cmd_descriptor blt_cmds[] = {
++static const struct drm_i915_cmd_descriptor gen7_blt_cmds[] = {
+       CMD(  MI_DISPLAY_FLIP,                  SMI,   !F,  0xFF,   R  ),
+       CMD(  MI_STORE_DWORD_IMM,               SMI,   !F,  0x3FF,  B,
+             .bits = {{
+@@ -361,35 +361,35 @@ static const struct drm_i915_cmd_descrip
+ #undef B
+ #undef M
+-static const struct drm_i915_cmd_table gen7_render_cmds[] = {
+-      { common_cmds, ARRAY_SIZE(common_cmds) },
+-      { render_cmds, ARRAY_SIZE(render_cmds) },
++static const struct drm_i915_cmd_table gen7_render_cmd_table[] = {
++      { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
++      { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
+ };
+-static const struct drm_i915_cmd_table hsw_render_ring_cmds[] = {
+-      { common_cmds, ARRAY_SIZE(common_cmds) },
+-      { render_cmds, ARRAY_SIZE(render_cmds) },
++static const struct drm_i915_cmd_table hsw_render_ring_cmd_table[] = {
++      { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
++      { gen7_render_cmds, ARRAY_SIZE(gen7_render_cmds) },
+       { hsw_render_cmds, ARRAY_SIZE(hsw_render_cmds) },
+ };
+-static const struct drm_i915_cmd_table gen7_video_cmds[] = {
+-      { common_cmds, ARRAY_SIZE(common_cmds) },
+-      { video_cmds, ARRAY_SIZE(video_cmds) },
++static const struct drm_i915_cmd_table gen7_video_cmd_table[] = {
++      { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
++      { gen7_video_cmds, ARRAY_SIZE(gen7_video_cmds) },
+ };
+-static const struct drm_i915_cmd_table hsw_vebox_cmds[] = {
+-      { common_cmds, ARRAY_SIZE(common_cmds) },
+-      { vecs_cmds, ARRAY_SIZE(vecs_cmds) },
++static const struct drm_i915_cmd_table hsw_vebox_cmd_table[] = {
++      { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
++      { gen7_vecs_cmds, ARRAY_SIZE(gen7_vecs_cmds) },
+ };
+-static const struct drm_i915_cmd_table gen7_blt_cmds[] = {
+-      { common_cmds, ARRAY_SIZE(common_cmds) },
+-      { blt_cmds, ARRAY_SIZE(blt_cmds) },
++static const struct drm_i915_cmd_table gen7_blt_cmd_table[] = {
++      { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
++      { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
+ };
+-static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
+-      { common_cmds, ARRAY_SIZE(common_cmds) },
+-      { blt_cmds, ARRAY_SIZE(blt_cmds) },
++static const struct drm_i915_cmd_table hsw_blt_ring_cmd_table[] = {
++      { gen7_common_cmds, ARRAY_SIZE(gen7_common_cmds) },
++      { gen7_blt_cmds, ARRAY_SIZE(gen7_blt_cmds) },
+       { hsw_blt_cmds, ARRAY_SIZE(hsw_blt_cmds) },
+ };
+@@ -697,12 +697,12 @@ int i915_cmd_parser_init_ring(struct int
+       switch (ring->id) {
+       case RCS:
+               if (IS_HASWELL(ring->dev)) {
+-                      cmd_tables = hsw_render_ring_cmds;
++                      cmd_tables = hsw_render_ring_cmd_table;
+                       cmd_table_count =
+-                              ARRAY_SIZE(hsw_render_ring_cmds);
++                              ARRAY_SIZE(hsw_render_ring_cmd_table);
+               } else {
+-                      cmd_tables = gen7_render_cmds;
+-                      cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
++                      cmd_tables = gen7_render_cmd_table;
++                      cmd_table_count = ARRAY_SIZE(gen7_render_cmd_table);
+               }
+               ring->reg_table = gen7_render_regs;
+@@ -719,17 +719,17 @@ int i915_cmd_parser_init_ring(struct int
+               ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
+               break;
+       case VCS:
+-              cmd_tables = gen7_video_cmds;
+-              cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
++              cmd_tables = gen7_video_cmd_table;
++              cmd_table_count = ARRAY_SIZE(gen7_video_cmd_table);
+               ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+               break;
+       case BCS:
+               if (IS_HASWELL(ring->dev)) {
+-                      cmd_tables = hsw_blt_ring_cmds;
+-                      cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
++                      cmd_tables = hsw_blt_ring_cmd_table;
++                      cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmd_table);
+               } else {
+-                      cmd_tables = gen7_blt_cmds;
+-                      cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
++                      cmd_tables = gen7_blt_cmd_table;
++                      cmd_table_count = ARRAY_SIZE(gen7_blt_cmd_table);
+               }
+               ring->reg_table = gen7_blt_regs;
+@@ -746,8 +746,8 @@ int i915_cmd_parser_init_ring(struct int
+               ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
+               break;
+       case VECS:
+-              cmd_tables = hsw_vebox_cmds;
+-              cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
++              cmd_tables = hsw_vebox_cmd_table;
++              cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table);
+               /* VECS can use the same length_mask function as VCS */
+               ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
+               break;
diff --git a/queue-4.4/drm-i915-support-ro-ppgtt-mapped-cmdparser-shadow-buffers.patch b/queue-4.4/drm-i915-support-ro-ppgtt-mapped-cmdparser-shadow-buffers.patch
new file mode 100644 (file)
index 0000000..c955a86
--- /dev/null
@@ -0,0 +1,170 @@
+From foo@baz Tue 12 Nov 2019 04:09:39 PM CET
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+Date: Tue, 22 May 2018 13:59:06 -0700
+Subject: drm/i915: Support ro ppgtt mapped cmdparser shadow buffers
+
+From: Jon Bloomfield <jon.bloomfield@intel.com>
+
+commit 4f7af1948abcb18b4772fe1bcd84d7d27d96258c upstream.
+
+For Gen7, the original cmdparser motive was to permit limited
+use of register read/write instructions in unprivileged BBs.
+This worked by copying the user-supplied bb to a kmd-owned
+bb and running it in secure mode, from the ggtt, only if
+the scanner found no unsafe commands or registers.
+
+For Gen8+ we can't use this same technique, because running bbs
+from the ggtt also disables access to ppgtt space. But we also
+don't actually require 'secure' execution, since we are only
+trying to reduce the available command/register set. Instead we
+copy the user buffer to a kmd-owned read-only bb in ppgtt and
+run it in the usual non-secure mode.
+
+Note that ro pages are only supported by ppgtt (not ggtt), but
+luckily that's exactly what we need.
+
+Add the required paths to map the shadow buffer to ppgtt ro for Gen8+
+
+v2: IS_GEN7/IS_GEN (Mika)
+v3: rebase
+v4: rebase
+v5: rebase
+
+Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Takashi Iwai <tiwai@suse.de>
+Cc: Tyler Hicks <tyhicks@canonical.com>
+Signed-off-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Reviewed-by: Chris Wilson <chris.p.wilson@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/i915_drv.h            |    6 ++
+ drivers/gpu/drm/i915/i915_gem_execbuffer.c |   62 +++++++++++++++++++++--------
+ 2 files changed, 52 insertions(+), 16 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_drv.h
++++ b/drivers/gpu/drm/i915/i915_drv.h
+@@ -2556,6 +2556,12 @@ struct drm_i915_cmd_table {
+ #define HAS_OVERLAY(dev)              (INTEL_INFO(dev)->has_overlay)
+ #define OVERLAY_NEEDS_PHYSICAL(dev)   (INTEL_INFO(dev)->overlay_needs_physical)
++/*
++ * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution
++ * All later gens can run the final buffer from the ppgtt
++ */
++#define CMDPARSER_USES_GGTT(dev_priv) IS_GEN7(dev_priv)
++
+ /* Early gen2 have a totally busted CS tlb and require pinned batches. */
+ #define HAS_BROKEN_CS_TLB(dev)                (IS_I830(dev) || IS_845G(dev))
+ /*
+--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+@@ -1123,10 +1123,41 @@ i915_reset_gen7_sol_offsets(struct drm_d
+       return 0;
+ }
++static struct i915_vma*
++shadow_batch_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm)
++{
++      struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
++      struct i915_address_space *pin_vm = vm;
++      u64 flags;
++      int ret;
++
++      /*
++       * PPGTT backed shadow buffers must be mapped RO, to prevent
++       * post-scan tampering
++       */
++      if (CMDPARSER_USES_GGTT(dev_priv)) {
++              flags = PIN_GLOBAL;
++              pin_vm = &dev_priv->gtt.base;
++      } else if (vm->has_read_only) {
++              flags = PIN_USER;
++              obj->gt_ro = 1;
++      } else {
++              DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
++              return ERR_PTR(-EINVAL);
++      }
++
++      ret = i915_gem_object_pin(obj, pin_vm, 0, flags);
++      if (ret)
++              return ERR_PTR(ret);
++      else
++              return i915_gem_obj_to_vma(obj, pin_vm);
++}
++
+ static struct drm_i915_gem_object*
+ i915_gem_execbuffer_parse(struct intel_engine_cs *ring,
+                         struct drm_i915_gem_exec_object2 *shadow_exec_entry,
+                         struct eb_vmas *eb,
++                        struct i915_address_space *vm,
+                         struct drm_i915_gem_object *batch_obj,
+                         u32 batch_start_offset,
+                         u32 batch_len)
+@@ -1148,15 +1179,16 @@ i915_gem_execbuffer_parse(struct intel_e
+       if (ret)
+               goto err;
+-      ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
+-      if (ret)
++      vma = shadow_batch_pin(shadow_batch_obj, vm);
++      if (IS_ERR(vma)) {
++              ret = PTR_ERR(vma);
+               goto err;
++      }
+       i915_gem_object_unpin_pages(shadow_batch_obj);
+       memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
+-      vma = i915_gem_obj_to_ggtt(shadow_batch_obj);
+       vma->exec_entry = shadow_exec_entry;
+       vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
+       drm_gem_object_reference(&shadow_batch_obj->base);
+@@ -1168,7 +1200,14 @@ i915_gem_execbuffer_parse(struct intel_e
+ err:
+       i915_gem_object_unpin_pages(shadow_batch_obj);
+-      if (ret == -EACCES) /* unhandled chained batch */
++
++      /*
++       * Unsafe GGTT-backed buffers can still be submitted safely
++       * as non-secure.
++       * For PPGTT backing however, we have no choice but to forcibly
++       * reject unsafe buffers
++       */
++      if (CMDPARSER_USES_GGTT(batch_obj->base.dev) && (ret == -EACCES))
+               return batch_obj;
+       else
+               return ERR_PTR(ret);
+@@ -1503,7 +1542,7 @@ i915_gem_do_execbuffer(struct drm_device
+               parsed_batch_obj = i915_gem_execbuffer_parse(ring,
+                                                     &shadow_exec_entry,
+-                                                    eb,
++                                                    eb, vm,
+                                                     batch_obj,
+                                                     args->batch_start_offset,
+                                                     args->batch_len);
+@@ -1516,18 +1555,9 @@ i915_gem_do_execbuffer(struct drm_device
+                * parsed_batch_obj == batch_obj means batch not fully parsed:
+                * Accept, but don't promote to secure.
+                */
+-
+               if (parsed_batch_obj != batch_obj) {
+-                      /*
+-                       * Batch parsed and accepted:
+-                       *
+-                       * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+-                       * bit from MI_BATCH_BUFFER_START commands issued in
+-                       * the dispatch_execbuffer implementations. We
+-                       * specifically don't want that set on batches the
+-                       * command parser has accepted.
+-                       */
+-                      dispatch_flags |= I915_DISPATCH_SECURE;
++                      if (CMDPARSER_USES_GGTT(dev_priv))
++                              dispatch_flags |= I915_DISPATCH_SECURE;
+                       params->args_batch_start_offset = 0;
+                       batch_obj = parsed_batch_obj;
+               }
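
The core of this change is shadow_batch_pin()'s three-way decision: gen7 keeps the GGTT+secure scheme, gen8+ requires a read-only-capable ppGTT, and anything else is rejected outright. A standalone sketch of that policy follows; the types and helper names are illustrative, not the driver's.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

enum pin_mode { PIN_GLOBAL_SECURE, PIN_USER_RO };

struct vm_sketch { bool has_read_only; };

struct dev_sketch {
	int gen;	/* CMDPARSER_USES_GGTT(dev) == (gen == 7) */
};

/* Mirrors shadow_batch_pin() above: gen7 runs the shadow copy secure
 * from the GGTT; gen8+ maps it read-only in the ppGTT so the scanned
 * buffer cannot be tampered with after the scan; without an
 * RO-capable VM there is no safe way to run it, so reject. */
static int choose_shadow_pin(const struct dev_sketch *dev,
			     const struct vm_sketch *vm,
			     enum pin_mode *mode)
{
	if (dev->gen == 7) {
		*mode = PIN_GLOBAL_SECURE;
		return 0;
	}
	if (vm->has_read_only) {
		*mode = PIN_USER_RO;
		return 0;
	}
	return -EINVAL;		/* cannot prevent post-scan tampering */
}

int main(void)
{
	struct dev_sketch bdw = { .gen = 8 };
	struct vm_sketch ppgtt = { .has_read_only = true };
	enum pin_mode mode;

	if (choose_shadow_pin(&bdw, &ppgtt, &mode) == 0)
		printf("pin mode: %s\n",
		       mode == PIN_USER_RO ? "ppGTT read-only"
					   : "GGTT secure");
	return 0;
}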
index 5da2c49da745856078329964e067906f301f55e5..e75b141990bd0cbc5252d26f1561d62dde54f8d5 100644 (file)
@@ -41,3 +41,18 @@ can-flexcan-disable-completely-the-ecc-mechanism.patch
 mm-filemap.c-don-t-initiate-writeback-if-mapping-has-no-dirty-pages.patch
 cgroup-writeback-don-t-switch-wbs-immediately-on-dead-wbs-if-the-memcg-is-dead.patch
 net-prevent-load-store-tearing-on-sk-sk_stamp.patch
+drm-i915-gtt-add-read-only-pages-to-gen8_pte_encode.patch
+drm-i915-gtt-read-only-pages-for-insert_entries-on-bdw.patch
+drm-i915-gtt-disable-read-only-support-under-gvt.patch
+drm-i915-rename-gen7-cmdparser-tables.patch
+drm-i915-disable-secure-batches-for-gen6.patch
+drm-i915-remove-master-tables-from-cmdparser.patch
+drm-i915-add-support-for-mandatory-cmdparsing.patch
+drm-i915-support-ro-ppgtt-mapped-cmdparser-shadow-buffers.patch
+drm-i915-allow-parsing-of-unsized-batches.patch
+drm-i915-add-gen9-bcs-cmdparsing.patch
+drm-i915-cmdparser-add-support-for-backward-jumps.patch
+drm-i915-cmdparser-ignore-length-operands-during-command-matching.patch
+drm-i915-lower-rm-timeout-to-avoid-dsi-hard-hangs.patch
+drm-i915-gen8-add-rc6-ctx-corruption-wa.patch
+drm-i915-cmdparser-fix-jump-whitelist-clearing.patch