Merge drm/drm-next into drm-intel-next
author Rodrigo Vivi <rodrigo.vivi@intel.com>
Thu, 17 Oct 2024 16:52:05 +0000 (12:52 -0400)
committer Rodrigo Vivi <rodrigo.vivi@intel.com>
Thu, 17 Oct 2024 16:52:05 +0000 (12:52 -0400)
Needed to bring in some KVM changes to be able to include a fix in our Kconfig.

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/i915/display/intel_dsb.c
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
drivers/gpu/drm/xe/Makefile
drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
drivers/gpu/drm/xe/display/xe_display.c

Simple merge
index 97afa13ad4ce2dd80d3a2533a4efe458d3b478d5,ee3469d4ae730caab5b0825b1a05541bc4da54c4..0382beb4035b4ce3dc0b8ea4072e1611fbb09a16
@@@ -152,9 -152,33 +152,9 @@@ static inline void intel_uncore_write_n
  {
        struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
  
-       xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val);
+       xe_mmio_write32(__compat_uncore_to_mmio(uncore), reg, val);
  }
  
 -static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore)
 -{
 -      struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
 -
 -      return xe_device_get_root_tile(xe)->mmio.regs;
 -}
 -
 -/*
 - * The raw_reg_{read,write} macros are intended as a micro-optimization for
 - * interrupt handlers so that the pointer indirection on uncore->regs can
 - * be computed once (and presumably cached in a register) instead of generating
 - * extra load instructions for each MMIO access.
 - *
 - * Given that these macros are only intended for non-GSI interrupt registers
 - * (and the goal is to avoid extra instructions generated by the compiler),
 - * these macros do not account for uncore->gsi_offset.  Any caller that needs
 - * to use these macros on a GSI register is responsible for adding the
 - * appropriate GSI offset to the 'base' parameter.
 - */
 -#define raw_reg_read(base, reg) \
 -      readl(base + i915_mmio_reg_offset(reg))
 -#define raw_reg_write(base, reg, value) \
 -      writel(value, base + i915_mmio_reg_offset(reg))
 -
  #define intel_uncore_forcewake_get(x, y) do { } while (0)
  #define intel_uncore_forcewake_put(x, y) do { } while (0)
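  
  For context, the raw_reg_{read,write} macros and intel_uncore_regs() dropped from the compat header above implement the pattern the removed comment describes: the uncore->regs base pointer is loaded once, and every subsequent MMIO access is a bare readl()/writel() at a fixed offset from that cached base, with no uncore->gsi_offset adjustment. A minimal kernel-context sketch of how an interrupt handler is expected to use them follows; example_irq_handler and EXAMPLE_MASTER_IRQ are placeholder names for illustration, not identifiers from this commit.
  
  /*
   * Sketch only (not part of this commit): intended usage of the
   * raw_reg_{read,write} macros and intel_uncore_regs() shown above.
   * example_irq_handler and EXAMPLE_MASTER_IRQ are made-up names.
   */
  #define EXAMPLE_MASTER_IRQ _MMIO(0x1000)  /* placeholder i915_reg_t */
  
  static irqreturn_t example_irq_handler(int irq, void *arg)
  {
          struct intel_uncore *uncore = arg;
          /* Pay the uncore->regs pointer indirection once... */
          void __iomem * const regs = intel_uncore_regs(uncore);
          u32 master;
  
          /*
           * ...so each access below compiles to a bare readl()/writel()
           * against the cached base.  No uncore->gsi_offset is applied,
           * as the removed comment warns.
           */
          master = raw_reg_read(regs, EXAMPLE_MASTER_IRQ);
          if (!master)
                  return IRQ_NONE;
  
          /* Illustrative ack: write the bits back through the same base. */
          raw_reg_write(regs, EXAMPLE_MASTER_IRQ, master);
          return IRQ_HANDLED;
  }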
  
index b1730b58165612dfd037498f3b25fb1e78dba45d,26b2cae11d46de98c98107d1c0249d3a929aaf7e..957ae763531d8a24502c545f0862186bee56e738
@@@ -356,9 -340,55 +344,55 @@@ static void __xe_display_pm_suspend(str
  
        intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
  
 -      intel_dmc_suspend(xe);
 +      intel_dmc_suspend(display);
  }
  
+ void xe_display_pm_suspend(struct xe_device *xe)
+ {
+       __xe_display_pm_suspend(xe, false);
+ }
+
+ void xe_display_pm_shutdown(struct xe_device *xe)
+ {
+       struct intel_display *display = &xe->display;
+
+       if (!xe->info.probe_display)
+               return;
+
+       intel_power_domains_disable(xe);
+       intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true);
+       if (has_display(xe)) {
+               drm_kms_helper_poll_disable(&xe->drm);
+               intel_display_driver_disable_user_access(xe);
+               intel_display_driver_suspend(xe);
+       }
+
+       xe_display_flush_cleanup_work(xe);
+
+       intel_dp_mst_suspend(xe);
+       intel_hpd_cancel_work(xe);
+
+       if (has_display(xe))
+               intel_display_driver_suspend_access(xe);
+
+       intel_encoder_suspend_all(display);
+       intel_encoder_shutdown_all(display);
+
+       intel_opregion_suspend(display, PCI_D3cold);
+
+       intel_dmc_suspend(xe);
+ }
+
+ void xe_display_pm_runtime_suspend(struct xe_device *xe)
+ {
+       if (!xe->info.probe_display)
+               return;
+
+       if (xe->d3cold.allowed)
+               __xe_display_pm_suspend(xe, true);
+
+       intel_hpd_poll_enable(xe);
+ }
  void xe_display_pm_suspend_late(struct xe_device *xe)
  {
        bool s2idle = suspend_to_idle();