Merge tag 'drm-intel-next-2024-02-27-1' of git://anongit.freedesktop.org/drm/drm...
author Dave Airlie <airlied@redhat.com>
Wed, 28 Feb 2024 01:02:54 +0000 (11:02 +1000)
committer Dave Airlie <airlied@redhat.com>
Wed, 28 Feb 2024 01:02:55 +0000 (11:02 +1000)
drm/i915 feature pull #2 for v6.9:

Features and functionality:
- DP tunneling and bandwidth allocation support (Imre)
- Add more ADL-N PCI IDs (Gustavo)
- Enable fastboot also on older platforms (Ville)
- Bigjoiner force enable debugfs option for testing (Stan)

Refactoring and cleanups:
- Remove unused structs and struct members (Jiri Slaby)
- Use per-device debug logging (Ville)
- State check improvements (Ville)
- Hardcoded cd2x divider cleanups (Ville)
- CDCLK documentation updates (Ville, Rodrigo)

Fixes:
- HDCP MST Type1 fixes (Suraj)
- Fix MTL C20 PHY PLL values (Ravi)
- More hardware access prevention during init (Imre)
- Always enable decompression with tile4 on Xe2 (Juha-Pekka)
- Improve LNL package C residency (Suraj)

drm core changes:
- DP tunneling and bandwidth allocation helpers (Imre)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87sf1devbj.fsf@intel.com
72 files changed:
drivers/gpu/drm/display/Kconfig
drivers/gpu/drm/display/Makefile
drivers/gpu/drm/display/drm_dp_helper.c
drivers/gpu/drm/display/drm_dp_tunnel.c [new file with mode: 0644]
drivers/gpu/drm/i915/Kconfig
drivers/gpu/drm/i915/Kconfig.debug
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/display/dvo_ch7017.c
drivers/gpu/drm/i915/display/dvo_ch7xxx.c
drivers/gpu/drm/i915/display/dvo_ivch.c
drivers/gpu/drm/i915/display/dvo_ns2501.c
drivers/gpu/drm/i915/display/dvo_sil164.c
drivers/gpu/drm/i915/display/dvo_tfp410.c
drivers/gpu/drm/i915/display/i9xx_wm.c
drivers/gpu/drm/i915/display/intel_atomic.c
drivers/gpu/drm/i915/display/intel_bios.c
drivers/gpu/drm/i915/display/intel_bios.h
drivers/gpu/drm/i915/display/intel_cdclk.c
drivers/gpu/drm/i915/display/intel_color.c
drivers/gpu/drm/i915/display/intel_crt.c
drivers/gpu/drm/i915/display/intel_cx0_phy.c
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display_core.h
drivers/gpu/drm/i915/display/intel_display_debugfs.c
drivers/gpu/drm/i915/display/intel_display_driver.c
drivers/gpu/drm/i915/display/intel_display_types.h
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp.h
drivers/gpu/drm/i915/display/intel_dp_hdcp.c
drivers/gpu/drm/i915/display/intel_dp_link_training.c
drivers/gpu/drm/i915/display/intel_dp_link_training.h
drivers/gpu/drm/i915/display/intel_dp_mst.c
drivers/gpu/drm/i915/display/intel_dp_tunnel.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_dp_tunnel.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_dpll_mgr.c
drivers/gpu/drm/i915/display/intel_dpll_mgr.h
drivers/gpu/drm/i915/display/intel_drrs.c
drivers/gpu/drm/i915/display/intel_dsb.c
drivers/gpu/drm/i915/display/intel_dsi.h
drivers/gpu/drm/i915/display/intel_dvo.c
drivers/gpu/drm/i915/display/intel_dvo_dev.h
drivers/gpu/drm/i915/display/intel_fb.c
drivers/gpu/drm/i915/display/intel_global_state.h
drivers/gpu/drm/i915/display/intel_hdcp.c
drivers/gpu/drm/i915/display/intel_hdcp.h
drivers/gpu/drm/i915/display/intel_hdmi.c
drivers/gpu/drm/i915/display/intel_link_bw.c
drivers/gpu/drm/i915/display/intel_link_bw.h
drivers/gpu/drm/i915/display/intel_opregion.c
drivers/gpu/drm/i915/display/intel_sdvo.c
drivers/gpu/drm/i915/display/skl_universal_plane.c
drivers/gpu/drm/i915/display/skl_watermark.c
drivers/gpu/drm/i915/display/skl_watermark.h
drivers/gpu/drm/i915/display/skl_watermark_regs.h
drivers/gpu/drm/i915/gt/uc/intel_guc.h
drivers/gpu/drm/i915/gvt/fb_decoder.h
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/interrupt.c
drivers/gpu/drm/i915/gvt/interrupt.h
drivers/gpu/drm/i915/gvt/mmio.h
drivers/gpu/drm/i915/gvt/scheduler.h
drivers/gpu/drm/i915/i915_drm_client.h
drivers/gpu/drm/i915/i915_perf_types.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_vma_types.h
drivers/gpu/drm/i915/intel_memory_region.h
include/drm/display/drm_dp.h
include/drm/display/drm_dp_helper.h
include/drm/display/drm_dp_tunnel.h [new file with mode: 0644]
include/drm/i915_pciids.h

diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
index 09712b88a5b83e3235c017f027833ff03566c6b2..c0f56888c32803eb574fe086ed25a537f7ff80c5 100644
@@ -17,6 +17,27 @@ config DRM_DISPLAY_DP_HELPER
        help
          DRM display helpers for DisplayPort.
 
+config DRM_DISPLAY_DP_TUNNEL
+       bool
+       select DRM_DISPLAY_DP_HELPER
+       help
+         Enable support for DisplayPort tunnels. This allows drivers to use
+         DP tunnel features like the Bandwidth Allocation mode to maximize the
+         BW utilization for display streams on Thunderbolt links.
+
+config DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+       bool "Enable debugging the DP tunnel state"
+       depends on REF_TRACKER
+       depends on DRM_DISPLAY_DP_TUNNEL
+       depends on DEBUG_KERNEL
+       depends on EXPERT
+       help
+         Enables debugging the DP tunnel manager's state, including the
+         consistency of all managed tunnels' reference counting and the state of
+         streams contained in tunnels.
+
+         If in doubt, say "N".
+
 config DRM_DISPLAY_HDCP_HELPER
        bool
        depends on DRM_DISPLAY_HELPER
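DRM_DISPLAY_DP_TUNNEL deliberately has no prompt: drivers wanting the helpers select it from their own Kconfig (presumably what the drivers/gpu/drm/i915/Kconfig change listed above does), which pulls in DRM_DISPLAY_DP_HELPER via the select, and the user-visible DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE option then becomes available on top under DEBUG_KERNEL/EXPERT.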
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile
index 17ac4a1006a8008794477d8a634d699f372415a0..7ca61333c669670aeb3b39f5442d4c432db99afe 100644
@@ -8,6 +8,8 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
        drm_dp_helper.o \
        drm_dp_mst_topology.o \
        drm_dsc_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_TUNNEL) += \
+       drm_dp_tunnel.o
 drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
 drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
        drm_hdmi_helper.o \
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 8d6ce46471ae6161c601ae0488fb65dda48f396b..d046dfa79504f9c5368aaf2a0bd87c2e1d84f4b7 100644
@@ -4055,3 +4055,33 @@ int drm_dp_bw_channel_coding_efficiency(bool is_uhbr)
                return 800000;
 }
 EXPORT_SYMBOL(drm_dp_bw_channel_coding_efficiency);
+
+/**
+ * drm_dp_max_dprx_data_rate - Get the max data bandwidth of a DPRX sink
+ * @max_link_rate: max DPRX link rate in 10kbps units
+ * @max_lanes: max DPRX lane count
+ *
+ * Given a link rate and lanes, get the data bandwidth.
+ *
+ * Data bandwidth is the actual payload rate, which depends on the data
+ * bandwidth efficiency and the link rate.
+ *
+ * Note that protocol layers above the DPRX link level considered here can
+ * further limit the maximum data rate. Such layers are the MST topology (with
+ * limits on the link between the source and first branch device as well as on
+ * the whole MST path up to the DPRX link) and (Thunderbolt) DP tunnels -
+ * which in turn can encapsulate an MST link with its own limit - with each
+ * SST or MST encapsulated tunnel sharing the BW of a tunnel group.
+ *
+ * Returns the maximum data rate in kBps units.
+ */
+int drm_dp_max_dprx_data_rate(int max_link_rate, int max_lanes)
+{
+       int ch_coding_efficiency =
+               drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate));
+
+       return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate * 10 * max_lanes,
+                                             ch_coding_efficiency),
+                                 1000000 * 8);
+}
+EXPORT_SYMBOL(drm_dp_max_dprx_data_rate);
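As a worked example of the formula (assuming the non-UHBR 8b/10b efficiency of 800000 seen in the fall-through return above; not part of the patch itself):

        /* HBR3 (810000 * 10 kbps = 8.1 Gbps per lane) on 4 lanes: */
        int bw = drm_dp_max_dprx_data_rate(810000, 4);
        /*
         * 810000 * 10 * 4 = 32,400,000 kbps raw link rate,
         * * 800000 / 1,000,000 = 25,920,000 kbps after 8b/10b coding,
         * / 8 = 3,240,000 kB/s returned (~25.92 Gbps of payload).
         */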
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
new file mode 100644
index 0000000..120e0de
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -0,0 +1,1949 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/ref_tracker.h>
+#include <linux/types.h>
+
+#include <drm/drm_atomic_state_helper.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_print.h>
+#include <drm/display/drm_dp.h>
+#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dp_tunnel.h>
+
+#define to_group(__private_obj) \
+       container_of(__private_obj, struct drm_dp_tunnel_group, base)
+
+#define to_group_state(__private_state) \
+       container_of(__private_state, struct drm_dp_tunnel_group_state, base)
+
+#define is_dp_tunnel_private_obj(__obj) \
+       ((__obj)->funcs == &tunnel_group_funcs)
+
+#define for_each_new_group_in_state(__state, __new_group_state, __i) \
+       for ((__i) = 0; \
+            (__i) < (__state)->num_private_objs; \
+            (__i)++) \
+               for_each_if ((__state)->private_objs[__i].ptr && \
+                            is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \
+                            ((__new_group_state) = \
+                               to_group_state((__state)->private_objs[__i].new_state), 1))
+
+#define for_each_old_group_in_state(__state, __old_group_state, __i) \
+       for ((__i) = 0; \
+            (__i) < (__state)->num_private_objs; \
+            (__i)++) \
+               for_each_if ((__state)->private_objs[__i].ptr && \
+                            is_dp_tunnel_private_obj((__state)->private_objs[__i].ptr) && \
+                            ((__old_group_state) = \
+                               to_group_state((__state)->private_objs[__i].old_state), 1))
+
+#define for_each_tunnel_in_group(__group, __tunnel) \
+       list_for_each_entry(__tunnel, &(__group)->tunnels, node)
+
+#define for_each_tunnel_state(__group_state, __tunnel_state) \
+       list_for_each_entry(__tunnel_state, &(__group_state)->tunnel_states, node)
+
+#define for_each_tunnel_state_safe(__group_state, __tunnel_state, __tunnel_state_tmp) \
+       list_for_each_entry_safe(__tunnel_state, __tunnel_state_tmp, \
+                                &(__group_state)->tunnel_states, node)
+
+#define kbytes_to_mbits(__kbytes) \
+       DIV_ROUND_UP((__kbytes) * 8, 1000)
+
+#define DPTUN_BW_ARG(__bw) ((__bw) < 0 ? (__bw) : kbytes_to_mbits(__bw))
+
+#define __tun_prn(__tunnel, __level, __type, __fmt, ...) \
+       drm_##__level##__type((__tunnel)->group->mgr->dev, \
+                             "[DPTUN %s][%s] " __fmt, \
+                             drm_dp_tunnel_name(__tunnel), \
+                             (__tunnel)->aux->name, ## \
+                             __VA_ARGS__)
+
+#define tun_dbg(__tunnel, __fmt, ...) \
+       __tun_prn(__tunnel, dbg, _kms, __fmt, ## __VA_ARGS__)
+
+#define tun_dbg_stat(__tunnel, __err, __fmt, ...) do { \
+       if (__err) \
+               __tun_prn(__tunnel, dbg, _kms, __fmt " (Failed, err: %pe)\n", \
+                         ## __VA_ARGS__, ERR_PTR(__err)); \
+       else \
+               __tun_prn(__tunnel, dbg, _kms, __fmt " (Ok)\n", \
+                         ## __VA_ARGS__); \
+} while (0)
+
+#define tun_dbg_atomic(__tunnel, __fmt, ...) \
+       __tun_prn(__tunnel, dbg, _atomic, __fmt, ## __VA_ARGS__)
+
+#define tun_grp_dbg(__group, __fmt, ...) \
+       drm_dbg_kms((__group)->mgr->dev, \
+                   "[DPTUN %s] " __fmt, \
+                   drm_dp_tunnel_group_name(__group), ## \
+                   __VA_ARGS__)
+
+#define DP_TUNNELING_BASE DP_TUNNELING_OUI
+
+#define __DPTUN_REG_RANGE(__start, __size) \
+       GENMASK_ULL((__start) + (__size) - 1, (__start))
+
+#define DPTUN_REG_RANGE(__addr, __size) \
+       __DPTUN_REG_RANGE((__addr) - DP_TUNNELING_BASE, (__size))
+
+#define DPTUN_REG(__addr) DPTUN_REG_RANGE(__addr, 1)
+
+#define DPTUN_INFO_REG_MASK ( \
+       DPTUN_REG_RANGE(DP_TUNNELING_OUI, DP_TUNNELING_OUI_BYTES) | \
+       DPTUN_REG_RANGE(DP_TUNNELING_DEV_ID, DP_TUNNELING_DEV_ID_BYTES) | \
+       DPTUN_REG(DP_TUNNELING_HW_REV) | \
+       DPTUN_REG(DP_TUNNELING_SW_REV_MAJOR) | \
+       DPTUN_REG(DP_TUNNELING_SW_REV_MINOR) | \
+       DPTUN_REG(DP_TUNNELING_CAPABILITIES) | \
+       DPTUN_REG(DP_IN_ADAPTER_INFO) | \
+       DPTUN_REG(DP_USB4_DRIVER_ID) | \
+       DPTUN_REG(DP_USB4_DRIVER_BW_CAPABILITY) | \
+       DPTUN_REG(DP_IN_ADAPTER_TUNNEL_INFORMATION) | \
+       DPTUN_REG(DP_BW_GRANULARITY) | \
+       DPTUN_REG(DP_ESTIMATED_BW) | \
+       DPTUN_REG(DP_ALLOCATED_BW) | \
+       DPTUN_REG(DP_TUNNELING_MAX_LINK_RATE) | \
+       DPTUN_REG(DP_TUNNELING_MAX_LANE_COUNT) | \
+       DPTUN_REG(DP_DPTX_BW_ALLOCATION_MODE_CONTROL))
+
+static const DECLARE_BITMAP(dptun_info_regs, 64) = {
+       DPTUN_INFO_REG_MASK & -1UL,
+#if BITS_PER_LONG == 32
+       DPTUN_INFO_REG_MASK >> 32,
+#endif
+};
+
+struct drm_dp_tunnel_regs {
+       u8 buf[HWEIGHT64(DPTUN_INFO_REG_MASK)];
+};
+
+struct drm_dp_tunnel_group;
+
+struct drm_dp_tunnel {
+       struct drm_dp_tunnel_group *group;
+
+       struct list_head node;
+
+       struct kref kref;
+       struct ref_tracker *tracker;
+       struct drm_dp_aux *aux;
+       char name[8];
+
+       int bw_granularity;
+       int estimated_bw;
+       int allocated_bw;
+
+       int max_dprx_rate;
+       u8 max_dprx_lane_count;
+
+       u8 adapter_id;
+
+       bool bw_alloc_supported:1;
+       bool bw_alloc_enabled:1;
+       bool has_io_error:1;
+       bool destroyed:1;
+};
+
+struct drm_dp_tunnel_group_state;
+
+struct drm_dp_tunnel_state {
+       struct drm_dp_tunnel_group_state *group_state;
+
+       struct drm_dp_tunnel_ref tunnel_ref;
+
+       struct list_head node;
+
+       u32 stream_mask;
+       int *stream_bw;
+};
+
+struct drm_dp_tunnel_group_state {
+       struct drm_private_state base;
+
+       struct list_head tunnel_states;
+};
+
+struct drm_dp_tunnel_group {
+       struct drm_private_obj base;
+       struct drm_dp_tunnel_mgr *mgr;
+
+       struct list_head tunnels;
+
+       /* available BW including the allocated_bw of all tunnels in the group */
+       int available_bw;
+
+       u8 drv_group_id;
+       char name[8];
+
+       bool active:1;
+};
+
+struct drm_dp_tunnel_mgr {
+       struct drm_device *dev;
+
+       int group_count;
+       struct drm_dp_tunnel_group *groups;
+       wait_queue_head_t bw_req_queue;
+
+#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+       struct ref_tracker_dir ref_tracker;
+#endif
+};
+
+/*
+ * The following helpers provide a way to read out the tunneling DPCD
+ * registers with a minimal amount of AUX transfers (1 transfer per contiguous
+ * range, as permitted by the 16 byte per transfer AUX limit), not accessing
+ * other registers to avoid any read side-effects.
+ */
+static int next_reg_area(int *offset)
+{
+       *offset = find_next_bit(dptun_info_regs, 64, *offset);
+
+       return find_next_zero_bit(dptun_info_regs, 64, *offset + 1) - *offset;
+}
+
+#define tunnel_reg_ptr(__regs, __address) ({ \
+       WARN_ON(!test_bit((__address) - DP_TUNNELING_BASE, dptun_info_regs)); \
+       &(__regs)->buf[bitmap_weight(dptun_info_regs, (__address) - DP_TUNNELING_BASE)]; \
+})
+
+static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *regs)
+{
+       int offset = 0;
+       int len;
+
+       while ((len = next_reg_area(&offset))) {
+               int address = DP_TUNNELING_BASE + offset;
+
+               if (drm_dp_dpcd_read(aux, address, tunnel_reg_ptr(regs, address), len) < 0)
+                       return -EIO;
+
+               offset += len;
+       }
+
+       return 0;
+}
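In other words, registers that are adjacent in the DPCD map collapse into a single run of set bits in dptun_info_regs, and each such run costs exactly one drm_dp_dpcd_read(). The calling pattern, as drm_dp_tunnel_detect() further below also shows (sketch only):

        struct drm_dp_tunnel_regs regs;

        if (read_tunnel_regs(aux, &regs) < 0)
                return -EIO;    /* any failing AUX range aborts the whole read */

        /* fields are then decoded from the packed buffer */
        u8 caps = tunnel_reg(&regs, DP_TUNNELING_CAPABILITIES);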
+
+static u8 tunnel_reg(const struct drm_dp_tunnel_regs *regs, int address)
+{
+       return *tunnel_reg_ptr(regs, address);
+}
+
+static u8 tunnel_reg_drv_group_id(const struct drm_dp_tunnel_regs *regs)
+{
+       u8 drv_id = tunnel_reg(regs, DP_USB4_DRIVER_ID) & DP_USB4_DRIVER_ID_MASK;
+       u8 group_id = tunnel_reg(regs, DP_IN_ADAPTER_TUNNEL_INFORMATION) & DP_GROUP_ID_MASK;
+
+       if (!group_id)
+               return 0;
+
+       return (drv_id << DP_GROUP_ID_BITS) | group_id;
+}
+
+/* Return granularity in kB/s units */
+static int tunnel_reg_bw_granularity(const struct drm_dp_tunnel_regs *regs)
+{
+       int gr = tunnel_reg(regs, DP_BW_GRANULARITY) & DP_BW_GRANULARITY_MASK;
+
+       if (gr > 2)
+               return -1;
+
+       return (250000 << gr) / 8;
+}
+
+static int tunnel_reg_max_dprx_rate(const struct drm_dp_tunnel_regs *regs)
+{
+       u8 bw_code = tunnel_reg(regs, DP_TUNNELING_MAX_LINK_RATE);
+
+       return drm_dp_bw_code_to_link_rate(bw_code);
+}
+
+static int tunnel_reg_max_dprx_lane_count(const struct drm_dp_tunnel_regs *regs)
+{
+       return tunnel_reg(regs, DP_TUNNELING_MAX_LANE_COUNT) &
+              DP_TUNNELING_MAX_LANE_COUNT_MASK;
+}
+
+static bool tunnel_reg_bw_alloc_supported(const struct drm_dp_tunnel_regs *regs)
+{
+       u8 cap_mask = DP_TUNNELING_SUPPORT | DP_IN_BW_ALLOCATION_MODE_SUPPORT;
+
+       if ((tunnel_reg(regs, DP_TUNNELING_CAPABILITIES) & cap_mask) != cap_mask)
+               return false;
+
+       return tunnel_reg(regs, DP_USB4_DRIVER_BW_CAPABILITY) &
+              DP_USB4_DRIVER_BW_ALLOCATION_MODE_SUPPORT;
+}
+
+static bool tunnel_reg_bw_alloc_enabled(const struct drm_dp_tunnel_regs *regs)
+{
+       return tunnel_reg(regs, DP_DPTX_BW_ALLOCATION_MODE_CONTROL) &
+              DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE;
+}
+
+static u8 tunnel_group_drv_id(u8 drv_group_id)
+{
+       return drv_group_id >> DP_GROUP_ID_BITS;
+}
+
+static u8 tunnel_group_id(u8 drv_group_id)
+{
+       return drv_group_id & DP_GROUP_ID_MASK;
+}
+
+const char *drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel)
+{
+       return tunnel->name;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_name);
+
+static const char *drm_dp_tunnel_group_name(const struct drm_dp_tunnel_group *group)
+{
+       return group->name;
+}
+
+static struct drm_dp_tunnel_group *
+lookup_or_alloc_group(struct drm_dp_tunnel_mgr *mgr, u8 drv_group_id)
+{
+       struct drm_dp_tunnel_group *group = NULL;
+       int i;
+
+       for (i = 0; i < mgr->group_count; i++) {
+               /*
+                * A tunnel group with 0 group ID shouldn't have more than one
+                * tunnel.
+                */
+               if (tunnel_group_id(drv_group_id) &&
+                   mgr->groups[i].drv_group_id == drv_group_id)
+                       return &mgr->groups[i];
+
+               if (!group && !mgr->groups[i].active)
+                       group = &mgr->groups[i];
+       }
+
+       if (!group) {
+               drm_dbg_kms(mgr->dev,
+                           "DPTUN: Can't allocate more tunnel groups\n");
+               return NULL;
+       }
+
+       group->drv_group_id = drv_group_id;
+       group->active = true;
+
+       /*
+        * The group name format here and elsewhere: Driver-ID:Group-ID:*
+        * (* standing for all DP-Adapters/tunnels in the group).
+        */
+       snprintf(group->name, sizeof(group->name), "%d:%d:*",
+                tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1),
+                tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1));
+
+       return group;
+}
+
+static void free_group(struct drm_dp_tunnel_group *group)
+{
+       struct drm_dp_tunnel_mgr *mgr = group->mgr;
+
+       if (drm_WARN_ON(mgr->dev, !list_empty(&group->tunnels)))
+               return;
+
+       group->drv_group_id = 0;
+       group->available_bw = -1;
+       group->active = false;
+}
+
+static struct drm_dp_tunnel *
+tunnel_get(struct drm_dp_tunnel *tunnel)
+{
+       kref_get(&tunnel->kref);
+
+       return tunnel;
+}
+
+static void free_tunnel(struct kref *kref)
+{
+       struct drm_dp_tunnel *tunnel = container_of(kref, typeof(*tunnel), kref);
+       struct drm_dp_tunnel_group *group = tunnel->group;
+
+       list_del(&tunnel->node);
+       if (list_empty(&group->tunnels))
+               free_group(group);
+
+       kfree(tunnel);
+}
+
+static void tunnel_put(struct drm_dp_tunnel *tunnel)
+{
+       kref_put(&tunnel->kref, free_tunnel);
+}
+
+#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
+                            struct ref_tracker **tracker)
+{
+       ref_tracker_alloc(&tunnel->group->mgr->ref_tracker,
+                         tracker, GFP_KERNEL);
+}
+
+static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
+                              struct ref_tracker **tracker)
+{
+       ref_tracker_free(&tunnel->group->mgr->ref_tracker,
+                        tracker);
+}
+#else
+static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
+                            struct ref_tracker **tracker)
+{
+}
+
+static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
+                              struct ref_tracker **tracker)
+{
+}
+#endif
+
+/**
+ * drm_dp_tunnel_get - Get a reference for a DP tunnel
+ * @tunnel: Tunnel object
+ * @tracker: Debug tracker for the reference
+ *
+ * Get a reference for @tunnel, along with a debug tracker to help locating
+ * the source of a reference leak/double reference put etc. issue.
+ *
+ * The reference must be dropped after use by calling drm_dp_tunnel_put(),
+ * passing @tunnel and the *@tracker returned from here.
+ *
+ * Returns @tunnel - as a convenience - along with *@tracker.
+ */
+struct drm_dp_tunnel *
+drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel,
+                 struct ref_tracker **tracker)
+{
+       track_tunnel_ref(tunnel, tracker);
+
+       return tunnel_get(tunnel);
+}
+EXPORT_SYMBOL(drm_dp_tunnel_get);
+
+/**
+ * drm_dp_tunnel_put - Put a reference for a DP tunnel
+ * @tunnel: Tunnel object
+ * @tracker: Debug tracker for the reference
+ *
+ * Put a reference for @tunnel along with its debug *@tracker, which
+ * was obtained with drm_dp_tunnel_get().
+ */
+void drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel,
+                      struct ref_tracker **tracker)
+{
+       untrack_tunnel_ref(tunnel, tracker);
+
+       tunnel_put(tunnel);
+}
+EXPORT_SYMBOL(drm_dp_tunnel_put);
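A minimal sketch of the resulting reference pattern from a hypothetical caller (the tracker is only meaningful with CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE; it compiles away otherwise):

        struct ref_tracker *tracker;
        struct drm_dp_tunnel *tunnel;

        tunnel = drm_dp_tunnel_get(detected_tunnel, &tracker);
        /* ... use the tunnel ... */
        drm_dp_tunnel_put(tunnel, &tracker);

Callers that need to store the pair can use the drm_dp_tunnel_ref wrapper with drm_dp_tunnel_ref_get()/drm_dp_tunnel_ref_put(), as the tunnel state code later in this file does.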
+
+static bool add_tunnel_to_group(struct drm_dp_tunnel_mgr *mgr,
+                               u8 drv_group_id,
+                               struct drm_dp_tunnel *tunnel)
+{
+       struct drm_dp_tunnel_group *group;
+
+       group = lookup_or_alloc_group(mgr, drv_group_id);
+       if (!group)
+               return false;
+
+       tunnel->group = group;
+       list_add(&tunnel->node, &group->tunnels);
+
+       return true;
+}
+
+static struct drm_dp_tunnel *
+create_tunnel(struct drm_dp_tunnel_mgr *mgr,
+             struct drm_dp_aux *aux,
+             const struct drm_dp_tunnel_regs *regs)
+{
+       u8 drv_group_id = tunnel_reg_drv_group_id(regs);
+       struct drm_dp_tunnel *tunnel;
+
+       tunnel = kzalloc(sizeof(*tunnel), GFP_KERNEL);
+       if (!tunnel)
+               return NULL;
+
+       INIT_LIST_HEAD(&tunnel->node);
+
+       kref_init(&tunnel->kref);
+
+       tunnel->aux = aux;
+
+       tunnel->adapter_id = tunnel_reg(regs, DP_IN_ADAPTER_INFO) & DP_IN_ADAPTER_NUMBER_MASK;
+
+       snprintf(tunnel->name, sizeof(tunnel->name), "%d:%d:%d",
+                tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1),
+                tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1),
+                tunnel->adapter_id & ((1 << DP_IN_ADAPTER_NUMBER_BITS) - 1));
+
+       tunnel->bw_granularity = tunnel_reg_bw_granularity(regs);
+       tunnel->allocated_bw = tunnel_reg(regs, DP_ALLOCATED_BW) *
+                              tunnel->bw_granularity;
+       /*
+        * An initial allocated BW of 0 indicates an undefined state: the
+        * actual allocation is determined by the TBT CM, usually following a
+        * legacy allocation policy (based on the max DPRX caps). From the
+        * driver's POV the state becomes defined only after the first
+        * allocation request.
+        */
+       if (!tunnel->allocated_bw)
+               tunnel->allocated_bw = -1;
+
+       tunnel->bw_alloc_supported = tunnel_reg_bw_alloc_supported(regs);
+       tunnel->bw_alloc_enabled = tunnel_reg_bw_alloc_enabled(regs);
+
+       if (!add_tunnel_to_group(mgr, drv_group_id, tunnel)) {
+               kfree(tunnel);
+
+               return NULL;
+       }
+
+       track_tunnel_ref(tunnel, &tunnel->tracker);
+
+       return tunnel;
+}
+
+static void destroy_tunnel(struct drm_dp_tunnel *tunnel)
+{
+       untrack_tunnel_ref(tunnel, &tunnel->tracker);
+       tunnel_put(tunnel);
+}
+
+/**
+ * drm_dp_tunnel_set_io_error - Set the IO error flag for a DP tunnel
+ * @tunnel: Tunnel object
+ *
+ * Set the IO error flag for @tunnel. Drivers can call this function upon
+ * detecting a failure that affects the tunnel functionality, for instance
+ * after a DP AUX transfer failure on the port @tunnel is connected to.
+ *
+ * This disables further management of @tunnel, including any related
+ * AUX accesses for tunneling DPCD registers, returning an error to the
+ * initiators of these accesses. The driver is expected to drop this tunnel and -
+ * optionally - recreate it.
+ */
+void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel)
+{
+       tunnel->has_io_error = true;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_set_io_error);
+
+#define SKIP_DPRX_CAPS_CHECK           BIT(0)
+#define ALLOW_ALLOCATED_BW_CHANGE      BIT(1)
+static bool tunnel_regs_are_valid(struct drm_dp_tunnel_mgr *mgr,
+                                 const struct drm_dp_tunnel_regs *regs,
+                                 unsigned int flags)
+{
+       u8 drv_group_id = tunnel_reg_drv_group_id(regs);
+       bool check_dprx = !(flags & SKIP_DPRX_CAPS_CHECK);
+       bool ret = true;
+
+       if (!tunnel_reg_bw_alloc_supported(regs)) {
+               if (tunnel_group_id(drv_group_id)) {
+                       drm_dbg_kms(mgr->dev,
+                                   "DPTUN: A non-zero group ID is only allowed with BWA support\n");
+                       ret = false;
+               }
+
+               if (tunnel_reg(regs, DP_ALLOCATED_BW)) {
+                       drm_dbg_kms(mgr->dev,
+                                   "DPTUN: BW is allocated without BWA support\n");
+                       ret = false;
+               }
+
+               return ret;
+       }
+
+       if (!tunnel_group_id(drv_group_id)) {
+               drm_dbg_kms(mgr->dev,
+                           "DPTUN: BWA support requires a non-zero group ID\n");
+               ret = false;
+       }
+
+       if (check_dprx && hweight8(tunnel_reg_max_dprx_lane_count(regs)) != 1) {
+               drm_dbg_kms(mgr->dev,
+                           "DPTUN: Invalid DPRX lane count: %d\n",
+                           tunnel_reg_max_dprx_lane_count(regs));
+
+               ret = false;
+       }
+
+       if (check_dprx && !tunnel_reg_max_dprx_rate(regs)) {
+               drm_dbg_kms(mgr->dev,
+                           "DPTUN: DPRX rate is 0\n");
+
+               ret = false;
+       }
+
+       if (tunnel_reg_bw_granularity(regs) < 0) {
+               drm_dbg_kms(mgr->dev,
+                           "DPTUN: Invalid BW granularity\n");
+
+               ret = false;
+       }
+
+       if (tunnel_reg(regs, DP_ALLOCATED_BW) > tunnel_reg(regs, DP_ESTIMATED_BW)) {
+               drm_dbg_kms(mgr->dev,
+                           "DPTUN: Allocated BW %d > estimated BW %d Mb/s\n",
+                           DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) *
+                                        tunnel_reg_bw_granularity(regs)),
+                           DPTUN_BW_ARG(tunnel_reg(regs, DP_ESTIMATED_BW) *
+                                        tunnel_reg_bw_granularity(regs)));
+
+               ret = false;
+       }
+
+       return ret;
+}
+
+static int tunnel_allocated_bw(const struct drm_dp_tunnel *tunnel)
+{
+       return max(tunnel->allocated_bw, 0);
+}
+
+static bool tunnel_info_changes_are_valid(struct drm_dp_tunnel *tunnel,
+                                         const struct drm_dp_tunnel_regs *regs,
+                                         unsigned int flags)
+{
+       u8 new_drv_group_id = tunnel_reg_drv_group_id(regs);
+       bool ret = true;
+
+       if (tunnel->bw_alloc_supported != tunnel_reg_bw_alloc_supported(regs)) {
+               tun_dbg(tunnel,
+                       "BW alloc support has changed %s -> %s\n",
+                       str_yes_no(tunnel->bw_alloc_supported),
+                       str_yes_no(tunnel_reg_bw_alloc_supported(regs)));
+
+               ret = false;
+       }
+
+       if (tunnel->group->drv_group_id != new_drv_group_id) {
+               tun_dbg(tunnel,
+                       "Driver/group ID has changed %d:%d:* -> %d:%d:*\n",
+                       tunnel_group_drv_id(tunnel->group->drv_group_id),
+                       tunnel_group_id(tunnel->group->drv_group_id),
+                       tunnel_group_drv_id(new_drv_group_id),
+                       tunnel_group_id(new_drv_group_id));
+
+               ret = false;
+       }
+
+       if (!tunnel->bw_alloc_supported)
+               return ret;
+
+       if (tunnel->bw_granularity != tunnel_reg_bw_granularity(regs)) {
+               tun_dbg(tunnel,
+                       "BW granularity has changed: %d -> %d Mb/s\n",
+                       DPTUN_BW_ARG(tunnel->bw_granularity),
+                       DPTUN_BW_ARG(tunnel_reg_bw_granularity(regs)));
+
+               ret = false;
+       }
+
+       /*
+        * On some devices at least the BW alloc mode enabled status is always
+        * reported as 0, so skip checking that here.
+        */
+
+       if (!(flags & ALLOW_ALLOCATED_BW_CHANGE) &&
+           tunnel_allocated_bw(tunnel) !=
+           tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity) {
+               tun_dbg(tunnel,
+                       "Allocated BW has changed: %d -> %d Mb/s\n",
+                       DPTUN_BW_ARG(tunnel->allocated_bw),
+                       DPTUN_BW_ARG(tunnel_reg(regs, DP_ALLOCATED_BW) * tunnel->bw_granularity));
+
+               ret = false;
+       }
+
+       return ret;
+}
+
+static int
+read_and_verify_tunnel_regs(struct drm_dp_tunnel *tunnel,
+                           struct drm_dp_tunnel_regs *regs,
+                           unsigned int flags)
+{
+       int err;
+
+       err = read_tunnel_regs(tunnel->aux, regs);
+       if (err < 0) {
+               drm_dp_tunnel_set_io_error(tunnel);
+
+               return err;
+       }
+
+       if (!tunnel_regs_are_valid(tunnel->group->mgr, regs, flags))
+               return -EINVAL;
+
+       if (!tunnel_info_changes_are_valid(tunnel, regs, flags))
+               return -EINVAL;
+
+       return 0;
+}
+
+static bool update_dprx_caps(struct drm_dp_tunnel *tunnel, const struct drm_dp_tunnel_regs *regs)
+{
+       bool changed = false;
+
+       if (tunnel_reg_max_dprx_rate(regs) != tunnel->max_dprx_rate) {
+               tunnel->max_dprx_rate = tunnel_reg_max_dprx_rate(regs);
+               changed = true;
+       }
+
+       if (tunnel_reg_max_dprx_lane_count(regs) != tunnel->max_dprx_lane_count) {
+               tunnel->max_dprx_lane_count = tunnel_reg_max_dprx_lane_count(regs);
+               changed = true;
+       }
+
+       return changed;
+}
+
+static int dev_id_len(const u8 *dev_id, int max_len)
+{
+       while (max_len && dev_id[max_len - 1] == '\0')
+               max_len--;
+
+       return max_len;
+}
+
+static int get_max_dprx_bw(const struct drm_dp_tunnel *tunnel)
+{
+       int max_dprx_bw = drm_dp_max_dprx_data_rate(tunnel->max_dprx_rate,
+                                                   tunnel->max_dprx_lane_count);
+
+       /*
+        * A BW request of roundup(max_dprx_bw, tunnel->bw_granularity) results in
+        * an allocation of max_dprx_bw. A BW request above this rounded-up
+        * value will fail.
+        */
+       return min(roundup(max_dprx_bw, tunnel->bw_granularity),
+                  MAX_DP_REQUEST_BW * tunnel->bw_granularity);
+}
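To put numbers on this: with the coarsest granularity (the gr == 0 case of tunnel_reg_bw_granularity() above, 250000 / 8 = 31250 kB/s) and an HBR3 x4 DPRX, max_dprx_bw is 3,240,000 kB/s and roundup(3240000, 31250) = 3,250,000 kB/s; assuming MAX_DP_REQUEST_BW is the 8-bit DPCD maximum of 255, the 255 * 31250 = 7,968,750 kB/s cap doesn't bite, so a request of 3,250,000 kB/s can still be granted in full.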
+
+static int get_max_tunnel_bw(const struct drm_dp_tunnel *tunnel)
+{
+       return min(get_max_dprx_bw(tunnel), tunnel->group->available_bw);
+}
+
+/**
+ * drm_dp_tunnel_detect - Detect DP tunnel on the link
+ * @mgr: Tunnel manager
+ * @aux: DP AUX on which the tunnel will be detected
+ *
+ * Detect if there is any DP tunnel on the link and add it to the tunnel
+ * group's tunnel list.
+ *
+ * Returns a pointer to a tunnel on success, or an ERR_PTR() error on
+ * failure.
+ */
+struct drm_dp_tunnel *
+drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr,
+                    struct drm_dp_aux *aux)
+{
+       struct drm_dp_tunnel_regs regs;
+       struct drm_dp_tunnel *tunnel;
+       int err;
+
+       err = read_tunnel_regs(aux, &regs);
+       if (err)
+               return ERR_PTR(err);
+
+       if (!(tunnel_reg(&regs, DP_TUNNELING_CAPABILITIES) &
+             DP_TUNNELING_SUPPORT))
+               return ERR_PTR(-ENODEV);
+
+       /* The DPRX caps are valid only after enabling BW alloc mode. */
+       if (!tunnel_regs_are_valid(mgr, &regs, SKIP_DPRX_CAPS_CHECK))
+               return ERR_PTR(-EINVAL);
+
+       tunnel = create_tunnel(mgr, aux, &regs);
+       if (!tunnel)
+               return ERR_PTR(-ENOMEM);
+
+       tun_dbg(tunnel,
+               "OUI:%*phD DevID:%*pE Rev-HW:%d.%d SW:%d.%d PR-Sup:%s BWA-Sup:%s BWA-En:%s\n",
+               DP_TUNNELING_OUI_BYTES,
+                       tunnel_reg_ptr(&regs, DP_TUNNELING_OUI),
+               dev_id_len(tunnel_reg_ptr(&regs, DP_TUNNELING_DEV_ID), DP_TUNNELING_DEV_ID_BYTES),
+                       tunnel_reg_ptr(&regs, DP_TUNNELING_DEV_ID),
+               (tunnel_reg(&regs, DP_TUNNELING_HW_REV) & DP_TUNNELING_HW_REV_MAJOR_MASK) >>
+                       DP_TUNNELING_HW_REV_MAJOR_SHIFT,
+               (tunnel_reg(&regs, DP_TUNNELING_HW_REV) & DP_TUNNELING_HW_REV_MINOR_MASK) >>
+                       DP_TUNNELING_HW_REV_MINOR_SHIFT,
+               tunnel_reg(&regs, DP_TUNNELING_SW_REV_MAJOR),
+               tunnel_reg(&regs, DP_TUNNELING_SW_REV_MINOR),
+               str_yes_no(tunnel_reg(&regs, DP_TUNNELING_CAPABILITIES) &
+                          DP_PANEL_REPLAY_OPTIMIZATION_SUPPORT),
+               str_yes_no(tunnel->bw_alloc_supported),
+               str_yes_no(tunnel->bw_alloc_enabled));
+
+       return tunnel;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_detect);
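Tying detection to the BWA entry points defined further below, a hypothetical driver probe path would look roughly like this (placeholder mgr/aux, error unwinding trimmed):

        struct drm_dp_tunnel *tunnel;
        int err;

        tunnel = drm_dp_tunnel_detect(mgr, aux);
        if (IS_ERR(tunnel))
                return PTR_ERR(tunnel); /* -ENODEV: no tunnel on this link */

        err = drm_dp_tunnel_enable_bw_alloc(tunnel);
        if (err == -EOPNOTSUPP)
                err = 0;        /* tunnel present, BW allocation mode not supported */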
+
+/**
+ * drm_dp_tunnel_destroy - Destroy tunnel object
+ * @tunnel: Tunnel object
+ *
+ * Remove the tunnel from the tunnel topology and destroy it.
+ *
+ * Returns 0 on success, -ENODEV if the tunnel has been destroyed already.
+ */
+int drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel)
+{
+       if (!tunnel)
+               return 0;
+
+       if (drm_WARN_ON(tunnel->group->mgr->dev, tunnel->destroyed))
+               return -ENODEV;
+
+       tun_dbg(tunnel, "destroying\n");
+
+       tunnel->destroyed = true;
+       destroy_tunnel(tunnel);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_destroy);
+
+static int check_tunnel(const struct drm_dp_tunnel *tunnel)
+{
+       if (tunnel->destroyed)
+               return -ENODEV;
+
+       if (tunnel->has_io_error)
+               return -EIO;
+
+       return 0;
+}
+
+static int group_allocated_bw(struct drm_dp_tunnel_group *group)
+{
+       struct drm_dp_tunnel *tunnel;
+       int group_allocated_bw = 0;
+
+       for_each_tunnel_in_group(group, tunnel) {
+               if (check_tunnel(tunnel) == 0 &&
+                   tunnel->bw_alloc_enabled)
+                       group_allocated_bw += tunnel_allocated_bw(tunnel);
+       }
+
+       return group_allocated_bw;
+}
+
+/*
+ * The estimated BW reported by the TBT Connection Manager for each tunnel in
+ * a group includes the BW already allocated for the given tunnel and the
+ * unallocated BW which is free to be used by any tunnel in the group.
+ */
+static int group_free_bw(const struct drm_dp_tunnel *tunnel)
+{
+       return tunnel->estimated_bw - tunnel_allocated_bw(tunnel);
+}
+
+static int calc_group_available_bw(const struct drm_dp_tunnel *tunnel)
+{
+       return group_allocated_bw(tunnel->group) +
+              group_free_bw(tunnel);
+}
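To make the accounting concrete (arbitrary kB/s figures): if tunnels A and B in a group have 2,000,000 and 1,000,000 kB/s allocated, and B's DP_ESTIMATED_BW readout translates to 3,000,000 kB/s, then group_free_bw(B) = 3,000,000 - 1,000,000 = 2,000,000 kB/s, and calc_group_available_bw(B) = (2,000,000 + 1,000,000) + 2,000,000 = 5,000,000 kB/s.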
+
+static int update_group_available_bw(struct drm_dp_tunnel *tunnel,
+                                    const struct drm_dp_tunnel_regs *regs)
+{
+       struct drm_dp_tunnel *tunnel_iter;
+       int group_available_bw;
+       bool changed;
+
+       tunnel->estimated_bw = tunnel_reg(regs, DP_ESTIMATED_BW) * tunnel->bw_granularity;
+
+       if (calc_group_available_bw(tunnel) == tunnel->group->available_bw)
+               return 0;
+
+       for_each_tunnel_in_group(tunnel->group, tunnel_iter) {
+               int err;
+
+               if (tunnel_iter == tunnel)
+                       continue;
+
+               if (check_tunnel(tunnel_iter) != 0 ||
+                   !tunnel_iter->bw_alloc_enabled)
+                       continue;
+
+               err = drm_dp_dpcd_probe(tunnel_iter->aux, DP_DPCD_REV);
+               if (err) {
+                       tun_dbg(tunnel_iter,
+                               "Probe failed, assume disconnected (err %pe)\n",
+                               ERR_PTR(err));
+                       drm_dp_tunnel_set_io_error(tunnel_iter);
+               }
+       }
+
+       group_available_bw = calc_group_available_bw(tunnel);
+
+       tun_dbg(tunnel, "Updated group available BW: %d->%d\n",
+               DPTUN_BW_ARG(tunnel->group->available_bw),
+               DPTUN_BW_ARG(group_available_bw));
+
+       changed = tunnel->group->available_bw != group_available_bw;
+
+       tunnel->group->available_bw = group_available_bw;
+
+       return changed ? 1 : 0;
+}
+
+static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
+{
+       u8 mask = DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE | DP_UNMASK_BW_ALLOCATION_IRQ;
+       u8 val;
+
+       if (drm_dp_dpcd_readb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0)
+               goto out_err;
+
+       if (enable)
+               val |= mask;
+       else
+               val &= ~mask;
+
+       if (drm_dp_dpcd_writeb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0)
+               goto out_err;
+
+       tunnel->bw_alloc_enabled = enable;
+
+       return 0;
+
+out_err:
+       drm_dp_tunnel_set_io_error(tunnel);
+
+       return -EIO;
+}
+
+/**
+ * drm_dp_tunnel_enable_bw_alloc - Enable DP tunnel BW allocation mode
+ * @tunnel: Tunnel object
+ *
+ * Enable the DP tunnel BW allocation mode on @tunnel if it supports it.
+ *
+ * Returns 0 in case of success, negative error code otherwise.
+ */
+int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel)
+{
+       struct drm_dp_tunnel_regs regs;
+       int err;
+
+       err = check_tunnel(tunnel);
+       if (err)
+               return err;
+
+       if (!tunnel->bw_alloc_supported)
+               return -EOPNOTSUPP;
+
+       if (!tunnel_group_id(tunnel->group->drv_group_id))
+               return -EINVAL;
+
+       err = set_bw_alloc_mode(tunnel, true);
+       if (err)
+               goto out;
+
+       /*
+        * After a BWA disable/re-enable sequence the allocated BW can either
+        * stay at its last requested value or, for instance after system
+        * suspend/resume, TBT CM can reset back the allocation to the amount
+        * allocated in the legacy/non-BWA mode. Accordingly allow for the
+        * allocation to change wrt. the last SW state.
+        */
+       err = read_and_verify_tunnel_regs(tunnel, &regs,
+                                         ALLOW_ALLOCATED_BW_CHANGE);
+       if (err) {
+               set_bw_alloc_mode(tunnel, false);
+
+               goto out;
+       }
+
+       if (!tunnel->max_dprx_rate)
+               update_dprx_caps(tunnel, &regs);
+
+       if (tunnel->group->available_bw == -1) {
+               err = update_group_available_bw(tunnel, &regs);
+               if (err > 0)
+                       err = 0;
+       }
+out:
+       tun_dbg_stat(tunnel, err,
+                    "Enabling BW alloc mode: DPRX:%dx%d Group alloc:%d/%d Mb/s",
+                    tunnel->max_dprx_rate / 100, tunnel->max_dprx_lane_count,
+                    DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
+                    DPTUN_BW_ARG(tunnel->group->available_bw));
+
+       return err;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_enable_bw_alloc);
+
+/**
+ * drm_dp_tunnel_disable_bw_alloc - Disable DP tunnel BW allocation mode
+ * @tunnel: Tunnel object
+ *
+ * Disable the DP tunnel BW allocation mode on @tunnel.
+ *
+ * Returns 0 in case of success, negative error code otherwise.
+ */
+int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel)
+{
+       int err;
+
+       err = check_tunnel(tunnel);
+       if (err)
+               return err;
+
+       tunnel->allocated_bw = -1;
+
+       err = set_bw_alloc_mode(tunnel, false);
+
+       tun_dbg_stat(tunnel, err, "Disabling BW alloc mode");
+
+       return err;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_disable_bw_alloc);
+
+/**
+ * drm_dp_tunnel_bw_alloc_is_enabled - Query the BW allocation mode enabled state
+ * @tunnel: Tunnel object
+ *
+ * Query if the BW allocation mode is enabled for @tunnel.
+ *
+ * Returns %true if the BW allocation mode is enabled for @tunnel.
+ */
+bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel)
+{
+       return tunnel && tunnel->bw_alloc_enabled;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_bw_alloc_is_enabled);
+
+static int clear_bw_req_state(struct drm_dp_aux *aux)
+{
+       u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED;
+
+       if (drm_dp_dpcd_writeb(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0)
+               return -EIO;
+
+       return 0;
+}
+
+static int bw_req_complete(struct drm_dp_aux *aux, bool *status_changed)
+{
+       u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED;
+       u8 status_change_mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
+       u8 val;
+       int err;
+
+       if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
+               return -EIO;
+
+       *status_changed = val & status_change_mask;
+
+       val &= bw_req_mask;
+
+       if (!val)
+               return -EAGAIN;
+
+       err = clear_bw_req_state(aux);
+       if (err < 0)
+               return err;
+
+       return val == DP_BW_REQUEST_SUCCEEDED ? 0 : -ENOSPC;
+}
+
+static int allocate_tunnel_bw(struct drm_dp_tunnel *tunnel, int bw)
+{
+       struct drm_dp_tunnel_mgr *mgr = tunnel->group->mgr;
+       int request_bw = DIV_ROUND_UP(bw, tunnel->bw_granularity);
+       DEFINE_WAIT_FUNC(wait, woken_wake_function);
+       long timeout;
+       int err;
+
+       if (bw < 0) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       if (request_bw * tunnel->bw_granularity == tunnel->allocated_bw)
+               return 0;
+
+       /* Atomic check should prevent the following. */
+       if (drm_WARN_ON(mgr->dev, request_bw > MAX_DP_REQUEST_BW)) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       err = clear_bw_req_state(tunnel->aux);
+       if (err)
+               goto out;
+
+       if (drm_dp_dpcd_writeb(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) {
+               err = -EIO;
+               goto out;
+       }
+
+       timeout = msecs_to_jiffies(3000);
+       add_wait_queue(&mgr->bw_req_queue, &wait);
+
+       for (;;) {
+               bool status_changed;
+
+               err = bw_req_complete(tunnel->aux, &status_changed);
+               if (err != -EAGAIN)
+                       break;
+
+               if (status_changed) {
+                       struct drm_dp_tunnel_regs regs;
+
+                       err = read_and_verify_tunnel_regs(tunnel, &regs,
+                                                         ALLOW_ALLOCATED_BW_CHANGE);
+                       if (err)
+                               break;
+               }
+
+               if (!timeout) {
+                       err = -ETIMEDOUT;
+                       break;
+               }
+
+               timeout = wait_woken(&wait, TASK_UNINTERRUPTIBLE, timeout);
+       }
+
+       remove_wait_queue(&mgr->bw_req_queue, &wait);
+
+       if (err)
+               goto out;
+
+       tunnel->allocated_bw = request_bw * tunnel->bw_granularity;
+
+out:
+       tun_dbg_stat(tunnel, err, "Allocating %d/%d Mb/s for tunnel: Group alloc:%d/%d Mb/s",
+                    DPTUN_BW_ARG(request_bw * tunnel->bw_granularity),
+                    DPTUN_BW_ARG(get_max_tunnel_bw(tunnel)),
+                    DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
+                    DPTUN_BW_ARG(tunnel->group->available_bw));
+
+       if (err == -EIO)
+               drm_dp_tunnel_set_io_error(tunnel);
+
+       return err;
+}
+
+/**
+ * drm_dp_tunnel_alloc_bw - Allocate BW for a DP tunnel
+ * @tunnel: Tunnel object
+ * @bw: BW in kB/s units
+ *
+ * Allocate @bw kB/s for @tunnel. The allocated BW must be freed after use by
+ * calling this function for the same tunnel setting @bw to 0.
+ *
+ * Returns 0 in case of success, a negative error code otherwise.
+ */
+int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw)
+{
+       int err;
+
+       err = check_tunnel(tunnel);
+       if (err)
+               return err;
+
+       return allocate_tunnel_bw(tunnel, bw);
+}
+EXPORT_SYMBOL(drm_dp_tunnel_alloc_bw);
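A sketch of the allocate/free pairing the kernel-doc above describes (hypothetical figures; bw is in kB/s as elsewhere in this file, and allocate_tunnel_bw() rounds the request up to bw_granularity units):

        /* reserve ~2,000,000 kB/s while streams are enabled ... */
        err = drm_dp_tunnel_alloc_bw(tunnel, 2000000);

        /* ... and release it by allocating 0 when they are disabled */
        err = drm_dp_tunnel_alloc_bw(tunnel, 0);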
+
+/**
+ * drm_dp_tunnel_atomic_get_allocated_bw - Get the BW allocated for a DP tunnel
+ * @tunnel: Tunnel object
+ *
+ * Get the current BW allocated for @tunnel. After the tunnel is created /
+ * resumed and the BW allocation mode is enabled for it, the allocation
+ * becomes determined only after the first allocation request by the driver
+ * calling drm_dp_tunnel_alloc_bw().
+ *
+ * Return the BW allocated for the tunnel, or -1 if the allocation is
+ * undetermined.
+ */
+int drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel)
+{
+       return tunnel->allocated_bw;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_get_allocated_bw);
+
+/*
+ * Return 0 if the status hasn't changed, 1 if the status has changed, a
+ * negative error code in case of an I/O failure.
+ */
+static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
+{
+       u8 mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
+       u8 val;
+
+       if (drm_dp_dpcd_readb(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
+               goto out_err;
+
+       val &= mask;
+
+       if (val) {
+               if (drm_dp_dpcd_writeb(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
+                       goto out_err;
+
+               return 1;
+       }
+
+       if (!drm_dp_tunnel_bw_alloc_is_enabled(tunnel))
+               return 0;
+
+       /*
+        * Check for estimated BW changes explicitly to account for lost
+        * BW change notifications.
+        */
+       if (drm_dp_dpcd_readb(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
+               goto out_err;
+
+       if (val * tunnel->bw_granularity != tunnel->estimated_bw)
+               return 1;
+
+       return 0;
+
+out_err:
+       drm_dp_tunnel_set_io_error(tunnel);
+
+       return -EIO;
+}
+
+/**
+ * drm_dp_tunnel_update_state - Update DP tunnel SW state with the HW state
+ * @tunnel: Tunnel object
+ *
+ * Update the SW state of @tunnel with the HW state.
+ *
+ * Returns 0 if the state has not changed, 1 if it has changed and got updated
+ * successfully and a negative error code otherwise.
+ */
+int drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel)
+{
+       struct drm_dp_tunnel_regs regs;
+       bool changed = false;
+       int ret;
+
+       ret = check_tunnel(tunnel);
+       if (ret < 0)
+               return ret;
+
+       ret = check_and_clear_status_change(tunnel);
+       if (ret < 0)
+               goto out;
+
+       if (!ret)
+               return 0;
+
+       ret = read_and_verify_tunnel_regs(tunnel, &regs, 0);
+       if (ret)
+               goto out;
+
+       if (update_dprx_caps(tunnel, &regs))
+               changed = true;
+
+       ret = update_group_available_bw(tunnel, &regs);
+       if (ret == 1)
+               changed = true;
+
+out:
+       tun_dbg_stat(tunnel, ret < 0 ? ret : 0,
+                    "State update: Changed:%s DPRX:%dx%d Tunnel alloc:%d/%d Group alloc:%d/%d Mb/s",
+                    str_yes_no(changed),
+                    tunnel->max_dprx_rate / 100, tunnel->max_dprx_lane_count,
+                    DPTUN_BW_ARG(tunnel->allocated_bw),
+                    DPTUN_BW_ARG(get_max_tunnel_bw(tunnel)),
+                    DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
+                    DPTUN_BW_ARG(tunnel->group->available_bw));
+
+       if (ret < 0)
+               return ret;
+
+       if (changed)
+               return 1;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_update_state);
+
+/**
+ * drm_dp_tunnel_handle_irq - Handle DP tunnel IRQs
+ * @mgr: Tunnel manager
+ * @aux: DP AUX on which the IRQ was raised
+ *
+ * Handle any pending DP tunnel IRQs, waking up waiters for a completion
+ * event.
+ *
+ * Returns 1 if the state of the tunnel has changed which requires calling
+ * drm_dp_tunnel_update_state(), a negative error code in case of a failure,
+ * 0 otherwise.
+ */
+int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *aux)
+{
+       u8 val;
+
+       if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0)
+               return -EIO;
+
+       if (val & (DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED))
+               wake_up_all(&mgr->bw_req_queue);
+
+       if (val & (DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED))
+               return 1;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_handle_irq);
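The intended split between the short IRQ path and deferred work is roughly the following (hypothetical handler):

        /* in the HPD/IRQ path: ack the events, wake BW-request waiters */
        ret = drm_dp_tunnel_handle_irq(mgr, aux);

        /* in the deferred hotplug work, if a change was indicated: */
        if (ret == 1 && drm_dp_tunnel_update_state(tunnel) == 1)
                /* re-run the atomic checks / reprobe the modes */;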
+
+/**
+ * drm_dp_tunnel_max_dprx_rate - Query the maximum rate of the tunnel's DPRX
+ * @tunnel: Tunnel object
+ *
+ * The function is used to query the maximum link rate of the DPRX connected
+ * to @tunnel. Note that this rate will not be limited by the BW limit of the
+ * tunnel, as opposed to the standard and extended DP_MAX_LINK_RATE DPCD
+ * registers.
+ *
+ * Returns the maximum link rate in 10 kbit/s units.
+ */
+int drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel)
+{
+       return tunnel->max_dprx_rate;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_rate);
+
+/**
+ * drm_dp_tunnel_max_dprx_lane_count - Query the maximum lane count of the tunnel's DPRX
+ * @tunnel: Tunnel object
+ *
+ * The function is used to query the maximum lane count of the DPRX connected
+ * to @tunnel. Note that this lane count will not be limited by the BW limit of
+ * the tunnel, as opposed to the standard and extended DP_MAX_LANE_COUNT DPCD
+ * registers.
+ *
+ * Returns the maximum lane count.
+ */
+int drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel)
+{
+       return tunnel->max_dprx_lane_count;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_lane_count);
+
+/**
+ * drm_dp_tunnel_available_bw - Query the estimated total available BW of the tunnel
+ * @tunnel: Tunnel object
+ *
+ * This function is used to query the estimated total available BW of the
+ * tunnel. This includes the currently allocated and free BW for all the
+ * tunnels in @tunnel's group. The available BW is valid only after the BW
+ * allocation mode has been enabled for the tunnel and its state has been
+ * updated by calling drm_dp_tunnel_update_state().
+ *
+ * Returns the @tunnel group's estimated total available bandwidth in kB/s
+ * units, or -1 if the available BW isn't valid (the BW allocation mode is
+ * not enabled or the tunnel's state hasn't been updated).
+ */
+int drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel)
+{
+       return tunnel->group->available_bw;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_available_bw);
+
+static struct drm_dp_tunnel_group_state *
+drm_dp_tunnel_atomic_get_group_state(struct drm_atomic_state *state,
+                                    const struct drm_dp_tunnel *tunnel)
+{
+       return (struct drm_dp_tunnel_group_state *)
+               drm_atomic_get_private_obj_state(state,
+                                                &tunnel->group->base);
+}
+
+static struct drm_dp_tunnel_state *
+add_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
+                struct drm_dp_tunnel *tunnel)
+{
+       struct drm_dp_tunnel_state *tunnel_state;
+
+       tun_dbg_atomic(tunnel,
+                      "Adding state for tunnel %p to group state %p\n",
+                      tunnel, group_state);
+
+       tunnel_state = kzalloc(sizeof(*tunnel_state), GFP_KERNEL);
+       if (!tunnel_state)
+               return NULL;
+
+       tunnel_state->group_state = group_state;
+
+       drm_dp_tunnel_ref_get(tunnel, &tunnel_state->tunnel_ref);
+
+       INIT_LIST_HEAD(&tunnel_state->node);
+       list_add(&tunnel_state->node, &group_state->tunnel_states);
+
+       return tunnel_state;
+}
+
+static void free_tunnel_state(struct drm_dp_tunnel_state *tunnel_state)
+{
+       tun_dbg_atomic(tunnel_state->tunnel_ref.tunnel,
+                      "Freeing state for tunnel %p\n",
+                      tunnel_state->tunnel_ref.tunnel);
+
+       list_del(&tunnel_state->node);
+
+       kfree(tunnel_state->stream_bw);
+       drm_dp_tunnel_ref_put(&tunnel_state->tunnel_ref);
+
+       kfree(tunnel_state);
+}
+
+static void free_group_state(struct drm_dp_tunnel_group_state *group_state)
+{
+       struct drm_dp_tunnel_state *tunnel_state;
+       struct drm_dp_tunnel_state *tunnel_state_tmp;
+
+       for_each_tunnel_state_safe(group_state, tunnel_state, tunnel_state_tmp)
+               free_tunnel_state(tunnel_state);
+
+       kfree(group_state);
+}
+
+static struct drm_dp_tunnel_state *
+get_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
+                const struct drm_dp_tunnel *tunnel)
+{
+       struct drm_dp_tunnel_state *tunnel_state;
+
+       for_each_tunnel_state(group_state, tunnel_state)
+               if (tunnel_state->tunnel_ref.tunnel == tunnel)
+                       return tunnel_state;
+
+       return NULL;
+}
+
+static struct drm_dp_tunnel_state *
+get_or_add_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
+                       struct drm_dp_tunnel *tunnel)
+{
+       struct drm_dp_tunnel_state *tunnel_state;
+
+       tunnel_state = get_tunnel_state(group_state, tunnel);
+       if (tunnel_state)
+               return tunnel_state;
+
+       return add_tunnel_state(group_state, tunnel);
+}
+
+static struct drm_private_state *
+tunnel_group_duplicate_state(struct drm_private_obj *obj)
+{
+       struct drm_dp_tunnel_group_state *group_state;
+       struct drm_dp_tunnel_state *tunnel_state;
+
+       group_state = kzalloc(sizeof(*group_state), GFP_KERNEL);
+       if (!group_state)
+               return NULL;
+
+       INIT_LIST_HEAD(&group_state->tunnel_states);
+
+       __drm_atomic_helper_private_obj_duplicate_state(obj, &group_state->base);
+
+       for_each_tunnel_state(to_group_state(obj->state), tunnel_state) {
+               struct drm_dp_tunnel_state *new_tunnel_state;
+
+               new_tunnel_state = get_or_add_tunnel_state(group_state,
+                                                          tunnel_state->tunnel_ref.tunnel);
+               if (!new_tunnel_state)
+                       goto out_free_state;
+
+               new_tunnel_state->stream_mask = tunnel_state->stream_mask;
+               new_tunnel_state->stream_bw = kmemdup(tunnel_state->stream_bw,
+                                                     sizeof(*tunnel_state->stream_bw) *
+                                                       hweight32(tunnel_state->stream_mask),
+                                                     GFP_KERNEL);
+
+               if (!new_tunnel_state->stream_bw)
+                       goto out_free_state;
+       }
+
+       return &group_state->base;
+
+out_free_state:
+       free_group_state(group_state);
+
+       return NULL;
+}
+
+static void tunnel_group_destroy_state(struct drm_private_obj *obj, struct drm_private_state *state)
+{
+       free_group_state(to_group_state(state));
+}
+
+static const struct drm_private_state_funcs tunnel_group_funcs = {
+       .atomic_duplicate_state = tunnel_group_duplicate_state,
+       .atomic_destroy_state = tunnel_group_destroy_state,
+};
+
+/**
+ * drm_dp_tunnel_atomic_get_state - get/allocate the new atomic state for a tunnel
+ * @state: Atomic state
+ * @tunnel: Tunnel to get the state for
+ *
+ * Get the new atomic state for @tunnel, duplicating it from the old tunnel
+ * state if not yet allocated.
+ *
+ * Return the state or an ERR_PTR() error on failure.
+ */
+struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state,
+                              struct drm_dp_tunnel *tunnel)
+{
+       struct drm_dp_tunnel_group_state *group_state;
+       struct drm_dp_tunnel_state *tunnel_state;
+
+       group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel);
+       if (IS_ERR(group_state))
+               return ERR_CAST(group_state);
+
+       tunnel_state = get_or_add_tunnel_state(group_state, tunnel);
+       if (!tunnel_state)
+               return ERR_PTR(-ENOMEM);
+
+       return tunnel_state;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_state);
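
A minimal caller-side sketch (not part of this patch) of how a driver's atomic
check path could obtain the tunnel state before touching stream BW; the
function name is illustrative:

    #include <drm/display/drm_dp_tunnel.h>
    #include <drm/drm_atomic.h>

    /* Sketch: look up (or allocate) the tunnel's state in atomic check. */
    static int example_check_tunnel(struct drm_atomic_state *state,
                                    struct drm_dp_tunnel *tunnel)
    {
            struct drm_dp_tunnel_state *tunnel_state;

            tunnel_state = drm_dp_tunnel_atomic_get_state(state, tunnel);
            if (IS_ERR(tunnel_state))
                    return PTR_ERR(tunnel_state);

            /* tunnel_state now tracks this tunnel's streams in @state. */
            return 0;
    }
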
+
+/**
+ * drm_dp_tunnel_atomic_get_old_state - get the old atomic state for a tunnel
+ * @state: Atomic state
+ * @tunnel: Tunnel to get the state for
+ *
+ * Get the old atomic state for @tunnel.
+ *
+ * Return the old state or NULL if the tunnel's atomic state is not in @state.
+ */
+struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_old_state(struct drm_atomic_state *state,
+                                  const struct drm_dp_tunnel *tunnel)
+{
+       struct drm_dp_tunnel_group_state *old_group_state;
+       int i;
+
+       for_each_old_group_in_state(state, old_group_state, i)
+               if (to_group(old_group_state->base.obj) == tunnel->group)
+                       return get_tunnel_state(old_group_state, tunnel);
+
+       return NULL;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_old_state);
+
+/**
+ * drm_dp_tunnel_atomic_get_new_state - get the new atomic state for a tunnel
+ * @state: Atomic state
+ * @tunnel: Tunnel to get the state for
+ *
+ * Get the new atomic state for @tunnel.
+ *
+ * Return the new state or NULL if the tunnel's atomic state is not in @state.
+ */
+struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state,
+                                  const struct drm_dp_tunnel *tunnel)
+{
+       struct drm_dp_tunnel_group_state *new_group_state;
+       int i;
+
+       for_each_new_group_in_state(state, new_group_state, i)
+               if (to_group(new_group_state->base.obj) == tunnel->group)
+                       return get_tunnel_state(new_group_state, tunnel);
+
+       return NULL;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_new_state);
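
The old/new accessors pair up in the check and commit paths; a small sketch
(illustrative, building on the headers in the previous sketch and on
drm_dp_tunnel_atomic_get_required_bw() defined further below) comparing the
required BW before and after a commit:

    /* Sketch: log how an atomic state changes a tunnel's required BW. */
    static void example_log_bw_change(struct drm_atomic_state *state,
                                      struct drm_dp_tunnel *tunnel)
    {
            const struct drm_dp_tunnel_state *old_st =
                    drm_dp_tunnel_atomic_get_old_state(state, tunnel);
            const struct drm_dp_tunnel_state *new_st =
                    drm_dp_tunnel_atomic_get_new_state(state, tunnel);

            /* NULL: the tunnel has no state in this atomic state. */
            if (!old_st || !new_st)
                    return;

            drm_dbg_kms(state->dev, "tunnel BW %d -> %d\n",
                        drm_dp_tunnel_atomic_get_required_bw(old_st),
                        drm_dp_tunnel_atomic_get_required_bw(new_st));
    }
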
+
+static bool init_group(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_tunnel_group *group)
+{
+       struct drm_dp_tunnel_group_state *group_state;
+
+       group_state = kzalloc(sizeof(*group_state), GFP_KERNEL);
+       if (!group_state)
+               return false;
+
+       INIT_LIST_HEAD(&group_state->tunnel_states);
+
+       group->mgr = mgr;
+       group->available_bw = -1;
+       INIT_LIST_HEAD(&group->tunnels);
+
+       drm_atomic_private_obj_init(mgr->dev, &group->base, &group_state->base,
+                                   &tunnel_group_funcs);
+
+       return true;
+}
+
+static void cleanup_group(struct drm_dp_tunnel_group *group)
+{
+       drm_atomic_private_obj_fini(&group->base);
+}
+
+#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state)
+{
+       const struct drm_dp_tunnel_state *tunnel_state;
+       u32 stream_mask = 0;
+
+       for_each_tunnel_state(group_state, tunnel_state) {
+               drm_WARN(to_group(group_state->base.obj)->mgr->dev,
+                        tunnel_state->stream_mask & stream_mask,
+                        "[DPTUN %s]: conflicting stream IDs %x (IDs in other tunnels %x)\n",
+                        tunnel_state->tunnel_ref.tunnel->name,
+                        tunnel_state->stream_mask,
+                        stream_mask);
+
+               stream_mask |= tunnel_state->stream_mask;
+       }
+}
+#else
+static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state)
+{
+}
+#endif
+
+static int stream_id_to_idx(u32 stream_mask, u8 stream_id)
+{
+       return hweight32(stream_mask & (BIT(stream_id) - 1));
+}
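
Worked example for stream_id_to_idx(): the per-tunnel stream_bw[] array is
kept dense and ordered by stream ID, so with stream_mask 0b1101 (streams 0, 2
and 3) ID 0 maps to index 0, ID 2 to index 1 and ID 3 to index 2, i.e. the
population count of the mask bits below the given ID.
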
+
+static int resize_bw_array(struct drm_dp_tunnel_state *tunnel_state,
+                          unsigned long old_mask, unsigned long new_mask)
+{
+       unsigned long move_mask = old_mask & new_mask;
+       int *new_bws = NULL;
+       int id;
+
+       WARN_ON(!new_mask);
+
+       if (old_mask == new_mask)
+               return 0;
+
+       new_bws = kcalloc(hweight32(new_mask), sizeof(*new_bws), GFP_KERNEL);
+       if (!new_bws)
+               return -ENOMEM;
+
+       for_each_set_bit(id, &move_mask, BITS_PER_TYPE(move_mask))
+               new_bws[stream_id_to_idx(new_mask, id)] =
+                       tunnel_state->stream_bw[stream_id_to_idx(old_mask, id)];
+
+       kfree(tunnel_state->stream_bw);
+       tunnel_state->stream_bw = new_bws;
+       tunnel_state->stream_mask = new_mask;
+
+       return 0;
+}
+
+static int set_stream_bw(struct drm_dp_tunnel_state *tunnel_state,
+                        u8 stream_id, int bw)
+{
+       int err;
+
+       err = resize_bw_array(tunnel_state,
+                             tunnel_state->stream_mask,
+                             tunnel_state->stream_mask | BIT(stream_id));
+       if (err)
+               return err;
+
+       tunnel_state->stream_bw[stream_id_to_idx(tunnel_state->stream_mask, stream_id)] = bw;
+
+       return 0;
+}
+
+static int clear_stream_bw(struct drm_dp_tunnel_state *tunnel_state,
+                          u8 stream_id)
+{
+       if (!(tunnel_state->stream_mask & ~BIT(stream_id))) {
+               free_tunnel_state(tunnel_state);
+               return 0;
+       }
+
+       return resize_bw_array(tunnel_state,
+                              tunnel_state->stream_mask,
+                              tunnel_state->stream_mask & ~BIT(stream_id));
+}
+
+/**
+ * drm_dp_tunnel_atomic_set_stream_bw - Set the BW for a DP tunnel stream
+ * @state: Atomic state
+ * @tunnel: DP tunnel containing the stream
+ * @stream_id: Stream ID
+ * @bw: BW of the stream
+ *
+ * Set a DP tunnel stream's required BW in the atomic state.
+ *
+ * Returns 0 in case of success, a negative error code otherwise.
+ */
+int drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state,
+                                      struct drm_dp_tunnel *tunnel,
+                                      u8 stream_id, int bw)
+{
+       struct drm_dp_tunnel_group_state *new_group_state;
+       struct drm_dp_tunnel_state *tunnel_state;
+       int err;
+
+       if (drm_WARN_ON(tunnel->group->mgr->dev,
+                       stream_id >= BITS_PER_TYPE(tunnel_state->stream_mask)))
+               return -EINVAL;
+
+       tun_dbg(tunnel,
+               "Setting %d Mb/s for stream %d\n",
+               DPTUN_BW_ARG(bw), stream_id);
+
+       new_group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel);
+       if (IS_ERR(new_group_state))
+               return PTR_ERR(new_group_state);
+
+       if (bw == 0) {
+               tunnel_state = get_tunnel_state(new_group_state, tunnel);
+               if (!tunnel_state)
+                       return 0;
+
+               return clear_stream_bw(tunnel_state, stream_id);
+       }
+
+       tunnel_state = get_or_add_tunnel_state(new_group_state, tunnel);
+       if (drm_WARN_ON(state->dev, !tunnel_state))
+               return -EINVAL;
+
+       err = set_stream_bw(tunnel_state, stream_id, bw);
+       if (err)
+               return err;
+
+       check_unique_stream_ids(new_group_state);
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_set_stream_bw);
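
Caller-side sketch: an encoder's atomic check would typically publish its
stream's BW here and clear it again with bw == 0 once the stream is disabled
(how stream IDs are picked, e.g. a pipe index, is up to the driver):

    /* Sketch: publish or clear one stream's BW in the tunnel state. */
    static int example_update_stream_bw(struct drm_atomic_state *state,
                                        struct drm_dp_tunnel *tunnel,
                                        u8 stream_id, bool enable, int bw)
    {
            return drm_dp_tunnel_atomic_set_stream_bw(state, tunnel, stream_id,
                                                      enable ? bw : 0);
    }
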
+
+/**
+ * drm_dp_tunnel_atomic_get_required_bw - Get the BW required by a DP tunnel
+ * @tunnel_state: Atomic state of the queried tunnel
+ *
+ * Calculate the BW required by a tunnel, adding up the required BW of all
+ * the streams in the tunnel.
+ *
+ * Return the total BW required by the tunnel.
+ */
+int drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state)
+{
+       int tunnel_bw = 0;
+       int i;
+
+       if (!tunnel_state || !tunnel_state->stream_mask)
+               return 0;
+
+       for (i = 0; i < hweight32(tunnel_state->stream_mask); i++)
+               tunnel_bw += tunnel_state->stream_bw[i];
+
+       return tunnel_bw;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_required_bw);
+
+/**
+ * drm_dp_tunnel_atomic_get_group_streams_in_state - Get mask of stream IDs in a group
+ * @state: Atomic state
+ * @tunnel: Tunnel object
+ * @stream_mask: Mask of streams in @tunnel's group
+ *
+ * Get the mask of all the stream IDs in the tunnel group of @tunnel.
+ *
+ * Return 0 in case of success - with the stream IDs in @stream_mask - or a
+ * negative error code in case of failure.
+ */
+int drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state,
+                                                   const struct drm_dp_tunnel *tunnel,
+                                                   u32 *stream_mask)
+{
+       struct drm_dp_tunnel_group_state *group_state;
+       struct drm_dp_tunnel_state *tunnel_state;
+
+       group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel);
+       if (IS_ERR(group_state))
+               return PTR_ERR(group_state);
+
+       *stream_mask = 0;
+       for_each_tunnel_state(group_state, tunnel_state)
+               *stream_mask |= tunnel_state->stream_mask;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_group_streams_in_state);
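
A sketch of the intended use: when one stream's BW changes, a driver can pull
in every other stream sharing the tunnel group and recheck those as well (the
debug print stands in for whatever per-stream rechecking a driver would do):

    /* Sketch: walk all stream IDs sharing @tunnel's group in @state. */
    static int example_walk_group_streams(struct drm_atomic_state *state,
                                          const struct drm_dp_tunnel *tunnel)
    {
            unsigned long mask;
            u32 stream_mask;
            int id, err;

            err = drm_dp_tunnel_atomic_get_group_streams_in_state(state, tunnel,
                                                                  &stream_mask);
            if (err)
                    return err;

            mask = stream_mask;
            for_each_set_bit(id, &mask, BITS_PER_TYPE(stream_mask))
                    drm_dbg_kms(state->dev, "stream %d shares the group BW\n", id);

            return 0;
    }
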
+
+static int
+drm_dp_tunnel_atomic_check_group_bw(struct drm_dp_tunnel_group_state *new_group_state,
+                                   u32 *failed_stream_mask)
+{
+       struct drm_dp_tunnel_group *group = to_group(new_group_state->base.obj);
+       struct drm_dp_tunnel_state *new_tunnel_state;
+       u32 group_stream_mask = 0;
+       int group_bw = 0;
+
+       for_each_tunnel_state(new_group_state, new_tunnel_state) {
+               struct drm_dp_tunnel *tunnel = new_tunnel_state->tunnel_ref.tunnel;
+               int max_dprx_bw = get_max_dprx_bw(tunnel);
+               int tunnel_bw = drm_dp_tunnel_atomic_get_required_bw(new_tunnel_state);
+
+               tun_dbg(tunnel,
+                       "%sRequired %d/%d Mb/s total for tunnel.\n",
+                       tunnel_bw > max_dprx_bw ? "Not enough BW: " : "",
+                       DPTUN_BW_ARG(tunnel_bw),
+                       DPTUN_BW_ARG(max_dprx_bw));
+
+               if (tunnel_bw > max_dprx_bw) {
+                       *failed_stream_mask = new_tunnel_state->stream_mask;
+                       return -ENOSPC;
+               }
+
+               group_bw += min(roundup(tunnel_bw, tunnel->bw_granularity),
+                               max_dprx_bw);
+               group_stream_mask |= new_tunnel_state->stream_mask;
+       }
+
+       tun_grp_dbg(group,
+                   "%sRequired %d/%d Mb/s total for tunnel group.\n",
+                   group_bw > group->available_bw ? "Not enough BW: " : "",
+                   DPTUN_BW_ARG(group_bw),
+                   DPTUN_BW_ARG(group->available_bw));
+
+       if (group_bw > group->available_bw) {
+               *failed_stream_mask = group_stream_mask;
+               return -ENOSPC;
+       }
+
+       return 0;
+}
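
Note the roundup() above: a tunnel consumes group BW in bw_granularity sized
chunks. With an illustrative bw_granularity of 2000 and a tunnel_bw of 8500
(both in the same units), the tunnel is charged min(roundup(8500, 2000),
max_dprx_bw) = min(10000, max_dprx_bw) against the group budget.
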
+
+/**
+ * drm_dp_tunnel_atomic_check_stream_bws - Check BW limit for all streams in state
+ * @state: Atomic state
+ * @failed_stream_mask: Mask of stream IDs with a BW limit failure
+ *
+ * Check the required BW of each DP tunnel in @state against both the DPRX BW
+ * limit of the tunnel and the BW limit of the tunnel group. Return a mask of
+ * stream IDs in @failed_stream_mask if a check fails. The mask will contain
+ * either all the streams in a tunnel (in case a DPRX BW limit check failed) or
+ * all the streams in a tunnel group (in case a group BW limit check failed).
+ *
+ * Return 0 if all the BW limit checks passed, -ENOSPC in case a BW limit
+ * check failed - with @failed_stream_mask containing the streams failing the
+ * check - or a negative error code otherwise.
+ */
+int drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,
+                                         u32 *failed_stream_mask)
+{
+       struct drm_dp_tunnel_group_state *new_group_state;
+       int i;
+
+       for_each_new_group_in_state(state, new_group_state, i) {
+               int ret;
+
+               ret = drm_dp_tunnel_atomic_check_group_bw(new_group_state,
+                                                         failed_stream_mask);
+               if (ret)
+                       return ret;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_atomic_check_stream_bws);
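
A sketch of the expected atomic_check integration: run the check once all
streams have set their BW, and use @failed_stream_mask to decide which streams
to retry at a lower link BW or bpp (the debug print stands in for that driver
policy):

    /* Sketch: global tunnel BW check step in a driver's atomic_check. */
    static int example_check_tunnel_bw(struct drm_atomic_state *state)
    {
            u32 failed_stream_mask;
            int err;

            err = drm_dp_tunnel_atomic_check_stream_bws(state,
                                                        &failed_stream_mask);
            if (err == -ENOSPC)
                    /*
                     * A driver would reduce the BW of the streams in
                     * failed_stream_mask here and recompute their state.
                     */
                    drm_dbg_kms(state->dev, "BW limit hit for streams %x\n",
                                failed_stream_mask);

            return err;
    }
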
+
+static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr)
+{
+       int i;
+
+       for (i = 0; i < mgr->group_count; i++) {
+               cleanup_group(&mgr->groups[i]);
+               drm_WARN_ON(mgr->dev, !list_empty(&mgr->groups[i].tunnels));
+       }
+
+#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+       ref_tracker_dir_exit(&mgr->ref_tracker);
+#endif
+
+       kfree(mgr->groups);
+       kfree(mgr);
+}
+
+/**
+ * drm_dp_tunnel_mgr_create - Create a DP tunnel manager
+ * @dev: DRM device object
+ * @max_group_count: Maximum number of tunnel groups
+ *
+ * Creates a DP tunnel manager for @dev.
+ *
+ * Returns a pointer to the tunnel manager if created successfully or NULL in
+ * case of an error.
+ */
+struct drm_dp_tunnel_mgr *
+drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
+{
+       struct drm_dp_tunnel_mgr *mgr;
+       int i;
+
+       mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+       if (!mgr)
+               return NULL;
+
+       mgr->dev = dev;
+       init_waitqueue_head(&mgr->bw_req_queue);
+
+       mgr->groups = kcalloc(max_group_count, sizeof(*mgr->groups), GFP_KERNEL);
+       if (!mgr->groups) {
+               kfree(mgr);
+
+               return NULL;
+       }
+
+#ifdef CONFIG_DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
+       ref_tracker_dir_init(&mgr->ref_tracker, 16, "dptun");
+#endif
+
+       for (i = 0; i < max_group_count; i++) {
+               if (!init_group(mgr, &mgr->groups[i])) {
+                       destroy_mgr(mgr);
+
+                       return NULL;
+               }
+
+               mgr->group_count++;
+       }
+
+       return mgr;
+}
+EXPORT_SYMBOL(drm_dp_tunnel_mgr_create);
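
Lifecycle sketch: the manager is created once at driver init, sized by the
maximum number of tunnel groups the device may need (the count of 4 below is
purely illustrative), and torn down again with drm_dp_tunnel_mgr_destroy():

    /* Sketch: tunnel manager lifecycle, assuming at most 4 groups. */
    static struct drm_dp_tunnel_mgr *example_init_tunnels(struct drm_device *dev)
    {
            return drm_dp_tunnel_mgr_create(dev, 4);
    }

    static void example_fini_tunnels(struct drm_dp_tunnel_mgr *mgr)
    {
            if (mgr)
                    drm_dp_tunnel_mgr_destroy(mgr);
    }
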
+
+/**
+ * drm_dp_tunnel_mgr_destroy - Destroy DP tunnel manager
+ * @mgr: Tunnel manager object
+ *
+ * Destroy the tunnel manager.
+ */
+void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr)
+{
+       destroy_mgr(mgr);
+}
+EXPORT_SYMBOL(drm_dp_tunnel_mgr_destroy);
index 3089029abba481828522070dc0063eaa79251bf9..5932024f8f9547e591b3c022a057d298fbcb5f36 100644 (file)
@@ -155,6 +155,20 @@ config DRM_I915_PXP
          protected session and manage the status of the alive software session,
          as well as its life cycle.
 
+config DRM_I915_DP_TUNNEL
+       bool "Enable DP tunnel support"
+       depends on DRM_I915
+       depends on USB4
+       select DRM_DISPLAY_DP_TUNNEL
+       default y
+       help
+         Choose this option to detect DP tunnels and enable the Bandwidth
+         Allocation mode for such tunnels. This allows using the maximum
+         resolution allowed by the link BW on all displays sharing the
+         link BW, for instance on a Thunderbolt link.
+
+         If in doubt, say "Y".
+
 menu "drm/i915 Debugging"
 depends on DRM_I915
 depends on EXPERT
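
For reference, a .config fragment enabling the option added above; the USB4
dependency must be met, and DRM_DISPLAY_DP_TUNNEL is then selected
automatically:

    CONFIG_USB4=y
    CONFIG_DRM_I915=y
    CONFIG_DRM_I915_DP_TUNNEL=y
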
index 5b7162076850c640b1bf6bd6a5690d935fe9e2bb..bc18e2d9ea05d7a8c40697edf63e1349888f4883 100644 (file)
@@ -28,6 +28,7 @@ config DRM_I915_DEBUG
        select STACKDEPOT
        select STACKTRACE
        select DRM_DP_AUX_CHARDEV
+       select DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE if DRM_I915_DP_TUNNEL
        select X86_MSR # used by igt/pm_rpm
        select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
        select DRM_DEBUG_MM if DRM=y
index c13f14edb50889baa604b044d2324a371e444ed5..3ef6ed41e62b4a05af99cb9e91f8dbe250b7a684 100644 (file)
@@ -369,6 +369,9 @@ i915-y += \
        display/vlv_dsi.o \
        display/vlv_dsi_pll.o
 
+i915-$(CONFIG_DRM_I915_DP_TUNNEL) += \
+       display/intel_dp_tunnel.o
+
 i915-y += \
        i915_perf.o
 
index 0589994dde113f562eb0da290cbb71bedbff6065..d0c3880d7f80f8b8e9ad54051a9f9eb596417a29 100644 (file)
@@ -205,7 +205,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
        const char *str;
        u8 val;
 
-       priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (priv == NULL)
                return false;
 
index 6d948520e9a6c22f7f611e1cb6dd27eb7ede5c2f..2e8e85da5a409463ee9806a224bfa08748ca9384 100644 (file)
@@ -216,7 +216,7 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
        u8 vendor, device;
        char *name, *devid;
 
-       ch7xxx = kzalloc(sizeof(struct ch7xxx_priv), GFP_KERNEL);
+       ch7xxx = kzalloc(sizeof(*ch7xxx), GFP_KERNEL);
        if (ch7xxx == NULL)
                return false;
 
index f43d8c610d3f9635e42a6536252a7dede97bbac1..eef72bb3b767cd973627f15c9b33ef8cf3a47ff4 100644 (file)
@@ -267,7 +267,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
        u16 temp;
        int i;
 
-       priv = kzalloc(sizeof(struct ivch_priv), GFP_KERNEL);
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (priv == NULL)
                return false;
 
index a724a8755673c318b110d1dfa33ee87d75a92d1a..1df212fb000ea1c50618b6820419923735bbe632 100644 (file)
@@ -476,7 +476,7 @@ static bool ns2501_init(struct intel_dvo_device *dvo,
        struct ns2501_priv *ns;
        unsigned char ch;
 
-       ns = kzalloc(sizeof(struct ns2501_priv), GFP_KERNEL);
+       ns = kzalloc(sizeof(*ns), GFP_KERNEL);
        if (ns == NULL)
                return false;
 
@@ -551,7 +551,7 @@ static void ns2501_mode_set(struct intel_dvo_device *dvo,
                            const struct drm_display_mode *adjusted_mode)
 {
        const struct ns2501_configuration *conf;
-       struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+       struct ns2501_priv *ns = dvo->dev_priv;
        int mode_idx, i;
 
        DRM_DEBUG_KMS
@@ -655,7 +655,7 @@ static bool ns2501_get_hw_state(struct intel_dvo_device *dvo)
 /* set the NS2501 power state */
 static void ns2501_dpms(struct intel_dvo_device *dvo, bool enable)
 {
-       struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+       struct ns2501_priv *ns = dvo->dev_priv;
 
        DRM_DEBUG_KMS("Trying set the dpms of the DVO to %i\n", enable);
 
index 4acc8ce29c0bd20819ef217efefe1c5020e6ca85..6c461024c8e3952daa12ced1a7306bc268781580 100644 (file)
@@ -141,7 +141,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
        struct sil164_priv *sil;
        unsigned char ch;
 
-       sil = kzalloc(sizeof(struct sil164_priv), GFP_KERNEL);
+       sil = kzalloc(sizeof(*sil), GFP_KERNEL);
        if (sil == NULL)
                return false;
 
index 009d65b0f3e9649c0ca103348454a93ccd18acdc..0939e097f4f97b6a117249b9dd1eee3dd9eb0e57 100644 (file)
@@ -173,7 +173,7 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
        struct tfp410_priv *tfp;
        int id;
 
-       tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL);
+       tfp = kzalloc(sizeof(*tfp), GFP_KERNEL);
        if (tfp == NULL)
                return false;
 
index 11ca9572e8b3c5f904453c38d967d7bafb75ed04..628e7192ebc97e4d896081b0921a59971cbf94d2 100644 (file)
@@ -70,26 +70,25 @@ static const struct cxsr_latency cxsr_latency_table[] = {
        {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
 };
 
-static const struct cxsr_latency *intel_get_cxsr_latency(bool is_desktop,
-                                                        bool is_ddr3,
-                                                        int fsb,
-                                                        int mem)
+static const struct cxsr_latency *intel_get_cxsr_latency(struct drm_i915_private *i915)
 {
-       const struct cxsr_latency *latency;
        int i;
 
-       if (fsb == 0 || mem == 0)
+       if (i915->fsb_freq == 0 || i915->mem_freq == 0)
                return NULL;
 
        for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
-               latency = &cxsr_latency_table[i];
+               const struct cxsr_latency *latency = &cxsr_latency_table[i];
+               bool is_desktop = !IS_MOBILE(i915);
+
                if (is_desktop == latency->is_desktop &&
-                   is_ddr3 == latency->is_ddr3 &&
-                   fsb == latency->fsb_freq && mem == latency->mem_freq)
+                   i915->is_ddr3 == latency->is_ddr3 &&
+                   i915->fsb_freq == latency->fsb_freq &&
+                   i915->mem_freq == latency->mem_freq)
                        return latency;
        }
 
-       DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
+       drm_dbg_kms(&i915->drm, "Unknown FSB/MEM found, disable CxSR\n");
 
        return NULL;
 }
@@ -525,6 +524,7 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
 
 /**
  * intel_calculate_wm - calculate watermark level
+ * @i915: the device
  * @pixel_rate: pixel clock
  * @wm: chip FIFO params
  * @fifo_size: size of the FIFO buffer
@@ -542,7 +542,8 @@ static unsigned int intel_wm_method2(unsigned int pixel_rate,
  * past the watermark point.  If the FIFO drains completely, a FIFO underrun
  * will occur, and a display engine hang could result.
  */
-static unsigned int intel_calculate_wm(int pixel_rate,
+static unsigned int intel_calculate_wm(struct drm_i915_private *i915,
+                                      int pixel_rate,
                                       const struct intel_watermark_params *wm,
                                       int fifo_size, int cpp,
                                       unsigned int latency_ns)
@@ -559,10 +560,10 @@ static unsigned int intel_calculate_wm(int pixel_rate,
                                   latency_ns / 100);
        entries = DIV_ROUND_UP(entries, wm->cacheline_size) +
                wm->guard_size;
-       DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries);
+       drm_dbg_kms(&i915->drm, "FIFO entries required for mode: %d\n", entries);
 
        wm_size = fifo_size - entries;
-       DRM_DEBUG_KMS("FIFO watermark level: %d\n", wm_size);
+       drm_dbg_kms(&i915->drm, "FIFO watermark level: %d\n", wm_size);
 
        /* Don't promote wm_size to unsigned... */
        if (wm_size > wm->max_wm)
@@ -634,10 +635,7 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv)
        u32 reg;
        unsigned int wm;
 
-       latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
-                                        dev_priv->is_ddr3,
-                                        dev_priv->fsb_freq,
-                                        dev_priv->mem_freq);
+       latency = intel_get_cxsr_latency(dev_priv);
        if (!latency) {
                drm_dbg_kms(&dev_priv->drm,
                            "Unknown FSB/MEM found, disable CxSR\n");
@@ -653,7 +651,8 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv)
                int cpp = fb->format->cpp[0];
 
                /* Display SR */
-               wm = intel_calculate_wm(pixel_rate, &pnv_display_wm,
+               wm = intel_calculate_wm(dev_priv, pixel_rate,
+                                       &pnv_display_wm,
                                        pnv_display_wm.fifo_size,
                                        cpp, latency->display_sr);
                reg = intel_uncore_read(&dev_priv->uncore, DSPFW1);
@@ -663,20 +662,23 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv)
                drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg);
 
                /* cursor SR */
-               wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm,
+               wm = intel_calculate_wm(dev_priv, pixel_rate,
+                                       &pnv_cursor_wm,
                                        pnv_display_wm.fifo_size,
                                        4, latency->cursor_sr);
                intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_CURSOR_SR_MASK,
                                 FW_WM(wm, CURSOR_SR));
 
                /* Display HPLL off SR */
-               wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm,
+               wm = intel_calculate_wm(dev_priv, pixel_rate,
+                                       &pnv_display_hplloff_wm,
                                        pnv_display_hplloff_wm.fifo_size,
                                        cpp, latency->display_hpll_disable);
                intel_uncore_rmw(&dev_priv->uncore, DSPFW3, DSPFW_HPLL_SR_MASK, FW_WM(wm, HPLL_SR));
 
                /* cursor HPLL off SR */
-               wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm,
+               wm = intel_calculate_wm(dev_priv, pixel_rate,
+                                       &pnv_cursor_hplloff_wm,
                                        pnv_display_hplloff_wm.fifo_size,
                                        4, latency->cursor_hpll_disable);
                reg = intel_uncore_read(&dev_priv->uncore, DSPFW3);
@@ -2124,7 +2126,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
                else
                        cpp = fb->format->cpp[0];
 
-               planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
+               planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
                                               wm_info, fifo_size, cpp,
                                               pessimal_latency_ns);
        } else {
@@ -2151,7 +2153,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv)
                else
                        cpp = fb->format->cpp[0];
 
-               planeb_wm = intel_calculate_wm(crtc->config->pixel_rate,
+               planeb_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
                                               wm_info, fifo_size, cpp,
                                               pessimal_latency_ns);
        } else {
@@ -2245,7 +2247,7 @@ static void i845_update_wm(struct drm_i915_private *dev_priv)
        if (crtc == NULL)
                return;
 
-       planea_wm = intel_calculate_wm(crtc->config->pixel_rate,
+       planea_wm = intel_calculate_wm(dev_priv, crtc->config->pixel_rate,
                                       &i845_wm_info,
                                       i845_get_fifo_size(dev_priv, PLANE_A),
                                       4, pessimal_latency_ns);
@@ -2531,7 +2533,8 @@ static void ilk_compute_wm_reg_maximums(const struct drm_i915_private *dev_priv,
        max->fbc = ilk_fbc_wm_reg_max(dev_priv);
 }
 
-static bool ilk_validate_wm_level(int level,
+static bool ilk_validate_wm_level(struct drm_i915_private *i915,
+                                 int level,
                                  const struct ilk_wm_maximums *max,
                                  struct intel_wm_level *result)
 {
@@ -2554,14 +2557,17 @@ static bool ilk_validate_wm_level(int level,
         */
        if (level == 0 && !result->enable) {
                if (result->pri_val > max->pri)
-                       DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
-                                     level, result->pri_val, max->pri);
+                       drm_dbg_kms(&i915->drm,
+                                   "Primary WM%d too large %u (max %u)\n",
+                                   level, result->pri_val, max->pri);
                if (result->spr_val > max->spr)
-                       DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
-                                     level, result->spr_val, max->spr);
+                       drm_dbg_kms(&i915->drm,
+                                   "Sprite WM%d too large %u (max %u)\n",
+                                   level, result->spr_val, max->spr);
                if (result->cur_val > max->cur)
-                       DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
-                                     level, result->cur_val, max->cur);
+                       drm_dbg_kms(&i915->drm,
+                                   "Cursor WM%d too large %u (max %u)\n",
+                                   level, result->cur_val, max->cur);
 
                result->pri_val = min_t(u32, result->pri_val, max->pri);
                result->spr_val = min_t(u32, result->spr_val, max->spr);
@@ -2761,7 +2767,7 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv)
        }
 }
 
-static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
+static bool ilk_validate_pipe_wm(struct drm_i915_private *dev_priv,
                                 struct intel_pipe_wm *pipe_wm)
 {
        /* LP0 watermark maximums depend on this pipe alone */
@@ -2776,7 +2782,7 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
        ilk_compute_wm_maximums(dev_priv, 0, &config, INTEL_DDB_PART_1_2, &max);
 
        /* At least LP0 must be valid */
-       if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
+       if (!ilk_validate_wm_level(dev_priv, 0, &max, &pipe_wm->wm[0])) {
                drm_dbg_kms(&dev_priv->drm, "LP0 watermark invalid\n");
                return false;
        }
@@ -2845,7 +2851,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
                 * register maximums since such watermarks are
                 * always invalid.
                 */
-               if (!ilk_validate_wm_level(level, &max, wm)) {
+               if (!ilk_validate_wm_level(dev_priv, level, &max, wm)) {
                        memset(wm, 0, sizeof(*wm));
                        break;
                }
@@ -2976,7 +2982,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
 
                if (level > last_enabled_level)
                        wm->enable = false;
-               else if (!ilk_validate_wm_level(level, max, wm))
+               else if (!ilk_validate_wm_level(dev_priv, level, max, wm))
                        /* make sure all following levels get disabled */
                        last_enabled_level = level - 1;
 
@@ -4016,10 +4022,7 @@ void i9xx_wm_init(struct drm_i915_private *dev_priv)
                g4x_setup_wm_latency(dev_priv);
                dev_priv->display.funcs.wm = &g4x_wm_funcs;
        } else if (IS_PINEVIEW(dev_priv)) {
-               if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
-                                           dev_priv->is_ddr3,
-                                           dev_priv->fsb_freq,
-                                           dev_priv->mem_freq)) {
+               if (!intel_get_cxsr_latency(dev_priv)) {
                        drm_info(&dev_priv->drm,
                                 "failed to find known CxSR latency "
                                 "(found ddr%s fsb freq %d, mem freq %d), "
index ec0d5168b50352b3c375b391ab2ca705f081af55..2bb270f82932eba551e66fa2e264c06e9c01808b 100644 (file)
@@ -29,6 +29,7 @@
  * See intel_atomic_plane.c for the plane-specific atomic functionality.
  */
 
+#include <drm/display/drm_dp_tunnel.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fourcc.h>
@@ -38,6 +39,7 @@
 #include "intel_atomic.h"
 #include "intel_cdclk.h"
 #include "intel_display_types.h"
+#include "intel_dp_tunnel.h"
 #include "intel_global_state.h"
 #include "intel_hdcp.h"
 #include "intel_psr.h"
@@ -258,6 +260,10 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
        if (crtc_state->post_csc_lut)
                drm_property_blob_get(crtc_state->post_csc_lut);
 
+       if (crtc_state->dp_tunnel_ref.tunnel)
+               drm_dp_tunnel_ref_get(crtc_state->dp_tunnel_ref.tunnel,
+                                     &crtc_state->dp_tunnel_ref);
+
        crtc_state->update_pipe = false;
        crtc_state->update_m_n = false;
        crtc_state->update_lrr = false;
@@ -309,6 +315,8 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
 
        __drm_atomic_helper_crtc_destroy_state(&crtc_state->uapi);
        intel_crtc_free_hw_state(crtc_state);
+       if (crtc_state->dp_tunnel_ref.tunnel)
+               drm_dp_tunnel_ref_put(&crtc_state->dp_tunnel_ref);
        kfree(crtc_state);
 }
 
@@ -344,6 +352,8 @@ void intel_atomic_state_clear(struct drm_atomic_state *s)
        /* state->internal not reset on purpose */
 
        state->dpll_set = state->modeset = false;
+
+       intel_dp_tunnel_atomic_cleanup_inherited_state(state);
 }
 
 struct intel_crtc_state *
index 5f04e495fd27563342808951dabd72089b37313e..fe52c06271ef0588647b4cd68b3bee5689747f29 100644 (file)
@@ -1759,7 +1759,8 @@ parse_mipi_config(struct drm_i915_private *i915,
 
 /* Find the sequence block and size for the given panel. */
 static const u8 *
-find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
+find_panel_sequence_block(struct drm_i915_private *i915,
+                         const struct bdb_mipi_sequence *sequence,
                          u16 panel_id, u32 *seq_size)
 {
        u32 total = get_blocksize(sequence);
@@ -1776,7 +1777,7 @@ find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
 
        for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) {
                if (index + header_size > total) {
-                       DRM_ERROR("Invalid sequence block (header)\n");
+                       drm_err(&i915->drm, "Invalid sequence block (header)\n");
                        return NULL;
                }
 
@@ -1789,7 +1790,7 @@ find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
                index += header_size;
 
                if (index + current_size > total) {
-                       DRM_ERROR("Invalid sequence block\n");
+                       drm_err(&i915->drm, "Invalid sequence block\n");
                        return NULL;
                }
 
@@ -1801,12 +1802,13 @@ find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
                index += current_size;
        }
 
-       DRM_ERROR("Sequence block detected but no valid configuration\n");
+       drm_err(&i915->drm, "Sequence block detected but no valid configuration\n");
 
        return NULL;
 }
 
-static int goto_next_sequence(const u8 *data, int index, int total)
+static int goto_next_sequence(struct drm_i915_private *i915,
+                             const u8 *data, int index, int total)
 {
        u16 len;
 
@@ -1836,7 +1838,7 @@ static int goto_next_sequence(const u8 *data, int index, int total)
                        len = *(data + index + 6) + 7;
                        break;
                default:
-                       DRM_ERROR("Unknown operation byte\n");
+                       drm_err(&i915->drm, "Unknown operation byte\n");
                        return 0;
                }
        }
@@ -1844,7 +1846,8 @@ static int goto_next_sequence(const u8 *data, int index, int total)
        return 0;
 }
 
-static int goto_next_sequence_v3(const u8 *data, int index, int total)
+static int goto_next_sequence_v3(struct drm_i915_private *i915,
+                                const u8 *data, int index, int total)
 {
        int seq_end;
        u16 len;
@@ -1855,7 +1858,7 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
         * checking on the structure.
         */
        if (total < 5) {
-               DRM_ERROR("Too small sequence size\n");
+               drm_err(&i915->drm, "Too small sequence size\n");
                return 0;
        }
 
@@ -1872,7 +1875,7 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
 
        seq_end = index + size_of_sequence;
        if (seq_end > total) {
-               DRM_ERROR("Invalid sequence size\n");
+               drm_err(&i915->drm, "Invalid sequence size\n");
                return 0;
        }
 
@@ -1882,7 +1885,7 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
 
                if (operation_byte == MIPI_SEQ_ELEM_END) {
                        if (index != seq_end) {
-                               DRM_ERROR("Invalid element structure\n");
+                               drm_err(&i915->drm, "Invalid element structure\n");
                                return 0;
                        }
                        return index;
@@ -1904,8 +1907,8 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
                case MIPI_SEQ_ELEM_PMIC:
                        break;
                default:
-                       DRM_ERROR("Unknown operation byte %u\n",
-                                 operation_byte);
+                       drm_err(&i915->drm, "Unknown operation byte %u\n",
+                               operation_byte);
                        break;
                }
        }
@@ -2030,7 +2033,7 @@ parse_mipi_sequence(struct drm_i915_private *i915,
        drm_dbg(&i915->drm, "Found MIPI sequence block v%u\n",
                sequence->version);
 
-       seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
+       seq_data = find_panel_sequence_block(i915, sequence, panel_type, &seq_size);
        if (!seq_data)
                return;
 
@@ -2058,9 +2061,9 @@ parse_mipi_sequence(struct drm_i915_private *i915,
                panel->vbt.dsi.sequence[seq_id] = data + index;
 
                if (sequence->version >= 3)
-                       index = goto_next_sequence_v3(data, index, seq_size);
+                       index = goto_next_sequence_v3(i915, data, index, seq_size);
                else
-                       index = goto_next_sequence(data, index, seq_size);
+                       index = goto_next_sequence(i915, data, index, seq_size);
                if (!index) {
                        drm_err(&i915->drm, "Invalid sequence %u\n",
                                seq_id);
@@ -2135,12 +2138,13 @@ parse_compression_parameters(struct drm_i915_private *i915)
        }
 }
 
-static u8 translate_iboost(u8 val)
+static u8 translate_iboost(struct drm_i915_private *i915, u8 val)
 {
        static const u8 mapping[] = { 1, 3, 7 }; /* See VBT spec */
 
        if (val >= ARRAY_SIZE(mapping)) {
-               DRM_DEBUG_KMS("Unsupported I_boost value found in VBT (%d), display may not work properly\n", val);
+               drm_dbg_kms(&i915->drm,
+                           "Unsupported I_boost value found in VBT (%d), display may not work properly\n", val);
                return 0;
        }
        return mapping[val];
@@ -2897,12 +2901,14 @@ static const struct bdb_header *get_bdb_header(const struct vbt_header *vbt)
 
 /**
  * intel_bios_is_valid_vbt - does the given buffer contain a valid VBT
+ * @i915:      the device
  * @buf:       pointer to a buffer to validate
  * @size:      size of the buffer
  *
  * Returns true on valid VBT.
  */
-bool intel_bios_is_valid_vbt(const void *buf, size_t size)
+bool intel_bios_is_valid_vbt(struct drm_i915_private *i915,
+                            const void *buf, size_t size)
 {
        const struct vbt_header *vbt = buf;
        const struct bdb_header *bdb;
@@ -2911,17 +2917,17 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
                return false;
 
        if (sizeof(struct vbt_header) > size) {
-               DRM_DEBUG_DRIVER("VBT header incomplete\n");
+               drm_dbg_kms(&i915->drm, "VBT header incomplete\n");
                return false;
        }
 
        if (memcmp(vbt->signature, "$VBT", 4)) {
-               DRM_DEBUG_DRIVER("VBT invalid signature\n");
+               drm_dbg_kms(&i915->drm, "VBT invalid signature\n");
                return false;
        }
 
        if (vbt->vbt_size > size) {
-               DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n");
+               drm_dbg_kms(&i915->drm, "VBT incomplete (vbt_size overflows)\n");
                return false;
        }
 
@@ -2931,13 +2937,13 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size)
                              vbt->bdb_offset,
                              sizeof(struct bdb_header),
                              size)) {
-               DRM_DEBUG_DRIVER("BDB header incomplete\n");
+               drm_dbg_kms(&i915->drm, "BDB header incomplete\n");
                return false;
        }
 
        bdb = get_bdb_header(vbt);
        if (range_overflows_t(size_t, vbt->bdb_offset, bdb->bdb_size, size)) {
-               DRM_DEBUG_DRIVER("BDB incomplete\n");
+               drm_dbg_kms(&i915->drm, "BDB incomplete\n");
                return false;
        }
 
@@ -2989,7 +2995,7 @@ static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915)
        for (count = 0; count < vbt_size; count += 4)
                *(vbt + store++) = intel_spi_read(&i915->uncore, found + count);
 
-       if (!intel_bios_is_valid_vbt(vbt, vbt_size))
+       if (!intel_bios_is_valid_vbt(i915, vbt, vbt_size))
                goto err_free_vbt;
 
        drm_dbg_kms(&i915->drm, "Found valid VBT in SPI flash\n");
@@ -3046,7 +3052,7 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915)
 
        memcpy_fromio(vbt, p, vbt_size);
 
-       if (!intel_bios_is_valid_vbt(vbt, vbt_size))
+       if (!intel_bios_is_valid_vbt(i915, vbt, vbt_size))
                goto err_free_vbt;
 
        pci_unmap_rom(pdev, oprom);
@@ -3398,6 +3404,7 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
                     struct dsc_compression_parameters_entry *dsc,
                     int dsc_max_bpc)
 {
+       struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
        struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
        int bpc = 8;
 
@@ -3411,8 +3418,8 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
        else if (dsc->support_8bpc && dsc_max_bpc >= 8)
                bpc = 8;
        else
-               DRM_DEBUG_KMS("VBT: Unsupported BPC %d for DCS\n",
-                             dsc_max_bpc);
+               drm_dbg_kms(&i915->drm, "VBT: Unsupported BPC %d for DSC\n",
+                           dsc_max_bpc);
 
        crtc_state->pipe_bpp = bpc * 3;
 
@@ -3432,16 +3439,16 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
        } else {
                /* FIXME */
                if (!(dsc->slices_per_line & BIT(0)))
-                       DRM_DEBUG_KMS("VBT: Unsupported DSC slice count for DSI\n");
+                       drm_dbg_kms(&i915->drm, "VBT: Unsupported DSC slice count for DSI\n");
 
                crtc_state->dsc.slice_count = 1;
        }
 
        if (crtc_state->hw.adjusted_mode.crtc_hdisplay %
            crtc_state->dsc.slice_count != 0)
-               DRM_DEBUG_KMS("VBT: DSC hdisplay %d not divisible by slice count %d\n",
-                             crtc_state->hw.adjusted_mode.crtc_hdisplay,
-                             crtc_state->dsc.slice_count);
+               drm_dbg_kms(&i915->drm, "VBT: DSC hdisplay %d not divisible by slice count %d\n",
+                           crtc_state->hw.adjusted_mode.crtc_hdisplay,
+                           crtc_state->dsc.slice_count);
 
        /*
         * The VBT rc_buffer_block_size and rc_buffer_size definitions
@@ -3597,7 +3604,7 @@ int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata)
        if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
                return 0;
 
-       return translate_iboost(devdata->child.dp_iboost_level);
+       return translate_iboost(devdata->i915, devdata->child.dp_iboost_level);
 }
 
 int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
@@ -3605,7 +3612,7 @@ int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata)
        if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)
                return 0;
 
-       return translate_iboost(devdata->child.hdmi_iboost_level);
+       return translate_iboost(devdata->i915, devdata->child.hdmi_iboost_level);
 }
 
 int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata)
index 41bfb009d4b047069b869d595a1ec9468a0210c9..06a51be4afd89ba7a18212b3c9cf1d3ad2c847e3 100644 (file)
@@ -242,7 +242,8 @@ void intel_bios_init_panel_late(struct drm_i915_private *dev_priv,
                                const struct drm_edid *drm_edid);
 void intel_bios_fini_panel(struct intel_panel *panel);
 void intel_bios_driver_remove(struct drm_i915_private *dev_priv);
-bool intel_bios_is_valid_vbt(const void *buf, size_t size);
+bool intel_bios_is_valid_vbt(struct drm_i915_private *i915,
+                            const void *buf, size_t size);
 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
 bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
index 26200ee3e23f9fed708b5764b4f06a4aa4a7037a..ed89b86ea625aaa408064916b982ffa92f9ef4b5 100644 (file)
  * DMC will not change the active CDCLK frequency however, so that part
  * will still be performed by the driver directly.
  *
+ * Several methods exist to change the CDCLK frequency; which ones are
+ * supported depends on the platform:
+ *
+ * - Full PLL disable + re-enable with new VCO frequency. Pipes must be inactive.
+ * - CD2X divider update. Single pipe can be active as the divider update
+ *   can be synchronized with the pipe's start of vblank.
+ * - Crawl the PLL smoothly to the new VCO frequency. Pipes can be active.
+ * - Squash waveform update. Pipes can be active.
+ * - Crawl and squash can also be done back to back. Pipes can be active.
+ *
  * RAWCLK is a fixed frequency clock, often used by various auxiliary
  * blocks such as AUX CH or backlight PWM. Hence the only thing we
  * really need to know about RAWCLK is its frequency so that various
@@ -1406,6 +1416,20 @@ static const struct intel_cdclk_vals lnl_cdclk_table[] = {
        {}
 };
 
+static const int cdclk_squash_len = 16;
+
+static int cdclk_squash_divider(u16 waveform)
+{
+       return hweight16(waveform ?: 0xffff);
+}
+
+static int cdclk_divider(int cdclk, int vco, u16 waveform)
+{
+       /* 2 * cd2x divider */
+       return DIV_ROUND_CLOSEST(vco * cdclk_squash_divider(waveform),
+                                cdclk * cdclk_squash_len);
+}
+
 static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
 {
        const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
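
A worked example for the cdclk_divider() helper added above (numbers purely
illustrative): with vco = 4000, a full squash waveform (0xffff, popcount 16)
and cdclk = 1000 it returns DIV_ROUND_CLOSEST(4000 * 16, 1000 * 16) = 4, i.e.
twice a cd2x divider of 2; a half-populated waveform (popcount 8) at cdclk =
500 yields the same 4, which is why the midpoint computation further below can
insist (drm_WARN_ON) that the divider stays constant across a crawl-and-squash
transition.
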
@@ -1744,10 +1768,10 @@ static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe
 }
 
 static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
-                                 int cdclk, int vco)
+                                 int cdclk, int vco, u16 waveform)
 {
        /* cdclk = vco / 2 / div{1,1.5,2,4} */
-       switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
+       switch (cdclk_divider(cdclk, vco, waveform)) {
        default:
                drm_WARN_ON(&dev_priv->drm,
                            cdclk != dev_priv->display.cdclk.hw.bypass);
@@ -1764,7 +1788,7 @@ static u32 bxt_cdclk_cd2x_div_sel(struct drm_i915_private *dev_priv,
        }
 }
 
-static u32 cdclk_squash_waveform(struct drm_i915_private *dev_priv,
+static u16 cdclk_squash_waveform(struct drm_i915_private *dev_priv,
                                 int cdclk)
 {
        const struct intel_cdclk_vals *table = dev_priv->display.cdclk.table;
@@ -1826,20 +1850,13 @@ static bool cdclk_pll_is_unknown(unsigned int vco)
        return vco == ~0;
 }
 
-static const int cdclk_squash_len = 16;
-
-static int cdclk_squash_divider(u16 waveform)
-{
-       return hweight16(waveform ?: 0xffff);
-}
-
 static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i915,
                                                    const struct intel_cdclk_config *old_cdclk_config,
                                                    const struct intel_cdclk_config *new_cdclk_config,
                                                    struct intel_cdclk_config *mid_cdclk_config)
 {
        u16 old_waveform, new_waveform, mid_waveform;
-       int div = 2;
+       int old_div, new_div, mid_div;
 
        /* Return if PLL is in an unknown state, force a complete disable and re-enable. */
        if (cdclk_pll_is_unknown(old_cdclk_config->vco))
@@ -1858,6 +1875,18 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91
            old_waveform == new_waveform)
                return false;
 
+       old_div = cdclk_divider(old_cdclk_config->cdclk,
+                               old_cdclk_config->vco, old_waveform);
+       new_div = cdclk_divider(new_cdclk_config->cdclk,
+                               new_cdclk_config->vco, new_waveform);
+
+       /*
+        * Should not happen currently. We might need more midpoint
+        * transitions if we need to also change the cd2x divider.
+        */
+       if (drm_WARN_ON(&i915->drm, old_div != new_div))
+               return false;
+
        *mid_cdclk_config = *new_cdclk_config;
 
        /*
@@ -1870,15 +1899,17 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91
 
        if (cdclk_squash_divider(new_waveform) > cdclk_squash_divider(old_waveform)) {
                mid_cdclk_config->vco = old_cdclk_config->vco;
+               mid_div = old_div;
                mid_waveform = new_waveform;
        } else {
                mid_cdclk_config->vco = new_cdclk_config->vco;
+               mid_div = new_div;
                mid_waveform = old_waveform;
        }
 
        mid_cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_squash_divider(mid_waveform) *
                                                    mid_cdclk_config->vco,
-                                                   cdclk_squash_len * div);
+                                                   cdclk_squash_len * mid_div);
 
        /* make sure the mid clock came out sane */
 
@@ -1906,16 +1937,12 @@ static u32 bxt_cdclk_ctl(struct drm_i915_private *i915,
 {
        int cdclk = cdclk_config->cdclk;
        int vco = cdclk_config->vco;
-       int unsquashed_cdclk;
        u16 waveform;
        u32 val;
 
        waveform = cdclk_squash_waveform(i915, cdclk);
 
-       unsquashed_cdclk = DIV_ROUND_CLOSEST(cdclk * cdclk_squash_len,
-                                            cdclk_squash_divider(waveform));
-
-       val = bxt_cdclk_cd2x_div_sel(i915, unsquashed_cdclk, vco) |
+       val = bxt_cdclk_cd2x_div_sel(i915, cdclk, vco, waveform) |
                bxt_cdclk_cd2x_pipe(i915, pipe);
 
        /*
index c5092b7e87d52b1801fa776500d7f2817aac7374..ca7112b32cb3efd4d71bbeeb3753f5afacefd125 100644 (file)
@@ -2111,7 +2111,8 @@ static u32 intel_degamma_lut_size(const struct intel_crtc_state *crtc_state)
        return DISPLAY_INFO(i915)->color.degamma_lut_size;
 }
 
-static int check_lut_size(const struct drm_property_blob *lut, int expected)
+static int check_lut_size(struct drm_i915_private *i915,
+                         const struct drm_property_blob *lut, int expected)
 {
        int len;
 
@@ -2120,8 +2121,8 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected)
 
        len = drm_color_lut_size(lut);
        if (len != expected) {
-               DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n",
-                             len, expected);
+               drm_dbg_kms(&i915->drm, "Invalid LUT size; got %d, expected %d\n",
+                           len, expected);
                return -EINVAL;
        }
 
@@ -2146,8 +2147,8 @@ static int _check_luts(const struct intel_crtc_state *crtc_state,
        degamma_length = intel_degamma_lut_size(crtc_state);
        gamma_length = intel_gamma_lut_size(crtc_state);
 
-       if (check_lut_size(degamma_lut, degamma_length) ||
-           check_lut_size(gamma_lut, gamma_length))
+       if (check_lut_size(i915, degamma_lut, degamma_length) ||
+           check_lut_size(i915, gamma_lut, gamma_length))
                return -EINVAL;
 
        if (drm_color_lut_check(degamma_lut, degamma_tests) ||
index b9733a73e21d4357e5716ccc06522fb7ce84eebd..93479db0f89f63bfd14c704460d8b0bb81b8f62b 100644 (file)
@@ -933,6 +933,9 @@ static int intel_crt_get_modes(struct drm_connector *connector)
        struct i2c_adapter *ddc;
        int ret;
 
+       if (!intel_display_driver_check_access(dev_priv))
+               return drm_edid_connector_add_modes(connector);
+
        wakeref = intel_display_power_get(dev_priv,
                                          intel_encoder->power_domain);
 
index 288a00e083c87596f46e1fe435b7f5644fc58c13..64e0f820a789a436452098da3f8071fb7408f0c4 100644 (file)
@@ -848,10 +848,10 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr3 = {
 static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = {
        .clock = 1000000, /* 10 Gbps */
        .tx = { 0xbe21, /* tx cfg0 */
-               0x4800, /* tx cfg1 */
+               0xe800, /* tx cfg1 */
                0x0000, /* tx cfg2 */
                },
-       .cmn = {0x0500, /* cmn cfg0*/
+       .cmn = {0x0700, /* cmn cfg0*/
                0x0005, /* cmn cfg1 */
                0x0000, /* cmn cfg2 */
                0x0000, /* cmn cfg3 */
@@ -1641,7 +1641,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_594 = {
 static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
        .clock = 3000000,
        .tx = {  0xbe98, /* tx cfg0 */
-                 0x9800, /* tx cfg1 */
+                 0x8800, /* tx cfg1 */
                  0x0000, /* tx cfg2 */
                },
        .cmn = { 0x0500, /* cmn cfg0*/
@@ -1649,8 +1649,8 @@ static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
                  0x0000, /* cmn cfg2 */
                  0x0000, /* cmn cfg3 */
                },
-       .mpllb = { 0x209c,      /* mpllb cfg0 */
-                  0x7d10,      /* mpllb cfg1 */
+       .mpllb = { 0x309c,      /* mpllb cfg0 */
+                  0x2110,      /* mpllb cfg1 */
                   0xca06,      /* mpllb cfg2 */
                   0xbe40,      /* mpllb cfg3 */
                   0x0000,      /* mpllb cfg4 */
@@ -1666,7 +1666,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
 static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
        .clock = 6000000,
        .tx = {  0xbe98, /* tx cfg0 */
-                 0x9800, /* tx cfg1 */
+                 0x8800, /* tx cfg1 */
                  0x0000, /* tx cfg2 */
                },
        .cmn = { 0x0500, /* cmn cfg0*/
@@ -1674,8 +1674,8 @@ static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
                  0x0000, /* cmn cfg2 */
                  0x0000, /* cmn cfg3 */
                },
-       .mpllb = { 0x009c,      /* mpllb cfg0 */
-                  0x7d08,      /* mpllb cfg1 */
+       .mpllb = { 0x109c,      /* mpllb cfg0 */
+                  0x2108,      /* mpllb cfg1 */
                   0xca06,      /* mpllb cfg2 */
                   0xbe40,      /* mpllb cfg3 */
                   0x0000,      /* mpllb cfg4 */
@@ -1691,7 +1691,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
 static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
        .clock = 8000000,
        .tx = {  0xbe98, /* tx cfg0 */
-                 0x9800, /* tx cfg1 */
+                 0x8800, /* tx cfg1 */
                  0x0000, /* tx cfg2 */
                },
        .cmn = { 0x0500, /* cmn cfg0*/
@@ -1699,8 +1699,8 @@ static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
                  0x0000, /* cmn cfg2 */
                  0x0000, /* cmn cfg3 */
                },
-       .mpllb = { 0x00d0,      /* mpllb cfg0 */
-                  0x7d08,      /* mpllb cfg1 */
+       .mpllb = { 0x10d0,      /* mpllb cfg0 */
+                  0x2108,      /* mpllb cfg1 */
                   0x4a06,      /* mpllb cfg2 */
                   0xbe40,      /* mpllb cfg3 */
                   0x0000,      /* mpllb cfg4 */
@@ -1716,7 +1716,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
 static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
        .clock = 10000000,
        .tx = {  0xbe98, /* tx cfg0 */
-                 0x9800, /* tx cfg1 */
+                 0x8800, /* tx cfg1 */
                  0x0000, /* tx cfg2 */
                },
        .cmn = { 0x0500, /* cmn cfg0*/
@@ -1725,7 +1725,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
                  0x0000, /* cmn cfg3 */
                },
        .mpllb = { 0x1104,      /* mpllb cfg0 */
-                  0x7d08,      /* mpllb cfg1 */
+                  0x2108,      /* mpllb cfg1 */
                   0x0a06,      /* mpllb cfg2 */
                   0xbe40,      /* mpllb cfg3 */
                   0x0000,      /* mpllb cfg4 */
@@ -1741,7 +1741,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
 static const struct intel_c20pll_state mtl_c20_hdmi_1200 = {
        .clock = 12000000,
        .tx = {  0xbe98, /* tx cfg0 */
-                 0x9800, /* tx cfg1 */
+                 0x8800, /* tx cfg1 */
                  0x0000, /* tx cfg2 */
                },
        .cmn = { 0x0500, /* cmn cfg0*/
@@ -1749,8 +1749,8 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1200 = {
                  0x0000, /* cmn cfg2 */
                  0x0000, /* cmn cfg3 */
                },
-       .mpllb = { 0x0138,      /* mpllb cfg0 */
-                  0x7d08,      /* mpllb cfg1 */
+       .mpllb = { 0x1138,      /* mpllb cfg0 */
+                  0x2108,      /* mpllb cfg1 */
                   0x5486,      /* mpllb cfg2 */
                   0xfe40,      /* mpllb cfg3 */
                   0x0000,      /* mpllb cfg4 */
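
The mpllb cfg0/cfg1 (and tx cfg1) changes above retune the MTL C20 PHY PLL tables for the HDMI rates; at runtime the driver just picks the precomputed state whose .clock key matches the requested rate. A minimal sketch of that table lookup, with stand-in types carrying only the fields this hunk touches (the real intel_c20pll_state holds the full tx/cmn/mpllb register sets, and the unit of .clock follows the driver's convention):

    #include <stddef.h>
    #include <stdio.h>

    struct pll_state {
        int clock;                  /* rate key, as in intel_c20pll_state */
        unsigned short mpllb_cfg0;
        unsigned short mpllb_cfg1;
    };

    /* Values taken from the corrected tables in the hunk above. */
    static const struct pll_state hdmi_tables[] = {
        {  6000000, 0x109c, 0x2108 },
        {  8000000, 0x10d0, 0x2108 },
        { 10000000, 0x1104, 0x2108 },
        { 12000000, 0x1138, 0x2108 },
    };

    static const struct pll_state *find_pll_state(int clock)
    {
        size_t i;

        for (i = 0; i < sizeof(hdmi_tables) / sizeof(hdmi_tables[0]); i++)
            if (hdmi_tables[i].clock == clock)
                return &hdmi_tables[i];
        return NULL;                /* caller computes a state instead */
    }

    int main(void)
    {
        const struct pll_state *s = find_pll_state(8000000);

        if (s)
            printf("mpllb cfg0=0x%04x cfg1=0x%04x\n",
                   s->mpllb_cfg0, s->mpllb_cfg1);
        return 0;
    }
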
index bea441590204485c5043987275eafb118cf7ea3f..c587a8efeafcf5e561429d925c2893208908e03f 100644 (file)
@@ -54,6 +54,7 @@
 #include "intel_dp_aux.h"
 #include "intel_dp_link_training.h"
 #include "intel_dp_mst.h"
+#include "intel_dp_tunnel.h"
 #include "intel_dpio_phy.h"
 #include "intel_dsi.h"
 #include "intel_fdi.h"
@@ -4150,7 +4151,7 @@ static void intel_ddi_sync_state(struct intel_encoder *encoder,
                intel_tc_port_sanitize_mode(enc_to_dig_port(encoder),
                                            crtc_state);
 
-       if (crtc_state && intel_crtc_has_dp_encoder(crtc_state))
+       if (intel_encoder_is_dp(encoder))
                intel_dp_sync_state(encoder, crtc_state);
 }
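
The condition change above means intel_dp_sync_state() is now reached even when the encoder has no CRTC state (crtc_state == NULL). That is deliberate: the intel_dp.c hunk further down drops the early !crtc_state return from intel_dp_sync_state() and instead guards the DPCD read and the max-link-params reset individually, so that intel_dp_tunnel_resume() can run for inactive outputs too.
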
 
index 7db0655d8c9e020d678909696bf6bcfca4bb8d9e..ab2f52d21bad8bad22c184cce6aefac8b0ce5a29 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/string_helpers.h>
 
 #include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dp_tunnel.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_atomic_uapi.h>
@@ -73,6 +74,7 @@
 #include "intel_dp.h"
 #include "intel_dp_link_training.h"
 #include "intel_dp_mst.h"
+#include "intel_dp_tunnel.h"
 #include "intel_dpll.h"
 #include "intel_dpll_mgr.h"
 #include "intel_dpt.h"
@@ -2478,7 +2480,7 @@ intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
        u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock);
        u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16,
                                                  bw_overhead);
-       u32 data_n = intel_dp_max_data_rate(link_clock, nlanes);
+       u32 data_n = drm_dp_max_dprx_data_rate(link_clock, nlanes);
 
        /*
         * Windows/BIOS uses fixed M/N values always. Follow suit.
@@ -4490,6 +4492,8 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
        saved_state->crc_enabled = slave_crtc_state->crc_enabled;
 
        intel_crtc_free_hw_state(slave_crtc_state);
+       if (slave_crtc_state->dp_tunnel_ref.tunnel)
+               drm_dp_tunnel_ref_put(&slave_crtc_state->dp_tunnel_ref);
        memcpy(slave_crtc_state, saved_state, sizeof(*slave_crtc_state));
        kfree(saved_state);
 
@@ -4505,6 +4509,10 @@ copy_bigjoiner_crtc_state_modeset(struct intel_atomic_state *state,
                      &master_crtc_state->hw.adjusted_mode);
        slave_crtc_state->hw.scaling_filter = master_crtc_state->hw.scaling_filter;
 
+       if (master_crtc_state->dp_tunnel_ref.tunnel)
+               drm_dp_tunnel_ref_get(master_crtc_state->dp_tunnel_ref.tunnel,
+                                     &slave_crtc_state->dp_tunnel_ref);
+
        copy_bigjoiner_crtc_state_nomodeset(state, slave_crtc);
 
        slave_crtc_state->uapi.mode_changed = master_crtc_state->uapi.mode_changed;
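
Note the ordering across the two hunks above: the slave CRTC state's old tunnel reference is dropped before the memcpy() from the saved state, and a new reference against the master's tunnel is taken only afterwards. memcpy() alone would duplicate the tunnel pointer without touching its refcount, so the explicit put/get pair keeps the count equal to the number of CRTC states that actually point at the tunnel.
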
@@ -4533,6 +4541,8 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
        /* free the old crtc_state->hw members */
        intel_crtc_free_hw_state(crtc_state);
 
+       intel_dp_tunnel_atomic_clear_stream_bw(state, crtc_state);
+
        /* FIXME: before the switch to atomic started, a new pipe_config was
         * kzalloc'd. Code that depends on any field being zero should be
         * fixed, so that the crtc_state can be safely duplicated. For now,
@@ -4851,10 +4861,12 @@ memcmp_diff_len(const u8 *a, const u8 *b, size_t len)
 }
 
 static void
-pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
-                           bool fastset, const char *name,
+pipe_config_buffer_mismatch(bool fastset, const struct intel_crtc *crtc,
+                           const char *name,
                            const u8 *a, const u8 *b, size_t len)
 {
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
        if (fastset) {
                if (!drm_debug_enabled(DRM_UT_KMS))
                        return;
@@ -4863,7 +4875,8 @@ pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
                len = memcmp_diff_len(a, b, len);
 
                drm_dbg_kms(&dev_priv->drm,
-                           "fastset requirement not met in %s buffer\n", name);
+                           "[CRTC:%d:%s] fastset requirement not met in %s buffer\n",
+                           crtc->base.base.id, crtc->base.name, name);
                print_hex_dump(KERN_DEBUG, "expected: ", DUMP_PREFIX_NONE,
                               16, 0, a, len, false);
                print_hex_dump(KERN_DEBUG, "found: ", DUMP_PREFIX_NONE,
@@ -4872,7 +4885,8 @@ pipe_config_buffer_mismatch(struct drm_i915_private *dev_priv,
                /* only dump up to the last difference */
                len = memcmp_diff_len(a, b, len);
 
-               drm_err(&dev_priv->drm, "mismatch in %s buffer\n", name);
+               drm_err(&dev_priv->drm, "[CRTC:%d:%s] mismatch in %s buffer\n",
+                       crtc->base.base.id, crtc->base.name, name);
                print_hex_dump(KERN_ERR, "expected: ", DUMP_PREFIX_NONE,
                               16, 0, a, len, false);
                print_hex_dump(KERN_ERR, "found: ", DUMP_PREFIX_NONE,
@@ -4903,18 +4917,34 @@ pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
        va_end(args);
 }
 
-static bool fastboot_enabled(struct drm_i915_private *dev_priv)
+static void
+pipe_config_pll_mismatch(bool fastset,
+                        const struct intel_crtc *crtc,
+                        const char *name,
+                        const struct intel_dpll_hw_state *a,
+                        const struct intel_dpll_hw_state *b)
 {
-       /* Enable fastboot by default on Skylake and newer */
-       if (DISPLAY_VER(dev_priv) >= 9)
-               return true;
+       struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 
-       /* Enable fastboot by default on VLV and CHV */
-       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
-               return true;
+       if (fastset) {
+               if (!drm_debug_enabled(DRM_UT_KMS))
+                       return;
 
-       /* Disabled by default on all others */
-       return false;
+               drm_dbg_kms(&i915->drm,
+                           "[CRTC:%d:%s] fastset requirement not met in %s\n",
+                           crtc->base.base.id, crtc->base.name, name);
+               drm_dbg_kms(&i915->drm, "expected:\n");
+               intel_dpll_dump_hw_state(i915, a);
+               drm_dbg_kms(&i915->drm, "found:\n");
+               intel_dpll_dump_hw_state(i915, b);
+       } else {
+               drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s\n",
+                       crtc->base.base.id, crtc->base.name, name);
+               drm_err(&i915->drm, "expected:\n");
+               intel_dpll_dump_hw_state(i915, a);
+               drm_err(&i915->drm, "found:\n");
+               intel_dpll_dump_hw_state(i915, b);
+       }
 }
 
 bool
@@ -4925,14 +4955,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
        struct drm_i915_private *dev_priv = to_i915(current_config->uapi.crtc->dev);
        struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
        bool ret = true;
-       bool fixup_inherited = fastset &&
-               current_config->inherited && !pipe_config->inherited;
-
-       if (fixup_inherited && !fastboot_enabled(dev_priv)) {
-               drm_dbg_kms(&dev_priv->drm,
-                           "initial modeset and fastboot not set\n");
-               ret = false;
-       }
 
 #define PIPE_CONF_CHECK_X(name) do { \
        if (current_config->name != pipe_config->name) { \
@@ -5012,7 +5034,17 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
        } \
 } while (0)
 
-#define PIPE_CONF_CHECK_TIMINGS(name) do { \
+#define PIPE_CONF_CHECK_PLL(name) do { \
+       if (!intel_dpll_compare_hw_state(dev_priv, &current_config->name, \
+                                        &pipe_config->name)) { \
+               pipe_config_pll_mismatch(fastset, crtc, __stringify(name), \
+                                        &current_config->name, \
+                                        &pipe_config->name); \
+               ret = false; \
+       } \
+} while (0)
+
+#define PIPE_CONF_CHECK_TIMINGS(name) do {     \
        PIPE_CONF_CHECK_I(name.crtc_hdisplay); \
        PIPE_CONF_CHECK_I(name.crtc_htotal); \
        PIPE_CONF_CHECK_I(name.crtc_hblank_start); \
@@ -5071,7 +5103,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
        BUILD_BUG_ON(sizeof(current_config->name) != (len)); \
        BUILD_BUG_ON(sizeof(pipe_config->name) != (len)); \
        if (!intel_compare_buffer(current_config->name, pipe_config->name, (len))) { \
-               pipe_config_buffer_mismatch(dev_priv, fastset, __stringify(name), \
+               pipe_config_buffer_mismatch(fastset, crtc, __stringify(name), \
                                            current_config->name, \
                                            pipe_config->name, \
                                            (len)); \
@@ -5215,42 +5247,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
 
        PIPE_CONF_CHECK_BOOL(double_wide);
 
-       if (dev_priv->display.dpll.mgr) {
+       if (dev_priv->display.dpll.mgr)
                PIPE_CONF_CHECK_P(shared_dpll);
 
-               PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
-               PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
-               PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
-               PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
-               PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
-               PIPE_CONF_CHECK_X(dpll_hw_state.spll);
-               PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
-               PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
-               PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
-               PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
-               PIPE_CONF_CHECK_X(dpll_hw_state.div0);
-               PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
-               PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
-               PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
-               PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
-               PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
-               PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
-               PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
-               PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
-               PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
-               PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
-               PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
-               PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
-               PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
-               PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
-               PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
-               PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
-               PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
-               PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
-               PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
-               PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
-               PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
-       }
+       /* FIXME convert everything over to the dpll_mgr */
+       if (dev_priv->display.dpll.mgr || HAS_GMCH(dev_priv))
+               PIPE_CONF_CHECK_PLL(dpll_hw_state);
 
        PIPE_CONF_CHECK_X(dsi_pll.ctrl);
        PIPE_CONF_CHECK_X(dsi_pll.div);
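
This hunk collapses some thirty per-field PIPE_CONF_CHECK_X(dpll_hw_state.*) lines into a single PIPE_CONF_CHECK_PLL(), pushing the field-by-field knowledge down into intel_dpll_compare_hw_state() and the dump helpers used by pipe_config_pll_mismatch(). A reduced sketch of the checker-macro idiom it slots into (stand-in types, and a naive whole-struct compare where the driver defers to the DPLL manager):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define __stringify(x) #x       /* mirrors the kernel macro */

    struct pll_hw_state { unsigned int dpll, fp0, fp1; };
    struct crtc_state { struct pll_hw_state dpll_hw_state; };

    /* Stand-in for intel_dpll_compare_hw_state(); memcmp() is only
     * safe here because the struct has no padding holes. */
    static bool compare_hw_state(const struct pll_hw_state *a,
                                 const struct pll_hw_state *b)
    {
        return memcmp(a, b, sizeof(*a)) == 0;
    }

    #define PIPE_CONF_CHECK_PLL(name) do { \
        if (!compare_hw_state(&current_config->name, &pipe_config->name)) { \
            fprintf(stderr, "mismatch in %s\n", __stringify(name)); \
            ret = false; \
        } \
    } while (0)

    int main(void)
    {
        struct crtc_state a = { { 1, 2, 3 } }, b = { { 1, 2, 4 } };
        const struct crtc_state *current_config = &a, *pipe_config = &b;
        bool ret = true;

        PIPE_CONF_CHECK_PLL(dpll_hw_state);
        return ret ? 0 : 1;
    }
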
@@ -5373,6 +5375,10 @@ static int intel_modeset_pipe(struct intel_atomic_state *state,
        if (ret)
                return ret;
 
+       ret = intel_dp_tunnel_atomic_add_state_for_crtc(state, crtc);
+       if (ret)
+               return ret;
+
        ret = intel_dp_mst_add_topology_state_for_crtc(state, crtc);
        if (ret)
                return ret;
@@ -6260,12 +6266,11 @@ static int intel_atomic_check_config(struct intel_atomic_state *state,
 
 static int intel_atomic_check_config_and_link(struct intel_atomic_state *state)
 {
-       struct drm_i915_private *i915 = to_i915(state->base.dev);
        struct intel_link_bw_limits new_limits;
        struct intel_link_bw_limits old_limits;
        int ret;
 
-       intel_link_bw_init_limits(i915, &new_limits);
+       intel_link_bw_init_limits(state, &new_limits);
        old_limits = new_limits;
 
        while (true) {
@@ -7118,6 +7123,8 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
 
        intel_commit_modeset_disables(state);
 
+       intel_dp_tunnel_atomic_alloc_bw(state);
+
        /* FIXME: Eventually get rid of our crtc->config pointer */
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
                crtc->config = new_crtc_state;
@@ -8094,8 +8101,9 @@ void intel_hpd_poll_fini(struct drm_i915_private *i915)
        /* Kill all the work that may have been queued by hpd. */
        drm_connector_list_iter_begin(&i915->drm, &conn_iter);
        for_each_intel_connector_iter(connector, &conn_iter) {
-               if (connector->modeset_retry_work.func)
-                       cancel_work_sync(&connector->modeset_retry_work);
+               if (connector->modeset_retry_work.func &&
+                   cancel_work_sync(&connector->modeset_retry_work))
+                       drm_connector_put(&connector->base);
                if (connector->hdcp.shim) {
                        cancel_delayed_work_sync(&connector->hdcp.check_work);
                        cancel_work_sync(&connector->hdcp.prop_work);
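
The hunk above leans on a workqueue guarantee: cancel_work_sync() returns true only if the work was still pending. In that case the retry work never got to run, so the connector reference it was queued with (see intel_dp_queue_modeset_retry_work() in the intel_dp.c hunks below) has not been released yet, and the canceller must drop it; if the work already ran, the work function itself did the drm_connector_put().
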
index fdeaac994e17bbf18fa1e303c12c5f68fdf5e72e..2167dbee5eea7f2aac769b984e92989b4b357db2 100644 (file)
@@ -524,6 +524,7 @@ struct intel_display {
        } wq;
 
        /* Grouping using named structs. Keep sorted. */
+       struct drm_dp_tunnel_mgr *dp_tunnel_mgr;
        struct intel_audio audio;
        struct intel_dpll dpll;
        struct intel_fbc *fbc[I915_MAX_FBCS];
index 6f2d13c8ccf776e265fa39ae23b7a4138a069c89..b99c024b0934a85d5bc642abfd30a17c65489f2d 100644 (file)
@@ -188,7 +188,8 @@ static void intel_panel_info(struct seq_file *m,
 }
 
 static void intel_hdcp_info(struct seq_file *m,
-                           struct intel_connector *intel_connector)
+                           struct intel_connector *intel_connector,
+                           bool remote_req)
 {
        bool hdcp_cap, hdcp2_cap;
 
@@ -197,8 +198,14 @@ static void intel_hdcp_info(struct seq_file *m,
                goto out;
        }
 
-       hdcp_cap = intel_hdcp_capable(intel_connector);
-       hdcp2_cap = intel_hdcp2_capable(intel_connector);
+       if (remote_req) {
+               intel_hdcp_get_remote_capability(intel_connector,
+                                                &hdcp_cap,
+                                                &hdcp2_cap);
+       } else {
+               hdcp_cap = intel_hdcp_get_capability(intel_connector);
+               hdcp2_cap = intel_hdcp2_get_capability(intel_connector);
+       }
 
        if (hdcp_cap)
                seq_puts(m, "HDCP1.4 ");
@@ -285,7 +292,11 @@ static void intel_connector_info(struct seq_file *m,
        }
 
        seq_puts(m, "\tHDCP version: ");
-       intel_hdcp_info(m, intel_connector);
+       if (intel_encoder_is_mst(encoder)) {
+               intel_hdcp_info(m, intel_connector, true);
+               seq_puts(m, "\tMST Hub HDCP version: ");
+       }
+       intel_hdcp_info(m, intel_connector, false);
 
        seq_printf(m, "\tmax bpc: %u\n", connector->display_info.bpc);
 
@@ -1131,7 +1142,7 @@ static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
 
        seq_printf(m, "%s:%d HDCP version: ", connector->base.name,
                   connector->base.base.id);
-       intel_hdcp_info(m, connector);
+       intel_hdcp_info(m, connector, false);
 
 out:
        drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
@@ -1391,6 +1402,20 @@ out:     drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
        return ret;
 }
 
+static int i915_bigjoiner_enable_show(struct seq_file *m, void *data)
+{
+       struct intel_connector *connector = m->private;
+       struct drm_crtc *crtc;
+
+       crtc = connector->base.state->crtc;
+       if (connector->base.status != connector_status_connected || !crtc)
+               return -ENODEV;
+
+       seq_printf(m, "Bigjoiner enable: %d\n", connector->force_bigjoiner_enable);
+
+       return 0;
+}
+
 static ssize_t i915_dsc_output_format_write(struct file *file,
                                            const char __user *ubuf,
                                            size_t len, loff_t *offp)
@@ -1412,6 +1437,30 @@ static ssize_t i915_dsc_output_format_write(struct file *file,
        return len;
 }
 
+static ssize_t i915_bigjoiner_enable_write(struct file *file,
+                                          const char __user *ubuf,
+                                          size_t len, loff_t *offp)
+{
+       struct seq_file *m = file->private_data;
+       struct intel_connector *connector = m->private;
+       struct drm_crtc *crtc;
+       bool bigjoiner_en = false;
+       int ret;
+
+       crtc = connector->base.state->crtc;
+       if (connector->base.status != connector_status_connected || !crtc)
+               return -ENODEV;
+
+       ret = kstrtobool_from_user(ubuf, len, &bigjoiner_en);
+       if (ret < 0)
+               return ret;
+
+       connector->force_bigjoiner_enable = bigjoiner_en;
+       *offp += len;
+
+       return len;
+}
+
 static int i915_dsc_output_format_open(struct inode *inode,
                                       struct file *file)
 {
@@ -1505,6 +1554,8 @@ static const struct file_operations i915_dsc_fractional_bpp_fops = {
        .write = i915_dsc_fractional_bpp_write
 };
 
+DEFINE_SHOW_STORE_ATTRIBUTE(i915_bigjoiner_enable);
+
 /*
  * Returns the Current CRTC's bpc.
  * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc
@@ -1586,6 +1637,13 @@ void intel_connector_debugfs_add(struct intel_connector *connector)
                                    connector, &i915_dsc_fractional_bpp_fops);
        }
 
+       if (DISPLAY_VER(i915) >= 11 &&
+           (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+            connector_type == DRM_MODE_CONNECTOR_eDP)) {
+               debugfs_create_file("i915_bigjoiner_force_enable", 0644, root,
+                                   connector, &i915_bigjoiner_enable_fops);
+       }
+
        if (connector_type == DRM_MODE_CONNECTOR_DSI ||
            connector_type == DRM_MODE_CONNECTOR_eDP ||
            connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
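
The new knob appears per connector next to the other DP debugfs files, limited to DISPLAY_VER >= 11 DP/eDP. Following the usage convention documented for i915_current_bpc elsewhere in this file, and with an illustrative connector directory name: echo 1 > /sys/kernel/debug/dri/0/DP-1/i915_bigjoiner_force_enable, then read it back with cat on the same path. Writes fail with -ENODEV unless the connector is connected and bound to a CRTC, and the resulting flag is consumed by intel_dp_need_bigjoiner() in the intel_dp.c hunk below.
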
index 4f7ba7eb03d273ed4c36a067393b30de0c77f173..87dd07e0d138d75121cbeffcd9af858d4e082c90 100644 (file)
@@ -35,6 +35,7 @@
 #include "intel_dkl_phy.h"
 #include "intel_dmc.h"
 #include "intel_dp.h"
+#include "intel_dp_tunnel.h"
 #include "intel_dpll.h"
 #include "intel_dpll_mgr.h"
 #include "intel_fb.h"
@@ -434,10 +435,8 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
 
        for_each_pipe(i915, pipe) {
                ret = intel_crtc_init(i915, pipe);
-               if (ret) {
-                       intel_mode_config_cleanup(i915);
-                       return ret;
-               }
+               if (ret)
+                       goto err_mode_config;
        }
 
        intel_plane_possible_crtcs_init(i915);
@@ -457,6 +456,10 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
        intel_vga_disable(i915);
        intel_setup_outputs(i915);
 
+       ret = intel_dp_tunnel_mgr_init(i915);
+       if (ret)
+               goto err_hdcp;
+
        intel_display_driver_disable_user_access(i915);
 
        drm_modeset_lock_all(dev);
@@ -475,6 +478,13 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
                ilk_wm_sanitize(i915);
 
        return 0;
+
+err_hdcp:
+       intel_hdcp_component_fini(i915);
+err_mode_config:
+       intel_mode_config_cleanup(i915);
+
+       return ret;
 }
 
 /* part #3: call after gem init */
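
The probe path now unwinds in strict reverse order of initialization: a DP tunnel manager failure tears down the HDCP component first and then the mode config, while a crtc-init failure jumps straight to the mode-config cleanup. A generic, self-contained sketch of this error-ladder idiom (all init/fini names hypothetical):

    #include <stdio.h>

    /* Stand-ins for the mode config / HDCP / tunnel mgr setup steps. */
    static int a_init(void) { return 0; }
    static int b_init(void) { return 0; }
    static int c_init(void) { return -1; }  /* simulate a failure */
    static void b_fini(void) { puts("b_fini"); }
    static void a_fini(void) { puts("a_fini"); }

    static int probe(void)
    {
        int ret;

        ret = a_init();
        if (ret)
            return ret;

        ret = b_init();
        if (ret)
            goto err_a;

        ret = c_init();
        if (ret)
            goto err_b;

        return 0;

    err_b:                  /* undo in reverse order, newest first */
        b_fini();
    err_a:
        a_fini();
        return ret;
    }

    int main(void)
    {
        printf("probe() = %d\n", probe());  /* prints b_fini, a_fini, -1 */
        return 0;
    }
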
@@ -599,6 +609,8 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
 
        intel_mode_config_cleanup(i915);
 
+       intel_dp_tunnel_mgr_cleanup(i915);
+
        intel_overlay_cleanup(i915);
 
        intel_gmbus_teardown(i915);
index 01eb6e4e604914db8c780dccae513dda94f8e778..860e867586f48bd52c6082703b4cb7c505d0b653 100644 (file)
@@ -33,6 +33,7 @@
 
 #include <drm/display/drm_dp_dual_mode_helper.h>
 #include <drm/display/drm_dp_mst_helper.h>
+#include <drm/display/drm_dp_tunnel.h>
 #include <drm/display/drm_dsc.h>
 #include <drm/drm_atomic.h>
 #include <drm/drm_crtc.h>
@@ -327,7 +328,6 @@ struct intel_vbt_panel_data {
                struct edp_power_seq pps;
                u8 drrs_msa_timing_delay;
                bool low_vswing;
-               bool initialized;
                bool hobl;
        } edp;
 
@@ -499,15 +499,15 @@ struct intel_hdcp_shim {
                           struct intel_connector *connector);
 
        /* Detects panel's hdcp capability. This is optional for HDMI. */
-       int (*hdcp_capable)(struct intel_digital_port *dig_port,
-                           bool *hdcp_capable);
+       int (*hdcp_get_capability)(struct intel_digital_port *dig_port,
+                                  bool *hdcp_capable);
 
        /* HDCP adaptation(DP/HDMI) required on the port */
        enum hdcp_wired_protocol protocol;
 
        /* Detects whether sink is HDCP2.2 capable */
-       int (*hdcp_2_2_capable)(struct intel_connector *connector,
-                               bool *capable);
+       int (*hdcp_2_2_get_capability)(struct intel_connector *connector,
+                                      bool *capable);
 
        /* Write HDCP2.2 messages */
        int (*write_2_2_msg)(struct intel_connector *connector,
@@ -532,6 +532,10 @@ struct intel_hdcp_shim {
        /* HDCP2.2 Link Integrity Check */
        int (*check_2_2_link)(struct intel_digital_port *dig_port,
                              struct intel_connector *connector);
+
+       /* HDCP remote sink cap */
+       int (*get_remote_hdcp_capability)(struct intel_connector *connector,
+                                         bool *hdcp_capable, bool *hdcp2_capable);
 };
 
 struct intel_hdcp {
@@ -626,6 +630,8 @@ struct intel_connector {
 
        struct intel_dp *mst_port;
 
+       bool force_bigjoiner_enable;
+
        struct {
                struct drm_dp_aux *dsc_decompression_aux;
                u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
@@ -677,6 +683,8 @@ struct intel_atomic_state {
 
        struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
 
+       struct intel_dp_tunnel_inherited_state *inherited_dp_tunnels;
+
        /*
         * Current watermarks can't be trusted during hardware readout, so
         * don't bother calculating intermediate watermarks.
@@ -1374,6 +1382,9 @@ struct intel_crtc_state {
                struct drm_dsc_config config;
        } dsc;
 
+       /* DP tunnel used for BW allocation. */
+       struct drm_dp_tunnel_ref dp_tunnel_ref;
+
        /* HSW+ linetime watermarks */
        u16 linetime;
        u16 ips_linetime;
@@ -1784,6 +1795,9 @@ struct intel_dp {
        /* connector directly attached - won't be used for modeset in mst world */
        struct intel_connector *attached_connector;
 
+       struct drm_dp_tunnel *tunnel;
+       bool tunnel_suspended:1;
+
        /* mst connector list */
        struct intel_dp_mst_encoder *mst_encoders[I915_MAX_PIPES];
        struct drm_dp_mst_topology_mgr mst_mgr;
index 5045c34a16be1c6f2879112b5fc91878491a8f6c..523a6d68a52c254311b523cdc896c949435e496c 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/byteorder.h>
 
 #include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dp_tunnel.h>
 #include <drm/display/drm_dsc_helper.h>
 #include <drm/display/drm_hdmi_helper.h>
 #include <drm/drm_atomic_helper.h>
@@ -63,6 +64,7 @@
 #include "intel_dp_hdcp.h"
 #include "intel_dp_link_training.h"
 #include "intel_dp_mst.h"
+#include "intel_dp_tunnel.h"
 #include "intel_dpio_phy.h"
 #include "intel_dpll.h"
 #include "intel_fifo_underrun.h"
@@ -152,6 +154,22 @@ int intel_dp_link_symbol_clock(int rate)
        return DIV_ROUND_CLOSEST(rate * 10, intel_dp_link_symbol_size(rate));
 }
 
+static int max_dprx_rate(struct intel_dp *intel_dp)
+{
+       if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
+               return drm_dp_tunnel_max_dprx_rate(intel_dp->tunnel);
+
+       return drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
+}
+
+static int max_dprx_lane_count(struct intel_dp *intel_dp)
+{
+       if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
+               return drm_dp_tunnel_max_dprx_lane_count(intel_dp->tunnel);
+
+       return drm_dp_max_lane_count(intel_dp->dpcd);
+}
+
 static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
 {
        intel_dp->sink_rates[0] = 162000;
@@ -180,7 +198,7 @@ static void intel_dp_set_dpcd_sink_rates(struct intel_dp *intel_dp)
        /*
         * Sink rates for 8b/10b.
         */
-       max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
+       max_rate = max_dprx_rate(intel_dp);
        max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
        if (max_lttpr_rate)
                max_rate = min(max_rate, max_lttpr_rate);
@@ -259,7 +277,7 @@ static void intel_dp_set_max_sink_lane_count(struct intel_dp *intel_dp)
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &intel_dig_port->base;
 
-       intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+       intel_dp->max_sink_lane_count = max_dprx_lane_count(intel_dp);
 
        switch (intel_dp->max_sink_lane_count) {
        case 1:
@@ -309,7 +327,7 @@ static int intel_dp_common_rate(struct intel_dp *intel_dp, int index)
 }
 
 /* Theoretical max between source and sink */
-static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
+int intel_dp_max_common_rate(struct intel_dp *intel_dp)
 {
        return intel_dp_common_rate(intel_dp, intel_dp->num_common_rates - 1);
 }
@@ -326,7 +344,7 @@ static int intel_dp_max_source_lane_count(struct intel_digital_port *dig_port)
 }
 
 /* Theoretical max between source and sink */
-static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
+int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        int source_max = intel_dp_max_source_lane_count(dig_port);
@@ -383,50 +401,27 @@ int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
                                1000000 * 16 * 8);
 }
 
-/*
- * Given a link rate and lanes, get the data bandwidth.
- *
- * Data bandwidth is the actual payload rate, which depends on the data
- * bandwidth efficiency and the link rate.
+/**
+ * intel_dp_max_link_data_rate: Calculate the maximum rate for the given link params
+ * @intel_dp: Intel DP object
+ * @max_dprx_rate: Maximum data rate of the DPRX
+ * @max_dprx_lanes: Maximum lane count of the DPRX
  *
- * For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency
- * is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) =
- * 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by
- * coincidence, the port clock in kHz matches the data bandwidth in kBps, and
- * they equal the link bit rate in Gbps multiplied by 100000. (Note that this no
- * longer holds for data bandwidth as soon as FEC or MST is taken into account!)
+ * Calculate the maximum data rate for the provided link parameters taking into
+ * account any BW limitations by a DP tunnel attached to @intel_dp.
  *
- * For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For
- * example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875
- * kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000
- * does not match the symbol clock, the port clock (not even if you think in
- * terms of a byte clock), nor the data bandwidth. It only matches the link bit
- * rate in units of 10000 bps.
+ * Returns the maximum data rate in kBps units.
  */
-int
-intel_dp_max_data_rate(int max_link_rate, int max_lanes)
+int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
+                               int max_dprx_rate, int max_dprx_lanes)
 {
-       int ch_coding_efficiency =
-               drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate));
-       int max_link_rate_kbps = max_link_rate * 10;
+       int max_rate = drm_dp_max_dprx_data_rate(max_dprx_rate, max_dprx_lanes);
 
-       /*
-        * UHBR rates always use 128b/132b channel encoding, and have
-        * 97.71% data bandwidth efficiency. Consider max_link_rate the
-        * link bit rate in units of 10000 bps.
-        */
-       /*
-        * Lower than UHBR rates always use 8b/10b channel encoding, and have
-        * 80% data bandwidth efficiency for SST non-FEC. However, this turns
-        * out to be a nop by coincidence:
-        *
-        *      int max_link_rate_kbps = max_link_rate * 10;
-        *      max_link_rate_kbps = DIV_ROUND_DOWN_ULL(max_link_rate_kbps * 8, 10);
-        *      max_link_rate = max_link_rate_kbps / 8;
-        */
-       return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate_kbps * max_lanes,
-                                             ch_coding_efficiency),
-                                 1000000 * 8);
+       if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
+               max_rate = min(max_rate,
+                              drm_dp_tunnel_available_bw(intel_dp->tunnel));
+
+       return max_rate;
 }
 
 bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
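
The efficiency arithmetic from the removed helper still happens, now inside drm_dp_max_dprx_data_rate(); the new wrapper merely clamps the result to the tunnel's currently allocated bandwidth when BW allocation is enabled. As the deleted comment worked out, the payload rate in kBps is link rate (in 10 kbps units) x 10 x lanes x coding efficiency / 8, with 80% efficiency for 8b/10b and about 96.71% for 128b/132b. A standalone check of those numbers (the ppm-scaled constants are assumed to match drm_dp_bw_channel_coding_efficiency()):

    #include <stdint.h>
    #include <stdio.h>

    /* Payload rate in kBps from link rate (10 kbps units), lane count
     * and channel coding efficiency in parts per million. */
    static int max_data_rate_kBps(int link_rate, int lanes, int eff_ppm)
    {
        uint64_t kbps = (uint64_t)link_rate * 10 * lanes;

        return (int)(kbps * eff_ppm / (1000000ull * 8));
    }

    int main(void)
    {
        /* RBR x1, 8b/10b: 1.62 Gbps * 0.8 / 8 = 162000 kBps, the
         * example from the removed comment. */
        printf("%d kBps\n", max_data_rate_kBps(162000, 1, 800000));

        /* UHBR10 x4, 128b/132b: 10 Gbps * 4 * 0.9671 / 8. */
        printf("%d kBps\n", max_data_rate_kBps(1000000, 4, 967100));
        return 0;
    }

With a tunnel in the path, intel_dp_max_link_data_rate() then takes the min() of this and drm_dp_tunnel_available_bw().
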
@@ -658,7 +653,7 @@ static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
        int mode_rate, max_rate;
 
        mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
-       max_rate = intel_dp_max_data_rate(link_rate, lane_count);
+       max_rate = intel_dp_max_link_data_rate(intel_dp, link_rate, lane_count);
        if (mode_rate > max_rate)
                return false;
 
@@ -1205,11 +1200,13 @@ bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
                             int hdisplay, int clock)
 {
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+       struct intel_connector *connector = intel_dp->attached_connector;
 
        if (!intel_dp_can_bigjoiner(intel_dp))
                return false;
 
-       return clock > i915->max_dotclk_freq || hdisplay > 5120;
+       return clock > i915->max_dotclk_freq || hdisplay > 5120 ||
+              connector->force_bigjoiner_enable;
 }
 
 static enum drm_mode_status
@@ -1260,7 +1257,8 @@ intel_dp_mode_valid(struct drm_connector *_connector,
        max_link_clock = intel_dp_max_link_rate(intel_dp);
        max_lanes = intel_dp_max_lane_count(intel_dp);
 
-       max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+       max_rate = intel_dp_max_link_data_rate(intel_dp, max_link_clock, max_lanes);
+
        mode_rate = intel_dp_link_required(target_clock,
                                           intel_dp_mode_min_output_bpp(connector, mode));
 
@@ -1610,8 +1608,10 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
                        for (lane_count = limits->min_lane_count;
                             lane_count <= limits->max_lane_count;
                             lane_count <<= 1) {
-                               link_avail = intel_dp_max_data_rate(link_rate,
-                                                                   lane_count);
+                               link_avail = intel_dp_max_link_data_rate(intel_dp,
+                                                                        link_rate,
+                                                                        lane_count);
+
 
                                if (mode_rate <= link_avail) {
                                        pipe_config->lane_count = lane_count;
@@ -2387,6 +2387,17 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
                                                       limits);
 }
 
+int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state)
+{
+       const struct drm_display_mode *adjusted_mode =
+               &crtc_state->hw.adjusted_mode;
+       int bpp = crtc_state->dsc.compression_enable ?
+               to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) :
+               crtc_state->pipe_bpp;
+
+       return intel_dp_link_required(adjusted_mode->crtc_clock, bpp);
+}
+
 static int
 intel_dp_compute_link_config(struct intel_encoder *encoder,
                             struct intel_crtc_state *pipe_config,
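
intel_dp_config_required_rate(), added above, centralizes the bpp choice that the next hunk deletes from the two duplicated debug branches in intel_dp_compute_link_config(): with DSC enabled the 1/16-precision fixed-point compressed bpp is rounded up to a whole bpp, otherwise pipe_bpp is used, and either way the required rate is pixel clock x bpp / 8. A quick numeric sketch of the x16 convention (both helpers are hypothetical mirrors of to_bpp_int_roundup() and intel_dp_link_required()):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* bpp stored in 1/16 units; round up for the worst-case check. */
    static int to_bpp_int_roundup(int bpp_x16)
    {
        return DIV_ROUND_UP(bpp_x16, 16);
    }

    /* kHz pixel clock * bpp / 8 -> required link data rate in kBps. */
    static int link_required(int pixel_clock, int bpp)
    {
        return DIV_ROUND_UP(pixel_clock * bpp, 8);
    }

    int main(void)
    {
        int compressed_bpp_x16 = 12 * 16 + 8;   /* 12.5 bpp */
        int pixel_clock = 594000;               /* kHz */

        /* 12.5 bpp rounds up to 13: 594000 * 13 / 8 = 965250 kBps */
        printf("required: %d kBps\n",
               link_required(pixel_clock,
                             to_bpp_int_roundup(compressed_bpp_x16)));
        return 0;
    }
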
@@ -2454,31 +2465,16 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
                        return ret;
        }
 
-       if (pipe_config->dsc.compression_enable) {
-               drm_dbg_kms(&i915->drm,
-                           "DP lane count %d clock %d Input bpp %d Compressed bpp " BPP_X16_FMT "\n",
-                           pipe_config->lane_count, pipe_config->port_clock,
-                           pipe_config->pipe_bpp,
-                           BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
-
-               drm_dbg_kms(&i915->drm,
-                           "DP link rate required %i available %i\n",
-                           intel_dp_link_required(adjusted_mode->crtc_clock,
-                                                  to_bpp_int_roundup(pipe_config->dsc.compressed_bpp_x16)),
-                           intel_dp_max_data_rate(pipe_config->port_clock,
-                                                  pipe_config->lane_count));
-       } else {
-               drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
-                           pipe_config->lane_count, pipe_config->port_clock,
-                           pipe_config->pipe_bpp);
+       drm_dbg_kms(&i915->drm,
+                   "DP lane count %d clock %d bpp input %d compressed " BPP_X16_FMT " link rate required %d available %d\n",
+                   pipe_config->lane_count, pipe_config->port_clock,
+                   pipe_config->pipe_bpp,
+                   BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
+                   intel_dp_config_required_rate(pipe_config),
+                   intel_dp_max_link_data_rate(intel_dp,
+                                               pipe_config->port_clock,
+                                               pipe_config->lane_count));
 
-               drm_dbg_kms(&i915->drm,
-                           "DP link rate required %i available %i\n",
-                           intel_dp_link_required(adjusted_mode->crtc_clock,
-                                                  pipe_config->pipe_bpp),
-                           intel_dp_max_data_rate(pipe_config->port_clock,
-                                                  pipe_config->lane_count));
-       }
        return 0;
 }
 
@@ -2840,12 +2836,47 @@ intel_dp_audio_compute_config(struct intel_encoder *encoder,
                                        intel_dp_is_uhbr(pipe_config);
 }
 
+void intel_dp_queue_modeset_retry_work(struct intel_connector *connector)
+{
+       struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+       drm_connector_get(&connector->base);
+       if (!queue_work(i915->unordered_wq, &connector->modeset_retry_work))
+               drm_connector_put(&connector->base);
+}
+
+void
+intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
+                                     struct intel_encoder *encoder,
+                                     const struct intel_crtc_state *crtc_state)
+{
+       struct intel_connector *connector;
+       struct intel_digital_connector_state *conn_state;
+       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+       int i;
+
+       if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
+               intel_dp_queue_modeset_retry_work(intel_dp->attached_connector);
+
+               return;
+       }
+
+       for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+               if (!conn_state->base.crtc)
+                       continue;
+
+               if (connector->mst_port == intel_dp)
+                       intel_dp_queue_modeset_retry_work(connector);
+       }
+}
+
 int
 intel_dp_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_state *pipe_config,
                        struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
        struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        const struct drm_display_mode *fixed_mode;
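
The retry helpers above encode a simple ownership rule: every successfully queued retry work holds one connector reference, released either by the work function once it runs (intel_dp_modeset_retry_work_fn() below now ends with drm_connector_put()) or by the cancel path in intel_hpd_poll_fini() earlier in this diff. queue_work() returns false when the work is already pending, in which case the just-taken reference is dropped on the spot. A reduced sketch of the pattern with a plain counter (all names hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    struct obj { int refs; bool work_pending; };

    static void get(struct obj *o) { o->refs++; }
    static void put(struct obj *o) { o->refs--; }

    /* Mirrors queue_work(): false when already pending. */
    static bool queue(struct obj *o)
    {
        if (o->work_pending)
            return false;
        o->work_pending = true;
        return true;
    }

    static void queue_retry(struct obj *o)
    {
        get(o);
        if (!queue(o))
            put(o);     /* already queued; it still holds one ref */
    }

    int main(void)
    {
        struct obj o = { .refs = 1 };

        queue_retry(&o);
        queue_retry(&o);    /* must not leak a second reference */
        printf("refs=%d pending=%d\n", o.refs, (int)o.work_pending);
        return 0;
    }
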
@@ -2946,7 +2977,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
        intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
        intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
 
-       return 0;
+       return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
+                                                       pipe_config);
 }
 
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
@@ -3282,18 +3314,21 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
                         const struct intel_crtc_state *crtc_state)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
-       if (!crtc_state)
-               return;
+       bool dpcd_updated = false;
 
        /*
         * Don't clobber DPCD if it's been already read out during output
         * setup (eDP) or detect.
         */
-       if (intel_dp->dpcd[DP_DPCD_REV] == 0)
+       if (crtc_state && intel_dp->dpcd[DP_DPCD_REV] == 0) {
                intel_dp_get_dpcd(intel_dp);
+               dpcd_updated = true;
+       }
 
-       intel_dp_reset_max_link_params(intel_dp);
+       intel_dp_tunnel_resume(intel_dp, crtc_state, dpcd_updated);
+
+       if (crtc_state)
+               intel_dp_reset_max_link_params(intel_dp);
 }
 
 bool intel_dp_initial_fastset_check(struct intel_encoder *encoder,
@@ -3959,6 +3994,13 @@ intel_dp_has_sink_count(struct intel_dp *intel_dp)
                                          &intel_dp->desc);
 }
 
+void intel_dp_update_sink_caps(struct intel_dp *intel_dp)
+{
+       intel_dp_set_sink_rates(intel_dp);
+       intel_dp_set_max_sink_lane_count(intel_dp);
+       intel_dp_set_common_rates(intel_dp);
+}
+
 static bool
 intel_dp_get_dpcd(struct intel_dp *intel_dp)
 {
@@ -3975,9 +4017,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
                drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
                                 drm_dp_is_branch(intel_dp->dpcd));
 
-               intel_dp_set_sink_rates(intel_dp);
-               intel_dp_set_max_sink_lane_count(intel_dp);
-               intel_dp_set_common_rates(intel_dp);
+               intel_dp_update_sink_caps(intel_dp);
        }
 
        if (intel_dp_has_sink_count(intel_dp)) {
@@ -4868,13 +4908,15 @@ static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
  * - %true if pending interrupts were serviced (or no interrupts were
  *   pending) w/o detecting an error condition.
  * - %false if an error condition - like AUX failure or a loss of link - is
- *   detected, which needs servicing from the hotplug work.
+ *   detected, or another condition - like a DP tunnel BW state change - needs
+ *   servicing from the hotplug work.
  */
 static bool
 intel_dp_check_mst_status(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        bool link_ok = true;
+       bool reprobe_needed = false;
 
        drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
 
@@ -4901,6 +4943,13 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
 
                intel_dp_mst_hpd_irq(intel_dp, esi, ack);
 
+               if (esi[3] & DP_TUNNELING_IRQ) {
+                       if (drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
+                                                    &intel_dp->aux))
+                               reprobe_needed = true;
+                       ack[3] |= DP_TUNNELING_IRQ;
+               }
+
                if (!memchr_inv(ack, 0, sizeof(ack)))
                        break;
 
@@ -4911,7 +4960,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
                        drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst_mgr);
        }
 
-       return link_ok;
+       return link_ok && !reprobe_needed;
 }
 
 static void
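
Both short-pulse paths treat a tunnel bandwidth change the same way: DP_TUNNELING_IRQ is always acked so the sink can raise it again, but when drm_dp_tunnel_handle_irq() reports a change, the handlers now return failure (link_ok && !reprobe_needed here, and !reprobe_needed from intel_dp_short_pulse() below) so the caller escalates to the hotplug work, which reprobes the connector.
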
@@ -5038,9 +5087,10 @@ int intel_dp_get_active_pipes(struct intel_dp *intel_dp,
                if (!crtc_state->hw.active)
                        continue;
 
-               if (conn_state->commit &&
-                   !try_wait_for_completion(&conn_state->commit->hw_done))
-                       continue;
+               if (conn_state->commit)
+                       drm_WARN_ON(&i915->drm,
+                                   !wait_for_completion_timeout(&conn_state->commit->hw_done,
+                                                                msecs_to_jiffies(5000)));
 
                *pipe_mask |= BIT(crtc->pipe);
        }
@@ -5270,23 +5320,32 @@ static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
                drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
 }
 
-static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
+static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
 {
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+       bool reprobe_needed = false;
        u8 val;
 
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
-               return;
+               return false;
 
        if (drm_dp_dpcd_readb(&intel_dp->aux,
                              DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
-               return;
+               return false;
+
+       if ((val & DP_TUNNELING_IRQ) &&
+           drm_dp_tunnel_handle_irq(i915->display.dp_tunnel_mgr,
+                                    &intel_dp->aux))
+               reprobe_needed = true;
 
        if (drm_dp_dpcd_writeb(&intel_dp->aux,
                               DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
-               return;
+               return reprobe_needed;
 
        if (val & HDMI_LINK_STATUS_CHANGED)
                intel_dp_handle_hdmi_link_status_change(intel_dp);
+
+       return reprobe_needed;
 }
 
 /*
@@ -5307,6 +5366,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        u8 old_sink_count = intel_dp->sink_count;
+       bool reprobe_needed = false;
        bool ret;
 
        /*
@@ -5329,7 +5389,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
        }
 
        intel_dp_check_device_service_irq(intel_dp);
-       intel_dp_check_link_service_irq(intel_dp);
+       reprobe_needed = intel_dp_check_link_service_irq(intel_dp);
 
        /* Handle CEC interrupts, if any */
        drm_dp_cec_irq(&intel_dp->aux);
@@ -5356,10 +5416,10 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
                 * FIXME get rid of the ad-hoc phy test modeset code
                 * and properly incorporate it into the normal modeset.
                 */
-               return false;
+               reprobe_needed = true;
        }
 
-       return true;
+       return !reprobe_needed;
 }
 
 /* XXX this is probably wrong for multiple downstream ports */
@@ -5669,6 +5729,7 @@ intel_dp_detect(struct drm_connector *connector,
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct intel_encoder *encoder = &dig_port->base;
        enum drm_connector_status status;
+       int ret;
 
        drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
                    connector->base.id, connector->name);
@@ -5704,9 +5765,18 @@ intel_dp_detect(struct drm_connector *connector,
                                                        intel_dp->is_mst);
                }
 
+               intel_dp_tunnel_disconnect(intel_dp);
+
                goto out;
        }
 
+       ret = intel_dp_tunnel_detect(intel_dp, ctx);
+       if (ret == -EDEADLK)
+               return ret;
+
+       if (ret == 1)
+               intel_connector->base.epoch_counter++;
+
        intel_dp_detect_dsc_caps(intel_dp, intel_connector);
 
        intel_dp_configure_mst(intel_dp);
@@ -5737,8 +5807,6 @@ intel_dp_detect(struct drm_connector *connector,
         * with an IRQ_HPD, so force a link status check.
         */
        if (!intel_dp_is_edp(intel_dp)) {
-               int ret;
-
                ret = intel_dp_retrain_link(encoder, ctx);
                if (ret)
                        return ret;
@@ -5878,6 +5946,8 @@ void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
 
        intel_dp_mst_encoder_cleanup(dig_port);
 
+       intel_dp_tunnel_destroy(intel_dp);
+
        intel_pps_vdd_off_sync(intel_dp);
 
        /*
@@ -5894,6 +5964,8 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
        struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
 
        intel_pps_vdd_off_sync(intel_dp);
+
+       intel_dp_tunnel_suspend(intel_dp);
 }
 
 void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder)
@@ -6031,6 +6103,15 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
                        return ret;
        }
 
+       if (!intel_connector_needs_modeset(state, conn))
+               return 0;
+
+       ret = intel_dp_tunnel_atomic_check_state(state,
+                                                intel_dp,
+                                                intel_conn);
+       if (ret)
+               return ret;
+
        /*
         * We don't enable port sync on BDW due to missing w/as and
         * due to not having adjusted the modeset sequence appropriately.
@@ -6038,9 +6119,6 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
        if (DISPLAY_VER(dev_priv) < 9)
                return 0;
 
-       if (!intel_connector_needs_modeset(state, conn))
-               return 0;
-
        if (conn->has_tile) {
                ret = intel_modeset_tile_group(state, conn->tile_group->id);
                if (ret)
@@ -6097,6 +6175,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
 {
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct intel_dp *intel_dp = &dig_port->dp;
+       u8 dpcd[DP_RECEIVER_CAP_SIZE];
 
        if (dig_port->base.type == INTEL_OUTPUT_EDP &&
            (long_hpd || !intel_pps_have_panel_power_or_vdd(intel_dp))) {
@@ -6119,6 +6198,17 @@ intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
                    dig_port->base.base.name,
                    long_hpd ? "long" : "short");
 
+       /*
+        * TBT DP tunnels require the GFX driver to read out the DPRX caps in
+        * response to long HPD pulses. The DP hotplug handler does that,
+        * however the hotplug handler may be blocked by another
+        * connector's/encoder's hotplug handler. Since the TBT CM may not
+        * complete the DP tunnel BW request for the latter connector/encoder
+        * waiting for this encoder's DPRX read, perform a dummy read here.
+        */
+       if (long_hpd)
+               intel_dp_read_dprx_caps(intel_dp, dpcd);
+
        if (long_hpd) {
                intel_dp->reset_link_params = true;
                return IRQ_NONE;
@@ -6439,6 +6529,14 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
        mutex_unlock(&connector->dev->mode_config.mutex);
        /* Send Hotplug uevent so userspace can reprobe */
        drm_kms_helper_connector_hotplug_event(connector);
+
+       drm_connector_put(connector);
+}
+
+void intel_dp_init_modeset_retry_work(struct intel_connector *connector)
+{
+       INIT_WORK(&connector->modeset_retry_work,
+                 intel_dp_modeset_retry_work_fn);
 }
 
 bool
@@ -6455,8 +6553,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
        int type;
 
        /* Initialize the work for modeset in case of link train failure */
-       INIT_WORK(&intel_connector->modeset_retry_work,
-                 intel_dp_modeset_retry_work_fn);
+       intel_dp_init_modeset_retry_work(intel_connector);
 
        if (drm_WARN(dev, dig_port->max_lanes < 1,
                     "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
index 530cc97bc42f43cc9707f410248ecb19c7bbf9b9..564a587e2d018d5bf1092fdb0166b6af0a1dd5d5 100644 (file)
@@ -43,6 +43,12 @@ void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
 bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
                                  const struct drm_connector_state *conn_state);
 int intel_dp_min_bpp(enum intel_output_format output_format);
+void intel_dp_init_modeset_retry_work(struct intel_connector *connector);
+void intel_dp_queue_modeset_retry_work(struct intel_connector *connector);
+void
+intel_dp_queue_modeset_retry_for_link(struct intel_atomic_state *state,
+                                     struct intel_encoder *encoder,
+                                     const struct intel_crtc_state *crtc_state);
 bool intel_dp_init_connector(struct intel_digital_port *dig_port,
                             struct intel_connector *intel_connector);
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
@@ -94,7 +100,11 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
 void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
 int intel_dp_max_link_rate(struct intel_dp *intel_dp);
 int intel_dp_max_lane_count(struct intel_dp *intel_dp);
+int intel_dp_config_required_rate(const struct intel_crtc_state *crtc_state);
 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
+int intel_dp_max_common_rate(struct intel_dp *intel_dp);
+int intel_dp_max_common_lane_count(struct intel_dp *intel_dp);
+void intel_dp_update_sink_caps(struct intel_dp *intel_dp);
 
 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
                           u8 *link_bw, u8 *rate_select);
@@ -105,7 +115,8 @@ bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
 int intel_dp_link_required(int pixel_clock, int bpp);
 int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
                                 int bw_overhead);
-int intel_dp_max_data_rate(int max_link_rate, int max_lanes);
+int intel_dp_max_link_data_rate(struct intel_dp *intel_dp,
+                               int max_dprx_rate, int max_dprx_lanes);
 bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp);
 bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
                            const struct drm_connector_state *conn_state);
index 3a595cd433d4952078ed20227a1d47f4fe11d458..b98a87883fefb016be68ceb72a408258868b55ec 100644 (file)
@@ -36,8 +36,10 @@ static u32 transcoder_to_stream_enc_status(enum transcoder cpu_transcoder)
        }
 }
 
-static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
+static void intel_dp_hdcp_wait_for_cp_irq(struct intel_connector *connector,
+                                         int timeout)
 {
+       struct intel_hdcp *hdcp = &connector->hdcp;
        long ret;
 
 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
@@ -45,7 +47,8 @@ static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
                                               msecs_to_jiffies(timeout));
 
        if (!ret)
-               DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
+               drm_dbg_kms(connector->base.dev,
+                           "Timedout at waiting for CP_IRQ\n");
 }
 
 static
@@ -122,13 +125,13 @@ static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *dig_port,
 }
 
 static
-int intel_dp_hdcp_read_bcaps(struct intel_digital_port *dig_port,
+int intel_dp_hdcp_read_bcaps(struct drm_dp_aux *aux,
+                            struct drm_i915_private *i915,
                             u8 *bcaps)
 {
-       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        ssize_t ret;
 
-       ret = drm_dp_dpcd_read(&dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
+       ret = drm_dp_dpcd_read(aux, DP_AUX_HDCP_BCAPS,
                               bcaps, 1);
        if (ret != 1) {
                drm_dbg_kms(&i915->drm,
@@ -143,10 +146,11 @@ static
 int intel_dp_hdcp_repeater_present(struct intel_digital_port *dig_port,
                                   bool *repeater_present)
 {
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        ssize_t ret;
        u8 bcaps;
 
-       ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
+       ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps);
        if (ret)
                return ret;
 
@@ -265,13 +269,14 @@ bool intel_dp_hdcp_check_link(struct intel_digital_port *dig_port,
 }
 
 static
-int intel_dp_hdcp_capable(struct intel_digital_port *dig_port,
-                         bool *hdcp_capable)
+int intel_dp_hdcp_get_capability(struct intel_digital_port *dig_port,
+                                bool *hdcp_capable)
 {
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        ssize_t ret;
        u8 bcaps;
 
-       ret = intel_dp_hdcp_read_bcaps(dig_port, &bcaps);
+       ret = intel_dp_hdcp_read_bcaps(&dig_port->dp.aux, i915, &bcaps);
        if (ret)
                return ret;
 
@@ -330,23 +335,13 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
          0, 0 },
 };
 
-static struct drm_dp_aux *
-intel_dp_hdcp_get_aux(struct intel_connector *connector)
-{
-       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
-
-       if (intel_encoder_is_mst(connector->encoder))
-               return &connector->port->aux;
-       else
-               return &dig_port->dp.aux;
-}
-
 static int
 intel_dp_hdcp2_read_rx_status(struct intel_connector *connector,
                              u8 *rx_status)
 {
        struct drm_i915_private *i915 = to_i915(connector->base.dev);
-       struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+       struct drm_dp_aux *aux = &dig_port->dp.aux;
        ssize_t ret;
 
        ret = drm_dp_dpcd_read(aux,
@@ -387,7 +382,8 @@ int hdcp2_detect_msg_availability(struct intel_connector *connector,
                        *msg_ready = true;
                break;
        default:
-               DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
+               drm_err(connector->base.dev,
+                       "Unidentified msg_id: %d\n", msg_id);
                return -EINVAL;
        }
 
@@ -399,7 +395,9 @@ intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector,
                            const struct hdcp2_dp_msg_data *hdcp2_msg_data)
 {
        struct drm_i915_private *i915 = to_i915(connector->base.dev);
-       struct intel_hdcp *hdcp = &connector->hdcp;
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+       struct intel_dp *dp = &dig_port->dp;
+       struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
        u8 msg_id = hdcp2_msg_data->msg_id;
        int ret, timeout;
        bool msg_ready = false;
@@ -421,7 +419,7 @@ intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector,
                 * As we want to check the msg availability at timeout, Ignoring
                 * the timeout at wait for CP_IRQ.
                 */
-               intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
+               intel_dp_hdcp_wait_for_cp_irq(connector, timeout);
                ret = hdcp2_detect_msg_availability(connector, msg_id,
                                                    &msg_ready);
                if (!msg_ready)
@@ -454,8 +452,9 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector,
        unsigned int offset;
        u8 *byte = buf;
        ssize_t ret, bytes_to_write, len;
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+       struct drm_dp_aux *aux = &dig_port->dp.aux;
        const struct hdcp2_dp_msg_data *hdcp2_msg_data;
-       struct drm_dp_aux *aux;
 
        hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
        if (!hdcp2_msg_data)
@@ -463,8 +462,6 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector,
 
        offset = hdcp2_msg_data->offset;
 
-       aux = intel_dp_hdcp_get_aux(connector);
-
        /* No msg_id in DP HDCP2.2 msgs */
        bytes_to_write = size - 1;
        byte++;
@@ -490,7 +487,8 @@ static
 ssize_t get_receiver_id_list_rx_info(struct intel_connector *connector,
                                     u32 *dev_cnt, u8 *byte)
 {
-       struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector);
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+       struct drm_dp_aux *aux = &dig_port->dp.aux;
        ssize_t ret;
        u8 *rx_info = byte;
 
@@ -515,8 +513,9 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
 {
        struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       struct intel_hdcp *hdcp = &connector->hdcp;
-       struct drm_dp_aux *aux;
+       struct drm_dp_aux *aux = &dig_port->dp.aux;
+       struct intel_dp *dp = &dig_port->dp;
+       struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
        unsigned int offset;
        u8 *byte = buf;
        ssize_t ret, bytes_to_recv, len;
@@ -530,8 +529,6 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
                return -EINVAL;
        offset = hdcp2_msg_data->offset;
 
-       aux = intel_dp_hdcp_get_aux(connector);
-
        ret = intel_dp_hdcp2_wait_for_msg(connector, hdcp2_msg_data);
        if (ret < 0)
                return ret;
@@ -561,13 +558,8 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
 
                /* Entire msg read timeout since initiate of msg read */
                if (bytes_to_recv == size - 1 && hdcp2_msg_data->msg_read_timeout > 0) {
-                       if (intel_encoder_is_mst(connector->encoder))
-                               msg_end = ktime_add_ms(ktime_get_raw(),
-                                                      hdcp2_msg_data->msg_read_timeout *
-                                                      connector->port->parent->num_ports);
-                       else
-                               msg_end = ktime_add_ms(ktime_get_raw(),
-                                                      hdcp2_msg_data->msg_read_timeout);
+                       msg_end = ktime_add_ms(ktime_get_raw(),
+                                              hdcp2_msg_data->msg_read_timeout);
                }
 
                ret = drm_dp_dpcd_read(aux, offset,
@@ -648,25 +640,69 @@ int intel_dp_hdcp2_check_link(struct intel_digital_port *dig_port,
 }
 
 static
-int intel_dp_hdcp2_capable(struct intel_connector *connector,
-                          bool *capable)
+int _intel_dp_hdcp2_get_capability(struct drm_dp_aux *aux,
+                                  bool *capable)
 {
-       struct drm_dp_aux *aux;
        u8 rx_caps[3];
+       int ret, i;
+
+       *capable = false;
+
+       /*
+        * Some HDCP monitors misbehave by not reporting the correct HDCP
+        * capability on the first rx_caps read, and usually need an extra
+        * read before reporting it. Read rx_caps up to three times before
+        * declaring a monitor not capable of HDCP 2.2.
+        */
+       for (i = 0; i < 3; i++) {
+               ret = drm_dp_dpcd_read(aux,
+                                      DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
+                                      rx_caps, HDCP_2_2_RXCAPS_LEN);
+               if (ret != HDCP_2_2_RXCAPS_LEN)
+                       return ret >= 0 ? -EIO : ret;
+
+               if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
+                   HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2])) {
+                       *capable = true;
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+static
+int intel_dp_hdcp2_get_capability(struct intel_connector *connector,
+                                 bool *capable)
+{
+       struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+       struct drm_dp_aux *aux = &dig_port->dp.aux;
+
+       return _intel_dp_hdcp2_get_capability(aux, capable);
+}
+
+static
+int intel_dp_hdcp_get_remote_capability(struct intel_connector *connector,
+                                       bool *hdcp_capable,
+                                       bool *hdcp2_capable)
+{
+       struct drm_i915_private *i915 = to_i915(connector->base.dev);
+       struct drm_dp_aux *aux = &connector->port->aux;
+       u8 bcaps;
        int ret;
 
-       aux = intel_dp_hdcp_get_aux(connector);
+       if (!intel_encoder_is_mst(connector->encoder))
+               return -EINVAL;
 
-       *capable = false;
-       ret = drm_dp_dpcd_read(aux,
-                              DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
-                              rx_caps, HDCP_2_2_RXCAPS_LEN);
-       if (ret != HDCP_2_2_RXCAPS_LEN)
-               return ret >= 0 ? -EIO : ret;
+       ret = _intel_dp_hdcp2_get_capability(aux, hdcp2_capable);
+       if (ret)
+               return ret;
+
+       ret = intel_dp_hdcp_read_bcaps(aux, i915, &bcaps);
+       if (ret)
+               return ret;
 
-       if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
-           HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
-               *capable = true;
+       *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
 
        return 0;
 }
@@ -682,12 +718,12 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
        .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
        .toggle_signalling = intel_dp_hdcp_toggle_signalling,
        .check_link = intel_dp_hdcp_check_link,
-       .hdcp_capable = intel_dp_hdcp_capable,
+       .hdcp_get_capability = intel_dp_hdcp_get_capability,
        .write_2_2_msg = intel_dp_hdcp2_write_msg,
        .read_2_2_msg = intel_dp_hdcp2_read_msg,
        .config_stream_type = intel_dp_hdcp2_config_stream_type,
        .check_2_2_link = intel_dp_hdcp2_check_link,
-       .hdcp_2_2_capable = intel_dp_hdcp2_capable,
+       .hdcp_2_2_get_capability = intel_dp_hdcp2_get_capability,
        .protocol = HDCP_PROTOCOL_DP,
 };
 
@@ -812,13 +848,14 @@ static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
        .toggle_signalling = intel_dp_hdcp_toggle_signalling,
        .stream_encryption = intel_dp_mst_hdcp_stream_encryption,
        .check_link = intel_dp_hdcp_check_link,
-       .hdcp_capable = intel_dp_hdcp_capable,
+       .hdcp_get_capability = intel_dp_hdcp_get_capability,
        .write_2_2_msg = intel_dp_hdcp2_write_msg,
        .read_2_2_msg = intel_dp_hdcp2_read_msg,
        .config_stream_type = intel_dp_hdcp2_config_stream_type,
        .stream_2_2_encryption = intel_dp_mst_hdcp2_stream_encryption,
        .check_2_2_link = intel_dp_mst_hdcp2_check_link,
-       .hdcp_2_2_capable = intel_dp_hdcp2_capable,
+       .hdcp_2_2_get_capability = intel_dp_hdcp2_get_capability,
+       .get_remote_hdcp_capability = intel_dp_hdcp_get_remote_capability,
        .protocol = HDCP_PROTOCOL_DP,
 };
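The retry loop added in _intel_dp_hdcp2_get_capability() above is a general probing pattern worth isolating. The following standalone sketch (not part of the patch) mirrors it; read_rx_caps() is a hypothetical stand-in for the drm_dp_dpcd_read() call, and the two constants mirror HDCP_2_2_RX_CAPS_VERSION_VAL and the capable bit tested by HDCP_2_2_DP_HDCP_CAPABLE():

#include <stdbool.h>
#include <stdio.h>

#define RX_CAPS_VERSION		0x02	/* mirrors HDCP_2_2_RX_CAPS_VERSION_VAL */
#define RX_CAPS_HDCP_CAPABLE	0x02	/* bit 1 of rx_caps[2] */
#define RX_CAPS_RETRIES		3

/* Hypothetical transport hook; returns 0 on success. */
static int read_rx_caps(unsigned char rx_caps[3])
{
	rx_caps[0] = RX_CAPS_VERSION;
	rx_caps[1] = 0;
	rx_caps[2] = RX_CAPS_HDCP_CAPABLE;
	return 0;
}

static int get_hdcp2_capability(bool *capable)
{
	unsigned char rx_caps[3];
	int i;

	*capable = false;

	/* Unreliable sinks may need a repeated read to report capability. */
	for (i = 0; i < RX_CAPS_RETRIES; i++) {
		if (read_rx_caps(rx_caps))
			return -1;
		if (rx_caps[0] == RX_CAPS_VERSION &&
		    (rx_caps[2] & RX_CAPS_HDCP_CAPABLE)) {
			*capable = true;
			break;
		}
	}

	return 0;
}

int main(void)
{
	bool capable;

	if (get_hdcp2_capability(&capable) == 0)
		printf("HDCP 2.2 capable: %s\n", capable ? "yes" : "no");
	return 0;
}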
 
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 1abfafbbfa7571e936f2c839ce0f4377bfda6677..fb84ca98bb7abaa66c14c8b2332a7ff43805207e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -162,6 +162,28 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI
        return lttpr_count;
 }
 
+int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+       if (intel_dp_is_edp(intel_dp))
+               return 0;
+
+       /*
+        * Detecting LTTPRs must be avoided on platforms with an AUX timeout
+        * period < 3.2ms (see DP Standard v2.0, 2.11.2, 3.6.6.1).
+        */
+       if (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))
+               if (drm_dp_dpcd_probe(&intel_dp->aux,
+                                     DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
+                       return -EIO;
+
+       if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
+               return -EIO;
+
+       return 0;
+}
+
 /**
  * intel_dp_init_lttpr_and_dprx_caps - detect LTTPR and DPRX caps, init the LTTPR link training mode
  * @intel_dp: Intel DP struct
@@ -192,12 +214,10 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
        if (!intel_dp_is_edp(intel_dp) &&
            (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) {
                u8 dpcd[DP_RECEIVER_CAP_SIZE];
+               int err = intel_dp_read_dprx_caps(intel_dp, dpcd);
 
-               if (drm_dp_dpcd_probe(&intel_dp->aux, DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
-                       return -EIO;
-
-               if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
-                       return -EIO;
+               if (err != 0)
+                       return err;
 
                lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd);
        }
@@ -1075,7 +1095,6 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
                                                     const struct intel_crtc_state *crtc_state)
 {
        struct intel_connector *intel_connector = intel_dp->attached_connector;
-       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
 
        if (!intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base)) {
                lt_dbg(intel_dp, DP_PHY_DPRX, "Link Training failed on disconnected sink.\n");
@@ -1093,7 +1112,7 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
        }
 
        /* Schedule a Hotplug Uevent to userspace to start modeset */
-       queue_work(i915->unordered_wq, &intel_connector->modeset_retry_work);
+       intel_dp_queue_modeset_retry_work(intel_connector);
 }
 
 /* Perform the link training on all LTTPRs and the DPRX on a link. */
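intel_dp_read_dprx_caps() above encodes an ordering rule: the LTTPR revision field may be probed only on platforms whose AUX timeout is at least 3.2 ms, and the full DPRX capability block is read afterwards either way. A standalone sketch of that rule (not part of the patch; probe_lttpr_rev() and read_caps() are hypothetical stand-ins for drm_dp_dpcd_probe() and drm_dp_read_dpcd_caps()):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define DP_RECEIVER_CAP_SIZE 15

/* Hypothetical stand-in for drm_dp_dpcd_probe(); wakes the sink. */
static int probe_lttpr_rev(void)
{
	return 0;
}

/* Hypothetical stand-in for drm_dp_read_dpcd_caps(). */
static int read_caps(unsigned char caps[DP_RECEIVER_CAP_SIZE])
{
	memset(caps, 0x14, DP_RECEIVER_CAP_SIZE);	/* fake DPCD contents */
	return 0;
}

static int read_dprx_caps(bool aux_timeout_long_enough,
			  unsigned char caps[DP_RECEIVER_CAP_SIZE])
{
	/*
	 * LTTPR detection is only safe with an AUX timeout >= 3.2 ms;
	 * on other platforms skip straight to the capability read.
	 */
	if (aux_timeout_long_enough && probe_lttpr_rev())
		return -EIO;

	if (read_caps(caps))
		return -EIO;

	return 0;
}

int main(void)
{
	unsigned char caps[DP_RECEIVER_CAP_SIZE];

	printf("read_dprx_caps() = %d\n", read_dprx_caps(true, caps));
	return 0;
}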
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 2c8f2775891b0f37ec9d46813bcb48f301d5db0c..19836a8a4f9041c035d23bc120b55b7294c5ddb0 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -11,6 +11,7 @@
 struct intel_crtc_state;
 struct intel_dp;
 
+int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE]);
 int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp);
 
 void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 5fa25a5a36b5532282f9e42037c089a2dd4ebeb6..db1254b036f188789440fd4ba3963cffdf861057 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -42,6 +42,7 @@
 #include "intel_dp.h"
 #include "intel_dp_hdcp.h"
 #include "intel_dp_mst.h"
+#include "intel_dp_tunnel.h"
 #include "intel_dpio_phy.h"
 #include "intel_hdcp.h"
 #include "intel_hotplug.h"
@@ -523,6 +524,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
                                       struct drm_connector_state *conn_state)
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
        struct intel_dp *intel_dp = &intel_mst->primary->dp;
        const struct intel_connector *connector =
@@ -619,7 +621,8 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
 
        intel_psr_compute_config(intel_dp, pipe_config, conn_state);
 
-       return 0;
+       return intel_dp_tunnel_atomic_compute_stream_bw(state, intel_dp, connector,
+                                                       pipe_config);
 }
 
 /*
@@ -876,6 +879,14 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
        if (ret)
                return ret;
 
+       if (intel_connector_needs_modeset(state, connector)) {
+               ret = intel_dp_tunnel_atomic_check_state(state,
+                                                        intel_connector->mst_port,
+                                                        intel_connector);
+               if (ret)
+                       return ret;
+       }
+
        return drm_dp_atomic_release_time_slots(&state->base,
                                                &intel_connector->mst_port->mst_mgr,
                                                intel_connector->port);
@@ -1197,6 +1208,7 @@ static bool intel_dp_mst_initial_fastset_check(struct intel_encoder *encoder,
 static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
 {
        struct intel_connector *intel_connector = to_intel_connector(connector);
+       struct drm_i915_private *i915 = to_i915(intel_connector->base.dev);
        struct intel_dp *intel_dp = intel_connector->mst_port;
        const struct drm_edid *drm_edid;
        int ret;
@@ -1204,6 +1216,9 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
        if (drm_connector_is_unregistered(connector))
                return intel_connector_update_modes(connector, NULL);
 
+       if (!intel_display_driver_check_access(i915))
+               return drm_edid_connector_add_modes(connector);
+
        drm_edid = drm_dp_mst_edid_read(connector, &intel_dp->mst_mgr, intel_connector->port);
 
        ret = intel_connector_update_modes(connector, drm_edid);
@@ -1295,7 +1310,8 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
        max_link_clock = intel_dp_max_link_rate(intel_dp);
        max_lanes = intel_dp_max_lane_count(intel_dp);
 
-       max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+       max_rate = intel_dp_max_link_data_rate(intel_dp,
+                                              max_link_clock, max_lanes);
        mode_rate = intel_dp_link_required(mode->clock, min_bpp);
 
        ret = drm_modeset_lock(&mgr->base.lock, ctx);
@@ -1542,6 +1558,8 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
        intel_connector->port = port;
        drm_dp_mst_get_port_malloc(port);
 
+       intel_dp_init_modeset_retry_work(intel_connector);
+
        intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
        intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
        intel_connector->dp.dsc_hblank_expansion_quirk =
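For intuition on the bound applied by intel_dp_max_link_data_rate() in the mode_valid hunk above: the usable payload rate is the per-lane link rate times the lane count, reduced by channel-coding overhead (8b/10b at HBR rates), and - new with this series - further capped by the BW granted to a DP tunnel when BW allocation mode is enabled. A back-of-the-envelope sketch, not in the driver's exact units:

#include <stdio.h>

int main(void)
{
	long link_rate_mbps = 8100;	/* HBR3, per lane */
	int lanes = 4;

	/* 8b/10b coding: 8 data bits per 10 transmitted bits. */
	long payload_mbps = link_rate_mbps * lanes * 8 / 10;

	printf("max payload: %ld Mb/s\n", payload_mbps);	/* 25920 */
	return 0;
}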
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
new file mode 100644
index 0000000..75d76f9
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
@@ -0,0 +1,811 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_drv.h"
+
+#include <drm/display/drm_dp_tunnel.h>
+
+#include "intel_atomic.h"
+#include "intel_display_limits.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_dp_link_training.h"
+#include "intel_dp_mst.h"
+#include "intel_dp_tunnel.h"
+#include "intel_link_bw.h"
+
+struct intel_dp_tunnel_inherited_state {
+       struct drm_dp_tunnel_ref ref[I915_MAX_PIPES];
+};
+
+/**
+ * intel_dp_tunnel_disconnect - Disconnect a DP tunnel from a port
+ * @intel_dp: DP port object the tunnel is connected to
+ *
+ * Disconnect a DP tunnel from @intel_dp, destroying any related state. This
+ * should be called after detecting a sink-disconnect event from the port.
+ */
+void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp)
+{
+       drm_dp_tunnel_destroy(intel_dp->tunnel);
+       intel_dp->tunnel = NULL;
+}
+
+/**
+ * intel_dp_tunnel_destroy - Destroy a DP tunnel
+ * @intel_dp: DP port object the tunnel is connected to
+ *
+ * Destroy a DP tunnel connected to @intel_dp, after disabling the BW
+ * allocation mode on the tunnel. This should be called while destroying the
+ * port.
+ */
+void intel_dp_tunnel_destroy(struct intel_dp *intel_dp)
+{
+       if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
+               drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);
+
+       intel_dp_tunnel_disconnect(intel_dp);
+}
+
+static int kbytes_to_mbits(int kbytes)
+{
+       return DIV_ROUND_UP(kbytes * 8, 1000);
+}
+
+static int get_current_link_bw(struct intel_dp *intel_dp,
+                              bool *below_dprx_bw)
+{
+       int rate = intel_dp_max_common_rate(intel_dp);
+       int lane_count = intel_dp_max_common_lane_count(intel_dp);
+       int bw;
+
+       bw = intel_dp_max_link_data_rate(intel_dp, rate, lane_count);
+       *below_dprx_bw = bw < drm_dp_max_dprx_data_rate(rate, lane_count);
+
+       return bw;
+}
+
+static int update_tunnel_state(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       bool old_bw_below_dprx;
+       bool new_bw_below_dprx;
+       int old_bw;
+       int new_bw;
+       int ret;
+
+       old_bw = get_current_link_bw(intel_dp, &old_bw_below_dprx);
+
+       ret = drm_dp_tunnel_update_state(intel_dp->tunnel);
+       if (ret < 0) {
+               drm_dbg_kms(&i915->drm,
+                           "[DPTUN %s][ENCODER:%d:%s] State update failed (err %pe)\n",
+                           drm_dp_tunnel_name(intel_dp->tunnel),
+                           encoder->base.base.id, encoder->base.name,
+                           ERR_PTR(ret));
+
+               return ret;
+       }
+
+       if (ret == 0 ||
+           !drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel))
+               return 0;
+
+       intel_dp_update_sink_caps(intel_dp);
+
+       new_bw = get_current_link_bw(intel_dp, &new_bw_below_dprx);
+
+       /* Suppress the notification if the mode list can't change due to bw. */
+       if (old_bw_below_dprx == new_bw_below_dprx &&
+           !new_bw_below_dprx)
+               return 0;
+
+       drm_dbg_kms(&i915->drm,
+                   "[DPTUN %s][ENCODER:%d:%s] Notify users about BW change: %d -> %d\n",
+                   drm_dp_tunnel_name(intel_dp->tunnel),
+                   encoder->base.base.id, encoder->base.name,
+                   kbytes_to_mbits(old_bw), kbytes_to_mbits(new_bw));
+
+       return 1;
+}
+
+/*
+ * Allocate the BW for a tunnel on a DP connector/port if the connector/port
+ * was already active when detecting the tunnel. The allocated BW must be
+ * freed by the next atomic modeset, which stores the BW in
+ * intel_atomic_state::inherited_dp_tunnels and calls
+ * intel_dp_tunnel_atomic_free_bw().
+ */
+static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pipe_mask)
+{
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       struct intel_crtc *crtc;
+       int tunnel_bw = 0;
+       int err;
+
+       for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
+               const struct intel_crtc_state *crtc_state =
+                       to_intel_crtc_state(crtc->base.state);
+               int stream_bw = intel_dp_config_required_rate(crtc_state);
+
+               tunnel_bw += stream_bw;
+
+               drm_dbg_kms(&i915->drm,
+                           "[DPTUN %s][ENCODER:%d:%s][CRTC:%d:%s] Initial BW for stream %d: %d/%d Mb/s\n",
+                           drm_dp_tunnel_name(intel_dp->tunnel),
+                           encoder->base.base.id, encoder->base.name,
+                           crtc->base.base.id, crtc->base.name,
+                           crtc->pipe,
+                           kbytes_to_mbits(stream_bw), kbytes_to_mbits(tunnel_bw));
+       }
+
+       err = drm_dp_tunnel_alloc_bw(intel_dp->tunnel, tunnel_bw);
+       if (err) {
+               drm_dbg_kms(&i915->drm,
+                           "[DPTUN %s][ENCODER:%d:%s] Initial BW allocation failed (err %pe)\n",
+                           drm_dp_tunnel_name(intel_dp->tunnel),
+                           encoder->base.base.id, encoder->base.name,
+                           ERR_PTR(err));
+
+               return err;
+       }
+
+       return update_tunnel_state(intel_dp);
+}
+
+static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp,
+                                     struct drm_modeset_acquire_ctx *ctx)
+{
+       u8 pipe_mask;
+       int err;
+
+       err = intel_dp_get_active_pipes(intel_dp, ctx, &pipe_mask);
+       if (err)
+               return err;
+
+       return allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
+}
+
+static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
+{
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       struct drm_dp_tunnel *tunnel;
+       int ret;
+
+       tunnel = drm_dp_tunnel_detect(i915->display.dp_tunnel_mgr,
+                                     &intel_dp->aux);
+       if (IS_ERR(tunnel))
+               return PTR_ERR(tunnel);
+
+       intel_dp->tunnel = tunnel;
+
+       ret = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
+       if (ret) {
+               if (ret == -EOPNOTSUPP)
+                       return 0;
+
+               drm_dbg_kms(&i915->drm,
+                           "[DPTUN %s][ENCODER:%d:%s] Failed to enable BW allocation mode (ret %pe)\n",
+                           drm_dp_tunnel_name(intel_dp->tunnel),
+                           encoder->base.base.id, encoder->base.name,
+                           ERR_PTR(ret));
+
+               /* Keep the tunnel with BWA disabled */
+               return 0;
+       }
+
+       ret = allocate_initial_tunnel_bw(intel_dp, ctx);
+       if (ret < 0)
+               intel_dp_tunnel_destroy(intel_dp);
+
+       return ret;
+}
+
+/**
+ * intel_dp_tunnel_detect - Detect a DP tunnel on a port
+ * @intel_dp: DP port object
+ * @ctx: lock context acquired by the connector detection handler
+ *
+ * Detect a DP tunnel on the @intel_dp port, enabling the BW allocation mode
+ * on it if supported and allocating the BW required on an already active port.
+ * The BW allocated this way must be freed by the next atomic modeset calling
+ * intel_dp_tunnel_atomic_free_bw().
+ *
+ * If @intel_dp already has a tunnel detected on it, update the tunnel's state
+ * wrt. its support for BW allocation mode and the available BW via the
+ * tunnel. If the tunnel's state change requires this - for instance the
+ * tunnel's group ID has changed - the tunnel will be dropped and recreated.
+ *
+ * Return 0 in case of success - after any tunnel detected and added to
+ * @intel_dp - or 1 in case the BW on an already existing tunnel has changed
+ * in a way that requires notifying user space.
+ */
+int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
+{
+       int ret;
+
+       if (intel_dp_is_edp(intel_dp))
+               return 0;
+
+       if (intel_dp->tunnel) {
+               ret = update_tunnel_state(intel_dp);
+               if (ret >= 0)
+                       return ret;
+
+               /* Try to recreate the tunnel after an update error. */
+               intel_dp_tunnel_destroy(intel_dp);
+       }
+
+       return detect_new_tunnel(intel_dp, ctx);
+}
+
+/**
+ * intel_dp_tunnel_bw_alloc_is_enabled - Query the BW allocation support on a tunnel
+ * @intel_dp: DP port object
+ *
+ * Query whether a DP tunnel is connected on @intel_dp and the tunnel supports
+ * the BW allocation mode.
+ *
+ * Returns %true if the BW allocation mode is supported on @intel_dp.
+ */
+bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp)
+{
+       return drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel);
+}
+
+/**
+ * intel_dp_tunnel_suspend - Suspend a DP tunnel connected on a port
+ * @intel_dp: DP port object
+ *
+ * Suspend a DP tunnel on @intel_dp with BW allocation mode enabled on it.
+ */
+void intel_dp_tunnel_suspend(struct intel_dp *intel_dp)
+{
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+       struct intel_connector *connector = intel_dp->attached_connector;
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+       if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
+               return;
+
+       drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n",
+                   drm_dp_tunnel_name(intel_dp->tunnel),
+                   connector->base.base.id, connector->base.name,
+                   encoder->base.base.id, encoder->base.name);
+
+       drm_dp_tunnel_disable_bw_alloc(intel_dp->tunnel);
+
+       intel_dp->tunnel_suspended = true;
+}
+
+/**
+ * intel_dp_tunnel_resume - Resume a DP tunnel connected on a port
+ * @intel_dp: DP port object
+ * @crtc_state: CRTC state
+ * @dpcd_updated: the DPCD DPRX capabilities got updated during resume
+ *
+ * Resume a DP tunnel on @intel_dp with BW allocation mode enabled on it.
+ */
+void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
+                           const struct intel_crtc_state *crtc_state,
+                           bool dpcd_updated)
+{
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+       struct intel_connector *connector = intel_dp->attached_connector;
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       u8 dpcd[DP_RECEIVER_CAP_SIZE];
+       u8 pipe_mask;
+       int err = 0;
+
+       if (!intel_dp->tunnel_suspended)
+               return;
+
+       intel_dp->tunnel_suspended = false;
+
+       drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n",
+                   drm_dp_tunnel_name(intel_dp->tunnel),
+                   connector->base.base.id, connector->base.name,
+                   encoder->base.base.id, encoder->base.name);
+
+       /*
+        * The TBT Connection Manager requires the GFX driver to read out
+        * the sink's DPRX caps to be able to service any BW requests later.
+        * During resume, overwriting the caps cached in @intel_dp before
+        * suspend must be avoided, so only do a dummy read here, unless the
+        * capabilities were already updated during resume.
+        */
+       if (!dpcd_updated) {
+               err = intel_dp_read_dprx_caps(intel_dp, dpcd);
+
+               if (err) {
+                       drm_dp_tunnel_set_io_error(intel_dp->tunnel);
+                       goto out_err;
+               }
+       }
+
+       err = drm_dp_tunnel_enable_bw_alloc(intel_dp->tunnel);
+       if (err)
+               goto out_err;
+
+       pipe_mask = 0;
+       if (crtc_state) {
+               struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+               /* TODO: Add support for MST */
+               pipe_mask |= BIT(crtc->pipe);
+       }
+
+       err = allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
+       if (err < 0)
+               goto out_err;
+
+       return;
+
+out_err:
+       drm_dbg_kms(&i915->drm,
+                   "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and redect it (err %pe)\n",
+                   drm_dp_tunnel_name(intel_dp->tunnel),
+                   connector->base.base.id, connector->base.name,
+                   encoder->base.base.id, encoder->base.name,
+                   ERR_PTR(err));
+}
+
+static struct drm_dp_tunnel *
+get_inherited_tunnel(struct intel_atomic_state *state, struct intel_crtc *crtc)
+{
+       if (!state->inherited_dp_tunnels)
+               return NULL;
+
+       return state->inherited_dp_tunnels->ref[crtc->pipe].tunnel;
+}
+
+static int
+add_inherited_tunnel(struct intel_atomic_state *state,
+                    struct drm_dp_tunnel *tunnel,
+                    struct intel_crtc *crtc)
+{
+       struct drm_i915_private *i915 = to_i915(state->base.dev);
+       struct drm_dp_tunnel *old_tunnel;
+
+       old_tunnel = get_inherited_tunnel(state, crtc);
+       if (old_tunnel) {
+               drm_WARN_ON(&i915->drm, old_tunnel != tunnel);
+               return 0;
+       }
+
+       if (!state->inherited_dp_tunnels) {
+               state->inherited_dp_tunnels = kzalloc(sizeof(*state->inherited_dp_tunnels),
+                                                     GFP_KERNEL);
+               if (!state->inherited_dp_tunnels)
+                       return -ENOMEM;
+       }
+
+       drm_dp_tunnel_ref_get(tunnel, &state->inherited_dp_tunnels->ref[crtc->pipe]);
+
+       return 0;
+}
+
+static int check_inherited_tunnel_state(struct intel_atomic_state *state,
+                                       struct intel_dp *intel_dp,
+                                       const struct intel_digital_connector_state *old_conn_state)
+{
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       struct intel_connector *connector =
+               to_intel_connector(old_conn_state->base.connector);
+       struct intel_crtc *old_crtc;
+       const struct intel_crtc_state *old_crtc_state;
+
+       /*
+        * If a BWA tunnel gets detected only after the corresponding
+        * connector was already enabled - either without a BWA tunnel, or
+        * with a different BWA tunnel that was removed meanwhile - the old
+        * CRTC state won't contain the state of the current tunnel. Such a
+        * tunnel still has reserved BW which needs to be released, so add
+        * the state for these inherited tunnels separately, only to this
+        * atomic state.
+        */
+       if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
+               return 0;
+
+       if (!old_conn_state->base.crtc)
+               return 0;
+
+       old_crtc = to_intel_crtc(old_conn_state->base.crtc);
+       old_crtc_state = intel_atomic_get_old_crtc_state(state, old_crtc);
+
+       if (!old_crtc_state->hw.active ||
+           old_crtc_state->dp_tunnel_ref.tunnel == intel_dp->tunnel)
+               return 0;
+
+       drm_dbg_kms(&i915->drm,
+                   "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding state for inherited tunnel %p\n",
+                   drm_dp_tunnel_name(intel_dp->tunnel),
+                   connector->base.base.id, connector->base.name,
+                   encoder->base.base.id, encoder->base.name,
+                   old_crtc->base.base.id, old_crtc->base.name,
+                   intel_dp->tunnel);
+
+       return add_inherited_tunnel(state, intel_dp->tunnel, old_crtc);
+}
+
+/**
+ * intel_dp_tunnel_atomic_cleanup_inherited_state - Free any inherited DP tunnel state
+ * @state: Atomic state
+ *
+ * Free the inherited DP tunnel state in @state.
+ */
+void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state)
+{
+       enum pipe pipe;
+
+       if (!state->inherited_dp_tunnels)
+               return;
+
+       for_each_pipe(to_i915(state->base.dev), pipe)
+               if (state->inherited_dp_tunnels->ref[pipe].tunnel)
+                       drm_dp_tunnel_ref_put(&state->inherited_dp_tunnels->ref[pipe]);
+
+       kfree(state->inherited_dp_tunnels);
+       state->inherited_dp_tunnels = NULL;
+}
+
+static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *state,
+                                                 struct drm_dp_tunnel *tunnel)
+{
+       struct drm_i915_private *i915 = to_i915(state->base.dev);
+       u32 pipe_mask;
+       int err;
+
+       err = drm_dp_tunnel_atomic_get_group_streams_in_state(&state->base,
+                                                             tunnel, &pipe_mask);
+       if (err)
+               return err;
+
+       drm_WARN_ON(&i915->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1));
+
+       return intel_modeset_pipes_in_mask_early(state, "DPTUN", pipe_mask);
+}
+
+/**
+ * intel_dp_tunnel_atomic_add_state_for_crtc - Add CRTC specific DP tunnel state
+ * @state: Atomic state
+ * @crtc: CRTC to add the tunnel state for
+ *
+ * Add the DP tunnel state for @crtc if the CRTC (aka DP tunnel stream) is enabled
+ * via a DP tunnel.
+ *
+ * Return 0 in case of success, a negative error code otherwise.
+ */
+int intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state,
+                                             struct intel_crtc *crtc)
+{
+       const struct intel_crtc_state *new_crtc_state =
+               intel_atomic_get_new_crtc_state(state, crtc);
+       const struct drm_dp_tunnel_state *tunnel_state;
+       struct drm_dp_tunnel *tunnel = new_crtc_state->dp_tunnel_ref.tunnel;
+
+       if (!tunnel)
+               return 0;
+
+       tunnel_state = drm_dp_tunnel_atomic_get_state(&state->base, tunnel);
+       if (IS_ERR(tunnel_state))
+               return PTR_ERR(tunnel_state);
+
+       return 0;
+}
+
+static int check_group_state(struct intel_atomic_state *state,
+                            struct intel_dp *intel_dp,
+                            struct intel_connector *connector,
+                            struct intel_crtc *crtc)
+{
+       struct drm_i915_private *i915 = to_i915(state->base.dev);
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       const struct intel_crtc_state *crtc_state =
+               intel_atomic_get_new_crtc_state(state, crtc);
+
+       if (!crtc_state->dp_tunnel_ref.tunnel)
+               return 0;
+
+       drm_dbg_kms(&i915->drm,
+                   "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding group state for tunnel %p\n",
+                   drm_dp_tunnel_name(intel_dp->tunnel),
+                   connector->base.base.id, connector->base.name,
+                   encoder->base.base.id, encoder->base.name,
+                   crtc->base.base.id, crtc->base.name,
+                   crtc_state->dp_tunnel_ref.tunnel);
+
+       return intel_dp_tunnel_atomic_add_group_state(state, crtc_state->dp_tunnel_ref.tunnel);
+}
+
+/**
+ * intel_dp_tunnel_atomic_check_state - Check a connector's DP tunnel specific state
+ * @state: Atomic state
+ * @intel_dp: DP port object
+ * @connector: connector using @intel_dp
+ *
+ * Check and add the DP tunnel atomic state for @intel_dp/@connector to
+ * @state, if there is a DP tunnel detected on @intel_dp with BW allocation
+ * mode enabled on it, or if @intel_dp/@connector was previously enabled via a
+ * DP tunnel.
+ *
+ * Returns 0 in case of success, or a negative error code otherwise.
+ */
+int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state,
+                                      struct intel_dp *intel_dp,
+                                      struct intel_connector *connector)
+{
+       const struct intel_digital_connector_state *old_conn_state =
+               intel_atomic_get_old_connector_state(state, connector);
+       const struct intel_digital_connector_state *new_conn_state =
+               intel_atomic_get_new_connector_state(state, connector);
+       int err;
+
+       if (old_conn_state->base.crtc) {
+               err = check_group_state(state, intel_dp, connector,
+                                       to_intel_crtc(old_conn_state->base.crtc));
+               if (err)
+                       return err;
+       }
+
+       if (new_conn_state->base.crtc &&
+           new_conn_state->base.crtc != old_conn_state->base.crtc) {
+               err = check_group_state(state, intel_dp, connector,
+                                       to_intel_crtc(new_conn_state->base.crtc));
+               if (err)
+                       return err;
+       }
+
+       return check_inherited_tunnel_state(state, intel_dp, old_conn_state);
+}
+
+/**
+ * intel_dp_tunnel_atomic_compute_stream_bw - Compute the BW required by a DP tunnel stream
+ * @state: Atomic state
+ * @intel_dp: DP port object
+ * @connector: connector using @intel_dp
+ * @crtc_state: state of CRTC of the given DP tunnel stream
+ *
+ * Compute the required BW of CRTC (aka DP tunnel stream), storing this BW to
+ * the DP tunnel state containing the stream in @state. Before re-calculating a
+ * BW requirement in @crtc_state, the old BW requirement computed by this
+ * function must be cleared by calling intel_dp_tunnel_atomic_clear_stream_bw().
+ *
+ * Returns 0 in case of success, a negative error code otherwise.
+ */
+int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
+                                            struct intel_dp *intel_dp,
+                                            const struct intel_connector *connector,
+                                            struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *i915 = to_i915(state->base.dev);
+       struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+       int required_rate = intel_dp_config_required_rate(crtc_state);
+       int ret;
+
+       if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
+               return 0;
+
+       drm_dbg_kms(&i915->drm,
+                   "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Stream %d required BW %d Mb/s\n",
+                   drm_dp_tunnel_name(intel_dp->tunnel),
+                   connector->base.base.id, connector->base.name,
+                   encoder->base.base.id, encoder->base.name,
+                   crtc->base.base.id, crtc->base.name,
+                   crtc->pipe,
+                   kbytes_to_mbits(required_rate));
+
+       ret = drm_dp_tunnel_atomic_set_stream_bw(&state->base, intel_dp->tunnel,
+                                                crtc->pipe, required_rate);
+       if (ret < 0)
+               return ret;
+
+       drm_dp_tunnel_ref_get(intel_dp->tunnel,
+                             &crtc_state->dp_tunnel_ref);
+
+       return 0;
+}
+
+/**
+ * intel_dp_tunnel_atomic_clear_stream_bw - Clear any DP tunnel stream BW requirement
+ * @state: Atomic state
+ * @crtc_state: state of CRTC of the given DP tunnel stream
+ *
+ * Clear any DP tunnel stream BW requirement set by
+ * intel_dp_tunnel_atomic_compute_stream_bw().
+ */
+void intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state,
+                                           struct intel_crtc_state *crtc_state)
+{
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+       if (!crtc_state->dp_tunnel_ref.tunnel)
+               return;
+
+       drm_dp_tunnel_atomic_set_stream_bw(&state->base,
+                                          crtc_state->dp_tunnel_ref.tunnel,
+                                          crtc->pipe, 0);
+       drm_dp_tunnel_ref_put(&crtc_state->dp_tunnel_ref);
+}
+
+/**
+ * intel_dp_tunnel_atomic_check_link - Check the DP tunnel atomic state
+ * @state: intel atomic state
+ * @limits: link BW limits
+ *
+ * Check the link configuration for all DP tunnels in @state. If the
+ * configuration is invalid @limits will be updated if possible to
+ * reduce the total BW, after which the configuration for all CRTCs in
+ * @state must be recomputed with the updated @limits.
+ *
+ * Returns:
+ *   - 0 if the configuration is valid
+ *   - %-EAGAIN, if the configuration is invalid and @limits got updated
+ *     with fallback values with which the configuration of all CRTCs in
+ *     @state must be recomputed
+ *   - Other negative error, if the configuration is invalid without a
+ *     fallback possibility, or the check failed for another reason
+ */
+int intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state,
+                                     struct intel_link_bw_limits *limits)
+{
+       u32 failed_stream_mask;
+       int err;
+
+       err = drm_dp_tunnel_atomic_check_stream_bws(&state->base,
+                                                   &failed_stream_mask);
+       if (err != -ENOSPC)
+               return err;
+
+       err = intel_link_bw_reduce_bpp(state, limits,
+                                      failed_stream_mask, "DP tunnel link BW");
+
+       return err ? : -EAGAIN;
+}
+
+static void atomic_decrease_bw(struct intel_atomic_state *state)
+{
+       struct intel_crtc *crtc;
+       const struct intel_crtc_state *old_crtc_state;
+       const struct intel_crtc_state *new_crtc_state;
+       int i;
+
+       for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+               const struct drm_dp_tunnel_state *new_tunnel_state;
+               struct drm_dp_tunnel *tunnel;
+               int old_bw;
+               int new_bw;
+
+               if (!intel_crtc_needs_modeset(new_crtc_state))
+                       continue;
+
+               tunnel = get_inherited_tunnel(state, crtc);
+               if (!tunnel)
+                       tunnel = old_crtc_state->dp_tunnel_ref.tunnel;
+
+               if (!tunnel)
+                       continue;
+
+               old_bw = drm_dp_tunnel_get_allocated_bw(tunnel);
+
+               new_tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);
+               new_bw = drm_dp_tunnel_atomic_get_required_bw(new_tunnel_state);
+
+               if (new_bw >= old_bw)
+                       continue;
+
+               drm_dp_tunnel_alloc_bw(tunnel, new_bw);
+       }
+}
+
+static void queue_retry_work(struct intel_atomic_state *state,
+                            struct drm_dp_tunnel *tunnel,
+                            const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *i915 = to_i915(state->base.dev);
+       struct intel_encoder *encoder;
+
+       encoder = intel_get_crtc_new_encoder(state, crtc_state);
+
+       if (!intel_digital_port_connected(encoder))
+               return;
+
+       drm_dbg_kms(&i915->drm,
+                   "[DPTUN %s][ENCODER:%d:%s] BW allocation failed on a connected sink\n",
+                   drm_dp_tunnel_name(tunnel),
+                   encoder->base.base.id,
+                   encoder->base.name);
+
+       intel_dp_queue_modeset_retry_for_link(state, encoder, crtc_state);
+}
+
+static void atomic_increase_bw(struct intel_atomic_state *state)
+{
+       struct intel_crtc *crtc;
+       const struct intel_crtc_state *crtc_state;
+       int i;
+
+       for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+               struct drm_dp_tunnel_state *tunnel_state;
+               struct drm_dp_tunnel *tunnel = crtc_state->dp_tunnel_ref.tunnel;
+               int bw;
+
+               if (!intel_crtc_needs_modeset(crtc_state))
+                       continue;
+
+               if (!tunnel)
+                       continue;
+
+               tunnel_state = drm_dp_tunnel_atomic_get_new_state(&state->base, tunnel);
+
+               bw = drm_dp_tunnel_atomic_get_required_bw(tunnel_state);
+
+               if (drm_dp_tunnel_alloc_bw(tunnel, bw) != 0)
+                       queue_retry_work(state, tunnel, crtc_state);
+       }
+}
+
+/**
+ * intel_dp_tunnel_atomic_alloc_bw - Allocate the BW for all modeset tunnels
+ * @state: Atomic state
+ *
+ * Allocate the required BW for all tunnels in @state.
+ */
+void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state)
+{
+       atomic_decrease_bw(state);
+       atomic_increase_bw(state);
+}
+
+/**
+ * intel_dp_tunnel_mgr_init - Initialize the DP tunnel manager
+ * @i915: i915 device object
+ *
+ * Initialize the DP tunnel manager. The tunnel manager will support the
+ * detection/management of DP tunnels on all DP connectors, so the function
+ * must be called only after all these connectors have been registered.
+ *
+ * Return 0 in case of success, a negative error code otherwise.
+ */
+int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915)
+{
+       struct drm_dp_tunnel_mgr *tunnel_mgr;
+       struct drm_connector_list_iter connector_list_iter;
+       struct intel_connector *connector;
+       int dp_connectors = 0;
+
+       drm_connector_list_iter_begin(&i915->drm, &connector_list_iter);
+       for_each_intel_connector_iter(connector, &connector_list_iter) {
+               if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+                       continue;
+
+               dp_connectors++;
+       }
+       drm_connector_list_iter_end(&connector_list_iter);
+
+       tunnel_mgr = drm_dp_tunnel_mgr_create(&i915->drm, dp_connectors);
+       if (IS_ERR(tunnel_mgr))
+               return PTR_ERR(tunnel_mgr);
+
+       i915->display.dp_tunnel_mgr = tunnel_mgr;
+
+       return 0;
+}
+
+/**
+ * intel_dp_tunnel_mgr_cleanup - Clean up the DP tunnel manager state
+ * @i915: i915 device object
+ *
+ * Clean up the DP tunnel manager state.
+ */
+void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915)
+{
+       drm_dp_tunnel_mgr_destroy(i915->display.dp_tunnel_mgr);
+       i915->display.dp_tunnel_mgr = NULL;
+}
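The ordering in intel_dp_tunnel_atomic_alloc_bw() above - decrease first, then increase - matters because streams in the same tunnel group share a BW pool: shrink requests must land first so the freed BW is available to the grow requests in the same commit. A standalone sketch of the idea (the stream struct and units are illustrative, not the driver's):

#include <stdio.h>

struct stream {
	int old_bw;	/* currently allocated, arbitrary units */
	int new_bw;	/* required by the new state */
};

static void alloc_bw(struct stream *s, int bw)
{
	printf("realloc %d -> %d\n", s->old_bw, bw);
	s->old_bw = bw;
}

static void atomic_alloc_bw(struct stream *streams, int n)
{
	int i;

	/* Phase 1: decreases only, freeing BW back to the group. */
	for (i = 0; i < n; i++)
		if (streams[i].new_bw < streams[i].old_bw)
			alloc_bw(&streams[i], streams[i].new_bw);

	/* Phase 2: increases, now that the freed BW is available. */
	for (i = 0; i < n; i++)
		if (streams[i].new_bw > streams[i].old_bw)
			alloc_bw(&streams[i], streams[i].new_bw);
}

int main(void)
{
	struct stream streams[] = { { 600, 200 }, { 200, 500 } };

	atomic_alloc_bw(streams, 2);
	return 0;
}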
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
new file mode 100644
index 0000000..08b2cba
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_TUNNEL_H__
+#define __INTEL_DP_TUNNEL_H__
+
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct drm_connector_state;
+struct drm_modeset_acquire_ctx;
+
+struct intel_atomic_state;
+struct intel_connector;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_encoder;
+struct intel_link_bw_limits;
+
+#if defined(CONFIG_DRM_I915_DP_TUNNEL) && defined(I915)
+
+int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx);
+void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp);
+void intel_dp_tunnel_destroy(struct intel_dp *intel_dp);
+void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
+                           const struct intel_crtc_state *crtc_state,
+                           bool dpcd_updated);
+void intel_dp_tunnel_suspend(struct intel_dp *intel_dp);
+
+bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp);
+
+void
+intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state);
+
+int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
+                                            struct intel_dp *intel_dp,
+                                            const struct intel_connector *connector,
+                                            struct intel_crtc_state *crtc_state);
+void intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state,
+                                           struct intel_crtc_state *crtc_state);
+
+int intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state,
+                                             struct intel_crtc *crtc);
+int intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state,
+                                     struct intel_link_bw_limits *limits);
+int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state,
+                                      struct intel_dp *intel_dp,
+                                      struct intel_connector *connector);
+
+void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state);
+
+int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915);
+void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915);
+
+#else
+
+static inline int
+intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline void intel_dp_tunnel_disconnect(struct intel_dp *intel_dp) {}
+static inline void intel_dp_tunnel_destroy(struct intel_dp *intel_dp) {}
+static inline void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
+                                         const struct intel_crtc_state *crtc_state,
+                                         bool dpcd_updated) {}
+static inline void intel_dp_tunnel_suspend(struct intel_dp *intel_dp) {}
+
+static inline bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp)
+{
+       return false;
+}
+
+static inline void
+intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state) {}
+
+static inline int
+intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
+                                        struct intel_dp *intel_dp,
+                                        const struct intel_connector *connector,
+                                        struct intel_crtc_state *crtc_state)
+{
+       return 0;
+}
+
+static inline void
+intel_dp_tunnel_atomic_clear_stream_bw(struct intel_atomic_state *state,
+                                      struct intel_crtc_state *crtc_state) {}
+
+static inline int
+intel_dp_tunnel_atomic_add_state_for_crtc(struct intel_atomic_state *state,
+                                         struct intel_crtc *crtc)
+{
+       return 0;
+}
+
+static inline int
+intel_dp_tunnel_atomic_check_link(struct intel_atomic_state *state,
+                                 struct intel_link_bw_limits *limits)
+{
+       return 0;
+}
+
+static inline int
+intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state,
+                                  struct intel_dp *intel_dp,
+                                  struct intel_connector *connector)
+{
+       return 0;
+}
+
+static inline void
+intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state) {}
+
+static inline int
+intel_dp_tunnel_mgr_init(struct drm_i915_private *i915)
+{
+       return 0;
+}
+
+static inline void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915) {}
+
+#endif /* CONFIG_DRM_I915_DP_TUNNEL */
+
+#endif /* __INTEL_DP_TUNNEL_H__ */
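The #else branch above is the usual Kconfig stub pattern: call sites stay free of #ifdefs because disabled builds get static inline no-ops and constant returns. A toy standalone version of the pattern (all names invented; -EOPNOTSUPP assumes a Linux errno.h):

#include <errno.h>
#include <stdio.h>

/* Flip this to mimic enabling CONFIG_DRM_I915_DP_TUNNEL. */
/* #define HAVE_DP_TUNNEL 1 */

struct dp_port { int id; };

#ifdef HAVE_DP_TUNNEL
int tunnel_detect(struct dp_port *port);	/* real version, built elsewhere */
#else
static inline int tunnel_detect(struct dp_port *port)
{
	(void)port;
	return -EOPNOTSUPP;	/* mirrors the stub above */
}
#endif

int main(void)
{
	struct dp_port port = { .id = 0 };

	/* Call sites need no #ifdef. */
	printf("tunnel_detect() = %d\n", tunnel_detect(&port));
	return 0;
}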
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index e7e0a4cf9f93ee830606704ea3441d99594fec19..ff480f171f75a2602673e49c015a1f0441e33a53 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -109,6 +109,8 @@ struct intel_dpll_mgr {
        void (*update_ref_clks)(struct drm_i915_private *i915);
        void (*dump_hw_state)(struct drm_i915_private *i915,
                              const struct intel_dpll_hw_state *hw_state);
+       bool (*compare_hw_state)(const struct intel_dpll_hw_state *a,
+                                const struct intel_dpll_hw_state *b);
 };
 
 static void
@@ -644,6 +646,15 @@ static void ibx_dump_hw_state(struct drm_i915_private *i915,
                    hw_state->fp1);
 }
 
+static bool ibx_compare_hw_state(const struct intel_dpll_hw_state *a,
+                                const struct intel_dpll_hw_state *b)
+{
+       return a->dpll == b->dpll &&
+               a->dpll_md == b->dpll_md &&
+               a->fp0 == b->fp0 &&
+               a->fp1 == b->fp1;
+}
+
 static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
        .enable = ibx_pch_dpll_enable,
        .disable = ibx_pch_dpll_disable,
@@ -662,6 +673,7 @@ static const struct intel_dpll_mgr pch_pll_mgr = {
        .get_dplls = ibx_get_dpll,
        .put_dplls = intel_put_dpll,
        .dump_hw_state = ibx_dump_hw_state,
+       .compare_hw_state = ibx_compare_hw_state,
 };
 
 static void hsw_ddi_wrpll_enable(struct drm_i915_private *i915,
@@ -1220,6 +1232,13 @@ static void hsw_dump_hw_state(struct drm_i915_private *i915,
                    hw_state->wrpll, hw_state->spll);
 }
 
+static bool hsw_compare_hw_state(const struct intel_dpll_hw_state *a,
+                                const struct intel_dpll_hw_state *b)
+{
+       return a->wrpll == b->wrpll &&
+               a->spll == b->spll;
+}
+
 static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
        .enable = hsw_ddi_wrpll_enable,
        .disable = hsw_ddi_wrpll_disable,
@@ -1278,6 +1297,7 @@ static const struct intel_dpll_mgr hsw_pll_mgr = {
        .put_dplls = intel_put_dpll,
        .update_ref_clks = hsw_update_dpll_ref_clks,
        .dump_hw_state = hsw_dump_hw_state,
+       .compare_hw_state = hsw_compare_hw_state,
 };
 
 struct skl_dpll_regs {
@@ -1929,6 +1949,14 @@ static void skl_dump_hw_state(struct drm_i915_private *i915,
                      hw_state->cfgcr2);
 }
 
+static bool skl_compare_hw_state(const struct intel_dpll_hw_state *a,
+                                const struct intel_dpll_hw_state *b)
+{
+       return a->ctrl1 == b->ctrl1 &&
+               a->cfgcr1 == b->cfgcr1 &&
+               a->cfgcr2 == b->cfgcr2;
+}
+
 static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
        .enable = skl_ddi_pll_enable,
        .disable = skl_ddi_pll_disable,
@@ -1959,6 +1987,7 @@ static const struct intel_dpll_mgr skl_pll_mgr = {
        .put_dplls = intel_put_dpll,
        .update_ref_clks = skl_update_dpll_ref_clks,
        .dump_hw_state = skl_dump_hw_state,
+       .compare_hw_state = skl_compare_hw_state,
 };
 
 static void bxt_ddi_pll_enable(struct drm_i915_private *i915,
@@ -2392,6 +2421,21 @@ static void bxt_dump_hw_state(struct drm_i915_private *i915,
                    hw_state->pcsdw12);
 }
 
+static bool bxt_compare_hw_state(const struct intel_dpll_hw_state *a,
+                                const struct intel_dpll_hw_state *b)
+{
+       return a->ebb0 == b->ebb0 &&
+               a->ebb4 == b->ebb4 &&
+               a->pll0 == b->pll0 &&
+               a->pll1 == b->pll1 &&
+               a->pll2 == b->pll2 &&
+               a->pll3 == b->pll3 &&
+               a->pll6 == b->pll6 &&
+               a->pll8 == b->pll8 &&
+               a->pll10 == b->pll10 &&
+               a->pcsdw12 == b->pcsdw12;
+}
+
 static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
        .enable = bxt_ddi_pll_enable,
        .disable = bxt_ddi_pll_disable,
@@ -2413,6 +2457,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
        .put_dplls = intel_put_dpll,
        .update_ref_clks = bxt_update_dpll_ref_clks,
        .dump_hw_state = bxt_dump_hw_state,
+       .compare_hw_state = bxt_compare_hw_state,
 };
 
 static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
@@ -4005,6 +4050,25 @@ static void icl_dump_hw_state(struct drm_i915_private *i915,
                    hw_state->mg_pll_tdc_coldst_bias);
 }
 
+static bool icl_compare_hw_state(const struct intel_dpll_hw_state *a,
+                                const struct intel_dpll_hw_state *b)
+{
+       /* FIXME split combo vs. mg more thoroughly */
+       return a->cfgcr0 == b->cfgcr0 &&
+               a->cfgcr1 == b->cfgcr1 &&
+               a->div0 == b->div0 &&
+               a->mg_refclkin_ctl == b->mg_refclkin_ctl &&
+               a->mg_clktop2_coreclkctl1 == b->mg_clktop2_coreclkctl1 &&
+               a->mg_clktop2_hsclkctl == b->mg_clktop2_hsclkctl &&
+               a->mg_pll_div0 == b->mg_pll_div0 &&
+               a->mg_pll_div1 == b->mg_pll_div1 &&
+               a->mg_pll_lf == b->mg_pll_lf &&
+               a->mg_pll_frac_lock == b->mg_pll_frac_lock &&
+               a->mg_pll_ssc == b->mg_pll_ssc &&
+               a->mg_pll_bias == b->mg_pll_bias &&
+               a->mg_pll_tdc_coldst_bias == b->mg_pll_tdc_coldst_bias;
+}
+
 static const struct intel_shared_dpll_funcs combo_pll_funcs = {
        .enable = combo_pll_enable,
        .disable = combo_pll_disable,
@@ -4046,6 +4110,7 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
        .update_active_dpll = icl_update_active_dpll,
        .update_ref_clks = icl_update_dpll_ref_clks,
        .dump_hw_state = icl_dump_hw_state,
+       .compare_hw_state = icl_compare_hw_state,
 };
 
 static const struct dpll_info ehl_plls[] = {
@@ -4063,6 +4128,7 @@ static const struct intel_dpll_mgr ehl_pll_mgr = {
        .put_dplls = icl_put_dplls,
        .update_ref_clks = icl_update_dpll_ref_clks,
        .dump_hw_state = icl_dump_hw_state,
+       .compare_hw_state = icl_compare_hw_state,
 };
 
 static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
@@ -4094,6 +4160,7 @@ static const struct intel_dpll_mgr tgl_pll_mgr = {
        .update_active_dpll = icl_update_active_dpll,
        .update_ref_clks = icl_update_dpll_ref_clks,
        .dump_hw_state = icl_dump_hw_state,
+       .compare_hw_state = icl_compare_hw_state,
 };
 
 static const struct dpll_info rkl_plls[] = {
@@ -4110,6 +4177,7 @@ static const struct intel_dpll_mgr rkl_pll_mgr = {
        .put_dplls = icl_put_dplls,
        .update_ref_clks = icl_update_dpll_ref_clks,
        .dump_hw_state = icl_dump_hw_state,
+       .compare_hw_state = icl_compare_hw_state,
 };
 
 static const struct dpll_info dg1_plls[] = {
@@ -4127,6 +4195,7 @@ static const struct intel_dpll_mgr dg1_pll_mgr = {
        .put_dplls = icl_put_dplls,
        .update_ref_clks = icl_update_dpll_ref_clks,
        .dump_hw_state = icl_dump_hw_state,
+       .compare_hw_state = icl_compare_hw_state,
 };
 
 static const struct dpll_info adls_plls[] = {
@@ -4144,6 +4213,7 @@ static const struct intel_dpll_mgr adls_pll_mgr = {
        .put_dplls = icl_put_dplls,
        .update_ref_clks = icl_update_dpll_ref_clks,
        .dump_hw_state = icl_dump_hw_state,
+       .compare_hw_state = icl_compare_hw_state,
 };
 
 static const struct dpll_info adlp_plls[] = {
@@ -4166,6 +4236,7 @@ static const struct intel_dpll_mgr adlp_pll_mgr = {
        .update_active_dpll = icl_update_active_dpll,
        .update_ref_clks = icl_update_dpll_ref_clks,
        .dump_hw_state = icl_dump_hw_state,
+       .compare_hw_state = icl_compare_hw_state,
 };
 
 /**
@@ -4458,13 +4529,31 @@ void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
                /* fallback for platforms that don't use the shared dpll
                 * infrastructure
                 */
-               drm_dbg_kms(&i915->drm,
-                           "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
-                           "fp0: 0x%x, fp1: 0x%x\n",
-                           hw_state->dpll,
-                           hw_state->dpll_md,
-                           hw_state->fp0,
-                           hw_state->fp1);
+               ibx_dump_hw_state(i915, hw_state);
+       }
+}
+
+/**
+ * intel_dpll_compare_hw_state - compare the two states
+ * @i915: i915 drm device
+ * @a: first DPLL hw state
+ * @b: second DPLL hw state
+ *
+ * Compare DPLL hw states @a and @b.
+ *
+ * Returns: true if the states are equal, false if they differ
+ */
+bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
+                                const struct intel_dpll_hw_state *a,
+                                const struct intel_dpll_hw_state *b)
+{
+       if (i915->display.dpll.mgr) {
+               return i915->display.dpll.mgr->compare_hw_state(a, b);
+       } else {
+               /* fallback for platforms that don't use the shared dpll
+                * infrastructure
+                */
+               return ibx_compare_hw_state(a, b);
        }
 }
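
The new compare_hw_state hook uses the same vtable-plus-fallback dispatch as dump_hw_state: platforms with a shared-DPLL manager supply a per-platform comparator, everything else falls back to the ibx helper. A minimal standalone C sketch of that dispatch shape (hypothetical, reduced types; not the kernel code):

#include <stdbool.h>
#include <stdio.h>

/* stand-in for intel_dpll_hw_state, reduced to two registers */
struct hw_state { unsigned int cfgcr0, cfgcr1; };

struct dpll_mgr {
	bool (*compare_hw_state)(const struct hw_state *a,
				 const struct hw_state *b);
};

static bool icl_compare(const struct hw_state *a, const struct hw_state *b)
{
	return a->cfgcr0 == b->cfgcr0 && a->cfgcr1 == b->cfgcr1;
}

/* fallback for platforms without the shared-DPLL infrastructure */
static bool ibx_compare(const struct hw_state *a, const struct hw_state *b)
{
	return a->cfgcr0 == b->cfgcr0;
}

static const struct dpll_mgr icl_mgr = { .compare_hw_state = icl_compare };

static bool compare_hw_state(const struct dpll_mgr *mgr,
			     const struct hw_state *a,
			     const struct hw_state *b)
{
	if (mgr)
		return mgr->compare_hw_state(a, b);

	return ibx_compare(a, b);
}

int main(void)
{
	struct hw_state a = { 1, 2 }, b = { 1, 3 };

	printf("icl: %d, fallback: %d\n",
	       compare_hw_state(&icl_mgr, &a, &b),
	       compare_hw_state(NULL, &a, &b));
	return 0;
}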
 
index 616afe861b4670fbe0818e6ffb2423fd8e07e3a2..cc0e1386309dc666753ab6f876f76b63edec868b 100644 (file)
@@ -378,6 +378,9 @@ void intel_dpll_sanitize_state(struct drm_i915_private *i915);
 
 void intel_dpll_dump_hw_state(struct drm_i915_private *i915,
                              const struct intel_dpll_hw_state *hw_state);
+bool intel_dpll_compare_hw_state(struct drm_i915_private *i915,
+                                const struct intel_dpll_hw_state *a,
+                                const struct intel_dpll_hw_state *b);
 enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
 bool intel_dpll_is_combophy(enum intel_dpll_id id);
 
index 6282ec0fc9b45598196a036929118b6d7bc4942f..169ef38ff1883389032b0084c9c88d8f1dc6a758 100644 (file)
@@ -299,6 +299,7 @@ void intel_drrs_crtc_init(struct intel_crtc *crtc)
 static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
 {
        struct intel_crtc *crtc = m->private;
+       struct drm_i915_private *i915 = to_i915(crtc->base.dev);
        const struct intel_crtc_state *crtc_state;
        int ret;
 
@@ -310,6 +311,11 @@ static int intel_drrs_debugfs_status_show(struct seq_file *m, void *unused)
 
        mutex_lock(&crtc->drrs.mutex);
 
+       seq_printf(m, "DRRS capable: %s\n",
+                  str_yes_no(crtc_state->has_drrs ||
+                             HAS_DOUBLE_BUFFERED_M_N(i915) ||
+                             intel_cpu_transcoder_has_m2_n2(i915, crtc_state->cpu_transcoder)));
+
        seq_printf(m, "DRRS enabled: %s\n",
                   str_yes_no(crtc_state->has_drrs));
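
The added debugfs line reports capability separately from enablement: a pipe counts as DRRS-capable if the computed state has DRRS, the hardware double-buffers M/N, or the transcoder carries a second M2/N2 set. A boolean sketch of that predicate (hypothetical flag names):

#include <stdbool.h>
#include <stdio.h>

struct pipe_caps {
	bool has_drrs;		  /* current state enabled DRRS */
	bool double_buffered_m_n; /* hardware can flip M/N seamlessly */
	bool has_m2_n2;		  /* transcoder carries a second M/N set */
};

static bool drrs_capable(const struct pipe_caps *c)
{
	return c->has_drrs || c->double_buffered_m_n || c->has_m2_n2;
}

int main(void)
{
	struct pipe_caps c = { .has_m2_n2 = true };

	printf("DRRS capable: %s\n", drrs_capable(&c) ? "yes" : "no");
	return 0;
}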
 
index a6c7122fd671df14400e891366806d9bd8c317d3..d62e050185e7c3cf4d8ecb4e4ceaabbdc20a0db9 100644 (file)
@@ -325,7 +325,7 @@ static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
 {
        struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
        const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
-       unsigned int latency = skl_watermark_max_latency(i915);
+       unsigned int latency = skl_watermark_max_latency(i915, 0);
        int vblank_start;
 
        if (crtc_state->vrr.enable) {
index 083390e5e4429daf6c68cd241bd6316a1895fa33..e99c94edfaaebc450031fdc0ef7178229e215058 100644 (file)
@@ -57,9 +57,6 @@ struct intel_dsi {
                u16 phys;       /* ICL DSI */
        };
 
-       /* if true, use HS mode, otherwise LP */
-       bool hs;
-
        /* virtual channel */
        int channel;
 
@@ -93,7 +90,6 @@ struct intel_dsi {
        bool bgr_enabled;
 
        u8 pixel_overlap;
-       u32 port_bits;
        u32 bw_timer;
        u32 dphy_reg;
 
index 8ca9ae4798a8940f8f79473b3030bcb345e716e9..c076da75b066ebb18a2e1f444eea07ad6b56d857 100644 (file)
@@ -30,6 +30,7 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
 
 #include "i915_drv.h"
 #include "i915_reg.h"
@@ -338,8 +339,12 @@ intel_dvo_detect(struct drm_connector *_connector, bool force)
 static int intel_dvo_get_modes(struct drm_connector *_connector)
 {
        struct intel_connector *connector = to_intel_connector(_connector);
+       struct drm_i915_private *i915 = to_i915(connector->base.dev);
        int num_modes;
 
+       if (!intel_display_driver_check_access(i915))
+               return drm_edid_connector_add_modes(&connector->base);
+
        /*
         * We should probably have an i2c driver get_modes function for those
         * devices which will have a fixed set of modes determined by the chip
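
DVO mode probing now bails out while hardware access is still restricted during driver init, returning only modes already parsed from the cached EDID; the SDVO hunks further down add the same guard. A standalone sketch of the guard pattern (hypothetical helpers standing in for intel_display_driver_check_access() and drm_edid_connector_add_modes()):

#include <stdbool.h>
#include <stdio.h>

static bool hw_access_allowed(void)   { return false; /* e.g. still in init */ }
static int  cached_edid_modes(void)   { return 3; /* modes parsed earlier */ }
static int  probe_modes_from_hw(void) { return 5; }

static int get_modes(void)
{
	/* don't touch the hardware before init completes */
	if (!hw_access_allowed())
		return cached_edid_modes();

	return probe_modes_from_hw();
}

int main(void)
{
	printf("modes: %d\n", get_modes());
	return 0;
}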
index f7e98e1c64707a40213692647a02386ad5744c2f..af7b04539b939f290cffe708533a50cccae84fb7 100644 (file)
@@ -53,12 +53,6 @@ struct intel_dvo_dev_ops {
        bool (*init)(struct intel_dvo_device *dvo,
                     struct i2c_adapter *i2cbus);
 
-       /*
-        * Called to allow the output a chance to create properties after the
-        * RandR objects have been created.
-        */
-       void (*create_resources)(struct intel_dvo_device *dvo);
-
        /*
         * Turn on/off output.
         *
@@ -79,16 +73,6 @@ struct intel_dvo_dev_ops {
        enum drm_mode_status (*mode_valid)(struct intel_dvo_device *dvo,
                                           struct drm_display_mode *mode);
 
-       /*
-        * Callback for preparing mode changes on an output
-        */
-       void (*prepare)(struct intel_dvo_device *dvo);
-
-       /*
-        * Callback for committing mode changes on an output
-        */
-       void (*commit)(struct intel_dvo_device *dvo);
-
        /*
         * Callback for setting up a video mode after fixups have been made.
         *
@@ -111,15 +95,6 @@ struct intel_dvo_dev_ops {
         */
        bool (*get_hw_state)(struct intel_dvo_device *dev);
 
-       /**
-        * Query the device for the modes it provides.
-        *
-        * This function may also update MonInfo, mm_width, and mm_height.
-        *
-        * \return singly-linked list of modes or NULL if no modes found.
-        */
-       struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
-
        /**
         * Clean up driver-specific bits of the output
         */
index 0c0144eaa8faa1cde05450b5712f430efe62a70a..3ea6470d6d921d8c61b20cc354b5e355e2e3885b 100644 (file)
@@ -1849,9 +1849,10 @@ static int intel_plane_check_stride(const struct intel_plane_state *plane_state)
                                       fb->modifier, rotation);
 
        if (stride > max_stride) {
-               DRM_DEBUG_KMS("[FB:%d] stride (%d) exceeds [PLANE:%d:%s] max stride (%d)\n",
-                             fb->base.id, stride,
-                             plane->base.base.id, plane->base.name, max_stride);
+               drm_dbg_kms(plane->base.dev,
+                           "[FB:%d] stride (%d) exceeds [PLANE:%d:%s] max stride (%d)\n",
+                           fb->base.id, stride,
+                           plane->base.base.id, plane->base.name, max_stride);
                return -EINVAL;
        }
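
This hunk is one instance of the series-wide switch from the driver-global DRM_DEBUG_KMS() to the device-aware drm_dbg_kms(), which tags each message with the originating drm_device so multi-GPU logs stay distinguishable. A toy illustration of the difference (hypothetical macros, not the drm ones):

#include <stdio.h>

struct drm_device { const char *name; };

/* global-style: no way to tell which device logged */
#define DBG_GLOBAL(fmt, ...) \
	printf("[drm] " fmt, ##__VA_ARGS__)

/* device-aware: every line carries the originating device */
#define dbg_dev(dev, fmt, ...) \
	printf("[drm:%s] " fmt, (dev)->name, ##__VA_ARGS__)

int main(void)
{
	struct drm_device card0 = { "card0" }, card1 = { "card1" };

	DBG_GLOBAL("stride exceeds max\n");
	dbg_dev(&card0, "stride exceeds max\n");
	dbg_dev(&card1, "stride exceeds max\n");
	return 0;
}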
 
index 5c8545d7a76a7736a7e3e782187c75adfe606d39..6506a8e32972a8c6efd0280e3ea29dd8041421ef 100644 (file)
@@ -37,11 +37,11 @@ struct intel_global_obj {
             (__i)++) \
                for_each_if(obj)
 
-#define for_each_old_global_obj_in_state(__state, obj, new_obj_state, __i) \
+#define for_each_old_global_obj_in_state(__state, obj, old_obj_state, __i) \
        for ((__i) = 0; \
             (__i) < (__state)->num_global_objs && \
                     ((obj) = (__state)->global_objs[__i].ptr, \
-                     (new_obj_state) = (__state)->global_objs[__i].old_state, 1); \
+                     (old_obj_state) = (__state)->global_objs[__i].old_state, 1); \
             (__i)++) \
                for_each_if(obj)
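
The change above is purely a rename: the third macro argument always bound the old object state, but was misleadingly called new_obj_state. A self-contained sketch of the same comma-expression iteration idiom (hypothetical types, not the i915 macro):

#include <stdio.h>

struct obj_state { int old_val, new_val; };
struct state { int num; struct obj_state objs[4]; };

/* bind the old value on each pass; the comma expression evaluates to 1 */
#define for_each_old_val_in_state(st, old_val, i) \
	for ((i) = 0; \
	     (i) < (st)->num && ((old_val) = (st)->objs[(i)].old_val, 1); \
	     (i)++)

int main(void)
{
	struct state st = { 2, { { 10, 11 }, { 20, 21 } } };
	int i, old_val;

	for_each_old_val_in_state(&st, old_val, i)
		printf("obj %d: old %d\n", i, old_val);
	return 0;
}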
 
index c3e692e7f790db2a3999822b315132fb9973052a..9edac27bab2644a6efe3c0a5b2881370b96a5449 100644 (file)
@@ -30,7 +30,7 @@
 #define KEY_LOAD_TRIES 5
 #define HDCP2_LC_RETRY_CNT                     3
 
-static int intel_conn_to_vcpi(struct drm_atomic_state *state,
+static int intel_conn_to_vcpi(struct intel_atomic_state *state,
                              struct intel_connector *connector)
 {
        struct drm_dp_mst_topology_mgr *mgr;
@@ -43,7 +43,7 @@ static int intel_conn_to_vcpi(struct drm_atomic_state *state,
                return 0;
        mgr = connector->port->mgr;
 
-       drm_modeset_lock(&mgr->base.lock, state->acquire_ctx);
+       drm_modeset_lock(&mgr->base.lock, state->base.acquire_ctx);
        mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
        payload = drm_atomic_get_mst_payload_state(mst_state, connector->port);
        if (drm_WARN_ON(mgr->dev, !payload))
@@ -68,19 +68,51 @@ out:
  * DP MST topology. Though it is not compulsory, security fw should change its
  * policy to mark different content_types for different streams.
  */
-static void
-intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
+static int
+intel_hdcp_required_content_stream(struct intel_atomic_state *state,
+                                  struct intel_digital_port *dig_port)
 {
+       struct drm_connector_list_iter conn_iter;
+       struct intel_digital_port *conn_dig_port;
+       struct intel_connector *connector;
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
        struct hdcp_port_data *data = &dig_port->hdcp_port_data;
        bool enforce_type0 = false;
        int k;
 
        if (dig_port->hdcp_auth_status)
-               return;
+               return 0;
+
+       data->k = 0;
 
        if (!dig_port->hdcp_mst_type1_capable)
                enforce_type0 = true;
 
+       drm_connector_list_iter_begin(&i915->drm, &conn_iter);
+       for_each_intel_connector_iter(connector, &conn_iter) {
+               if (connector->base.status == connector_status_disconnected)
+                       continue;
+
+               if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
+                       continue;
+
+               conn_dig_port = intel_attached_dig_port(connector);
+               if (conn_dig_port != dig_port)
+                       continue;
+
+               data->streams[data->k].stream_id =
+                       intel_conn_to_vcpi(state, connector);
+               data->k++;
+
+               /* if there is only one active stream */
+               if (dig_port->dp.active_mst_links <= 1)
+                       break;
+       }
+       drm_connector_list_iter_end(&conn_iter);
+
+       if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
+               return -EINVAL;
+
        /*
         * Apply common protection level across all streams in DP MST Topology.
         * Use highest supported content type for all streams in DP MST Topology.
@@ -88,19 +120,25 @@ intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
        for (k = 0; k < data->k; k++)
                data->streams[k].stream_type =
                        enforce_type0 ? DRM_MODE_HDCP_CONTENT_TYPE0 : DRM_MODE_HDCP_CONTENT_TYPE1;
+
+       return 0;
 }
 
-static void intel_hdcp_prepare_streams(struct intel_connector *connector)
+static int intel_hdcp_prepare_streams(struct intel_atomic_state *state,
+                                     struct intel_connector *connector)
 {
        struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct hdcp_port_data *data = &dig_port->hdcp_port_data;
        struct intel_hdcp *hdcp = &connector->hdcp;
 
-       if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
-               data->streams[0].stream_type = hdcp->content_type;
-       } else {
-               intel_hdcp_required_content_stream(dig_port);
-       }
+       if (intel_encoder_is_mst(intel_attached_encoder(connector)))
+               return intel_hdcp_required_content_stream(state, dig_port);
+
+       data->k = 1;
+       data->streams[0].stream_id = 0;
+       data->streams[0].stream_type = hdcp->content_type;
+
+       return 0;
 }
 
 static
@@ -140,7 +178,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *dig_port,
 }
 
 /* Is HDCP1.4 capable on Platform and Sink */
-bool intel_hdcp_capable(struct intel_connector *connector)
+bool intel_hdcp_get_capability(struct intel_connector *connector)
 {
        struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        const struct intel_hdcp_shim *shim = connector->hdcp.shim;
@@ -150,8 +188,8 @@ bool intel_hdcp_capable(struct intel_connector *connector)
        if (!shim)
                return capable;
 
-       if (shim->hdcp_capable) {
-               shim->hdcp_capable(dig_port, &capable);
+       if (shim->hdcp_get_capability) {
+               shim->hdcp_get_capability(dig_port, &capable);
        } else {
                if (!intel_hdcp_read_valid_bksv(dig_port, shim, bksv))
                        capable = true;
@@ -160,12 +198,14 @@ bool intel_hdcp_capable(struct intel_connector *connector)
        return capable;
 }
 
-/* Is HDCP2.2 capable on Platform and Sink */
-bool intel_hdcp2_capable(struct intel_connector *connector)
+/*
+ * Check if the source has all the building blocks ready to make
+ * HDCP 2.2 work
+ */
+static bool intel_hdcp2_prerequisite(struct intel_connector *connector)
 {
        struct drm_i915_private *i915 = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
-       bool capable = false;
 
        /* I915 support for HDCP2.2 */
        if (!hdcp->hdcp2_supported)
@@ -185,12 +225,40 @@ bool intel_hdcp2_capable(struct intel_connector *connector)
        }
        mutex_unlock(&i915->display.hdcp.hdcp_mutex);
 
+       return true;
+}
+
+/* Is HDCP2.2 capable on Platform and Sink */
+bool intel_hdcp2_get_capability(struct intel_connector *connector)
+{
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       bool capable = false;
+
+       if (!intel_hdcp2_prerequisite(connector))
+               return false;
+
        /* Sink's capability for HDCP2.2 */
-       hdcp->shim->hdcp_2_2_capable(connector, &capable);
+       hdcp->shim->hdcp_2_2_get_capability(connector, &capable);
 
        return capable;
 }
 
+void intel_hdcp_get_remote_capability(struct intel_connector *connector,
+                                     bool *hdcp_capable,
+                                     bool *hdcp2_capable)
+{
+       struct intel_hdcp *hdcp = &connector->hdcp;
+
+       if (!hdcp->shim->get_remote_hdcp_capability)
+               return;
+
+       hdcp->shim->get_remote_hdcp_capability(connector, hdcp_capable,
+                                              hdcp2_capable);
+
+       if (!intel_hdcp2_prerequisite(connector))
+               *hdcp2_capable = false;
+}
+
 static bool intel_hdcp_in_use(struct drm_i915_private *i915,
                              enum transcoder cpu_transcoder, enum port port)
 {
@@ -726,8 +794,8 @@ static int intel_hdcp_auth(struct intel_connector *connector)
         * whether the display supports HDCP before we write An. For HDMI
         * displays, this is not necessary.
         */
-       if (shim->hdcp_capable) {
-               ret = shim->hdcp_capable(dig_port, &hdcp_capable);
+       if (shim->hdcp_get_capability) {
+               ret = shim->hdcp_get_capability(dig_port, &hdcp_capable);
                if (ret)
                        return ret;
                if (!hdcp_capable) {
@@ -1058,15 +1126,9 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
                goto out;
        }
 
-       ret = intel_hdcp1_enable(connector);
-       if (ret) {
-               drm_err(&i915->drm, "Failed to enable hdcp (%d)\n", ret);
-               intel_hdcp_update_value(connector,
-                                       DRM_MODE_CONTENT_PROTECTION_DESIRED,
-                                       true);
-               goto out;
-       }
-
+       intel_hdcp_update_value(connector,
+                               DRM_MODE_CONTENT_PROTECTION_DESIRED,
+                               true);
 out:
        mutex_unlock(&dig_port->hdcp_mutex);
        mutex_unlock(&hdcp->mutex);
@@ -1871,7 +1933,8 @@ hdcp2_propagate_stream_management_info(struct intel_connector *connector)
        return ret;
 }
 
-static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
+static int hdcp2_authenticate_and_encrypt(struct intel_atomic_state *state,
+                                         struct intel_connector *connector)
 {
        struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        struct drm_i915_private *i915 = to_i915(connector->base.dev);
@@ -1880,7 +1943,13 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
        for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
                ret = hdcp2_authenticate_sink(connector);
                if (!ret) {
-                       intel_hdcp_prepare_streams(connector);
+                       ret = intel_hdcp_prepare_streams(state, connector);
+                       if (ret) {
+                               drm_dbg_kms(&i915->drm,
+                                           "Prepare streams failed (%d)\n",
+                                           ret);
+                               break;
+                       }
 
                        ret = hdcp2_propagate_stream_management_info(connector);
                        if (ret) {
@@ -1925,7 +1994,8 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
        return ret;
 }
 
-static int _intel_hdcp2_enable(struct intel_connector *connector)
+static int _intel_hdcp2_enable(struct intel_atomic_state *state,
+                              struct intel_connector *connector)
 {
        struct drm_i915_private *i915 = to_i915(connector->base.dev);
        struct intel_hdcp *hdcp = &connector->hdcp;
@@ -1935,7 +2005,7 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
                    connector->base.base.id, connector->base.name,
                    hdcp->content_type);
 
-       ret = hdcp2_authenticate_and_encrypt(connector);
+       ret = hdcp2_authenticate_and_encrypt(state, connector);
        if (ret) {
                drm_dbg_kms(&i915->drm, "HDCP2 Type%d  Enabling Failed. (%d)\n",
                            hdcp->content_type, ret);
@@ -2038,17 +2108,6 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
 
                drm_dbg_kms(&i915->drm,
                            "HDCP2.2 Downstream topology change\n");
-               ret = hdcp2_authenticate_repeater_topology(connector);
-               if (!ret) {
-                       intel_hdcp_update_value(connector,
-                                       DRM_MODE_CONTENT_PROTECTION_ENABLED,
-                                       true);
-                       goto out;
-               }
-               drm_dbg_kms(&i915->drm,
-                           "[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n",
-                           connector->base.base.id, connector->base.name,
-                           ret);
        } else {
                drm_dbg_kms(&i915->drm,
                            "[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
@@ -2065,18 +2124,8 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
                goto out;
        }
 
-       ret = _intel_hdcp2_enable(connector);
-       if (ret) {
-               drm_dbg_kms(&i915->drm,
-                           "[CONNECTOR:%d:%s] Failed to enable hdcp2.2 (%d)\n",
-                           connector->base.base.id, connector->base.name,
-                           ret);
-               intel_hdcp_update_value(connector,
-                                       DRM_MODE_CONTENT_PROTECTION_DESIRED,
-                                       true);
-               goto out;
-       }
-
+       intel_hdcp_update_value(connector,
+                               DRM_MODE_CONTENT_PROTECTION_DESIRED, true);
 out:
        mutex_unlock(&dig_port->hdcp_mutex);
        mutex_unlock(&hdcp->mutex);
@@ -2284,52 +2333,6 @@ int intel_hdcp_init(struct intel_connector *connector,
        return 0;
 }
 
-static int
-intel_hdcp_set_streams(struct intel_digital_port *dig_port,
-                      struct intel_atomic_state *state)
-{
-       struct drm_connector_list_iter conn_iter;
-       struct intel_digital_port *conn_dig_port;
-       struct intel_connector *connector;
-       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-       struct hdcp_port_data *data = &dig_port->hdcp_port_data;
-
-       if (!intel_encoder_is_mst(&dig_port->base)) {
-               data->k = 1;
-               data->streams[0].stream_id = 0;
-               return 0;
-       }
-
-       data->k = 0;
-
-       drm_connector_list_iter_begin(&i915->drm, &conn_iter);
-       for_each_intel_connector_iter(connector, &conn_iter) {
-               if (connector->base.status == connector_status_disconnected)
-                       continue;
-
-               if (!intel_encoder_is_mst(intel_attached_encoder(connector)))
-                       continue;
-
-               conn_dig_port = intel_attached_dig_port(connector);
-               if (conn_dig_port != dig_port)
-                       continue;
-
-               data->streams[data->k].stream_id =
-                       intel_conn_to_vcpi(&state->base, connector);
-               data->k++;
-
-               /* if there is only one active stream */
-               if (dig_port->dp.active_mst_links <= 1)
-                       break;
-       }
-       drm_connector_list_iter_end(&conn_iter);
-
-       if (drm_WARN_ON(&i915->drm, data->k > INTEL_NUM_PIPES(i915) || data->k == 0))
-               return -EINVAL;
-
-       return 0;
-}
-
 static int _intel_hdcp_enable(struct intel_atomic_state *state,
                              struct intel_encoder *encoder,
                              const struct intel_crtc_state *pipe_config,
@@ -2374,25 +2377,18 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state,
         * Considering that HDCP2.2 is more secure than HDCP1.4, If the setup
         * is capable of HDCP2.2, it is preferred to use HDCP2.2.
         */
-       if (intel_hdcp2_capable(connector)) {
-               ret = intel_hdcp_set_streams(dig_port, state);
-               if (!ret) {
-                       ret = _intel_hdcp2_enable(connector);
-                       if (!ret)
-                               check_link_interval =
-                                       DRM_HDCP2_CHECK_PERIOD_MS;
-               } else {
-                       drm_dbg_kms(&i915->drm,
-                                   "Set content streams failed: (%d)\n",
-                                   ret);
-               }
+       if (intel_hdcp2_get_capability(connector)) {
+               ret = _intel_hdcp2_enable(state, connector);
+               if (!ret)
+                       check_link_interval =
+                               DRM_HDCP2_CHECK_PERIOD_MS;
        }
 
        /*
         * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
         * be attempted.
         */
-       if (ret && intel_hdcp_capable(connector) &&
+       if (ret && intel_hdcp_get_capability(connector) &&
            hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
                ret = intel_hdcp1_enable(connector);
        }
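
The enable path keeps its HDCP 2.2-first shape and only drops the separate set-streams step, which is now folded into _intel_hdcp2_enable() via intel_hdcp_prepare_streams(). A compressed standalone sketch of the resulting decision flow (hypothetical helpers, with 2.2 authentication forced to fail for illustration):

#include <stdbool.h>
#include <stdio.h>

enum content_type { TYPE0, TYPE1 };

static bool hdcp2_capable(void) { return true; }
static bool hdcp1_capable(void) { return true; }
static int  hdcp2_enable(void)  { return -1; /* pretend 2.2 auth failed */ }
static int  hdcp1_enable(void)  { return 0; }

static int hdcp_enable(enum content_type type)
{
	int ret = -1;

	/* prefer HDCP 2.2 whenever the setup supports it */
	if (hdcp2_capable())
		ret = hdcp2_enable();

	/* fall back to 1.4 only if the stream doesn't demand Type 1 */
	if (ret && hdcp1_capable() && type != TYPE1)
		ret = hdcp1_enable();

	return ret;
}

int main(void)
{
	printf("type0: %d, type1: %d\n",
	       hdcp_enable(TYPE0), hdcp_enable(TYPE1));
	return 0;
}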
index a9c784fd9ba507a57fee5bc42eab0fda07222590..477f2d2bb120d86edf47d6357afbd04beff00356 100644 (file)
@@ -38,8 +38,11 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
                            const struct intel_crtc_state *crtc_state,
                            const struct drm_connector_state *conn_state);
 bool is_hdcp_supported(struct drm_i915_private *i915, enum port port);
-bool intel_hdcp_capable(struct intel_connector *connector);
-bool intel_hdcp2_capable(struct intel_connector *connector);
+bool intel_hdcp_get_capability(struct intel_connector *connector);
+bool intel_hdcp2_get_capability(struct intel_connector *connector);
+void intel_hdcp_get_remote_capability(struct intel_connector *connector,
+                                     bool *hdcp_capable,
+                                     bool *hdcp2_capable);
 void intel_hdcp_component_init(struct drm_i915_private *i915);
 void intel_hdcp_component_fini(struct drm_i915_private *i915);
 void intel_hdcp_cleanup(struct intel_connector *connector);
index 7020e58061092bd919af3b53486181b3300afb9a..90d2236fede3ace1e596e515ea33f5f5887cfe12 100644 (file)
@@ -1732,8 +1732,8 @@ int intel_hdmi_hdcp2_check_link(struct intel_digital_port *dig_port,
 }
 
 static
-int intel_hdmi_hdcp2_capable(struct intel_connector *connector,
-                            bool *capable)
+int intel_hdmi_hdcp2_get_capability(struct intel_connector *connector,
+                                   bool *capable)
 {
        struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
        u8 hdcp2_version;
@@ -1762,7 +1762,7 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
        .write_2_2_msg = intel_hdmi_hdcp2_write_msg,
        .read_2_2_msg = intel_hdmi_hdcp2_read_msg,
        .check_2_2_link = intel_hdmi_hdcp2_check_link,
-       .hdcp_2_2_capable = intel_hdmi_hdcp2_capable,
+       .hdcp_2_2_get_capability = intel_hdmi_hdcp2_get_capability,
        .protocol = HDCP_PROTOCOL_HDMI,
 };
 
index 9c6d35a405a1822feb9a6c126439ece79b781303..dfd7d5e23f3fa48e881876104317f2cfa7f6bf7d 100644 (file)
@@ -6,26 +6,41 @@
 #include "i915_drv.h"
 
 #include "intel_atomic.h"
+#include "intel_crtc.h"
 #include "intel_display_types.h"
 #include "intel_dp_mst.h"
+#include "intel_dp_tunnel.h"
 #include "intel_fdi.h"
 #include "intel_link_bw.h"
 
 /**
  * intel_link_bw_init_limits - initialize BW limits
- * @i915: device instance
+ * @state: Atomic state
  * @limits: link BW limits
  *
  * Initialize @limits.
  */
-void intel_link_bw_init_limits(struct drm_i915_private *i915, struct intel_link_bw_limits *limits)
+void intel_link_bw_init_limits(struct intel_atomic_state *state,
+                              struct intel_link_bw_limits *limits)
 {
+       struct drm_i915_private *i915 = to_i915(state->base.dev);
        enum pipe pipe;
 
        limits->force_fec_pipes = 0;
        limits->bpp_limit_reached_pipes = 0;
-       for_each_pipe(i915, pipe)
-               limits->max_bpp_x16[pipe] = INT_MAX;
+       for_each_pipe(i915, pipe) {
+               const struct intel_crtc_state *crtc_state =
+                       intel_atomic_get_new_crtc_state(state,
+                                                       intel_crtc_for_pipe(i915, pipe));
+
+               if (state->base.duplicated && crtc_state) {
+                       limits->max_bpp_x16[pipe] = crtc_state->max_link_bpp_x16;
+                       if (crtc_state->fec_enable)
+                               limits->force_fec_pipes |= BIT(pipe);
+               } else {
+                       limits->max_bpp_x16[pipe] = INT_MAX;
+               }
+       }
 }
 
 /**
@@ -149,6 +164,10 @@ static int check_all_link_config(struct intel_atomic_state *state,
        if (ret)
                return ret;
 
+       ret = intel_dp_tunnel_atomic_check_link(state, limits);
+       if (ret)
+               return ret;
+
        ret = intel_fdi_atomic_check_link(state, limits);
        if (ret)
                return ret;
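
When the state is a duplicate (e.g. recomputed for DP tunnel bandwidth allocation), the limits are now seeded from what the copied state already negotiated instead of being reset to "unlimited". A standalone sketch of that seeding pattern (hypothetical types; INT_MAX as "no limit" mirrors the code above):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_PIPES 4

struct crtc_state { int max_link_bpp_x16; bool fec_enable; };

struct bw_limits {
	unsigned int force_fec_pipes;
	int max_bpp_x16[MAX_PIPES];
};

static void init_limits(struct bw_limits *limits, bool duplicated,
			const struct crtc_state *states[MAX_PIPES])
{
	for (int pipe = 0; pipe < MAX_PIPES; pipe++) {
		const struct crtc_state *cs = states[pipe];

		if (duplicated && cs) {
			/* re-use the limits the committed state negotiated */
			limits->max_bpp_x16[pipe] = cs->max_link_bpp_x16;
			if (cs->fec_enable)
				limits->force_fec_pipes |= 1u << pipe;
		} else {
			limits->max_bpp_x16[pipe] = INT_MAX; /* no limit yet */
		}
	}
}

int main(void)
{
	struct crtc_state pipe_a = { .max_link_bpp_x16 = 24 << 4,
				     .fec_enable = true };
	const struct crtc_state *states[MAX_PIPES] = { &pipe_a };
	struct bw_limits limits = { 0 };

	init_limits(&limits, true, states);
	printf("pipe A bpp_x16=%d fec=0x%x\n",
	       limits.max_bpp_x16[0], limits.force_fec_pipes);
	return 0;
}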
index 2cf57307cc2491451fcdc28b44753da2a1fb6935..6b0ccfff59dab4cf554da1b6aaf118fad39b64b4 100644 (file)
@@ -22,7 +22,7 @@ struct intel_link_bw_limits {
        int max_bpp_x16[I915_MAX_PIPES];
 };
 
-void intel_link_bw_init_limits(struct drm_i915_private *i915,
+void intel_link_bw_init_limits(struct intel_atomic_state *state,
                               struct intel_link_bw_limits *limits);
 int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
                             struct intel_link_bw_limits *limits,
index f242bb320610a623cdeb54cd1c09e05756307d7b..fcbb083318a7f2cba98ecbf4a8825e828451baa6 100644 (file)
@@ -887,7 +887,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
                return ret;
        }
 
-       if (intel_bios_is_valid_vbt(fw->data, fw->size)) {
+       if (intel_bios_is_valid_vbt(dev_priv, fw->data, fw->size)) {
                opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL);
                if (opregion->vbt_firmware) {
                        drm_dbg_kms(&dev_priv->drm,
@@ -1034,7 +1034,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
 
                vbt = opregion->rvda;
                vbt_size = opregion->asle->rvds;
-               if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
+               if (intel_bios_is_valid_vbt(dev_priv, vbt, vbt_size)) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Found valid VBT in ACPI OpRegion (RVDA)\n");
                        opregion->vbt = vbt;
@@ -1059,7 +1059,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
        vbt_size = (mboxes & MBOX_ASLE_EXT) ?
                OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
        vbt_size -= OPREGION_VBT_OFFSET;
-       if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
+       if (intel_bios_is_valid_vbt(dev_priv, vbt, vbt_size)) {
                drm_dbg_kms(&dev_priv->drm,
                            "Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
                opregion->vbt = vbt;
index 093106c1e10148f56a6955669d3324aebc178ac7..5f9e748adc89ee846de12974195e17ab38e14b21 100644 (file)
@@ -252,6 +252,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
 
 static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
 {
+       struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
        struct i2c_msg msgs[] = {
                {
                        .addr = intel_sdvo->slave_addr,
@@ -271,7 +272,7 @@ static bool intel_sdvo_read_byte(struct intel_sdvo *intel_sdvo, u8 addr, u8 *ch)
        if ((ret = i2c_transfer(intel_sdvo->i2c, msgs, 2)) == 2)
                return true;
 
-       DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
+       drm_dbg_kms(&i915->drm, "i2c transfer returned %d\n", ret);
        return false;
 }
 
@@ -437,7 +438,8 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
        drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1);
 #undef BUF_PRINT
 
-       DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
+       drm_dbg_kms(&dev_priv->drm, "%s: W: %02X %s\n", SDVO_NAME(intel_sdvo),
+                   cmd, buffer);
 }
 
 static const char * const cmd_status_names[] = {
@@ -462,6 +464,7 @@ static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
                                   const void *args, int args_len,
                                   bool unlocked)
 {
+       struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
        u8 *buf, status;
        struct i2c_msg *msgs;
        int i, ret = true;
@@ -511,13 +514,13 @@ static bool __intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
        else
                ret = __i2c_transfer(intel_sdvo->i2c, msgs, i+3);
        if (ret < 0) {
-               DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+               drm_dbg_kms(&i915->drm, "I2c transfer returned %d\n", ret);
                ret = false;
                goto out;
        }
        if (ret != i+3) {
                /* failure in I2C transfer */
-               DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
+               drm_dbg_kms(&i915->drm, "I2c transfer returned %d/%d\n", ret, i+3);
                ret = false;
        }
 
@@ -604,12 +607,13 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
        drm_WARN_ON(&dev_priv->drm, pos >= sizeof(buffer) - 1);
 #undef BUF_PRINT
 
-       DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
+       drm_dbg_kms(&dev_priv->drm, "%s: R: %s\n",
+                   SDVO_NAME(intel_sdvo), buffer);
        return true;
 
 log_fail:
-       DRM_DEBUG_KMS("%s: R: ... failed %s\n",
-                     SDVO_NAME(intel_sdvo), buffer);
+       drm_dbg_kms(&dev_priv->drm, "%s: R: ... failed %s\n",
+                   SDVO_NAME(intel_sdvo), buffer);
        return false;
 }
 
@@ -758,7 +762,7 @@ static bool intel_sdvo_get_timing(struct intel_sdvo *intel_sdvo, u8 cmd,
 }
 
 static bool intel_sdvo_set_input_timing(struct intel_sdvo *intel_sdvo,
-                                        struct intel_sdvo_dtd *dtd)
+                                       struct intel_sdvo_dtd *dtd)
 {
        return intel_sdvo_set_timing(intel_sdvo,
                                     SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
@@ -926,8 +930,8 @@ static bool intel_sdvo_check_supp_encode(struct intel_sdvo *intel_sdvo)
 
        BUILD_BUG_ON(sizeof(encode) != 2);
        return intel_sdvo_get_value(intel_sdvo,
-                                 SDVO_CMD_GET_SUPP_ENCODE,
-                                 &encode, sizeof(encode));
+                                   SDVO_CMD_GET_SUPP_ENCODE,
+                                   &encode, sizeof(encode));
 }
 
 static bool intel_sdvo_set_encode(struct intel_sdvo *intel_sdvo,
@@ -1004,6 +1008,7 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
                                       unsigned int if_index, u8 tx_rate,
                                       const u8 *data, unsigned int length)
 {
+       struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
        u8 set_buf_index[2] = { if_index, 0 };
        u8 hbuf_size, tmp[8];
        int i;
@@ -1016,8 +1021,9 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
        if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
                return false;
 
-       DRM_DEBUG_KMS("writing sdvo hbuf: %i, length %u, hbuf_size: %i\n",
-                     if_index, length, hbuf_size);
+       drm_dbg_kms(&i915->drm,
+                   "writing sdvo hbuf: %i, length %u, hbuf_size: %i\n",
+                   if_index, length, hbuf_size);
 
        if (hbuf_size < length)
                return false;
@@ -1042,6 +1048,7 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
                                         unsigned int if_index,
                                         u8 *data, unsigned int length)
 {
+       struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
        u8 set_buf_index[2] = { if_index, 0 };
        u8 hbuf_size, tx_rate, av_split;
        int i;
@@ -1071,8 +1078,9 @@ static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
        if (!intel_sdvo_get_hbuf_size(intel_sdvo, &hbuf_size))
                return false;
 
-       DRM_DEBUG_KMS("reading sdvo hbuf: %i, length %u, hbuf_size: %i\n",
-                     if_index, length, hbuf_size);
+       drm_dbg_kms(&i915->drm,
+                   "reading sdvo hbuf: %i, length %u, hbuf_size: %i\n",
+                   if_index, length, hbuf_size);
 
        hbuf_size = min_t(unsigned int, length, hbuf_size);
 
@@ -1151,6 +1159,7 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
 static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
                                         struct intel_crtc_state *crtc_state)
 {
+       struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
        u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
        union hdmi_infoframe *frame = &crtc_state->infoframes.avi;
        ssize_t len;
@@ -1162,7 +1171,7 @@ static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
        len = intel_sdvo_read_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
                                        sdvo_data, sizeof(sdvo_data));
        if (len < 0) {
-               DRM_DEBUG_KMS("failed to read AVI infoframe\n");
+               drm_dbg_kms(&i915->drm, "failed to read AVI infoframe\n");
                return;
        } else if (len == 0) {
                return;
@@ -1173,13 +1182,14 @@ static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
 
        ret = hdmi_infoframe_unpack(frame, sdvo_data, len);
        if (ret) {
-               DRM_DEBUG_KMS("Failed to unpack AVI infoframe\n");
+               drm_dbg_kms(&i915->drm, "Failed to unpack AVI infoframe\n");
                return;
        }
 
        if (frame->any.type != HDMI_INFOFRAME_TYPE_AVI)
-               DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
-                             frame->any.type, HDMI_INFOFRAME_TYPE_AVI);
+               drm_dbg_kms(&i915->drm,
+                           "Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
+                           frame->any.type, HDMI_INFOFRAME_TYPE_AVI);
 }
 
 static void intel_sdvo_get_eld(struct intel_sdvo *intel_sdvo,
@@ -1348,6 +1358,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config,
                                     struct drm_connector_state *conn_state)
 {
+       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
        struct intel_sdvo_connector *intel_sdvo_connector =
                to_intel_sdvo_connector(conn_state->connector);
@@ -1360,7 +1371,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
                        return -EINVAL;
        }
 
-       DRM_DEBUG_KMS("forcing bpc to 8 for SDVO\n");
+       drm_dbg_kms(&i915->drm, "forcing bpc to 8 for SDVO\n");
        /* FIXME: Don't increase pipe_bpp */
        pipe_config->pipe_bpp = 8*3;
        pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
@@ -1439,7 +1450,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
 
        if (!intel_sdvo_compute_avi_infoframe(intel_sdvo,
                                              pipe_config, conn_state)) {
-               DRM_DEBUG_KMS("bad AVI infoframe\n");
+               drm_dbg_kms(&i915->drm, "bad AVI infoframe\n");
                return -EINVAL;
        }
 
@@ -1916,8 +1927,8 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
         */
        if (success && !input1) {
                drm_dbg_kms(&dev_priv->drm,
-                           "First %s output reported failure to "
-                           "sync\n", SDVO_NAME(intel_sdvo));
+                           "First %s output reported failure to sync\n",
+                           SDVO_NAME(intel_sdvo));
        }
 
        if (0)
@@ -1976,37 +1987,38 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
 
 static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct intel_sdvo_caps *caps)
 {
+       struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
        BUILD_BUG_ON(sizeof(*caps) != 8);
        if (!intel_sdvo_get_value(intel_sdvo,
                                  SDVO_CMD_GET_DEVICE_CAPS,
                                  caps, sizeof(*caps)))
                return false;
 
-       DRM_DEBUG_KMS("SDVO capabilities:\n"
-                     "  vendor_id: %d\n"
-                     "  device_id: %d\n"
-                     "  device_rev_id: %d\n"
-                     "  sdvo_version_major: %d\n"
-                     "  sdvo_version_minor: %d\n"
-                     "  sdvo_num_inputs: %d\n"
-                     "  smooth_scaling: %d\n"
-                     "  sharp_scaling: %d\n"
-                     "  up_scaling: %d\n"
-                     "  down_scaling: %d\n"
-                     "  stall_support: %d\n"
-                     "  output_flags: %d\n",
-                     caps->vendor_id,
-                     caps->device_id,
-                     caps->device_rev_id,
-                     caps->sdvo_version_major,
-                     caps->sdvo_version_minor,
-                     caps->sdvo_num_inputs,
-                     caps->smooth_scaling,
-                     caps->sharp_scaling,
-                     caps->up_scaling,
-                     caps->down_scaling,
-                     caps->stall_support,
-                     caps->output_flags);
+       drm_dbg_kms(&i915->drm, "SDVO capabilities:\n"
+                   "  vendor_id: %d\n"
+                   "  device_id: %d\n"
+                   "  device_rev_id: %d\n"
+                   "  sdvo_version_major: %d\n"
+                   "  sdvo_version_minor: %d\n"
+                   "  sdvo_num_inputs: %d\n"
+                   "  smooth_scaling: %d\n"
+                   "  sharp_scaling: %d\n"
+                   "  up_scaling: %d\n"
+                   "  down_scaling: %d\n"
+                   "  stall_support: %d\n"
+                   "  output_flags: %d\n",
+                   caps->vendor_id,
+                   caps->device_id,
+                   caps->device_rev_id,
+                   caps->sdvo_version_major,
+                   caps->sdvo_version_minor,
+                   caps->sdvo_num_inputs,
+                   caps->smooth_scaling,
+                   caps->sharp_scaling,
+                   caps->up_scaling,
+                   caps->down_scaling,
+                   caps->stall_support,
+                   caps->output_flags);
 
        return true;
 }
@@ -2038,7 +2050,7 @@ static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
                return 0;
 
        if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
-                                       &hotplug, sizeof(hotplug)))
+                                 &hotplug, sizeof(hotplug)))
                return 0;
 
        return hotplug;
@@ -2121,8 +2133,9 @@ intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
        bool monitor_is_digital = drm_edid_is_digital(drm_edid);
        bool connector_is_digital = !!IS_DIGITAL(sdvo);
 
-       DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
-                     connector_is_digital, monitor_is_digital);
+       drm_dbg_kms(sdvo->base.base.dev,
+                   "connector_is_digital? %d, monitor_is_digital? %d\n",
+                   connector_is_digital, monitor_is_digital);
        return connector_is_digital == monitor_is_digital;
 }
 
@@ -2135,8 +2148,8 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
        enum drm_connector_status ret;
        u16 response;
 
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-                     connector->base.id, connector->name);
+       drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+                   connector->base.id, connector->name);
 
        if (!intel_display_device_enabled(i915))
                return connector_status_disconnected;
@@ -2153,9 +2166,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
                                  &response, 2))
                return connector_status_unknown;
 
-       DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
-                     response & 0xff, response >> 8,
-                     intel_sdvo_connector->output_flag);
+       drm_dbg_kms(&i915->drm, "SDVO response %d %d [%x]\n",
+                   response & 0xff, response >> 8,
+                   intel_sdvo_connector->output_flag);
 
        if (response == 0)
                return connector_status_disconnected;
@@ -2189,11 +2202,15 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 
 static int intel_sdvo_get_ddc_modes(struct drm_connector *connector)
 {
+       struct drm_i915_private *i915 = to_i915(connector->dev);
        int num_modes = 0;
        const struct drm_edid *drm_edid;
 
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-                     connector->base.id, connector->name);
+       drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n",
+                   connector->base.id, connector->name);
+
+       if (!intel_display_driver_check_access(i915))
+               return drm_edid_connector_add_modes(connector);
 
        /* set the bus switch and get the modes */
        drm_edid = intel_sdvo_get_edid(connector);
@@ -2287,6 +2304,7 @@ static const struct drm_display_mode sdvo_tv_modes[] = {
 static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
 {
        struct intel_sdvo *intel_sdvo = intel_attached_sdvo(to_intel_connector(connector));
+       struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
        struct intel_sdvo_connector *intel_sdvo_connector =
                to_intel_sdvo_connector(connector);
        const struct drm_connector_state *conn_state = connector->state;
@@ -2295,8 +2313,11 @@ static int intel_sdvo_get_tv_modes(struct drm_connector *connector)
        int num_modes = 0;
        int i;
 
-       DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-                     connector->base.id, connector->name);
+       drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+                   connector->base.id, connector->name);
+
+       if (!intel_display_driver_check_access(i915))
+               return 0;
 
        /*
         * Read the list of supported input resolutions for the selected TV
@@ -2783,10 +2804,11 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type)
        struct drm_encoder *encoder = &intel_sdvo->base.base;
        struct drm_connector *connector;
        struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+       struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
        struct intel_connector *intel_connector;
        struct intel_sdvo_connector *intel_sdvo_connector;
 
-       DRM_DEBUG_KMS("initialising DVI type 0x%x\n", type);
+       drm_dbg_kms(&i915->drm, "initialising DVI type 0x%x\n", type);
 
        intel_sdvo_connector = intel_sdvo_connector_alloc();
        if (!intel_sdvo_connector)
@@ -2797,7 +2819,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type)
        intel_connector = &intel_sdvo_connector->base;
        connector = &intel_connector->base;
        if (intel_sdvo_get_hotplug_support(intel_sdvo) &
-               intel_sdvo_connector->output_flag) {
+           intel_sdvo_connector->output_flag) {
                intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag;
                /*
                 * Some SDVO devices have one-shot hotplug interrupts.
@@ -2832,12 +2854,13 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, u16 type)
 static bool
 intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, u16 type)
 {
+       struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
        struct drm_encoder *encoder = &intel_sdvo->base.base;
        struct drm_connector *connector;
        struct intel_connector *intel_connector;
        struct intel_sdvo_connector *intel_sdvo_connector;
 
-       DRM_DEBUG_KMS("initialising TV type 0x%x\n", type);
+       drm_dbg_kms(&i915->drm, "initialising TV type 0x%x\n", type);
 
        intel_sdvo_connector = intel_sdvo_connector_alloc();
        if (!intel_sdvo_connector)
@@ -2871,12 +2894,13 @@ err:
 static bool
 intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, u16 type)
 {
+       struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
        struct drm_encoder *encoder = &intel_sdvo->base.base;
        struct drm_connector *connector;
        struct intel_connector *intel_connector;
        struct intel_sdvo_connector *intel_sdvo_connector;
 
-       DRM_DEBUG_KMS("initialising analog type 0x%x\n", type);
+       drm_dbg_kms(&i915->drm, "initialising analog type 0x%x\n", type);
 
        intel_sdvo_connector = intel_sdvo_connector_alloc();
        if (!intel_sdvo_connector)
@@ -2908,7 +2932,7 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, u16 type)
        struct intel_connector *intel_connector;
        struct intel_sdvo_connector *intel_sdvo_connector;
 
-       DRM_DEBUG_KMS("initialising LVDS type 0x%x\n", type);
+       drm_dbg_kms(&i915->drm, "initialising LVDS type 0x%x\n", type);
 
        intel_sdvo_connector = intel_sdvo_connector_alloc();
        if (!intel_sdvo_connector)
@@ -2992,6 +3016,7 @@ static bool intel_sdvo_output_init(struct intel_sdvo *sdvo, u16 type)
 static bool
 intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo)
 {
+       struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
        static const u16 probe_order[] = {
                SDVO_OUTPUT_TMDS0,
                SDVO_OUTPUT_TMDS1,
@@ -3010,8 +3035,9 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo)
        flags = intel_sdvo_filter_output_flags(intel_sdvo->caps.output_flags);
 
        if (flags == 0) {
-               DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%04x)\n",
-                             SDVO_NAME(intel_sdvo), intel_sdvo->caps.output_flags);
+               drm_dbg_kms(&i915->drm,
+                           "%s: Unknown SDVO output type (0x%04x)\n",
+                           SDVO_NAME(intel_sdvo), intel_sdvo->caps.output_flags);
                return false;
        }
 
@@ -3073,8 +3099,8 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
 
 
        intel_sdvo_connector->tv_format =
-                       drm_property_create(dev, DRM_MODE_PROP_ENUM,
-                                           "mode", intel_sdvo_connector->format_supported_num);
+               drm_property_create(dev, DRM_MODE_PROP_ENUM,
+                                   "mode", intel_sdvo_connector->format_supported_num);
        if (!intel_sdvo_connector->tv_format)
                return false;
 
@@ -3100,8 +3126,8 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
                state_assignment = response; \
                drm_object_attach_property(&connector->base, \
                                           intel_sdvo_connector->name, 0); \
-               DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
-                             data_value[0], data_value[1], response); \
+               drm_dbg_kms(dev, #name ": max %d, default %d, current %d\n", \
+                           data_value[0], data_value[1], response); \
        } \
 } while (0)
 
@@ -3112,6 +3138,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
                                      struct intel_sdvo_connector *intel_sdvo_connector,
                                      struct intel_sdvo_enhancements_reply enhancements)
 {
+       struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
        struct drm_device *dev = intel_sdvo->base.base.dev;
        struct drm_connector *connector = &intel_sdvo_connector->base.base;
        struct drm_connector_state *conn_state = connector->state;
@@ -3148,10 +3175,9 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
                        return false;
 
                drm_object_attach_property(&connector->base,
-                                             intel_sdvo_connector->right, 0);
-               DRM_DEBUG_KMS("h_overscan: max %d, "
-                             "default %d, current %d\n",
-                             data_value[0], data_value[1], response);
+                                          intel_sdvo_connector->right, 0);
+               drm_dbg_kms(&i915->drm, "h_overscan: max %d, default %d, current %d\n",
+                           data_value[0], data_value[1], response);
        }
 
        if (enhancements.overscan_v) {
@@ -3170,7 +3196,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
                intel_sdvo_connector->max_vscan = data_value[0];
                intel_sdvo_connector->top =
                        drm_property_create_range(dev, 0,
-                                           "top_margin", 0, data_value[0]);
+                                                 "top_margin", 0, data_value[0]);
                if (!intel_sdvo_connector->top)
                        return false;
 
@@ -3179,15 +3205,14 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
 
                intel_sdvo_connector->bottom =
                        drm_property_create_range(dev, 0,
-                                           "bottom_margin", 0, data_value[0]);
+                                                 "bottom_margin", 0, data_value[0]);
                if (!intel_sdvo_connector->bottom)
                        return false;
 
                drm_object_attach_property(&connector->base,
-                                             intel_sdvo_connector->bottom, 0);
-               DRM_DEBUG_KMS("v_overscan: max %d, "
-                             "default %d, current %d\n",
-                             data_value[0], data_value[1], response);
+                                          intel_sdvo_connector->bottom, 0);
+               drm_dbg_kms(&i915->drm, "v_overscan: max %d, default %d, current %d\n",
+                           data_value[0], data_value[1], response);
        }
 
        ENHANCEMENT(&sdvo_state->tv, hpos, HPOS);
@@ -3215,7 +3240,7 @@ intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
 
                drm_object_attach_property(&connector->base,
                                           intel_sdvo_connector->dot_crawl, 0);
-               DRM_DEBUG_KMS("dot crawl: current %d\n", response);
+               drm_dbg_kms(&i915->drm, "dot crawl: current %d\n", response);
        }
 
        return true;
@@ -3240,6 +3265,7 @@ intel_sdvo_create_enhance_property_lvds(struct intel_sdvo *intel_sdvo,
 static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
                                               struct intel_sdvo_connector *intel_sdvo_connector)
 {
+       struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev);
        union {
                struct intel_sdvo_enhancements_reply reply;
                u16 response;
@@ -3251,7 +3277,7 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
                                  SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
                                  &enhancements, sizeof(enhancements)) ||
            enhancements.response == 0) {
-               DRM_DEBUG_KMS("No enhancement is supported\n");
+               drm_dbg_kms(&i915->drm, "No enhancement is supported\n");
                return true;
        }
 
@@ -3471,23 +3497,23 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
                goto err_output;
 
        drm_dbg_kms(&dev_priv->drm, "%s device VID/DID: %02X:%02X.%02X, "
-                       "clock range %dMHz - %dMHz, "
-                       "num inputs: %d, "
-                       "output 1: %c, output 2: %c\n",
-                       SDVO_NAME(intel_sdvo),
-                       intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
-                       intel_sdvo->caps.device_rev_id,
-                       intel_sdvo->pixel_clock_min / 1000,
-                       intel_sdvo->pixel_clock_max / 1000,
-                       intel_sdvo->caps.sdvo_num_inputs,
-                       /* check currently supported outputs */
-                       intel_sdvo->caps.output_flags &
-                       (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0 |
-                        SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_SVID0 |
-                        SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB0) ? 'Y' : 'N',
-                       intel_sdvo->caps.output_flags &
-                       (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1 |
-                        SDVO_OUTPUT_LVDS1) ? 'Y' : 'N');
+                   "clock range %dMHz - %dMHz, "
+                   "num inputs: %d, "
+                   "output 1: %c, output 2: %c\n",
+                   SDVO_NAME(intel_sdvo),
+                   intel_sdvo->caps.vendor_id, intel_sdvo->caps.device_id,
+                   intel_sdvo->caps.device_rev_id,
+                   intel_sdvo->pixel_clock_min / 1000,
+                   intel_sdvo->pixel_clock_max / 1000,
+                   intel_sdvo->caps.sdvo_num_inputs,
+                   /* check currently supported outputs */
+                   intel_sdvo->caps.output_flags &
+                   (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0 |
+                    SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_SVID0 |
+                    SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB0) ? 'Y' : 'N',
+                   intel_sdvo->caps.output_flags &
+                   (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1 |
+                    SDVO_OUTPUT_LVDS1) ? 'Y' : 'N');
        return true;
 
 err_output:
index 392d93e97bf8320db6706353ed6736a2ef82116e..e941e2e4fd14c2ce489d5e7a162853723b47b7d4 100644 (file)
@@ -948,6 +948,11 @@ static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
        if (DISPLAY_VER(dev_priv) == 13)
                plane_ctl |= adlp_plane_ctl_arb_slots(plane_state);
 
+       if (GRAPHICS_VER(dev_priv) >= 20 &&
+           fb->modifier == I915_FORMAT_MOD_4_TILED) {
+               plane_ctl |= PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
+       }
+
        return plane_ctl;
 }
 
index 614f319d754e9fe02970637a9ef6f7be45322022..c6b9be80d83c4dfeda8c6d6968ccafd0ac1c8d13 100644 (file)
 #include "skl_watermark.h"
 #include "skl_watermark_regs.h"
 
+/* It is expected that DSB can do posted writes to every register in
+ * the pipe and planes within 100us. For the flip queue use case, the
+ * recommended DSB execution time is 100us + one SAGV block time.
+ */
+#define DSB_EXE_TIME 100
+
 static void skl_sagv_disable(struct drm_i915_private *i915);
 
 /* Stores plane specific WM parameters */
@@ -2904,12 +2910,51 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
        return 0;
 }
 
+/*
+ * If Fixed Refresh Rate:
+ * Program DEEP PKG_C_LATENCY Pkg C with the highest valid latency from
+ * watermark level 1 and up. If watermark level 1 is invalid, program it
+ * with all 1's.
+ * Program PKG_C_LATENCY Added Wake Time = DSB execution time
+ * If Variable Refresh Rate:
+ * Program DEEP PKG_C_LATENCY Pkg C with all 1's.
+ * Program PKG_C_LATENCY Added Wake Time = 0
+ */
+static void
+skl_program_dpkgc_latency(struct drm_i915_private *i915, bool vrr_enabled)
+{
+       u32 max_latency = 0;
+       u32 clear = 0, val = 0;
+       u32 added_wake_time = 0;
+
+       if (DISPLAY_VER(i915) < 20)
+               return;
+
+       if (vrr_enabled) {
+               max_latency = LNL_PKG_C_LATENCY_MASK;
+               added_wake_time = 0;
+       } else {
+               max_latency = skl_watermark_max_latency(i915, 1);
+               if (max_latency == 0)
+                       max_latency = LNL_PKG_C_LATENCY_MASK;
+               added_wake_time = DSB_EXE_TIME +
+                       i915->display.sagv.block_time_us;
+       }
+
+       clear |= LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK;
+       val |= REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, max_latency);
+       val |= REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time);
+
+       intel_uncore_rmw(&i915->uncore, LNL_PKG_C_LATENCY, clear, val);
+}
+
 static int
 skl_compute_wm(struct intel_atomic_state *state)
 {
        struct intel_crtc *crtc;
        struct intel_crtc_state __maybe_unused *new_crtc_state;
        int ret, i;
+       bool vrr_enabled = false;
 
        for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
                ret = skl_build_pipe_wm(state, crtc);
@@ -2934,8 +2979,13 @@ skl_compute_wm(struct intel_atomic_state *state)
                ret = skl_wm_add_affected_planes(state, crtc);
                if (ret)
                        return ret;
+
+               if (new_crtc_state->vrr.enable)
+                       vrr_enabled = true;
        }
 
+       skl_program_dpkgc_latency(to_i915(state->base.dev), vrr_enabled);
+
        skl_print_wm_changes(state);
 
        return 0;
@@ -3731,11 +3781,11 @@ void skl_watermark_debugfs_register(struct drm_i915_private *i915)
                                    &intel_sagv_status_fops);
 }
 
-unsigned int skl_watermark_max_latency(struct drm_i915_private *i915)
+unsigned int skl_watermark_max_latency(struct drm_i915_private *i915, int initial_wm_level)
 {
        int level;
 
-       for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
+       for (level = i915->display.wm.num_levels - 1; level >= initial_wm_level; level--) {
                unsigned int latency = skl_wm_latency(i915, level, NULL);
 
                if (latency)
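
The new initial_wm_level parameter changes the helper's contract: instead of always scanning every level, it now returns the highest non-zero latency at or above the given level, or 0 if none is valid (which is what lets skl_program_dpkgc_latency() pass 1 and fall back to all 1's). A minimal standalone sketch of the same scan, with a plain array standing in for the driver's per-device watermark state (latencies[] and num_levels are illustrative names, not i915 fields):

/* Sketch: scan latency levels from the highest down to initial_wm_level,
 * returning the first non-zero latency, or 0 if all scanned levels are
 * invalid. Mirrors the loop in skl_watermark_max_latency().
 */
static unsigned int max_latency_from_level(const unsigned int *latencies,
					   int num_levels, int initial_wm_level)
{
	int level;

	for (level = num_levels - 1; level >= initial_wm_level; level--) {
		if (latencies[level])
			return latencies[level];
	}

	return 0;
}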
index fb0da36fd3ec3a7cb639ffddc273ef6a3df40d0f..e3d1d74a7b170666d7df8412670bfb420060d575 100644 (file)
@@ -46,8 +46,8 @@ void skl_watermark_ipc_update(struct drm_i915_private *i915);
 bool skl_watermark_ipc_enabled(struct drm_i915_private *i915);
 void skl_watermark_debugfs_register(struct drm_i915_private *i915);
 
-unsigned int skl_watermark_max_latency(struct drm_i915_private *i915);
-
+unsigned int skl_watermark_max_latency(struct drm_i915_private *i915,
+                                      int initial_wm_level);
 void skl_wm_init(struct drm_i915_private *i915);
 
 struct intel_dbuf_state {
index 628c5920ad4911a79138ea3f0f1ad5edf24bde91..20b30c9a6613dafc3c98c8868c1bad43df985a04 100644 (file)
 #define MTL_LATENCY_SAGV               _MMIO(0x4578c)
 #define   MTL_LATENCY_QCLK_SAGV                REG_GENMASK(12, 0)
 
+#define LNL_PKG_C_LATENCY              _MMIO(0x46460)
+#define   LNL_ADDED_WAKE_TIME_MASK     REG_GENMASK(28, 16)
+#define   LNL_PKG_C_LATENCY_MASK       REG_GENMASK(12, 0)
+
 #endif /* __SKL_WATERMARK_REGS_H__ */
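
For reference, the two fields packed into LNL_PKG_C_LATENCY by skl_program_dpkgc_latency() above occupy bits 28:16 (added wake time) and 12:0 (package C latency); a hedged sketch of unpacking them with the driver's REG_FIELD_GET helper, where val is a hypothetical raw register read:

/* Sketch: decode LNL_PKG_C_LATENCY; field extraction mirrors the
 * REG_FIELD_PREP packing in skl_program_dpkgc_latency().
 */
u32 added_wake_time = REG_FIELD_GET(LNL_ADDED_WAKE_TIME_MASK, val); /* bits 28:16 */
u32 max_latency = REG_FIELD_GET(LNL_PKG_C_LATENCY_MASK, val);       /* bits 12:0 */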
index 813cc888e6fae3f602661840f726ccf8f8908ec2..be70c46604b49bf244e916d924738818dc2ec82d 100644 (file)
@@ -206,8 +206,6 @@ struct intel_guc {
        u32 ads_golden_ctxt_size;
        /** @ads_capture_size: size of register lists in the ADS used for error capture */
        u32 ads_capture_size;
-       /** @ads_engine_usage_size: size of engine usage in the ADS */
-       u32 ads_engine_usage_size;
 
        /** @lrc_desc_pool_v69: object allocated to hold the GuC LRC descriptor pool */
        struct i915_vma *lrc_desc_pool_v69;
index 4eff44194439500ed88d45c4139e02ac66d8df95..fa6503900c842bea7acf4192140ec08d8f4ceefd 100644 (file)
@@ -152,17 +152,6 @@ struct intel_vgpu_cursor_plane_format {
        u32     y_hot;          /* in pixels */
 };
 
-struct intel_vgpu_pipe_format {
-       struct intel_vgpu_primary_plane_format  primary;
-       struct intel_vgpu_sprite_plane_format   sprite;
-       struct intel_vgpu_cursor_plane_format   cursor;
-       enum DDI_PORT ddi_port;  /* the DDI port that pipe is connected to */
-};
-
-struct intel_vgpu_fb_format {
-       struct intel_vgpu_pipe_format   pipes[I915_MAX_PIPES];
-};
-
 int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
        struct intel_vgpu_primary_plane_format *plane);
 int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
index 4cb183e06e95a1ba408a06099a4049671bea6789..cb50700e6cc90ba5f7341190d61dc67591a7db65 100644 (file)
@@ -93,8 +93,6 @@ struct intel_gvt_gtt_gma_ops {
 struct intel_gvt_gtt {
        const struct intel_gvt_gtt_pte_ops *pte_ops;
        const struct intel_gvt_gtt_gma_ops *gma_ops;
-       int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm);
-       void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
        struct list_head oos_page_use_list_head;
        struct list_head oos_page_free_list_head;
        struct mutex ppgtt_mm_lock;
@@ -210,7 +208,6 @@ struct intel_vgpu_scratch_pt {
 
 struct intel_vgpu_gtt {
        struct intel_vgpu_mm *ggtt_mm;
-       unsigned long active_ppgtt_mm_bitmap;
        struct list_head ppgtt_mm_list_head;
        struct radix_tree_root spt_tree;
        struct list_head oos_page_list_head;
index c57aba09091fe06538bddbe3eead1744b4113c50..2c95aeef4e4156a32d340cd86a39421f276b08f8 100644 (file)
@@ -89,7 +89,6 @@ struct intel_vgpu_gm {
 /* Fences owned by a vGPU */
 struct intel_vgpu_fence {
        struct i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
-       u32 base;
        u32 size;
 };
 
@@ -119,7 +118,6 @@ struct intel_vgpu_irq {
 };
 
 struct intel_vgpu_opregion {
-       bool mapped;
        void *va;
        u32 gfn[INTEL_GVT_OPREGION_PAGES];
 };
@@ -223,7 +221,6 @@ struct intel_vgpu {
 
        struct vfio_region *region;
        int num_regions;
-       struct eventfd_ctx *intx_trigger;
        struct eventfd_ctx *msi_trigger;
 
        /*
@@ -256,7 +253,6 @@ struct intel_gvt_fence {
 
 /* Special MMIO blocks. */
 struct gvt_mmio_block {
-       unsigned int device;
        i915_reg_t   offset;
        unsigned int size;
        gvt_mmio_func read;
@@ -444,7 +440,6 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
 #define vgpu_hidden_gmadr_end(vgpu) \
        (vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)
 
-#define vgpu_fence_base(vgpu) (vgpu->fence.base)
 #define vgpu_fence_sz(vgpu) (vgpu->fence.size)
 
 /* ring context size, i.e. the first 0x50 dwords */
index c8e7dfc9f79109b50179596a1b0b34cf7f11447a..336d079c420793a0a752f52a8fe211f721fe72a7 100644 (file)
@@ -40,7 +40,6 @@ struct intel_gvt_irq_info {
        char *name;
        i915_reg_t reg_base;
        enum intel_gvt_event_type bit_to_event[INTEL_GVT_IRQ_BITWIDTH];
-       unsigned long warned;
        int group;
        DECLARE_BITMAP(downstream_irq_bitmap, INTEL_GVT_IRQ_BITWIDTH);
        bool has_upstream_irq;
index e60ad476fe60462a15c8445e418223cf7280bebf..cd214be98668850421d43e48ddfbec10c6936f21 100644 (file)
@@ -177,7 +177,6 @@ enum intel_gvt_irq_type {
 /* per-event information */
 struct intel_gvt_event_info {
        int bit;                                /* map to register bit */
-       int policy;                             /* forwarding policy */
        struct intel_gvt_irq_info *info;        /* register info */
        gvt_event_virt_handler_t v_handler;     /* for v_event */
 };
@@ -188,7 +187,6 @@ struct intel_gvt_irq {
        struct intel_gvt_irq_info *info[INTEL_GVT_IRQ_INFO_MAX];
        DECLARE_BITMAP(irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX);
        struct intel_gvt_event_info events[INTEL_GVT_EVENT_MAX];
-       DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
        struct intel_gvt_irq_map *irq_map;
 };
 
index bba154e38705971a3755e29cfe700cb6cb58fa86..32ebacb078e8a15276c590e17b02a89b1071feeb 100644 (file)
@@ -62,10 +62,8 @@ typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
 struct intel_gvt_mmio_info {
        u32 offset;
        u64 ro_mask;
-       u32 device;
        gvt_mmio_func read;
        gvt_mmio_func write;
-       u32 addr_range;
        struct hlist_node node;
 };
 
index 1f391b3da2cce3d0f7dcc425abd42ee3fcb0a05a..cd94993278b67ca629c8dc30285a101c1520418f 100644 (file)
@@ -104,10 +104,8 @@ struct intel_vgpu_workload {
 
        /* execlist context information */
        struct execlist_ctx_descriptor_format ctx_desc;
-       struct execlist_ring_context *ring_context;
        unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
        unsigned long guest_rb_head;
-       bool restore_inhibit;
        struct intel_vgpu_elsp_dwords elsp_dwords;
        bool emulate_schedule_in;
        atomic_t shadow_ctx_active;
index a439dd78993660c75f8f8b37618307c01bf565a2..2e7a50d16a88399e8760ae74ba0885059b4e5949 100644 (file)
@@ -24,8 +24,6 @@ struct drm_printer;
 struct i915_drm_client {
        struct kref kref;
 
-       unsigned int id;
-
        spinlock_t ctx_lock; /* For add/remove from ctx_list. */
        struct list_head ctx_list; /* List of contexts belonging to client. */
 
index 46445248d193e6bf0316e4347a993bfc5c01ae1d..39fb6ce4a7ef9946245f77113b98f96c976e6a4f 100644 (file)
@@ -288,7 +288,6 @@ struct i915_perf_stream {
                struct i915_vma *vma;
                u8 *vaddr;
                u32 last_ctx_id;
-               int size_exponent;
 
                /**
                 * @oa_buffer.ptr_lock: Locks reads and writes to all
index f59081066a197a9ae0ccf0a1d2dd9eca01e04700..519e096c607cd8b1943dbc8e5bff14ccf48822db 100644 (file)
@@ -52,7 +52,6 @@
 struct execute_cb {
        struct irq_work work;
        struct i915_sw_fence *fence;
-       struct i915_request *signal;
 };
 
 static struct kmem_cache *slab_requests;
index 64472b7f0e7707c0f68f2ca2c8c8dc4b12b93bdb..559de74d0b114bc4d20bd560e110bd3645502f2b 100644 (file)
@@ -290,7 +290,6 @@ struct i915_vma {
 
        struct list_head obj_link; /* Link in the object's VMA list */
        struct rb_node obj_node;
-       struct hlist_node obj_hash;
 
        /** This vma's place in the eviction list */
        struct list_head evict_link;
index 40810cfb3fd9a2a1d5036525a3b122305fee3d9f..8c927e303c4a69b5d1c292b0364975594955af3b 100644 (file)
@@ -50,8 +50,6 @@ enum intel_region_id {
                for_each_if((mr) = (i915)->mm.regions[id])
 
 struct intel_memory_region_ops {
-       unsigned int flags;
-
        int (*init)(struct intel_memory_region *mem);
        int (*release)(struct intel_memory_region *mem);
 
index 281afff6ee4e558a7bb7e01686904122d9a75445..4891bd916d26a3bb3e37d05e91e7e5e6a473cec3 100644 (file)
 # define STREAM_STATUS_CHANGED               (1 << 2)
 # define HDMI_LINK_STATUS_CHANGED            (1 << 3)
 # define CONNECTED_OFF_ENTRY_REQUESTED       (1 << 4)
+# define DP_TUNNELING_IRQ                    (1 << 5)
 
 #define DP_PSR_ERROR_STATUS                 0x2006  /* XXX 1.2? */
 # define DP_PSR_LINK_CRC_ERROR              (1 << 0)
 #define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET     0x69494
 #define DP_HDCP_2_2_REG_DBG_OFFSET             0x69518
 
+/* DP-tunneling */
+#define DP_TUNNELING_OUI                               0xe0000
+#define  DP_TUNNELING_OUI_BYTES                                3
+
+#define DP_TUNNELING_DEV_ID                            0xe0003
+#define  DP_TUNNELING_DEV_ID_BYTES                     6
+
+#define DP_TUNNELING_HW_REV                            0xe0009
+#define  DP_TUNNELING_HW_REV_MAJOR_SHIFT               4
+#define  DP_TUNNELING_HW_REV_MAJOR_MASK                        (0xf << DP_TUNNELING_HW_REV_MAJOR_SHIFT)
+#define  DP_TUNNELING_HW_REV_MINOR_SHIFT               0
+#define  DP_TUNNELING_HW_REV_MINOR_MASK                        (0xf << DP_TUNNELING_HW_REV_MINOR_SHIFT)
+
+#define DP_TUNNELING_SW_REV_MAJOR                      0xe000a
+#define DP_TUNNELING_SW_REV_MINOR                      0xe000b
+
+#define DP_TUNNELING_CAPABILITIES                      0xe000d
+#define  DP_IN_BW_ALLOCATION_MODE_SUPPORT              (1 << 7)
+#define  DP_PANEL_REPLAY_OPTIMIZATION_SUPPORT          (1 << 6)
+#define  DP_TUNNELING_SUPPORT                          (1 << 0)
+
+#define DP_IN_ADAPTER_INFO                             0xe000e
+#define  DP_IN_ADAPTER_NUMBER_BITS                     7
+#define  DP_IN_ADAPTER_NUMBER_MASK                     ((1 << DP_IN_ADAPTER_NUMBER_BITS) - 1)
+
+#define DP_USB4_DRIVER_ID                              0xe000f
+#define  DP_USB4_DRIVER_ID_BITS                                4
+#define  DP_USB4_DRIVER_ID_MASK                                ((1 << DP_USB4_DRIVER_ID_BITS) - 1)
+
+#define DP_USB4_DRIVER_BW_CAPABILITY                   0xe0020
+#define  DP_USB4_DRIVER_BW_ALLOCATION_MODE_SUPPORT     (1 << 7)
+
+#define DP_IN_ADAPTER_TUNNEL_INFORMATION               0xe0021
+#define  DP_GROUP_ID_BITS                              3
+#define  DP_GROUP_ID_MASK                              ((1 << DP_GROUP_ID_BITS) - 1)
+
+#define DP_BW_GRANULARITY                              0xe0022
+#define  DP_BW_GRANULARITY_MASK                                0x3
+
+#define DP_ESTIMATED_BW                                        0xe0023
+#define DP_ALLOCATED_BW                                        0xe0024
+
+#define DP_TUNNELING_STATUS                            0xe0025
+#define  DP_BW_ALLOCATION_CAPABILITY_CHANGED           (1 << 3)
+#define  DP_ESTIMATED_BW_CHANGED                       (1 << 2)
+#define  DP_BW_REQUEST_SUCCEEDED                       (1 << 1)
+#define  DP_BW_REQUEST_FAILED                          (1 << 0)
+
+#define DP_TUNNELING_MAX_LINK_RATE                     0xe0028
+
+#define DP_TUNNELING_MAX_LANE_COUNT                    0xe0029
+#define  DP_TUNNELING_MAX_LANE_COUNT_MASK              0x1f
+
+#define DP_DPTX_BW_ALLOCATION_MODE_CONTROL             0xe0030
+#define  DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE   (1 << 7)
+#define  DP_UNMASK_BW_ALLOCATION_IRQ                   (1 << 6)
+
+#define DP_REQUEST_BW                                  0xe0031
+#define  MAX_DP_REQUEST_BW                             255
+
 /* LTTPR: Link Training (LT)-tunable PHY Repeaters */
 #define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV 0xf0000 /* 1.3 */
 #define DP_MAX_LINK_RATE_PHY_REPEATER                      0xf0001 /* 1.4a */
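
These new DPCD addresses describe the tunneling register block exposed by a USB4 DP-IN adapter. As a hedged sketch of how a driver could probe for bandwidth allocation support with the existing drm_dp_dpcd_readb() helper (not the tunnel library's actual probe path; aux is assumed to be a live AUX channel):

/* Sketch: probe DP tunneling / BW allocation support over DPCD.
 * drm_dp_dpcd_readb() returns 1 on a successful one-byte read.
 */
static bool dp_tunnel_bw_alloc_supported(struct drm_dp_aux *aux)
{
	u8 caps, drv_caps;

	if (drm_dp_dpcd_readb(aux, DP_TUNNELING_CAPABILITIES, &caps) != 1)
		return false;

	if (!(caps & DP_TUNNELING_SUPPORT) ||
	    !(caps & DP_IN_BW_ALLOCATION_MODE_SUPPORT))
		return false;

	/* The USB4 connection manager side must also support BW allocation. */
	if (drm_dp_dpcd_readb(aux, DP_USB4_DRIVER_BW_CAPABILITY, &drv_caps) != 1)
		return false;

	return drv_caps & DP_USB4_DRIVER_BW_ALLOCATION_MODE_SUPPORT;
}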
index d02014a87f1277b812cbfe624b52a2478e09c6e5..534f78e368b0510d595dbab54274a9f4fcd700a5 100644 (file)
@@ -811,5 +811,6 @@ int drm_dp_bw_overhead(int lane_count, int hactive,
                       int dsc_slice_count,
                       int bpp_x16, unsigned long flags);
 int drm_dp_bw_channel_coding_efficiency(bool is_uhbr);
+int drm_dp_max_dprx_data_rate(int max_link_rate, int max_lanes);
 
 #endif /* _DRM_DP_HELPER_H_ */
diff --git a/include/drm/display/drm_dp_tunnel.h b/include/drm/display/drm_dp_tunnel.h
new file mode 100644 (file)
index 0000000..87212c8
--- /dev/null
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __DRM_DP_TUNNEL_H__
+#define __DRM_DP_TUNNEL_H__
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct drm_dp_aux;
+
+struct drm_device;
+
+struct drm_atomic_state;
+struct drm_dp_tunnel_mgr;
+struct drm_dp_tunnel_state;
+
+struct ref_tracker;
+
+struct drm_dp_tunnel_ref {
+       struct drm_dp_tunnel *tunnel;
+       struct ref_tracker *tracker;
+};
+
+#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL
+
+struct drm_dp_tunnel *
+drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker);
+
+void
+drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker);
+
+static inline void drm_dp_tunnel_ref_get(struct drm_dp_tunnel *tunnel,
+                                        struct drm_dp_tunnel_ref *tunnel_ref)
+{
+       tunnel_ref->tunnel = drm_dp_tunnel_get(tunnel, &tunnel_ref->tracker);
+}
+
+static inline void drm_dp_tunnel_ref_put(struct drm_dp_tunnel_ref *tunnel_ref)
+{
+       drm_dp_tunnel_put(tunnel_ref->tunnel, &tunnel_ref->tracker);
+       tunnel_ref->tunnel = NULL;
+}
+
+struct drm_dp_tunnel *
+drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr,
+                    struct drm_dp_aux *aux);
+int drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel);
+
+int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel);
+int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel);
+bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel);
+int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw);
+int drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel);
+int drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel);
+
+void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel);
+
+int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr,
+                            struct drm_dp_aux *aux);
+
+int drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel);
+int drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel);
+int drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel);
+
+const char *drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel);
+
+struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state,
+                              struct drm_dp_tunnel *tunnel);
+
+struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_old_state(struct drm_atomic_state *state,
+                                  const struct drm_dp_tunnel *tunnel);
+
+struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state,
+                                  const struct drm_dp_tunnel *tunnel);
+
+int drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state,
+                                      struct drm_dp_tunnel *tunnel,
+                                      u8 stream_id, int bw);
+int drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state,
+                                                   const struct drm_dp_tunnel *tunnel,
+                                                   u32 *stream_mask);
+
+int drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,
+                                         u32 *failed_stream_mask);
+
+int drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state);
+
+struct drm_dp_tunnel_mgr *
+drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count);
+void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr);
+
+#else
+
+static inline struct drm_dp_tunnel *
+drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker)
+{
+       return NULL;
+}
+
+static inline void
+drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker) {}
+
+static inline void drm_dp_tunnel_ref_get(struct drm_dp_tunnel *tunnel,
+                                        struct drm_dp_tunnel_ref *tunnel_ref) {}
+
+static inline void drm_dp_tunnel_ref_put(struct drm_dp_tunnel_ref *tunnel_ref) {}
+
+static inline struct drm_dp_tunnel *
+drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr,
+                    struct drm_dp_aux *aux)
+{
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int
+drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel)
+{
+       return 0;
+}
+
+static inline int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel)
+{
+       return false;
+}
+
+static inline int
+drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel)
+{
+       return -1;
+}
+
+static inline int
+drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel) {}
+
+static inline int
+drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr,
+                        struct drm_dp_aux *aux)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel)
+{
+       return 0;
+}
+
+static inline int
+drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel)
+{
+       return 0;
+}
+
+static inline int
+drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel)
+{
+       return -1;
+}
+
+static inline const char *
+drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel)
+{
+       return NULL;
+}
+
+static inline struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state,
+                              struct drm_dp_tunnel *tunnel)
+{
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct drm_dp_tunnel_state *
+drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state,
+                                  const struct drm_dp_tunnel *tunnel)
+{
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int
+drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state,
+                                  struct drm_dp_tunnel *tunnel,
+                                  u8 stream_id, int bw)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state,
+                                               const struct drm_dp_tunnel *tunnel,
+                                               u32 *stream_mask)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,
+                                     u32 *failed_stream_mask)
+{
+       return -EOPNOTSUPP;
+}
+
+static inline int
+drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state)
+{
+       return 0;
+}
+
+static inline struct drm_dp_tunnel_mgr *
+drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
+{
+       return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline
+void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr) {}
+
+#endif /* CONFIG_DRM_DISPLAY_DP_TUNNEL */
+
+#endif /* __DRM_DP_TUNNEL_H__ */
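
The header's reference-counted API suggests a consumer flow along these lines; a hedged sketch assuming a previously created manager and a connector's AUX channel (example_tunnel_setup, mgr, aux, and ref are placeholder names, and a NULL return from detect is not handled here):

/* Sketch: detect a tunnel on an AUX channel, pin it with a tracked
 * reference, and enable BW allocation mode. Illustrative only; the
 * in-tree consumer is intel_dp_tunnel.c.
 */
static int example_tunnel_setup(struct drm_dp_tunnel_mgr *mgr,
				struct drm_dp_aux *aux,
				struct drm_dp_tunnel_ref *ref)
{
	struct drm_dp_tunnel *tunnel;
	int err;

	tunnel = drm_dp_tunnel_detect(mgr, aux);
	if (IS_ERR(tunnel))
		return PTR_ERR(tunnel);

	drm_dp_tunnel_ref_get(tunnel, ref);	/* tracked extra reference */

	err = drm_dp_tunnel_enable_bw_alloc(tunnel);
	if (err) {
		drm_dp_tunnel_ref_put(ref);
		drm_dp_tunnel_destroy(tunnel);
	}

	return err;
}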
index 07779a11758e32fad383633cf3711f36198ac4ab..28a96aa1e08fb0b71fcf476bea68b70bd3ba54bf 100644 (file)
 #define INTEL_ADLN_IDS(info) \
        INTEL_VGA_DEVICE(0x46D0, info), \
        INTEL_VGA_DEVICE(0x46D1, info), \
-       INTEL_VGA_DEVICE(0x46D2, info)
+       INTEL_VGA_DEVICE(0x46D2, info), \
+       INTEL_VGA_DEVICE(0x46D3, info), \
+       INTEL_VGA_DEVICE(0x46D4, info)
 
 /* RPL-S */
 #define INTEL_RPLS_IDS(info) \