git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.12-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 12 Aug 2025 10:55:40 +0000 (12:55 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 12 Aug 2025 10:55:40 +0000 (12:55 +0200)
added patches:
accel-ivpu-fix-reset_engine-debugfs-file-logic.patch
drm-i915-ddi-change-intel_ddi_init_-dp-hdmi-_connector-return-type.patch
drm-i915-ddi-gracefully-handle-errors-from-intel_ddi_init_hdmi_connector.patch
drm-i915-ddi-only-call-shutdown-hooks-for-valid-encoders.patch
drm-i915-display-add-intel_encoder_is_hdmi.patch
drm-i915-hdmi-add-error-handling-in-g4x_hdmi_init.patch
drm-i915-hdmi-propagate-errors-from-intel_hdmi_init_connector.patch
ice-ptp-fix-crosstimestamp-reporting.patch
net-packet-fix-a-race-in-packet_set_ring-and-packet_notifier.patch
revert-bcache-remove-heap-related-macros-and-switch-to-generic-min_heap.patch
selftests-bpf-add-a-test-for-arena-range-tree-algorithm.patch
selftests-bpf-fix-build-error-with-llvm-19.patch
vsock-do-not-allow-binding-to-vmaddr_port_any.patch

14 files changed:
queue-6.12/accel-ivpu-fix-reset_engine-debugfs-file-logic.patch [new file with mode: 0644]
queue-6.12/drm-i915-ddi-change-intel_ddi_init_-dp-hdmi-_connector-return-type.patch [new file with mode: 0644]
queue-6.12/drm-i915-ddi-gracefully-handle-errors-from-intel_ddi_init_hdmi_connector.patch [new file with mode: 0644]
queue-6.12/drm-i915-ddi-only-call-shutdown-hooks-for-valid-encoders.patch [new file with mode: 0644]
queue-6.12/drm-i915-display-add-intel_encoder_is_hdmi.patch [new file with mode: 0644]
queue-6.12/drm-i915-hdmi-add-error-handling-in-g4x_hdmi_init.patch [new file with mode: 0644]
queue-6.12/drm-i915-hdmi-propagate-errors-from-intel_hdmi_init_connector.patch [new file with mode: 0644]
queue-6.12/ice-ptp-fix-crosstimestamp-reporting.patch [new file with mode: 0644]
queue-6.12/net-packet-fix-a-race-in-packet_set_ring-and-packet_notifier.patch [new file with mode: 0644]
queue-6.12/revert-bcache-remove-heap-related-macros-and-switch-to-generic-min_heap.patch [new file with mode: 0644]
queue-6.12/selftests-bpf-add-a-test-for-arena-range-tree-algorithm.patch [new file with mode: 0644]
queue-6.12/selftests-bpf-fix-build-error-with-llvm-19.patch [new file with mode: 0644]
queue-6.12/series
queue-6.12/vsock-do-not-allow-binding-to-vmaddr_port_any.patch [new file with mode: 0644]

diff --git a/queue-6.12/accel-ivpu-fix-reset_engine-debugfs-file-logic.patch b/queue-6.12/accel-ivpu-fix-reset_engine-debugfs-file-logic.patch
new file mode 100644 (file)
index 0000000..30678b0
--- /dev/null
@@ -0,0 +1,86 @@
+From 541a137254c71822e7a3ebdf8309c5a37b7de465 Mon Sep 17 00:00:00 2001
+From: Andrzej Kacprowski <Andrzej.Kacprowski@intel.com>
+Date: Mon, 30 Sep 2024 21:53:12 +0200
+Subject: accel/ivpu: Fix reset_engine debugfs file logic
+
+From: Andrzej Kacprowski <Andrzej.Kacprowski@intel.com>
+
+commit 541a137254c71822e7a3ebdf8309c5a37b7de465 upstream.
+
+The current reset_engine implementation unconditionally resets
+all engines. Improve the implementation to reset only the engine
+requested by user space, allowing more granular testing.
+Also use DEFINE_DEBUGFS_ATTRIBUTE() to simplify the implementation.
+
+The same changes are applied to the resume_engine debugfs file for consistency.
+
+Signed-off-by: Andrzej Kacprowski <Andrzej.Kacprowski@intel.com>
+Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240930195322.461209-22-jacek.lawrynowicz@linux.intel.com
+Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/accel/ivpu/ivpu_debugfs.c |   42 +++++++-------------------------------
+ 1 file changed, 8 insertions(+), 34 deletions(-)
+
+--- a/drivers/accel/ivpu/ivpu_debugfs.c
++++ b/drivers/accel/ivpu/ivpu_debugfs.c
+@@ -346,49 +346,23 @@ static const struct file_operations ivpu
+       .write = ivpu_force_recovery_fn,
+ };
+-static ssize_t
+-ivpu_reset_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
++static int ivpu_reset_engine_fn(void *data, u64 val)
+ {
+-      struct ivpu_device *vdev = file->private_data;
++      struct ivpu_device *vdev = (struct ivpu_device *)data;
+-      if (!size)
+-              return -EINVAL;
+-
+-      if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
+-              return -ENODEV;
+-      if (ivpu_jsm_reset_engine(vdev, DRM_IVPU_ENGINE_COPY))
+-              return -ENODEV;
+-
+-      return size;
++      return ivpu_jsm_reset_engine(vdev, (u32)val);
+ }
+-static const struct file_operations ivpu_reset_engine_fops = {
+-      .owner = THIS_MODULE,
+-      .open = simple_open,
+-      .write = ivpu_reset_engine_fn,
+-};
++DEFINE_DEBUGFS_ATTRIBUTE(ivpu_reset_engine_fops, NULL, ivpu_reset_engine_fn, "0x%02llx\n");
+-static ssize_t
+-ivpu_resume_engine_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
++static int ivpu_resume_engine_fn(void *data, u64 val)
+ {
+-      struct ivpu_device *vdev = file->private_data;
++      struct ivpu_device *vdev = (struct ivpu_device *)data;
+-      if (!size)
+-              return -EINVAL;
+-
+-      if (ivpu_jsm_hws_resume_engine(vdev, DRM_IVPU_ENGINE_COMPUTE))
+-              return -ENODEV;
+-      if (ivpu_jsm_hws_resume_engine(vdev, DRM_IVPU_ENGINE_COPY))
+-              return -ENODEV;
+-
+-      return size;
++      return ivpu_jsm_hws_resume_engine(vdev, (u32)val);
+ }
+-static const struct file_operations ivpu_resume_engine_fops = {
+-      .owner = THIS_MODULE,
+-      .open = simple_open,
+-      .write = ivpu_resume_engine_fn,
+-};
++DEFINE_DEBUGFS_ATTRIBUTE(ivpu_resume_engine_fops, NULL, ivpu_resume_engine_fn, "0x%02llx\n");
+ static int dct_active_get(void *data, u64 *active_percent)
+ {
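
The conversion above leans on DEFINE_DEBUGFS_ATTRIBUTE(), which turns a
simple get/set callback pair into a complete file_operations. A minimal
sketch of the same shape, with hypothetical demo_* names standing in for
the ivpu ones (a write-only file whose input is parsed per the given
format string and passed to the set callback):

#include <linux/debugfs.h>

static int demo_reset_set(void *data, u64 val)
{
	struct demo_device *ddev = data;	/* hypothetical driver struct */

	/* val is the engine id user space wrote, e.g. "echo 1 > reset_engine" */
	return demo_reset_engine(ddev, (u32)val);	/* hypothetical helper */
}

/* NULL get callback: reads fail with -EACCES, the file is write-only */
DEFINE_DEBUGFS_ATTRIBUTE(demo_reset_fops, NULL, demo_reset_set, "0x%02llx\n");

static void demo_debugfs_init(struct demo_device *ddev, struct dentry *root)
{
	/* the _unsafe variant is the documented pairing for this macro */
	debugfs_create_file_unsafe("reset_engine", 0200, root, ddev,
				   &demo_reset_fops);
}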
diff --git a/queue-6.12/drm-i915-ddi-change-intel_ddi_init_-dp-hdmi-_connector-return-type.patch b/queue-6.12/drm-i915-ddi-change-intel_ddi_init_-dp-hdmi-_connector-return-type.patch
new file mode 100644 (file)
index 0000000..84043dc
--- /dev/null
@@ -0,0 +1,105 @@
+From e1980a977686d46dbf45687f7750f1c50d1d6cf8 Mon Sep 17 00:00:00 2001
+From: Jani Nikula <jani.nikula@intel.com>
+Date: Mon, 30 Dec 2024 16:14:40 +0200
+Subject: drm/i915/ddi: change intel_ddi_init_{dp, hdmi}_connector() return type
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+commit e1980a977686d46dbf45687f7750f1c50d1d6cf8 upstream.
+
+The caller doesn't actually need the returned struct intel_connector;
+it's stored in the ->attached_connector of intel_dp and
+intel_hdmi. Switch to returning an int with 0 for success and negative
+error codes to be able to indicate success even when we don't have a
+connector.
+
+Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
+Tested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/8ef7fe838231919e85eaead640c51ad3e4550d27.1735568047.git.jani.nikula@intel.com
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_ddi.c |   20 +++++++++-----------
+ 1 file changed, 9 insertions(+), 11 deletions(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -4413,8 +4413,7 @@ static const struct drm_encoder_funcs in
+       .late_register = intel_ddi_encoder_late_register,
+ };
+-static struct intel_connector *
+-intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
++static int intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
+ {
+       struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+       struct intel_connector *connector;
+@@ -4422,7 +4421,7 @@ intel_ddi_init_dp_connector(struct intel
+       connector = intel_connector_alloc();
+       if (!connector)
+-              return NULL;
++              return -ENOMEM;
+       dig_port->dp.output_reg = DDI_BUF_CTL(port);
+       if (DISPLAY_VER(i915) >= 14)
+@@ -4437,7 +4436,7 @@ intel_ddi_init_dp_connector(struct intel
+       if (!intel_dp_init_connector(dig_port, connector)) {
+               kfree(connector);
+-              return NULL;
++              return -EINVAL;
+       }
+       if (dig_port->base.type == INTEL_OUTPUT_EDP) {
+@@ -4453,7 +4452,7 @@ intel_ddi_init_dp_connector(struct intel
+               }
+       }
+-      return connector;
++      return 0;
+ }
+ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
+@@ -4623,20 +4622,19 @@ static bool bdw_digital_port_connected(s
+       return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
+ }
+-static struct intel_connector *
+-intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
++static int intel_ddi_init_hdmi_connector(struct intel_digital_port *dig_port)
+ {
+       struct intel_connector *connector;
+       enum port port = dig_port->base.port;
+       connector = intel_connector_alloc();
+       if (!connector)
+-              return NULL;
++              return -ENOMEM;
+       dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+       intel_hdmi_init_connector(dig_port, connector);
+-      return connector;
++      return 0;
+ }
+ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
+@@ -5185,7 +5183,7 @@ void intel_ddi_init(struct intel_display
+       intel_infoframe_init(dig_port);
+       if (init_dp) {
+-              if (!intel_ddi_init_dp_connector(dig_port))
++              if (intel_ddi_init_dp_connector(dig_port))
+                       goto err;
+               dig_port->hpd_pulse = intel_dp_hpd_pulse;
+@@ -5199,7 +5197,7 @@ void intel_ddi_init(struct intel_display
+        * but leave it just in case we have some really bad VBTs...
+        */
+       if (encoder->type != INTEL_OUTPUT_EDP && init_hdmi) {
+-              if (!intel_ddi_init_hdmi_connector(dig_port))
++              if (intel_ddi_init_hdmi_connector(dig_port))
+                       goto err;
+       }
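
The new convention is the usual kernel one: return 0 on success and a
negative errno on failure, with the connector reachable through
->attached_connector instead of the return value. A minimal sketch of
the pattern under hypothetical demo_* names:

static int demo_init_connector(struct demo_port *port)
{
	struct demo_connector *connector = demo_connector_alloc();	/* hypothetical */

	if (!connector)
		return -ENOMEM;

	if (!demo_lower_level_init(port, connector)) {	/* hypothetical, true on success */
		kfree(connector);
		return -EINVAL;
	}

	/* success: connector is now reachable via port->attached_connector */
	return 0;
}

Callers then only test for failure, exactly as the hunks above do with
"if (intel_ddi_init_dp_connector(dig_port)) goto err;".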
diff --git a/queue-6.12/drm-i915-ddi-gracefully-handle-errors-from-intel_ddi_init_hdmi_connector.patch b/queue-6.12/drm-i915-ddi-gracefully-handle-errors-from-intel_ddi_init_hdmi_connector.patch
new file mode 100644 (file)
index 0000000..4f474ef
--- /dev/null
@@ -0,0 +1,49 @@
+From 8ea07e294ea2d046e16fa98e37007edcd4b9525d Mon Sep 17 00:00:00 2001
+From: Jani Nikula <jani.nikula@intel.com>
+Date: Mon, 30 Dec 2024 16:14:43 +0200
+Subject: drm/i915/ddi: gracefully handle errors from intel_ddi_init_hdmi_connector()
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+commit 8ea07e294ea2d046e16fa98e37007edcd4b9525d upstream.
+
+Errors from intel_ddi_init_hdmi_connector() can just mean "there's no
+HDMI" while we still want to continue with DP only. Handle the errors
+gracefully, but don't propagate them. Clear hdmi_reg, which is used as
+a proxy to indicate that HDMI is initialized.
+
+v2: Gracefully handle but do not propagate
+
+Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
+Cc: Ville Syrjala <ville.syrjala@linux.intel.com>
+Reported-and-tested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
+Closes: https://lore.kernel.org/r/20241031105145.2140590-1-senozhatsky@chromium.org
+Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org> # v1
+Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/d72cb54ac7cc5ca29b3b9d70e4d368ea41643b08.1735568047.git.jani.nikula@intel.com
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_ddi.c |   11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -4632,7 +4632,16 @@ static int intel_ddi_init_hdmi_connector
+               return -ENOMEM;
+       dig_port->hdmi.hdmi_reg = DDI_BUF_CTL(port);
+-      intel_hdmi_init_connector(dig_port, connector);
++
++      if (!intel_hdmi_init_connector(dig_port, connector)) {
++              /*
++               * HDMI connector init failures may just mean conflicting DDC
++               * pins or not having enough lanes. Handle them gracefully, but
++               * don't fail the entire DDI init.
++               */
++              dig_port->hdmi.hdmi_reg = INVALID_MMIO_REG;
++              kfree(connector);
++      }
+       return 0;
+ }
diff --git a/queue-6.12/drm-i915-ddi-only-call-shutdown-hooks-for-valid-encoders.patch b/queue-6.12/drm-i915-ddi-only-call-shutdown-hooks-for-valid-encoders.patch
new file mode 100644 (file)
index 0000000..b0ca710
--- /dev/null
@@ -0,0 +1,42 @@
+From 60a43ecbd59decb77b31c09a73f09e1d4f4d1c4c Mon Sep 17 00:00:00 2001
+From: Jani Nikula <jani.nikula@intel.com>
+Date: Mon, 30 Dec 2024 16:14:45 +0200
+Subject: drm/i915/ddi: only call shutdown hooks for valid encoders
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+commit 60a43ecbd59decb77b31c09a73f09e1d4f4d1c4c upstream.
+
+DDI might be HDMI or DP only, leaving the other encoder
+uninitialized. Calling the shutdown hook on an uninitialized encoder may
+lead to a NULL pointer dereference. Check the encoder types (and thus
+validity via the DP output_reg or HDMI hdmi_reg checks) before calling
+the hooks.
+
+Reported-and-tested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
+Closes: https://lore.kernel.org/r/20241031105145.2140590-1-senozhatsky@chromium.org
+Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
+Cc: Ville Syrjala <ville.syrjala@linux.intel.com>
+Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/8b197c50e7f3be2bbc07e3935b21e919815015d5.1735568047.git.jani.nikula@intel.com
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_ddi.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_ddi.c
++++ b/drivers/gpu/drm/i915/display/intel_ddi.c
+@@ -4798,8 +4798,10 @@ static void intel_ddi_tc_encoder_suspend
+ static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder)
+ {
+-      intel_dp_encoder_shutdown(encoder);
+-      intel_hdmi_encoder_shutdown(encoder);
++      if (intel_encoder_is_dp(encoder))
++              intel_dp_encoder_shutdown(encoder);
++      if (intel_encoder_is_hdmi(encoder))
++              intel_hdmi_encoder_shutdown(encoder);
+ }
+ static void intel_ddi_tc_encoder_shutdown_complete(struct intel_encoder *encoder)
diff --git a/queue-6.12/drm-i915-display-add-intel_encoder_is_hdmi.patch b/queue-6.12/drm-i915-display-add-intel_encoder_is_hdmi.patch
new file mode 100644 (file)
index 0000000..e54e767
--- /dev/null
@@ -0,0 +1,44 @@
+From efa43b751637c0e16a92e1787f1d8baaf56dafba Mon Sep 17 00:00:00 2001
+From: Jani Nikula <jani.nikula@intel.com>
+Date: Mon, 30 Dec 2024 16:14:44 +0200
+Subject: drm/i915/display: add intel_encoder_is_hdmi()
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+commit efa43b751637c0e16a92e1787f1d8baaf56dafba upstream.
+
+Similar to intel_encoder_is_dp() and friends.
+
+Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
+Cc: Ville Syrjala <ville.syrjala@linux.intel.com>
+Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
+Tested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/e6bf9e01deb5d0d8b566af128a762d1313638847.1735568047.git.jani.nikula@intel.com
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_display_types.h |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/drivers/gpu/drm/i915/display/intel_display_types.h
++++ b/drivers/gpu/drm/i915/display/intel_display_types.h
+@@ -2075,6 +2075,19 @@ static inline bool intel_encoder_is_dp(s
+       }
+ }
++static inline bool intel_encoder_is_hdmi(struct intel_encoder *encoder)
++{
++      switch (encoder->type) {
++      case INTEL_OUTPUT_HDMI:
++              return true;
++      case INTEL_OUTPUT_DDI:
++              /* See if the HDMI encoder is valid. */
++              return i915_mmio_reg_valid(enc_to_intel_hdmi(encoder)->hdmi_reg);
++      default:
++              return false;
++      }
++}
++
+ static inline struct intel_lspcon *
+ enc_to_intel_lspcon(struct intel_encoder *encoder)
+ {
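
Together with the two preceding patches, this establishes a "register
validity as presence flag" scheme: a failed HDMI connector init clears
hdmi_reg to INVALID_MMIO_REG, and intel_encoder_is_hdmi() then uses
i915_mmio_reg_valid() on that register to decide whether the HDMI side
of a DDI encoder exists before the shutdown hooks touch it. A
self-contained sketch of the idea with simplified hypothetical types:

#include <linux/types.h>

typedef struct { u32 reg; } demo_reg_t;			/* cf. i915_reg_t */
#define DEMO_INVALID_REG ((demo_reg_t){ .reg = 0 })	/* cf. INVALID_MMIO_REG */

static inline bool demo_reg_valid(demo_reg_t r)		/* cf. i915_mmio_reg_valid() */
{
	return r.reg != 0;
}

struct demo_encoder {
	demo_reg_t hdmi_reg;	/* left invalid when HDMI init failed */
};

void demo_hdmi_shutdown(struct demo_encoder *enc);	/* hypothetical hook */

static void demo_encoder_shutdown(struct demo_encoder *enc)
{
	/* only call into the HDMI side if it was actually brought up */
	if (demo_reg_valid(enc->hdmi_reg))
		demo_hdmi_shutdown(enc);
}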
diff --git a/queue-6.12/drm-i915-hdmi-add-error-handling-in-g4x_hdmi_init.patch b/queue-6.12/drm-i915-hdmi-add-error-handling-in-g4x_hdmi_init.patch
new file mode 100644 (file)
index 0000000..326d18b
--- /dev/null
@@ -0,0 +1,121 @@
+From 7603ba81225c815d2ceb4ad52f13e8df4b9d03cc Mon Sep 17 00:00:00 2001
+From: Jani Nikula <jani.nikula@intel.com>
+Date: Mon, 30 Dec 2024 16:14:42 +0200
+Subject: drm/i915/hdmi: add error handling in g4x_hdmi_init()
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+commit 7603ba81225c815d2ceb4ad52f13e8df4b9d03cc upstream.
+
+Handle encoder and connector init failures in g4x_hdmi_init(). This is
+similar to g4x_dp_init().
+
+Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
+Cc: Ville Syrjala <ville.syrjala@linux.intel.com>
+Reported-and-tested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
+Closes: https://lore.kernel.org/r/20241031105145.2140590-1-senozhatsky@chromium.org
+Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/cafae7bf1f9ffb8f6a1d7a508cd2ce7dcf06fef7.1735568047.git.jani.nikula@intel.com
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/g4x_hdmi.c |   35 +++++++++++++++++++++-----------
+ drivers/gpu/drm/i915/display/g4x_hdmi.h |    5 ++--
+ 2 files changed, 26 insertions(+), 14 deletions(-)
+
+--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
++++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
+@@ -683,7 +683,7 @@ static bool assert_hdmi_port_valid(struc
+                        "Platform does not support HDMI %c\n", port_name(port));
+ }
+-void g4x_hdmi_init(struct drm_i915_private *dev_priv,
++bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
+                  i915_reg_t hdmi_reg, enum port port)
+ {
+       struct intel_display *display = &dev_priv->display;
+@@ -693,10 +693,10 @@ void g4x_hdmi_init(struct drm_i915_priva
+       struct intel_connector *intel_connector;
+       if (!assert_port_valid(dev_priv, port))
+-              return;
++              return false;
+       if (!assert_hdmi_port_valid(dev_priv, port))
+-              return;
++              return false;
+       devdata = intel_bios_encoder_data_lookup(display, port);
+@@ -707,15 +707,13 @@ void g4x_hdmi_init(struct drm_i915_priva
+       dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
+       if (!dig_port)
+-              return;
++              return false;
+       dig_port->aux_ch = AUX_CH_NONE;
+       intel_connector = intel_connector_alloc();
+-      if (!intel_connector) {
+-              kfree(dig_port);
+-              return;
+-      }
++      if (!intel_connector)
++              goto err_connector_alloc;
+       intel_encoder = &dig_port->base;
+@@ -723,9 +721,10 @@ void g4x_hdmi_init(struct drm_i915_priva
+       mutex_init(&dig_port->hdcp_mutex);
+-      drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+-                       &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
+-                       "HDMI %c", port_name(port));
++      if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
++                           &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS,
++                           "HDMI %c", port_name(port)))
++              goto err_encoder_init;
+       intel_encoder->hotplug = intel_hdmi_hotplug;
+       intel_encoder->compute_config = g4x_hdmi_compute_config;
+@@ -788,5 +787,17 @@ void g4x_hdmi_init(struct drm_i915_priva
+       intel_infoframe_init(dig_port);
+-      intel_hdmi_init_connector(dig_port, intel_connector);
++      if (!intel_hdmi_init_connector(dig_port, intel_connector))
++              goto err_init_connector;
++
++      return true;
++
++err_init_connector:
++      drm_encoder_cleanup(&intel_encoder->base);
++err_encoder_init:
++      kfree(intel_connector);
++err_connector_alloc:
++      kfree(dig_port);
++
++      return false;
+ }
+--- a/drivers/gpu/drm/i915/display/g4x_hdmi.h
++++ b/drivers/gpu/drm/i915/display/g4x_hdmi.h
+@@ -16,14 +16,15 @@ struct drm_connector;
+ struct drm_i915_private;
+ #ifdef I915
+-void g4x_hdmi_init(struct drm_i915_private *dev_priv,
++bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
+                  i915_reg_t hdmi_reg, enum port port);
+ int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
+                                   struct drm_atomic_state *state);
+ #else
+-static inline void g4x_hdmi_init(struct drm_i915_private *dev_priv,
++static inline bool g4x_hdmi_init(struct drm_i915_private *dev_priv,
+                                i915_reg_t hdmi_reg, int port)
+ {
++      return false;
+ }
+ static inline int g4x_hdmi_connector_atomic_check(struct drm_connector *connector,
+                                                 struct drm_atomic_state *state)
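
The error path added here is the standard kernel goto-unwind ladder:
each label undoes exactly the allocations and registrations that
succeeded before the failing step, in reverse order. A minimal sketch
of the shape using hypothetical demo_* names:

#include <linux/slab.h>

static bool demo_output_init(struct demo_device *dev)
{
	struct demo_port *port;
	struct demo_connector *connector;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return false;

	connector = demo_connector_alloc();		/* hypothetical */
	if (!connector)
		goto err_connector_alloc;

	if (demo_encoder_init(dev, port))		/* hypothetical, 0 on success */
		goto err_encoder_init;

	if (!demo_connector_init(port, connector))	/* hypothetical, true on success */
		goto err_init_connector;

	return true;

err_init_connector:
	demo_encoder_cleanup(port);			/* undo demo_encoder_init() */
err_encoder_init:
	kfree(connector);				/* undo demo_connector_alloc() */
err_connector_alloc:
	kfree(port);					/* undo kzalloc() */
	return false;
}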
diff --git a/queue-6.12/drm-i915-hdmi-propagate-errors-from-intel_hdmi_init_connector.patch b/queue-6.12/drm-i915-hdmi-propagate-errors-from-intel_hdmi_init_connector.patch
new file mode 100644 (file)
index 0000000..2b735e6
--- /dev/null
@@ -0,0 +1,77 @@
+From 7fb56536fa37e23bc291d31c10e575d500f4fda7 Mon Sep 17 00:00:00 2001
+From: Jani Nikula <jani.nikula@intel.com>
+Date: Mon, 30 Dec 2024 16:14:41 +0200
+Subject: drm/i915/hdmi: propagate errors from intel_hdmi_init_connector()
+
+From: Jani Nikula <jani.nikula@intel.com>
+
+commit 7fb56536fa37e23bc291d31c10e575d500f4fda7 upstream.
+
+Propagate errors from intel_hdmi_init_connector() to be able to handle
+them at callers. This is similar to intel_dp_init_connector().
+
+Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
+Cc: Ville Syrjala <ville.syrjala@linux.intel.com>
+Reported-and-tested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
+Closes: https://lore.kernel.org/r/20241031105145.2140590-1-senozhatsky@chromium.org
+Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/cdaf9e32cc4880c46e120933438c37b4d87be12e.1735568047.git.jani.nikula@intel.com
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_hdmi.c |   10 ++++++----
+ drivers/gpu/drm/i915/display/intel_hdmi.h |    2 +-
+ 2 files changed, 7 insertions(+), 5 deletions(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
+@@ -3015,7 +3015,7 @@ void intel_infoframe_init(struct intel_d
+       }
+ }
+-void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
++bool intel_hdmi_init_connector(struct intel_digital_port *dig_port,
+                              struct intel_connector *intel_connector)
+ {
+       struct intel_display *display = to_intel_display(dig_port);
+@@ -3033,17 +3033,17 @@ void intel_hdmi_init_connector(struct in
+                   intel_encoder->base.base.id, intel_encoder->base.name);
+       if (DISPLAY_VER(display) < 12 && drm_WARN_ON(dev, port == PORT_A))
+-              return;
++              return false;
+       if (drm_WARN(dev, dig_port->max_lanes < 4,
+                    "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
+                    dig_port->max_lanes, intel_encoder->base.base.id,
+                    intel_encoder->base.name))
+-              return;
++              return false;
+       ddc_pin = intel_hdmi_ddc_pin(intel_encoder);
+       if (!ddc_pin)
+-              return;
++              return false;
+       drm_connector_init_with_ddc(dev, connector,
+                                   &intel_hdmi_connector_funcs,
+@@ -3088,6 +3088,8 @@ void intel_hdmi_init_connector(struct in
+                                          &conn_info);
+       if (!intel_hdmi->cec_notifier)
+               drm_dbg_kms(display->drm, "CEC notifier get failed\n");
++
++      return true;
+ }
+ /*
+--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
++++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
+@@ -22,7 +22,7 @@ struct intel_encoder;
+ struct intel_hdmi;
+ union hdmi_infoframe;
+-void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
++bool intel_hdmi_init_connector(struct intel_digital_port *dig_port,
+                              struct intel_connector *intel_connector);
+ bool intel_hdmi_compute_has_hdmi_sink(struct intel_encoder *encoder,
+                                     const struct intel_crtc_state *crtc_state,
diff --git a/queue-6.12/ice-ptp-fix-crosstimestamp-reporting.patch b/queue-6.12/ice-ptp-fix-crosstimestamp-reporting.patch
new file mode 100644 (file)
index 0000000..3adadcf
--- /dev/null
@@ -0,0 +1,44 @@
+From a5a441ae283d54ec329aadc7426991dc32786d52 Mon Sep 17 00:00:00 2001
+From: Anton Nadezhdin <anton.nadezhdin@intel.com>
+Date: Tue, 20 May 2025 10:42:16 +0200
+Subject: ice/ptp: fix crosstimestamp reporting
+
+From: Anton Nadezhdin <anton.nadezhdin@intel.com>
+
+commit a5a441ae283d54ec329aadc7426991dc32786d52 upstream.
+
+Set use_nsecs=true as the timestamp is reported in ns. Lacking this
+results in a smaller timestamp error window, which causes an error
+during phc2sys execution on E825 NICs:
+phc2sys[1768.256]: ioctl PTP_SYS_OFFSET_PRECISE: Invalid argument
+
+This problem was introduced in the cited commit which omitted setting
+use_nsecs to true when converting the ice driver to use
+convert_base_to_cs().
+
+Testing hints (ethX is PF netdev):
+phc2sys -s ethX -c CLOCK_REALTIME  -O 37 -m
+phc2sys[1769.256]: CLOCK_REALTIME phc offset -5 s0 freq      -0 delay    0
+
+Fixes: d4bea547ebb57 ("ice/ptp: Remove convert_art_to_tsc()")
+Signed-off-by: Anton Nadezhdin <anton.nadezhdin@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Arkadiusz Kubalewski <arkadiusz.kubalewski@intel.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Markus Blöchl <markus@blochl.de>
+---
+ drivers/net/ethernet/intel/ice/ice_ptp.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
++++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
+@@ -2226,6 +2226,7 @@ ice_ptp_get_syncdevicetime(ktime_t *devi
+                       hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
+                       system->cycles = hh_ts;
+                       system->cs_id = CSID_X86_ART;
++                      system->use_nsecs = true;
+                       /* Read Device source clock time */
+                       hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
+                       hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
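
For context, the callback is filling a struct system_counterval_t for
get_device_system_crosststamp(). On E825 the ART snapshot is already
expressed in nanoseconds, so use_nsecs tells convert_base_to_cs() to
treat ->cycles as ns rather than raw base-clock cycles. A sketch of the
relevant assignments, with a hypothetical helper name:

#include <linux/clocksource_ids.h>
#include <linux/timekeeping.h>

/* hypothetical helper mirroring the hunk above */
static void demo_fill_crosststamp(struct system_counterval_t *system, u64 art_ns)
{
	system->cycles    = art_ns;		/* snapshot, already in ns */
	system->cs_id     = CSID_X86_ART;	/* base clock: Always Running Timer */
	system->use_nsecs = true;		/* the one-line fix: declare the unit */
}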
diff --git a/queue-6.12/net-packet-fix-a-race-in-packet_set_ring-and-packet_notifier.patch b/queue-6.12/net-packet-fix-a-race-in-packet_set_ring-and-packet_notifier.patch
new file mode 100644 (file)
index 0000000..e1107b3
--- /dev/null
@@ -0,0 +1,64 @@
+From 01d3c8417b9c1b884a8a981a3b886da556512f36 Mon Sep 17 00:00:00 2001
+From: Quang Le <quanglex97@gmail.com>
+Date: Fri, 1 Aug 2025 13:54:16 -0400
+Subject: net/packet: fix a race in packet_set_ring() and packet_notifier()
+
+From: Quang Le <quanglex97@gmail.com>
+
+commit 01d3c8417b9c1b884a8a981a3b886da556512f36 upstream.
+
+When packet_set_ring() releases po->bind_lock, another thread can
+run packet_notifier() and process a NETDEV_UP event.
+
+This race and the fix are both similar to that of commit 15fe076edea7
+("net/packet: fix a race in packet_bind() and packet_notifier()").
+
+There too the packet_notifier NETDEV_UP event managed to run while a
+po->bind_lock critical section had to be temporarily released. And
+the fix is similar: temporarily set po->num to zero to keep the
+socket unhooked until the lock is retaken.
+
+The po->bind_lock usage in packet_set_ring and packet_notifier
+predates git history.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Cc: stable@vger.kernel.org
+Signed-off-by: Quang Le <quanglex97@gmail.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20250801175423.2970334-1-willemdebruijn.kernel@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -4562,10 +4562,10 @@ static int packet_set_ring(struct sock *
+       spin_lock(&po->bind_lock);
+       was_running = packet_sock_flag(po, PACKET_SOCK_RUNNING);
+       num = po->num;
+-      if (was_running) {
+-              WRITE_ONCE(po->num, 0);
++      WRITE_ONCE(po->num, 0);
++      if (was_running)
+               __unregister_prot_hook(sk, false);
+-      }
++
+       spin_unlock(&po->bind_lock);
+       synchronize_net();
+@@ -4597,10 +4597,10 @@ static int packet_set_ring(struct sock *
+       mutex_unlock(&po->pg_vec_lock);
+       spin_lock(&po->bind_lock);
+-      if (was_running) {
+-              WRITE_ONCE(po->num, num);
++      WRITE_ONCE(po->num, num);
++      if (was_running)
+               register_prot_hook(sk);
+-      }
++
+       spin_unlock(&po->bind_lock);
+       if (pg_vec && (po->tp_version > TPACKET_V2)) {
+               /* Because we don't support block-based V3 on tx-ring */
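
To make the race concrete, a sketch of the interleaving this closes
when the socket was not running (so the old code left po->num nonzero
across the unlocked window):

/*
 *   CPU0: packet_set_ring()             CPU1: packet_notifier(NETDEV_UP)
 *   --------------------------------    --------------------------------
 *   spin_lock(&po->bind_lock);
 *   was_running == false, po->num != 0
 *   spin_unlock(&po->bind_lock);
 *                                       spin_lock(&po->bind_lock);
 *                                       po->num != 0 -> register_prot_hook()
 *                                       spin_unlock(&po->bind_lock);
 *   swap ring buffers, now racing with
 *   packets delivered through the hook
 *
 * Unconditionally publishing po->num = 0 before the unlock makes CPU1
 * skip the re-hook until the original value is restored under the lock.
 */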
diff --git a/queue-6.12/revert-bcache-remove-heap-related-macros-and-switch-to-generic-min_heap.patch b/queue-6.12/revert-bcache-remove-heap-related-macros-and-switch-to-generic-min_heap.patch
new file mode 100644 (file)
index 0000000..73b7b6f
--- /dev/null
@@ -0,0 +1,1089 @@
+From 48fd7ebe00c1cdc782b42576548b25185902f64c Mon Sep 17 00:00:00 2001
+From: Kuan-Wei Chiu <visitorckw@gmail.com>
+Date: Sun, 15 Jun 2025 04:23:52 +0800
+Subject: Revert "bcache: remove heap-related macros and switch to generic min_heap"
+
+From: Kuan-Wei Chiu <visitorckw@gmail.com>
+
+commit 48fd7ebe00c1cdc782b42576548b25185902f64c upstream.
+
+This reverts commit 866898efbb25bb44fd42848318e46db9e785973a.
+
+The generic bottom-up min_heap implementation causes performance
+regression in invalidate_buckets_lru(), a hot path in bcache.  Before the
+cache is fully populated, new_bucket_prio() often returns zero, leading to
+many equal comparisons.  In such cases, bottom-up sift_down performs up to
+2 * log2(n) comparisons, while the original top-down approach completes
+with just O(1) comparisons, resulting in a measurable performance gap.
+
+The performance degradation is further worsened by the non-inlined
+min_heap API functions introduced in commit 92a8b224b833 ("lib/min_heap:
+introduce non-inline versions of min heap API functions"), adding function
+call overhead to this critical path.
+
+As reported by Robert, bcache now suffers from latency spikes, with P100
+(max) latency increasing from 600 ms to 2.4 seconds every 5 minutes.
+These regressions degrade bcache's effectiveness as a low-latency cache
+layer and lead to frequent timeouts and application stalls in production
+environments.
+
+This revert aims to restore bcache's original low-latency behavior.
+
+Link: https://lore.kernel.org/lkml/CAJhEC05+0S69z+3+FB2Cd0hD+pCRyWTKLEOsc8BOmH73p1m+KQ@mail.gmail.com
+Link: https://lkml.kernel.org/r/20250614202353.1632957-3-visitorckw@gmail.com
+Fixes: 866898efbb25 ("bcache: remove heap-related macros and switch to generic min_heap")
+Fixes: 92a8b224b833 ("lib/min_heap: introduce non-inline versions of min heap API functions")
+Signed-off-by: Kuan-Wei Chiu <visitorckw@gmail.com>
+Reported-by: Robert Pang <robertpang@google.com>
+Closes: https://lore.kernel.org/linux-bcache/CAJhEC06F_AtrPgw2-7CvCqZgeStgCtitbD-ryuPpXQA-JG5XXw@mail.gmail.com
+Acked-by: Coly Li <colyli@kernel.org>
+Cc: Ching-Chun (Jim) Huang <jserv@ccns.ncku.edu.tw>
+Cc: Kent Overstreet <kent.overstreet@linux.dev>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Kuan-Wei Chiu <visitorckw@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/bcache/alloc.c     |   64 +++++----------------
+ drivers/md/bcache/bcache.h    |    2 
+ drivers/md/bcache/bset.c      |  124 +++++++++++++++---------------------------
+ drivers/md/bcache/bset.h      |   42 ++++++++------
+ drivers/md/bcache/btree.c     |   69 +++++++++--------------
+ drivers/md/bcache/extents.c   |   51 ++++++-----------
+ drivers/md/bcache/movinggc.c  |   41 +++----------
+ drivers/md/bcache/super.c     |    3 -
+ drivers/md/bcache/sysfs.c     |    4 -
+ drivers/md/bcache/util.h      |   67 ++++++++++++++++++++++
+ drivers/md/bcache/writeback.c |   13 +---
+ 11 files changed, 217 insertions(+), 263 deletions(-)
+
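
The comparison-count argument above, as a sketch: with all-equal
priorities, a classic top-down sift_down exits at the first level
because no child compares strictly greater, while the bottom-up variant
first descends all the way to a leaf and then walks back up even though
nothing needs to move. An illustrative top-down version (generic C, not
bcache code):

static void topdown_sift_down(int *heap, int n, int i)
{
	for (;;) {
		int largest = i;
		int l = 2 * i + 1, r = 2 * i + 2;

		if (l < n && heap[l] > heap[largest])
			largest = l;
		if (r < n && heap[r] > heap[largest])
			largest = r;
		if (largest == i)
			return;	/* all-equal input: returns after ~2 comparisons */

		/* move down one level and continue */
		int tmp = heap[i];
		heap[i] = heap[largest];
		heap[largest] = tmp;
		i = largest;
	}
}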
+--- a/drivers/md/bcache/alloc.c
++++ b/drivers/md/bcache/alloc.c
+@@ -164,68 +164,40 @@ static void bch_invalidate_one_bucket(st
+  * prio is worth 1/8th of what INITIAL_PRIO is worth.
+  */
+-static inline unsigned int new_bucket_prio(struct cache *ca, struct bucket *b)
+-{
+-      unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;
+-
+-      return (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);
+-}
+-
+-static inline bool new_bucket_max_cmp(const void *l, const void *r, void *args)
+-{
+-      struct bucket **lhs = (struct bucket **)l;
+-      struct bucket **rhs = (struct bucket **)r;
+-      struct cache *ca = args;
+-
+-      return new_bucket_prio(ca, *lhs) > new_bucket_prio(ca, *rhs);
+-}
+-
+-static inline bool new_bucket_min_cmp(const void *l, const void *r, void *args)
+-{
+-      struct bucket **lhs = (struct bucket **)l;
+-      struct bucket **rhs = (struct bucket **)r;
+-      struct cache *ca = args;
+-
+-      return new_bucket_prio(ca, *lhs) < new_bucket_prio(ca, *rhs);
+-}
+-
+-static inline void new_bucket_swap(void *l, void *r, void __always_unused *args)
+-{
+-      struct bucket **lhs = l, **rhs = r;
++#define bucket_prio(b)                                                        \
++({                                                                    \
++      unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
++                                                                      \
++      (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);  \
++})
+-      swap(*lhs, *rhs);
+-}
++#define bucket_max_cmp(l, r)  (bucket_prio(l) < bucket_prio(r))
++#define bucket_min_cmp(l, r)  (bucket_prio(l) > bucket_prio(r))
+ static void invalidate_buckets_lru(struct cache *ca)
+ {
+       struct bucket *b;
+-      const struct min_heap_callbacks bucket_max_cmp_callback = {
+-              .less = new_bucket_max_cmp,
+-              .swp = new_bucket_swap,
+-      };
+-      const struct min_heap_callbacks bucket_min_cmp_callback = {
+-              .less = new_bucket_min_cmp,
+-              .swp = new_bucket_swap,
+-      };
++      ssize_t i;
+-      ca->heap.nr = 0;
++      ca->heap.used = 0;
+       for_each_bucket(b, ca) {
+               if (!bch_can_invalidate_bucket(ca, b))
+                       continue;
+-              if (!min_heap_full(&ca->heap))
+-                      min_heap_push(&ca->heap, &b, &bucket_max_cmp_callback, ca);
+-              else if (!new_bucket_max_cmp(&b, min_heap_peek(&ca->heap), ca)) {
++              if (!heap_full(&ca->heap))
++                      heap_add(&ca->heap, b, bucket_max_cmp);
++              else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
+                       ca->heap.data[0] = b;
+-                      min_heap_sift_down(&ca->heap, 0, &bucket_max_cmp_callback, ca);
++                      heap_sift(&ca->heap, 0, bucket_max_cmp);
+               }
+       }
+-      min_heapify_all(&ca->heap, &bucket_min_cmp_callback, ca);
++      for (i = ca->heap.used / 2 - 1; i >= 0; --i)
++              heap_sift(&ca->heap, i, bucket_min_cmp);
+       while (!fifo_full(&ca->free_inc)) {
+-              if (!ca->heap.nr) {
++              if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
+                       /*
+                        * We don't want to be calling invalidate_buckets()
+                        * multiple times when it can't do anything
+@@ -234,8 +206,6 @@ static void invalidate_buckets_lru(struc
+                       wake_up_gc(ca->set);
+                       return;
+               }
+-              b = min_heap_peek(&ca->heap)[0];
+-              min_heap_pop(&ca->heap, &bucket_min_cmp_callback, ca);
+               bch_invalidate_one_bucket(ca, b);
+       }
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -458,7 +458,7 @@ struct cache {
+       /* Allocation stuff: */
+       struct bucket           *buckets;
+-      DEFINE_MIN_HEAP(struct bucket *, cache_heap) heap;
++      DECLARE_HEAP(struct bucket *, heap);
+       /*
+        * If nonzero, we know we aren't going to find any buckets to invalidate
+--- a/drivers/md/bcache/bset.c
++++ b/drivers/md/bcache/bset.c
+@@ -54,11 +54,9 @@ void bch_dump_bucket(struct btree_keys *
+ int __bch_count_data(struct btree_keys *b)
+ {
+       unsigned int ret = 0;
+-      struct btree_iter iter;
++      struct btree_iter_stack iter;
+       struct bkey *k;
+-      min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-
+       if (b->ops->is_extents)
+               for_each_key(b, k, &iter)
+                       ret += KEY_SIZE(k);
+@@ -69,11 +67,9 @@ void __bch_check_keys(struct btree_keys
+ {
+       va_list args;
+       struct bkey *k, *p = NULL;
+-      struct btree_iter iter;
++      struct btree_iter_stack iter;
+       const char *err;
+-      min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-
+       for_each_key(b, k, &iter) {
+               if (b->ops->is_extents) {
+                       err = "Keys out of order";
+@@ -114,9 +110,9 @@ bug:
+ static void bch_btree_iter_next_check(struct btree_iter *iter)
+ {
+-      struct bkey *k = iter->heap.data->k, *next = bkey_next(k);
++      struct bkey *k = iter->data->k, *next = bkey_next(k);
+-      if (next < iter->heap.data->end &&
++      if (next < iter->data->end &&
+           bkey_cmp(k, iter->b->ops->is_extents ?
+                    &START_KEY(next) : next) > 0) {
+               bch_dump_bucket(iter->b);
+@@ -883,14 +879,12 @@ unsigned int bch_btree_insert_key(struct
+       unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
+       struct bset *i = bset_tree_last(b)->data;
+       struct bkey *m, *prev = NULL;
+-      struct btree_iter iter;
++      struct btree_iter_stack iter;
+       struct bkey preceding_key_on_stack = ZERO_KEY;
+       struct bkey *preceding_key_p = &preceding_key_on_stack;
+       BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
+-      min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-
+       /*
+        * If k has preceding key, preceding_key_p will be set to address
+        *  of k's preceding key; otherwise preceding_key_p will be set
+@@ -901,9 +895,9 @@ unsigned int bch_btree_insert_key(struct
+       else
+               preceding_key(k, &preceding_key_p);
+-      m = bch_btree_iter_init(b, &iter, preceding_key_p);
++      m = bch_btree_iter_stack_init(b, &iter, preceding_key_p);
+-      if (b->ops->insert_fixup(b, k, &iter, replace_key))
++      if (b->ops->insert_fixup(b, k, &iter.iter, replace_key))
+               return status;
+       status = BTREE_INSERT_STATUS_INSERT;
+@@ -1083,102 +1077,79 @@ struct bkey *__bch_bset_search(struct bt
+ /* Btree iterator */
+-typedef bool (new_btree_iter_cmp_fn)(const void *, const void *, void *);
+-
+-static inline bool new_btree_iter_cmp(const void *l, const void *r, void __always_unused *args)
+-{
+-      const struct btree_iter_set *_l = l;
+-      const struct btree_iter_set *_r = r;
+-
+-      return bkey_cmp(_l->k, _r->k) <= 0;
+-}
++typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
++                               struct btree_iter_set);
+-static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args)
++static inline bool btree_iter_cmp(struct btree_iter_set l,
++                                struct btree_iter_set r)
+ {
+-      struct btree_iter_set *_iter1 = iter1;
+-      struct btree_iter_set *_iter2 = iter2;
+-
+-      swap(*_iter1, *_iter2);
++      return bkey_cmp(l.k, r.k) > 0;
+ }
+ static inline bool btree_iter_end(struct btree_iter *iter)
+ {
+-      return !iter->heap.nr;
++      return !iter->used;
+ }
+ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
+                        struct bkey *end)
+ {
+-      const struct min_heap_callbacks callbacks = {
+-              .less = new_btree_iter_cmp,
+-              .swp = new_btree_iter_swap,
+-      };
+-
+       if (k != end)
+-              BUG_ON(!min_heap_push(&iter->heap,
+-                               &((struct btree_iter_set) { k, end }),
+-                               &callbacks,
+-                               NULL));
++              BUG_ON(!heap_add(iter,
++                               ((struct btree_iter_set) { k, end }),
++                               btree_iter_cmp));
+ }
+-static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
+-                                        struct btree_iter *iter,
+-                                        struct bkey *search,
+-                                        struct bset_tree *start)
++static struct bkey *__bch_btree_iter_stack_init(struct btree_keys *b,
++                                              struct btree_iter_stack *iter,
++                                              struct bkey *search,
++                                              struct bset_tree *start)
+ {
+       struct bkey *ret = NULL;
+-      iter->heap.size = ARRAY_SIZE(iter->heap.preallocated);
+-      iter->heap.nr = 0;
++      iter->iter.size = ARRAY_SIZE(iter->stack_data);
++      iter->iter.used = 0;
+ #ifdef CONFIG_BCACHE_DEBUG
+-      iter->b = b;
++      iter->iter.b = b;
+ #endif
+       for (; start <= bset_tree_last(b); start++) {
+               ret = bch_bset_search(b, start, search);
+-              bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
++              bch_btree_iter_push(&iter->iter, ret, bset_bkey_last(start->data));
+       }
+       return ret;
+ }
+-struct bkey *bch_btree_iter_init(struct btree_keys *b,
+-                               struct btree_iter *iter,
++struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
++                               struct btree_iter_stack *iter,
+                                struct bkey *search)
+ {
+-      return __bch_btree_iter_init(b, iter, search, b->set);
++      return __bch_btree_iter_stack_init(b, iter, search, b->set);
+ }
+ static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
+-                                               new_btree_iter_cmp_fn *cmp)
++                                               btree_iter_cmp_fn *cmp)
+ {
+       struct btree_iter_set b __maybe_unused;
+       struct bkey *ret = NULL;
+-      const struct min_heap_callbacks callbacks = {
+-              .less = cmp,
+-              .swp = new_btree_iter_swap,
+-      };
+       if (!btree_iter_end(iter)) {
+               bch_btree_iter_next_check(iter);
+-              ret = iter->heap.data->k;
+-              iter->heap.data->k = bkey_next(iter->heap.data->k);
++              ret = iter->data->k;
++              iter->data->k = bkey_next(iter->data->k);
+-              if (iter->heap.data->k > iter->heap.data->end) {
++              if (iter->data->k > iter->data->end) {
+                       WARN_ONCE(1, "bset was corrupt!\n");
+-                      iter->heap.data->k = iter->heap.data->end;
++                      iter->data->k = iter->data->end;
+               }
+-              if (iter->heap.data->k == iter->heap.data->end) {
+-                      if (iter->heap.nr) {
+-                              b = min_heap_peek(&iter->heap)[0];
+-                              min_heap_pop(&iter->heap, &callbacks, NULL);
+-                      }
+-              }
++              if (iter->data->k == iter->data->end)
++                      heap_pop(iter, b, cmp);
+               else
+-                      min_heap_sift_down(&iter->heap, 0, &callbacks, NULL);
++                      heap_sift(iter, 0, cmp);
+       }
+       return ret;
+@@ -1186,7 +1157,7 @@ static inline struct bkey *__bch_btree_i
+ struct bkey *bch_btree_iter_next(struct btree_iter *iter)
+ {
+-      return __bch_btree_iter_next(iter, new_btree_iter_cmp);
++      return __bch_btree_iter_next(iter, btree_iter_cmp);
+ }
+@@ -1224,18 +1195,16 @@ static void btree_mergesort(struct btree
+                           struct btree_iter *iter,
+                           bool fixup, bool remove_stale)
+ {
++      int i;
+       struct bkey *k, *last = NULL;
+       BKEY_PADDED(k) tmp;
+       bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
+               ? bch_ptr_bad
+               : bch_ptr_invalid;
+-      const struct min_heap_callbacks callbacks = {
+-              .less = b->ops->sort_cmp,
+-              .swp = new_btree_iter_swap,
+-      };
+       /* Heapify the iterator, using our comparison function */
+-      min_heapify_all(&iter->heap, &callbacks, NULL);
++      for (i = iter->used / 2 - 1; i >= 0; --i)
++              heap_sift(iter, i, b->ops->sort_cmp);
+       while (!btree_iter_end(iter)) {
+               if (b->ops->sort_fixup && fixup)
+@@ -1324,11 +1293,10 @@ void bch_btree_sort_partial(struct btree
+                           struct bset_sort_state *state)
+ {
+       size_t order = b->page_order, keys = 0;
+-      struct btree_iter iter;
++      struct btree_iter_stack iter;
+       int oldsize = bch_count_data(b);
+-      min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-      __bch_btree_iter_init(b, &iter, NULL, &b->set[start]);
++      __bch_btree_iter_stack_init(b, &iter, NULL, &b->set[start]);
+       if (start) {
+               unsigned int i;
+@@ -1339,7 +1307,7 @@ void bch_btree_sort_partial(struct btree
+               order = get_order(__set_bytes(b->set->data, keys));
+       }
+-      __btree_sort(b, &iter, start, order, false, state);
++      __btree_sort(b, &iter.iter, start, order, false, state);
+       EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
+ }
+@@ -1355,13 +1323,11 @@ void bch_btree_sort_into(struct btree_ke
+                        struct bset_sort_state *state)
+ {
+       uint64_t start_time = local_clock();
+-      struct btree_iter iter;
+-
+-      min_heap_init(&iter.heap, NULL, MAX_BSETS);
++      struct btree_iter_stack iter;
+-      bch_btree_iter_init(b, &iter, NULL);
++      bch_btree_iter_stack_init(b, &iter, NULL);
+-      btree_mergesort(b, new->set->data, &iter, false, true);
++      btree_mergesort(b, new->set->data, &iter.iter, false, true);
+       bch_time_stats_update(&state->time, start_time);
+--- a/drivers/md/bcache/bset.h
++++ b/drivers/md/bcache/bset.h
+@@ -187,9 +187,8 @@ struct bset_tree {
+ };
+ struct btree_keys_ops {
+-      bool            (*sort_cmp)(const void *l,
+-                                  const void *r,
+-                                      void *args);
++      bool            (*sort_cmp)(struct btree_iter_set l,
++                                  struct btree_iter_set r);
+       struct bkey     *(*sort_fixup)(struct btree_iter *iter,
+                                      struct bkey *tmp);
+       bool            (*insert_fixup)(struct btree_keys *b,
+@@ -313,17 +312,23 @@ enum {
+       BTREE_INSERT_STATUS_FRONT_MERGE,
+ };
+-struct btree_iter_set {
+-      struct bkey *k, *end;
+-};
+-
+ /* Btree key iteration */
+ struct btree_iter {
++      size_t size, used;
+ #ifdef CONFIG_BCACHE_DEBUG
+       struct btree_keys *b;
+ #endif
+-      MIN_HEAP_PREALLOCATED(struct btree_iter_set, btree_iter_heap, MAX_BSETS) heap;
++      struct btree_iter_set {
++              struct bkey *k, *end;
++      } data[];
++};
++
++/* Fixed-size btree_iter that can be allocated on the stack */
++
++struct btree_iter_stack {
++      struct btree_iter iter;
++      struct btree_iter_set stack_data[MAX_BSETS];
+ };
+ typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);
+@@ -335,9 +340,9 @@ struct bkey *bch_btree_iter_next_filter(
+ void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
+                        struct bkey *end);
+-struct bkey *bch_btree_iter_init(struct btree_keys *b,
+-                               struct btree_iter *iter,
+-                               struct bkey *search);
++struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
++                                     struct btree_iter_stack *iter,
++                                     struct bkey *search);
+ struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
+                              const struct bkey *search);
+@@ -352,13 +357,14 @@ static inline struct bkey *bch_bset_sear
+       return search ? __bch_bset_search(b, t, search) : t->data->start;
+ }
+-#define for_each_key_filter(b, k, iter, filter)                               \
+-      for (bch_btree_iter_init((b), (iter), NULL);                    \
+-           ((k) = bch_btree_iter_next_filter((iter), (b), filter));)
+-
+-#define for_each_key(b, k, iter)                                      \
+-      for (bch_btree_iter_init((b), (iter), NULL);                    \
+-           ((k) = bch_btree_iter_next(iter));)
++#define for_each_key_filter(b, k, stack_iter, filter)                      \
++      for (bch_btree_iter_stack_init((b), (stack_iter), NULL);           \
++           ((k) = bch_btree_iter_next_filter(&((stack_iter)->iter), (b), \
++                                             filter));)
++
++#define for_each_key(b, k, stack_iter)                           \
++      for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
++           ((k) = bch_btree_iter_next(&((stack_iter)->iter)));)
+ /* Sorting */
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -149,19 +149,19 @@ void bch_btree_node_read_done(struct btr
+ {
+       const char *err = "bad btree header";
+       struct bset *i = btree_bset_first(b);
+-      struct btree_iter iter;
++      struct btree_iter *iter;
+       /*
+        * c->fill_iter can allocate an iterator with more memory space
+        * than static MAX_BSETS.
+        * See the comment arount cache_set->fill_iter.
+        */
+-      iter.heap.data = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
+-      iter.heap.size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
+-      iter.heap.nr = 0;
++      iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
++      iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
++      iter->used = 0;
+ #ifdef CONFIG_BCACHE_DEBUG
+-      iter.b = &b->keys;
++      iter->b = &b->keys;
+ #endif
+       if (!i->seq)
+@@ -199,7 +199,7 @@ void bch_btree_node_read_done(struct btr
+               if (i != b->keys.set[0].data && !i->keys)
+                       goto err;
+-              bch_btree_iter_push(&iter, i->start, bset_bkey_last(i));
++              bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
+               b->written += set_blocks(i, block_bytes(b->c->cache));
+       }
+@@ -211,7 +211,7 @@ void bch_btree_node_read_done(struct btr
+               if (i->seq == b->keys.set[0].data->seq)
+                       goto err;
+-      bch_btree_sort_and_fix_extents(&b->keys, &iter, &b->c->sort);
++      bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
+       i = b->keys.set[0].data;
+       err = "short btree key";
+@@ -223,7 +223,7 @@ void bch_btree_node_read_done(struct btr
+               bch_bset_init_next(&b->keys, write_block(b),
+                                  bset_magic(&b->c->cache->sb));
+ out:
+-      mempool_free(iter.heap.data, &b->c->fill_iter);
++      mempool_free(iter, &b->c->fill_iter);
+       return;
+ err:
+       set_btree_node_io_error(b);
+@@ -1309,11 +1309,9 @@ static bool btree_gc_mark_node(struct bt
+       uint8_t stale = 0;
+       unsigned int keys = 0, good_keys = 0;
+       struct bkey *k;
+-      struct btree_iter iter;
++      struct btree_iter_stack iter;
+       struct bset_tree *t;
+-      min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-
+       gc->nodes++;
+       for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
+@@ -1572,11 +1570,9 @@ static int btree_gc_rewrite_node(struct
+ static unsigned int btree_gc_count_keys(struct btree *b)
+ {
+       struct bkey *k;
+-      struct btree_iter iter;
++      struct btree_iter_stack iter;
+       unsigned int ret = 0;
+-      min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-
+       for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
+               ret += bkey_u64s(k);
+@@ -1615,18 +1611,18 @@ static int btree_gc_recurse(struct btree
+       int ret = 0;
+       bool should_rewrite;
+       struct bkey *k;
+-      struct btree_iter iter;
++      struct btree_iter_stack iter;
+       struct gc_merge_info r[GC_MERGE_NODES];
+       struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
+-      min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-      bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
++      bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done);
+       for (i = r; i < r + ARRAY_SIZE(r); i++)
+               i->b = ERR_PTR(-EINTR);
+       while (1) {
+-              k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
++              k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
++                                             bch_ptr_bad);
+               if (k) {
+                       r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
+                                                 true, b);
+@@ -1921,9 +1917,7 @@ static int bch_btree_check_recurse(struc
+ {
+       int ret = 0;
+       struct bkey *k, *p = NULL;
+-      struct btree_iter iter;
+-
+-      min_heap_init(&iter.heap, NULL, MAX_BSETS);
++      struct btree_iter_stack iter;
+       for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
+               bch_initial_mark_key(b->c, b->level, k);
+@@ -1931,10 +1925,10 @@ static int bch_btree_check_recurse(struc
+       bch_initial_mark_key(b->c, b->level + 1, &b->key);
+       if (b->level) {
+-              bch_btree_iter_init(&b->keys, &iter, NULL);
++              bch_btree_iter_stack_init(&b->keys, &iter, NULL);
+               do {
+-                      k = bch_btree_iter_next_filter(&iter, &b->keys,
++                      k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+                                                      bch_ptr_bad);
+                       if (k) {
+                               btree_node_prefetch(b, k);
+@@ -1962,7 +1956,7 @@ static int bch_btree_check_thread(void *
+       struct btree_check_info *info = arg;
+       struct btree_check_state *check_state = info->state;
+       struct cache_set *c = check_state->c;
+-      struct btree_iter iter;
++      struct btree_iter_stack iter;
+       struct bkey *k, *p;
+       int cur_idx, prev_idx, skip_nr;
+@@ -1970,11 +1964,9 @@ static int bch_btree_check_thread(void *
+       cur_idx = prev_idx = 0;
+       ret = 0;
+-      min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-
+       /* root node keys are checked before thread created */
+-      bch_btree_iter_init(&c->root->keys, &iter, NULL);
+-      k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
++      bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
++      k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
+       BUG_ON(!k);
+       p = k;
+@@ -1992,7 +1984,7 @@ static int bch_btree_check_thread(void *
+               skip_nr = cur_idx - prev_idx;
+               while (skip_nr) {
+-                      k = bch_btree_iter_next_filter(&iter,
++                      k = bch_btree_iter_next_filter(&iter.iter,
+                                                      &c->root->keys,
+                                                      bch_ptr_bad);
+                       if (k)
+@@ -2065,11 +2057,9 @@ int bch_btree_check(struct cache_set *c)
+       int ret = 0;
+       int i;
+       struct bkey *k = NULL;
+-      struct btree_iter iter;
++      struct btree_iter_stack iter;
+       struct btree_check_state check_state;
+-      min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-
+       /* check and mark root node keys */
+       for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
+               bch_initial_mark_key(c, c->root->level, k);
+@@ -2563,12 +2553,11 @@ static int bch_btree_map_nodes_recurse(s
+       if (b->level) {
+               struct bkey *k;
+-              struct btree_iter iter;
++              struct btree_iter_stack iter;
+-              min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-              bch_btree_iter_init(&b->keys, &iter, from);
++              bch_btree_iter_stack_init(&b->keys, &iter, from);
+-              while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
++              while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
+                                                      bch_ptr_bad))) {
+                       ret = bcache_btree(map_nodes_recurse, k, b,
+                                   op, from, fn, flags);
+@@ -2597,12 +2586,12 @@ int bch_btree_map_keys_recurse(struct bt
+ {
+       int ret = MAP_CONTINUE;
+       struct bkey *k;
+-      struct btree_iter iter;
++      struct btree_iter_stack iter;
+-      min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-      bch_btree_iter_init(&b->keys, &iter, from);
++      bch_btree_iter_stack_init(&b->keys, &iter, from);
+-      while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
++      while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
++                                             bch_ptr_bad))) {
+               ret = !b->level
+                       ? fn(op, b, k)
+                       : bcache_btree(map_keys_recurse, k,
+--- a/drivers/md/bcache/extents.c
++++ b/drivers/md/bcache/extents.c
+@@ -33,16 +33,15 @@ static void sort_key_next(struct btree_i
+       i->k = bkey_next(i->k);
+       if (i->k == i->end)
+-              *i = iter->heap.data[--iter->heap.nr];
++              *i = iter->data[--iter->used];
+ }
+-static bool new_bch_key_sort_cmp(const void *l, const void *r, void *args)
++static bool bch_key_sort_cmp(struct btree_iter_set l,
++                           struct btree_iter_set r)
+ {
+-      struct btree_iter_set *_l = (struct btree_iter_set *)l;
+-      struct btree_iter_set *_r = (struct btree_iter_set *)r;
+-      int64_t c = bkey_cmp(_l->k, _r->k);
++      int64_t c = bkey_cmp(l.k, r.k);
+-      return !(c ? c > 0 : _l->k < _r->k);
++      return c ? c > 0 : l.k < r.k;
+ }
+ static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)
+@@ -239,7 +238,7 @@ static bool bch_btree_ptr_insert_fixup(s
+ }
+ const struct btree_keys_ops bch_btree_keys_ops = {
+-      .sort_cmp       = new_bch_key_sort_cmp,
++      .sort_cmp       = bch_key_sort_cmp,
+       .insert_fixup   = bch_btree_ptr_insert_fixup,
+       .key_invalid    = bch_btree_ptr_invalid,
+       .key_bad        = bch_btree_ptr_bad,
+@@ -256,36 +255,22 @@ const struct btree_keys_ops bch_btree_ke
+  * Necessary for btree_sort_fixup() - if there are multiple keys that compare
+  * equal in different sets, we have to process them newest to oldest.
+  */
+-
+-static bool new_bch_extent_sort_cmp(const void *l, const void *r, void __always_unused *args)
+-{
+-      struct btree_iter_set *_l = (struct btree_iter_set *)l;
+-      struct btree_iter_set *_r = (struct btree_iter_set *)r;
+-      int64_t c = bkey_cmp(&START_KEY(_l->k), &START_KEY(_r->k));
+-
+-      return !(c ? c > 0 : _l->k < _r->k);
+-}
+-
+-static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args)
++static bool bch_extent_sort_cmp(struct btree_iter_set l,
++                              struct btree_iter_set r)
+ {
+-      struct btree_iter_set *_iter1 = iter1;
+-      struct btree_iter_set *_iter2 = iter2;
++      int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));
+-      swap(*_iter1, *_iter2);
++      return c ? c > 0 : l.k < r.k;
+ }
+ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
+                                         struct bkey *tmp)
+ {
+-      const struct min_heap_callbacks callbacks = {
+-              .less = new_bch_extent_sort_cmp,
+-              .swp = new_btree_iter_swap,
+-      };
+-      while (iter->heap.nr > 1) {
+-              struct btree_iter_set *top = iter->heap.data, *i = top + 1;
++      while (iter->used > 1) {
++              struct btree_iter_set *top = iter->data, *i = top + 1;
+-              if (iter->heap.nr > 2 &&
+-                  !new_bch_extent_sort_cmp(&i[0], &i[1], NULL))
++              if (iter->used > 2 &&
++                  bch_extent_sort_cmp(i[0], i[1]))
+                       i++;
+               if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)
+@@ -293,7 +278,7 @@ static struct bkey *bch_extent_sort_fixu
+               if (!KEY_SIZE(i->k)) {
+                       sort_key_next(iter, i);
+-                      min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
++                      heap_sift(iter, i - top, bch_extent_sort_cmp);
+                       continue;
+               }
+@@ -303,7 +288,7 @@ static struct bkey *bch_extent_sort_fixu
+                       else
+                               bch_cut_front(top->k, i->k);
+-                      min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
++                      heap_sift(iter, i - top, bch_extent_sort_cmp);
+               } else {
+                       /* can't happen because of comparison func */
+                       BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));
+@@ -313,7 +298,7 @@ static struct bkey *bch_extent_sort_fixu
+                               bch_cut_back(&START_KEY(i->k), tmp);
+                               bch_cut_front(i->k, top->k);
+-                              min_heap_sift_down(&iter->heap, 0, &callbacks, NULL);
++                              heap_sift(iter, 0, bch_extent_sort_cmp);
+                               return tmp;
+                       } else {
+@@ -633,7 +618,7 @@ static bool bch_extent_merge(struct btre
+ }
+ const struct btree_keys_ops bch_extent_keys_ops = {
+-      .sort_cmp       = new_bch_extent_sort_cmp,
++      .sort_cmp       = bch_extent_sort_cmp,
+       .sort_fixup     = bch_extent_sort_fixup,
+       .insert_fixup   = bch_extent_insert_fixup,
+       .key_invalid    = bch_extent_invalid,
+--- a/drivers/md/bcache/movinggc.c
++++ b/drivers/md/bcache/movinggc.c
+@@ -182,27 +182,16 @@ err:             if (!IS_ERR_OR_NULL(w->private))
+       closure_sync(&cl);
+ }
+-static bool new_bucket_cmp(const void *l, const void *r, void __always_unused *args)
++static bool bucket_cmp(struct bucket *l, struct bucket *r)
+ {
+-      struct bucket **_l = (struct bucket **)l;
+-      struct bucket **_r = (struct bucket **)r;
+-
+-      return GC_SECTORS_USED(*_l) >= GC_SECTORS_USED(*_r);
+-}
+-
+-static void new_bucket_swap(void *l, void *r, void __always_unused *args)
+-{
+-      struct bucket **_l = l;
+-      struct bucket **_r = r;
+-
+-      swap(*_l, *_r);
++      return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
+ }
+ static unsigned int bucket_heap_top(struct cache *ca)
+ {
+       struct bucket *b;
+-      return (b = min_heap_peek(&ca->heap)[0]) ? GC_SECTORS_USED(b) : 0;
++      return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
+ }
+ void bch_moving_gc(struct cache_set *c)
+@@ -210,10 +199,6 @@ void bch_moving_gc(struct cache_set *c)
+       struct cache *ca = c->cache;
+       struct bucket *b;
+       unsigned long sectors_to_move, reserve_sectors;
+-      const struct min_heap_callbacks callbacks = {
+-              .less = new_bucket_cmp,
+-              .swp = new_bucket_swap,
+-      };
+       if (!c->copy_gc_enabled)
+               return;
+@@ -224,7 +209,7 @@ void bch_moving_gc(struct cache_set *c)
+       reserve_sectors = ca->sb.bucket_size *
+                            fifo_used(&ca->free[RESERVE_MOVINGGC]);
+-      ca->heap.nr = 0;
++      ca->heap.used = 0;
+       for_each_bucket(b, ca) {
+               if (GC_MARK(b) == GC_MARK_METADATA ||
+@@ -233,31 +218,25 @@ void bch_moving_gc(struct cache_set *c)
+                   atomic_read(&b->pin))
+                       continue;
+-              if (!min_heap_full(&ca->heap)) {
++              if (!heap_full(&ca->heap)) {
+                       sectors_to_move += GC_SECTORS_USED(b);
+-                      min_heap_push(&ca->heap, &b, &callbacks, NULL);
+-              } else if (!new_bucket_cmp(&b, min_heap_peek(&ca->heap), ca)) {
++                      heap_add(&ca->heap, b, bucket_cmp);
++              } else if (bucket_cmp(b, heap_peek(&ca->heap))) {
+                       sectors_to_move -= bucket_heap_top(ca);
+                       sectors_to_move += GC_SECTORS_USED(b);
+                       ca->heap.data[0] = b;
+-                      min_heap_sift_down(&ca->heap, 0, &callbacks, NULL);
++                      heap_sift(&ca->heap, 0, bucket_cmp);
+               }
+       }
+       while (sectors_to_move > reserve_sectors) {
+-              if (ca->heap.nr) {
+-                      b = min_heap_peek(&ca->heap)[0];
+-                      min_heap_pop(&ca->heap, &callbacks, NULL);
+-              }
++              heap_pop(&ca->heap, b, bucket_cmp);
+               sectors_to_move -= GC_SECTORS_USED(b);
+       }
+-      while (ca->heap.nr) {
+-              b = min_heap_peek(&ca->heap)[0];
+-              min_heap_pop(&ca->heap, &callbacks, NULL);
++      while (heap_pop(&ca->heap, b, bucket_cmp))
+               SET_GC_MOVE(b, 1);
+-      }
+       mutex_unlock(&c->bucket_lock);
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1912,7 +1912,8 @@ struct cache_set *bch_cache_set_alloc(st
+       INIT_LIST_HEAD(&c->btree_cache_freed);
+       INIT_LIST_HEAD(&c->data_buckets);
+-      iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
++      iter_size = sizeof(struct btree_iter) +
++                  ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
+                           sizeof(struct btree_iter_set);
+       c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -660,9 +660,7 @@ static unsigned int bch_root_usage(struc
+       unsigned int bytes = 0;
+       struct bkey *k;
+       struct btree *b;
+-      struct btree_iter iter;
+-
+-      min_heap_init(&iter.heap, NULL, MAX_BSETS);
++      struct btree_iter_stack iter;
+       goto lock_root;
+--- a/drivers/md/bcache/util.h
++++ b/drivers/md/bcache/util.h
+@@ -9,7 +9,6 @@
+ #include <linux/kernel.h>
+ #include <linux/sched/clock.h>
+ #include <linux/llist.h>
+-#include <linux/min_heap.h>
+ #include <linux/ratelimit.h>
+ #include <linux/vmalloc.h>
+ #include <linux/workqueue.h>
+@@ -31,10 +30,16 @@ struct closure;
+ #endif
++#define DECLARE_HEAP(type, name)                                      \
++      struct {                                                        \
++              size_t size, used;                                      \
++              type *data;                                             \
++      } name
++
+ #define init_heap(heap, _size, gfp)                                   \
+ ({                                                                    \
+       size_t _bytes;                                                  \
+-      (heap)->nr = 0;                                         \
++      (heap)->used = 0;                                               \
+       (heap)->size = (_size);                                         \
+       _bytes = (heap)->size * sizeof(*(heap)->data);                  \
+       (heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL);            \
+@@ -47,6 +52,64 @@ do {                                                                        \
+       (heap)->data = NULL;                                            \
+ } while (0)
++#define heap_swap(h, i, j)    swap((h)->data[i], (h)->data[j])
++
++#define heap_sift(h, i, cmp)                                          \
++do {                                                                  \
++      size_t _r, _j = i;                                              \
++                                                                      \
++      for (; _j * 2 + 1 < (h)->used; _j = _r) {                       \
++              _r = _j * 2 + 1;                                        \
++              if (_r + 1 < (h)->used &&                               \
++                  cmp((h)->data[_r], (h)->data[_r + 1]))              \
++                      _r++;                                           \
++                                                                      \
++              if (cmp((h)->data[_r], (h)->data[_j]))                  \
++                      break;                                          \
++              heap_swap(h, _r, _j);                                   \
++      }                                                               \
++} while (0)
++
++#define heap_sift_down(h, i, cmp)                                     \
++do {                                                                  \
++      while (i) {                                                     \
++              size_t p = (i - 1) / 2;                                 \
++              if (cmp((h)->data[i], (h)->data[p]))                    \
++                      break;                                          \
++              heap_swap(h, i, p);                                     \
++              i = p;                                                  \
++      }                                                               \
++} while (0)
++
++#define heap_add(h, d, cmp)                                           \
++({                                                                    \
++      bool _r = !heap_full(h);                                        \
++      if (_r) {                                                       \
++              size_t _i = (h)->used++;                                \
++              (h)->data[_i] = d;                                      \
++                                                                      \
++              heap_sift_down(h, _i, cmp);                             \
++              heap_sift(h, _i, cmp);                                  \
++      }                                                               \
++      _r;                                                             \
++})
++
++#define heap_pop(h, d, cmp)                                           \
++({                                                                    \
++      bool _r = (h)->used;                                            \
++      if (_r) {                                                       \
++              (d) = (h)->data[0];                                     \
++              (h)->used--;                                            \
++              heap_swap(h, 0, (h)->used);                             \
++              heap_sift(h, 0, cmp);                                   \
++      }                                                               \
++      _r;                                                             \
++})
++
++#define heap_peek(h)  ((h)->used ? (h)->data[0] : NULL)
++
++#define heap_full(h)  ((h)->used == (h)->size)
++
+ #define DECLARE_FIFO(type, name)                                      \
+       struct {                                                        \
+               size_t front, back, size, mask;                         \
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -908,16 +908,15 @@ static int bch_dirty_init_thread(void *a
+       struct dirty_init_thrd_info *info = arg;
+       struct bch_dirty_init_state *state = info->state;
+       struct cache_set *c = state->c;
+-      struct btree_iter iter;
++      struct btree_iter_stack iter;
+       struct bkey *k, *p;
+       int cur_idx, prev_idx, skip_nr;
+       k = p = NULL;
+       prev_idx = 0;
+-      min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-      bch_btree_iter_init(&c->root->keys, &iter, NULL);
+-      k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
++      bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
++      k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
+       BUG_ON(!k);
+       p = k;
+@@ -931,7 +930,7 @@ static int bch_dirty_init_thread(void *a
+               skip_nr = cur_idx - prev_idx;
+               while (skip_nr) {
+-                      k = bch_btree_iter_next_filter(&iter,
++                      k = bch_btree_iter_next_filter(&iter.iter,
+                                                      &c->root->keys,
+                                                      bch_ptr_bad);
+                       if (k)
+@@ -980,13 +979,11 @@ void bch_sectors_dirty_init(struct bcach
+       int i;
+       struct btree *b = NULL;
+       struct bkey *k = NULL;
+-      struct btree_iter iter;
++      struct btree_iter_stack iter;
+       struct sectors_dirty_init op;
+       struct cache_set *c = d->c;
+       struct bch_dirty_init_state state;
+-      min_heap_init(&iter.heap, NULL, MAX_BSETS);
+-
+ retry_lock:
+       b = c->root;
+       rw_lock(0, b, b->level);
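
What this revert restores is bcache's original open-coded binary heap: the
DECLARE_HEAP()/init_heap()/heap_add()/heap_pop() macro family in util.h
above, driven by a caller-supplied comparison. As a rough, standalone sketch
of the usage pattern (hypothetical "demo" names, not code from this patch;
assumes a kernel context with bcache's util.h in scope):

/*
 * With a "less than" comparison the macros form a max-heap, so
 * heap_pop() hands back the largest element first.  heap_add()
 * returns false once heap_full() is true.
 */
struct demo {
	DECLARE_HEAP(int, heap);	/* embeds size, used and a data pointer */
};

static bool demo_cmp(int l, int r)
{
	return l < r;
}

static int demo_use(struct demo *d)
{
	int v;

	if (!init_heap(&d->heap, 128, GFP_KERNEL))	/* kvmalloc()s 128 slots */
		return -ENOMEM;

	heap_add(&d->heap, 3, demo_cmp);
	heap_add(&d->heap, 10, demo_cmp);

	while (heap_pop(&d->heap, v, demo_cmp))		/* pops 10, then 3 */
		pr_info("popped %d\n", v);

	free_heap(&d->heap);
	return 0;
}

This mirrors how the restored movinggc.c code above uses the macros:
heap_add() while there is room, replace data[0] and heap_sift() when the
heap is full, and heap_pop() to drain in order.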
diff --git a/queue-6.12/selftests-bpf-add-a-test-for-arena-range-tree-algorithm.patch b/queue-6.12/selftests-bpf-add-a-test-for-arena-range-tree-algorithm.patch
new file mode 100644 (file)
index 0000000..cee618d
--- /dev/null
@@ -0,0 +1,151 @@
+From e58358afa84e8e271a296459d35d1715c7572013 Mon Sep 17 00:00:00 2001
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Thu, 7 Nov 2024 18:56:16 -0800
+Subject: selftests/bpf: Add a test for arena range tree algorithm
+
+From: Alexei Starovoitov <ast@kernel.org>
+
+commit e58358afa84e8e271a296459d35d1715c7572013 upstream.
+
+Add a test that verifies specific behavior of the arena range tree
+algorithm, and adjust the existing big_alloc1 test to account for the
+use of global data in the arena.
+
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
+Link: https://lore.kernel.org/bpf/20241108025616.17625-3-alexei.starovoitov@gmail.com
+Signed-off-by: Yifei Liu <yifei.l.liu@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/bpf/progs/verifier_arena_large.c |  110 ++++++++++++++-
+ 1 file changed, 108 insertions(+), 2 deletions(-)
+
+--- a/tools/testing/selftests/bpf/progs/verifier_arena_large.c
++++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
+@@ -29,12 +29,12 @@ int big_alloc1(void *ctx)
+       if (!page1)
+               return 1;
+       *page1 = 1;
+-      page2 = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE,
++      page2 = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE * 2,
+                                     1, NUMA_NO_NODE, 0);
+       if (!page2)
+               return 2;
+       *page2 = 2;
+-      no_page = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE,
++      no_page = bpf_arena_alloc_pages(&arena, base + ARENA_SIZE - PAGE_SIZE,
+                                       1, NUMA_NO_NODE, 0);
+       if (no_page)
+               return 3;
+@@ -66,4 +66,110 @@ int big_alloc1(void *ctx)
+ #endif
+       return 0;
+ }
++
++#if defined(__BPF_FEATURE_ADDR_SPACE_CAST)
++#define PAGE_CNT 100
++__u8 __arena * __arena page[PAGE_CNT]; /* occupies the first page */
++__u8 __arena *base;
++
++/*
++ * Check that arena's range_tree algorithm allocates pages sequentially
++ * on the first pass and then fills in all gaps on the second pass.
++ */
++__noinline int alloc_pages(int page_cnt, int pages_atonce, bool first_pass,
++              int max_idx, int step)
++{
++      __u8 __arena *pg;
++      int i, pg_idx;
++
++      for (i = 0; i < page_cnt; i++) {
++              pg = bpf_arena_alloc_pages(&arena, NULL, pages_atonce,
++                                         NUMA_NO_NODE, 0);
++              if (!pg)
++                      return step;
++              pg_idx = (pg - base) / PAGE_SIZE;
++              if (first_pass) {
++                      /* Pages must be allocated sequentially */
++                      if (pg_idx != i)
++                              return step + 100;
++              } else {
++                      /* Allocator must fill into gaps */
++                      if (pg_idx >= max_idx || (pg_idx & 1))
++                              return step + 200;
++              }
++              *pg = pg_idx;
++              page[pg_idx] = pg;
++              cond_break;
++      }
++      return 0;
++}
++
++//SEC("syscall")
++//__success __retval(0)
++int big_alloc2(void *ctx)
++{
++      __u8 __arena *pg;
++      int i, err;
++
++      base = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
++      if (!base)
++              return 1;
++      bpf_arena_free_pages(&arena, (void __arena *)base, 1);
++
++      err = alloc_pages(PAGE_CNT, 1, true, PAGE_CNT, 2);
++      if (err)
++              return err;
++
++      /* Clear all even pages */
++      for (i = 0; i < PAGE_CNT; i += 2) {
++              pg = page[i];
++              if (*pg != i)
++                      return 3;
++              bpf_arena_free_pages(&arena, (void __arena *)pg, 1);
++              page[i] = NULL;
++              cond_break;
++      }
++
++      /* Allocate into freed gaps */
++      err = alloc_pages(PAGE_CNT / 2, 1, false, PAGE_CNT, 4);
++      if (err)
++              return err;
++
++      /* Free pairs of pages */
++      for (i = 0; i < PAGE_CNT; i += 4) {
++              pg = page[i];
++              if (*pg != i)
++                      return 5;
++              bpf_arena_free_pages(&arena, (void __arena *)pg, 2);
++              page[i] = NULL;
++              page[i + 1] = NULL;
++              cond_break;
++      }
++
++      /* Allocate 2 pages at a time into freed gaps */
++      err = alloc_pages(PAGE_CNT / 4, 2, false, PAGE_CNT, 6);
++      if (err)
++              return err;
++
++      /* Check pages without freeing */
++      for (i = 0; i < PAGE_CNT; i += 2) {
++              pg = page[i];
++              if (*pg != i)
++                      return 7;
++              cond_break;
++      }
++
++      pg = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
++
++      if (!pg)
++              return 8;
++      /*
++       * The first PAGE_CNT pages are occupied. The new page
++       * must be above.
++       */
++      if ((pg - base) / PAGE_SIZE < PAGE_CNT)
++              return 9;
++      return 0;
++}
++#endif
+ char _license[] SEC("license") = "GPL";
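
The property big_alloc2 checks is easier to see in isolation: a
lowest-address-first allocator hands out pages sequentially while the space
is empty, and after frees it refills the gaps before allocating any higher.
A plain userspace C sketch of that behavior (an illustration only, not the
kernel's range_tree implementation):

#include <stdbool.h>
#include <stdio.h>

#define SLOTS 8
static bool used[SLOTS];

/* first fit: always return the lowest free slot, or -1 when full */
static int alloc_slot(void)
{
	for (int i = 0; i < SLOTS; i++) {
		if (!used[i]) {
			used[i] = true;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("first pass: %d\n", alloc_slot());	/* 0 1 2 3 */

	used[0] = used[2] = false;	/* free the even slots */

	printf("gap fill: %d %d\n", alloc_slot(), alloc_slot());	/* 0 2 */
	printf("past the gaps: %d\n", alloc_slot());	/* 4 */
	return 0;
}

The test's alloc_pages() asserts exactly these two regimes: pg_idx == i on
the first sequential pass, and pg_idx landing on a freed even index below
max_idx on the gap-filling passes.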
diff --git a/queue-6.12/selftests-bpf-fix-build-error-with-llvm-19.patch b/queue-6.12/selftests-bpf-fix-build-error-with-llvm-19.patch
new file mode 100644 (file)
index 0000000..bd529e2
--- /dev/null
@@ -0,0 +1,36 @@
+From 608e99f7869e3a6e028c7cba14a896c7797e8746 Mon Sep 17 00:00:00 2001
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Sat, 16 Nov 2024 10:56:17 -0800
+Subject: selftests/bpf: Fix build error with llvm 19
+
+From: Alexei Starovoitov <ast@kernel.org>
+
+commit 608e99f7869e3a6e028c7cba14a896c7797e8746 upstream.
+
+llvm 19 fails to compile the arena selftest:
+CLNG-BPF [test_progs] verifier_arena_large.bpf.o
+progs/verifier_arena_large.c:90:24: error: unsupported signed division, please convert to unsigned div/mod.
+   90 |                 pg_idx = (pg - base) / PAGE_SIZE;
+
+Though llvm <= 18 and llvm >= 20 don't have this issue,
+fix the test to avoid the build error.
+
+Reported-by: Jiri Olsa <olsajiri@gmail.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Yifei Liu <yifei.l.liu@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/bpf/progs/verifier_arena_large.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/tools/testing/selftests/bpf/progs/verifier_arena_large.c
++++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c
+@@ -87,7 +87,7 @@ __noinline int alloc_pages(int page_cnt,
+                                          NUMA_NO_NODE, 0);
+               if (!pg)
+                       return step;
+-              pg_idx = (pg - base) / PAGE_SIZE;
++              pg_idx = (unsigned long) (pg - base) / PAGE_SIZE;
+               if (first_pass) {
+                       /* Pages must be allocated sequentially */
+                       if (pg_idx != i)
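
The root cause is that pg - base has the signed type ptrdiff_t, and llvm 19's
BPF backend refuses to lower the resulting signed division (the error quoted
above). Since pg is always at or above base in this test, casting the
difference to an unsigned type before dividing is equivalent and compiles on
every toolchain version. A minimal sketch of the pattern (generic C with a
hypothetical helper name, not code from the patch):

/*
 * Safe only when pg >= base is guaranteed, so the difference cannot
 * be negative and unsigned division matches the signed result.
 */
static inline unsigned long page_index(const char *pg, const char *base,
				       unsigned long page_size)
{
	return (unsigned long)(pg - base) / page_size;
}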
diff --git a/queue-6.12/series b/queue-6.12/series
index 50d8d8adf5a7c8989a2b12e808762eb3c0e72eee..1c1420c61d8b1aeaedc91d55e42092aa94b4c0df 100644 (file)
@@ -325,3 +325,16 @@ perf-core-don-t-leak-aux-buffer-refcount-on-allocation-failure.patch
 perf-core-exit-early-on-perf_mmap-fail.patch
 perf-core-prevent-vma-split-of-buffer-mappings.patch
 selftests-perf_events-add-a-mmap-correctness-test.patch
+net-packet-fix-a-race-in-packet_set_ring-and-packet_notifier.patch
+vsock-do-not-allow-binding-to-vmaddr_port_any.patch
+accel-ivpu-fix-reset_engine-debugfs-file-logic.patch
+revert-bcache-remove-heap-related-macros-and-switch-to-generic-min_heap.patch
+ice-ptp-fix-crosstimestamp-reporting.patch
+selftests-bpf-add-a-test-for-arena-range-tree-algorithm.patch
+selftests-bpf-fix-build-error-with-llvm-19.patch
+drm-i915-ddi-change-intel_ddi_init_-dp-hdmi-_connector-return-type.patch
+drm-i915-hdmi-propagate-errors-from-intel_hdmi_init_connector.patch
+drm-i915-hdmi-add-error-handling-in-g4x_hdmi_init.patch
+drm-i915-ddi-gracefully-handle-errors-from-intel_ddi_init_hdmi_connector.patch
+drm-i915-display-add-intel_encoder_is_hdmi.patch
+drm-i915-ddi-only-call-shutdown-hooks-for-valid-encoders.patch
diff --git a/queue-6.12/vsock-do-not-allow-binding-to-vmaddr_port_any.patch b/queue-6.12/vsock-do-not-allow-binding-to-vmaddr_port_any.patch
new file mode 100644 (file)
index 0000000..40893d9
--- /dev/null
@@ -0,0 +1,42 @@
+From aba0c94f61ec05315fa7815d21aefa4c87f6a9f4 Mon Sep 17 00:00:00 2001
+From: Budimir Markovic <markovicbudimir@gmail.com>
+Date: Thu, 7 Aug 2025 04:18:11 +0000
+Subject: vsock: Do not allow binding to VMADDR_PORT_ANY
+
+From: Budimir Markovic <markovicbudimir@gmail.com>
+
+commit aba0c94f61ec05315fa7815d21aefa4c87f6a9f4 upstream.
+
+It is possible for a vsock to autobind to VMADDR_PORT_ANY. This can
+cause a use-after-free when a connection is made to the bound socket.
+The socket returned by accept() also has port VMADDR_PORT_ANY but is not
+on the list of unbound sockets. Binding it will result in an extra
+refcount decrement similar to the one fixed in commit fcdd2242c023
+("vsock: Keep the binding until socket destruction").
+
+Modify the check in __vsock_bind_connectible() to also prevent binding
+to VMADDR_PORT_ANY.
+
+Fixes: d021c344051a ("VSOCK: Introduce VM Sockets")
+Reported-by: Budimir Markovic <markovicbudimir@gmail.com>
+Signed-off-by: Budimir Markovic <markovicbudimir@gmail.com>
+Reviewed-by: Stefano Garzarella <sgarzare@redhat.com>
+Link: https://patch.msgid.link/20250807041811.678-1-markovicbudimir@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/vmw_vsock/af_vsock.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/vmw_vsock/af_vsock.c
++++ b/net/vmw_vsock/af_vsock.c
+@@ -689,7 +689,8 @@ static int __vsock_bind_connectible(stru
+               unsigned int i;
+               for (i = 0; i < MAX_PORT_RETRIES; i++) {
+-                      if (port <= LAST_RESERVED_PORT)
++                      if (port == VMADDR_PORT_ANY ||
++                          port <= LAST_RESERVED_PORT)
+                               port = LAST_RESERVED_PORT + 1;
+                       new_addr.svm_port = port++;