git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 4.14
author    Sasha Levin <sashal@kernel.org>
          Mon, 31 Jul 2023 01:25:07 +0000 (21:25 -0400)
committer Sasha Levin <sashal@kernel.org>
          Mon, 31 Jul 2023 01:25:07 +0000 (21:25 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-4.14/asoc-fsl_spdif-silence-output-on-stop.patch [new file with mode: 0644]
queue-4.14/ata-pata_ns87415-mark-ns87560_tf_read-static.patch [new file with mode: 0644]
queue-4.14/block-fix-a-source-code-comment-in-include-uapi-linu.patch [new file with mode: 0644]
queue-4.14/dm-raid-fix-missing-reconfig_mutex-unlock-in-raid_ct.patch [new file with mode: 0644]
queue-4.14/ring-buffer-fix-wrong-stat-of-cpu_buffer-read.patch [new file with mode: 0644]
queue-4.14/series
queue-4.14/tracing-fix-warning-in-trace_buffered_event_disable.patch [new file with mode: 0644]

diff --git a/queue-4.14/asoc-fsl_spdif-silence-output-on-stop.patch b/queue-4.14/asoc-fsl_spdif-silence-output-on-stop.patch
new file mode 100644 (file)
index 0000000..c9cada2
--- /dev/null
@@ -0,0 +1,38 @@
+From 24ca92348a4a41c5c8cae951435a4b7447586fbf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Jul 2023 18:47:29 +0200
+Subject: ASoC: fsl_spdif: Silence output on stop
+
+From: Matus Gajdos <matuszpd@gmail.com>
+
+[ Upstream commit 0e4c2b6b0c4a4b4014d9424c27e5e79d185229c5 ]
+
+Clear the TX registers on stop to prevent the SPDIF interface from
+sending the last written word over and over again.
+
+Fixes: a2388a498ad2 ("ASoC: fsl: Add S/PDIF CPU DAI driver")
+Signed-off-by: Matus Gajdos <matuszpd@gmail.com>
+Reviewed-by: Fabio Estevam <festevam@gmail.com>
+Link: https://lore.kernel.org/r/20230719164729.19969-1-matuszpd@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/fsl/fsl_spdif.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
+index 7e6cc4da00887..b079754aed409 100644
+--- a/sound/soc/fsl/fsl_spdif.c
++++ b/sound/soc/fsl/fsl_spdif.c
+@@ -618,6 +618,8 @@ static int fsl_spdif_trigger(struct snd_pcm_substream *substream,
+       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+               regmap_update_bits(regmap, REG_SPDIF_SCR, dmaen, 0);
+               regmap_update_bits(regmap, REG_SPDIF_SIE, intr, 0);
++              regmap_write(regmap, REG_SPDIF_STL, 0x0);
++              regmap_write(regmap, REG_SPDIF_STR, 0x0);
+               break;
+       default:
+               return -EINVAL;
+-- 
+2.40.1
+
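For readers outside the driver, the shape of the fix is simple to model in plain C: on the stop trigger, the transmit data registers are zeroed so the hardware has no stale sample left to replay. The sketch below is a toy userspace model under that assumption, not the fsl_spdif driver; the STL/STR names are reused only for readability.

```
/*
 * Toy model (not the fsl_spdif driver) of the fix: when the stream is
 * stopped, zero the transmit data registers so nothing stale is left
 * for the hardware to repeat.
 */
#include <stdint.h>
#include <stdio.h>

struct spdif_regs {
        uint32_t stl;   /* left-channel TX data  */
        uint32_t str;   /* right-channel TX data */
};

static void reg_write(uint32_t *reg, uint32_t val) { *reg = val; }

static void trigger_stop(struct spdif_regs *regs)
{
        /* DMA and interrupt disable would happen here in the real driver. */
        reg_write(&regs->stl, 0x0);   /* silence left channel  */
        reg_write(&regs->str, 0x0);   /* silence right channel */
}

int main(void)
{
        struct spdif_regs regs = { .stl = 0xdeadbeef, .str = 0xcafef00d };

        trigger_stop(&regs);
        printf("STL=0x%x STR=0x%x\n", (unsigned)regs.stl, (unsigned)regs.str);
        return 0;
}
```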
diff --git a/queue-4.14/ata-pata_ns87415-mark-ns87560_tf_read-static.patch b/queue-4.14/ata-pata_ns87415-mark-ns87560_tf_read-static.patch
new file mode 100644 (file)
index 0000000..669e797
--- /dev/null
@@ -0,0 +1,42 @@
+From 5067e4ab41248d5529eb7234b9331c97ccb5b64f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Jul 2023 22:33:22 +0200
+Subject: ata: pata_ns87415: mark ns87560_tf_read static
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit 3fc2febb0f8ffae354820c1772ec008733237cfa ]
+
+The global function triggers a warning because of the missing prototype
+
+drivers/ata/pata_ns87415.c:263:6: warning: no previous prototype for 'ns87560_tf_read' [-Wmissing-prototypes]
+  263 | void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+
+There are no other references to this, so just make it static.
+
+Fixes: c4b5b7b6c4423 ("pata_ns87415: Initial cut at 87415/87560 IDE support")
+Reviewed-by: Sergey Shtylyov <s.shtylyov@omp.ru>
+Reviewed-by: Serge Semin <fancer.lancer@gmail.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/pata_ns87415.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
+index 84c6b225b56e9..9ee4aefca8675 100644
+--- a/drivers/ata/pata_ns87415.c
++++ b/drivers/ata/pata_ns87415.c
+@@ -260,7 +260,7 @@ static u8 ns87560_check_status(struct ata_port *ap)
+  *    LOCKING:
+  *    Inherited from caller.
+  */
+-void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
++static void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+ {
+       struct ata_ioports *ioaddr = &ap->ioaddr;
+-- 
+2.40.1
+
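The warning class is easy to reproduce in a standalone translation unit: any function with external linkage defined without a previous prototype trips -Wmissing-prototypes, and marking it static (when nothing outside the file uses it) is the canonical fix. A minimal sketch, assuming an ordinary GCC or Clang userspace build rather than a kernel one:

```
/*
 * Build with:  gcc -Wall -Wmissing-prototypes -c proto_demo.c
 *
 * With external linkage and no prototype in any header,
 *     void helper_read(int port) { ... }
 * would warn: "no previous prototype for 'helper_read'".
 * Making it static scopes it to this file and silences the warning.
 */
#include <stdio.h>

static void helper_read(int port)
{
        printf("read port %d\n", port);
}

int main(void)
{
        helper_read(0);
        return 0;
}
```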
diff --git a/queue-4.14/block-fix-a-source-code-comment-in-include-uapi-linu.patch b/queue-4.14/block-fix-a-source-code-comment-in-include-uapi-linu.patch
new file mode 100644 (file)
index 0000000..cc55466
--- /dev/null
@@ -0,0 +1,50 @@
+From ef43679c58293808b40eb7e7fe4ef62bde49999b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Jul 2023 13:14:12 -0700
+Subject: block: Fix a source code comment in include/uapi/linux/blkzoned.h
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit e0933b526fbfd937c4a8f4e35fcdd49f0e22d411 ]
+
+Fix the symbolic names for zone conditions in the blkzoned.h header
+file.
+
+Cc: Hannes Reinecke <hare@suse.de>
+Cc: Damien Le Moal <dlemoal@kernel.org>
+Fixes: 6a0cb1bc106f ("block: Implement support for zoned block devices")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Link: https://lore.kernel.org/r/20230706201422.3987341-1-bvanassche@acm.org
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/uapi/linux/blkzoned.h | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
+index e3c70fe6bf0fb..f5e619abcc9a6 100644
+--- a/include/uapi/linux/blkzoned.h
++++ b/include/uapi/linux/blkzoned.h
+@@ -51,13 +51,13 @@ enum blk_zone_type {
+  *
+  * The Zone Condition state machine in the ZBC/ZAC standards maps the above
+  * deinitions as:
+- *   - ZC1: Empty         | BLK_ZONE_EMPTY
++ *   - ZC1: Empty         | BLK_ZONE_COND_EMPTY
+  *   - ZC2: Implicit Open | BLK_ZONE_COND_IMP_OPEN
+  *   - ZC3: Explicit Open | BLK_ZONE_COND_EXP_OPEN
+- *   - ZC4: Closed        | BLK_ZONE_CLOSED
+- *   - ZC5: Full          | BLK_ZONE_FULL
+- *   - ZC6: Read Only     | BLK_ZONE_READONLY
+- *   - ZC7: Offline       | BLK_ZONE_OFFLINE
++ *   - ZC4: Closed        | BLK_ZONE_COND_CLOSED
++ *   - ZC5: Full          | BLK_ZONE_COND_FULL
++ *   - ZC6: Read Only     | BLK_ZONE_COND_READONLY
++ *   - ZC7: Offline       | BLK_ZONE_COND_OFFLINE
+  *
+  * Conditions 0x5 to 0xC are reserved by the current ZBC/ZAC spec and should
+  * be considered invalid.
+-- 
+2.40.1
+
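Since this patch only corrects the comment, the corrected mapping can be checked directly against the UAPI header from userspace. A small sketch, assuming a Linux system with the kernel UAPI headers installed:

```
/*
 * Print the ZC name the corrected comment assigns to a zone condition.
 * Uses only enum blk_zone_cond values from <linux/blkzoned.h>.
 */
#include <linux/blkzoned.h>
#include <stdio.h>

static const char *zc_name(unsigned int cond)
{
        switch (cond) {
        case BLK_ZONE_COND_EMPTY:    return "ZC1: Empty";
        case BLK_ZONE_COND_IMP_OPEN: return "ZC2: Implicit Open";
        case BLK_ZONE_COND_EXP_OPEN: return "ZC3: Explicit Open";
        case BLK_ZONE_COND_CLOSED:   return "ZC4: Closed";
        case BLK_ZONE_COND_FULL:     return "ZC5: Full";
        case BLK_ZONE_COND_READONLY: return "ZC6: Read Only";
        case BLK_ZONE_COND_OFFLINE:  return "ZC7: Offline";
        default:                     return "reserved/invalid";
        }
}

int main(void)
{
        printf("%s\n", zc_name(BLK_ZONE_COND_CLOSED));
        return 0;
}
```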
diff --git a/queue-4.14/dm-raid-fix-missing-reconfig_mutex-unlock-in-raid_ct.patch b/queue-4.14/dm-raid-fix-missing-reconfig_mutex-unlock-in-raid_ct.patch
new file mode 100644 (file)
index 0000000..99d6ffb
--- /dev/null
@@ -0,0 +1,57 @@
+From 5bd837e565b35be3c6942eb7e5062a70b7613453 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 8 Jul 2023 17:21:51 +0800
+Subject: dm raid: fix missing reconfig_mutex unlock in raid_ctr() error paths
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+[ Upstream commit bae3028799dc4f1109acc4df37c8ff06f2d8f1a0 ]
+
+In the error paths 'bad_stripe_cache' and 'bad_check_reshape',
+'reconfig_mutex' is still held after raid_ctr() returns.
+
+Fixes: 9dbd1aa3a81c ("dm raid: add reshaping support to the target")
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-raid.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index 025a36ddf26ec..b818cc982e624 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -3190,15 +3190,19 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+       /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */
+       if (rs_is_raid456(rs)) {
+               r = rs_set_raid456_stripe_cache(rs);
+-              if (r)
++              if (r) {
++                      mddev_unlock(&rs->md);
+                       goto bad_stripe_cache;
++              }
+       }
+       /* Now do an early reshape check */
+       if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
+               r = rs_check_reshape(rs);
+-              if (r)
++              if (r) {
++                      mddev_unlock(&rs->md);
+                       goto bad_check_reshape;
++              }
+               /* Restore new, ctr requested layout to perform check */
+               rs_config_restore(rs, &rs_layout);
+@@ -3207,6 +3211,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+                       r = rs->md.pers->check_reshape(&rs->md);
+                       if (r) {
+                               ti->error = "Reshape check failed";
++                              mddev_unlock(&rs->md);
+                               goto bad_check_reshape;
+                       }
+               }
+-- 
+2.40.1
+
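The underlying bug class, taking a lock in a constructor and forgetting to drop it on some goto-based error paths, is not specific to dm-raid. A minimal userspace sketch of the pattern the fix restores, using a pthread mutex in place of mddev's reconfig_mutex:

```
/*
 * Illustration (not the dm-raid code) of the bug class fixed here:
 * a constructor that takes a lock and then jumps to error labels must
 * drop the lock on every early-exit path, not just on success.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_step(int fail) { return fail ? -1 : 0; }

static int ctr(int fail_step)
{
        int r;

        pthread_mutex_lock(&cfg_lock);

        r = do_step(fail_step == 1);
        if (r) {
                pthread_mutex_unlock(&cfg_lock);   /* the missing unlock */
                goto bad;
        }

        r = do_step(fail_step == 2);
        if (r) {
                pthread_mutex_unlock(&cfg_lock);   /* ...on every path   */
                goto bad;
        }

        pthread_mutex_unlock(&cfg_lock);
        return 0;
bad:
        fprintf(stderr, "ctr failed, lock released before return\n");
        return r;
}

int main(void)
{
        ctr(0);
        ctr(1);
        return 0;
}
```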
diff --git a/queue-4.14/ring-buffer-fix-wrong-stat-of-cpu_buffer-read.patch b/queue-4.14/ring-buffer-fix-wrong-stat-of-cpu_buffer-read.patch
new file mode 100644 (file)
index 0000000..5110391
--- /dev/null
@@ -0,0 +1,130 @@
+From bc14762fd7dd68f1e1862b6dc1d66175d53d21b6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Jul 2023 13:40:40 +0800
+Subject: ring-buffer: Fix wrong stat of cpu_buffer->read
+
+From: Zheng Yejian <zhengyejian1@huawei.com>
+
+[ Upstream commit 2d093282b0d4357373497f65db6a05eb0c28b7c8 ]
+
+When pages are removed in rb_remove_pages(), 'cpu_buffer->read' is set
+to 0 in order to make sure any read iterators reset themselves. However,
+this will mess up the 'entries' statistic, as the following steps show:
+
+  # cd /sys/kernel/tracing/
+  # 1. Enlarge the ring buffer to prepare for shrinking it later:
+  # echo 20 > per_cpu/cpu0/buffer_size_kb
+  # 2. Write a log into ring buffer of cpu0:
+  # taskset -c 0 echo "hello1" > trace_marker
+  # 3. Read the log:
+  # cat per_cpu/cpu0/trace_pipe
+       <...>-332     [000] .....    62.406844: tracing_mark_write: hello1
+  # 4. Stop reading and check the stats: now 0 entries, and 1 event read:
+  # cat per_cpu/cpu0/stats
+   entries: 0
+   [...]
+   read events: 1
+  # 5. Reduce the ring buffer
+  # echo 7 > per_cpu/cpu0/buffer_size_kb
+  # 6. Now 'entries' unexpectedly became 1 even though there are actually no entries!!!
+  # cat per_cpu/cpu0/stats
+   entries: 1
+   [...]
+   read events: 0
+
+To fix it, introduce a 'pages_removed' field to count the total pages
+removed since the last reset, then use it to let read iterators reset
+themselves instead of changing the 'read' pointer.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20230724054040.3489499-1-zhengyejian1@huawei.com
+
+Cc: <mhiramat@kernel.org>
+Cc: <vnagarnaik@google.com>
+Fixes: 83f40318dab0 ("ring-buffer: Make removal of ring buffer pages atomic")
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/ring_buffer.c | 22 ++++++++++++----------
+ 1 file changed, 12 insertions(+), 10 deletions(-)
+
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index d0fed522bf23a..1949d7bbe145d 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -464,6 +464,8 @@ struct ring_buffer_per_cpu {
+       unsigned long                   read_bytes;
+       u64                             write_stamp;
+       u64                             read_stamp;
++      /* pages removed since last reset */
++      unsigned long                   pages_removed;
+       /* ring buffer pages to update, > 0 to add, < 0 to remove */
+       long                            nr_pages_to_update;
+       struct list_head                new_pages; /* new pages to add */
+@@ -498,6 +500,7 @@ struct ring_buffer_iter {
+       struct buffer_page              *head_page;
+       struct buffer_page              *cache_reader_page;
+       unsigned long                   cache_read;
++      unsigned long                   cache_pages_removed;
+       u64                             read_stamp;
+ };
+@@ -1447,6 +1450,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
+               to_remove = rb_list_head(to_remove)->next;
+               head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
+       }
++      /* Read iterators need to reset themselves when some pages removed */
++      cpu_buffer->pages_removed += nr_removed;
+       next_page = rb_list_head(to_remove)->next;
+@@ -1468,12 +1473,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
+               cpu_buffer->head_page = list_entry(next_page,
+                                               struct buffer_page, list);
+-      /*
+-       * change read pointer to make sure any read iterators reset
+-       * themselves
+-       */
+-      cpu_buffer->read = 0;
+-
+       /* pages are removed, resume tracing and then free the pages */
+       atomic_dec(&cpu_buffer->record_disabled);
+       raw_spin_unlock_irq(&cpu_buffer->reader_lock);
+@@ -3464,6 +3463,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
+       iter->cache_reader_page = iter->head_page;
+       iter->cache_read = cpu_buffer->read;
++      iter->cache_pages_removed = cpu_buffer->pages_removed;
+       if (iter->head)
+               iter->read_stamp = cpu_buffer->read_stamp;
+@@ -3896,12 +3896,13 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+       buffer = cpu_buffer->buffer;
+       /*
+-       * Check if someone performed a consuming read to
+-       * the buffer. A consuming read invalidates the iterator
+-       * and we need to reset the iterator in this case.
++       * Check if someone performed a consuming read to the buffer
++       * or removed some pages from the buffer. In these cases,
++       * iterator was invalidated and we need to reset it.
+        */
+       if (unlikely(iter->cache_read != cpu_buffer->read ||
+-                   iter->cache_reader_page != cpu_buffer->reader_page))
++                   iter->cache_reader_page != cpu_buffer->reader_page ||
++                   iter->cache_pages_removed != cpu_buffer->pages_removed))
+               rb_iter_reset(iter);
+  again:
+@@ -4323,6 +4324,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+       cpu_buffer->last_overrun = 0;
+       rb_head_page_activate(cpu_buffer);
++      cpu_buffer->pages_removed = 0;
+ }
+ /**
+-- 
+2.40.1
+
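Conceptually, the fix replaces "clobber the read counter so iterators notice" with "keep a dedicated removal counter that iterators cache and compare". The userspace sketch below models that idea with illustrative names; it is not the kernel ring buffer code.

```
/*
 * Sketch of the fix's idea: keep a pages_removed counter and let
 * iterators compare a cached copy against it to detect staleness,
 * instead of zeroing the reader's 'read' statistic.
 */
#include <stdio.h>

struct buf {
        unsigned long read;            /* events consumed, used for stats */
        unsigned long pages_removed;   /* bumped whenever pages are freed */
};

struct iter {
        const struct buf *b;
        unsigned long cache_read;
        unsigned long cache_pages_removed;
};

static void iter_reset(struct iter *it)
{
        it->cache_read = it->b->read;
        it->cache_pages_removed = it->b->pages_removed;
        printf("iterator reset\n");
}

static void iter_peek(struct iter *it)
{
        /* Stale if someone consumed events *or* removed pages. */
        if (it->cache_read != it->b->read ||
            it->cache_pages_removed != it->b->pages_removed)
                iter_reset(it);
}

int main(void)
{
        struct buf b = { .read = 1, .pages_removed = 0 };
        struct iter it = { .b = &b };

        iter_reset(&it);
        b.pages_removed++;      /* shrink the buffer: pages were removed   */
        iter_peek(&it);         /* detected without touching b.read        */
        printf("read stat still %lu\n", b.read);
        return 0;
}
```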
diff --git a/queue-4.14/series b/queue-4.14/series
index cb94638421c1fa640ee1df4896f247807fd8d658..816c6f4ed2fb234baf2c1f9608383cc04c3fd53c 100644 (file)
@@ -153,3 +153,9 @@ bonding-reset-bond-s-flags-when-down-link-is-p2p-dev.patch
 team-reset-team-s-flags-when-down-link-is-p2p-device.patch
 platform-x86-msi-laptop-fix-rfkill-out-of-sync-on-ms.patch
 benet-fix-return-value-check-in-be_lancer_xmit_worka.patch
+asoc-fsl_spdif-silence-output-on-stop.patch
+block-fix-a-source-code-comment-in-include-uapi-linu.patch
+dm-raid-fix-missing-reconfig_mutex-unlock-in-raid_ct.patch
+ata-pata_ns87415-mark-ns87560_tf_read-static.patch
+ring-buffer-fix-wrong-stat-of-cpu_buffer-read.patch
+tracing-fix-warning-in-trace_buffered_event_disable.patch
diff --git a/queue-4.14/tracing-fix-warning-in-trace_buffered_event_disable.patch b/queue-4.14/tracing-fix-warning-in-trace_buffered_event_disable.patch
new file mode 100644 (file)
index 0000000..86b416a
--- /dev/null
@@ -0,0 +1,119 @@
+From 0522c0f4bdc5ca2405478c85a96dfea05607e44e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Jul 2023 17:58:04 +0800
+Subject: tracing: Fix warning in trace_buffered_event_disable()
+
+From: Zheng Yejian <zhengyejian1@huawei.com>
+
+[ Upstream commit dea499781a1150d285c62b26659f62fb00824fce ]
+
+Warning happened in trace_buffered_event_disable() at
+  WARN_ON_ONCE(!trace_buffered_event_ref)
+
+  Call Trace:
+   ? __warn+0xa5/0x1b0
+   ? trace_buffered_event_disable+0x189/0x1b0
+   __ftrace_event_enable_disable+0x19e/0x3e0
+   free_probe_data+0x3b/0xa0
+   unregister_ftrace_function_probe_func+0x6b8/0x800
+   event_enable_func+0x2f0/0x3d0
+   ftrace_process_regex.isra.0+0x12d/0x1b0
+   ftrace_filter_write+0xe6/0x140
+   vfs_write+0x1c9/0x6f0
+   [...]
+
+The cause of the warning is that, in __ftrace_event_enable_disable(),
+trace_buffered_event_enable() was called once while
+trace_buffered_event_disable() was called twice.
+A reproduction script is shown below; for the analysis, see the comments:
+ ```
+ #!/bin/bash
+
+ cd /sys/kernel/tracing/
+
+ # 1. Register a 'disable_event' command, then:
+ #    1) SOFT_DISABLED_BIT was set;
+ #    2) trace_buffered_event_enable() was called first time;
+ echo 'cmdline_proc_show:disable_event:initcall:initcall_finish' > \
+     set_ftrace_filter
+
+ # 2. Enable the event registered, then:
+ #    1) SOFT_DISABLED_BIT was cleared;
+ #    2) trace_buffered_event_disable() was called first time;
+ echo 1 > events/initcall/initcall_finish/enable
+
+ # 3. Try to call into cmdline_proc_show(), then SOFT_DISABLED_BIT was
+ #    set again!!!
+ cat /proc/cmdline
+
+ # 4. Unregister the 'disable_event' command, then:
+ #    1) SOFT_DISABLED_BIT was cleared again;
+ #    2) trace_buffered_event_disable() was called second time!!!
+ echo '!cmdline_proc_show:disable_event:initcall:initcall_finish' > \
+     set_ftrace_filter
+ ```
+
+To fix it, call trace_buffered_event_enable() only the first time
+soft mode is enabled, and call trace_buffered_event_disable() only the
+last time soft mode is disabled.
+
+Link: https://lore.kernel.org/linux-trace-kernel/20230726095804.920457-1-zhengyejian1@huawei.com
+
+Cc: <mhiramat@kernel.org>
+Fixes: 0fc1b09ff1ff ("tracing: Use temp buffer when filtering events")
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace_events.c | 14 ++++----------
+ 1 file changed, 4 insertions(+), 10 deletions(-)
+
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 1285ef6e2d140..37be6913cfb27 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -370,7 +370,6 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
+ {
+       struct trace_event_call *call = file->event_call;
+       struct trace_array *tr = file->tr;
+-      unsigned long file_flags = file->flags;
+       int ret = 0;
+       int disable;
+@@ -394,6 +393,8 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
+                               break;
+                       disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
+                       clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
++                      /* Disable use of trace_buffered_event */
++                      trace_buffered_event_disable();
+               } else
+                       disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);
+@@ -432,6 +433,8 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
+                       if (atomic_inc_return(&file->sm_ref) > 1)
+                               break;
+                       set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
++                      /* Enable use of trace_buffered_event */
++                      trace_buffered_event_enable();
+               }
+               if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
+@@ -471,15 +474,6 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file,
+               break;
+       }
+-      /* Enable or disable use of trace_buffered_event */
+-      if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) !=
+-          (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) {
+-              if (file->flags & EVENT_FILE_FL_SOFT_DISABLED)
+-                      trace_buffered_event_enable();
+-              else
+-                      trace_buffered_event_disable();
+-      }
+-
+       return ret;
+ }
+-- 
+2.40.1
+
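The invariant the fix establishes is that trace_buffered_event_enable()/disable() are driven by the same reference count that tracks soft-mode users (sm_ref in the kernel), firing only on the 0 -> 1 and 1 -> 0 transitions. A minimal userspace sketch of that pairing rule, with stand-in names:

```
/*
 * Pairing rule: enable on the first soft-mode user, disable on the last,
 * keyed off one reference count, so enable/disable can never get out of
 * balance the way the reproduction script above triggers.
 */
#include <stdio.h>

static int sm_ref;                 /* stands in for file->sm_ref          */
static int buffered_event_ref;     /* stands in for the global counter    */

static void buffered_enable(void)  { buffered_event_ref++; }

static void buffered_disable(void)
{
        if (buffered_event_ref == 0) {       /* would be the WARN_ON_ONCE */
                fprintf(stderr, "unbalanced disable!\n");
                return;
        }
        buffered_event_ref--;
}

static void soft_enable(void)
{
        if (++sm_ref == 1)          /* first enabler only */
                buffered_enable();
}

static void soft_disable(void)
{
        if (--sm_ref == 0)          /* last disabler only */
                buffered_disable();
}

int main(void)
{
        soft_enable();              /* disable_event probe registered  */
        soft_enable();              /* event enabled, soft mode again  */
        soft_disable();
        soft_disable();             /* stays balanced: no warning      */
        printf("buffered_event_ref = %d\n", buffered_event_ref);
        return 0;
}
```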