--- /dev/null
+From stable+bounces-214809-greg=kroah.com@vger.kernel.org Sat Feb 7 18:13:30 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Feb 2026 12:13:22 -0500
+Subject: gve: Fix stats report corruption on queue count change
+To: stable@vger.kernel.org
+Cc: Debarghya Kundu <debarghyak@google.com>, Joshua Washington <joshwash@google.com>, Harshitha Ramamurthy <hramamurthy@google.com>, Jacob Keller <jacob.e.keller@intel.com>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260207171322.459324-1-sashal@kernel.org>
+
+From: Debarghya Kundu <debarghyak@google.com>
+
+[ Upstream commit 7b9ebcce0296e104a0d82a6b09d68564806158ff ]
+
+The driver and the NIC share a region in memory for stats reporting.
+The NIC calculates its offset into this region based on the total size
+of the stats region and the size of the NIC's stats.
+
+When the number of queues is changed, the driver's stats region is
+resized. If the queue count is increased, the NIC can write past
+the end of the allocated stats region, causing memory corruption.
+If the queue count is decreased, there is a gap between the driver
+and NIC stats, leading to incorrect stats reporting.
+
+This change fixes the issue by allocating the stats region at its
+maximum size, and by changing the driver's offset calculation for the
+NIC stats to match the calculation done by the NIC.
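+
+As an illustrative sketch of the shared layout (nic_stats_start is a
+hypothetical name; the real index is rx_base_stats_idx in the hunk
+below): the NIC's stats sit at the end of the region, so both sides
+must derive the same start offset from the same region size:
+
+	nic_stats_start = stats_region_len - nic_stats_len;
+
+Allocating the region for the maximum queue counts keeps
+stats_region_len constant across queue count changes, so the driver's
+and the NIC's calculations stay in agreement.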
+
+Cc: stable@vger.kernel.org
+Fixes: 24aeb56f2d38 ("gve: Add Gvnic stats AQ command and ethtool show/set-priv-flags.")
+Signed-off-by: Debarghya Kundu <debarghyak@google.com>
+Reviewed-by: Joshua Washington <joshwash@google.com>
+Signed-off-by: Harshitha Ramamurthy <hramamurthy@google.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/20260202193925.3106272-2-hramamurthy@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ no stopped-queue feature in older trees ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/google/gve/gve_ethtool.c | 41 +++++++++++++++++---------
+ drivers/net/ethernet/google/gve/gve_main.c | 4 +-
+ 2 files changed, 30 insertions(+), 15 deletions(-)
+
+--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
++++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
+@@ -159,7 +159,8 @@ gve_get_ethtool_stats(struct net_device
+ tmp_tx_pkts, tmp_tx_bytes;
+ u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
+ rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, tx_dropped;
+- int stats_idx, base_stats_idx, max_stats_idx;
++ int rx_base_stats_idx, max_rx_stats_idx, max_tx_stats_idx;
++ int stats_idx, stats_region_len, nic_stats_len;
+ struct stats *report_stats;
+ int *rx_qid_to_stats_idx;
+ int *tx_qid_to_stats_idx;
+@@ -246,14 +247,32 @@ gve_get_ethtool_stats(struct net_device
+ data[i++] = priv->stats_report_trigger_cnt;
+ i = GVE_MAIN_STATS_LEN;
+
+- /* For rx cross-reporting stats, start from nic rx stats in report */
+- base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
+- GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
+- max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
+- base_stats_idx;
++ rx_base_stats_idx = 0;
++ max_rx_stats_idx = 0;
++ max_tx_stats_idx = 0;
++ stats_region_len = priv->stats_report_len -
++ sizeof(struct gve_stats_report);
++ nic_stats_len = (NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
++ NIC_TX_STATS_REPORT_NUM * num_tx_queues) * sizeof(struct stats);
++ if (unlikely((stats_region_len -
++ nic_stats_len) % sizeof(struct stats))) {
++ net_err_ratelimited("Starting index of NIC stats should be multiple of stats size");
++ } else {
++ /* For rx cross-reporting stats,
++ * start from nic rx stats in report
++ */
++ rx_base_stats_idx = (stats_region_len - nic_stats_len) /
++ sizeof(struct stats);
++ max_rx_stats_idx = NIC_RX_STATS_REPORT_NUM *
++ priv->rx_cfg.num_queues +
++ rx_base_stats_idx;
++ max_tx_stats_idx = NIC_TX_STATS_REPORT_NUM *
++ num_tx_queues +
++ max_rx_stats_idx;
++ }
+ /* Preprocess the stats report for rx, map queue id to start index */
+ skip_nic_stats = false;
+- for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
++ for (stats_idx = rx_base_stats_idx; stats_idx < max_rx_stats_idx;
+ stats_idx += NIC_RX_STATS_REPORT_NUM) {
+ u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
+ u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);
+@@ -323,13 +342,9 @@ gve_get_ethtool_stats(struct net_device
+ i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
+ }
+
+- /* For tx cross-reporting stats, start from nic tx stats in report */
+- base_stats_idx = max_stats_idx;
+- max_stats_idx = NIC_TX_STATS_REPORT_NUM * num_tx_queues +
+- max_stats_idx;
+- /* Preprocess the stats report for tx, map queue id to start index */
+ skip_nic_stats = false;
+- for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
++ /* NIC TX stats start right after NIC RX stats */
++ for (stats_idx = max_rx_stats_idx; stats_idx < max_tx_stats_idx;
+ stats_idx += NIC_TX_STATS_REPORT_NUM) {
+ u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
+ u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -186,9 +186,9 @@ static int gve_alloc_stats_report(struct
+ int tx_stats_num, rx_stats_num;
+
+ tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
+- gve_num_tx_queues(priv);
++ priv->tx_cfg.max_queues;
+ rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
+- priv->rx_cfg.num_queues;
++ priv->rx_cfg.max_queues;
+ priv->stats_report_len = struct_size(priv->stats_report, stats,
+ size_add(tx_stats_num, rx_stats_num));
+ priv->stats_report =
--- /dev/null
+From stable+bounces-214838-greg=kroah.com@vger.kernel.org Sat Feb 7 22:07:13 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Feb 2026 16:07:08 -0500
+Subject: KVM: selftests: Add -U_FORTIFY_SOURCE to avoid some unpredictable test failures
+To: stable@vger.kernel.org
+Cc: Zhiquan Li <zhiquan_li@163.com>, Sean Christopherson <seanjc@google.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260207210708.557817-1-sashal@kernel.org>
+
+From: Zhiquan Li <zhiquan_li@163.com>
+
+[ Upstream commit e396a74222654486d6ab45dca5d0c54c408b8b91 ]
+
+Some distributions (such as Ubuntu) configure GCC so that
+_FORTIFY_SOURCE is automatically enabled at -O1 or above. This pulls
+in fortified definitions of some standard library functions. When the
+linker resolves the symbols, the fortified versions may override the
+definitions in lib/string_override.c and reference PLT entries in
+glibc. This is not a problem for host code, but it is a disaster for
+guest code. E.g., building and running x86/nested_emulation_test on
+Ubuntu 24.04 hits an L1 #PF because memset() references
+__memset_chk@plt.
+
+The option -fno-builtin-memset does not help here, because the
+fortified versions are not built-ins but definitions pulled in by
+headers, which serve a different purpose.
+
+To eliminate this unpredictable behavior, which may vary depending on
+the linker and platform, add "-U_FORTIFY_SOURCE" to CFLAGS to prevent
+the fortified definitions from being introduced.
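+
+A quick way to check whether a given toolchain injects the macro (an
+illustration, not part of the patch; the output shown is what a
+distribution that enables it might print):
+
+	$ echo | gcc -O2 -dM -E - | grep FORTIFY
+	#define _FORTIFY_SOURCE 3
+
+A command-line -U_FORTIFY_SOURCE removes even such a spec-injected
+definition before any header is processed, so the fortified
+declarations are never pulled in.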
+
+Signed-off-by: Zhiquan Li <zhiquan_li@163.com>
+Link: https://patch.msgid.link/20260122053551.548229-1-zhiquan_li@163.com
+Fixes: 6b6f71484bf4 ("KVM: selftests: Implement memcmp(), memcpy(), and memset() for guest use")
+Cc: stable@vger.kernel.org
+[sean: tag for stable]
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+[ Makefile.kvm -> Makefile ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/kvm/Makefile | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/tools/testing/selftests/kvm/Makefile
++++ b/tools/testing/selftests/kvm/Makefile
+@@ -212,6 +212,7 @@ LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/
+ endif
+ CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
+ -Wno-gnu-variable-sized-type-not-at-end -MD\
++ -U_FORTIFY_SOURCE \
+ -fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \
+ -fno-builtin-strnlen \
+ -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
--- /dev/null
+From stable+bounces-214808-greg=kroah.com@vger.kernel.org Sat Feb 7 17:52:59 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Feb 2026 11:52:19 -0500
+Subject: tracing: Fix ftrace event field alignments
+To: stable@vger.kernel.org
+Cc: Steven Rostedt <rostedt@goodmis.org>, Mathieu Desnoyers <mathieu.desnoyers@efficios.com>, Mark Rutland <mark.rutland@arm.com>, "Masami Hiramatsu (Google)" <mhiramat@kernel.org>, "jempty.liang" <imntjempty@163.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260207165219.435349-1-sashal@kernel.org>
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+[ Upstream commit 033c55fe2e326bea022c3cc5178ecf3e0e459b82 ]
+
+The fields of ftrace-specific events (events used to save ftrace
+internal events like function traces and trace_printk) are generated
+similarly to how normal trace event fields are generated. That is, the
+fields are added to a trace_events_fields array that saves the name,
+offset, size, alignment and signedness of the field. It is used to
+produce the output in the format file in tracefs so that tooling knows
+how to parse the binary data of the trace events.
+
+The issue is that some of the ftrace event structures are packed. The
+function graph exit event structures are among them. Their 64-bit
+calltime and rettime fields end up 4-byte aligned, but the algorithm
+that reports the layout to userspace shows them as 8-byte aligned.
+
+The macros that create the ftrace events include ones for embedded
+structure fields. There are two macros for these fields:
+
+ __field_desc() and __field_packed()
+
+The latter macro differs in that it treats the field as packed.
+
+Rename that macro to __field_desc_packed(), change __field_packed() to
+be a normal field macro that treats the field as packed, and have the
+calltime and rettime fields use it.
+
+This showed up on 32-bit architectures for the function graph time
+fields. The format file had:
+
+ ~# cat /sys/kernel/tracing/events/ftrace/funcgraph_exit/format
+[..]
+ field:unsigned long func; offset:8; size:4; signed:0;
+ field:unsigned int depth; offset:12; size:4; signed:0;
+ field:unsigned int overrun; offset:16; size:4; signed:0;
+ field:unsigned long long calltime; offset:24; size:8; signed:0;
+ field:unsigned long long rettime; offset:32; size:8; signed:0;
+
+Notice that overrun is at offset 16 with size 4, so within the
+structure calltime is at offset 20 (16 + 4), yet the format shows it
+at offset 24. That's because the alignment of unsigned long long was
+taken from its use as a standalone declaration (8) rather than as a
+member of a structure, where it is aligned to the word size (in this
+case 4).
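+
+A standalone sketch of the distinction (illustrative program, not
+kernel code; the struct and names are hypothetical; build it 32-bit,
+e.g. with gcc -m32):
+
+	#include <stdio.h>
+	#include <stddef.h>
+
+	struct exit_sketch {
+		unsigned int overrun;
+		unsigned long long calltime;
+	} __attribute__((packed));
+
+	int main(void)
+	{
+		/* On i386 this prints "align=8 offset=4": the type's
+		 * preferred alignment (8) is not where the field lands
+		 * as a member of a packed structure (4).
+		 */
+		printf("align=%zu offset=%zu\n",
+		       __alignof__(unsigned long long),
+		       offsetof(struct exit_sketch, calltime));
+		return 0;
+	}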
+
+By using the proper structure alignment, the format has it at the correct
+offset:
+
+ ~# cat /sys/kernel/tracing/events/ftrace/funcgraph_exit/format
+[..]
+ field:unsigned long func; offset:8; size:4; signed:0;
+ field:unsigned int depth; offset:12; size:4; signed:0;
+ field:unsigned int overrun; offset:16; size:4; signed:0;
+ field:unsigned long long calltime; offset:20; size:8; signed:0;
+ field:unsigned long long rettime; offset:28; size:8; signed:0;
+
+Cc: stable@vger.kernel.org
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Reported-by: "jempty.liang" <imntjempty@163.com>
+Link: https://patch.msgid.link/20260204113628.53faec78@gandalf.local.home
+Fixes: 04ae87a52074e ("ftrace: Rework event_create_dir()")
+Closes: https://lore.kernel.org/all/20260130015740.212343-1-imntjempty@163.com/
+Closes: https://lore.kernel.org/all/20260202123342.2544795-1-imntjempty@163.com/
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+[ adapted field types and macro arguments ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace.h | 7 +++++--
+ kernel/trace/trace_entries.h | 26 +++++++++++++-------------
+ kernel/trace/trace_export.c | 21 +++++++++++++++------
+ 3 files changed, 33 insertions(+), 21 deletions(-)
+
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -65,14 +65,17 @@ enum trace_type {
+ #undef __field_fn
+ #define __field_fn(type, item) type item;
+
++#undef __field_packed
++#define __field_packed(type, item) type item;
++
+ #undef __field_struct
+ #define __field_struct(type, item) __field(type, item)
+
+ #undef __field_desc
+ #define __field_desc(type, container, item)
+
+-#undef __field_packed
+-#define __field_packed(type, container, item)
++#undef __field_desc_packed
++#define __field_desc_packed(type, container, item)
+
+ #undef __array
+ #define __array(type, item, size) type item[size];
+--- a/kernel/trace/trace_entries.h
++++ b/kernel/trace/trace_entries.h
+@@ -78,8 +78,8 @@ FTRACE_ENTRY_PACKED(funcgraph_entry, ftr
+
+ F_STRUCT(
+ __field_struct( struct ftrace_graph_ent, graph_ent )
+- __field_packed( unsigned long, graph_ent, func )
+- __field_packed( int, graph_ent, depth )
++ __field_desc_packed( unsigned long, graph_ent, func )
++ __field_desc_packed( int, graph_ent, depth )
+ ),
+
+ F_printk("--> %ps (%d)", (void *)__entry->func, __entry->depth)
+@@ -94,12 +94,12 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftra
+
+ F_STRUCT(
+ __field_struct( struct ftrace_graph_ret, ret )
+- __field_packed( unsigned long, ret, func )
+- __field_packed( unsigned long, ret, retval )
+- __field_packed( int, ret, depth )
+- __field_packed( unsigned int, ret, overrun )
+- __field_packed( unsigned long long, ret, calltime)
+- __field_packed( unsigned long long, ret, rettime )
++ __field_desc_packed( unsigned long, ret, func )
++ __field_desc_packed( unsigned long, ret, retval )
++ __field_desc_packed( int, ret, depth )
++ __field_desc_packed( unsigned int, ret, overrun )
++ __field_packed(unsigned long long, calltime)
++ __field_packed(unsigned long long, rettime )
+ ),
+
+ F_printk("<-- %ps (%d) (start: %llx end: %llx) over: %d retval: %lx",
+@@ -116,11 +116,11 @@ FTRACE_ENTRY_PACKED(funcgraph_exit, ftra
+
+ F_STRUCT(
+ __field_struct( struct ftrace_graph_ret, ret )
+- __field_packed( unsigned long, ret, func )
+- __field_packed( int, ret, depth )
+- __field_packed( unsigned int, ret, overrun )
+- __field_packed( unsigned long long, ret, calltime)
+- __field_packed( unsigned long long, ret, rettime )
++ __field_desc_packed( unsigned long, ret, func )
++ __field_desc_packed( int, ret, depth )
++ __field_desc_packed( unsigned int, ret, overrun )
++ __field_packed(unsigned long long, calltime)
++ __field_packed(unsigned long long, rettime )
+ ),
+
+ F_printk("<-- %ps (%d) (start: %llx end: %llx) over: %d",
+--- a/kernel/trace/trace_export.c
++++ b/kernel/trace/trace_export.c
+@@ -42,11 +42,14 @@ static int ftrace_event_register(struct
+ #undef __field_fn
+ #define __field_fn(type, item) type item;
+
++#undef __field_packed
++#define __field_packed(type, item) type item;
++
+ #undef __field_desc
+ #define __field_desc(type, container, item) type item;
+
+-#undef __field_packed
+-#define __field_packed(type, container, item) type item;
++#undef __field_desc_packed
++#define __field_desc_packed(type, container, item) type item;
+
+ #undef __array
+ #define __array(type, item, size) type item[size];
+@@ -104,11 +107,14 @@ static void __always_unused ____ftrace_c
+ #undef __field_fn
+ #define __field_fn(_type, _item) __field_ext(_type, _item, FILTER_TRACE_FN)
+
++#undef __field_packed
++#define __field_packed(_type, _item) __field_ext_packed(_type, _item, FILTER_OTHER)
++
+ #undef __field_desc
+ #define __field_desc(_type, _container, _item) __field_ext(_type, _item, FILTER_OTHER)
+
+-#undef __field_packed
+-#define __field_packed(_type, _container, _item) __field_ext_packed(_type, _item, FILTER_OTHER)
++#undef __field_desc_packed
++#define __field_desc_packed(_type, _container, _item) __field_ext_packed(_type, _item, FILTER_OTHER)
+
+ #undef __array
+ #define __array(_type, _item, _len) { \
+@@ -146,11 +152,14 @@ static struct trace_event_fields ftrace_
+ #undef __field_fn
+ #define __field_fn(type, item)
+
++#undef __field_packed
++#define __field_packed(type, item)
++
+ #undef __field_desc
+ #define __field_desc(type, container, item)
+
+-#undef __field_packed
+-#define __field_packed(type, container, item)
++#undef __field_desc_packed
++#define __field_desc_packed(type, container, item)
+
+ #undef __array
+ #define __array(type, item, len)