-LINUX_VERSION-5.15 = .178
-LINUX_KERNEL_HASH-5.15.178 = efe9f7eb5ea4d26cec6290689343e1804eb3b4a88ff5a60497a696fc08157c42
+LINUX_VERSION-5.15 = .179
+LINUX_KERNEL_HASH-5.15.179 = 9319a47b1e9b5d344ff6015431856d0c9640e4faedc527c87f9129061a27136f
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
-@@ -226,7 +226,7 @@ struct sk_buff *tcp_gro_receive(struct l
+@@ -229,7 +229,7 @@ struct sk_buff *tcp_gro_receive(struct l
th2 = tcp_hdr(p);
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
-@@ -244,8 +244,8 @@ found:
+@@ -247,8 +247,8 @@ found:
~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
for (i = sizeof(*th); i < thlen; i += 4)
+++ /dev/null
-From 22ce134cee72bd9ef4b02c8769f868309f2ab8fd Mon Sep 17 00:00:00 2001
-From: Maxime Ripard <maxime@cerno.tech>
-Date: Thu, 19 Aug 2021 14:37:04 +0200
-Subject: [PATCH] drm/probe-helper: Create a HPD IRQ event helper for a
- single connector
-
-The drm_helper_hpd_irq_event() function iterates over all the
-connectors when a hotplug event is detected.
-
-During that iteration, it will call each connector's detect function and
-figure out whether its status changed.
-
-Finally, if any connector changed, it will notify user-space and the
-clients that something changed on the DRM device.
-
-This is supposed to be used for drivers that don't have a hotplug
-interrupt for individual connectors. However, drivers that can use an
-interrupt for a single connector are left in the dust and can either
-reimplement the logic used during the iteration for each connector or
-use that helper and iterate over all connectors all the time.
-
-Since both are suboptimal, let's create a helper that will only perform
-the status detection on a single connector.
-
-Signed-off-by: Maxime Ripard <maxime@cerno.tech>
-
----
-
-Changes from v1:
- - Rename the shared function
- - Move the hotplug event notification out of the shared function
- - Added missing locks
- - Improve the documentation
- - Switched to drm_dbg_kms
----
- drivers/gpu/drm/drm_probe_helper.c | 120 ++++++++++++++++++++---------
- include/drm/drm_probe_helper.h | 1 +
- 2 files changed, 86 insertions(+), 35 deletions(-)
-
---- a/drivers/gpu/drm/drm_probe_helper.c
-+++ b/drivers/gpu/drm/drm_probe_helper.c
-@@ -805,6 +805,86 @@ void drm_kms_helper_poll_fini(struct drm
- }
- EXPORT_SYMBOL(drm_kms_helper_poll_fini);
-
-+static bool check_connector_changed(struct drm_connector *connector)
-+{
-+ struct drm_device *dev = connector->dev;
-+ enum drm_connector_status old_status;
-+ u64 old_epoch_counter;
-+ bool changed = false;
-+
-+ /* Only handle HPD capable connectors. */
-+ drm_WARN_ON(dev, !(connector->polled & DRM_CONNECTOR_POLL_HPD));
-+
-+ drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
-+
-+ old_status = connector->status;
-+ old_epoch_counter = connector->epoch_counter;
-+
-+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Old epoch counter %llu\n",
-+ connector->base.id,
-+ connector->name,
-+ old_epoch_counter);
-+
-+ connector->status = drm_helper_probe_detect(connector, NULL, false);
-+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s\n",
-+ connector->base.id,
-+ connector->name,
-+ drm_get_connector_status_name(old_status),
-+ drm_get_connector_status_name(connector->status));
-+
-+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] New epoch counter %llu\n",
-+ connector->base.id,
-+ connector->name,
-+ connector->epoch_counter);
-+
-+ /*
-+ * Check if epoch counter had changed, meaning that we need
-+ * to send a uevent.
-+ */
-+ if (old_epoch_counter != connector->epoch_counter)
-+ changed = true;
-+
-+ return changed;
-+}
-+
-+/**
-+ * drm_connector_helper_hpd_irq_event - hotplug processing
-+ * @connector: drm_connector
-+ *
-+ * Drivers can use this helper function to run a detect cycle on a connector
-+ * which has the DRM_CONNECTOR_POLL_HPD flag set in its &polled member.
-+ *
-+ * This helper function is useful for drivers which can track hotplug
-+ * interrupts for a single connector. Drivers that want to send a
-+ * hotplug event for all connectors or can't track hotplug interrupts
-+ * per connector need to use drm_helper_hpd_irq_event().
-+ *
-+ * This function must be called from process context with no mode
-+ * setting locks held.
-+ *
-+ * Note that a connector can be both polled and probed from the hotplug
-+ * handler, in case the hotplug interrupt is known to be unreliable.
-+ */
-+bool drm_connector_helper_hpd_irq_event(struct drm_connector *connector)
-+{
-+ struct drm_device *dev = connector->dev;
-+ bool changed;
-+
-+ mutex_lock(&dev->mode_config.mutex);
-+ changed = check_connector_changed(connector);
-+ mutex_unlock(&dev->mode_config.mutex);
-+
-+ if (changed) {
-+ drm_kms_helper_hotplug_event(dev);
-+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Sent hotplug event\n",
-+ connector->base.id,
-+ connector->name);
-+ }
-+
-+ return changed;
-+}
-+EXPORT_SYMBOL(drm_connector_helper_hpd_irq_event);
-+
- /**
- * drm_helper_hpd_irq_event - hotplug processing
- * @dev: drm_device
-@@ -818,9 +898,10 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini);
- * interrupts for each connector.
- *
- * Drivers which support hotplug interrupts for each connector individually and
-- * which have a more fine-grained detect logic should bypass this code and
-- * directly call drm_kms_helper_hotplug_event() in case the connector state
-- * changed.
-+ * which have a more fine-grained detect logic can use
-+ * drm_connector_helper_hpd_irq_event(). Alternatively, they should bypass this
-+ * code and directly call drm_kms_helper_hotplug_event() in case the connector
-+ * state changed.
- *
- * This function must be called from process context with no mode
- * setting locks held.
-@@ -832,9 +913,7 @@ bool drm_helper_hpd_irq_event(struct drm
- {
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
-- enum drm_connector_status old_status;
- bool changed = false;
-- u64 old_epoch_counter;
-
- if (!dev->mode_config.poll_enabled)
- return false;
-@@ -842,37 +921,8 @@ bool drm_helper_hpd_irq_event(struct drm
- mutex_lock(&dev->mode_config.mutex);
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
-- /* Only handle HPD capable connectors. */
-- if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
-- continue;
--
-- old_status = connector->status;
--
-- old_epoch_counter = connector->epoch_counter;
--
-- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Old epoch counter %llu\n", connector->base.id,
-- connector->name,
-- old_epoch_counter);
--
-- connector->status = drm_helper_probe_detect(connector, NULL, false);
-- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
-- connector->base.id,
-- connector->name,
-- drm_get_connector_status_name(old_status),
-- drm_get_connector_status_name(connector->status));
--
-- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] New epoch counter %llu\n",
-- connector->base.id,
-- connector->name,
-- connector->epoch_counter);
--
-- /*
-- * Check if epoch counter had changed, meaning that we need
-- * to send a uevent.
-- */
-- if (old_epoch_counter != connector->epoch_counter)
-+ if (check_connector_changed(connector))
- changed = true;
--
- }
- drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev->mode_config.mutex);
---- a/include/drm/drm_probe_helper.h
-+++ b/include/drm/drm_probe_helper.h
-@@ -18,6 +18,7 @@ int drm_helper_probe_detect(struct drm_c
- void drm_kms_helper_poll_init(struct drm_device *dev);
- void drm_kms_helper_poll_fini(struct drm_device *dev);
- bool drm_helper_hpd_irq_event(struct drm_device *dev);
-+bool drm_connector_helper_hpd_irq_event(struct drm_connector *connector);
- void drm_kms_helper_hotplug_event(struct drm_device *dev);
-
- void drm_kms_helper_poll_disable(struct drm_device *dev);
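The deleted patch above introduced drm_connector_helper_hpd_irq_event() for
drivers that track a hotplug interrupt per connector. Below is a minimal
sketch, not taken from the patch, of how such a driver might call the helper
from a threaded HPD interrupt handler; the foo_encoder structure and the
handler name are hypothetical.

#include <linux/interrupt.h>
#include <drm/drm_connector.h>
#include <drm/drm_probe_helper.h>

/* Hypothetical per-encoder state; only the embedded connector matters here. */
struct foo_encoder {
	struct drm_connector connector;
};

static irqreturn_t foo_hpd_irq_thread(int irq, void *arg)
{
	struct foo_encoder *foo = arg;

	/*
	 * Runs the detect cycle on this single connector and, if its epoch
	 * counter changed, sends the hotplug uevent. The helper must run in
	 * process context with no mode setting locks held, hence the
	 * threaded IRQ handler.
	 */
	drm_connector_helper_hpd_irq_event(&foo->connector);

	return IRQ_HANDLED;
}

Registering the handler with request_threaded_irq() (NULL hard handler,
IRQF_ONESHOT) satisfies the process-context requirement.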
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
-@@ -5893,6 +5893,9 @@ int __init cgroup_init_early(void)
+@@ -5889,6 +5889,9 @@ int __init cgroup_init_early(void)
return 0;
}
/**
* cgroup_init - cgroup initialization
*
-@@ -5931,6 +5934,12 @@ int __init cgroup_init(void)
+@@ -5927,6 +5930,12 @@ int __init cgroup_init(void)
mutex_unlock(&cgroup_mutex);
for_each_subsys(ss, ssid) {
if (ss->early_init) {
struct cgroup_subsys_state *css =
-@@ -6523,6 +6532,10 @@ static int __init cgroup_disable(char *s
+@@ -6527,6 +6536,10 @@ static int __init cgroup_disable(char *s
strcmp(token, ss->legacy_name))
continue;
static_branch_disable(cgroup_subsys_enabled_key[i]);
pr_info("Disabling %s control group subsystem\n",
ss->name);
-@@ -6541,6 +6554,31 @@ static int __init cgroup_disable(char *s
+@@ -6545,6 +6558,31 @@ static int __init cgroup_disable(char *s
}
__setup("cgroup_disable=", cgroup_disable);
}
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
-@@ -5679,7 +5679,7 @@ static void port_event(struct usb_hub *h
+@@ -5689,7 +5689,7 @@ static void port_event(struct usb_hub *h
port_dev->over_current_count++;
port_over_current_notify(port_dev);
+ */
+ void (*fixup_endpoint)(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint *ep, int interval);
- /* Returns the hardware-chosen device address */
- int (*address_device)(struct usb_hcd *, struct usb_device *udev);
- /* prepares the hardware to send commands to the device */
-@@ -449,6 +454,8 @@ extern void usb_hcd_unmap_urb_setup_for_
+ /* Set the hardware-chosen device address */
+ int (*address_device)(struct usb_hcd *, struct usb_device *udev,
+ unsigned int timeout_ms);
+@@ -450,6 +455,8 @@ extern void usb_hcd_unmap_urb_setup_for_
extern void usb_hcd_unmap_urb_for_dma(struct usb_hcd *, struct urb *);
extern void usb_hcd_flush_endpoint(struct usb_device *udev,
struct usb_host_endpoint *ep);
* non-error returns are a promise to giveback() the urb later
* we drop ownership so next owner (or urb unlink) can get it
*/
-@@ -5501,6 +5604,7 @@ static const struct hc_driver xhci_hc_dr
+@@ -5510,6 +5613,7 @@ static const struct hc_driver xhci_hc_dr
.endpoint_reset = xhci_endpoint_reset,
.check_bandwidth = xhci_check_bandwidth,
.reset_bandwidth = xhci_reset_bandwidth,
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
-@@ -2276,6 +2276,7 @@ xhci_alloc_interrupter(struct xhci_hcd *
+@@ -2278,6 +2278,7 @@ xhci_alloc_interrupter(struct xhci_hcd *
{
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
struct xhci_interrupter *ir;
u64 erst_base;
u32 erst_size;
int ret;
-@@ -2296,7 +2297,11 @@ xhci_alloc_interrupter(struct xhci_hcd *
+@@ -2298,7 +2299,11 @@ xhci_alloc_interrupter(struct xhci_hcd *
return NULL;
ir->ir_set = &xhci->run_regs->ir_set[intr_num];
0, flags);
if (!ir->event_ring) {
xhci_warn(xhci, "Failed to allocate interrupter %d event ring\n", intr_num);
-@@ -2312,7 +2317,7 @@ xhci_alloc_interrupter(struct xhci_hcd *
+@@ -2314,7 +2319,7 @@ xhci_alloc_interrupter(struct xhci_hcd *
/* set ERST count with the number of entries in the segment table */
erst_size = readl(&ir->ir_set->erst_size);
erst_size &= ERST_SIZE_MASK;
erst_base = xhci_read_64(xhci, &ir->ir_set->erst_base);
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
-@@ -1424,8 +1424,9 @@ struct urb_priv {
+@@ -1429,8 +1429,9 @@ struct urb_priv {
* Each segment table entry is 4*32bits long. 1K seems like an ok size:
* (1K bytes * 8bytes/bit) / (4*32 bits) = 64 segment entries in the table,
* meaning 64 ring segments.
--- a/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml
-@@ -109,7 +109,7 @@ required:
+@@ -108,7 +108,7 @@ required:
- resets
- ddc
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
-@@ -675,9 +675,9 @@ deq_found:
+@@ -677,9 +677,9 @@ deq_found:
}
if ((ep->ep_state & SET_DEQ_PENDING)) {
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
-@@ -1832,6 +1832,8 @@ static const struct usb_audio_quirk_flag
+@@ -1833,6 +1833,8 @@ static const struct usb_audio_quirk_flag
QUIRK_FLAG_IGNORE_CTL_ERROR),
DEVICE_FLG(0x0951, 0x16ad, /* Kingston HyperX */
QUIRK_FLAG_CTL_MSG_DELAY_1M),
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
-@@ -305,8 +305,10 @@ static void xhci_pci_quirks(struct devic
- pdev->device == 0x3432)
- xhci->quirks |= XHCI_BROKEN_STREAMS;
-
-- if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483)
-+ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) {
+@@ -310,6 +310,7 @@ static void xhci_pci_quirks(struct devic
+ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == PCI_DEVICE_ID_VIA_VL805) {
xhci->quirks |= XHCI_LPM_SUPPORT;
+ xhci->quirks |= XHCI_TRB_OVERFETCH;
+ xhci->quirks |= XHCI_AVOID_DQ_ON_LINK;
-+ }
+ }
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
- pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) {
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
-@@ -665,6 +665,15 @@ static int xhci_move_dequeue_past_td(str
+@@ -667,6 +667,15 @@ static int xhci_move_dequeue_past_td(str
} while (!cycle_found || !td_last_trb_found);
deq_found:
addr = xhci_trb_virt_to_dma(new_seg, new_deq);
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
-@@ -1665,6 +1665,7 @@ struct xhci_hcd {
+@@ -1670,6 +1670,7 @@ struct xhci_hcd {
#define XHCI_ZHAOXIN_HOST BIT_ULL(46)
#define XHCI_WRITE_64_HI_LO BIT_ULL(47)
#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)
if (ret)
return -ENOMEM;
-@@ -1820,7 +1824,7 @@ int xhci_alloc_erst(struct xhci_hcd *xhc
+@@ -1822,7 +1826,7 @@ int xhci_alloc_erst(struct xhci_hcd *xhc
for (val = 0; val < evt_ring->num_segs; val++) {
entry = &erst->entries[val];
entry->seg_addr = cpu_to_le64(seg->dma);
xhci_err(xhci, "Tried to move enqueue past ring segment\n");
return;
}
-@@ -3314,7 +3317,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd
+@@ -3316,7 +3319,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd
* that clears the EHB.
*/
while (xhci_handle_event(xhci, ir) > 0) {
continue;
xhci_update_erst_dequeue(xhci, ir, event_ring_deq);
event_ring_deq = ir->event_ring->dequeue;
-@@ -3456,7 +3459,8 @@ static int prepare_ring(struct xhci_hcd
+@@ -3458,7 +3461,8 @@ static int prepare_ring(struct xhci_hcd
}
}
* when the cycle bit is set to 1.
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
-@@ -1386,6 +1386,7 @@ struct xhci_ring {
+@@ -1391,6 +1391,7 @@ struct xhci_ring {
unsigned int num_trbs_free;
unsigned int num_trbs_free_temp;
unsigned int bounce_buf_len;
cycle_state, type, max_packet, flags);
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
-@@ -308,6 +308,7 @@ static void xhci_pci_quirks(struct devic
- if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) {
+@@ -311,6 +311,7 @@ static void xhci_pci_quirks(struct devic
xhci->quirks |= XHCI_LPM_SUPPORT;
+ xhci->quirks |= XHCI_TRB_OVERFETCH;
xhci->quirks |= XHCI_AVOID_DQ_ON_LINK;
+ xhci->quirks |= XHCI_VLI_TRB_CACHE_BUG;
}
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
-@@ -1667,6 +1667,7 @@ struct xhci_hcd {
+@@ -1672,6 +1672,7 @@ struct xhci_hcd {
#define XHCI_WRITE_64_HI_LO BIT_ULL(47)
#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)
#define XHCI_AVOID_DQ_ON_LINK BIT_ULL(49)
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
-@@ -309,6 +309,7 @@ static void xhci_pci_quirks(struct devic
- xhci->quirks |= XHCI_LPM_SUPPORT;
+@@ -312,6 +312,7 @@ static void xhci_pci_quirks(struct devic
+ xhci->quirks |= XHCI_TRB_OVERFETCH;
xhci->quirks |= XHCI_AVOID_DQ_ON_LINK;
xhci->quirks |= XHCI_VLI_TRB_CACHE_BUG;
+ xhci->quirks |= XHCI_VLI_SS_BULK_OUT_BUG;
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
-@@ -3769,14 +3769,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
+@@ -3771,14 +3771,15 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
unsigned int num_trbs;
unsigned int start_cycle, num_sgs = 0;
unsigned int enqd_len, block_len, trb_buff_len, full_len;
full_len = urb->transfer_buffer_length;
/* If we have scatter/gather list, we use it. */
if (urb->num_sgs && !(urb->transfer_flags & URB_DMA_MAP_SINGLE)) {
-@@ -3813,6 +3814,17 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
+@@ -3815,6 +3816,17 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
start_cycle = ring->cycle_state;
send_addr = addr;
/* Queue the TRBs, even if they are zero-length */
for (enqd_len = 0; first_trb || enqd_len < full_len;
enqd_len += trb_buff_len) {
-@@ -3825,6 +3837,11 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
+@@ -3827,6 +3839,11 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
if (enqd_len + trb_buff_len > full_len)
trb_buff_len = full_len - enqd_len;
first_trb = false;
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
-@@ -1668,6 +1668,7 @@ struct xhci_hcd {
+@@ -1673,6 +1673,7 @@ struct xhci_hcd {
#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)
#define XHCI_AVOID_DQ_ON_LINK BIT_ULL(49)
#define XHCI_VLI_TRB_CACHE_BUG BIT_ULL(50)
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
-@@ -3769,7 +3769,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
+@@ -3771,7 +3771,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
unsigned int num_trbs;
unsigned int start_cycle, num_sgs = 0;
unsigned int enqd_len, block_len, trb_buff_len, full_len;
u32 field, length_field, remainder, maxpacket;
u64 addr, send_addr;
-@@ -3815,14 +3815,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
+@@ -3817,14 +3817,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
send_addr = addr;
if (xhci->quirks & XHCI_VLI_SS_BULK_OUT_BUG &&
}
/* Queue the TRBs, even if they are zero-length */
-@@ -3837,7 +3832,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
+@@ -3839,7 +3834,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *
if (enqd_len + trb_buff_len > full_len)
trb_buff_len = full_len - enqd_len;
+++ /dev/null
-From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
-Date: Fri, 10 Jun 2022 13:10:47 +0200
-Subject: [PATCH] bgmac: reduce max frame size to support just MTU 1500
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-bgmac allocates a new replacement buffer before handling each received
-frame. Allocating & DMA-preparing 9724 B each time consumes a lot of CPU
-time. Ideally bgmac should just respect the currently set MTU, but that
-isn't the case right now. For now, just revert to the old, limited frame
-size.
-
-This change bumps NAT masquerade speed by ~95%.
-
-Ref: 8c7da63978f1 ("bgmac: configure MTU and add support for frames beyond 8192 byte size")
-Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
----
- drivers/net/ethernet/broadcom/bgmac.h | 3 +--
- 1 file changed, 1 insertion(+), 2 deletions(-)
-
---- a/drivers/net/ethernet/broadcom/bgmac.h
-+++ b/drivers/net/ethernet/broadcom/bgmac.h
-@@ -328,8 +328,7 @@
- #define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */
- #define BGMAC_RX_BUF_OFFSET (NET_SKB_PAD + NET_IP_ALIGN - \
- BGMAC_RX_FRAME_OFFSET)
--/* Jumbo frame size with FCS */
--#define BGMAC_RX_MAX_FRAME_SIZE 9724
-+#define BGMAC_RX_MAX_FRAME_SIZE 1536
- #define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
- #define BGMAC_RX_ALLOC_SIZE (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
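The deleted bgmac patch above trades jumbo-frame support for much smaller
per-packet RX allocations. The standalone sketch below works through the
per-slot size arithmetic; NET_SKB_PAD, NET_IP_ALIGN, the skb_shared_info
size and the SKB_DATA_ALIGN granularity are assumed values that vary by
architecture and config, so the printed numbers are only indicative.

#include <stdio.h>

#define FRAME_OFFSET   30U   /* BGMAC_RX_FRAME_OFFSET */
#define SKB_PAD        64U   /* assumed NET_SKB_PAD */
#define IP_ALIGN        2U   /* assumed NET_IP_ALIGN */
#define SHINFO        320U   /* assumed sizeof(struct skb_shared_info) */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

/* Mirrors BGMAC_RX_ALLOC_SIZE: aligned data buffer plus aligned shared info. */
static unsigned int rx_alloc_size(unsigned int max_frame)
{
	unsigned int buf_size = FRAME_OFFSET + max_frame;             /* BGMAC_RX_BUF_SIZE */
	unsigned int buf_offset = SKB_PAD + IP_ALIGN - FRAME_OFFSET;  /* BGMAC_RX_BUF_OFFSET */

	return ALIGN_UP(buf_size + buf_offset, 64U) + ALIGN_UP(SHINFO, 64U);
}

int main(void)
{
	printf("jumbo    (9724): ~%u bytes per RX slot\n", rx_alloc_size(9724));
	printf("MTU 1500 (1536): ~%u bytes per RX slot\n", rx_alloc_size(1536));
	return 0;
}

With these assumed values each RX slot drops from roughly 10 KiB to under
2 KiB, which is the per-frame allocation cost the commit message refers to.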
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
-@@ -4268,7 +4268,8 @@ static int tg3_power_down_prepare(struct
+@@ -4269,7 +4269,8 @@ static int tg3_power_down_prepare(struct
static void tg3_power_down(struct tg3 *tp)
{
pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
-@@ -1664,6 +1664,7 @@ struct xhci_hcd {
+@@ -1669,6 +1669,7 @@ struct xhci_hcd {
#define XHCI_ZHAOXIN_HOST BIT_ULL(46)
#define XHCI_WRITE_64_HI_LO BIT_ULL(47)
#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)
+++ /dev/null
-From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= <rafal@milecki.pl>
-Date: Fri, 10 Jun 2022 13:10:47 +0200
-Subject: [PATCH] bgmac: reduce max frame size to support just MTU 1500
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-bgmac allocates a new replacement buffer before handling each received
-frame. Allocating & DMA-preparing 9724 B each time consumes a lot of CPU
-time. Ideally bgmac should just respect the currently set MTU, but that
-isn't the case right now. For now, just revert to the old, limited frame
-size.
-
-This change bumps NAT masquerade speed by ~95%.
-
-Ref: 8c7da63978f1 ("bgmac: configure MTU and add support for frames beyond 8192 byte size")
-Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
----
- drivers/net/ethernet/broadcom/bgmac.h | 3 +--
- 1 file changed, 1 insertion(+), 2 deletions(-)
-
---- a/drivers/net/ethernet/broadcom/bgmac.h
-+++ b/drivers/net/ethernet/broadcom/bgmac.h
-@@ -328,8 +328,7 @@
- #define BGMAC_RX_FRAME_OFFSET 30 /* There are 2 unused bytes between header and real data */
- #define BGMAC_RX_BUF_OFFSET (NET_SKB_PAD + NET_IP_ALIGN - \
- BGMAC_RX_FRAME_OFFSET)
--/* Jumbo frame size with FCS */
--#define BGMAC_RX_MAX_FRAME_SIZE 9724
-+#define BGMAC_RX_MAX_FRAME_SIZE 1536
- #define BGMAC_RX_BUF_SIZE (BGMAC_RX_FRAME_OFFSET + BGMAC_RX_MAX_FRAME_SIZE)
- #define BGMAC_RX_ALLOC_SIZE (SKB_DATA_ALIGN(BGMAC_RX_BUF_SIZE + BGMAC_RX_BUF_OFFSET) + \
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#ifndef arch_wants_old_prefaulted_pte
static inline bool arch_wants_old_prefaulted_pte(void)
{
-@@ -2819,7 +2807,7 @@ static inline int cow_user_page(struct p
+@@ -2821,7 +2809,7 @@ static inline int cow_user_page(struct p
* On architectures with software "accessed" bits, we would
* take a double page fault, so mark it accessed here.
*/
(1UL << PG_private | 1UL << PG_private_2)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -907,6 +907,10 @@ struct task_struct {
+@@ -905,6 +905,10 @@ struct task_struct {
#ifdef CONFIG_MEMCG
unsigned in_user_fault:1;
#endif
VM_BUG_ON_PAGE(tail > 2 && page_tail->mapping != TAIL_MAPPING,
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -5199,6 +5199,7 @@ static void __mem_cgroup_free(struct mem
+@@ -5204,6 +5204,7 @@ static void __mem_cgroup_free(struct mem
static void mem_cgroup_free(struct mem_cgroup *memcg)
{
memcg_wb_domain_exit(memcg);
__mem_cgroup_free(memcg);
}
-@@ -5262,6 +5263,7 @@ static struct mem_cgroup *mem_cgroup_all
+@@ -5267,6 +5268,7 @@ static struct mem_cgroup *mem_cgroup_all
spin_lock(&memcg_idr_lock);
idr_replace(&mem_cgroup_idr, memcg, memcg->id.id);
spin_unlock(&memcg_idr_lock);
mem_cgroup_id_remove(memcg);
--- a/mm/memory.c
+++ b/mm/memory.c
-@@ -4832,6 +4832,27 @@ static inline void mm_account_fault(stru
+@@ -4834,6 +4834,27 @@ static inline void mm_account_fault(stru
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}
/*
* By the time we get here, we already hold the mm semaphore
*
-@@ -4863,11 +4884,15 @@ vm_fault_t handle_mm_fault(struct vm_are
+@@ -4865,11 +4886,15 @@ vm_fault_t handle_mm_fault(struct vm_are
if (flags & FAULT_FLAG_USER)
mem_cgroup_enter_user_fault();
unsigned long floor, unsigned long ceiling);
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -2798,6 +2798,7 @@ static void commit_charge(struct page *p
+@@ -2803,6 +2803,7 @@ static void commit_charge(struct page *p
* - LRU isolation
* - lock_page_memcg()
* - exclusive reference
/* forking complete and child started to run, tell ptracer */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -5011,6 +5011,7 @@ context_switch(struct rq *rq, struct tas
+@@ -5013,6 +5013,7 @@ context_switch(struct rq *rq, struct tas
* finish_task_switch()'s mmdrop().
*/
switch_mm_irqs_off(prev->active_mm, next->mm, next);
/* will mmdrop() in finish_task_switch(). */
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -6233,6 +6233,30 @@ static void mem_cgroup_move_task(void)
+@@ -6238,6 +6238,30 @@ static void mem_cgroup_move_task(void)
}
#endif
static int seq_puts_memcg_tunable(struct seq_file *m, unsigned long value)
{
if (value == PAGE_COUNTER_MAX)
-@@ -6576,6 +6600,7 @@ struct cgroup_subsys memory_cgrp_subsys
+@@ -6581,6 +6605,7 @@ struct cgroup_subsys memory_cgrp_subsys
.css_reset = mem_cgroup_css_reset,
.css_rstat_flush = mem_cgroup_css_rstat_flush,
.can_attach = mem_cgroup_can_attach,
mark_page_accessed(page);
}
rss[mm_counter(page)]--;
-@@ -4835,8 +4835,8 @@ static inline void mm_account_fault(stru
+@@ -4837,8 +4837,8 @@ static inline void mm_account_fault(stru
#ifdef CONFIG_LRU_GEN
static void lru_gen_enter_fault(struct vm_area_struct *vma)
{
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
-@@ -167,6 +167,8 @@ typedef int (dio_iodone_t)(struct kiocb
+@@ -165,6 +165,8 @@ typedef int (dio_iodone_t)(struct kiocb
/* File is stream-like */
#define FMODE_STREAM ((__force fmode_t)0x200000)
mctz = soft_limit_tree_from_page(page);
if (!mctz)
return;
-@@ -3434,6 +3444,9 @@ unsigned long mem_cgroup_soft_limit_recl
+@@ -3439,6 +3449,9 @@ unsigned long mem_cgroup_soft_limit_recl
unsigned long excess;
unsigned long nr_scanned;
if (order > 0)
return 0;
-@@ -5342,6 +5355,7 @@ static int mem_cgroup_css_online(struct
+@@ -5347,6 +5360,7 @@ static int mem_cgroup_css_online(struct
if (unlikely(mem_cgroup_is_root(memcg)))
queue_delayed_work(system_unbound_wq, &stats_flush_dwork,
2UL*HZ);
return 0;
}
-@@ -5368,6 +5382,7 @@ static void mem_cgroup_css_offline(struc
+@@ -5373,6 +5387,7 @@ static void mem_cgroup_css_offline(struc
memcg_offline_kmem(memcg);
reparent_shrinker_deferred(memcg);
wb_memcg_offline(memcg);
drain_all_stock(memcg);
-@@ -5379,6 +5394,7 @@ static void mem_cgroup_css_released(stru
+@@ -5384,6 +5399,7 @@ static void mem_cgroup_css_released(stru
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
invalidate_reclaim_iterators(memcg);
static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -7705,6 +7705,7 @@ static void __init free_area_init_node(i
+@@ -7706,6 +7706,7 @@ static void __init free_area_init_node(i
pgdat_set_deferred_range(pgdat);
free_area_init_core(pgdat);
ops.mode = MTD_OPS_AUTO_OOB;
--- a/drivers/mtd/nand/onenand/onenand_base.c
+++ b/drivers/mtd/nand/onenand/onenand_base.c
-@@ -2935,7 +2935,7 @@ static int do_otp_write(struct mtd_info
+@@ -2936,7 +2936,7 @@ static int do_otp_write(struct mtd_info
struct onenand_chip *this = mtd->priv;
unsigned char *pbuf = buf;
int ret;
/* Force buffer page aligned */
if (len < mtd->writesize) {
-@@ -2977,7 +2977,7 @@ static int do_otp_lock(struct mtd_info *
+@@ -2978,7 +2978,7 @@ static int do_otp_lock(struct mtd_info *
size_t *retlen, u_char *buf)
{
struct onenand_chip *this = mtd->priv;
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
-@@ -940,12 +940,14 @@ bool __skb_flow_dissect(const struct net
+@@ -949,12 +949,14 @@ bool __skb_flow_dissect(const struct net
#if IS_ENABLED(CONFIG_NET_DSA)
if (unlikely(skb->dev && netdev_uses_dsa(skb->dev) &&
proto == htons(ETH_P_XDSA))) {
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
-@@ -806,7 +806,7 @@ static int mhi_pci_probe(struct pci_dev
+@@ -807,7 +807,7 @@ static int mhi_pci_probe(struct pci_dev
struct mhi_controller *mhi_cntrl;
int err;
/* Specifies the type of the struct net_device::ml_priv pointer */
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -1242,22 +1242,6 @@ int dev_change_name(struct net_device *d
+@@ -1273,22 +1273,6 @@ int dev_change_name(struct net_device *d
net = dev_net(dev);
down_write(&devnet_rename_sem);
if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
-@@ -1274,7 +1258,8 @@ int dev_change_name(struct net_device *d
+@@ -1305,7 +1289,8 @@ int dev_change_name(struct net_device *d
}
if (oldname[0] && !strchr(oldname, '%'))
{
struct nvmem_device *nvmem = cell->nvmem;
int rc;
-@@ -1528,6 +1558,21 @@ int nvmem_cell_write(struct nvmem_cell *
+@@ -1530,6 +1560,21 @@ int nvmem_cell_write(struct nvmem_cell *
return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
-@@ -1630,7 +1675,7 @@ static const void *nvmem_cell_read_varia
+@@ -1632,7 +1677,7 @@ static const void *nvmem_cell_read_varia
if (IS_ERR(cell))
return cell;
buf = nvmem_cell_read(cell, len);
nvmem_cell_put(cell);
if (IS_ERR(buf))
-@@ -1726,18 +1771,18 @@ EXPORT_SYMBOL_GPL(nvmem_cell_read_variab
+@@ -1728,18 +1773,18 @@ EXPORT_SYMBOL_GPL(nvmem_cell_read_variab
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
struct nvmem_cell_info *info, void *buf)
{
if (rc)
return rc;
-@@ -1757,17 +1802,17 @@ EXPORT_SYMBOL_GPL(nvmem_device_cell_read
+@@ -1759,17 +1804,17 @@ EXPORT_SYMBOL_GPL(nvmem_device_cell_read
int nvmem_device_cell_write(struct nvmem_device *nvmem,
struct nvmem_cell_info *info, void *buf)
{
if (rc) {
kfree(buf);
return ERR_PTR(rc);
-@@ -1772,7 +1787,7 @@ ssize_t nvmem_device_cell_read(struct nv
+@@ -1774,7 +1789,7 @@ ssize_t nvmem_device_cell_read(struct nv
if (rc)
return rc;
--- a/drivers/nvmem/qcom-spmi-sdam.c
+++ b/drivers/nvmem/qcom-spmi-sdam.c
-@@ -175,18 +175,7 @@ static struct platform_driver sdam_drive
+@@ -176,18 +176,7 @@ static struct platform_driver sdam_drive
},
.probe = sdam_probe,
};
+ return -EINVAL;
+
if (cell->bit_offset || cell->nbits) {
- buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
- if (IS_ERR(buf))
+ if (len != BITS_TO_BYTES(cell->nbits) && len != cell->bytes)
+ return -EINVAL;
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -54,6 +54,8 @@ struct nvmem_keepout {
sdam->sdam_config.owner = THIS_MODULE;
+ sdam->sdam_config.add_legacy_fixed_of_cells = true;
sdam->sdam_config.stride = 1;
+ sdam->sdam_config.size = sdam->size;
sdam->sdam_config.word_size = 1;
- sdam->sdam_config.reg_read = sdam_read;
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -357,6 +357,7 @@ static int qfprom_probe(struct platform_
help
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3600,6 +3600,11 @@ static int xmit_one(struct sk_buff *skb,
+@@ -3631,6 +3631,11 @@ static int xmit_one(struct sk_buff *skb,
if (dev_nit_active(dev))
dev_queue_xmit_nit(skb, dev);
netif_napi_del(&bgmac->napi);
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
-@@ -390,6 +390,7 @@
+@@ -389,6 +389,7 @@
#define BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII BIT(18)
#define BGMAC_FEAT_CC7_IF_TYPE_RGMII BIT(19)
#define BGMAC_FEAT_IDM_MASK BIT(20)
struct bgmac_slot_info {
union {
-@@ -497,6 +498,9 @@ struct bgmac {
+@@ -496,6 +497,9 @@ struct bgmac {
void (*cmn_maskset32)(struct bgmac *bgmac, u16 offset, u32 mask,
u32 set);
int (*phy_connect)(struct bgmac *bgmac);
#define QUECTEL_VENDOR_ID 0x2c7c
/* These Quectel products use Quectel's vendor ID */
-@@ -1162,6 +1167,11 @@ static const struct usb_device_id option
+@@ -1153,6 +1158,11 @@ static const struct usb_device_id option
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
.driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
/* Quectel products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
-@@ -1203,6 +1213,11 @@ static const struct usb_device_id option
+@@ -1194,6 +1204,11 @@ static const struct usb_device_id option
.driver_info = ZLP },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = RSVD(4) },
}
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
-@@ -388,6 +388,9 @@ static struct pernet_operations ip_rt_pr
+@@ -389,6 +389,9 @@ static struct pernet_operations ip_rt_pr
static int __init ip_rt_proc_init(void)
{
+MODULE_LICENSE("GPL");
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4217,6 +4217,7 @@ int wake_up_state(struct task_struct *p,
+@@ -4219,6 +4219,7 @@ int wake_up_state(struct task_struct *p,
{
return try_to_wake_up(p, state, 0);
}
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -7664,7 +7664,7 @@ static void __init alloc_node_mem_map(st
+@@ -7665,7 +7665,7 @@ static void __init alloc_node_mem_map(st
if (pgdat == NODE_DATA(0)) {
mem_map = NODE_DATA(0)->node_mem_map;
if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
case RTN_THROW:
case RTN_UNREACHABLE:
default:
-@@ -4552,6 +4571,17 @@ static int ip6_pkt_prohibit_out(struct n
+@@ -4557,6 +4576,17 @@ static int ip6_pkt_prohibit_out(struct n
return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
}
/*
* Allocate a dst for local (unicast / anycast) address.
*/
-@@ -5039,7 +5069,8 @@ static int rtm_to_fib6_config(struct sk_
+@@ -5044,7 +5074,8 @@ static int rtm_to_fib6_config(struct sk_
if (rtm->rtm_type == RTN_UNREACHABLE ||
rtm->rtm_type == RTN_BLACKHOLE ||
rtm->rtm_type == RTN_PROHIBIT ||
cfg->fc_flags |= RTF_REJECT;
if (rtm->rtm_type == RTN_LOCAL)
-@@ -6286,6 +6317,8 @@ static int ip6_route_dev_notify(struct n
+@@ -6291,6 +6322,8 @@ static int ip6_route_dev_notify(struct n
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
net->ipv6.ip6_prohibit_entry->dst.dev = dev;
net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
-@@ -6297,6 +6330,7 @@ static int ip6_route_dev_notify(struct n
+@@ -6302,6 +6335,7 @@ static int ip6_route_dev_notify(struct n
in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
#endif
}
-@@ -6488,6 +6522,8 @@ static int __net_init ip6_route_net_init
+@@ -6493,6 +6527,8 @@ static int __net_init ip6_route_net_init
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
net->ipv6.fib6_has_custom_rules = false;
net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
sizeof(*net->ipv6.ip6_prohibit_entry),
GFP_KERNEL);
-@@ -6498,11 +6534,21 @@ static int __net_init ip6_route_net_init
+@@ -6503,11 +6539,21 @@ static int __net_init ip6_route_net_init
ip6_template_metrics, true);
INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->rt6i_uncached);
net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
ip6_template_metrics, true);
-@@ -6529,6 +6575,8 @@ out:
+@@ -6534,6 +6580,8 @@ out:
return ret;
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
-@@ -6548,6 +6596,7 @@ static void __net_exit ip6_route_net_exi
+@@ -6553,6 +6601,7 @@ static void __net_exit ip6_route_net_exi
kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
kfree(net->ipv6.ip6_prohibit_entry);
kfree(net->ipv6.ip6_blk_hole_entry);
#endif
dst_entries_destroy(&net->ipv6.ip6_dst_ops);
-@@ -6631,6 +6680,9 @@ void __init ip6_route_init_special_entri
+@@ -6636,6 +6685,9 @@ void __init ip6_route_init_special_entri
init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
__u8 inner_protocol_type:1;
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -6085,6 +6085,9 @@ static enum gro_result dev_gro_receive(s
+@@ -6116,6 +6116,9 @@ static enum gro_result dev_gro_receive(s
int same_flow;
int grow;
if (netif_elide_gro(skb->dev))
goto normal;
-@@ -8102,6 +8105,48 @@ static void __netdev_adjacent_dev_unlink
+@@ -8133,6 +8136,48 @@ static void __netdev_adjacent_dev_unlink
&upper_dev->adj_list.lower);
}
static int __netdev_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev, bool master,
void *upper_priv, void *upper_info,
-@@ -8153,6 +8198,7 @@ static int __netdev_upper_dev_link(struc
+@@ -8184,6 +8229,7 @@ static int __netdev_upper_dev_link(struc
if (ret)
return ret;
ret = call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
&changeupper_info.info);
ret = notifier_to_errno(ret);
-@@ -8249,6 +8295,7 @@ static void __netdev_upper_dev_unlink(st
+@@ -8280,6 +8326,7 @@ static void __netdev_upper_dev_unlink(st
__netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
&changeupper_info.info);
-@@ -9068,6 +9115,7 @@ int dev_set_mac_address(struct net_devic
+@@ -9099,6 +9146,7 @@ int dev_set_mac_address(struct net_devic
if (err)
return err;
dev->addr_assign_type = NET_ADDR_SET;
}
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
-@@ -455,47 +455,14 @@ static struct nft_expr_type nft_flow_off
+@@ -461,47 +461,14 @@ static struct nft_expr_type nft_flow_off
.owner = THIS_MODULE,
};
/**
* napi_disable - prevent NAPI from scheduling
-@@ -3364,6 +3365,7 @@ struct softnet_data {
+@@ -3372,6 +3373,7 @@ struct softnet_data {
unsigned int processed;
unsigned int time_squeeze;
unsigned int received_rps;
#endif
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4594,7 +4594,7 @@ static int rps_ipi_queued(struct softnet
+@@ -4625,7 +4625,7 @@ static int rps_ipi_queued(struct softnet
#ifdef CONFIG_RPS
struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
sd->rps_ipi_next = mysd->rps_ipi_list;
mysd->rps_ipi_list = sd;
-@@ -5775,6 +5775,8 @@ static DEFINE_PER_CPU(struct work_struct
+@@ -5806,6 +5806,8 @@ static DEFINE_PER_CPU(struct work_struct
/* Network device is going away, flush any packets still pending */
static void flush_backlog(struct work_struct *work)
{
struct sk_buff *skb, *tmp;
struct softnet_data *sd;
-@@ -5790,9 +5792,18 @@ static void flush_backlog(struct work_st
+@@ -5821,9 +5823,18 @@ static void flush_backlog(struct work_st
input_queue_head_incr(sd);
}
}
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->process_queue);
-@@ -5800,7 +5811,18 @@ static void flush_backlog(struct work_st
+@@ -5831,7 +5842,18 @@ static void flush_backlog(struct work_st
input_queue_head_incr(sd);
}
}
}
static bool flush_required(int cpu)
-@@ -6483,6 +6505,7 @@ static int process_backlog(struct napi_s
+@@ -6514,6 +6536,7 @@ static int process_backlog(struct napi_s
local_irq_disable();
rps_lock(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
* Inline a custom version of __napi_complete().
-@@ -6492,7 +6515,8 @@ static int process_backlog(struct napi_s
+@@ -6523,7 +6546,8 @@ static int process_backlog(struct napi_s
* We can use a plain write instead of clear_bit(),
* and we dont need an smp_mb() memory barrier.
*/
again = false;
} else {
skb_queue_splice_tail_init(&sd->input_pkt_queue,
-@@ -6909,6 +6933,57 @@ int dev_set_threaded(struct net_device *
+@@ -6940,6 +6964,57 @@ int dev_set_threaded(struct net_device *
}
EXPORT_SYMBOL(dev_set_threaded);
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight)
{
-@@ -11393,6 +11468,9 @@ static int dev_cpu_dead(unsigned int old
+@@ -11424,6 +11499,9 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
#ifdef CONFIG_RPS
remsd = oldsd->rps_ipi_list;
oldsd->rps_ipi_list = NULL;
-@@ -11732,6 +11810,7 @@ static int __init net_dev_init(void)
+@@ -11763,6 +11841,7 @@ static int __init net_dev_init(void)
sd->cpu = i;
#endif
#ifdef CONFIG_NET_FLOW_LIMIT
static DEFINE_MUTEX(flow_limit_update_mutex);
-@@ -470,6 +488,15 @@ static struct ctl_table net_core_table[]
+@@ -473,6 +491,15 @@ static struct ctl_table net_core_table[]
.proc_handler = rps_sock_flow_sysctl
},
#endif
static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask)
{
u16 cmd;
-@@ -1285,3 +1300,4 @@ static void quirk_usb_early_handoff(stru
+@@ -1294,3 +1309,4 @@ static void quirk_usb_early_handoff(stru
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff);
#endif /* __LINUX_USB_PCI_QUIRKS_H */
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
-@@ -498,7 +498,14 @@ extern int usb_hcd_pci_probe(struct pci_
+@@ -499,7 +499,14 @@ extern int usb_hcd_pci_probe(struct pci_
extern void usb_hcd_pci_remove(struct pci_dev *dev);
extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
-@@ -2447,7 +2447,7 @@ MODULE_DEVICE_TABLE(of, sdhci_msm_dt_mat
+@@ -2496,7 +2496,7 @@ MODULE_DEVICE_TABLE(of, sdhci_msm_dt_mat
static const struct sdhci_ops sdhci_msm_ops = {
.reset = sdhci_msm_reset,
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
-@@ -1751,49 +1751,49 @@ static unsigned int sdhci_msm_get_min_cl
+@@ -1800,49 +1800,49 @@ static unsigned int sdhci_msm_get_min_cl
return SDHCI_MSM_MIN_CLOCK;
}
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
-@@ -2967,6 +2967,22 @@ char *ppp_dev_name(struct ppp_channel *c
+@@ -2977,6 +2977,22 @@ char *ppp_dev_name(struct ppp_channel *c
return name;
}
/*
* Disconnect a channel from the generic layer.
-@@ -3613,6 +3629,7 @@ EXPORT_SYMBOL(ppp_unregister_channel);
+@@ -3623,6 +3639,7 @@ EXPORT_SYMBOL(ppp_unregister_channel);
EXPORT_SYMBOL(ppp_channel_index);
EXPORT_SYMBOL(ppp_unit_number);
EXPORT_SYMBOL(ppp_dev_name);
},
[PORT_NPCM] = {
.name = "Nuvoton 16550",
-@@ -2746,6 +2746,11 @@ serial8250_do_set_termios(struct uart_po
+@@ -2754,6 +2754,11 @@ serial8250_do_set_termios(struct uart_po
unsigned long flags;
unsigned int baud, quot, frac = 0;
#include <linux/platform_data/x86/apple.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
-@@ -6015,3 +6016,34 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I
+@@ -6016,3 +6017,34 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_I
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa73f, dpc_log_size);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0xa76e, dpc_log_size);
#endif
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
-@@ -3225,6 +3225,18 @@ static const struct usb_device_id uvc_id
+@@ -3220,6 +3220,18 @@ static const struct usb_device_id uvc_id
.bInterfaceSubClass = 1,
.bInterfaceProtocol = 0,
.driver_info = UVC_INFO_META(V4L2_META_FMT_D4XX) },
break;
}
-@@ -272,6 +273,7 @@ int uvc_status_init(struct uvc_device *d
+@@ -273,6 +274,7 @@ int uvc_status_init(struct uvc_device *d
}
pipe = usb_rcvintpipe(dev->udev, ep->desc.bEndpointAddress);
/* Format flags */
#define UVC_FMT_FLAG_COMPRESSED 0x00000001
-@@ -701,6 +703,7 @@ struct uvc_device {
+@@ -706,6 +708,7 @@ struct uvc_device {
u8 *status;
struct input_dev *input;
char input_phys[64];