git.ipfire.org Git - thirdparty/linux.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
author Jakub Kicinski <kuba@kernel.org>
Fri, 12 Sep 2025 00:37:09 +0000 (17:37 -0700)
committer Jakub Kicinski <kuba@kernel.org>
Thu, 25 Sep 2025 18:00:59 +0000 (11:00 -0700)
Cross-merge networking fixes after downstream PR (net-6.17-rc8).

Conflicts:

drivers/net/can/spi/hi311x.c
  6b6968084721 ("can: hi311x: fix null pointer dereference when resuming from sleep before interface was enabled")
  27ce71e1ce81 ("net: WQ_PERCPU added to alloc_workqueue users")
https://lore.kernel.org/72ce7599-1b5b-464a-a5de-228ff9724701@kernel.org

net/smc/smc_loopback.c
drivers/dibs/dibs_loopback.c
  a35c04de2565 ("net/smc: fix warning in smc_rx_splice() when calling get_page()")
  cc21191b584c ("dibs: Move data path to dibs layer")
https://lore.kernel.org/74368a5c-48ac-4f8e-a198-40ec1ed3cf5f@kernel.org

Adjacent changes:

drivers/net/dsa/lantiq/lantiq_gswip.c
  c0054b25e2f1 ("net: dsa: lantiq_gswip: move gswip_add_single_port_br() call to port_setup()")
  7a1eaef0a791 ("net: dsa: lantiq_gswip: support model-specific mac_select_pcs()")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
12 files changed:
1  2 
MAINTAINERS
drivers/dibs/dibs_loopback.c
drivers/net/can/spi/hi311x.c
drivers/net/dsa/lantiq/lantiq_gswip.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/marvell/octeontx2/af/cgx.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
drivers/net/tun.c
include/uapi/linux/ptp_clock.h
net/core/skbuff.c
net/ipv4/nexthop.c

diff --cc MAINTAINERS
Simple merge
index b3fd0f8100d46d2dc3640d5ea2c9dfcfa616a33d,0000000000000000000000000000000000000000..aa029e29c6b2cbb782fc6b7657bc4321168c30d6
mode 100644,000000..100644
--- /dev/null
@@@ -1,356 -1,0 +1,361 @@@
-       dmb_node->cpu_addr = kzalloc(dmb_node->len, GFP_KERNEL |
-                                    __GFP_NOWARN | __GFP_NORETRY |
-                                    __GFP_NOMEMALLOC);
-       if (!dmb_node->cpu_addr) {
 +// SPDX-License-Identifier: GPL-2.0
 +/*
 + *  Functions for dibs loopback/loopback-ism device.
 + *
 + *  Copyright (c) 2024, Alibaba Inc.
 + *
 + *  Author: Wen Gu <guwen@linux.alibaba.com>
 + *          Tony Lu <tonylu@linux.alibaba.com>
 + *
 + */
 +
 +#include <linux/bitops.h>
 +#include <linux/device.h>
 +#include <linux/dibs.h>
++#include <linux/mm.h>
 +#include <linux/slab.h>
 +#include <linux/spinlock.h>
 +#include <linux/types.h>
 +
 +#include "dibs_loopback.h"
 +
 +#define DIBS_LO_SUPPORT_NOCOPY        0x1
 +#define DIBS_DMA_ADDR_INVALID (~(dma_addr_t)0)
 +
 +static const char dibs_lo_dev_name[] = "lo";
 +/* global loopback device */
 +static struct dibs_lo_dev *lo_dev;
 +
 +static u16 dibs_lo_get_fabric_id(struct dibs_dev *dibs)
 +{
 +      return DIBS_LOOPBACK_FABRIC;
 +}
 +
 +static int dibs_lo_query_rgid(struct dibs_dev *dibs, const uuid_t *rgid,
 +                            u32 vid_valid, u32 vid)
 +{
 +      /* rgid should be the same as lgid */
 +      if (!uuid_equal(rgid, &dibs->gid))
 +              return -ENETUNREACH;
 +      return 0;
 +}
 +
 +static int dibs_lo_max_dmbs(void)
 +{
 +      return DIBS_LO_MAX_DMBS;
 +}
 +
 +static int dibs_lo_register_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb,
 +                              struct dibs_client *client)
 +{
 +      struct dibs_lo_dmb_node *dmb_node, *tmp_node;
 +      struct dibs_lo_dev *ldev;
++      struct folio *folio;
 +      unsigned long flags;
 +      int sba_idx, rc;
 +
 +      ldev = dibs->drv_priv;
 +      sba_idx = dmb->idx;
 +      /* check space for new dmb */
 +      for_each_clear_bit(sba_idx, ldev->sba_idx_mask, DIBS_LO_MAX_DMBS) {
 +              if (!test_and_set_bit(sba_idx, ldev->sba_idx_mask))
 +                      break;
 +      }
 +      if (sba_idx == DIBS_LO_MAX_DMBS)
 +              return -ENOSPC;
 +
 +      dmb_node = kzalloc(sizeof(*dmb_node), GFP_KERNEL);
 +      if (!dmb_node) {
 +              rc = -ENOMEM;
 +              goto err_bit;
 +      }
 +
 +      dmb_node->sba_idx = sba_idx;
 +      dmb_node->len = dmb->dmb_len;
-       kfree(dmb_node->cpu_addr);
++
++      /* not critical; fail under memory pressure and fallback to TCP */
++      folio = folio_alloc(GFP_KERNEL | __GFP_NOWARN | __GFP_NOMEMALLOC |
++                          __GFP_NORETRY | __GFP_ZERO,
++                          get_order(dmb_node->len));
++      if (!folio) {
 +              rc = -ENOMEM;
 +              goto err_node;
 +      }
++      dmb_node->cpu_addr = folio_address(folio);
 +      dmb_node->dma_addr = DIBS_DMA_ADDR_INVALID;
 +      refcount_set(&dmb_node->refcnt, 1);
 +
 +again:
 +      /* add new dmb into hash table */
 +      get_random_bytes(&dmb_node->token, sizeof(dmb_node->token));
 +      write_lock_bh(&ldev->dmb_ht_lock);
 +      hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_node->token) {
 +              if (tmp_node->token == dmb_node->token) {
 +                      write_unlock_bh(&ldev->dmb_ht_lock);
 +                      goto again;
 +              }
 +      }
 +      hash_add(ldev->dmb_ht, &dmb_node->list, dmb_node->token);
 +      write_unlock_bh(&ldev->dmb_ht_lock);
 +      atomic_inc(&ldev->dmb_cnt);
 +
 +      dmb->idx = dmb_node->sba_idx;
 +      dmb->dmb_tok = dmb_node->token;
 +      dmb->cpu_addr = dmb_node->cpu_addr;
 +      dmb->dma_addr = dmb_node->dma_addr;
 +      dmb->dmb_len = dmb_node->len;
 +
 +      spin_lock_irqsave(&dibs->lock, flags);
 +      dibs->dmb_clientid_arr[sba_idx] = client->id;
 +      spin_unlock_irqrestore(&dibs->lock, flags);
 +
 +      return 0;
 +
 +err_node:
 +      kfree(dmb_node);
 +err_bit:
 +      clear_bit(sba_idx, ldev->sba_idx_mask);
 +      return rc;
 +}
 +
 +static void __dibs_lo_unregister_dmb(struct dibs_lo_dev *ldev,
 +                                   struct dibs_lo_dmb_node *dmb_node)
 +{
 +      /* remove dmb from hash table */
 +      write_lock_bh(&ldev->dmb_ht_lock);
 +      hash_del(&dmb_node->list);
 +      write_unlock_bh(&ldev->dmb_ht_lock);
 +
 +      clear_bit(dmb_node->sba_idx, ldev->sba_idx_mask);
++      folio_put(virt_to_folio(dmb_node->cpu_addr));
 +      kfree(dmb_node);
 +
 +      if (atomic_dec_and_test(&ldev->dmb_cnt))
 +              wake_up(&ldev->ldev_release);
 +}
 +
 +static int dibs_lo_unregister_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb)
 +{
 +      struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
 +      struct dibs_lo_dev *ldev;
 +      unsigned long flags;
 +
 +      ldev = dibs->drv_priv;
 +
 +      /* find dmb from hash table */
 +      read_lock_bh(&ldev->dmb_ht_lock);
 +      hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
 +              if (tmp_node->token == dmb->dmb_tok) {
 +                      dmb_node = tmp_node;
 +                      break;
 +              }
 +      }
 +      read_unlock_bh(&ldev->dmb_ht_lock);
 +      if (!dmb_node)
 +              return -EINVAL;
 +
 +      if (refcount_dec_and_test(&dmb_node->refcnt)) {
 +              spin_lock_irqsave(&dibs->lock, flags);
 +              dibs->dmb_clientid_arr[dmb_node->sba_idx] = NO_DIBS_CLIENT;
 +              spin_unlock_irqrestore(&dibs->lock, flags);
 +
 +              __dibs_lo_unregister_dmb(ldev, dmb_node);
 +      }
 +      return 0;
 +}
 +
 +static int dibs_lo_support_dmb_nocopy(struct dibs_dev *dibs)
 +{
 +      return DIBS_LO_SUPPORT_NOCOPY;
 +}
 +
 +static int dibs_lo_attach_dmb(struct dibs_dev *dibs, struct dibs_dmb *dmb)
 +{
 +      struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
 +      struct dibs_lo_dev *ldev;
 +
 +      ldev = dibs->drv_priv;
 +
 +      /* find dmb_node according to dmb->dmb_tok */
 +      read_lock_bh(&ldev->dmb_ht_lock);
 +      hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb->dmb_tok) {
 +              if (tmp_node->token == dmb->dmb_tok) {
 +                      dmb_node = tmp_node;
 +                      break;
 +              }
 +      }
 +      if (!dmb_node) {
 +              read_unlock_bh(&ldev->dmb_ht_lock);
 +              return -EINVAL;
 +      }
 +      read_unlock_bh(&ldev->dmb_ht_lock);
 +
 +      if (!refcount_inc_not_zero(&dmb_node->refcnt))
 +              /* the dmb is being unregistered, but has
 +               * not been removed from the hash table.
 +               */
 +              return -EINVAL;
 +
 +      /* provide dmb information */
 +      dmb->idx = dmb_node->sba_idx;
 +      dmb->dmb_tok = dmb_node->token;
 +      dmb->cpu_addr = dmb_node->cpu_addr;
 +      dmb->dma_addr = dmb_node->dma_addr;
 +      dmb->dmb_len = dmb_node->len;
 +      return 0;
 +}
 +
 +static int dibs_lo_detach_dmb(struct dibs_dev *dibs, u64 token)
 +{
 +      struct dibs_lo_dmb_node *dmb_node = NULL, *tmp_node;
 +      struct dibs_lo_dev *ldev;
 +
 +      ldev = dibs->drv_priv;
 +
 +      /* find dmb_node according to dmb->dmb_tok */
 +      read_lock_bh(&ldev->dmb_ht_lock);
 +      hash_for_each_possible(ldev->dmb_ht, tmp_node, list, token) {
 +              if (tmp_node->token == token) {
 +                      dmb_node = tmp_node;
 +                      break;
 +              }
 +      }
 +      if (!dmb_node) {
 +              read_unlock_bh(&ldev->dmb_ht_lock);
 +              return -EINVAL;
 +      }
 +      read_unlock_bh(&ldev->dmb_ht_lock);
 +
 +      if (refcount_dec_and_test(&dmb_node->refcnt))
 +              __dibs_lo_unregister_dmb(ldev, dmb_node);
 +      return 0;
 +}
 +
 +static int dibs_lo_move_data(struct dibs_dev *dibs, u64 dmb_tok,
 +                           unsigned int idx, bool sf, unsigned int offset,
 +                           void *data, unsigned int size)
 +{
 +      struct dibs_lo_dmb_node *rmb_node = NULL, *tmp_node;
 +      struct dibs_lo_dev *ldev;
 +      u16 s_mask;
 +      u8 client_id;
 +      u32 sba_idx;
 +
 +      ldev = dibs->drv_priv;
 +
 +      read_lock_bh(&ldev->dmb_ht_lock);
 +      hash_for_each_possible(ldev->dmb_ht, tmp_node, list, dmb_tok) {
 +              if (tmp_node->token == dmb_tok) {
 +                      rmb_node = tmp_node;
 +                      break;
 +              }
 +      }
 +      if (!rmb_node) {
 +              read_unlock_bh(&ldev->dmb_ht_lock);
 +              return -EINVAL;
 +      }
 +      memcpy((char *)rmb_node->cpu_addr + offset, data, size);
 +      sba_idx = rmb_node->sba_idx;
 +      read_unlock_bh(&ldev->dmb_ht_lock);
 +
 +      if (!sf)
 +              return 0;
 +
 +      spin_lock(&dibs->lock);
 +      client_id = dibs->dmb_clientid_arr[sba_idx];
 +      s_mask = ror16(0x1000, idx);
 +      if (likely(client_id != NO_DIBS_CLIENT && dibs->subs[client_id]))
 +              dibs->subs[client_id]->ops->handle_irq(dibs, sba_idx, s_mask);
 +      spin_unlock(&dibs->lock);
 +
 +      return 0;
 +}
 +
 +static const struct dibs_dev_ops dibs_lo_ops = {
 +      .get_fabric_id = dibs_lo_get_fabric_id,
 +      .query_remote_gid = dibs_lo_query_rgid,
 +      .max_dmbs = dibs_lo_max_dmbs,
 +      .register_dmb = dibs_lo_register_dmb,
 +      .unregister_dmb = dibs_lo_unregister_dmb,
 +      .move_data = dibs_lo_move_data,
 +      .support_mmapped_rdmb = dibs_lo_support_dmb_nocopy,
 +      .attach_dmb = dibs_lo_attach_dmb,
 +      .detach_dmb = dibs_lo_detach_dmb,
 +};
 +
 +static void dibs_lo_dev_init(struct dibs_lo_dev *ldev)
 +{
 +      rwlock_init(&ldev->dmb_ht_lock);
 +      hash_init(ldev->dmb_ht);
 +      atomic_set(&ldev->dmb_cnt, 0);
 +      init_waitqueue_head(&ldev->ldev_release);
 +}
 +
 +static void dibs_lo_dev_exit(struct dibs_lo_dev *ldev)
 +{
 +      if (atomic_read(&ldev->dmb_cnt))
 +              wait_event(ldev->ldev_release, !atomic_read(&ldev->dmb_cnt));
 +}
 +
 +static int dibs_lo_dev_probe(void)
 +{
 +      struct dibs_lo_dev *ldev;
 +      struct dibs_dev *dibs;
 +      int ret;
 +
 +      ldev = kzalloc(sizeof(*ldev), GFP_KERNEL);
 +      if (!ldev)
 +              return -ENOMEM;
 +
 +      dibs = dibs_dev_alloc();
 +      if (!dibs) {
 +              kfree(ldev);
 +              return -ENOMEM;
 +      }
 +
 +      ldev->dibs = dibs;
 +      dibs->drv_priv = ldev;
 +      dibs_lo_dev_init(ldev);
 +      uuid_gen(&dibs->gid);
 +      dibs->ops = &dibs_lo_ops;
 +
 +      dibs->dev.parent = NULL;
 +      dev_set_name(&dibs->dev, "%s", dibs_lo_dev_name);
 +
 +      ret = dibs_dev_add(dibs);
 +      if (ret)
 +              goto err_reg;
 +      lo_dev = ldev;
 +      return 0;
 +
 +err_reg:
 +      kfree(dibs->dmb_clientid_arr);
 +      /* pairs with dibs_dev_alloc() */
 +      put_device(&dibs->dev);
 +      kfree(ldev);
 +
 +      return ret;
 +}
 +
 +static void dibs_lo_dev_remove(void)
 +{
 +      if (!lo_dev)
 +              return;
 +
 +      dibs_dev_del(lo_dev->dibs);
 +      dibs_lo_dev_exit(lo_dev);
 +      /* pairs with dibs_dev_alloc() */
 +      put_device(&lo_dev->dibs->dev);
 +      kfree(lo_dev);
 +      lo_dev = NULL;
 +}
 +
 +int dibs_loopback_init(void)
 +{
 +      return dibs_lo_dev_probe();
 +}
 +
 +void dibs_loopback_exit(void)
 +{
 +      dibs_lo_dev_remove();
 +}
index 96f23311b4ee35bbb1f0e316a5c838d79d15e087,963ea8510dd9bf7bc7d4fd86124dc5d3ef894f7b..6d4b643e135fdbd1d054a121c2a71090478b2c9c
@@@ -909,6 -896,15 +896,16 @@@ static int hi3110_can_probe(struct spi_
        if (ret)
                goto out_clk;
  
 -      priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
++      priv->wq = alloc_workqueue("hi3110_wq",
++                                 WQ_FREEZABLE | WQ_MEM_RECLAIM | WQ_PERCPU,
+                                  0);
+       if (!priv->wq) {
+               ret = -ENOMEM;
+               goto out_clk;
+       }
+       INIT_WORK(&priv->tx_work, hi3110_tx_work_handler);
+       INIT_WORK(&priv->restart_work, hi3110_restart_work_handler);
        priv->spi = spi;
        mutex_init(&priv->hi3110_lock);
  
index 1e991d7bca0bb27a88be82145fb3a12cb09cbecd,84dc6e517acf9416f850b079f43e816ac1628c28..2169c0814a48b3d5f5d66481e3566cd3d63f84e0
@@@ -1614,28 -1830,16 +1624,29 @@@ static int gswip_get_sset_count(struct 
        return ARRAY_SIZE(gswip_rmon_cnt);
  }
  
 +static struct phylink_pcs *gswip_phylink_mac_select_pcs(struct phylink_config *config,
 +                                                      phy_interface_t interface)
 +{
 +      struct dsa_port *dp = dsa_phylink_to_port(config);
 +      struct gswip_priv *priv = dp->ds->priv;
 +
 +      if (priv->hw_info->mac_select_pcs)
 +              return priv->hw_info->mac_select_pcs(config, interface);
 +
 +      return NULL;
 +}
 +
  static const struct phylink_mac_ops gswip_phylink_mac_ops = {
 -      .mac_config     = gswip_phylink_mac_config,
 -      .mac_link_down  = gswip_phylink_mac_link_down,
 -      .mac_link_up    = gswip_phylink_mac_link_up,
 +      .mac_config             = gswip_phylink_mac_config,
 +      .mac_link_down          = gswip_phylink_mac_link_down,
 +      .mac_link_up            = gswip_phylink_mac_link_up,
 +      .mac_select_pcs         = gswip_phylink_mac_select_pcs,
  };
  
 -static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
 +static const struct dsa_switch_ops gswip_switch_ops = {
        .get_tag_protocol       = gswip_get_tag_protocol,
        .setup                  = gswip_setup,
+       .port_setup             = gswip_port_setup,
        .port_enable            = gswip_port_enable,
        .port_disable           = gswip_port_disable,
        .port_bridge_join       = gswip_port_bridge_join,
Simple merge
Simple merge
Simple merge
Simple merge