From: Jakub Kicinski
Date: Thu, 3 Dec 2020 23:42:13 +0000 (-0800)
Subject: Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
X-Git-Tag: v5.11-rc1~169^2~125
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=55fd59b003f6e8fd88cf16590e79823d7ccf3026;p=thirdparty%2Flinux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Conflicts:
	drivers/net/ethernet/ibm/ibmvnic.c

Signed-off-by: Jakub Kicinski
---

55fd59b003f6e8fd88cf16590e79823d7ccf3026
diff --cc drivers/net/can/m_can/tcan4x5x.c
index 483a78dca17e8,7347ab39c5b65..a0fecc3fb8290
--- a/drivers/net/can/m_can/tcan4x5x.c
+++ b/drivers/net/can/m_can/tcan4x5x.c
@@@ -490,9 -500,9 +490,9 @@@ static int tcan4x5x_can_probe(struct sp
  	ret = tcan4x5x_power_enable(priv->power, 1);
  	if (ret)
- 		goto out_clk;
+ 		goto out_m_can_class_free_dev;
  
 -	ret = tcan4x5x_parse_config(mcan_class);
 +	ret = tcan4x5x_get_gpios(mcan_class);
  	if (ret)
  		goto out_power;
@@@ -509,13 -519,10 +509,8 @@@ out_power:
  	tcan4x5x_power_enable(priv->power, 0);
  
- out_clk:
- 	if (!IS_ERR(mcan_class->cclk)) {
- 		clk_disable_unprepare(mcan_class->cclk);
- 		clk_disable_unprepare(mcan_class->hclk);
- 	}
  out_m_can_class_free_dev:
  	m_can_class_free_dev(mcan_class->net);
 -	dev_err(&spi->dev, "Probe failed, err=%d\n", ret);
 -
  	return ret;
  }
diff --cc drivers/net/ethernet/ibm/ibmvnic.c
index cdd1ff9aa9c45,da9450f187176..cf55c5ea07cb1
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@@ -2489,9 -2402,15 +2491,15 @@@ restart_poll
  			return frames_processed;
  		}
  
 -		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
 +		if (!pending_scrq(adapter, rx_scrq))
  			break;
  
+ 		/* The queue entry at the current index is peeked at above
+ 		 * to determine that there is a valid descriptor awaiting
+ 		 * processing. We want to be sure that the current slot
+ 		 * holds a valid descriptor before reading its contents.
+ 		 */
+ 		dma_rmb();
 -		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
 +		next = ibmvnic_next_scrq(adapter, rx_scrq);
  		rx_buff = (struct ibmvnic_rx_buff *)be64_to_cpu(next->
  							  rx_comp.correlator);
@@@ -2959,11 -2880,14 +2974,16 @@@ static int reset_one_sub_crq_queue(stru
  		irq_dispose_mapping(scrq->irq);
  		scrq->irq = 0;
  	}
 +
- 	memset(scrq->msgs, 0, 4 * PAGE_SIZE);
- 	atomic_set(&scrq->used, 0);
- 	scrq->cur = 0;
- 	scrq->ind_buf.index = 0;
+ 	if (scrq->msgs) {
+ 		memset(scrq->msgs, 0, 4 * PAGE_SIZE);
+ 		atomic_set(&scrq->used, 0);
+ 		scrq->cur = 0;
++		scrq->ind_buf.index = 0;
+ 	} else {
+ 		netdev_dbg(adapter->netdev, "Invalid scrq reset\n");
+ 		return -EINVAL;
+ 	}
  
  	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
  			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
@@@ -3217,16 -3118,19 +3237,21 @@@ restart_loop
  	while (pending_scrq(adapter, scrq)) {
  		unsigned int pool = scrq->pool_index;
  		int num_entries = 0;
 +		int total_bytes = 0;
 +		int num_packets = 0;
  
+ 		/* The queue entry at the current index is peeked at above
+ 		 * to determine that there is a valid descriptor awaiting
+ 		 * processing. We want to be sure that the current slot
+ 		 * holds a valid descriptor before reading its contents.
+ 		 */
+ 		dma_rmb();
+ 
  		next = ibmvnic_next_scrq(adapter, scrq);
  		for (i = 0; i < next->tx_comp.num_comps; i++) {
- 			if (next->tx_comp.rcs[i]) {
+ 			if (next->tx_comp.rcs[i])
  				dev_err(dev, "tx error %x\n",
  					next->tx_comp.rcs[i]);
- 				continue;
- 			}
  			index = be32_to_cpu(next->tx_comp.correlators[i]);
  			if (index & IBMVNIC_TSO_POOL_MASK) {
  				tx_pool = &adapter->tso_pool[pool];
diff --cc drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index ebc879052e42b,51bbd88ff021c..ba65ec406cfab
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@@ -92,8 -92,9 +92,9 @@@ int mlx5dr_cmd_query_device(struct mlx5
  	caps->eswitch_manager = MLX5_CAP_GEN(mdev, eswitch_manager);
  	caps->gvmi = MLX5_CAP_GEN(mdev, vhca_id);
  	caps->flex_protocols = MLX5_CAP_GEN(mdev, flex_parser_protocols);
+ 	caps->sw_format_ver = MLX5_CAP_GEN(mdev, steering_format_version);
  
 -	if (mlx5dr_matcher_supp_flex_parser_icmp_v4(caps)) {
 +	if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
  		caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
  		caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
  	}
diff --cc include/linux/mlx5/mlx5_ifc.h
index 82fa5e9f43227,233352447b1a4..0d6e287d614fb
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@@ -1248,17 -1223,13 +1248,22 @@@ enum mlx5_fc_bulk_alloc_bitmask
  #define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
  
+ enum {
+ 	MLX5_STEERING_FORMAT_CONNECTX_5 = 0,
+ 	MLX5_STEERING_FORMAT_CONNECTX_6DX = 1,
+ };
+ 
  struct mlx5_ifc_cmd_hca_cap_bits {
 -	u8 reserved_at_0[0x30];
 +	u8 reserved_at_0[0x1f];
 +	u8 vhca_resource_manager[0x1];
 +
 +	u8 reserved_at_20[0x3];
 +	u8 event_on_vhca_state_teardown_request[0x1];
 +	u8 event_on_vhca_state_in_use[0x1];
 +	u8 event_on_vhca_state_active[0x1];
 +	u8 event_on_vhca_state_allocated[0x1];
 +	u8 event_on_vhca_state_invalid[0x1];
 +	u8 reserved_at_28[0x8];
  	u8 vhca_id[0x10];
  
  	u8 reserved_at_40[0x40];