--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com>
+Date: Fri, 5 Jun 2015 15:33:59 +0530
+Subject: be2net: Replace dma/pci_alloc_coherent() calls with dma_zalloc_coherent()
+
+From: Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com>
+
+[ Upstream commit e51000db4c880165eab06ec0990605f24e75203f ]
+
+There are several places in the driver (all in control paths) where
+coherent dma memory is being allocated using either dma_alloc_coherent()
+or the deprecated pci_alloc_consistent(). All these calls should be
+changed to use dma_zalloc_coherent() to avoid uninitialized fields in
+data structures backed by this memory.
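+
+Schematically, each call site changes like this (the GFP flag varies per
+call site, as in the hunks below):
+
+	/* before: memory contents undefined */
+	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+
+	/* after: zeroed on allocation */
+	cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+				     GFP_ATOMIC);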
+
+Reported-by: Joerg Roedel <jroedel@suse.de>
+Tested-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/emulex/benet/be_cmds.c | 87 ++++++++++++++-----------
+ drivers/net/ethernet/emulex/benet/be_ethtool.c | 18 ++---
+ drivers/net/ethernet/emulex/benet/be_main.c | 16 ++--
+ 3 files changed, 68 insertions(+), 53 deletions(-)
+
+--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
++++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
+@@ -1773,9 +1773,9 @@ int be_cmd_get_regs(struct be_adapter *a
+ total_size = buf_len;
+
+ get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
+- get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
+- get_fat_cmd.size,
+- &get_fat_cmd.dma);
++ get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
++ get_fat_cmd.size,
++ &get_fat_cmd.dma, GFP_ATOMIC);
+ if (!get_fat_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure while reading FAT data\n");
+@@ -1820,8 +1820,8 @@ int be_cmd_get_regs(struct be_adapter *a
+ log_offset += buf_size;
+ }
+ err:
+- pci_free_consistent(adapter->pdev, get_fat_cmd.size,
+- get_fat_cmd.va, get_fat_cmd.dma);
++ dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
++ get_fat_cmd.va, get_fat_cmd.dma);
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+ }
+@@ -2272,12 +2272,12 @@ int be_cmd_read_port_transceiver_data(st
+ return -EINVAL;
+
+ cmd.size = sizeof(struct be_cmd_resp_port_type);
+- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
++ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
++ GFP_ATOMIC);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+- memset(cmd.va, 0, cmd.size);
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+@@ -2302,7 +2302,7 @@ int be_cmd_read_port_transceiver_data(st
+ }
+ err:
+ spin_unlock_bh(&adapter->mcc_lock);
+- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
++ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+ return status;
+ }
+
+@@ -2777,7 +2777,8 @@ int be_cmd_get_phy_info(struct be_adapte
+ goto err;
+ }
+ cmd.size = sizeof(struct be_cmd_req_get_phy_info);
+- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
++ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
++ GFP_ATOMIC);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ status = -ENOMEM;
+@@ -2811,7 +2812,7 @@ int be_cmd_get_phy_info(struct be_adapte
+ BE_SUPPORTED_SPEED_1GBPS;
+ }
+ }
+- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
++ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+ err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+@@ -2862,8 +2863,9 @@ int be_cmd_get_cntl_attributes(struct be
+
+ memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
+ attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
+- attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
+- &attribs_cmd.dma);
++ attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
++ attribs_cmd.size,
++ &attribs_cmd.dma, GFP_ATOMIC);
+ if (!attribs_cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
+ status = -ENOMEM;
+@@ -2890,8 +2892,8 @@ int be_cmd_get_cntl_attributes(struct be
+ err:
+ mutex_unlock(&adapter->mbox_lock);
+ if (attribs_cmd.va)
+- pci_free_consistent(adapter->pdev, attribs_cmd.size,
+- attribs_cmd.va, attribs_cmd.dma);
++ dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
++ attribs_cmd.va, attribs_cmd.dma);
+ return status;
+ }
+
+@@ -3029,9 +3031,10 @@ int be_cmd_get_mac_from_list(struct be_a
+
+ memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
+ get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
+- get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
+- get_mac_list_cmd.size,
+- &get_mac_list_cmd.dma);
++ get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
++ get_mac_list_cmd.size,
++ &get_mac_list_cmd.dma,
++ GFP_ATOMIC);
+
+ if (!get_mac_list_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+@@ -3104,8 +3107,8 @@ int be_cmd_get_mac_from_list(struct be_a
+
+ out:
+ spin_unlock_bh(&adapter->mcc_lock);
+- pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
+- get_mac_list_cmd.va, get_mac_list_cmd.dma);
++ dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
++ get_mac_list_cmd.va, get_mac_list_cmd.dma);
+ return status;
+ }
+
+@@ -3158,8 +3161,8 @@ int be_cmd_set_mac_list(struct be_adapte
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_req_set_mac_list);
+- cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
+- &cmd.dma, GFP_KERNEL);
++ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
++ GFP_KERNEL);
+ if (!cmd.va)
+ return -ENOMEM;
+
+@@ -3348,7 +3351,8 @@ int be_cmd_get_acpi_wol_cap(struct be_ad
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
+- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
++ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
++ GFP_ATOMIC);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
+ status = -ENOMEM;
+@@ -3383,7 +3387,8 @@ int be_cmd_get_acpi_wol_cap(struct be_ad
+ err:
+ mutex_unlock(&adapter->mbox_lock);
+ if (cmd.va)
+- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
++ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
++ cmd.dma);
+ return status;
+
+ }
+@@ -3397,8 +3402,9 @@ int be_cmd_set_fw_log_level(struct be_ad
+
+ memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
+ extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
+- extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
+- &extfat_cmd.dma);
++ extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
++ extfat_cmd.size, &extfat_cmd.dma,
++ GFP_ATOMIC);
+ if (!extfat_cmd.va)
+ return -ENOMEM;
+
+@@ -3420,8 +3426,8 @@ int be_cmd_set_fw_log_level(struct be_ad
+
+ status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
+ err:
+- pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
+- extfat_cmd.dma);
++ dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
++ extfat_cmd.dma);
+ return status;
+ }
+
+@@ -3434,8 +3440,9 @@ int be_cmd_get_fw_log_level(struct be_ad
+
+ memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
+ extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
+- extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
+- &extfat_cmd.dma);
++ extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
++ extfat_cmd.size, &extfat_cmd.dma,
++ GFP_ATOMIC);
+
+ if (!extfat_cmd.va) {
+ dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
+@@ -3453,8 +3460,8 @@ int be_cmd_get_fw_log_level(struct be_ad
+ level = cfgs->module[0].trace_lvl[j].dbg_lvl;
+ }
+ }
+- pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
+- extfat_cmd.dma);
++ dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
++ extfat_cmd.dma);
+ err:
+ return level;
+ }
+@@ -3652,7 +3659,8 @@ int be_cmd_get_func_config(struct be_ada
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_resp_get_func_config);
+- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
++ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
++ GFP_ATOMIC);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ status = -ENOMEM;
+@@ -3692,7 +3700,8 @@ int be_cmd_get_func_config(struct be_ada
+ err:
+ mutex_unlock(&adapter->mbox_lock);
+ if (cmd.va)
+- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
++ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
++ cmd.dma);
+ return status;
+ }
+
+@@ -3713,7 +3722,8 @@ int be_cmd_get_profile_config(struct be_
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
+- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
++ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
++ GFP_ATOMIC);
+ if (!cmd.va)
+ return -ENOMEM;
+
+@@ -3752,7 +3762,8 @@ int be_cmd_get_profile_config(struct be_
+ res->vf_if_cap_flags = vf_res->cap_flags;
+ err:
+ if (cmd.va)
+- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
++ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
++ cmd.dma);
+ return status;
+ }
+
+@@ -3767,7 +3778,8 @@ static int be_cmd_set_profile_config(str
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_req_set_profile_config);
+- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
++ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
++ GFP_ATOMIC);
+ if (!cmd.va)
+ return -ENOMEM;
+
+@@ -3783,7 +3795,8 @@ static int be_cmd_set_profile_config(str
+ status = be_cmd_notify_wait(adapter, &wrb);
+
+ if (cmd.va)
+- pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
++ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
++ cmd.dma);
+ return status;
+ }
+
+--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
++++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
+@@ -264,8 +264,8 @@ static int lancer_cmd_read_file(struct b
+ int status = 0;
+
+ read_cmd.size = LANCER_READ_FILE_CHUNK;
+- read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
+- &read_cmd.dma);
++ read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
++ &read_cmd.dma, GFP_ATOMIC);
+
+ if (!read_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+@@ -289,8 +289,8 @@ static int lancer_cmd_read_file(struct b
+ break;
+ }
+ }
+- pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
+- read_cmd.dma);
++ dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va,
++ read_cmd.dma);
+
+ return status;
+ }
+@@ -818,8 +818,9 @@ static int be_test_ddr_dma(struct be_ada
+ };
+
+ ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
+- ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
+- &ddrdma_cmd.dma, GFP_KERNEL);
++ ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
++ ddrdma_cmd.size, &ddrdma_cmd.dma,
++ GFP_KERNEL);
+ if (!ddrdma_cmd.va)
+ return -ENOMEM;
+
+@@ -941,8 +942,9 @@ static int be_read_eeprom(struct net_dev
+
+ memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
+ eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
+- eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
+- &eeprom_cmd.dma, GFP_KERNEL);
++ eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
++ eeprom_cmd.size, &eeprom_cmd.dma,
++ GFP_KERNEL);
+
+ if (!eeprom_cmd.va)
+ return -ENOMEM;
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -4392,8 +4392,8 @@ static int lancer_fw_download(struct be_
+
+ flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
+ + LANCER_FW_DOWNLOAD_CHUNK;
+- flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
+- &flash_cmd.dma, GFP_KERNEL);
++ flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
++ &flash_cmd.dma, GFP_KERNEL);
+ if (!flash_cmd.va)
+ return -ENOMEM;
+
+@@ -4526,8 +4526,8 @@ static int be_fw_download(struct be_adap
+ }
+
+ flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
+- flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+- GFP_KERNEL);
++ flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
++ GFP_KERNEL);
+ if (!flash_cmd.va)
+ return -ENOMEM;
+
+@@ -4941,10 +4941,10 @@ static int be_ctrl_init(struct be_adapte
+ goto done;
+
+ mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
+- mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
+- mbox_mem_alloc->size,
+- &mbox_mem_alloc->dma,
+- GFP_KERNEL);
++ mbox_mem_alloc->va = dma_zalloc_coherent(&adapter->pdev->dev,
++ mbox_mem_alloc->size,
++ &mbox_mem_alloc->dma,
++ GFP_KERNEL);
+ if (!mbox_mem_alloc->va) {
+ status = -ENOMEM;
+ goto unmap_pci_bars;
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
+Date: Sat, 6 Jun 2015 06:49:00 -0700
+Subject: bridge: disable softirqs around br_fdb_update to avoid lockup
+
+From: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
+
+[ Upstream commit c4c832f89dc468cf11dc0dd17206bace44526651 ]
+
+br_fdb_update() can be called in process context in the following way:
+br_fdb_add() -> __br_fdb_add() -> br_fdb_update() (if NTF_USE flag is set)
+so we need to disable softirqs because there are softirq users of the
+hash_lock. One easy way to reproduce this is to modify the bridge utility
+to set NTF_USE, enable stp and then set maxageing to a low value so
+br_fdb_cleanup() is called frequently and then just add new entries in
+a loop. This happens because br_fdb_cleanup() is called from timer/softirq
+context. The spin locks in br_fdb_update were _bh before commit f8ae737deea1
+("[BRIDGE]: forwarding remove unneeded preempt and bh diasables")
+and at the time that commit was correct because br_fdb_update() couldn't be
+called from process context, but that changed after commit:
+292d1398983f ("bridge: add NTF_USE support")
+Using local_bh_disable/enable around br_fdb_update() allows us to keep
+using the spin_lock/unlock in br_fdb_update for the fast-path.
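+
+Schematically, the lockup looks like this:
+
+	process context (NTF_USE add)        timer softirq on the same CPU
+	-----------------------------        -----------------------------
+	br_fdb_update()
+	  spin_lock(&br->hash_lock)
+	    ... softirq fires ...            br_fdb_cleanup()
+	                                       spins on br->hash_lock
+	                                       -> never completes, lockup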
+
+Signed-off-by: Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
+Fixes: 292d1398983f ("bridge: add NTF_USE support")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bridge/br_fdb.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/bridge/br_fdb.c
++++ b/net/bridge/br_fdb.c
+@@ -796,9 +796,11 @@ static int __br_fdb_add(struct ndmsg *nd
+ int err = 0;
+
+ if (ndm->ndm_flags & NTF_USE) {
++ local_bh_disable();
+ rcu_read_lock();
+ br_fdb_update(p->br, p, addr, vid, true);
+ rcu_read_unlock();
++ local_bh_enable();
+ } else {
+ spin_lock_bh(&p->br->hash_lock);
+ err = fdb_add_entry(p, addr, ndm->ndm_state,
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 28 May 2015 04:42:54 -0700
+Subject: bridge: fix br_multicast_query_expired() bug
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 71d9f6149cac8fc6646adfb2a6f3b0de6ddd23f6 ]
+
+br_multicast_query_expired() querier argument is a pointer to
+a struct bridge_mcast_querier :
+
+struct bridge_mcast_querier {
+ struct br_ip addr;
+ struct net_bridge_port __rcu *port;
+};
+
+Intent of the code was to clear port field, not the pointer to querier.
+
+Fixes: 2cd4143192e8 ("bridge: memorize and export selected IGMP/MLD querier port")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Thadeu Lima de Souza Cascardo <cascardo@redhat.com>
+Acked-by: Linus Lüssing <linus.luessing@c0d3.blue>
+Cc: Linus Lüssing <linus.luessing@web.de>
+Cc: Steinar H. Gunderson <sesse@samfundet.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bridge/br_multicast.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1821,7 +1821,7 @@ static void br_multicast_query_expired(s
+ if (query->startup_sent < br->multicast_startup_query_count)
+ query->startup_sent++;
+
+- RCU_INIT_POINTER(querier, NULL);
++ RCU_INIT_POINTER(querier->port, NULL);
+ br_multicast_send_query(br, NULL, query);
+ spin_unlock(&br->multicast_lock);
+ }
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Thadeu Lima de Souza Cascardo <cascardo@redhat.com>
+Date: Fri, 22 May 2015 12:18:59 -0300
+Subject: bridge: fix parsing of MLDv2 reports
+
+From: Thadeu Lima de Souza Cascardo <cascardo@redhat.com>
+
+[ Upstream commit 47cc84ce0c2fe75c99ea5963c4b5704dd78ead54 ]
+
+When more than one multicast address is present in an MLDv2 report, all but
+the first address are ignored, because the code breaks out of the loop if
+there has not been an error adding that address.
+
+This has caused failures when two guests connected through the bridge
+tried to communicate using IPv6. Neighbor discoveries would not be
+transmitted to the other guest when both used a link-local address and a
+static address.
+
+This only happens when there is a MLDv2 querier in the network.
+
+The fix will only break out of the loop when there is a failure adding a
+multicast address.
+
+The mdb before the patch:
+
+dev ovirtmgmt port vnet0 grp ff02::1:ff7d:6603 temp
+dev ovirtmgmt port vnet1 grp ff02::1:ff7d:6604 temp
+dev ovirtmgmt port bond0.86 grp ff02::2 temp
+
+After the patch:
+
+dev ovirtmgmt port vnet0 grp ff02::1:ff7d:6603 temp
+dev ovirtmgmt port vnet1 grp ff02::1:ff7d:6604 temp
+dev ovirtmgmt port bond0.86 grp ff02::fb temp
+dev ovirtmgmt port bond0.86 grp ff02::2 temp
+dev ovirtmgmt port bond0.86 grp ff02::d temp
+dev ovirtmgmt port vnet0 grp ff02::1:ff00:76 temp
+dev ovirtmgmt port bond0.86 grp ff02::16 temp
+dev ovirtmgmt port vnet1 grp ff02::1:ff00:77 temp
+dev ovirtmgmt port bond0.86 grp ff02::1:ff00:def temp
+dev ovirtmgmt port bond0.86 grp ff02::1:ffa1:40bf temp
+
+Fixes: 08b202b67264 ("bridge br_multicast: IPv6 MLD support.")
+Reported-by: Rik Theys <Rik.Theys@esat.kuleuven.be>
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@redhat.com>
+Tested-by: Rik Theys <Rik.Theys@esat.kuleuven.be>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bridge/br_multicast.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1071,7 +1071,7 @@ static int br_ip6_multicast_mld2_report(
+
+ err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
+ vid);
+- if (!err)
++ if (err)
+ break;
+ }
+
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= <bjorn@mork.no>
+Date: Fri, 22 May 2015 13:15:22 +0200
+Subject: cdc_ncm: Fix tx_bytes statistics
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= <bjorn@mork.no>
+
+[ Upstream commit 44f6731d8b68fa02f5ed65eaceac41f8c3c9279e ]
+
+The tx_curr_frame_payload field is u32. When we try to calculate a
+small negative delta based on it, we end up with a positive integer
+close to 2^32 instead. So the tx_bytes counter increases by about
+2^32 for every transmitted frame.
+
+Fix by calculating the delta as a signed long.
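+
+A minimal sketch of the wraparound (the values are made up for
+illustration):
+
+	u32 payload = 1400;		/* e.g. ctx->tx_curr_frame_payload */
+	u32 len = 1458;			/* e.g. skb_out->len, slightly larger */
+	long delta;
+
+	delta = payload - len;		/* wrong: wraps to roughly 2^32 */
+	delta = (long)payload - len;	/* right: the intended -58 */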
+
+Cc: Ben Hutchings <ben.hutchings@codethink.co.uk>
+Reported-by: Florian Bruhin <me@the-compiler.org>
+Fixes: 7a1e890e2168 ("usbnet: Fix tx_bytes statistic running backward in cdc_ncm")
+Signed-off-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/cdc_ncm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/usb/cdc_ncm.c
++++ b/drivers/net/usb/cdc_ncm.c
+@@ -1182,7 +1182,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev
+ * payload data instead.
+ */
+ usbnet_set_skb_tx_stats(skb_out, n,
+- ctx->tx_curr_frame_payload - skb_out->len);
++ (long)ctx->tx_curr_frame_payload - skb_out->len);
+
+ return skb_out;
+
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: "Eric W. Biederman" <ebiederm@xmission.com>
+Date: Fri, 22 May 2015 04:58:12 -0500
+Subject: ipv4: Avoid crashing in ip_error
+
+From: "Eric W. Biederman" <ebiederm@xmission.com>
+
+[ Upstream commit 381c759d9916c42959515ad34a6d467e24a88e93 ]
+
+ip_error does not check if in_dev is NULL before dereferencing it.
+
+The following sequence of calls is possible:
+CPU A                                 CPU B
+ip_rcv_finish
+    ip_route_input_noref()
+        ip_route_input_slow()
+                                      inetdev_destroy()
+    dst_input()
+
+With the result that a network device can be destroyed while processing
+an input packet.
+
+A crash was triggered with only unicast packets in flight, and
+forwarding enabled on the only network device. The error condition
+was created by the removal of the network device.
+
+As such it is likely that the error code was -EHOSTUNREACH, and the
+action taken by ip_error (if in_dev had been accessible) would have
+been to not increment any counters and to have tried and likely failed
+to send an icmp error as the network device is going away.
+
+Therefore handle this weird case by just dropping the packet if
+!in_dev. It will result in dropping the packet sooner, and will not
+result in an actual change of behavior.
+
+Fixes: 251da4130115b ("ipv4: Cache ip_error() routes even when not forwarding.")
+Reported-by: Vittorio Gambaletta <linuxbugs@vittgam.net>
+Tested-by: Vittorio Gambaletta <linuxbugs@vittgam.net>
+Signed-off-by: Vittorio Gambaletta <linuxbugs@vittgam.net>
+Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/route.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -903,6 +903,10 @@ static int ip_error(struct sk_buff *skb)
+ bool send;
+ int code;
+
++ /* IP on this device is disabled. */
++ if (!in_dev)
++ goto out;
++
+ net = dev_net(rt->dst.dev);
+ if (!IN_DEV_FORWARD(in_dev)) {
+ switch (rt->dst.error) {
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Shawn Bohrer <sbohrer@rgmadvisors.com>
+Date: Wed, 3 Jun 2015 16:27:38 -0500
+Subject: ipv4/udp: Verify multicast group is ours in upd_v4_early_demux()
+
+From: Shawn Bohrer <sbohrer@rgmadvisors.com>
+
+[ Upstream commit 6e540309326188f769e03bb4c6dd8ff6752930c2 ]
+
+421b3885bf6d56391297844f43fb7154a6396e12 "udp: ipv4: Add udp early
+demux" introduced a regression that allowed sockets bound to INADDR_ANY
+to receive packets from multicast groups that the socket had not joined.
+For example a socket that had joined 224.168.2.9 could also receive
+packets from 225.168.2.9 despite not having joined that group if
+ip_early_demux is enabled.
+
+Fix this by calling ip_check_mc_rcu() in udp_v4_early_demux() to verify
+that the multicast packet is indeed ours.
+
+Signed-off-by: Shawn Bohrer <sbohrer@rgmadvisors.com>
+Reported-by: Yurij M. Plotnikov <Yurij.Plotnikov@oktetlabs.ru>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/udp.c | 18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -90,6 +90,7 @@
+ #include <linux/socket.h>
+ #include <linux/sockios.h>
+ #include <linux/igmp.h>
++#include <linux/inetdevice.h>
+ #include <linux/in.h>
+ #include <linux/errno.h>
+ #include <linux/timer.h>
+@@ -1966,6 +1967,7 @@ void udp_v4_early_demux(struct sk_buff *
+ struct sock *sk;
+ struct dst_entry *dst;
+ int dif = skb->dev->ifindex;
++ int ours;
+
+ /* validate the packet */
+ if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
+@@ -1975,14 +1977,24 @@ void udp_v4_early_demux(struct sk_buff *
+ uh = udp_hdr(skb);
+
+ if (skb->pkt_type == PACKET_BROADCAST ||
+- skb->pkt_type == PACKET_MULTICAST)
++ skb->pkt_type == PACKET_MULTICAST) {
++ struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
++
++ if (!in_dev)
++ return;
++
++ ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
++ iph->protocol);
++ if (!ours)
++ return;
+ sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
+ uh->source, iph->saddr, dif);
+- else if (skb->pkt_type == PACKET_HOST)
++ } else if (skb->pkt_type == PACKET_HOST) {
+ sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
+ uh->source, iph->saddr, dif);
+- else
++ } else {
+ return;
++ }
+
+ if (!sk)
+ return;
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Vlad Yasevich <vyasevich@gmail.com>
+Date: Sat, 2 May 2015 21:33:44 -0400
+Subject: net: core: Correct an over-stringent device loop detection.
+
+From: Vlad Yasevich <vyasevich@gmail.com>
+
+[ Upstream commit d66bf7dd27573ee5ea90484899ee952c19ccb194 ]
+
+The code in __netdev_upper_dev_link() has an over-stringent
+loop detection logic that actually prevents valid configurations
+from working correctly.
+
+In particular, the logic returns an error if an upper device
+is already in the list of all upper devices for a given dev.
+This particular check seems to be overzealous as it disallows
+perfectly valid configurations. For example:
+ # ip l a link eth0 name eth0.10 type vlan id 10
+ # ip l a dev br0 typ bridge
+ # ip l s eth0.10 master br0
+ # ip l s eth0 master br0 <--- Will fail
+
+If you switch the last two commands (add eth0 first), then both
+will succeed. If after that, you remove eth0 and try to re-add
+it, it will fail!
+
+It appears to be enough to simply check adj_list to keep things
+safe.
+
+I've tried stacking multiple devices multiple times in all different
+combinations, and either rx_handler registration prevented the stacking
+or the device linking caught the error.
+
+Signed-off-by: Vladislav Yasevich <vyasevic@redhat.com>
+Acked-by: Jiri Pirko <jiri@resnulli.us>
+Acked-by: Veaceslav Falico <vfalico@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5170,7 +5170,7 @@ static int __netdev_upper_dev_link(struc
+ if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
+ return -EBUSY;
+
+- if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
++ if (__netdev_find_adj(dev, upper_dev, &dev->adj_list.upper))
+ return -EEXIST;
+
+ if (master && netdev_master_upper_dev_get(dev))
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Richard Cochran <richardcochran@gmail.com>
+Date: Mon, 25 May 2015 11:55:43 +0200
+Subject: net: dp83640: fix broken calibration routine.
+
+From: Richard Cochran <richardcochran@gmail.com>
+
+[ Upstream commit 397a253af5031de4a4612210055935309af4472c ]
+
+Currently, the calibration function that corrects the initial offsets
+among multiple devices only works the first time. If the function is
+called more than once, the calibration fails and bogus offsets will be
+programmed into the devices.
+
+In a well-hidden spot, the device documentation states that trigger indexes
+0 and 1 are special in allowing the TRIG_IF_LATE flag to actually work.
+
+This patch fixes the issue by using one of the special triggers during the
+recalibration method.
+
+Signed-off-by: Richard Cochran <richardcochran@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/dp83640.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/phy/dp83640.c
++++ b/drivers/net/phy/dp83640.c
+@@ -47,7 +47,7 @@
+ #define PSF_TX 0x1000
+ #define EXT_EVENT 1
+ #define CAL_EVENT 7
+-#define CAL_TRIGGER 7
++#define CAL_TRIGGER 1
+ #define DP83640_N_PINS 12
+
+ #define MII_DP83640_MICR 0x11
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Richard Cochran <richardcochran@gmail.com>
+Date: Mon, 25 May 2015 11:55:45 +0200
+Subject: net: dp83640: fix improper double spin locking.
+
+From: Richard Cochran <richardcochran@gmail.com>
+
+[ Upstream commit adbe088f6f8b0b7701fe07f51fe6f2bd602a6665 ]
+
+A pair of nested spin locks was introduced in commit 63502b8d0
+"dp83640: Fix receive timestamp race condition".
+
+Unfortunately the 'flags' parameter was reused for the inner lock,
+clobbering the originally saved IRQ state. This patch fixes the issue
+by changing the inner lock to plain spin_lock without irqsave.
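+
+A simplified sketch of the bug pattern (not the actual driver code):
+
+	static DEFINE_SPINLOCK(outer_lock);
+	static DEFINE_SPINLOCK(inner_lock);
+	unsigned long flags;
+
+	spin_lock_irqsave(&outer_lock, flags);	/* flags = IRQ state before outer */
+	spin_lock_irqsave(&inner_lock, flags);	/* clobbers that saved state */
+	spin_unlock_irqrestore(&inner_lock, flags);
+	spin_unlock_irqrestore(&outer_lock, flags); /* leaves IRQs disabled */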
+
+Signed-off-by: Richard Cochran <richardcochran@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/dp83640.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/phy/dp83640.c
++++ b/drivers/net/phy/dp83640.c
+@@ -845,7 +845,7 @@ static void decode_rxts(struct dp83640_p
+ list_del_init(&rxts->list);
+ phy2rxts(phy_rxts, rxts);
+
+- spin_lock_irqsave(&dp83640->rx_queue.lock, flags);
++ spin_lock(&dp83640->rx_queue.lock);
+ skb_queue_walk(&dp83640->rx_queue, skb) {
+ struct dp83640_skb_info *skb_info;
+
+@@ -860,7 +860,7 @@ static void decode_rxts(struct dp83640_p
+ break;
+ }
+ }
+- spin_unlock_irqrestore(&dp83640->rx_queue.lock, flags);
++ spin_unlock(&dp83640->rx_queue.lock);
+
+ if (!shhwtstamps)
+ list_add_tail(&rxts->list, &dp83640->rxts);
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Richard Cochran <richardcochran@gmail.com>
+Date: Mon, 25 May 2015 11:55:44 +0200
+Subject: net: dp83640: reinforce locking rules.
+
+From: Richard Cochran <richardcochran@gmail.com>
+
+[ Upstream commit a935865c828c8cd20501f618c69f659a5b6d6a5f ]
+
+Callers of the ext_write function are supposed to hold a mutex that
+protects the state of the dialed page, but one caller was missing the
+lock from the very start, and over time the code has been changed
+without following the rule. This patch cleans up the call sites in
+violation of the rule.
+
+Signed-off-by: Richard Cochran <richardcochran@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/dp83640.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/phy/dp83640.c
++++ b/drivers/net/phy/dp83640.c
+@@ -495,7 +495,9 @@ static int ptp_dp83640_enable(struct ptp
+ else
+ evnt |= EVNT_RISE;
+ }
++ mutex_lock(&clock->extreg_lock);
+ ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
++ mutex_unlock(&clock->extreg_lock);
+ return 0;
+
+ case PTP_CLK_REQ_PEROUT:
+@@ -531,6 +533,8 @@ static u8 status_frame_src[6] = { 0x08,
+
+ static void enable_status_frames(struct phy_device *phydev, bool on)
+ {
++ struct dp83640_private *dp83640 = phydev->priv;
++ struct dp83640_clock *clock = dp83640->clock;
+ u16 cfg0 = 0, ver;
+
+ if (on)
+@@ -538,9 +542,13 @@ static void enable_status_frames(struct
+
+ ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;
+
++ mutex_lock(&clock->extreg_lock);
++
+ ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
+ ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
+
++ mutex_unlock(&clock->extreg_lock);
++
+ if (!phydev->attached_dev) {
+ pr_warn("expected to find an attached netdevice\n");
+ return;
+@@ -1172,11 +1180,18 @@ static int dp83640_config_init(struct ph
+
+ if (clock->chosen && !list_empty(&clock->phylist))
+ recalibrate(clock);
+- else
++ else {
++ mutex_lock(&clock->extreg_lock);
+ enable_broadcast(phydev, clock->page, 1);
++ mutex_unlock(&clock->extreg_lock);
++ }
+
+ enable_status_frames(phydev, true);
++
++ mutex_lock(&clock->extreg_lock);
+ ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
++ mutex_unlock(&clock->extreg_lock);
++
+ return 0;
+ }
+
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Henning Rogge <hrogge@gmail.com>
+Date: Mon, 18 May 2015 21:08:49 +0200
+Subject: net/ipv6/udp: Fix ipv6 multicast socket filter regression
+
+From: Henning Rogge <hrogge@gmail.com>
+
+[ Upstream commit 33b4b015e1a1ca7a8fdce40af5e71642a8ea355c ]
+
+Commit <5cf3d46192fc> ("udp: Simplify__udp*_lib_mcast_deliver")
+simplified the filter for incoming IPv6 multicast but removed
+the check of the local socket address and the UDP destination
+address.
+
+This patch restores the filter to prevent sockets bound to an IPv6
+multicast IP from receiving other UDP traffic like unicast.
+
+Signed-off-by: Henning Rogge <hrogge@gmail.com>
+Fixes: 5cf3d46192fc ("udp: Simplify__udp*_lib_mcast_deliver")
+Cc: "David S. Miller" <davem@davemloft.net>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/udp.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -734,7 +734,9 @@ static bool __udp_v6_is_mcast_sock(struc
+ (inet->inet_dport && inet->inet_dport != rmt_port) ||
+ (!ipv6_addr_any(&sk->sk_v6_daddr) &&
+ !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
+- (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
++ (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ||
++ (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
++ !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
+ return false;
+ if (!inet6_mc_check(sk, loc_addr, rmt_addr))
+ return false;
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Fri, 15 May 2015 16:30:41 -0700
+Subject: net: phy: Allow EEE for all RGMII variants
+
+From: Florian Fainelli <f.fainelli@gmail.com>
+
+[ Upstream commit 7e14069651591c81046ffaec13c3dac8cb70f5fb ]
+
+RGMII interfaces come in multiple flavors: RGMII with transmit or
+receive internal delay, no delays at all, or delays in both directions.
+
+This change extends the initial check for PHY_INTERFACE_MODE_RGMII to
+cover all of these variants, since EEE should be allowed for any of these
+modes: it is a property of the Gigabit PHY capability rather than of the
+RGMII electrical interface and its delays.
+
+Fixes: a59a4d192166 ("phy: add the EEE support and the way to access to the MMD registers")
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/phy.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -1053,13 +1053,14 @@ int phy_init_eee(struct phy_device *phyd
+ {
+ /* According to 802.3az,the EEE is supported only in full duplex-mode.
+ * Also EEE feature is active when core is operating with MII, GMII
+- * or RGMII. Internal PHYs are also allowed to proceed and should
+- * return an error if they do not support EEE.
++ * or RGMII (all kinds). Internal PHYs are also allowed to proceed and
++ * should return an error if they do not support EEE.
+ */
+ if ((phydev->duplex == DUPLEX_FULL) &&
+ ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
+ (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
+- (phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
++ (phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
++ phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) ||
+ phy_is_internal(phydev))) {
+ int eee_lp, eee_cap, eee_adv;
+ u32 lp, cap, adv;
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Wed, 20 May 2015 17:13:33 +0200
+Subject: net: sched: fix call_rcu() race on classifier module unloads
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit c78e1746d3ad7d548bdf3fe491898cc453911a49 ]
+
+Vijay reported that a loop as simple as ...
+
+ while true; do
+ tc qdisc add dev foo root handle 1: prio
+ tc filter add dev foo parent 1: u32 match u32 0 0 flowid 1
+ tc qdisc del dev foo root
+ rmmod cls_u32
+ done
+
+... will panic the kernel. Moreover, he bisected the change
+apparently introducing it to 78fd1d0ab072 ("netlink: Re-add
+locking to netlink_lookup() and seq walker").
+
+The removal of synchronize_net() from the netlink socket
+triggering the qdisc to be removed, seems to have uncovered
+an RCU resp. module reference count race from the tc API.
+Given that RCU conversion was done after e341694e3eb5 ("netlink:
+Convert netlink_lookup() to use RCU protected hash table")
+which added the synchronize_net() originally, occasion of
+hitting the bug was less likely (not impossible though):
+
+When qdiscs that i) support attaching classifiers and,
+ii) have at least one of them attached, get deleted, they
+invoke tcf_destroy_chain(), and thus call into ->destroy()
+handler from a classifier module.
+
+After RCU conversion, all classifier that have an internal
+prio list, unlink them and initiate freeing via call_rcu()
+deferral.
+
+Meanwhile, tcf_destroy() already releases the reference to the
+tp->ops->owner module before the queued RCU callback handler
+has been invoked.
+
+Subsequent rmmod on the classifier module is then not prevented
+since all module references are already dropped.
+
+By the time, the kernel invokes the RCU callback handler from
+the module, that function address is then invalid.
+
+One way to fix it would be to add an rcu_barrier() to
+unregister_tcf_proto_ops() to wait for all pending call_rcu()s
+to complete.
+
+synchronize_rcu() is not appropriate as under heavy RCU
+callback load, registered call_rcu()s could be deferred
+longer than a grace period. In case we don't have any pending
+call_rcu()s, the barrier is allowed to return immediately.
+
+Since we came here via unregister_tcf_proto_ops(), there
+are no users of a given classifier anymore. Further nested
+call_rcu()s pointing into the module space are not being
+done anywhere.
+
+Only cls_bpf_delete_prog() may schedule a work item, to
+unlock pages eventually, but that is not in the range/context
+of cls_bpf anymore.
+
+Fixes: 25d8c0d55f24 ("net: rcu-ify tcf_proto")
+Fixes: 9888faefe132 ("net: sched: cls_basic use RCU")
+Reported-by: Vijay Subramanian <subramanian.vijay@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Cc: John Fastabend <john.r.fastabend@intel.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: Thomas Graf <tgraf@suug.ch>
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Cc: Alexei Starovoitov <ast@plumgrid.com>
+Tested-by: Vijay Subramanian <subramanian.vijay@gmail.com>
+Acked-by: Alexei Starovoitov <ast@plumgrid.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/cls_api.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -81,6 +81,11 @@ int unregister_tcf_proto_ops(struct tcf_
+ struct tcf_proto_ops *t;
+ int rc = -ENOENT;
+
++ /* Wait for outstanding call_rcu()s, if any, from a
++ * tcf_proto_ops's destroy() handler.
++ */
++ rcu_barrier();
++
+ write_lock(&cls_mod_lock);
+ list_for_each_entry(t, &tcf_proto_base, head) {
+ if (t == ops) {
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: WANG Cong <xiyou.wangcong@gmail.com>
+Date: Tue, 26 May 2015 16:08:48 -0700
+Subject: net_sched: invoke ->attach() after setting dev->qdisc
+
+From: WANG Cong <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 86e363dc3b50bfd50a1f315934583fbda673ab8d ]
+
+For mq qdisc, we add per tx queue qdiscs to the root qdisc
+for display purposes. However, that happens too early,
+before the new dev->qdisc is finally set; this causes
+q->list to point to an old root qdisc which is going to be
+freed right before being assigned a new one.
+
+Fix this by moving ->attach() after setting dev->qdisc.
+
+For the record, this fixes the following crash:
+
+ ------------[ cut here ]------------
+ WARNING: CPU: 1 PID: 975 at lib/list_debug.c:59 __list_del_entry+0x5a/0x98()
+ list_del corruption. prev->next should be ffff8800d1998ae8, but was 6b6b6b6b6b6b6b6b
+ CPU: 1 PID: 975 Comm: tc Not tainted 4.1.0-rc4+ #1019
+ Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
+ 0000000000000009 ffff8800d73fb928 ffffffff81a44e7f 0000000047574756
+ ffff8800d73fb978 ffff8800d73fb968 ffffffff810790da ffff8800cfc4cd20
+ ffffffff814e725b ffff8800d1998ae8 ffffffff82381250 0000000000000000
+ Call Trace:
+ [<ffffffff81a44e7f>] dump_stack+0x4c/0x65
+ [<ffffffff810790da>] warn_slowpath_common+0x9c/0xb6
+ [<ffffffff814e725b>] ? __list_del_entry+0x5a/0x98
+ [<ffffffff81079162>] warn_slowpath_fmt+0x46/0x48
+ [<ffffffff81820eb0>] ? dev_graft_qdisc+0x5e/0x6a
+ [<ffffffff814e725b>] __list_del_entry+0x5a/0x98
+ [<ffffffff814e72a7>] list_del+0xe/0x2d
+ [<ffffffff81822f05>] qdisc_list_del+0x1e/0x20
+ [<ffffffff81820cd1>] qdisc_destroy+0x30/0xd6
+ [<ffffffff81822676>] qdisc_graft+0x11d/0x243
+ [<ffffffff818233c1>] tc_get_qdisc+0x1a6/0x1d4
+ [<ffffffff810b5eaf>] ? mark_lock+0x2e/0x226
+ [<ffffffff817ff8f5>] rtnetlink_rcv_msg+0x181/0x194
+ [<ffffffff817ff72e>] ? rtnl_lock+0x17/0x19
+ [<ffffffff817ff72e>] ? rtnl_lock+0x17/0x19
+ [<ffffffff817ff774>] ? __rtnl_unlock+0x17/0x17
+ [<ffffffff81855dc6>] netlink_rcv_skb+0x4d/0x93
+ [<ffffffff817ff756>] rtnetlink_rcv+0x26/0x2d
+ [<ffffffff818544b2>] netlink_unicast+0xcb/0x150
+ [<ffffffff81161db9>] ? might_fault+0x59/0xa9
+ [<ffffffff81854f78>] netlink_sendmsg+0x4fa/0x51c
+ [<ffffffff817d6e09>] sock_sendmsg_nosec+0x12/0x1d
+ [<ffffffff817d8967>] sock_sendmsg+0x29/0x2e
+ [<ffffffff817d8cf3>] ___sys_sendmsg+0x1b4/0x23a
+ [<ffffffff8100a1b8>] ? native_sched_clock+0x35/0x37
+ [<ffffffff810a1d83>] ? sched_clock_local+0x12/0x72
+ [<ffffffff810a1fd4>] ? sched_clock_cpu+0x9e/0xb7
+ [<ffffffff810def2a>] ? current_kernel_time+0xe/0x32
+ [<ffffffff810b4bc5>] ? lock_release_holdtime.part.29+0x71/0x7f
+ [<ffffffff810ddebf>] ? read_seqcount_begin.constprop.27+0x5f/0x76
+ [<ffffffff810b6292>] ? trace_hardirqs_on_caller+0x17d/0x199
+ [<ffffffff811b14d5>] ? __fget_light+0x50/0x78
+ [<ffffffff817d9808>] __sys_sendmsg+0x42/0x60
+ [<ffffffff817d9838>] SyS_sendmsg+0x12/0x1c
+ [<ffffffff81a50e97>] system_call_fastpath+0x12/0x6f
+ ---[ end trace ef29d3fb28e97ae7 ]---
+
+For long term, we probably need to clean up the qdisc_graft() code
+in case it hides other bugs like this.
+
+Fixes: 95dc19299f74 ("pkt_sched: give visibility to mq slave qdiscs")
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_api.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -815,10 +815,8 @@ static int qdisc_graft(struct net_device
+ if (dev->flags & IFF_UP)
+ dev_deactivate(dev);
+
+- if (new && new->ops->attach) {
+- new->ops->attach(new);
+- num_q = 0;
+- }
++ if (new && new->ops->attach)
++ goto skip;
+
+ for (i = 0; i < num_q; i++) {
+ struct netdev_queue *dev_queue = dev_ingress_queue(dev);
+@@ -834,12 +832,16 @@ static int qdisc_graft(struct net_device
+ qdisc_destroy(old);
+ }
+
++skip:
+ if (!ingress) {
+ notify_and_destroy(net, skb, n, classid,
+ dev->qdisc, new);
+ if (new && !new->ops->attach)
+ atomic_inc(&new->refcnt);
+ dev->qdisc = new ? : &noop_qdisc;
++
++ if (new && new->ops->attach)
++ new->ops->attach(new);
+ } else {
+ notify_and_destroy(net, skb, n, classid, old, new);
+ }
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Sat, 16 May 2015 21:16:28 +0800
+Subject: netlink: Disable insertions/removals during rehash
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit: Not applicable ]
+
+The current rhashtable rehash code is buggy and can't deal with
+parallel insertions/removals without corrupting the hash table.
+
+This patch disables it by partially reverting
+c5adde9468b0714a051eac7f9666f23eb10b61f7 ("netlink: eliminate
+nl_sk_hash_lock").
+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netlink/af_netlink.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1052,7 +1052,7 @@ static int netlink_insert(struct sock *s
+ struct netlink_table *table = &nl_table[sk->sk_protocol];
+ int err;
+
+- lock_sock(sk);
++ mutex_lock(&table->hash.mutex);
+
+ err = -EBUSY;
+ if (nlk_sk(sk)->portid)
+@@ -1074,7 +1074,7 @@ static int netlink_insert(struct sock *s
+ }
+
+ err:
+- release_sock(sk);
++ mutex_unlock(&table->hash.mutex);
+ return err;
+ }
+
+@@ -1083,10 +1083,12 @@ static void netlink_remove(struct sock *
+ struct netlink_table *table;
+
+ table = &nl_table[sk->sk_protocol];
++ mutex_lock(&table->hash.mutex);
+ if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node)) {
+ WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
+ __sock_put(sk);
+ }
++ mutex_unlock(&table->hash.mutex);
+
+ netlink_table_grab();
+ if (nlk_sk(sk)->subscriptions) {
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Sat, 16 May 2015 21:50:28 +0800
+Subject: netlink: Reset portid after netlink_insert failure
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit c0bb07df7d981e4091432754e30c9c720e2c0c78 ]
+
+The commit c5adde9468b0714a051eac7f9666f23eb10b61f7 ("netlink:
+eliminate nl_sk_hash_lock") breaks the autobind retry mechanism
+because it doesn't reset portid after a failed netlink_insert.
+
+This means that should autobind fail the first time around, then
+the socket will be stuck in limbo as it can never be bound again
+since it already has a non-zero portid.
+
+Fixes: c5adde9468b0 ("netlink: eliminate nl_sk_hash_lock")
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netlink/af_netlink.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1069,6 +1069,7 @@ static int netlink_insert(struct sock *s
+ err = 0;
+ if (!__netlink_insert(table, sk)) {
+ err = -EADDRINUSE;
++ nlk_sk(sk)->portid = 0;
+ sock_put(sk);
+ }
+
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Date: Wed, 13 May 2015 14:19:42 +0200
+Subject: rtnl/bond: don't send rtnl msg for unregistered iface
+
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+
+[ Upstream commit ed2a80ab7b76f11af0b2c6255709c4ebf164b667 ]
+
+Before the patch, the command 'ip link add bond2 type bond mode 802.3ad'
+causes the kernel to send a rtnl message for the bond2 interface, with an
+ifindex 0.
+
+'ip monitor' shows:
+0: bond2: <BROADCAST,MULTICAST,MASTER> mtu 1500 state DOWN group default
+ link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff
+9: bond2@NONE: <BROADCAST,MULTICAST,MASTER> mtu 1500 qdisc noop state DOWN group default
+ link/ether ea:3e:1f:53:92:7b brd ff:ff:ff:ff:ff:ff
+[snip]
+
+The patch fixes the spotted bug by checking in the bond driver whether the
+interface is registered before calling the notifier chain.
+It also adds a check in rtmsg_ifinfo() to prevent this kind of bug in the
+future.
+
+Fixes: d4261e565000 ("bonding: create netlink event when bonding option is changed")
+CC: Jiri Pirko <jiri@resnulli.us>
+Reported-by: Julien Meunier <julien.meunier@6wind.com>
+Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/bonding/bond_options.c | 2 +-
+ net/core/rtnetlink.c | 3 +++
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -624,7 +624,7 @@ int __bond_opt_set(struct bonding *bond,
+ out:
+ if (ret)
+ bond_opt_error_interpret(bond, opt, ret, val);
+- else
++ else if (bond->dev->reg_state == NETREG_REGISTERED)
+ call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev);
+
+ return ret;
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2337,6 +2337,9 @@ void rtmsg_ifinfo(int type, struct net_d
+ {
+ struct sk_buff *skb;
+
++ if (dev->reg_state != NETREG_REGISTERED)
++ return;
++
+ skb = rtmsg_ifinfo_build_skb(type, dev, change, flags);
+ if (skb)
+ rtmsg_ifinfo_send(skb, dev, flags);
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+Date: Tue, 26 May 2015 17:30:17 -0600
+Subject: sctp: Fix mangled IPv4 addresses on a IPv6 listening socket
+
+From: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+
+[ Upstream commit 9302d7bb0c5cd46be5706859301f18c137b2439f ]
+
+sctp_v4_map_v6 was subtly writing and reading from members
+of a union in a way that clobbered data it needed to read before
+it read it.
+
+Zeroing the v6 flowinfo overwrites the v4 sin_addr with 0, meaning
+that every place that calls sctp_v4_map_v6 gets ::ffff:0.0.0.0 as the
+result.
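+
+Roughly, the overlap inside the union looks like this (a simplified sketch,
+offsets in bytes):
+
+	union sctp_addr {
+		struct sockaddr_in  v4;	/* sin_family@0, sin_port@2, sin_addr@4 */
+		struct sockaddr_in6 v6;	/* sin6_family@0, sin6_port@2,
+					 * sin6_flowinfo@4 overlays v4.sin_addr */
+	};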
+
+Reorder things to guarantee correct behaviour no matter what the
+union layout is.
+
+This impacts user space clients that open an IPv6 SCTP socket and
+receive IPv4 connections. Prior to 299ee user space would see a
+sockaddr with AF_INET and a correct address; after 299ee the sockaddr
+is AF_INET6, but the address is wrong.
+
+Fixes: 299ee123e198 (sctp: Fixup v4mapped behaviour to comply with Sock API)
+Signed-off-by: Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/sctp/sctp.h | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -571,11 +571,14 @@ static inline void sctp_v6_map_v4(union
+ /* Map v4 address to v4-mapped v6 address */
+ static inline void sctp_v4_map_v6(union sctp_addr *addr)
+ {
++ __be16 port;
++
++ port = addr->v4.sin_port;
++ addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
++ addr->v6.sin6_port = port;
+ addr->v6.sin6_family = AF_INET6;
+ addr->v6.sin6_flowinfo = 0;
+ addr->v6.sin6_scope_id = 0;
+- addr->v6.sin6_port = addr->v4.sin_port;
+- addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
+ addr->v6.sin6_addr.s6_addr32[0] = 0;
+ addr->v6.sin6_addr.s6_addr32[1] = 0;
+ addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
crush-ensuring-at-most-num-rep-osds-are-selected.patch
aio-fix-serial-draining-in-exit_aio.patch
+net-core-correct-an-over-stringent-device-loop-detection.patch
+x86-bpf_jit-fix-from_be16-and-from_le16-32-instructions.patch
+x86-bpf_jit-fix-compilation-of-large-bpf-programs.patch
+net-phy-allow-eee-for-all-rgmii-variants.patch
+netlink-reset-portid-after-netlink_insert-failure.patch
+rtnl-bond-don-t-send-rtnl-msg-for-unregistered-iface.patch
+tcp-ipv6-fix-flow-label-setting-in-time_wait-state.patch
+net-ipv6-udp-fix-ipv6-multicast-socket-filter-regression.patch
+net-sched-fix-call_rcu-race-on-classifier-module-unloads.patch
+ipv4-avoid-crashing-in-ip_error.patch
+cdc_ncm-fix-tx_bytes-statistics.patch
+bridge-fix-parsing-of-mldv2-reports.patch
+net-dp83640-fix-broken-calibration-routine.patch
+net-dp83640-reinforce-locking-rules.patch
+net-dp83640-fix-improper-double-spin-locking.patch
+unix-caif-sk_socket-can-disappear-when-state-is-unlocked.patch
+xen-netback-properly-initialize-credit_bytes.patch
+net_sched-invoke-attach-after-setting-dev-qdisc.patch
+sctp-fix-mangled-ipv4-addresses-on-a-ipv6-listening-socket.patch
+bridge-fix-br_multicast_query_expired-bug.patch
+udp-fix-behavior-of-wrong-checksums.patch
+tcp-fix-child-sockets-to-use-system-default-congestion-control-if-not-set.patch
+xen-netback-read-hotplug-script-once-at-start-of-day.patch
+ipv4-udp-verify-multicast-group-is-ours-in-upd_v4_early_demux.patch
+be2net-replace-dma-pci_alloc_coherent-calls-with-dma_zalloc_coherent.patch
+bridge-disable-softirqs-around-br_fdb_update-to-avoid-lockup.patch
+netlink-disable-insertions-removals-during-rehash.patch
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Neal Cardwell <ncardwell@google.com>
+Date: Fri, 29 May 2015 13:47:07 -0400
+Subject: tcp: fix child sockets to use system default congestion control if not set
+
+From: Neal Cardwell <ncardwell@google.com>
+
+[ Upstream commit 9f950415e4e28e7cfae2e416b43e862e8101d996 ]
+
+Linux 3.17 and earlier are explicitly engineered so that if the app
+doesn't specifically request a CC module on a listener before the SYN
+arrives, then the child gets the system default CC when the connection
+is established. See tcp_init_congestion_control() in 3.17 or earlier,
+which says "if no choice made yet assign the current value set as
+default". The change ("net: tcp: assign tcp cong_ops when tcp sk is
+created") altered these semantics, so that children got their parent
+listener's congestion control even if the system default had changed
+after the listener was created.
+
+This commit returns to those original semantics from 3.17 and earlier,
+since they are the original semantics from 2007 in 4d4d3d1e8 ("[TCP]:
+Congestion control initialization."), and some Linux congestion
+control workflows depend on that.
+
+In summary, if a listener socket specifically sets TCP_CONGESTION to
+"x", or the route locks the CC module to "x", then the child gets
+"x". Otherwise the child gets current system default from
+net.ipv4.tcp_congestion_control. That's the behavior in 3.17 and
+earlier, and this commit restores that.
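+
+A user-space sketch of the two cases (lfd is a hypothetical listening
+socket):
+
+	/* Listener explicitly picks a CC module: children inherit it. */
+	setsockopt(lfd, IPPROTO_TCP, TCP_CONGESTION, "cubic", 5);
+
+	/* No TCP_CONGESTION setsockopt on the listener: children now get
+	 * whatever net.ipv4.tcp_congestion_control is at accept time. */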
+
+Fixes: 55d8694fa82c ("net: tcp: assign tcp cong_ops when tcp sk is created")
+Cc: Florian Westphal <fw@strlen.de>
+Cc: Daniel Borkmann <dborkman@redhat.com>
+Cc: Glenn Judd <glenn.judd@morganstanley.com>
+Cc: Stephen Hemminger <stephen@networkplumber.org>
+Signed-off-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Yuchung Cheng <ycheng@google.com>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/inet_connection_sock.h | 3 ++-
+ net/ipv4/tcp_cong.c | 5 ++++-
+ net/ipv4/tcp_minisocks.c | 5 ++++-
+ 3 files changed, 10 insertions(+), 3 deletions(-)
+
+--- a/include/net/inet_connection_sock.h
++++ b/include/net/inet_connection_sock.h
+@@ -98,7 +98,8 @@ struct inet_connection_sock {
+ const struct tcp_congestion_ops *icsk_ca_ops;
+ const struct inet_connection_sock_af_ops *icsk_af_ops;
+ unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
+- __u8 icsk_ca_state:7,
++ __u8 icsk_ca_state:6,
++ icsk_ca_setsockopt:1,
+ icsk_ca_dst_locked:1;
+ __u8 icsk_retransmits;
+ __u8 icsk_pending;
+--- a/net/ipv4/tcp_cong.c
++++ b/net/ipv4/tcp_cong.c
+@@ -187,6 +187,7 @@ static void tcp_reinit_congestion_contro
+
+ tcp_cleanup_congestion_control(sk);
+ icsk->icsk_ca_ops = ca;
++ icsk->icsk_ca_setsockopt = 1;
+
+ if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
+ icsk->icsk_ca_ops->init(sk);
+@@ -335,8 +336,10 @@ int tcp_set_congestion_control(struct so
+ rcu_read_lock();
+ ca = __tcp_ca_find_autoload(name);
+ /* No change asking for existing value */
+- if (ca == icsk->icsk_ca_ops)
++ if (ca == icsk->icsk_ca_ops) {
++ icsk->icsk_ca_setsockopt = 1;
+ goto out;
++ }
+ if (!ca)
+ err = -ENOENT;
+ else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -437,7 +437,10 @@ void tcp_ca_openreq_child(struct sock *s
+ rcu_read_unlock();
+ }
+
+- if (!ca_got_dst && !try_module_get(icsk->icsk_ca_ops->owner))
++ /* If no valid choice made yet, assign current system default ca. */
++ if (!ca_got_dst &&
++ (!icsk->icsk_ca_setsockopt ||
++ !try_module_get(icsk->icsk_ca_ops->owner)))
+ tcp_assign_congestion_control(sk);
+
+ tcp_set_ca_state(sk, TCP_CA_Open);
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Florent Fourcot <florent.fourcot@enst-bretagne.fr>
+Date: Sat, 16 May 2015 00:24:59 +0200
+Subject: tcp/ipv6: fix flow label setting in TIME_WAIT state
+
+From: Florent Fourcot <florent.fourcot@enst-bretagne.fr>
+
+[ Upstream commit 21858cd02dabcf290564cbf4769b101eba54d7bb ]
+
+commit 1d13a96c74fc ("ipv6: tcp: fix flowlabel value in ACK messages
+send from TIME_WAIT") added the flow label in the last TCP packets.
+Unfortunately, it was not cast properly.
+
+This patch replaces the buggy shift with be32_to_cpu()/cpu_to_be32().
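+
+For illustration only, a small user-space sketch (not the kernel's
+structures) of why a byte-order conversion plus mask is needed rather
+than a shift: the flow label sits in the low 20 bits of the first
+IPv6 header word in network byte order, mirroring the
+be32_to_cpu()/cpu_to_be32() pair used below.
+
+  #include <stdint.h>
+  #include <stdio.h>
+  #include <arpa/inet.h>
+
+  int main(void)
+  {
+      /* first 32-bit word of an IPv6 header as seen on the wire:
+       * version 6, traffic class 0, flow label 0x12345 */
+      uint32_t word_be = htonl(0x60012345);
+
+      /* extract the 20-bit label into host order ... */
+      uint32_t label = ntohl(word_be) & 0x000FFFFF;
+      /* ... and convert it back to network byte order when needed */
+      uint32_t label_be = htonl(label);
+
+      printf("label=%#x restored=%#x\n",
+             (unsigned)label, (unsigned)ntohl(label_be));
+      return 0;
+  }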
+
+Fixes: 1d13a96c74fc ("ipv6: tcp: fix flowlabel value in ACK messages")
+Reported-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: Florent Fourcot <florent.fourcot@enst-bretagne.fr>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_minisocks.c | 2 +-
+ net/ipv6/tcp_ipv6.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -316,7 +316,7 @@ void tcp_time_wait(struct sock *sk, int
+ tw->tw_v6_daddr = sk->sk_v6_daddr;
+ tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+ tw->tw_tclass = np->tclass;
+- tw->tw_flowlabel = np->flow_label >> 12;
++ tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
+ tw->tw_ipv6only = sk->sk_ipv6only;
+ }
+ #endif
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -975,7 +975,7 @@ static void tcp_v6_timewait_ack(struct s
+ tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
+ tcp_time_stamp + tcptw->tw_ts_offset,
+ tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
+- tw->tw_tclass, (tw->tw_flowlabel << 12));
++ tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
+
+ inet_twsk_put(tw);
+ }
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Sat, 30 May 2015 09:16:53 -0700
+Subject: udp: fix behavior of wrong checksums
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit beb39db59d14990e401e235faf66a6b9b31240b0 ]
+
+We have two problems in the UDP stack related to bogus checksums:
+
+1) We return -EAGAIN to the application even if the receive queue is
+   not empty. This breaks applications using edge-triggered epoll()
+   (see the sketch below).
+
+2) Under UDP flood, we can loop forever without yielding to other
+   processes, potentially hanging the host, especially on non-SMP
+   systems.
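+
+Problem (1) is easiest to see from the usual edge-triggered receive
+loop (a minimal user-space sketch; socket setup and error handling
+omitted): the application treats EAGAIN as "queue drained" and goes
+back to sleep, so a premature EAGAIN caused by one bad-checksum
+datagram strands the valid datagrams still sitting in the queue.
+
+  #include <errno.h>
+  #include <sys/types.h>
+  #include <sys/epoll.h>
+  #include <sys/socket.h>
+
+  static void rx_loop(int epfd, int udp_fd)
+  {
+      struct epoll_event ev = {
+          .events  = EPOLLIN | EPOLLET,
+          .data.fd = udp_fd,
+      };
+      char buf[2048];
+
+      epoll_ctl(epfd, EPOLL_CTL_ADD, udp_fd, &ev);
+
+      for (;;) {
+          ssize_t n;
+
+          epoll_wait(epfd, &ev, 1, -1);
+
+          /* drain everything queued since the last edge */
+          do {
+              n = recv(udp_fd, buf, sizeof(buf), MSG_DONTWAIT);
+              /* process the datagram when n >= 0 ... */
+          } while (n >= 0);
+
+          /* EAGAIN here is taken to mean "queue empty"; if the kernel
+           * returns it while good datagrams are still queued, they are
+           * never read, because no new edge will fire for them */
+          if (errno != EAGAIN && errno != EWOULDBLOCK)
+              break;  /* real error */
+      }
+  }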
+
+This patch is an attempt to make things better.
+
+We might in the future add extra support for rt applications
+wanting to better control time spent doing a recv() in a hostile
+environment. For example we could validate checksums before queuing
+packets in socket receive queue.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/udp.c | 6 ++----
+ net/ipv6/udp.c | 6 ++----
+ 2 files changed, 4 insertions(+), 8 deletions(-)
+
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1348,10 +1348,8 @@ csum_copy_err:
+ }
+ unlock_sock_fast(sk, slow);
+
+- if (noblock)
+- return -EAGAIN;
+-
+- /* starting over for a new packet */
++ /* starting over for a new packet, but check if we need to yield */
++ cond_resched();
+ msg->msg_flags &= ~MSG_TRUNC;
+ goto try_again;
+ }
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -528,10 +528,8 @@ csum_copy_err:
+ }
+ unlock_sock_fast(sk, slow);
+
+- if (noblock)
+- return -EAGAIN;
+-
+- /* starting over for a new packet */
++ /* starting over for a new packet, but check if we need to yield */
++ cond_resched();
+ msg->msg_flags &= ~MSG_TRUNC;
+ goto try_again;
+ }
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Mark Salyzyn <salyzyn@android.com>
+Date: Tue, 26 May 2015 08:22:19 -0700
+Subject: unix/caif: sk_socket can disappear when state is unlocked
+
+From: Mark Salyzyn <salyzyn@android.com>
+
+[ Upstream commit b48732e4a48d80ed4a14812f0bab09560846514e ]
+
+We got a rare NULL pointer dereference in clear_bit(): sk->sk_socket
+can disappear while the socket state is unlocked in the stream
+data-wait loops, so check for SOCK_DEAD once the lock is re-taken.
+
+Signed-off-by: Mark Salyzyn <salyzyn@android.com>
+Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+----
+v2: switch to sock_flag(sk, SOCK_DEAD) and added net/caif/caif_socket.c
+v3: return -ECONNRESET in upstream caller of wait function for SOCK_DEAD
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/caif/caif_socket.c | 8 ++++++++
+ net/unix/af_unix.c | 8 ++++++++
+ 2 files changed, 16 insertions(+)
+
+--- a/net/caif/caif_socket.c
++++ b/net/caif/caif_socket.c
+@@ -330,6 +330,10 @@ static long caif_stream_data_wait(struct
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
++
++ if (sock_flag(sk, SOCK_DEAD))
++ break;
++
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ }
+
+@@ -374,6 +378,10 @@ static int caif_stream_recvmsg(struct ki
+ struct sk_buff *skb;
+
+ lock_sock(sk);
++ if (sock_flag(sk, SOCK_DEAD)) {
++ err = -ECONNRESET;
++ goto unlock;
++ }
+ skb = skb_dequeue(&sk->sk_receive_queue);
+ caif_check_flow_release(sk);
+
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1887,6 +1887,10 @@ static long unix_stream_data_wait(struct
+ unix_state_unlock(sk);
+ timeo = freezable_schedule_timeout(timeo);
+ unix_state_lock(sk);
++
++ if (sock_flag(sk, SOCK_DEAD))
++ break;
++
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ }
+
+@@ -1947,6 +1951,10 @@ static int unix_stream_recvmsg(struct ki
+ struct sk_buff *skb, *last;
+
+ unix_state_lock(sk);
++ if (sock_flag(sk, SOCK_DEAD)) {
++ err = -ECONNRESET;
++ goto unlock;
++ }
+ last = skb = skb_peek(&sk->sk_receive_queue);
+ again:
+ if (skb == NULL) {
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Alexei Starovoitov <ast@plumgrid.com>
+Date: Fri, 22 May 2015 15:42:55 -0700
+Subject: x86: bpf_jit: fix compilation of large bpf programs
+
+From: Alexei Starovoitov <ast@plumgrid.com>
+
+[ Upstream commit 3f7352bf21f8fd7ba3e2fcef9488756f188e12be ]
+
+x86 has variable-length instruction encoding, and the x86 JIT compiler
+tries to pick the shortest encoding for each bpf instruction. While
+doing so the jump targets change, so the JIT makes multiple passes
+over the program. A typical program needs 3 passes; some very short
+programs converge in 2, and large programs may need 4 or 5. But
+specially crafted bpf programs may hit the pass limit, and if the
+program only converges on the last iteration the JIT compiler ends up
+producing an image full of 'int 3' insns. Fix this corner case by
+doing a final iteration over the bpf program.
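+
+For illustration, a stand-alone toy of the pattern relied on here (not
+the kernel's code; the sizes, shrink rate and pass limit are made up):
+iterate while the generated length keeps shrinking, allocate the image
+once the length is stable, and keep looping for one more emitting pass
+after allocation so the image is filled even when convergence only
+happens on the last counted pass.
+
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <string.h>
+
+  /* toy "code generator": the length shrinks a little on every pass
+   * until it settles; code is only emitted once a buffer exists */
+  static int gen(int prev_len, char *image)
+  {
+      int len = prev_len > 100 ? prev_len - 4 : prev_len;
+
+      if (image)
+          memset(image, 0x90, len);   /* pretend to emit the insns */
+      return len;
+  }
+
+  int main(void)
+  {
+      int oldlen = 0, len = 134, pass;
+      char *image = NULL;
+
+      /* "|| image" keeps the loop alive for one final emitting pass
+       * even if the length only stabilizes on the last counted pass */
+      for (pass = 0; pass < 10 || image; pass++) {
+          len = gen(len, image);
+          if (image)
+              break;              /* final pass filled the image */
+          if (len == oldlen)      /* converged: now allocate the image */
+              image = malloc(len);
+          oldlen = len;
+      }
+      printf("passes=%d len=%d\n", pass, len);
+      free(image);
+      return 0;
+  }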
+
+Fixes: 0a14842f5a3c ("net: filter: Just In Time compiler for x86-64")
+Reported-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
+Tested-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/net/bpf_jit_comp.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -966,7 +966,12 @@ void bpf_int_jit_compile(struct bpf_prog
+ }
+ ctx.cleanup_addr = proglen;
+
+- for (pass = 0; pass < 10; pass++) {
++ /* JITed image shrinks with every pass and the loop iterates
++ * until the image stops shrinking. Very large bpf programs
++ * may converge on the last pass. In such case do one more
++ * pass to emit the final image
++ */
++ for (pass = 0; pass < 10 || image; pass++) {
+ proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
+ if (proglen <= 0) {
+ image = NULL;
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Alexei Starovoitov <ast@plumgrid.com>
+Date: Mon, 11 May 2015 23:25:16 -0700
+Subject: x86: bpf_jit: fix FROM_BE16 and FROM_LE16/32 instructions
+
+From: Alexei Starovoitov <ast@plumgrid.com>
+
+[ Upstream commit 343f845b375989f1753f605902931fa939aa2223 ]
+
+FROM_BE16:
+'ror %reg, 8' doesn't clear the upper bits of the register,
+so use an additional 'movzwl' insn to zero-extend 16 bits into 64.
+
+FROM_LE16:
+should zero-extend the lower 16 bits into 64 bits.
+
+FROM_LE32:
+should zero-extend the lower 32 bits into 64 bits.
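+
+As a reference for the intended results, a tiny user-space sketch
+(assuming a little-endian host; not the JIT itself): after each of
+these conversions the upper part of the 64-bit register must be zero,
+which the rotate-only BE16 code and the previously empty LE cases did
+not guarantee.
+
+  #include <stdint.h>
+  #include <stdio.h>
+  #include <inttypes.h>
+
+  static uint64_t from_be16(uint64_t r)
+  {
+      uint16_t v = (uint16_t)r;
+
+      return (uint16_t)((v << 8) | (v >> 8));   /* swap + zero-extend */
+  }
+
+  static uint64_t from_le16(uint64_t r) { return (uint16_t)r; }
+  static uint64_t from_le32(uint64_t r) { return (uint32_t)r; }
+
+  int main(void)
+  {
+      uint64_t r = 0xdeadbeefcafe1234ULL;
+
+      printf("be16 0x%" PRIx64 "\n", from_be16(r));  /* 0x3412 */
+      printf("le16 0x%" PRIx64 "\n", from_le16(r));  /* 0x1234 */
+      printf("le32 0x%" PRIx64 "\n", from_le32(r));  /* 0xcafe1234 */
+      return 0;
+  }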
+
+Fixes: 89aa075832b0 ("net: sock: allow eBPF programs to be attached to sockets")
+Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/net/bpf_jit_comp.c | 28 ++++++++++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+--- a/arch/x86/net/bpf_jit_comp.c
++++ b/arch/x86/net/bpf_jit_comp.c
+@@ -559,6 +559,13 @@ static int do_jit(struct bpf_prog *bpf_p
+ if (is_ereg(dst_reg))
+ EMIT1(0x41);
+ EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
++
++ /* emit 'movzwl eax, ax' */
++ if (is_ereg(dst_reg))
++ EMIT3(0x45, 0x0F, 0xB7);
++ else
++ EMIT2(0x0F, 0xB7);
++ EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
+ break;
+ case 32:
+ /* emit 'bswap eax' to swap lower 4 bytes */
+@@ -577,6 +584,27 @@ static int do_jit(struct bpf_prog *bpf_p
+ break;
+
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
++ switch (imm32) {
++ case 16:
++ /* emit 'movzwl eax, ax' to zero extend 16-bit
++ * into 64 bit
++ */
++ if (is_ereg(dst_reg))
++ EMIT3(0x45, 0x0F, 0xB7);
++ else
++ EMIT2(0x0F, 0xB7);
++ EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
++ break;
++ case 32:
++ /* emit 'mov eax, eax' to clear upper 32-bits */
++ if (is_ereg(dst_reg))
++ EMIT1(0x45);
++ EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
++ break;
++ case 64:
++ /* nop */
++ break;
++ }
+ break;
+
+ /* ST: *(u8*)(dst_reg + off) = imm */
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Ross Lagerwall <ross.lagerwall@citrix.com>
+Date: Wed, 27 May 2015 11:44:32 +0100
+Subject: xen/netback: Properly initialize credit_bytes
+
+From: Ross Lagerwall <ross.lagerwall@citrix.com>
+
+[ Upstream commit ce0e5c522d3924090c20e774359809a7aa08c44c ]
+
+Commit e9ce7cb6b107 ("xen-netback: Factor queue-specific data into queue
+struct") introduced a regression when moving queue-specific data into
+the queue struct by failing to set the credit_bytes field. This
+prevented bandwidth limiting from working. Initialize the field as it
+was done before multiqueue support was added.
+
+Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
+Acked-by: Wei Liu <wei.liu2@citrix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netback/xenbus.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/xen-netback/xenbus.c
++++ b/drivers/net/xen-netback/xenbus.c
+@@ -736,6 +736,7 @@ static void connect(struct backend_info
+ goto err;
+ }
+
++ queue->credit_bytes = credit_bytes;
+ queue->remaining_credit = credit_bytes;
+ queue->credit_usec = credit_usec;
+
--- /dev/null
+From foo@baz Sat Jun 13 09:48:35 PDT 2015
+From: Ian Campbell <Ian.Campbell@citrix.com>
+Date: Mon, 1 Jun 2015 11:30:24 +0100
+Subject: xen: netback: read hotplug script once at start of day.
+
+From: Ian Campbell <Ian.Campbell@citrix.com>
+
+[ Upstream commit 31a418986a5852034d520a5bab546821ff1ccf3d ]
+
+When we come to tear things down in netback_remove() and generate the
+uevent it is possible that the xenstore directory has already been
+removed (details below).
+
+In such cases netback_uevent() won't be able to read the hotplug
+script and will write a xenstore error node.
+
+A recent change to the hypervisor exposed this race such that we now
+sometimes lose it (where apparently we never did before).
+
+Instead read the hotplug script configuration during setup and use it
+for the lifetime of the backend device.
+
+The apparently more obvious fix of moving the transition to
+state=Closed in netback_remove() to after the uevent does not work
+because it is possible that we are already in state=Closed (in
+reaction to the guest having disconnected as it shut down). Being
+already in Closed means the toolstack is at liberty to start tearing
+down the xenstore directories. In principle it might be possible to
+arrange to unregister the device sooner (e.g. on transition to Closing)
+such that xenstore would still be there but this state machine is
+fragile and prone to anger...
+
+A modern Xen system only relies on the hotplug uevent for driver
+domains, when the backend is in the same domain as the toolstack it
+will run the necessary setup/teardown directly in the correct sequence
+wrt xenstore changes.
+
+Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
+Acked-by: Wei Liu <wei.liu2@citrix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netback/xenbus.c | 33 +++++++++++++++++++--------------
+ 1 file changed, 19 insertions(+), 14 deletions(-)
+
+--- a/drivers/net/xen-netback/xenbus.c
++++ b/drivers/net/xen-netback/xenbus.c
+@@ -34,6 +34,8 @@ struct backend_info {
+ enum xenbus_state frontend_state;
+ struct xenbus_watch hotplug_status_watch;
+ u8 have_hotplug_status_watch:1;
++
++ const char *hotplug_script;
+ };
+
+ static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
+@@ -236,6 +238,7 @@ static int netback_remove(struct xenbus_
+ xenvif_free(be->vif);
+ be->vif = NULL;
+ }
++ kfree(be->hotplug_script);
+ kfree(be);
+ dev_set_drvdata(&dev->dev, NULL);
+ return 0;
+@@ -253,6 +256,7 @@ static int netback_probe(struct xenbus_d
+ struct xenbus_transaction xbt;
+ int err;
+ int sg;
++ const char *script;
+ struct backend_info *be = kzalloc(sizeof(struct backend_info),
+ GFP_KERNEL);
+ if (!be) {
+@@ -345,6 +349,15 @@ static int netback_probe(struct xenbus_d
+ if (err)
+ pr_debug("Error writing multi-queue-max-queues\n");
+
++ script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
++ if (IS_ERR(script)) {
++ err = PTR_ERR(script);
++ xenbus_dev_fatal(dev, err, "reading script");
++ goto fail;
++ }
++
++ be->hotplug_script = script;
++
+ err = xenbus_switch_state(dev, XenbusStateInitWait);
+ if (err)
+ goto fail;
+@@ -377,22 +390,14 @@ static int netback_uevent(struct xenbus_
+ struct kobj_uevent_env *env)
+ {
+ struct backend_info *be = dev_get_drvdata(&xdev->dev);
+- char *val;
+
+- val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
+- if (IS_ERR(val)) {
+- int err = PTR_ERR(val);
+- xenbus_dev_fatal(xdev, err, "reading script");
+- return err;
+- } else {
+- if (add_uevent_var(env, "script=%s", val)) {
+- kfree(val);
+- return -ENOMEM;
+- }
+- kfree(val);
+- }
++ if (!be)
++ return 0;
++
++ if (add_uevent_var(env, "script=%s", be->hotplug_script))
++ return -ENOMEM;
+
+- if (!be || !be->vif)
++ if (!be->vif)
+ return 0;
+
+ return add_uevent_var(env, "vif=%s", be->vif->dev->name);