return true;
}
+/* Schedule (or reschedule) peer_notify_work to run after @delay jiffies.
+ * Use this to update send_peer_notif when RTNL may be held in other places:
+ * the work handler takes RTNL itself via rtnl_trylock() and re-arms on
+ * contention, so the caller never needs to drop RTNL first.
+ */
+void bond_peer_notify_work_rearm(struct bonding *bond, unsigned long delay)
+{
+ queue_delayed_work(bond->wq, &bond->peer_notify_work, delay);
+}
+
+/* Re-arm the peer-notify countdown: send_peer_notif is reset to
+ * num_peer_notif * max(1, peer_notif_delay).  Caller must hold RTNL.
+ */
+static void bond_peer_notify_reset(struct bonding *bond)
+{
+ bond->send_peer_notif = bond->params.num_peer_notif *
+ max(1, bond->params.peer_notif_delay);
+}
+
+/* Delayed-work handler: reset send_peer_notif under RTNL.
+ *
+ * Uses rtnl_trylock() rather than rtnl_lock(): bond_close() flushes this
+ * work while holding RTNL, so blocking on RTNL here would deadlock.  On
+ * contention, retry one jiffy later.
+ */
+static void bond_peer_notify_handler(struct work_struct *work)
+{
+ struct bonding *bond = container_of(work, struct bonding,
+ peer_notify_work.work);
+
+ if (!rtnl_trylock()) {
+ bond_peer_notify_work_rearm(bond, 1);
+ return;
+ }
+
+ bond_peer_notify_reset(bond);
+
+ rtnl_unlock();
+}
+
+/* Emit a NETDEV_NOTIFY_PEERS event when bond_should_notify_peers() agrees,
+ * and consume one send_peer_notif credit if an event was sent -- or
+ * unconditionally when @force is set.  Caller must hold RTNL.
+ */
+static void bond_peer_notify_may_events(struct bonding *bond, bool force)
+{
+ bool notified = false;
+
+ if (bond_should_notify_peers(bond)) {
+ notified = true;
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
+ }
+
+ if (notified || force)
+ bond->send_peer_notif--;
+}
+
/**
 * bond_change_active_slave - change the active slave into the specified one
 * @bond: our bonding struct
BOND_SLAVE_NOTIFY_NOW);
if (new_active) {
- bool should_notify_peers = false;
-
bond_set_slave_active_flags(new_active,
BOND_SLAVE_NOTIFY_NOW);
bond_do_fail_over_mac(bond, new_active,
old_active);
- if (netif_running(bond->dev)) {
- bond->send_peer_notif =
- bond->params.num_peer_notif *
- max(1, bond->params.peer_notif_delay);
- should_notify_peers =
- bond_should_notify_peers(bond);
- }
-
call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, bond->dev);
- if (should_notify_peers) {
- bond->send_peer_notif--;
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
- bond->dev);
+
+ /* Re-seed the peer-notify countdown and possibly emit the
+ * first NOTIFY_PEERS now (after the FAILOVER notifier).
+ */
+ if (netif_running(bond->dev)) {
+ bond_peer_notify_reset(bond);
+ bond_peer_notify_may_events(bond, false);
}
}
}
void bond_work_init_all(struct bonding *bond)
{
+ /* ndo_stop (bond_close()) flushes these works while holding the
+ * rtnl lock, so the handlers must never block on rtnl lock (use
+ * rtnl_trylock() and re-arm instead) to avoid deadlock.
+ */
INIT_DELAYED_WORK(&bond->mcast_work,
bond_resend_igmp_join_requests_delayed);
INIT_DELAYED_WORK(&bond->alb_work, bond_alb_monitor);
INIT_DELAYED_WORK(&bond->arp_work, bond_arp_monitor);
INIT_DELAYED_WORK(&bond->ad_work, bond_3ad_state_machine_handler);
INIT_DELAYED_WORK(&bond->slave_arr_work, bond_slave_arr_handler);
+ INIT_DELAYED_WORK(&bond->peer_notify_work, bond_peer_notify_handler);
}
void bond_work_cancel_all(struct bonding *bond)
cancel_delayed_work_sync(&bond->ad_work);
cancel_delayed_work_sync(&bond->mcast_work);
cancel_delayed_work_sync(&bond->slave_arr_work);
+ /* Safe under RTNL: the handler only rtnl_trylock()s, never blocks. */
+ cancel_delayed_work_sync(&bond->peer_notify_work);
}
static int bond_open(struct net_device *bond_dev)
struct delayed_work ad_work;
struct delayed_work mcast_work;
struct delayed_work slave_arr_work;
+ /* refreshes send_peer_notif without blocking on the rtnl lock */
+ struct delayed_work peer_notify_work;
#ifdef CONFIG_DEBUG_FS
/* debugging support via debugfs */
struct dentry *debug_dir;
int level);
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave);
void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay);
+/* Schedule peer_notify_work after @delay jiffies; callable with RTNL held. */
+void bond_peer_notify_work_rearm(struct bonding *bond, unsigned long delay);
void bond_work_init_all(struct bonding *bond);
void bond_work_cancel_all(struct bonding *bond);