* therefore trigger warnings.
* Defer the xmit to rds_send_worker() instead.
*/
- queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
+ queue_delayed_work(cp->cp_wq, &cp->cp_send_w, 0);
}
rcu_read_unlock();
}
__rds_conn_path_init(conn, &conn->c_path[i],
is_outgoing);
conn->c_path[i].cp_index = i;
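+		/* every path starts out on the shared global workqueue */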
+ conn->c_path[i].cp_wq = rds_wq;
}
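The allocation of a dedicated per-connection workqueue is not shown in this excerpt; the hunk above only seeds cp_wq with the global rds_wq. A minimal sketch of what attaching one could look like, assuming a hypothetical rds_conn_alloc_wq() helper:

/* Hypothetical sketch, not part of this patch: give every path of one
 * connection its own ordered workqueue; on failure the paths simply keep
 * using the shared rds_wq they were seeded with above.
 */
static int rds_conn_alloc_wq(struct rds_connection *conn, int npaths)
{
	struct workqueue_struct *wq;
	int i;

	wq = alloc_ordered_workqueue("krds_cp_wq", WQ_MEM_RECLAIM);
	if (!wq)
		return -ENOMEM;

	for (i = 0; i < npaths; i++)
		conn->c_path[i].cp_wq = wq;

	return 0;
}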
rcu_read_lock();
if (!destroy && rds_destroy_pending(cp->cp_conn)) {
rcu_read_unlock();
return;
}
- queue_work(rds_wq, &cp->cp_down_w);
+ queue_work(cp->cp_wq, &cp->cp_down_w);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_conn_path_drop);
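The matching teardown is also outside this excerpt; whatever frees the connection would need to avoid destroying the shared rds_wq. A sketch, assuming the hypothetical rds_conn_alloc_wq() above:

/* Hypothetical counterpart: only destroy a queue this connection owns. */
static void rds_conn_free_wq(struct rds_connection *conn)
{
	struct workqueue_struct *wq = conn->c_path[0].cp_wq;

	if (wq && wq != rds_wq)
		destroy_workqueue(wq);
}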
}
if (rds_conn_path_state(cp) == RDS_CONN_DOWN &&
!test_and_set_bit(RDS_RECONNECT_PENDING, &cp->cp_flags))
- queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
+ queue_delayed_work(cp->cp_wq, &cp->cp_conn_w, 0);
rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rds_conn_path_connect_if_down);
(must_wake ||
(can_wait && rds_ib_ring_low(&ic->i_recv_ring)) ||
rds_ib_ring_empty(&ic->i_recv_ring))) {
- queue_delayed_work(rds_wq, &conn->c_recv_w, 1);
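+ 		/* IB is a single-path transport, so use the first conn path's workqueue */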
+ queue_delayed_work(conn->c_path->cp_wq, &conn->c_recv_w, 1);
}
if (can_wait)
cond_resched();
if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
test_bit(0, &conn->c_map_queued))
- queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+ queue_delayed_work(conn->c_path->cp_wq, &conn->c_send_w, 0);
/* We expect errors as the qp is drained during shutdown */
if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
- queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+ queue_delayed_work(conn->c_path->cp_wq, &conn->c_send_w, 0);
WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);
void *cp_transport_data;
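+	/* workqueue this path's send/recv/connect/shutdown work is queued on */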
+ struct workqueue_struct *cp_wq;
atomic_t cp_state;
unsigned long cp_send_gen;
unsigned long cp_flags;
if (rds_destroy_pending(cp->cp_conn))
ret = -ENETUNREACH;
else
- queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+ queue_delayed_work(cp->cp_wq,
+ &cp->cp_send_w, 1);
rcu_read_unlock();
} else if (raced) {
rds_stats_inc(s_send_lock_queue_raced);
if (rds_destroy_pending(cpath->cp_conn))
ret = -ENETUNREACH;
else
- queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);
+ queue_delayed_work(cpath->cp_wq, &cpath->cp_send_w, 1);
rcu_read_unlock();
}
if (ret)
rds_stats_inc(s_send_queued);
rds_stats_inc(s_send_pong);
- /* schedule the send work on rds_wq */
+ /* schedule the send work on cp_wq */
rcu_read_lock();
if (!rds_destroy_pending(cp->cp_conn))
- queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
+ queue_delayed_work(cp->cp_wq, &cp->cp_send_w, 1);
rcu_read_unlock();
rds_message_put(rm);
if (rds_tcp_read_sock(cp, GFP_ATOMIC) == -ENOMEM) {
rcu_read_lock();
if (!rds_destroy_pending(cp->cp_conn))
- queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+ queue_delayed_work(cp->cp_wq, &cp->cp_recv_w, 0);
rcu_read_unlock();
}
out:
rcu_read_lock();
if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf &&
!rds_destroy_pending(cp->cp_conn))
- queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
+ queue_delayed_work(cp->cp_wq, &cp->cp_send_w, 0);
rcu_read_unlock();
out:
set_bit(0, &cp->cp_conn->c_map_queued);
rcu_read_lock();
if (!rds_destroy_pending(cp->cp_conn)) {
- queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
- queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+ queue_delayed_work(cp->cp_wq, &cp->cp_send_w, 0);
+ queue_delayed_work(cp->cp_wq, &cp->cp_recv_w, 0);
}
rcu_read_unlock();
cp->cp_conn->c_proposed_version = RDS_PROTOCOL_VERSION;
cp->cp_reconnect_jiffies = rds_sysctl_reconnect_min_jiffies;
rcu_read_lock();
if (!rds_destroy_pending(cp->cp_conn))
- queue_delayed_work(rds_wq, &cp->cp_conn_w, 0);
+ queue_delayed_work(cp->cp_wq, &cp->cp_conn_w, 0);
rcu_read_unlock();
return;
}
conn, &conn->c_laddr, &conn->c_faddr);
rcu_read_lock();
if (!rds_destroy_pending(cp->cp_conn))
- queue_delayed_work(rds_wq, &cp->cp_conn_w,
+ queue_delayed_work(cp->cp_wq, &cp->cp_conn_w,
rand % cp->cp_reconnect_jiffies);
rcu_read_unlock();
switch (ret) {
case -EAGAIN:
rds_stats_inc(s_send_immediate_retry);
- queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
+ queue_delayed_work(cp->cp_wq, &cp->cp_send_w, 0);
break;
case -ENOMEM:
rds_stats_inc(s_send_delayed_retry);
- queue_delayed_work(rds_wq, &cp->cp_send_w, 2);
+ queue_delayed_work(cp->cp_wq, &cp->cp_send_w, 2);
break;
default:
break;
switch (ret) {
case -EAGAIN:
rds_stats_inc(s_recv_immediate_retry);
- queue_delayed_work(rds_wq, &cp->cp_recv_w, 0);
+ queue_delayed_work(cp->cp_wq, &cp->cp_recv_w, 0);
break;
case -ENOMEM:
rds_stats_inc(s_recv_delayed_retry);
- queue_delayed_work(rds_wq, &cp->cp_recv_w, 2);
+ queue_delayed_work(cp->cp_wq, &cp->cp_recv_w, 2);
break;
default:
break;