}
}
- if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
+ if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
/* state_lock used by requester & completer */
spin_lock_bh(&qp->state_lock);
- if ((qp->req.state == QP_STATE_DRAIN) &&
- (qp->comp.psn == qp->req.psn)) {
- qp->req.state = QP_STATE_DRAINED;
+ if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) {
+ qp->attr.sq_draining = 0;
spin_unlock_bh(&qp->state_lock);
if (qp->ibqp.event_handler) {
* (4) the timeout parameter is set
*/
if ((qp_type(qp) == IB_QPT_RC) &&
- (qp->req.state == QP_STATE_READY) &&
+ (qp_state(qp) >= IB_QPS_RTS) &&
(psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
qp->qp_timeout_jiffies)
mod_timer(&qp->retrans_timer,
int is_request = pkt->mask & RXE_REQ_MASK;
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
- if ((is_request && (qp->req.state != QP_STATE_READY)) ||
- (!is_request && (qp_state(qp) <= IB_QPS_RTR))) {
+ if ((is_request && (qp_state(qp) < IB_QPS_RTS)) ||
+ (!is_request && (qp_state(qp) < IB_QPS_RTR))) {
rxe_dbg_qp(qp, "Packet dropped. QP is not in ready state\n");
goto drop;
}
qp->req.wqe_index = queue_get_producer(qp->sq.queue,
QUEUE_TYPE_FROM_CLIENT);
- qp->req.state = QP_STATE_RESET;
qp->req.opcode = -1;
qp->comp.opcode = -1;
goto err1;
}
- if (mask & IB_QP_STATE) {
- if (cur_state == IB_QPS_SQD) {
- if (qp->req.state == QP_STATE_DRAIN &&
- new_state != IB_QPS_ERR)
- goto err1;
- }
+ if (mask & IB_QP_STATE && cur_state == IB_QPS_SQD) {
+ if (qp->attr.sq_draining && new_state != IB_QPS_ERR)
+ goto err1;
}
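The sq_draining flag tested here is not a new rxe-private field; it reuses the member that already exists in struct ib_qp_attr, which rxe embeds per QP as qp->attr. Abridged from include/rdma/ib_verbs.h, quoted for reference with unrelated fields elided:

struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	/* ... */
	u8			sq_draining;
	/* ... */
};

Keeping the flag inside qp->attr is presumably also why the rxe_qp_to_attr() hunk further down can drop its explicit attr->sq_draining assignment.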
if (mask & IB_QP_PORT) {
rxe_disable_task(&qp->comp.task);
rxe_disable_task(&qp->req.task);
- /* move qp to the reset state */
- qp->req.state = QP_STATE_RESET;
-
/* drain work and packet queues */
rxe_requester(qp);
rxe_completer(qp);
rxe_enable_task(&qp->req.task);
}
-/* drain the send queue */
-static void rxe_qp_drain(struct rxe_qp *qp)
-{
- if (qp->sq.queue) {
- if (qp->req.state != QP_STATE_DRAINED) {
- qp->req.state = QP_STATE_DRAIN;
- rxe_sched_task(&qp->comp.task);
- rxe_sched_task(&qp->req.task);
- }
- }
-}
-
/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
- qp->req.state = QP_STATE_ERROR;
qp->attr.qp_state = IB_QPS_ERR;
/* drain work and packet queues */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
struct ib_udata *udata)
{
+ enum ib_qp_state cur_state = (mask & IB_QP_CUR_STATE) ?
+ attr->cur_qp_state : qp->attr.qp_state;
int err;
if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
case IB_QPS_INIT:
rxe_dbg_qp(qp, "state -> INIT\n");
- qp->req.state = QP_STATE_INIT;
break;
case IB_QPS_RTR:
case IB_QPS_RTS:
rxe_dbg_qp(qp, "state -> RTS\n");
- qp->req.state = QP_STATE_READY;
break;
case IB_QPS_SQD:
rxe_dbg_qp(qp, "state -> SQD\n");
- rxe_qp_drain(qp);
+ if (cur_state != IB_QPS_SQD) {
+ qp->attr.sq_draining = 1;
+ rxe_sched_task(&qp->comp.task);
+ rxe_sched_task(&qp->req.task);
+ }
break;
case IB_QPS_SQE:
rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);
- if (qp->req.state == QP_STATE_DRAIN) {
- attr->sq_draining = 1;
- /* applications that get this state
- * typically spin on it. yield the
- * processor
- */
+ /* Applications that get this state typically spin on it.
+ * Yield the processor
+ */
+ if (qp->attr.sq_draining)
cond_resched();
- } else {
- attr->sq_draining = 0;
- }
rxe_dbg_qp(qp, "attr->sq_draining = %d\n", attr->sq_draining);
}
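For context on how the value reported above is consumed: a kernel verbs consumer typically moves the QP to SQD and then either waits for the IB_EVENT_SQ_DRAINED event or polls ib_query_qp() until sq_draining clears, which is the spin the cond_resched() above is trying to be polite about. A minimal consumer-side polling sketch (hypothetical helper, not part of this patch, error paths trimmed):

#include <linux/sched.h>
#include <rdma/ib_verbs.h>

/* Hypothetical example: request a send-queue drain and poll for completion. */
static int drain_sq_poll(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_SQD };
	struct ib_qp_init_attr init_attr;
	int err;

	err = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (err)
		return err;

	do {
		err = ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr);
		if (err)
			return err;
		cond_resched();	/* mirror the yield in rxe_qp_to_attr() */
	} while (attr.sq_draining);

	return 0;
}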
if (pkt->mask & RXE_REQ_MASK) {
- if (unlikely(qp_state(qp) <= IB_QPS_RTR))
+ if (unlikely(qp_state(qp) < IB_QPS_RTR))
return -EINVAL;
- } else if (unlikely(qp->req.state < QP_STATE_READY ||
- qp->req.state > QP_STATE_DRAINED))
- return -EINVAL;
+ } else {
+ if (unlikely(qp_state(qp) < IB_QPS_RTS))
+ return -EINVAL;
+ }
return 0;
}
cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
- if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
+ if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
/* check to see if we are drained;
* state_lock used by requester and completer
*/
spin_lock_bh(&qp->state_lock);
do {
- if (qp->req.state != QP_STATE_DRAIN) {
+ if (!qp->attr.sq_draining) {
/* comp just finished */
spin_unlock_bh(&qp->state_lock);
break;
break;
}
- qp->req.state = QP_STATE_DRAINED;
+ qp->attr.sq_draining = 0;
spin_unlock_bh(&qp->state_lock);
if (qp->ibqp.event_handler) {
wqe = queue_addr_from_index(q, index);
- if (unlikely((qp->req.state == QP_STATE_DRAIN ||
- qp->req.state == QP_STATE_DRAINED) &&
+ if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
(wqe->state != wqe_state_processing)))
return NULL;
if (unlikely(!qp->valid))
goto exit;
- if (unlikely(qp->req.state == QP_STATE_ERROR)) {
+ if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
wqe = req_next_wqe(qp);
if (wqe)
/*
goto exit;
}
- if (unlikely(qp->req.state == QP_STATE_RESET)) {
+ if (unlikely(qp_state(qp) == IB_QPS_RESET)) {
qp->req.wqe_index = queue_get_consumer(q,
QUEUE_TYPE_FROM_CLIENT);
qp->req.opcode = -1;
/* update wqe_index for each wqe completion */
qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
wqe->state = wqe_state_error;
- qp->req.state = QP_STATE_ERROR;
+ qp->attr.qp_state = IB_QPS_ERR;
rxe_sched_task(&qp->comp.task);
exit:
ret = -EAGAIN;
if (!err)
rxe_sched_task(&qp->req.task);
- if (unlikely(qp->req.state == QP_STATE_ERROR))
+ if (unlikely(qp_state(qp) == IB_QPS_ERR))
rxe_sched_task(&qp->comp.task);
return err;
goto err_out;
}
- if (unlikely(qp->req.state < QP_STATE_READY)) {
+ if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
*bad_wr = wr;
err = -EINVAL;
rxe_dbg_qp(qp, "qp not ready to send");
int error;
};
-enum rxe_qp_state {
- QP_STATE_RESET,
- QP_STATE_INIT,
- QP_STATE_READY,
- QP_STATE_DRAIN, /* req only */
- QP_STATE_DRAINED, /* req only */
- QP_STATE_ERROR
-};
-
struct rxe_req_info {
- enum rxe_qp_state state;
int wqe_index;
u32 psn;
int opcode;