struct iwcm_work {
	struct work_struct work;
	struct iwcm_id_private *cm_id;
-	struct list_head list;
	struct iw_cm_event event;
	struct list_head free_list;
};
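
The removed list member was the linkage for the per-cm_id work_list; free_list stays because idle work entries are still recycled through work_free_list. For reference, the recycling helpers in iwcm.c look roughly like this (context, not part of the patch; once the id is live, callers serialize on cm_id_priv->lock):

static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
	struct iwcm_work *work;

	if (list_empty(&cm_id_priv->work_free_list))
		return NULL;
	work = list_first_entry(&cm_id_priv->work_free_list,
				struct iwcm_work, free_list);
	list_del_init(&work->free_list);
	return work;
}

static void put_work(struct iwcm_work *work)
{
	list_add(&work->free_list, &work->cm_id->work_free_list);
}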
			return -ENOMEM;
		}
		work->cm_id = cm_id_priv;
-		INIT_LIST_HEAD(&work->list);
		put_work(work);
	}
	return 0;
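
The INIT_LIST_HEAD() on work->list goes away with the member itself. That is safe because teardown never walks work->list; it walks only the free list, roughly as below (context from the surrounding driver source, not part of the patch):

static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
	struct list_head *e, *tmp;

	list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
		list_del(e);
		kfree(list_entry(e, struct iwcm_work, free_list));
	}
}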
static bool iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
	if (refcount_dec_and_test(&cm_id_priv->refcount)) {
-		BUG_ON(!list_empty(&cm_id_priv->work_list));
		free_cm_id(cm_id_priv);
		return true;
	}
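
With the work_list gone there is nothing left to assert empty on the final deref, so the BUG_ON() needs no replacement. What the final deref releases is unchanged; free_cm_id() is roughly (context, not part of the patch):

static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
	dealloc_work_entries(cm_id_priv);
	kfree(cm_id_priv);
}

A true return is the last word on the object: the caller must not touch cm_id_priv afterwards.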
	refcount_set(&cm_id_priv->refcount, 1);
	init_waitqueue_head(&cm_id_priv->connect_wait);
	init_completion(&cm_id_priv->destroy_comp);
-	INIT_LIST_HEAD(&cm_id_priv->work_list);
	INIT_LIST_HEAD(&cm_id_priv->work_free_list);
	return &cm_id_priv->id;
}
/*
- * Process events on the work_list for the cm_id. If the callback
- * function requests that the cm_id be deleted, a flag is set in the
- * cm_id flags to indicate that when the last reference is
- * removed, the cm_id is to be destroyed. This is necessary to
- * distinguish between an object that will be destroyed by the app
- * thread asleep on the destroy_comp list vs. an object destroyed
- * here synchronously when the last reference is removed.
+ * Process events for the cm_id. If the callback function requests
+ * that the cm_id be deleted, a flag is set in the cm_id flags to
+ * indicate that when the last reference is removed, the cm_id is
+ * to be destroyed. This is necessary to distinguish between an
+ * object that will be destroyed by the app thread asleep on the
+ * destroy_comp list vs. an object destroyed here synchronously
+ * when the last reference is removed.
 */
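
To make "destroyed here synchronously when the last reference is removed" concrete, here is a minimal, self-contained model of that rule in plain C. Every name in it (struct obj, obj_deref) is a hypothetical stand-in for illustration, not driver code:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;		/* single-threaded stand-in for refcount_t */
	bool destroy_requested;	/* the comment's "flag in the cm_id flags" */
};

/* Drop one reference; free synchronously on the last one, as the
 * comment describes. A true return means the object is gone. */
static bool obj_deref(struct obj *o)
{
	if (--o->refcount == 0) {
		free(o);
		return true;
	}
	return false;
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;
	o->refcount = 2;		/* creator ref + one queued event */
	o->destroy_requested = true;	/* set by the event callback */

	printf("event deref freed object: %d\n", obj_deref(o));	/* 0 */
	printf("final deref freed object: %d\n", obj_deref(o));	/* 1 */
	return 0;
}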
static void cm_work_handler(struct work_struct *_work)
{
	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
	struct iw_cm_event levent;
	struct iwcm_id_private *cm_id_priv = work->cm_id;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
-	while (!list_empty(&cm_id_priv->work_list)) {
-		work = list_first_entry(&cm_id_priv->work_list,
-					struct iwcm_work, list);
-		list_del_init(&work->list);
-		levent = work->event;
-		put_work(work);
-		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-
-		if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
-			ret = process_event(cm_id_priv, &levent);
-			if (ret) {
-				destroy_cm_id(&cm_id_priv->id);
-				WARN_ON_ONCE(iwcm_deref_id(cm_id_priv));
-			}
-		} else
-			pr_debug("dropping event %d\n", levent.event);
-		if (iwcm_deref_id(cm_id_priv))
-			return;
-		spin_lock_irqsave(&cm_id_priv->lock, flags);
-	}
+	levent = work->event;
+	put_work(work);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+
+	if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
+		ret = process_event(cm_id_priv, &levent);
+		if (ret) {
+			destroy_cm_id(&cm_id_priv->id);
+			WARN_ON_ONCE(iwcm_deref_id(cm_id_priv));
+		}
+	} else
+		pr_debug("dropping event %d\n", levent.event);
+	if (iwcm_deref_id(cm_id_priv))
+		return;
}
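
Applied, the handler becomes straight-line code: one work item, one event, one deref. Consolidated for readability (assembled from the hunk above, comments added):

static void cm_work_handler(struct work_struct *_work)
{
	struct iwcm_work *work = container_of(_work, struct iwcm_work, work);
	struct iw_cm_event levent;
	struct iwcm_id_private *cm_id_priv = work->cm_id;
	unsigned long flags;
	int ret = 0;

	/* snapshot the event and recycle the work entry under the lock */
	spin_lock_irqsave(&cm_id_priv->lock, flags);
	levent = work->event;
	put_work(work);
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
		ret = process_event(cm_id_priv, &levent);
		if (ret) {
			/* the callback asked for teardown; this deref must
			 * not be the last one, since the event being handled
			 * still holds a reference */
			destroy_cm_id(&cm_id_priv->id);
			WARN_ON_ONCE(iwcm_deref_id(cm_id_priv));
		}
	} else
		pr_debug("dropping event %d\n", levent.event);

	/* drop this event's reference; cm_id_priv may be freed here */
	if (iwcm_deref_id(cm_id_priv))
		return;
}

Per-id ordering now rests on iwcm_wq rather than on the private list; that holds as long as iwcm_wq is the ordered workqueue the file already allocates (assumed unchanged by this patch).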
/*
 * This function is called in interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
- * the CM and/or block. Events are queued to a per-CM_ID
- * work_list. If this is the first event on the work_list, the work
- * element is also queued on the iwcm_wq thread.
+ * the CM and/or block.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * destroyed.
 */
	}
	refcount_inc(&cm_id_priv->refcount);
-	list_add_tail(&work->list, &cm_id_priv->work_list);
	queue_work(iwcm_wq, &work->work);
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
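
For the full picture, the post-patch queuing path reads roughly as follows. Everything outside the hunk (the signature, declarations, and the private-data handling) is reconstructed from the surrounding driver source and may differ in detail:

static int cm_event_handler(struct iw_cm_id *cm_id,
			    struct iw_cm_event *iw_event)
{
	struct iwcm_work *work;
	struct iwcm_id_private *cm_id_priv;
	unsigned long flags;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	work = get_work(cm_id_priv);
	if (!work) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&work->work, cm_work_handler);
	work->cm_id = cm_id_priv;
	work->event = *iw_event;

	/* connect-request/reply events may carry private data that is
	 * copied out of the provider's buffer here (elided) */

	refcount_inc(&cm_id_priv->refcount);	/* reference held by the event */
	queue_work(iwcm_wq, &work->work);	/* one work item per event */
out:
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);
	return ret;
}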