1 From d050ffb922c782f092234611b9019e95024481ab Mon Sep 17 00:00:00 2001
2 From: Nicholas Bellinger <nab@linux-iscsi.org>
3 Date: Wed, 28 Sep 2011 21:37:29 -0700
4 Subject: target: Re-org of core_tmr_lun_reset
6 From: Nicholas Bellinger <nab@linux-iscsi.org>
8 commit d050ffb922c782f092234611b9019e95024481ab upstream.
10 This patch is a re-organization of core_tmr_lun_reset() logic to properly
11 scan the active tmr_list, dev->state_task_list and qobj->qobj_list w/ the
12 relevant locks held, and performing a list_move_tail onto separate local
13 scope lists before performing the full drain.
15 This involves breaking out the code into three separate list specific
16 functions: core_tmr_drain_tmr_list(), core_tmr_drain_task_list() and
17 core_tmr_drain_cmd_list().
19 (nab: Include target: Remove non-active tasks from execute list during
20 LUN_RESET patch to address original breakage)
22 Reported-by: Roland Dreier <roland@purestorage.com>
23 Cc: Roland Dreier <roland@purestorage.com>
24 Cc: Christoph Hellwig <hch@lst.de>
25 Signed-off-by: Nicholas Bellinger <nab@risingtidesystems.com>
26 Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
29 drivers/target/target_core_tmr.c | 197 ++++++++++++++++++++++++---------------
30 1 file changed, 125 insertions(+), 72 deletions(-)
32 --- a/drivers/target/target_core_tmr.c
33 +++ b/drivers/target/target_core_tmr.c
34 @@ -67,15 +67,16 @@ void core_tmr_release_req(
35 struct se_tmr_req *tmr)
37 struct se_device *dev = tmr->tmr_dev;
38 + unsigned long flags;
41 kmem_cache_free(se_tmr_req_cache, tmr);
45 - spin_lock_irq(&dev->se_tmr_lock);
46 + spin_lock_irqsave(&dev->se_tmr_lock, flags);
47 list_del(&tmr->tmr_list);
48 - spin_unlock_irq(&dev->se_tmr_lock);
49 + spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
51 kmem_cache_free(se_tmr_req_cache, tmr);
53 @@ -100,54 +101,20 @@ static void core_tmr_handle_tas_abort(
54 transport_cmd_finish_abort(cmd, 0);
57 -int core_tmr_lun_reset(
58 +static void core_tmr_drain_tmr_list(
59 struct se_device *dev,
60 struct se_tmr_req *tmr,
61 - struct list_head *preempt_and_abort_list,
62 - struct se_cmd *prout_cmd)
63 + struct list_head *preempt_and_abort_list)
65 - struct se_cmd *cmd, *tcmd;
66 - struct se_node_acl *tmr_nacl = NULL;
67 - struct se_portal_group *tmr_tpg = NULL;
68 - struct se_queue_obj *qobj = &dev->dev_queue_obj;
69 + LIST_HEAD(drain_tmr_list);
70 struct se_tmr_req *tmr_p, *tmr_pp;
71 - struct se_task *task, *task_tmp;
76 - * TASK_ABORTED status bit, this is configurable via ConfigFS
77 - * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
79 - * A task aborted status (TAS) bit set to zero specifies that aborted
80 - * tasks shall be terminated by the device server without any response
81 - * to the application client. A TAS bit set to one specifies that tasks
82 - * aborted by the actions of an I_T nexus other than the I_T nexus on
83 - * which the command was received shall be completed with TASK ABORTED
84 - * status (see SAM-4).
86 - tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
88 - * Determine if this se_tmr is coming from a $FABRIC_MOD
89 - * or struct se_device passthrough..
91 - if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
92 - tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
93 - tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
94 - if (tmr_nacl && tmr_tpg) {
95 - pr_debug("LUN_RESET: TMR caller fabric: %s"
96 - " initiator port %s\n",
97 - tmr_tpg->se_tpg_tfo->get_fabric_name(),
98 - tmr_nacl->initiatorname);
101 - pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
102 - (preempt_and_abort_list) ? "Preempt" : "TMR",
103 - dev->transport->name, tas);
105 * Release all pending and outgoing TMRs aside from the received
108 - spin_lock_irq(&dev->se_tmr_lock);
109 + spin_lock_irqsave(&dev->se_tmr_lock, flags);
110 list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
112 * Allow the received TMR to return with FUNCTION_COMPLETE.
113 @@ -169,29 +136,48 @@ int core_tmr_lun_reset(
114 (core_scsi3_check_cdb_abort_and_preempt(
115 preempt_and_abort_list, cmd) != 0))
117 - spin_unlock_irq(&dev->se_tmr_lock);
119 - spin_lock_irqsave(&cmd->t_state_lock, flags);
120 + spin_lock(&cmd->t_state_lock);
121 if (!atomic_read(&cmd->t_transport_active)) {
122 - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
123 - spin_lock_irq(&dev->se_tmr_lock);
124 + spin_unlock(&cmd->t_state_lock);
127 if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
128 - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
129 - spin_lock_irq(&dev->se_tmr_lock);
130 + spin_unlock(&cmd->t_state_lock);
133 + spin_unlock(&cmd->t_state_lock);
135 + list_move_tail(&tmr->tmr_list, &drain_tmr_list);
137 + spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
139 + while (!list_empty(&drain_tmr_list)) {
140 + tmr = list_entry(drain_tmr_list.next, struct se_tmr_req, tmr_list);
141 + list_del(&tmr->tmr_list);
142 + cmd = tmr_p->task_cmd;
144 pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
145 " Response: 0x%02x, t_state: %d\n",
146 - (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
147 - tmr_p->function, tmr_p->response, cmd->t_state);
148 - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
149 + (preempt_and_abort_list) ? "Preempt" : "", tmr,
150 + tmr->function, tmr->response, cmd->t_state);
152 transport_cmd_finish_abort_tmr(cmd);
153 - spin_lock_irq(&dev->se_tmr_lock);
155 - spin_unlock_irq(&dev->se_tmr_lock);
158 +static void core_tmr_drain_task_list(
159 + struct se_device *dev,
160 + struct se_cmd *prout_cmd,
161 + struct se_node_acl *tmr_nacl,
163 + struct list_head *preempt_and_abort_list)
165 + LIST_HEAD(drain_task_list);
166 + struct se_cmd *cmd;
167 + struct se_task *task, *task_tmp;
168 + unsigned long flags;
171 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
172 * This is following sam4r17, section 5.6 Aborting commands, Table 38
173 @@ -236,9 +222,23 @@ int core_tmr_lun_reset(
174 if (prout_cmd == cmd)
177 - list_del(&task->t_state_list);
178 + list_move_tail(&task->t_state_list, &drain_task_list);
179 atomic_set(&task->task_state_active, 0);
180 - spin_unlock_irqrestore(&dev->execute_task_lock, flags);
182 + * Remove from task execute list before processing drain_task_list
184 + if (atomic_read(&task->task_execute_queue) != 0) {
185 + list_del(&task->t_execute_list);
186 + atomic_set(&task->task_execute_queue, 0);
187 + atomic_dec(&dev->execute_tasks);
190 + spin_unlock_irqrestore(&dev->execute_task_lock, flags);
192 + while (!list_empty(&drain_task_list)) {
193 + task = list_entry(drain_task_list.next, struct se_task, t_state_list);
194 + list_del(&task->t_state_list);
195 + cmd = task->task_se_cmd;
197 spin_lock_irqsave(&cmd->t_state_lock, flags);
198 pr_debug("LUN_RESET: %s cmd: %p task: %p"
199 @@ -275,20 +275,14 @@ int core_tmr_lun_reset(
201 atomic_set(&task->task_active, 0);
202 atomic_set(&task->task_stop, 0);
204 - if (atomic_read(&task->task_execute_queue) != 0)
205 - transport_remove_task_from_execute_queue(task, dev);
207 __transport_stop_task_timer(task, &flags);
209 if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
210 - spin_unlock_irqrestore(
211 - &cmd->t_state_lock, flags);
212 + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
213 pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
214 " t_task_cdbs_ex_left: %d\n", task, dev,
215 atomic_read(&cmd->t_task_cdbs_ex_left));
217 - spin_lock_irqsave(&dev->execute_task_lock, flags);
220 fe_count = atomic_read(&cmd->t_fe_count);
221 @@ -298,22 +292,31 @@ int core_tmr_lun_reset(
222 " task: %p, t_fe_count: %d dev: %p\n", task,
224 atomic_set(&cmd->t_transport_aborted, 1);
225 - spin_unlock_irqrestore(&cmd->t_state_lock,
227 - core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
228 + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
230 - spin_lock_irqsave(&dev->execute_task_lock, flags);
231 + core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
234 pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
235 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
236 atomic_set(&cmd->t_transport_aborted, 1);
237 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
238 - core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
240 - spin_lock_irqsave(&dev->execute_task_lock, flags);
241 + core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
243 - spin_unlock_irqrestore(&dev->execute_task_lock, flags);
246 +static void core_tmr_drain_cmd_list(
247 + struct se_device *dev,
248 + struct se_cmd *prout_cmd,
249 + struct se_node_acl *tmr_nacl,
251 + struct list_head *preempt_and_abort_list)
253 + LIST_HEAD(drain_cmd_list);
254 + struct se_queue_obj *qobj = &dev->dev_queue_obj;
255 + struct se_cmd *cmd, *tcmd;
256 + unsigned long flags;
258 * Release all commands remaining in the struct se_device cmd queue.
260 @@ -348,10 +351,15 @@ int core_tmr_lun_reset(
262 spin_unlock(&cmd->t_state_lock);
264 - atomic_dec(&cmd->t_transport_queue_active);
265 + atomic_set(&cmd->t_transport_queue_active, 0);
266 atomic_dec(&qobj->queue_cnt);
267 + list_move_tail(&cmd->se_queue_node, &drain_cmd_list);
269 + spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
271 + while (!list_empty(&drain_cmd_list)) {
272 + cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node);
273 list_del_init(&cmd->se_queue_node);
274 - spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
276 pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
277 " %d t_fe_count: %d\n", (preempt_and_abort_list) ?
278 @@ -364,9 +372,53 @@ int core_tmr_lun_reset(
280 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
281 atomic_read(&cmd->t_fe_count));
282 - spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
284 - spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
287 +int core_tmr_lun_reset(
288 + struct se_device *dev,
289 + struct se_tmr_req *tmr,
290 + struct list_head *preempt_and_abort_list,
291 + struct se_cmd *prout_cmd)
293 + struct se_node_acl *tmr_nacl = NULL;
294 + struct se_portal_group *tmr_tpg = NULL;
297 + * TASK_ABORTED status bit, this is configurable via ConfigFS
298 + * struct se_device attributes. spc4r17 section 7.4.6 Control mode page
300 + * A task aborted status (TAS) bit set to zero specifies that aborted
301 + * tasks shall be terminated by the device server without any response
302 + * to the application client. A TAS bit set to one specifies that tasks
303 + * aborted by the actions of an I_T nexus other than the I_T nexus on
304 + * which the command was received shall be completed with TASK ABORTED
305 + * status (see SAM-4).
307 + tas = dev->se_sub_dev->se_dev_attrib.emulate_tas;
309 + * Determine if this se_tmr is coming from a $FABRIC_MOD
310 + * or struct se_device passthrough..
312 + if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
313 + tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
314 + tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
315 + if (tmr_nacl && tmr_tpg) {
316 + pr_debug("LUN_RESET: TMR caller fabric: %s"
317 + " initiator port %s\n",
318 + tmr_tpg->se_tpg_tfo->get_fabric_name(),
319 + tmr_nacl->initiatorname);
322 + pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
323 + (preempt_and_abort_list) ? "Preempt" : "TMR",
324 + dev->transport->name, tas);
326 + core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
327 + core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas,
328 + preempt_and_abort_list);
329 + core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas,
330 + preempt_and_abort_list);
332 * Clear any legacy SPC-2 reservation when called during
334 @@ -389,3 +441,4 @@ int core_tmr_lun_reset(
335 dev->transport->name);