]>
Commit | Line | Data |
---|---|---|
42e9a92f RL |
1 | /* |
2 | * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify it | |
5 | * under the terms and conditions of the GNU General Public License, | |
6 | * version 2, as published by the Free Software Foundation. | |
7 | * | |
8 | * This program is distributed in the hope it will be useful, but WITHOUT | |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
11 | * more details. | |
12 | * | |
13 | * You should have received a copy of the GNU General Public License along with | |
14 | * this program; if not, write to the Free Software Foundation, Inc., | |
15 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | |
16 | * | |
17 | * Maintained at www.Open-FCoE.org | |
18 | */ | |
19 | ||
20 | /* | |
21 | * RPORT GENERAL INFO | |
22 | * | |
23 | * This file contains all processing regarding fc_rports. It contains the | |
24 | * rport state machine and does all rport interaction with the transport class. | |
25 | * There should be no other places in libfc that interact directly with the | |
26 | * transport class in regards to adding and deleting rports. | |
27 | * | |
28 | * fc_rport's represent N_Port's within the fabric. | |
29 | */ | |
30 | ||
31 | /* | |
32 | * RPORT LOCKING | |
33 | * | |
34 | * The rport should never hold the rport mutex and then attempt to acquire | |
35 | * either the lport or disc mutexes. The rport's mutex is considered lesser | |
36 | * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for | |
 37 | * more comments on the hierarchy. | |
38 | * | |
39 | * The locking strategy is similar to the lport's strategy. The lock protects | |
40 | * the rport's states and is held and released by the entry points to the rport | |
41 | * block. All _enter_* functions correspond to rport states and expect the rport | |
42 | * mutex to be locked before calling them. This means that rports only handle | |
43 | * one request or response at a time, since they're not critical for the I/O | |
44 | * path this potential over-use of the mutex is acceptable. | |
45 | */ | |
46 | ||
47 | #include <linux/kernel.h> | |
48 | #include <linux/spinlock.h> | |
49 | #include <linux/interrupt.h> | |
50 | #include <linux/rcupdate.h> | |
51 | #include <linux/timer.h> | |
52 | #include <linux/workqueue.h> | |
53 | #include <asm/unaligned.h> | |
54 | ||
55 | #include <scsi/libfc.h> | |
56 | #include <scsi/fc_encode.h> | |
57 | ||
/* Module-wide debug switch; when nonzero, FC_DEBUG_RPORT() emits via FC_DBG(). */
static int fc_rport_debug;

/*
 * Conditional debug print: forwards its printf-style arguments to FC_DBG()
 * only while rport debugging is enabled.
 */
#define FC_DEBUG_RPORT(fmt...)				\
	do {						\
		if (fc_rport_debug)			\
			FC_DBG(fmt);			\
	} while (0)

/* Workqueue on which deferred rport event work items (fc_rport_work) run. */
struct workqueue_struct *rport_event_queue;
67 | ||
/* State-entry routines; each expects the rport mutex to be held. */
static void fc_rport_enter_plogi(struct fc_rport *);
static void fc_rport_enter_prli(struct fc_rport *);
static void fc_rport_enter_rtv(struct fc_rport *);
static void fc_rport_enter_ready(struct fc_rport *);
static void fc_rport_enter_logo(struct fc_rport *);

/* Handlers for ELS requests received from the remote port. */
static void fc_rport_recv_plogi_req(struct fc_rport *,
				    struct fc_seq *, struct fc_frame *);
static void fc_rport_recv_prli_req(struct fc_rport *,
				   struct fc_seq *, struct fc_frame *);
static void fc_rport_recv_prlo_req(struct fc_rport *,
				   struct fc_seq *, struct fc_frame *);
static void fc_rport_recv_logo_req(struct fc_rport *,
				   struct fc_seq *, struct fc_frame *);

/* Retry timer, error handling, and deferred event processing. */
static void fc_rport_timeout(struct work_struct *);
static void fc_rport_error(struct fc_rport *, struct fc_frame *);
static void fc_rport_error_retry(struct fc_rport *, struct fc_frame *);
static void fc_rport_work(struct work_struct *);
86 | ||
87 | static const char *fc_rport_state_names[] = { | |
88 | [RPORT_ST_NONE] = "None", | |
89 | [RPORT_ST_INIT] = "Init", | |
90 | [RPORT_ST_PLOGI] = "PLOGI", | |
91 | [RPORT_ST_PRLI] = "PRLI", | |
92 | [RPORT_ST_RTV] = "RTV", | |
93 | [RPORT_ST_READY] = "Ready", | |
94 | [RPORT_ST_LOGO] = "LOGO", | |
95 | }; | |
96 | ||
97 | static void fc_rport_rogue_destroy(struct device *dev) | |
98 | { | |
99 | struct fc_rport *rport = dev_to_rport(dev); | |
100 | FC_DEBUG_RPORT("Destroying rogue rport (%6x)\n", rport->port_id); | |
101 | kfree(rport); | |
102 | } | |
103 | ||
104 | struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp) | |
105 | { | |
106 | struct fc_rport *rport; | |
107 | struct fc_rport_libfc_priv *rdata; | |
108 | rport = kzalloc(sizeof(*rport) + sizeof(*rdata), GFP_KERNEL); | |
109 | ||
110 | if (!rport) | |
111 | return NULL; | |
112 | ||
113 | rdata = RPORT_TO_PRIV(rport); | |
114 | ||
115 | rport->dd_data = rdata; | |
116 | rport->port_id = dp->ids.port_id; | |
117 | rport->port_name = dp->ids.port_name; | |
118 | rport->node_name = dp->ids.node_name; | |
119 | rport->roles = dp->ids.roles; | |
120 | rport->maxframe_size = FC_MIN_MAX_PAYLOAD; | |
121 | /* | |
122 | * Note: all this libfc rogue rport code will be removed for | |
123 | * upstream so it fine that this is really ugly and hacky right now. | |
124 | */ | |
125 | device_initialize(&rport->dev); | |
126 | rport->dev.release = fc_rport_rogue_destroy; | |
127 | ||
128 | mutex_init(&rdata->rp_mutex); | |
129 | rdata->local_port = dp->lp; | |
130 | rdata->trans_state = FC_PORTSTATE_ROGUE; | |
131 | rdata->rp_state = RPORT_ST_INIT; | |
132 | rdata->event = RPORT_EV_NONE; | |
133 | rdata->flags = FC_RP_FLAGS_REC_SUPPORTED; | |
134 | rdata->ops = NULL; | |
135 | rdata->e_d_tov = dp->lp->e_d_tov; | |
136 | rdata->r_a_tov = dp->lp->r_a_tov; | |
137 | INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout); | |
138 | INIT_WORK(&rdata->event_work, fc_rport_work); | |
139 | /* | |
140 | * For good measure, but not necessary as we should only | |
141 | * add REAL rport to the lport list. | |
142 | */ | |
143 | INIT_LIST_HEAD(&rdata->peers); | |
144 | ||
145 | return rport; | |
146 | } | |
147 | ||
148 | /** | |
34f42a07 | 149 | * fc_rport_state() - return a string for the state the rport is in |
42e9a92f RL |
150 | * @rport: The rport whose state we want to get a string for |
151 | */ | |
152 | static const char *fc_rport_state(struct fc_rport *rport) | |
153 | { | |
154 | const char *cp; | |
155 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
156 | ||
157 | cp = fc_rport_state_names[rdata->rp_state]; | |
158 | if (!cp) | |
159 | cp = "Unknown"; | |
160 | return cp; | |
161 | } | |
162 | ||
163 | /** | |
34f42a07 | 164 | * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds. |
42e9a92f RL |
165 | * @rport: Pointer to Fibre Channel remote port structure |
166 | * @timeout: timeout in seconds | |
167 | */ | |
168 | void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout) | |
169 | { | |
170 | if (timeout) | |
171 | rport->dev_loss_tmo = timeout + 5; | |
172 | else | |
173 | rport->dev_loss_tmo = 30; | |
174 | } | |
175 | EXPORT_SYMBOL(fc_set_rport_loss_tmo); | |
176 | ||
177 | /** | |
34f42a07 | 178 | * fc_plogi_get_maxframe() - Get max payload from the common service parameters |
42e9a92f RL |
179 | * @flp: FLOGI payload structure |
180 | * @maxval: upper limit, may be less than what is in the service parameters | |
181 | */ | |
b2ab99c9 RL |
182 | static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp, |
183 | unsigned int maxval) | |
42e9a92f RL |
184 | { |
185 | unsigned int mfs; | |
186 | ||
187 | /* | |
188 | * Get max payload from the common service parameters and the | |
189 | * class 3 receive data field size. | |
190 | */ | |
191 | mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK; | |
192 | if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval) | |
193 | maxval = mfs; | |
194 | mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs); | |
195 | if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval) | |
196 | maxval = mfs; | |
197 | return maxval; | |
198 | } | |
199 | ||
200 | /** | |
34f42a07 | 201 | * fc_rport_state_enter() - Change the rport's state |
42e9a92f RL |
202 | * @rport: The rport whose state should change |
203 | * @new: The new state of the rport | |
204 | * | |
205 | * Locking Note: Called with the rport lock held | |
206 | */ | |
207 | static void fc_rport_state_enter(struct fc_rport *rport, | |
208 | enum fc_rport_state new) | |
209 | { | |
210 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
211 | if (rdata->rp_state != new) | |
212 | rdata->retries = 0; | |
213 | rdata->rp_state = new; | |
214 | } | |
215 | ||
216 | static void fc_rport_work(struct work_struct *work) | |
217 | { | |
571f824c | 218 | u32 port_id; |
42e9a92f RL |
219 | struct fc_rport_libfc_priv *rdata = |
220 | container_of(work, struct fc_rport_libfc_priv, event_work); | |
221 | enum fc_rport_event event; | |
222 | enum fc_rport_trans_state trans_state; | |
223 | struct fc_lport *lport = rdata->local_port; | |
224 | struct fc_rport_operations *rport_ops; | |
225 | struct fc_rport *rport = PRIV_TO_RPORT(rdata); | |
226 | ||
227 | mutex_lock(&rdata->rp_mutex); | |
228 | event = rdata->event; | |
229 | rport_ops = rdata->ops; | |
230 | ||
231 | if (event == RPORT_EV_CREATED) { | |
232 | struct fc_rport *new_rport; | |
233 | struct fc_rport_libfc_priv *new_rdata; | |
234 | struct fc_rport_identifiers ids; | |
235 | ||
236 | ids.port_id = rport->port_id; | |
237 | ids.roles = rport->roles; | |
238 | ids.port_name = rport->port_name; | |
239 | ids.node_name = rport->node_name; | |
240 | ||
241 | mutex_unlock(&rdata->rp_mutex); | |
242 | ||
243 | new_rport = fc_remote_port_add(lport->host, 0, &ids); | |
244 | if (new_rport) { | |
245 | /* | |
246 | * Switch from the rogue rport to the rport | |
247 | * returned by the FC class. | |
248 | */ | |
249 | new_rport->maxframe_size = rport->maxframe_size; | |
250 | ||
251 | new_rdata = new_rport->dd_data; | |
252 | new_rdata->e_d_tov = rdata->e_d_tov; | |
253 | new_rdata->r_a_tov = rdata->r_a_tov; | |
254 | new_rdata->ops = rdata->ops; | |
255 | new_rdata->local_port = rdata->local_port; | |
256 | new_rdata->flags = FC_RP_FLAGS_REC_SUPPORTED; | |
257 | new_rdata->trans_state = FC_PORTSTATE_REAL; | |
258 | mutex_init(&new_rdata->rp_mutex); | |
259 | INIT_DELAYED_WORK(&new_rdata->retry_work, | |
260 | fc_rport_timeout); | |
261 | INIT_LIST_HEAD(&new_rdata->peers); | |
262 | INIT_WORK(&new_rdata->event_work, fc_rport_work); | |
263 | ||
264 | fc_rport_state_enter(new_rport, RPORT_ST_READY); | |
265 | } else { | |
266 | FC_DBG("Failed to create the rport for port " | |
267 | "(%6x).\n", ids.port_id); | |
268 | event = RPORT_EV_FAILED; | |
269 | } | |
270 | put_device(&rport->dev); | |
271 | rport = new_rport; | |
272 | rdata = new_rport->dd_data; | |
273 | if (rport_ops->event_callback) | |
274 | rport_ops->event_callback(lport, rport, event); | |
275 | } else if ((event == RPORT_EV_FAILED) || | |
276 | (event == RPORT_EV_LOGO) || | |
277 | (event == RPORT_EV_STOP)) { | |
278 | trans_state = rdata->trans_state; | |
279 | mutex_unlock(&rdata->rp_mutex); | |
280 | if (rport_ops->event_callback) | |
281 | rport_ops->event_callback(lport, rport, event); | |
282 | if (trans_state == FC_PORTSTATE_ROGUE) | |
283 | put_device(&rport->dev); | |
571f824c AJ |
284 | else { |
285 | port_id = rport->port_id; | |
42e9a92f | 286 | fc_remote_port_delete(rport); |
571f824c AJ |
287 | lport->tt.exch_mgr_reset(lport, 0, port_id); |
288 | lport->tt.exch_mgr_reset(lport, port_id, 0); | |
289 | } | |
42e9a92f RL |
290 | } else |
291 | mutex_unlock(&rdata->rp_mutex); | |
292 | } | |
293 | ||
294 | /** | |
34f42a07 | 295 | * fc_rport_login() - Start the remote port login state machine |
42e9a92f RL |
296 | * @rport: Fibre Channel remote port |
297 | * | |
298 | * Locking Note: Called without the rport lock held. This | |
299 | * function will hold the rport lock, call an _enter_* | |
300 | * function and then unlock the rport. | |
301 | */ | |
302 | int fc_rport_login(struct fc_rport *rport) | |
303 | { | |
304 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
305 | ||
306 | mutex_lock(&rdata->rp_mutex); | |
307 | ||
308 | FC_DEBUG_RPORT("Login to port (%6x)\n", rport->port_id); | |
309 | ||
310 | fc_rport_enter_plogi(rport); | |
311 | ||
312 | mutex_unlock(&rdata->rp_mutex); | |
313 | ||
314 | return 0; | |
315 | } | |
316 | ||
317 | /** | |
34f42a07 | 318 | * fc_rport_logoff() - Logoff and remove an rport |
42e9a92f RL |
319 | * @rport: Fibre Channel remote port to be removed |
320 | * | |
321 | * Locking Note: Called without the rport lock held. This | |
322 | * function will hold the rport lock, call an _enter_* | |
323 | * function and then unlock the rport. | |
324 | */ | |
325 | int fc_rport_logoff(struct fc_rport *rport) | |
326 | { | |
327 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
328 | ||
329 | mutex_lock(&rdata->rp_mutex); | |
330 | ||
331 | FC_DEBUG_RPORT("Remove port (%6x)\n", rport->port_id); | |
332 | ||
333 | fc_rport_enter_logo(rport); | |
334 | ||
335 | /* | |
336 | * Change the state to NONE so that we discard | |
337 | * the response. | |
338 | */ | |
339 | fc_rport_state_enter(rport, RPORT_ST_NONE); | |
340 | ||
341 | mutex_unlock(&rdata->rp_mutex); | |
342 | ||
343 | cancel_delayed_work_sync(&rdata->retry_work); | |
344 | ||
345 | mutex_lock(&rdata->rp_mutex); | |
346 | ||
347 | rdata->event = RPORT_EV_STOP; | |
348 | queue_work(rport_event_queue, &rdata->event_work); | |
349 | ||
350 | mutex_unlock(&rdata->rp_mutex); | |
351 | ||
352 | return 0; | |
353 | } | |
354 | ||
355 | /** | |
34f42a07 | 356 | * fc_rport_enter_ready() - The rport is ready |
42e9a92f RL |
357 | * @rport: Fibre Channel remote port that is ready |
358 | * | |
359 | * Locking Note: The rport lock is expected to be held before calling | |
360 | * this routine. | |
361 | */ | |
362 | static void fc_rport_enter_ready(struct fc_rport *rport) | |
363 | { | |
364 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
365 | ||
366 | fc_rport_state_enter(rport, RPORT_ST_READY); | |
367 | ||
368 | FC_DEBUG_RPORT("Port (%6x) is Ready\n", rport->port_id); | |
369 | ||
370 | rdata->event = RPORT_EV_CREATED; | |
371 | queue_work(rport_event_queue, &rdata->event_work); | |
372 | } | |
373 | ||
374 | /** | |
34f42a07 | 375 | * fc_rport_timeout() - Handler for the retry_work timer. |
42e9a92f RL |
376 | * @work: The work struct of the fc_rport_libfc_priv |
377 | * | |
378 | * Locking Note: Called without the rport lock held. This | |
379 | * function will hold the rport lock, call an _enter_* | |
380 | * function and then unlock the rport. | |
381 | */ | |
382 | static void fc_rport_timeout(struct work_struct *work) | |
383 | { | |
384 | struct fc_rport_libfc_priv *rdata = | |
385 | container_of(work, struct fc_rport_libfc_priv, retry_work.work); | |
386 | struct fc_rport *rport = PRIV_TO_RPORT(rdata); | |
387 | ||
388 | mutex_lock(&rdata->rp_mutex); | |
389 | ||
390 | switch (rdata->rp_state) { | |
391 | case RPORT_ST_PLOGI: | |
392 | fc_rport_enter_plogi(rport); | |
393 | break; | |
394 | case RPORT_ST_PRLI: | |
395 | fc_rport_enter_prli(rport); | |
396 | break; | |
397 | case RPORT_ST_RTV: | |
398 | fc_rport_enter_rtv(rport); | |
399 | break; | |
400 | case RPORT_ST_LOGO: | |
401 | fc_rport_enter_logo(rport); | |
402 | break; | |
403 | case RPORT_ST_READY: | |
404 | case RPORT_ST_INIT: | |
405 | case RPORT_ST_NONE: | |
406 | break; | |
407 | } | |
408 | ||
409 | mutex_unlock(&rdata->rp_mutex); | |
410 | put_device(&rport->dev); | |
411 | } | |
412 | ||
413 | /** | |
34f42a07 | 414 | * fc_rport_error() - Error handler, called once retries have been exhausted |
42e9a92f RL |
415 | * @rport: The fc_rport object |
416 | * @fp: The frame pointer | |
417 | * | |
42e9a92f RL |
418 | * Locking Note: The rport lock is expected to be held before |
419 | * calling this routine | |
420 | */ | |
421 | static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp) | |
422 | { | |
423 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
42e9a92f RL |
424 | |
425 | FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n", | |
426 | PTR_ERR(fp), fc_rport_state(rport), rdata->retries); | |
427 | ||
6755db1c CL |
428 | switch (rdata->rp_state) { |
429 | case RPORT_ST_PLOGI: | |
430 | case RPORT_ST_PRLI: | |
431 | case RPORT_ST_LOGO: | |
432 | rdata->event = RPORT_EV_FAILED; | |
433 | queue_work(rport_event_queue, | |
434 | &rdata->event_work); | |
435 | break; | |
436 | case RPORT_ST_RTV: | |
437 | fc_rport_enter_ready(rport); | |
438 | break; | |
439 | case RPORT_ST_NONE: | |
440 | case RPORT_ST_READY: | |
441 | case RPORT_ST_INIT: | |
442 | break; | |
42e9a92f RL |
443 | } |
444 | } | |
445 | ||
6755db1c | 446 | /** |
34f42a07 | 447 | * fc_rport_error_retry() - Error handler when retries are desired |
6755db1c CL |
448 | * @rport: The fc_rport object |
449 | * @fp: The frame pointer | |
450 | * | |
451 | * If the error was an exchange timeout retry immediately, | |
452 | * otherwise wait for E_D_TOV. | |
453 | * | |
454 | * Locking Note: The rport lock is expected to be held before | |
455 | * calling this routine | |
456 | */ | |
457 | static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp) | |
458 | { | |
459 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
460 | unsigned long delay = FC_DEF_E_D_TOV; | |
461 | ||
462 | /* make sure this isn't an FC_EX_CLOSED error, never retry those */ | |
463 | if (PTR_ERR(fp) == -FC_EX_CLOSED) | |
464 | return fc_rport_error(rport, fp); | |
465 | ||
466 | if (rdata->retries < rdata->local_port->max_retry_count) { | |
467 | FC_DEBUG_RPORT("Error %ld in state %s, retrying\n", | |
468 | PTR_ERR(fp), fc_rport_state(rport)); | |
469 | rdata->retries++; | |
470 | /* no additional delay on exchange timeouts */ | |
471 | if (PTR_ERR(fp) == -FC_EX_TIMEOUT) | |
472 | delay = 0; | |
473 | get_device(&rport->dev); | |
474 | schedule_delayed_work(&rdata->retry_work, delay); | |
475 | return; | |
476 | } | |
477 | ||
478 | return fc_rport_error(rport, fp); | |
479 | } | |
480 | ||
42e9a92f | 481 | /** |
34f42a07 | 482 | * fc_rport_plogi_recv_resp() - Handle incoming ELS PLOGI response |
42e9a92f RL |
483 | * @sp: current sequence in the PLOGI exchange |
484 | * @fp: response frame | |
485 | * @rp_arg: Fibre Channel remote port | |
486 | * | |
487 | * Locking Note: This function will be called without the rport lock | |
488 | * held, but it will lock, call an _enter_* function or fc_rport_error | |
489 | * and then unlock the rport. | |
490 | */ | |
491 | static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |
492 | void *rp_arg) | |
493 | { | |
494 | struct fc_rport *rport = rp_arg; | |
495 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
496 | struct fc_lport *lport = rdata->local_port; | |
497 | struct fc_els_flogi *plp; | |
498 | unsigned int tov; | |
499 | u16 csp_seq; | |
500 | u16 cssp_seq; | |
501 | u8 op; | |
502 | ||
503 | mutex_lock(&rdata->rp_mutex); | |
504 | ||
505 | FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n", | |
506 | rport->port_id); | |
507 | ||
508 | if (rdata->rp_state != RPORT_ST_PLOGI) { | |
509 | FC_DBG("Received a PLOGI response, but in state %s\n", | |
510 | fc_rport_state(rport)); | |
76f6804e AJ |
511 | if (IS_ERR(fp)) |
512 | goto err; | |
42e9a92f RL |
513 | goto out; |
514 | } | |
515 | ||
76f6804e AJ |
516 | if (IS_ERR(fp)) { |
517 | fc_rport_error_retry(rport, fp); | |
518 | goto err; | |
519 | } | |
520 | ||
42e9a92f RL |
521 | op = fc_frame_payload_op(fp); |
522 | if (op == ELS_LS_ACC && | |
523 | (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) { | |
524 | rport->port_name = get_unaligned_be64(&plp->fl_wwpn); | |
525 | rport->node_name = get_unaligned_be64(&plp->fl_wwnn); | |
526 | ||
527 | tov = ntohl(plp->fl_csp.sp_e_d_tov); | |
528 | if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR) | |
529 | tov /= 1000; | |
530 | if (tov > rdata->e_d_tov) | |
531 | rdata->e_d_tov = tov; | |
532 | csp_seq = ntohs(plp->fl_csp.sp_tot_seq); | |
533 | cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq); | |
534 | if (cssp_seq < csp_seq) | |
535 | csp_seq = cssp_seq; | |
536 | rdata->max_seq = csp_seq; | |
537 | rport->maxframe_size = | |
538 | fc_plogi_get_maxframe(plp, lport->mfs); | |
539 | ||
540 | /* | |
541 | * If the rport is one of the well known addresses | |
542 | * we skip PRLI and RTV and go straight to READY. | |
543 | */ | |
544 | if (rport->port_id >= FC_FID_DOM_MGR) | |
545 | fc_rport_enter_ready(rport); | |
546 | else | |
547 | fc_rport_enter_prli(rport); | |
548 | } else | |
6755db1c | 549 | fc_rport_error_retry(rport, fp); |
42e9a92f RL |
550 | |
551 | out: | |
552 | fc_frame_free(fp); | |
553 | err: | |
554 | mutex_unlock(&rdata->rp_mutex); | |
555 | put_device(&rport->dev); | |
556 | } | |
557 | ||
558 | /** | |
34f42a07 | 559 | * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer |
42e9a92f RL |
560 | * @rport: Fibre Channel remote port to send PLOGI to |
561 | * | |
562 | * Locking Note: The rport lock is expected to be held before calling | |
563 | * this routine. | |
564 | */ | |
565 | static void fc_rport_enter_plogi(struct fc_rport *rport) | |
566 | { | |
567 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
568 | struct fc_lport *lport = rdata->local_port; | |
569 | struct fc_frame *fp; | |
570 | ||
571 | FC_DEBUG_RPORT("Port (%6x) entered PLOGI state from %s state\n", | |
572 | rport->port_id, fc_rport_state(rport)); | |
573 | ||
574 | fc_rport_state_enter(rport, RPORT_ST_PLOGI); | |
575 | ||
576 | rport->maxframe_size = FC_MIN_MAX_PAYLOAD; | |
577 | fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); | |
578 | if (!fp) { | |
6755db1c | 579 | fc_rport_error_retry(rport, fp); |
42e9a92f RL |
580 | return; |
581 | } | |
582 | rdata->e_d_tov = lport->e_d_tov; | |
583 | ||
584 | if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI, | |
585 | fc_rport_plogi_resp, rport, lport->e_d_tov)) | |
6755db1c | 586 | fc_rport_error_retry(rport, fp); |
42e9a92f RL |
587 | else |
588 | get_device(&rport->dev); | |
589 | } | |
590 | ||
591 | /** | |
34f42a07 | 592 | * fc_rport_prli_resp() - Process Login (PRLI) response handler |
42e9a92f RL |
593 | * @sp: current sequence in the PRLI exchange |
594 | * @fp: response frame | |
595 | * @rp_arg: Fibre Channel remote port | |
596 | * | |
597 | * Locking Note: This function will be called without the rport lock | |
598 | * held, but it will lock, call an _enter_* function or fc_rport_error | |
599 | * and then unlock the rport. | |
600 | */ | |
601 | static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp, | |
602 | void *rp_arg) | |
603 | { | |
604 | struct fc_rport *rport = rp_arg; | |
605 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
606 | struct { | |
607 | struct fc_els_prli prli; | |
608 | struct fc_els_spp spp; | |
609 | } *pp; | |
610 | u32 roles = FC_RPORT_ROLE_UNKNOWN; | |
611 | u32 fcp_parm = 0; | |
612 | u8 op; | |
613 | ||
614 | mutex_lock(&rdata->rp_mutex); | |
615 | ||
616 | FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n", | |
617 | rport->port_id); | |
618 | ||
619 | if (rdata->rp_state != RPORT_ST_PRLI) { | |
620 | FC_DBG("Received a PRLI response, but in state %s\n", | |
621 | fc_rport_state(rport)); | |
76f6804e AJ |
622 | if (IS_ERR(fp)) |
623 | goto err; | |
42e9a92f RL |
624 | goto out; |
625 | } | |
626 | ||
76f6804e AJ |
627 | if (IS_ERR(fp)) { |
628 | fc_rport_error_retry(rport, fp); | |
629 | goto err; | |
630 | } | |
631 | ||
42e9a92f RL |
632 | op = fc_frame_payload_op(fp); |
633 | if (op == ELS_LS_ACC) { | |
634 | pp = fc_frame_payload_get(fp, sizeof(*pp)); | |
635 | if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) { | |
636 | fcp_parm = ntohl(pp->spp.spp_params); | |
637 | if (fcp_parm & FCP_SPPF_RETRY) | |
638 | rdata->flags |= FC_RP_FLAGS_RETRY; | |
639 | } | |
640 | ||
641 | rport->supported_classes = FC_COS_CLASS3; | |
642 | if (fcp_parm & FCP_SPPF_INIT_FCN) | |
643 | roles |= FC_RPORT_ROLE_FCP_INITIATOR; | |
644 | if (fcp_parm & FCP_SPPF_TARG_FCN) | |
645 | roles |= FC_RPORT_ROLE_FCP_TARGET; | |
646 | ||
647 | rport->roles = roles; | |
648 | fc_rport_enter_rtv(rport); | |
649 | ||
650 | } else { | |
651 | FC_DBG("Bad ELS response\n"); | |
652 | rdata->event = RPORT_EV_FAILED; | |
653 | queue_work(rport_event_queue, &rdata->event_work); | |
654 | } | |
655 | ||
656 | out: | |
657 | fc_frame_free(fp); | |
658 | err: | |
659 | mutex_unlock(&rdata->rp_mutex); | |
660 | put_device(&rport->dev); | |
661 | } | |
662 | ||
663 | /** | |
34f42a07 | 664 | * fc_rport_logo_resp() - Logout (LOGO) response handler |
42e9a92f RL |
665 | * @sp: current sequence in the LOGO exchange |
666 | * @fp: response frame | |
667 | * @rp_arg: Fibre Channel remote port | |
668 | * | |
669 | * Locking Note: This function will be called without the rport lock | |
670 | * held, but it will lock, call an _enter_* function or fc_rport_error | |
671 | * and then unlock the rport. | |
672 | */ | |
673 | static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp, | |
674 | void *rp_arg) | |
675 | { | |
676 | struct fc_rport *rport = rp_arg; | |
677 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
678 | u8 op; | |
679 | ||
680 | mutex_lock(&rdata->rp_mutex); | |
681 | ||
682 | FC_DEBUG_RPORT("Received a LOGO response from port (%6x)\n", | |
683 | rport->port_id); | |
684 | ||
42e9a92f RL |
685 | if (rdata->rp_state != RPORT_ST_LOGO) { |
686 | FC_DEBUG_RPORT("Received a LOGO response, but in state %s\n", | |
687 | fc_rport_state(rport)); | |
76f6804e AJ |
688 | if (IS_ERR(fp)) |
689 | goto err; | |
42e9a92f RL |
690 | goto out; |
691 | } | |
692 | ||
76f6804e AJ |
693 | if (IS_ERR(fp)) { |
694 | fc_rport_error_retry(rport, fp); | |
695 | goto err; | |
696 | } | |
697 | ||
42e9a92f RL |
698 | op = fc_frame_payload_op(fp); |
699 | if (op == ELS_LS_ACC) { | |
700 | fc_rport_enter_rtv(rport); | |
701 | } else { | |
702 | FC_DBG("Bad ELS response\n"); | |
703 | rdata->event = RPORT_EV_LOGO; | |
704 | queue_work(rport_event_queue, &rdata->event_work); | |
705 | } | |
706 | ||
707 | out: | |
708 | fc_frame_free(fp); | |
709 | err: | |
710 | mutex_unlock(&rdata->rp_mutex); | |
711 | put_device(&rport->dev); | |
712 | } | |
713 | ||
714 | /** | |
34f42a07 | 715 | * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer |
42e9a92f RL |
716 | * @rport: Fibre Channel remote port to send PRLI to |
717 | * | |
718 | * Locking Note: The rport lock is expected to be held before calling | |
719 | * this routine. | |
720 | */ | |
721 | static void fc_rport_enter_prli(struct fc_rport *rport) | |
722 | { | |
723 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
724 | struct fc_lport *lport = rdata->local_port; | |
725 | struct { | |
726 | struct fc_els_prli prli; | |
727 | struct fc_els_spp spp; | |
728 | } *pp; | |
729 | struct fc_frame *fp; | |
730 | ||
731 | FC_DEBUG_RPORT("Port (%6x) entered PRLI state from %s state\n", | |
732 | rport->port_id, fc_rport_state(rport)); | |
733 | ||
734 | fc_rport_state_enter(rport, RPORT_ST_PRLI); | |
735 | ||
736 | fp = fc_frame_alloc(lport, sizeof(*pp)); | |
737 | if (!fp) { | |
6755db1c | 738 | fc_rport_error_retry(rport, fp); |
42e9a92f RL |
739 | return; |
740 | } | |
741 | ||
742 | if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI, | |
743 | fc_rport_prli_resp, rport, lport->e_d_tov)) | |
6755db1c | 744 | fc_rport_error_retry(rport, fp); |
42e9a92f RL |
745 | else |
746 | get_device(&rport->dev); | |
747 | } | |
748 | ||
749 | /** | |
34f42a07 | 750 | * fc_rport_els_rtv_resp() - Request Timeout Value response handler |
42e9a92f RL |
751 | * @sp: current sequence in the RTV exchange |
752 | * @fp: response frame | |
753 | * @rp_arg: Fibre Channel remote port | |
754 | * | |
755 | * Many targets don't seem to support this. | |
756 | * | |
757 | * Locking Note: This function will be called without the rport lock | |
758 | * held, but it will lock, call an _enter_* function or fc_rport_error | |
759 | * and then unlock the rport. | |
760 | */ | |
761 | static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp, | |
762 | void *rp_arg) | |
763 | { | |
764 | struct fc_rport *rport = rp_arg; | |
765 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
766 | u8 op; | |
767 | ||
768 | mutex_lock(&rdata->rp_mutex); | |
769 | ||
770 | FC_DEBUG_RPORT("Received a RTV response from port (%6x)\n", | |
771 | rport->port_id); | |
772 | ||
773 | if (rdata->rp_state != RPORT_ST_RTV) { | |
774 | FC_DBG("Received a RTV response, but in state %s\n", | |
775 | fc_rport_state(rport)); | |
76f6804e AJ |
776 | if (IS_ERR(fp)) |
777 | goto err; | |
42e9a92f RL |
778 | goto out; |
779 | } | |
780 | ||
76f6804e AJ |
781 | if (IS_ERR(fp)) { |
782 | fc_rport_error(rport, fp); | |
783 | goto err; | |
784 | } | |
785 | ||
42e9a92f RL |
786 | op = fc_frame_payload_op(fp); |
787 | if (op == ELS_LS_ACC) { | |
788 | struct fc_els_rtv_acc *rtv; | |
789 | u32 toq; | |
790 | u32 tov; | |
791 | ||
792 | rtv = fc_frame_payload_get(fp, sizeof(*rtv)); | |
793 | if (rtv) { | |
794 | toq = ntohl(rtv->rtv_toq); | |
795 | tov = ntohl(rtv->rtv_r_a_tov); | |
796 | if (tov == 0) | |
797 | tov = 1; | |
798 | rdata->r_a_tov = tov; | |
799 | tov = ntohl(rtv->rtv_e_d_tov); | |
800 | if (toq & FC_ELS_RTV_EDRES) | |
801 | tov /= 1000000; | |
802 | if (tov == 0) | |
803 | tov = 1; | |
804 | rdata->e_d_tov = tov; | |
805 | } | |
806 | } | |
807 | ||
808 | fc_rport_enter_ready(rport); | |
809 | ||
810 | out: | |
811 | fc_frame_free(fp); | |
812 | err: | |
813 | mutex_unlock(&rdata->rp_mutex); | |
814 | put_device(&rport->dev); | |
815 | } | |
816 | ||
817 | /** | |
34f42a07 | 818 | * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer |
42e9a92f RL |
819 | * @rport: Fibre Channel remote port to send RTV to |
820 | * | |
821 | * Locking Note: The rport lock is expected to be held before calling | |
822 | * this routine. | |
823 | */ | |
824 | static void fc_rport_enter_rtv(struct fc_rport *rport) | |
825 | { | |
826 | struct fc_frame *fp; | |
827 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
828 | struct fc_lport *lport = rdata->local_port; | |
829 | ||
830 | FC_DEBUG_RPORT("Port (%6x) entered RTV state from %s state\n", | |
831 | rport->port_id, fc_rport_state(rport)); | |
832 | ||
833 | fc_rport_state_enter(rport, RPORT_ST_RTV); | |
834 | ||
835 | fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv)); | |
836 | if (!fp) { | |
6755db1c | 837 | fc_rport_error_retry(rport, fp); |
42e9a92f RL |
838 | return; |
839 | } | |
840 | ||
841 | if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV, | |
842 | fc_rport_rtv_resp, rport, lport->e_d_tov)) | |
6755db1c | 843 | fc_rport_error_retry(rport, fp); |
42e9a92f RL |
844 | else |
845 | get_device(&rport->dev); | |
846 | } | |
847 | ||
848 | /** | |
34f42a07 | 849 | * fc_rport_enter_logo() - Send Logout (LOGO) request to peer |
42e9a92f RL |
850 | * @rport: Fibre Channel remote port to send LOGO to |
851 | * | |
852 | * Locking Note: The rport lock is expected to be held before calling | |
853 | * this routine. | |
854 | */ | |
855 | static void fc_rport_enter_logo(struct fc_rport *rport) | |
856 | { | |
857 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
858 | struct fc_lport *lport = rdata->local_port; | |
859 | struct fc_frame *fp; | |
860 | ||
861 | FC_DEBUG_RPORT("Port (%6x) entered LOGO state from %s state\n", | |
862 | rport->port_id, fc_rport_state(rport)); | |
863 | ||
864 | fc_rport_state_enter(rport, RPORT_ST_LOGO); | |
865 | ||
866 | fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo)); | |
867 | if (!fp) { | |
6755db1c | 868 | fc_rport_error_retry(rport, fp); |
42e9a92f RL |
869 | return; |
870 | } | |
871 | ||
872 | if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO, | |
873 | fc_rport_logo_resp, rport, lport->e_d_tov)) | |
6755db1c | 874 | fc_rport_error_retry(rport, fp); |
42e9a92f RL |
875 | else |
876 | get_device(&rport->dev); | |
877 | } | |
878 | ||
879 | ||
880 | /** | |
34f42a07 | 881 | * fc_rport_recv_req() - Receive a request from a rport |
42e9a92f RL |
882 | * @sp: current sequence in the PLOGI exchange |
883 | * @fp: response frame | |
884 | * @rp_arg: Fibre Channel remote port | |
885 | * | |
886 | * Locking Note: Called without the rport lock held. This | |
887 | * function will hold the rport lock, call an _enter_* | |
888 | * function and then unlock the rport. | |
889 | */ | |
890 | void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp, | |
891 | struct fc_rport *rport) | |
892 | { | |
893 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
894 | struct fc_lport *lport = rdata->local_port; | |
895 | ||
896 | struct fc_frame_header *fh; | |
897 | struct fc_seq_els_data els_data; | |
898 | u8 op; | |
899 | ||
900 | mutex_lock(&rdata->rp_mutex); | |
901 | ||
902 | els_data.fp = NULL; | |
903 | els_data.explan = ELS_EXPL_NONE; | |
904 | els_data.reason = ELS_RJT_NONE; | |
905 | ||
906 | fh = fc_frame_header_get(fp); | |
907 | ||
908 | if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) { | |
909 | op = fc_frame_payload_op(fp); | |
910 | switch (op) { | |
911 | case ELS_PLOGI: | |
912 | fc_rport_recv_plogi_req(rport, sp, fp); | |
913 | break; | |
914 | case ELS_PRLI: | |
915 | fc_rport_recv_prli_req(rport, sp, fp); | |
916 | break; | |
917 | case ELS_PRLO: | |
918 | fc_rport_recv_prlo_req(rport, sp, fp); | |
919 | break; | |
920 | case ELS_LOGO: | |
921 | fc_rport_recv_logo_req(rport, sp, fp); | |
922 | break; | |
923 | case ELS_RRQ: | |
924 | els_data.fp = fp; | |
925 | lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data); | |
926 | break; | |
927 | case ELS_REC: | |
928 | els_data.fp = fp; | |
929 | lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data); | |
930 | break; | |
931 | default: | |
932 | els_data.reason = ELS_RJT_UNSUP; | |
933 | lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data); | |
934 | break; | |
935 | } | |
936 | } | |
937 | ||
938 | mutex_unlock(&rdata->rp_mutex); | |
939 | } | |
940 | ||
941 | /** | |
34f42a07 | 942 | * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request |
42e9a92f RL |
943 | * @rport: Fibre Channel remote port that initiated PLOGI |
944 | * @sp: current sequence in the PLOGI exchange | |
945 | * @fp: PLOGI request frame | |
946 | * | |
947 | * Locking Note: The rport lock is exected to be held before calling | |
948 | * this function. | |
949 | */ | |
950 | static void fc_rport_recv_plogi_req(struct fc_rport *rport, | |
951 | struct fc_seq *sp, struct fc_frame *rx_fp) | |
952 | { | |
953 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
954 | struct fc_lport *lport = rdata->local_port; | |
955 | struct fc_frame *fp = rx_fp; | |
956 | struct fc_exch *ep; | |
957 | struct fc_frame_header *fh; | |
958 | struct fc_els_flogi *pl; | |
959 | struct fc_seq_els_data rjt_data; | |
960 | u32 sid; | |
961 | u64 wwpn; | |
962 | u64 wwnn; | |
963 | enum fc_els_rjt_reason reject = 0; | |
964 | u32 f_ctl; | |
965 | rjt_data.fp = NULL; | |
966 | ||
967 | fh = fc_frame_header_get(fp); | |
968 | ||
969 | FC_DEBUG_RPORT("Received PLOGI request from port (%6x) " | |
970 | "while in state %s\n", ntoh24(fh->fh_s_id), | |
971 | fc_rport_state(rport)); | |
972 | ||
973 | sid = ntoh24(fh->fh_s_id); | |
974 | pl = fc_frame_payload_get(fp, sizeof(*pl)); | |
975 | if (!pl) { | |
976 | FC_DBG("incoming PLOGI from %x too short\n", sid); | |
977 | WARN_ON(1); | |
978 | /* XXX TBD: send reject? */ | |
979 | fc_frame_free(fp); | |
980 | return; | |
981 | } | |
982 | wwpn = get_unaligned_be64(&pl->fl_wwpn); | |
983 | wwnn = get_unaligned_be64(&pl->fl_wwnn); | |
984 | ||
985 | /* | |
986 | * If the session was just created, possibly due to the incoming PLOGI, | |
987 | * set the state appropriately and accept the PLOGI. | |
988 | * | |
989 | * If we had also sent a PLOGI, and if the received PLOGI is from a | |
990 | * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason | |
991 | * "command already in progress". | |
992 | * | |
993 | * XXX TBD: If the session was ready before, the PLOGI should result in | |
994 | * all outstanding exchanges being reset. | |
995 | */ | |
996 | switch (rdata->rp_state) { | |
997 | case RPORT_ST_INIT: | |
998 | FC_DEBUG_RPORT("incoming PLOGI from %6x wwpn %llx state INIT " | |
6e7490c7 | 999 | "- reject\n", sid, (unsigned long long)wwpn); |
42e9a92f RL |
1000 | reject = ELS_RJT_UNSUP; |
1001 | break; | |
1002 | case RPORT_ST_PLOGI: | |
1003 | FC_DEBUG_RPORT("incoming PLOGI from %x in PLOGI state %d\n", | |
1004 | sid, rdata->rp_state); | |
1005 | if (wwpn < lport->wwpn) | |
1006 | reject = ELS_RJT_INPROG; | |
1007 | break; | |
1008 | case RPORT_ST_PRLI: | |
1009 | case RPORT_ST_READY: | |
1010 | FC_DEBUG_RPORT("incoming PLOGI from %x in logged-in state %d " | |
1011 | "- ignored for now\n", sid, rdata->rp_state); | |
1012 | /* XXX TBD - should reset */ | |
1013 | break; | |
1014 | case RPORT_ST_NONE: | |
1015 | default: | |
1016 | FC_DEBUG_RPORT("incoming PLOGI from %x in unexpected " | |
1017 | "state %d\n", sid, rdata->rp_state); | |
1018 | break; | |
1019 | } | |
1020 | ||
1021 | if (reject) { | |
1022 | rjt_data.reason = reject; | |
1023 | rjt_data.explan = ELS_EXPL_NONE; | |
1024 | lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); | |
1025 | fc_frame_free(fp); | |
1026 | } else { | |
1027 | fp = fc_frame_alloc(lport, sizeof(*pl)); | |
1028 | if (fp == NULL) { | |
1029 | fp = rx_fp; | |
1030 | rjt_data.reason = ELS_RJT_UNAB; | |
1031 | rjt_data.explan = ELS_EXPL_NONE; | |
1032 | lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); | |
1033 | fc_frame_free(fp); | |
1034 | } else { | |
1035 | sp = lport->tt.seq_start_next(sp); | |
1036 | WARN_ON(!sp); | |
1037 | fc_rport_set_name(rport, wwpn, wwnn); | |
1038 | ||
1039 | /* | |
1040 | * Get session payload size from incoming PLOGI. | |
1041 | */ | |
1042 | rport->maxframe_size = | |
1043 | fc_plogi_get_maxframe(pl, lport->mfs); | |
1044 | fc_frame_free(rx_fp); | |
1045 | fc_plogi_fill(lport, fp, ELS_LS_ACC); | |
1046 | ||
1047 | /* | |
1048 | * Send LS_ACC. If this fails, | |
1049 | * the originator should retry. | |
1050 | */ | |
1051 | f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ; | |
1052 | f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT; | |
1053 | ep = fc_seq_exch(sp); | |
1054 | fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, | |
1055 | FC_TYPE_ELS, f_ctl, 0); | |
1056 | lport->tt.seq_send(lport, sp, fp); | |
1057 | if (rdata->rp_state == RPORT_ST_PLOGI) | |
1058 | fc_rport_enter_prli(rport); | |
1059 | } | |
1060 | } | |
1061 | } | |
1062 | ||
1063 | /** | |
34f42a07 | 1064 | * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request |
42e9a92f RL |
1065 | * @rport: Fibre Channel remote port that initiated PRLI |
1066 | * @sp: current sequence in the PRLI exchange | |
1067 | * @fp: PRLI request frame | |
1068 | * | |
1069 | * Locking Note: The rport lock is exected to be held before calling | |
1070 | * this function. | |
1071 | */ | |
1072 | static void fc_rport_recv_prli_req(struct fc_rport *rport, | |
1073 | struct fc_seq *sp, struct fc_frame *rx_fp) | |
1074 | { | |
1075 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
1076 | struct fc_lport *lport = rdata->local_port; | |
1077 | struct fc_exch *ep; | |
1078 | struct fc_frame *fp; | |
1079 | struct fc_frame_header *fh; | |
1080 | struct { | |
1081 | struct fc_els_prli prli; | |
1082 | struct fc_els_spp spp; | |
1083 | } *pp; | |
1084 | struct fc_els_spp *rspp; /* request service param page */ | |
1085 | struct fc_els_spp *spp; /* response spp */ | |
1086 | unsigned int len; | |
1087 | unsigned int plen; | |
1088 | enum fc_els_rjt_reason reason = ELS_RJT_UNAB; | |
1089 | enum fc_els_rjt_explan explan = ELS_EXPL_NONE; | |
1090 | enum fc_els_spp_resp resp; | |
1091 | struct fc_seq_els_data rjt_data; | |
1092 | u32 f_ctl; | |
1093 | u32 fcp_parm; | |
1094 | u32 roles = FC_RPORT_ROLE_UNKNOWN; | |
1095 | rjt_data.fp = NULL; | |
1096 | ||
1097 | fh = fc_frame_header_get(rx_fp); | |
1098 | ||
1099 | FC_DEBUG_RPORT("Received PRLI request from port (%6x) " | |
1100 | "while in state %s\n", ntoh24(fh->fh_s_id), | |
1101 | fc_rport_state(rport)); | |
1102 | ||
1103 | switch (rdata->rp_state) { | |
1104 | case RPORT_ST_PRLI: | |
1105 | case RPORT_ST_READY: | |
1106 | reason = ELS_RJT_NONE; | |
1107 | break; | |
1108 | default: | |
1109 | break; | |
1110 | } | |
1111 | len = fr_len(rx_fp) - sizeof(*fh); | |
1112 | pp = fc_frame_payload_get(rx_fp, sizeof(*pp)); | |
1113 | if (pp == NULL) { | |
1114 | reason = ELS_RJT_PROT; | |
1115 | explan = ELS_EXPL_INV_LEN; | |
1116 | } else { | |
1117 | plen = ntohs(pp->prli.prli_len); | |
1118 | if ((plen % 4) != 0 || plen > len) { | |
1119 | reason = ELS_RJT_PROT; | |
1120 | explan = ELS_EXPL_INV_LEN; | |
1121 | } else if (plen < len) { | |
1122 | len = plen; | |
1123 | } | |
1124 | plen = pp->prli.prli_spp_len; | |
1125 | if ((plen % 4) != 0 || plen < sizeof(*spp) || | |
1126 | plen > len || len < sizeof(*pp)) { | |
1127 | reason = ELS_RJT_PROT; | |
1128 | explan = ELS_EXPL_INV_LEN; | |
1129 | } | |
1130 | rspp = &pp->spp; | |
1131 | } | |
1132 | if (reason != ELS_RJT_NONE || | |
1133 | (fp = fc_frame_alloc(lport, len)) == NULL) { | |
1134 | rjt_data.reason = reason; | |
1135 | rjt_data.explan = explan; | |
1136 | lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); | |
1137 | } else { | |
1138 | sp = lport->tt.seq_start_next(sp); | |
1139 | WARN_ON(!sp); | |
1140 | pp = fc_frame_payload_get(fp, len); | |
1141 | WARN_ON(!pp); | |
1142 | memset(pp, 0, len); | |
1143 | pp->prli.prli_cmd = ELS_LS_ACC; | |
1144 | pp->prli.prli_spp_len = plen; | |
1145 | pp->prli.prli_len = htons(len); | |
1146 | len -= sizeof(struct fc_els_prli); | |
1147 | ||
1148 | /* | |
1149 | * Go through all the service parameter pages and build | |
1150 | * response. If plen indicates longer SPP than standard, | |
1151 | * use that. The entire response has been pre-cleared above. | |
1152 | */ | |
1153 | spp = &pp->spp; | |
1154 | while (len >= plen) { | |
1155 | spp->spp_type = rspp->spp_type; | |
1156 | spp->spp_type_ext = rspp->spp_type_ext; | |
1157 | spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR; | |
1158 | resp = FC_SPP_RESP_ACK; | |
1159 | if (rspp->spp_flags & FC_SPP_RPA_VAL) | |
1160 | resp = FC_SPP_RESP_NO_PA; | |
1161 | switch (rspp->spp_type) { | |
1162 | case 0: /* common to all FC-4 types */ | |
1163 | break; | |
1164 | case FC_TYPE_FCP: | |
1165 | fcp_parm = ntohl(rspp->spp_params); | |
1166 | if (fcp_parm * FCP_SPPF_RETRY) | |
1167 | rdata->flags |= FC_RP_FLAGS_RETRY; | |
1168 | rport->supported_classes = FC_COS_CLASS3; | |
1169 | if (fcp_parm & FCP_SPPF_INIT_FCN) | |
1170 | roles |= FC_RPORT_ROLE_FCP_INITIATOR; | |
1171 | if (fcp_parm & FCP_SPPF_TARG_FCN) | |
1172 | roles |= FC_RPORT_ROLE_FCP_TARGET; | |
1173 | rport->roles = roles; | |
1174 | ||
1175 | spp->spp_params = | |
1176 | htonl(lport->service_params); | |
1177 | break; | |
1178 | default: | |
1179 | resp = FC_SPP_RESP_INVL; | |
1180 | break; | |
1181 | } | |
1182 | spp->spp_flags |= resp; | |
1183 | len -= plen; | |
1184 | rspp = (struct fc_els_spp *)((char *)rspp + plen); | |
1185 | spp = (struct fc_els_spp *)((char *)spp + plen); | |
1186 | } | |
1187 | ||
1188 | /* | |
1189 | * Send LS_ACC. If this fails, the originator should retry. | |
1190 | */ | |
1191 | f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ; | |
1192 | f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT; | |
1193 | ep = fc_seq_exch(sp); | |
1194 | fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid, | |
1195 | FC_TYPE_ELS, f_ctl, 0); | |
1196 | lport->tt.seq_send(lport, sp, fp); | |
1197 | ||
1198 | /* | |
1199 | * Get lock and re-check state. | |
1200 | */ | |
1201 | switch (rdata->rp_state) { | |
1202 | case RPORT_ST_PRLI: | |
1203 | fc_rport_enter_ready(rport); | |
1204 | break; | |
1205 | case RPORT_ST_READY: | |
1206 | break; | |
1207 | default: | |
1208 | break; | |
1209 | } | |
1210 | } | |
1211 | fc_frame_free(rx_fp); | |
1212 | } | |
1213 | ||
1214 | /** | |
34f42a07 | 1215 | * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request |
42e9a92f RL |
1216 | * @rport: Fibre Channel remote port that initiated PRLO |
1217 | * @sp: current sequence in the PRLO exchange | |
1218 | * @fp: PRLO request frame | |
1219 | * | |
1220 | * Locking Note: The rport lock is exected to be held before calling | |
1221 | * this function. | |
1222 | */ | |
1223 | static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp, | |
1224 | struct fc_frame *fp) | |
1225 | { | |
1226 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
1227 | struct fc_lport *lport = rdata->local_port; | |
1228 | ||
1229 | struct fc_frame_header *fh; | |
1230 | struct fc_seq_els_data rjt_data; | |
1231 | ||
1232 | fh = fc_frame_header_get(fp); | |
1233 | ||
1234 | FC_DEBUG_RPORT("Received PRLO request from port (%6x) " | |
1235 | "while in state %s\n", ntoh24(fh->fh_s_id), | |
1236 | fc_rport_state(rport)); | |
1237 | ||
1238 | rjt_data.fp = NULL; | |
1239 | rjt_data.reason = ELS_RJT_UNAB; | |
1240 | rjt_data.explan = ELS_EXPL_NONE; | |
1241 | lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data); | |
1242 | fc_frame_free(fp); | |
1243 | } | |
1244 | ||
1245 | /** | |
34f42a07 | 1246 | * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request |
42e9a92f RL |
1247 | * @rport: Fibre Channel remote port that initiated LOGO |
1248 | * @sp: current sequence in the LOGO exchange | |
1249 | * @fp: LOGO request frame | |
1250 | * | |
1251 | * Locking Note: The rport lock is exected to be held before calling | |
1252 | * this function. | |
1253 | */ | |
1254 | static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp, | |
1255 | struct fc_frame *fp) | |
1256 | { | |
1257 | struct fc_frame_header *fh; | |
1258 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
1259 | struct fc_lport *lport = rdata->local_port; | |
1260 | ||
1261 | fh = fc_frame_header_get(fp); | |
1262 | ||
1263 | FC_DEBUG_RPORT("Received LOGO request from port (%6x) " | |
1264 | "while in state %s\n", ntoh24(fh->fh_s_id), | |
1265 | fc_rport_state(rport)); | |
1266 | ||
1267 | rdata->event = RPORT_EV_LOGO; | |
1268 | queue_work(rport_event_queue, &rdata->event_work); | |
1269 | ||
1270 | lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL); | |
1271 | fc_frame_free(fp); | |
1272 | } | |
1273 | ||
1274 | static void fc_rport_flush_queue(void) | |
1275 | { | |
1276 | flush_workqueue(rport_event_queue); | |
1277 | } | |
1278 | ||
1279 | ||
1280 | int fc_rport_init(struct fc_lport *lport) | |
1281 | { | |
5101ff99 RL |
1282 | if (!lport->tt.rport_create) |
1283 | lport->tt.rport_create = fc_rport_rogue_create; | |
1284 | ||
42e9a92f RL |
1285 | if (!lport->tt.rport_login) |
1286 | lport->tt.rport_login = fc_rport_login; | |
1287 | ||
1288 | if (!lport->tt.rport_logoff) | |
1289 | lport->tt.rport_logoff = fc_rport_logoff; | |
1290 | ||
1291 | if (!lport->tt.rport_recv_req) | |
1292 | lport->tt.rport_recv_req = fc_rport_recv_req; | |
1293 | ||
1294 | if (!lport->tt.rport_flush_queue) | |
1295 | lport->tt.rport_flush_queue = fc_rport_flush_queue; | |
1296 | ||
1297 | return 0; | |
1298 | } | |
1299 | EXPORT_SYMBOL(fc_rport_init); | |
1300 | ||
1301 | int fc_setup_rport() | |
1302 | { | |
1303 | rport_event_queue = create_singlethread_workqueue("fc_rport_eq"); | |
1304 | if (!rport_event_queue) | |
1305 | return -ENOMEM; | |
1306 | return 0; | |
1307 | } | |
1308 | EXPORT_SYMBOL(fc_setup_rport); | |
1309 | ||
1310 | void fc_destroy_rport() | |
1311 | { | |
1312 | destroy_workqueue(rport_event_queue); | |
1313 | } | |
1314 | EXPORT_SYMBOL(fc_destroy_rport); | |
1315 | ||
1316 | void fc_rport_terminate_io(struct fc_rport *rport) | |
1317 | { | |
1318 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | |
1319 | struct fc_lport *lport = rdata->local_port; | |
1320 | ||
1f6ff364 AJ |
1321 | lport->tt.exch_mgr_reset(lport, 0, rport->port_id); |
1322 | lport->tt.exch_mgr_reset(lport, rport->port_id, 0); | |
42e9a92f RL |
1323 | } |
1324 | EXPORT_SYMBOL(fc_rport_terminate_io); |