]> git.ipfire.org Git - thirdparty/linux.git/blame - drivers/scsi/libfc/fc_rport.c
[SCSI] libfc: change elsct to use FC_ID instead of rdata
[thirdparty/linux.git] / drivers / scsi / libfc / fc_rport.c
CommitLineData
42e9a92f
RL
1/*
2 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20/*
21 * RPORT GENERAL INFO
22 *
23 * This file contains all processing regarding fc_rports. It contains the
24 * rport state machine and does all rport interaction with the transport class.
25 * There should be no other places in libfc that interact directly with the
26 * transport class in regards to adding and deleting rports.
27 *
28 * fc_rport's represent N_Port's within the fabric.
29 */
30
31/*
32 * RPORT LOCKING
33 *
34 * The rport should never hold the rport mutex and then attempt to acquire
35 * either the lport or disc mutexes. The rport's mutex is considered lesser
36 * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
 * more comments on the hierarchy.
38 *
39 * The locking strategy is similar to the lport's strategy. The lock protects
40 * the rport's states and is held and released by the entry points to the rport
41 * block. All _enter_* functions correspond to rport states and expect the rport
42 * mutex to be locked before calling them. This means that rports only handle
43 * one request or response at a time, since they're not critical for the I/O
44 * path this potential over-use of the mutex is acceptable.
45 */
46
47#include <linux/kernel.h>
48#include <linux/spinlock.h>
49#include <linux/interrupt.h>
50#include <linux/rcupdate.h>
51#include <linux/timer.h>
52#include <linux/workqueue.h>
53#include <asm/unaligned.h>
54
55#include <scsi/libfc.h>
56#include <scsi/fc_encode.h>
57
/* Single-threaded workqueue on which all rport event work items run. */
struct workqueue_struct *rport_event_queue;

/* State-entry handlers; each expects the rport mutex to be held. */
static void fc_rport_enter_plogi(struct fc_rport_priv *);
static void fc_rport_enter_prli(struct fc_rport_priv *);
static void fc_rport_enter_rtv(struct fc_rport_priv *);
static void fc_rport_enter_ready(struct fc_rport_priv *);
static void fc_rport_enter_logo(struct fc_rport_priv *);

/* Incoming ELS request handlers; each expects the rport mutex to be held. */
static void fc_rport_recv_plogi_req(struct fc_rport_priv *,
				    struct fc_seq *, struct fc_frame *);
static void fc_rport_recv_prli_req(struct fc_rport_priv *,
				   struct fc_seq *, struct fc_frame *);
static void fc_rport_recv_prlo_req(struct fc_rport_priv *,
				   struct fc_seq *, struct fc_frame *);
static void fc_rport_recv_logo_req(struct fc_rport_priv *,
				   struct fc_seq *, struct fc_frame *);
static void fc_rport_timeout(struct work_struct *);
static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_work(struct work_struct *);
/* Human-readable names for enum fc_rport_state, indexed by state value. */
static const char *fc_rport_state_names[] = {
	[RPORT_ST_INIT] = "Init",
	[RPORT_ST_PLOGI] = "PLOGI",
	[RPORT_ST_PRLI] = "PRLI",
	[RPORT_ST_RTV] = "RTV",
	[RPORT_ST_READY] = "Ready",
	[RPORT_ST_LOGO] = "LOGO",
	[RPORT_ST_DELETE] = "Delete",
};
88
/**
 * fc_rport_rogue_destroy() - Release callback for a rogue rport's device
 * @dev: embedded device of the rogue rport
 *
 * Called by the driver core when the last reference to the rogue rport's
 * device is dropped; frees the combined rport+rdata allocation made in
 * fc_rport_rogue_create().
 */
static void fc_rport_rogue_destroy(struct device *dev)
{
	struct fc_rport *rport = dev_to_rport(dev);
	struct fc_rport_priv *rdata = RPORT_TO_PRIV(rport);

	FC_RPORT_DBG(rdata, "Destroying rogue rport\n");
	kfree(rport);
}
97
/**
 * fc_rport_rogue_create() - Allocate a temporary ("rogue") remote port
 * @lport: local port the remote port is attached to
 * @ids: identifiers (FC_ID, WWPN, WWNN, roles) to copy into the new port
 *
 * Allocates an fc_rport and its private data in one block and initializes
 * both, without registering with the FC transport class.  fc_rport_work()
 * later swaps this rogue port for a real transport-class rport.
 *
 * Returns the new private data, or NULL on allocation failure.
 * Ownership: the caller holds the initial device reference; it is dropped
 * via put_device(), which invokes fc_rport_rogue_destroy().
 */
struct fc_rport_priv *fc_rport_rogue_create(struct fc_lport *lport,
					    struct fc_rport_identifiers *ids)
{
	struct fc_rport *rport;
	struct fc_rport_priv *rdata;
	/* One allocation covers the rport and its trailing private data. */
	rport = kzalloc(sizeof(*rport) + sizeof(*rdata), GFP_KERNEL);

	if (!rport)
		return NULL;

	rdata = RPORT_TO_PRIV(rport);

	rport->dd_data = rdata;
	rport->port_id = ids->port_id;
	rport->port_name = ids->port_name;
	rport->node_name = ids->node_name;
	rport->roles = ids->roles;
	rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
	/*
	 * Note: all this libfc rogue rport code will be removed for
	 * upstream so it fine that this is really ugly and hacky right now.
	 */
	device_initialize(&rport->dev);
	rport->dev.release = fc_rport_rogue_destroy;

	mutex_init(&rdata->rp_mutex);
	rdata->local_port = lport;
	rdata->trans_state = FC_PORTSTATE_ROGUE;
	rdata->rp_state = RPORT_ST_INIT;
	rdata->event = RPORT_EV_NONE;
	rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
	rdata->ops = NULL;
	/* Inherit the local port's current timeout values. */
	rdata->e_d_tov = lport->e_d_tov;
	rdata->r_a_tov = lport->r_a_tov;
	INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
	INIT_WORK(&rdata->event_work, fc_rport_work);
	/*
	 * For good measure, but not necessary as we should only
	 * add REAL rport to the lport list.
	 */
	INIT_LIST_HEAD(&rdata->peers);

	return rdata;
}
142
143/**
34f42a07 144 * fc_rport_state() - return a string for the state the rport is in
9fb9d328 145 * @rdata: remote port private data
42e9a92f 146 */
9fb9d328 147static const char *fc_rport_state(struct fc_rport_priv *rdata)
42e9a92f
RL
148{
149 const char *cp;
42e9a92f
RL
150
151 cp = fc_rport_state_names[rdata->rp_state];
152 if (!cp)
153 cp = "Unknown";
154 return cp;
155}
156
157/**
34f42a07 158 * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
42e9a92f
RL
159 * @rport: Pointer to Fibre Channel remote port structure
160 * @timeout: timeout in seconds
161 */
162void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
163{
164 if (timeout)
165 rport->dev_loss_tmo = timeout + 5;
166 else
167 rport->dev_loss_tmo = 30;
168}
169EXPORT_SYMBOL(fc_set_rport_loss_tmo);
170
/**
 * fc_plogi_get_maxframe() - Get max payload from the common service parameters
 * @flp: FLOGI payload structure
 * @maxval: upper limit, may be less than what is in the service parameters
 *
 * Returns the smallest of @maxval, the common service parameter BB data
 * field, and the class 3 receive data field size, ignoring values below
 * the FC minimum payload size.
 */
static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
					  unsigned int maxval)
{
	unsigned int mfs;

	/*
	 * Get max payload from the common service parameters and the
	 * class 3 receive data field size.
	 */
	mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
		maxval = mfs;
	/* fl_cssp[3 - 1] is the class 3 service parameter entry. */
	mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
		maxval = mfs;
	return maxval;
}
193
/**
 * fc_rport_state_enter() - Change the rport's state
 * @rdata: The rport whose state should change
 * @new: The new state of the rport
 *
 * Resets the retry counter whenever the state actually changes.
 *
 * Locking Note: Called with the rport lock held
 */
static void fc_rport_state_enter(struct fc_rport_priv *rdata,
				 enum fc_rport_state new)
{
	if (rdata->rp_state != new)
		rdata->retries = 0;
	rdata->rp_state = new;
}
208
209static void fc_rport_work(struct work_struct *work)
210{
571f824c 211 u32 port_id;
ab28f1fd
JE
212 struct fc_rport_priv *rdata =
213 container_of(work, struct fc_rport_priv, event_work);
42e9a92f
RL
214 enum fc_rport_event event;
215 enum fc_rport_trans_state trans_state;
216 struct fc_lport *lport = rdata->local_port;
217 struct fc_rport_operations *rport_ops;
218 struct fc_rport *rport = PRIV_TO_RPORT(rdata);
219
220 mutex_lock(&rdata->rp_mutex);
221 event = rdata->event;
222 rport_ops = rdata->ops;
223
224 if (event == RPORT_EV_CREATED) {
225 struct fc_rport *new_rport;
ab28f1fd 226 struct fc_rport_priv *new_rdata;
42e9a92f
RL
227 struct fc_rport_identifiers ids;
228
229 ids.port_id = rport->port_id;
230 ids.roles = rport->roles;
231 ids.port_name = rport->port_name;
232 ids.node_name = rport->node_name;
233
5f7ea3b7 234 rdata->event = RPORT_EV_NONE;
42e9a92f
RL
235 mutex_unlock(&rdata->rp_mutex);
236
237 new_rport = fc_remote_port_add(lport->host, 0, &ids);
238 if (new_rport) {
239 /*
240 * Switch from the rogue rport to the rport
241 * returned by the FC class.
242 */
243 new_rport->maxframe_size = rport->maxframe_size;
244
245 new_rdata = new_rport->dd_data;
246 new_rdata->e_d_tov = rdata->e_d_tov;
247 new_rdata->r_a_tov = rdata->r_a_tov;
248 new_rdata->ops = rdata->ops;
249 new_rdata->local_port = rdata->local_port;
250 new_rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
251 new_rdata->trans_state = FC_PORTSTATE_REAL;
252 mutex_init(&new_rdata->rp_mutex);
253 INIT_DELAYED_WORK(&new_rdata->retry_work,
254 fc_rport_timeout);
255 INIT_LIST_HEAD(&new_rdata->peers);
256 INIT_WORK(&new_rdata->event_work, fc_rport_work);
257
9fb9d328 258 fc_rport_state_enter(new_rdata, RPORT_ST_READY);
42e9a92f 259 } else {
7414705e
RL
260 printk(KERN_WARNING "libfc: Failed to allocate "
261 " memory for rport (%6x)\n", ids.port_id);
42e9a92f
RL
262 event = RPORT_EV_FAILED;
263 }
b4c6f546
AJ
264 if (rport->port_id != FC_FID_DIR_SERV)
265 if (rport_ops->event_callback)
9fb9d328 266 rport_ops->event_callback(lport, rdata,
b4c6f546 267 RPORT_EV_FAILED);
42e9a92f
RL
268 put_device(&rport->dev);
269 rport = new_rport;
270 rdata = new_rport->dd_data;
271 if (rport_ops->event_callback)
9fb9d328 272 rport_ops->event_callback(lport, rdata, event);
42e9a92f
RL
273 } else if ((event == RPORT_EV_FAILED) ||
274 (event == RPORT_EV_LOGO) ||
275 (event == RPORT_EV_STOP)) {
276 trans_state = rdata->trans_state;
277 mutex_unlock(&rdata->rp_mutex);
278 if (rport_ops->event_callback)
9fb9d328 279 rport_ops->event_callback(lport, rdata, event);
201e5795 280 cancel_delayed_work_sync(&rdata->retry_work);
42e9a92f
RL
281 if (trans_state == FC_PORTSTATE_ROGUE)
282 put_device(&rport->dev);
571f824c
AJ
283 else {
284 port_id = rport->port_id;
42e9a92f 285 fc_remote_port_delete(rport);
571f824c
AJ
286 lport->tt.exch_mgr_reset(lport, 0, port_id);
287 lport->tt.exch_mgr_reset(lport, port_id, 0);
288 }
42e9a92f
RL
289 } else
290 mutex_unlock(&rdata->rp_mutex);
291}
292
/**
 * fc_rport_login() - Start the remote port login state machine
 * @rdata: private remote port
 *
 * Kicks off login by sending a PLOGI to the remote port.
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 *
 * Returns: always 0.
 */
int fc_rport_login(struct fc_rport_priv *rdata)
{
	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Login to port\n");

	fc_rport_enter_plogi(rdata);

	mutex_unlock(&rdata->rp_mutex);

	return 0;
}
313
/**
 * fc_rport_enter_delete() - schedule a remote port to be deleted.
 * @rdata: private remote port
 * @event: event to report as the reason for deletion
 *
 * Locking Note: Called with the rport lock held.
 *
 * Allow state change into DELETE only once.
 *
 * Call queue_work only if there's no event already pending.
 * Set the new event so that the old pending event will not occur.
 * Since we have the mutex, even if fc_rport_work() is already started,
 * it'll see the new event.
 */
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
				  enum fc_rport_event event)
{
	if (rdata->rp_state == RPORT_ST_DELETE)
		return;

	FC_RPORT_DBG(rdata, "Delete port\n");

	fc_rport_state_enter(rdata, RPORT_ST_DELETE);

	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = event;
}
342
/**
 * fc_rport_logoff() - Logoff and remove an rport
 * @rdata: private remote port
 *
 * Sends a LOGO to the remote port and schedules deletion.  A no-op if the
 * port is already in the Delete state.
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 *
 * Returns: always 0.
 */
int fc_rport_logoff(struct fc_rport_priv *rdata)
{
	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Remove port\n");

	if (rdata->rp_state == RPORT_ST_DELETE) {
		FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
		mutex_unlock(&rdata->rp_mutex);
		goto out;
	}

	fc_rport_enter_logo(rdata);

	/*
	 * Change the state to Delete so that we discard
	 * the response.
	 */
	fc_rport_enter_delete(rdata, RPORT_EV_STOP);
	mutex_unlock(&rdata->rp_mutex);

out:
	return 0;
}
375
/**
 * fc_rport_enter_ready() - The rport is ready
 * @rdata: private remote port
 *
 * Enters the READY state and schedules an RPORT_EV_CREATED event so
 * fc_rport_work() registers the port with the transport class.
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
	fc_rport_state_enter(rdata, RPORT_ST_READY);

	FC_RPORT_DBG(rdata, "Port is Ready\n");

	/* Only queue work when no event is pending; overwrite the event
	 * either way so the worker sees CREATED (same scheme as
	 * fc_rport_enter_delete()). */
	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = RPORT_EV_CREATED;
}
393
/**
 * fc_rport_timeout() - Handler for the retry_work timer.
 * @work: The work struct of the fc_rport_priv
 *
 * Re-enters the current state, which re-sends the ELS request that
 * previously failed (scheduled from fc_rport_error_retry()).
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
static void fc_rport_timeout(struct work_struct *work)
{
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, retry_work.work);

	mutex_lock(&rdata->rp_mutex);

	switch (rdata->rp_state) {
	case RPORT_ST_PLOGI:
		fc_rport_enter_plogi(rdata);
		break;
	case RPORT_ST_PRLI:
		fc_rport_enter_prli(rdata);
		break;
	case RPORT_ST_RTV:
		fc_rport_enter_rtv(rdata);
		break;
	case RPORT_ST_LOGO:
		fc_rport_enter_logo(rdata);
		break;
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
	case RPORT_ST_DELETE:
		/* Nothing to retry in these states. */
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
}
430
/**
 * fc_rport_error() - Error handler, called once retries have been exhausted
 * @rdata: private remote port
 * @fp: The frame pointer (an ERR_PTR-encoded error, per PTR_ERR() use)
 *
 * Login-phase failures (PLOGI/PRLI/LOGO) schedule deletion; an RTV failure
 * is tolerated by going READY anyway, since many targets don't support RTV.
 *
 * Locking Note: The rport lock is expected to be held before
 * calling this routine
 */
static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
{
	FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
		     PTR_ERR(fp), fc_rport_state(rdata), rdata->retries);

	switch (rdata->rp_state) {
	case RPORT_ST_PLOGI:
	case RPORT_ST_PRLI:
	case RPORT_ST_LOGO:
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
		break;
	case RPORT_ST_RTV:
		fc_rport_enter_ready(rdata);
		break;
	case RPORT_ST_DELETE:
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
		break;
	}
}
459
/**
 * fc_rport_error_retry() - Error handler when retries are desired
 * @rdata: private remote port data
 * @fp: The frame pointer (an ERR_PTR-encoded error, per PTR_ERR() use)
 *
 * If the error was an exchange timeout retry immediately,
 * otherwise wait for E_D_TOV.
 *
 * Locking Note: The rport lock is expected to be held before
 * calling this routine
 */
static void fc_rport_error_retry(struct fc_rport_priv *rdata,
				 struct fc_frame *fp)
{
	unsigned long delay = FC_DEF_E_D_TOV;

	/* make sure this isn't an FC_EX_CLOSED error, never retry those */
	if (PTR_ERR(fp) == -FC_EX_CLOSED)
		return fc_rport_error(rdata, fp);

	if (rdata->retries < rdata->local_port->max_rport_retry_count) {
		FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
			     PTR_ERR(fp), fc_rport_state(rdata));
		rdata->retries++;
		/* no additional delay on exchange timeouts */
		if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
			delay = 0;
		schedule_delayed_work(&rdata->retry_work, delay);
		return;
	}

	/* Retry budget exhausted: fall through to the terminal handler. */
	return fc_rport_error(rdata, fp);
}
493
/**
 * fc_rport_plogi_resp() - Handle incoming ELS PLOGI response
 * @sp: current sequence in the PLOGI exchange
 * @fp: response frame (or an ERR_PTR-encoded error)
 * @rdata_arg: private remote port data
 *
 * On LS_ACC, records the peer's WWPN/WWNN, timeout values, max concurrent
 * sequences and max frame size, then advances to PRLI (or straight to
 * READY for well-known addresses).  Anything else triggers the retry path.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
	struct fc_lport *lport = rdata->local_port;
	struct fc_els_flogi *plp = NULL;
	unsigned int tov;
	u16 csp_seq;
	u16 cssp_seq;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PLOGI response\n");

	/* A stale response (state already moved on) is dropped. */
	if (rdata->rp_state != RPORT_ST_PLOGI) {
		FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC &&
	    (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
		rport->port_name = get_unaligned_be64(&plp->fl_wwpn);
		rport->node_name = get_unaligned_be64(&plp->fl_wwnn);

		/* E_D_TOV is in ms unless the EDTR feature flags seconds. */
		tov = ntohl(plp->fl_csp.sp_e_d_tov);
		if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
			tov /= 1000;
		if (tov > rdata->e_d_tov)
			rdata->e_d_tov = tov;
		/* Cap total sequences by the class 3 concurrent limit. */
		csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
		cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
		if (cssp_seq < csp_seq)
			csp_seq = cssp_seq;
		rdata->max_seq = csp_seq;
		rport->maxframe_size =
			fc_plogi_get_maxframe(plp, lport->mfs);

		/*
		 * If the rport is one of the well known addresses
		 * we skip PRLI and RTV and go straight to READY.
		 */
		if (rport->port_id >= FC_FID_DOM_MGR)
			fc_rport_enter_ready(rdata);
		else
			fc_rport_enter_prli(rdata);
	} else
		fc_rport_error_retry(rdata, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* Drop the reference taken when the PLOGI was sent. */
	put_device(&rport->dev);
}
569
/**
 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
 * @rdata: private remote port data
 *
 * Allocation or send failure goes through the retry path.
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_PLOGI);

	/* Reset to the minimum until the PLOGI response negotiates it up. */
	rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}
	rdata->e_d_tov = lport->e_d_tov;

	if (!lport->tt.elsct_send(lport, rport->port_id, fp, ELS_PLOGI,
				  fc_rport_plogi_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, fp);
	else
		/* Hold a reference for the response callback to drop. */
		get_device(&rport->dev);
}
602
/**
 * fc_rport_prli_resp() - Process Login (PRLI) response handler
 * @sp: current sequence in the PRLI exchange
 * @fp: response frame (or an ERR_PTR-encoded error)
 * @rdata_arg: private remote port data
 *
 * On LS_ACC, extracts the FCP service parameters to set the retry flag and
 * initiator/target roles, then proceeds to RTV.  A reject schedules port
 * deletion.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	u32 roles = FC_RPORT_ROLE_UNKNOWN;
	u32 fcp_parm = 0;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PRLI response\n");

	/* A stale response (state already moved on) is dropped. */
	if (rdata->rp_state != RPORT_ST_PRLI) {
		FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		pp = fc_frame_payload_get(fp, sizeof(*pp));
		if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
			fcp_parm = ntohl(pp->spp.spp_params);
			if (fcp_parm & FCP_SPPF_RETRY)
				rdata->flags |= FC_RP_FLAGS_RETRY;
		}

		rport->supported_classes = FC_COS_CLASS3;
		if (fcp_parm & FCP_SPPF_INIT_FCN)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (fcp_parm & FCP_SPPF_TARG_FCN)
			roles |= FC_RPORT_ROLE_FCP_TARGET;

		rport->roles = roles;
		fc_rport_enter_rtv(rdata);

	} else {
		FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* Drop the reference taken when the PRLI was sent. */
	put_device(&rport->dev);
}
672
/**
 * fc_rport_logo_resp() - Logout (LOGO) response handler
 * @sp: current sequence in the LOGO exchange
 * @fp: response frame (or an ERR_PTR-encoded error)
 * @rdata_arg: private remote port data
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a LOGO response\n");

	/* A stale response (state already moved on) is dropped. */
	if (rdata->rp_state != RPORT_ST_LOGO) {
		FC_RPORT_DBG(rdata, "Received a LOGO response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		/* NOTE(review): entering RTV after a LOGO accept looks
		 * suspicious -- confirm this shouldn't schedule deletion
		 * instead. */
		fc_rport_enter_rtv(rdata);
	} else {
		FC_RPORT_DBG(rdata, "Bad ELS response for LOGO command\n");
		fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* Drop the reference taken when the LOGO was sent. */
	put_device(&rport->dev);
}
721
/**
 * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
 * @rdata: private remote port data
 *
 * Allocation or send failure goes through the retry path.
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
{
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
	struct fc_lport *lport = rdata->local_port;
	/* Local layout used only to size the frame: PRLI plus one
	 * service parameter page. */
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_PRLI);

	fp = fc_frame_alloc(lport, sizeof(*pp));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, rport->port_id, fp, ELS_PRLI,
				  fc_rport_prli_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, fp);
	else
		/* Hold a reference for the response callback to drop. */
		get_device(&rport->dev);
}
756
/**
 * fc_rport_rtv_resp() - Request Timeout Value response handler
 * @sp: current sequence in the RTV exchange
 * @fp: response frame (or an ERR_PTR-encoded error)
 * @rdata_arg: private remote port data
 *
 * Many targets don't seem to support this.
 *
 * On LS_ACC, updates r_a_tov and e_d_tov from the response; in every
 * non-error case the port then goes READY, so an unsupported RTV is
 * harmless.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a RTV response\n");

	/* A stale response (state already moved on) is dropped. */
	if (rdata->rp_state != RPORT_ST_RTV) {
		FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		struct fc_els_rtv_acc *rtv;
		u32 toq;
		u32 tov;

		rtv = fc_frame_payload_get(fp, sizeof(*rtv));
		if (rtv) {
			toq = ntohl(rtv->rtv_toq);
			tov = ntohl(rtv->rtv_r_a_tov);
			if (tov == 0)
				tov = 1;
			rdata->r_a_tov = tov;
			tov = ntohl(rtv->rtv_e_d_tov);
			/* E_D_TOV resolution flag: value is in ns units. */
			if (toq & FC_ELS_RTV_EDRES)
				tov /= 1000000;
			if (tov == 0)
				tov = 1;
			rdata->e_d_tov = tov;
		}
	}

	fc_rport_enter_ready(rdata);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	/* Drop the reference taken when the RTV was sent. */
	put_device(&rport->dev);
}
823
/**
 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
 * @rdata: private remote port data
 *
 * Allocation or send failure goes through the retry path.
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
{
	struct fc_frame *fp;
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);

	FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_RTV);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, rport->port_id, fp, ELS_RTV,
				  fc_rport_rtv_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, fp);
	else
		/* Hold a reference for the response callback to drop. */
		get_device(&rport->dev);
}
854
/**
 * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
 * @rdata: private remote port data
 *
 * Allocation or send failure goes through the retry path.
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_LOGO);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, rport->port_id, fp, ELS_LOGO,
				  fc_rport_logo_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, fp);
	else
		/* Hold a reference for the response callback to drop. */
		get_device(&rport->dev);
}
885
886
/**
 * fc_rport_recv_req() - Receive a request from a rport
 * @sp: current sequence in the exchange
 * @fp: received request frame
 * @rdata: private remote port data
 *
 * Dispatches incoming ELS requests to the per-opcode handlers; RRQ and REC
 * are answered directly via the sequence ELS response, and unknown opcodes
 * are rejected with LS_RJT/ELS_RJT_UNSUP.
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
		       struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;

	struct fc_frame_header *fh;
	struct fc_seq_els_data els_data;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	els_data.fp = NULL;
	els_data.explan = ELS_EXPL_NONE;
	els_data.reason = ELS_RJT_NONE;

	fh = fc_frame_header_get(fp);

	/* NOTE(review): frames failing this check are silently ignored and
	 * fp is not freed here -- confirm the caller owns the frame then. */
	if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) {
		op = fc_frame_payload_op(fp);
		switch (op) {
		case ELS_PLOGI:
			fc_rport_recv_plogi_req(rdata, sp, fp);
			break;
		case ELS_PRLI:
			fc_rport_recv_prli_req(rdata, sp, fp);
			break;
		case ELS_PRLO:
			fc_rport_recv_prlo_req(rdata, sp, fp);
			break;
		case ELS_LOGO:
			fc_rport_recv_logo_req(rdata, sp, fp);
			break;
		case ELS_RRQ:
			els_data.fp = fp;
			lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
			break;
		case ELS_REC:
			els_data.fp = fp;
			lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
			break;
		default:
			els_data.reason = ELS_RJT_UNSUP;
			lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
			break;
		}
	}

	mutex_unlock(&rdata->rp_mutex);
}
946
947/**
34f42a07 948 * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
9fb9d328 949 * @rdata: private remote port data
42e9a92f
RL
950 * @sp: current sequence in the PLOGI exchange
951 * @fp: PLOGI request frame
952 *
 * Locking Note: The rport lock is expected to be held before calling
954 * this function.
955 */
9fb9d328 956static void fc_rport_recv_plogi_req(struct fc_rport_priv *rdata,
42e9a92f
RL
957 struct fc_seq *sp, struct fc_frame *rx_fp)
958{
9fb9d328 959 struct fc_rport *rport = PRIV_TO_RPORT(rdata);
42e9a92f
RL
960 struct fc_lport *lport = rdata->local_port;
961 struct fc_frame *fp = rx_fp;
962 struct fc_exch *ep;
963 struct fc_frame_header *fh;
964 struct fc_els_flogi *pl;
965 struct fc_seq_els_data rjt_data;
966 u32 sid;
967 u64 wwpn;
968 u64 wwnn;
969 enum fc_els_rjt_reason reject = 0;
970 u32 f_ctl;
971 rjt_data.fp = NULL;
972
973 fh = fc_frame_header_get(fp);
974
9fb9d328
JE
975 FC_RPORT_DBG(rdata, "Received PLOGI request while in state %s\n",
976 fc_rport_state(rdata));
42e9a92f
RL
977
978 sid = ntoh24(fh->fh_s_id);
979 pl = fc_frame_payload_get(fp, sizeof(*pl));
980 if (!pl) {
9fb9d328 981 FC_RPORT_DBG(rdata, "Received PLOGI too short\n");
42e9a92f
RL
982 WARN_ON(1);
983 /* XXX TBD: send reject? */
984 fc_frame_free(fp);
985 return;
986 }
987 wwpn = get_unaligned_be64(&pl->fl_wwpn);
988 wwnn = get_unaligned_be64(&pl->fl_wwnn);
989
990 /*
991 * If the session was just created, possibly due to the incoming PLOGI,
992 * set the state appropriately and accept the PLOGI.
993 *
994 * If we had also sent a PLOGI, and if the received PLOGI is from a
995 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
996 * "command already in progress".
997 *
998 * XXX TBD: If the session was ready before, the PLOGI should result in
999 * all outstanding exchanges being reset.
1000 */
1001 switch (rdata->rp_state) {
1002 case RPORT_ST_INIT:
9fb9d328 1003 FC_RPORT_DBG(rdata, "Received PLOGI, wwpn %llx state INIT "
7414705e 1004 "- reject\n", (unsigned long long)wwpn);
42e9a92f
RL
1005 reject = ELS_RJT_UNSUP;
1006 break;
1007 case RPORT_ST_PLOGI:
9fb9d328 1008 FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state %d\n",
7414705e 1009 rdata->rp_state);
42e9a92f
RL
1010 if (wwpn < lport->wwpn)
1011 reject = ELS_RJT_INPROG;
1012 break;
1013 case RPORT_ST_PRLI:
1014 case RPORT_ST_READY:
9fb9d328 1015 FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
7414705e 1016 "- ignored for now\n", rdata->rp_state);
42e9a92f
RL
1017 /* XXX TBD - should reset */
1018 break;
14194054 1019 case RPORT_ST_DELETE:
42e9a92f 1020 default:
9fb9d328 1021 FC_RPORT_DBG(rdata, "Received PLOGI in unexpected "
7414705e 1022 "state %d\n", rdata->rp_state);
b4c6f546
AJ
1023 fc_frame_free(fp);
1024 return;
42e9a92f
RL
1025 break;
1026 }
1027
1028 if (reject) {
1029 rjt_data.reason = reject;
1030 rjt_data.explan = ELS_EXPL_NONE;
1031 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1032 fc_frame_free(fp);
1033 } else {
1034 fp = fc_frame_alloc(lport, sizeof(*pl));
1035 if (fp == NULL) {
1036 fp = rx_fp;
1037 rjt_data.reason = ELS_RJT_UNAB;
1038 rjt_data.explan = ELS_EXPL_NONE;
1039 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1040 fc_frame_free(fp);
1041 } else {
1042 sp = lport->tt.seq_start_next(sp);
1043 WARN_ON(!sp);
1044 fc_rport_set_name(rport, wwpn, wwnn);
1045
1046 /*
1047 * Get session payload size from incoming PLOGI.
1048 */
1049 rport->maxframe_size =
1050 fc_plogi_get_maxframe(pl, lport->mfs);
1051 fc_frame_free(rx_fp);
1052 fc_plogi_fill(lport, fp, ELS_LS_ACC);
1053
1054 /*
1055 * Send LS_ACC. If this fails,
1056 * the originator should retry.
1057 */
1058 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1059 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1060 ep = fc_seq_exch(sp);
1061 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1062 FC_TYPE_ELS, f_ctl, 0);
1063 lport->tt.seq_send(lport, sp, fp);
1064 if (rdata->rp_state == RPORT_ST_PLOGI)
9fb9d328 1065 fc_rport_enter_prli(rdata);
42e9a92f
RL
1066 }
1067 }
1068}
1069
1070/**
34f42a07 1071 * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
9fb9d328 1072 * @rdata: private remote port data
42e9a92f
RL
1073 * @sp: current sequence in the PRLI exchange
1074 * @fp: PRLI request frame
1075 *
1076 * Locking Note: The rport lock is exected to be held before calling
1077 * this function.
1078 */
9fb9d328 1079static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
42e9a92f
RL
1080 struct fc_seq *sp, struct fc_frame *rx_fp)
1081{
9fb9d328 1082 struct fc_rport *rport = PRIV_TO_RPORT(rdata);
42e9a92f
RL
1083 struct fc_lport *lport = rdata->local_port;
1084 struct fc_exch *ep;
1085 struct fc_frame *fp;
1086 struct fc_frame_header *fh;
1087 struct {
1088 struct fc_els_prli prli;
1089 struct fc_els_spp spp;
1090 } *pp;
1091 struct fc_els_spp *rspp; /* request service param page */
1092 struct fc_els_spp *spp; /* response spp */
1093 unsigned int len;
1094 unsigned int plen;
1095 enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
1096 enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
1097 enum fc_els_spp_resp resp;
1098 struct fc_seq_els_data rjt_data;
1099 u32 f_ctl;
1100 u32 fcp_parm;
1101 u32 roles = FC_RPORT_ROLE_UNKNOWN;
1102 rjt_data.fp = NULL;
1103
1104 fh = fc_frame_header_get(rx_fp);
1105
9fb9d328
JE
1106 FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
1107 fc_rport_state(rdata));
42e9a92f
RL
1108
1109 switch (rdata->rp_state) {
1110 case RPORT_ST_PRLI:
1111 case RPORT_ST_READY:
1112 reason = ELS_RJT_NONE;
1113 break;
1114 default:
b4c6f546
AJ
1115 fc_frame_free(rx_fp);
1116 return;
42e9a92f
RL
1117 break;
1118 }
1119 len = fr_len(rx_fp) - sizeof(*fh);
1120 pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1121 if (pp == NULL) {
1122 reason = ELS_RJT_PROT;
1123 explan = ELS_EXPL_INV_LEN;
1124 } else {
1125 plen = ntohs(pp->prli.prli_len);
1126 if ((plen % 4) != 0 || plen > len) {
1127 reason = ELS_RJT_PROT;
1128 explan = ELS_EXPL_INV_LEN;
1129 } else if (plen < len) {
1130 len = plen;
1131 }
1132 plen = pp->prli.prli_spp_len;
1133 if ((plen % 4) != 0 || plen < sizeof(*spp) ||
1134 plen > len || len < sizeof(*pp)) {
1135 reason = ELS_RJT_PROT;
1136 explan = ELS_EXPL_INV_LEN;
1137 }
1138 rspp = &pp->spp;
1139 }
1140 if (reason != ELS_RJT_NONE ||
1141 (fp = fc_frame_alloc(lport, len)) == NULL) {
1142 rjt_data.reason = reason;
1143 rjt_data.explan = explan;
1144 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1145 } else {
1146 sp = lport->tt.seq_start_next(sp);
1147 WARN_ON(!sp);
1148 pp = fc_frame_payload_get(fp, len);
1149 WARN_ON(!pp);
1150 memset(pp, 0, len);
1151 pp->prli.prli_cmd = ELS_LS_ACC;
1152 pp->prli.prli_spp_len = plen;
1153 pp->prli.prli_len = htons(len);
1154 len -= sizeof(struct fc_els_prli);
1155
1156 /*
1157 * Go through all the service parameter pages and build
1158 * response. If plen indicates longer SPP than standard,
1159 * use that. The entire response has been pre-cleared above.
1160 */
1161 spp = &pp->spp;
1162 while (len >= plen) {
1163 spp->spp_type = rspp->spp_type;
1164 spp->spp_type_ext = rspp->spp_type_ext;
1165 spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1166 resp = FC_SPP_RESP_ACK;
1167 if (rspp->spp_flags & FC_SPP_RPA_VAL)
1168 resp = FC_SPP_RESP_NO_PA;
1169 switch (rspp->spp_type) {
1170 case 0: /* common to all FC-4 types */
1171 break;
1172 case FC_TYPE_FCP:
1173 fcp_parm = ntohl(rspp->spp_params);
1174 if (fcp_parm * FCP_SPPF_RETRY)
1175 rdata->flags |= FC_RP_FLAGS_RETRY;
1176 rport->supported_classes = FC_COS_CLASS3;
1177 if (fcp_parm & FCP_SPPF_INIT_FCN)
1178 roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1179 if (fcp_parm & FCP_SPPF_TARG_FCN)
1180 roles |= FC_RPORT_ROLE_FCP_TARGET;
1181 rport->roles = roles;
1182
1183 spp->spp_params =
1184 htonl(lport->service_params);
1185 break;
1186 default:
1187 resp = FC_SPP_RESP_INVL;
1188 break;
1189 }
1190 spp->spp_flags |= resp;
1191 len -= plen;
1192 rspp = (struct fc_els_spp *)((char *)rspp + plen);
1193 spp = (struct fc_els_spp *)((char *)spp + plen);
1194 }
1195
1196 /*
1197 * Send LS_ACC. If this fails, the originator should retry.
1198 */
1199 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1200 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1201 ep = fc_seq_exch(sp);
1202 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1203 FC_TYPE_ELS, f_ctl, 0);
1204 lport->tt.seq_send(lport, sp, fp);
1205
1206 /*
1207 * Get lock and re-check state.
1208 */
1209 switch (rdata->rp_state) {
1210 case RPORT_ST_PRLI:
9fb9d328 1211 fc_rport_enter_ready(rdata);
42e9a92f
RL
1212 break;
1213 case RPORT_ST_READY:
1214 break;
1215 default:
1216 break;
1217 }
1218 }
1219 fc_frame_free(rx_fp);
1220}
1221
1222/**
34f42a07 1223 * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
9fb9d328 1224 * @rdata: private remote port data
42e9a92f
RL
1225 * @sp: current sequence in the PRLO exchange
1226 * @fp: PRLO request frame
1227 *
1228 * Locking Note: The rport lock is exected to be held before calling
1229 * this function.
1230 */
9fb9d328
JE
1231static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
1232 struct fc_seq *sp,
42e9a92f
RL
1233 struct fc_frame *fp)
1234{
42e9a92f
RL
1235 struct fc_lport *lport = rdata->local_port;
1236
1237 struct fc_frame_header *fh;
1238 struct fc_seq_els_data rjt_data;
1239
1240 fh = fc_frame_header_get(fp);
1241
9fb9d328
JE
1242 FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
1243 fc_rport_state(rdata));
42e9a92f 1244
14194054 1245 if (rdata->rp_state == RPORT_ST_DELETE) {
b4c6f546
AJ
1246 fc_frame_free(fp);
1247 return;
1248 }
1249
42e9a92f
RL
1250 rjt_data.fp = NULL;
1251 rjt_data.reason = ELS_RJT_UNAB;
1252 rjt_data.explan = ELS_EXPL_NONE;
1253 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1254 fc_frame_free(fp);
1255}
1256
1257/**
34f42a07 1258 * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
9fb9d328 1259 * @rdata: private remote port data
42e9a92f
RL
1260 * @sp: current sequence in the LOGO exchange
1261 * @fp: LOGO request frame
1262 *
1263 * Locking Note: The rport lock is exected to be held before calling
1264 * this function.
1265 */
9fb9d328
JE
1266static void fc_rport_recv_logo_req(struct fc_rport_priv *rdata,
1267 struct fc_seq *sp,
42e9a92f
RL
1268 struct fc_frame *fp)
1269{
1270 struct fc_frame_header *fh;
42e9a92f
RL
1271 struct fc_lport *lport = rdata->local_port;
1272
1273 fh = fc_frame_header_get(fp);
1274
9fb9d328
JE
1275 FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
1276 fc_rport_state(rdata));
42e9a92f 1277
14194054 1278 if (rdata->rp_state == RPORT_ST_DELETE) {
b4c6f546
AJ
1279 fc_frame_free(fp);
1280 return;
1281 }
1282
42e9a92f 1283 rdata->event = RPORT_EV_LOGO;
9fb9d328 1284 fc_rport_state_enter(rdata, RPORT_ST_DELETE);
42e9a92f
RL
1285 queue_work(rport_event_queue, &rdata->event_work);
1286
1287 lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
1288 fc_frame_free(fp);
1289}
1290
1291static void fc_rport_flush_queue(void)
1292{
1293 flush_workqueue(rport_event_queue);
1294}
1295
42e9a92f
RL
1296int fc_rport_init(struct fc_lport *lport)
1297{
5101ff99
RL
1298 if (!lport->tt.rport_create)
1299 lport->tt.rport_create = fc_rport_rogue_create;
1300
42e9a92f
RL
1301 if (!lport->tt.rport_login)
1302 lport->tt.rport_login = fc_rport_login;
1303
1304 if (!lport->tt.rport_logoff)
1305 lport->tt.rport_logoff = fc_rport_logoff;
1306
1307 if (!lport->tt.rport_recv_req)
1308 lport->tt.rport_recv_req = fc_rport_recv_req;
1309
1310 if (!lport->tt.rport_flush_queue)
1311 lport->tt.rport_flush_queue = fc_rport_flush_queue;
1312
1313 return 0;
1314}
1315EXPORT_SYMBOL(fc_rport_init);
1316
b0d428ad 1317int fc_setup_rport(void)
42e9a92f
RL
1318{
1319 rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1320 if (!rport_event_queue)
1321 return -ENOMEM;
1322 return 0;
1323}
1324EXPORT_SYMBOL(fc_setup_rport);
1325
b0d428ad 1326void fc_destroy_rport(void)
42e9a92f
RL
1327{
1328 destroy_workqueue(rport_event_queue);
1329}
1330EXPORT_SYMBOL(fc_destroy_rport);
1331
1332void fc_rport_terminate_io(struct fc_rport *rport)
1333{
ab28f1fd
JE
1334 struct fc_rport_libfc_priv *rp = rport->dd_data;
1335 struct fc_lport *lport = rp->local_port;
42e9a92f 1336
1f6ff364
AJ
1337 lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
1338 lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
42e9a92f
RL
1339}
1340EXPORT_SYMBOL(fc_rport_terminate_io);