/* drivers/scsi/be2iscsi/be_main.c */

/**
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>
#include <linux/bsg-lib.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_bsg_iscsi.h>
#include <scsi/scsi_netlink.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"
#include "be_cmds.h"

static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;

MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size,
		"Maximum Size (In Kilobytes) of physically contiguous "
		"memory that can be allocated. Range is 16 - 128");

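/*
 * Usage note: assuming the module is built under its DRV_NAME of
 * "be2iscsi", the parameters above can be set at load time, e.g.:
 *
 *	modprobe be2iscsi enable_msix=0 be_max_phys_size=64
 *
 * be_iopoll_budget and enable_msix use permission 0, so they are not
 * visible under /sys/module; be_max_phys_size is exported read-only.
 */
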
#define beiscsi_disp_param(_name)\
ssize_t	\
beiscsi_##_name##_disp(struct device *dev,\
			struct device_attribute *attrib, char *buf)	\
{	\
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost); \
	uint32_t param_val = 0;	\
	param_val = phba->attr_##_name;\
	return snprintf(buf, PAGE_SIZE, "%d\n",\
			phba->attr_##_name);\
}

#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
int \
beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
{\
	if (val >= _minval && val <= _maxval) {\
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
			    "BA_%d : beiscsi_"#_name" updated "\
			    "from 0x%x ==> 0x%x\n",\
			    phba->attr_##_name, val); \
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
		    "BA_%d beiscsi_"#_name" attribute "\
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	return -EINVAL;\
}

#define beiscsi_store_param(_name)  \
ssize_t \
beiscsi_##_name##_store(struct device *dev,\
			 struct device_attribute *attr, const char *buf,\
			 size_t count) \
{ \
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost);\
	uint32_t param_val = 0;\
	if (!isdigit(buf[0]))\
		return -EINVAL;\
	if (sscanf(buf, "%i", &param_val) != 1)\
		return -EINVAL;\
	if (beiscsi_##_name##_change(phba, param_val) == 0) \
		return strlen(buf);\
	else \
		return -EINVAL;\
}

#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
int \
beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
{ \
	if (val >= _minval && val <= _maxval) {\
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
		    "BA_%d beiscsi_"#_name" attribute " \
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	phba->attr_##_name = _defval;\
	return -EINVAL;\
}

#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
static uint beiscsi_##_name = _defval;\
module_param(beiscsi_##_name, uint, S_IRUGO);\
MODULE_PARM_DESC(beiscsi_##_name, _descp);\
beiscsi_disp_param(_name)\
beiscsi_change_param(_name, _minval, _maxval, _defval)\
beiscsi_store_param(_name)\
beiscsi_init_param(_name, _minval, _maxval, _defval)\
DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
	    beiscsi_##_name##_disp, beiscsi_##_name##_store)
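
/*
 * For reference, a single BEISCSI_RW_ATTR(log_enable, ...) invocation
 * below expands, per the helper macros above, roughly into:
 *
 *	static uint beiscsi_log_enable = 0x00;
 *	module_param(beiscsi_log_enable, uint, S_IRUGO);
 *	beiscsi_log_enable_disp()   - sysfs show of phba->attr_log_enable
 *	beiscsi_log_enable_change() - range-checked runtime update
 *	beiscsi_log_enable_store()  - sysfs store, calls _change()
 *	beiscsi_log_enable_init()   - validate module param or fall back
 *	                              to the default
 *	DEVICE_ATTR(beiscsi_log_enable, S_IRUGO | S_IWUSR, ...);
 *
 * so each attribute is exposed both as a module parameter and as a
 * per-host sysfs file.
 */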

/*
 * When a new log level is added, update the MAX allowed value for
 * log_enable.
 */
BEISCSI_RW_ATTR(log_enable, 0x00,
		0xFF, 0x00, "Enable logging Bit Mask\n"
		"\t\t\t\tInitialization Events	: 0x01\n"
		"\t\t\t\tMailbox Events		: 0x02\n"
		"\t\t\t\tMiscellaneous Events	: 0x04\n"
		"\t\t\t\tError Handling		: 0x08\n"
		"\t\t\t\tIO Path Events		: 0x10\n"
		"\t\t\t\tConfiguration Path	: 0x20\n");

DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
	    beiscsi_active_session_disp, NULL);
DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
	    beiscsi_free_session_disp, NULL);
struct device_attribute *beiscsi_attrs[] = {
	&dev_attr_beiscsi_log_enable,
	&dev_attr_beiscsi_drvr_ver,
	&dev_attr_beiscsi_adapter_family,
	&dev_attr_beiscsi_fw_ver,
	&dev_attr_beiscsi_active_session_count,
	&dev_attr_beiscsi_free_session_count,
	&dev_attr_beiscsi_phys_port,
	NULL,
};

static char const *cqe_desc[] = {
	"RESERVED_DESC",
	"SOL_CMD_COMPLETE",
	"SOL_CMD_KILLED_DATA_DIGEST_ERR",
	"CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
	"CXN_KILLED_BURST_LEN_MISMATCH",
	"CXN_KILLED_AHS_RCVD",
	"CXN_KILLED_HDR_DIGEST_ERR",
	"CXN_KILLED_UNKNOWN_HDR",
	"CXN_KILLED_STALE_ITT_TTT_RCVD",
	"CXN_KILLED_INVALID_ITT_TTT_RCVD",
	"CXN_KILLED_RST_RCVD",
	"CXN_KILLED_TIMED_OUT",
	"CXN_KILLED_RST_SENT",
	"CXN_KILLED_FIN_RCVD",
	"CXN_KILLED_BAD_UNSOL_PDU_RCVD",
	"CXN_KILLED_BAD_WRB_INDEX_ERROR",
	"CXN_KILLED_OVER_RUN_RESIDUAL",
	"CXN_KILLED_UNDER_RUN_RESIDUAL",
	"CMD_KILLED_INVALID_STATSN_RCVD",
	"CMD_KILLED_INVALID_R2T_RCVD",
	"CMD_CXN_KILLED_LUN_INVALID",
	"CMD_CXN_KILLED_ICD_INVALID",
	"CMD_CXN_KILLED_ITT_INVALID",
	"CMD_CXN_KILLED_SEQ_OUTOFORDER",
	"CMD_CXN_KILLED_INVALID_DATASN_RCVD",
	"CXN_INVALIDATE_NOTIFY",
	"CXN_INVALIDATE_INDEX_NOTIFY",
	"CMD_INVALIDATED_NOTIFY",
	"UNSOL_HDR_NOTIFY",
	"UNSOL_DATA_NOTIFY",
	"UNSOL_DATA_DIGEST_ERROR_NOTIFY",
	"DRIVERMSG_NOTIFY",
	"CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
	"SOL_CMD_KILLED_DIF_ERR",
	"CXN_KILLED_SYN_RCVD",
	"CXN_KILLED_IMM_DATA_RCVD"
};

static int beiscsi_slave_configure(struct scsi_device *sdev)
{
	blk_queue_max_segment_size(sdev->request_queue, 65536);
	return 0;
}

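/**
 * beiscsi_eh_abort - SCSI layer abort handler
 * @sc: command to be aborted
 *
 * Builds a one-entry invalidate table for the aborted command's ICD,
 * issues mgmt_invalidate_icds() to the adapter, waits for the MCC
 * completion and then defers to libiscsi's iscsi_eh_abort() for the
 * protocol-level cleanup.
 */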
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
	struct beiscsi_io_task *aborted_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, num_invalidate;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	spin_lock_bh(&session->lock);
	if (!aborted_task || !aborted_task->sc) {
		/* we raced */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}

	aborted_io_task = aborted_task->dd_data;
	if (!aborted_io_task->scsi_cmnd) {
		/* raced or invalid command */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}
	spin_unlock_bh(&session->lock);
	conn = aborted_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	/* invalidate iocb */
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
	inv_tbl->cid = cid;
	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
	num_invalidate = 1;
	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : Failed to allocate memory for "
			    "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);

	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : mgmt_invalidate_icds could not be "
			    "submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);

		return FAILED;
	}

	beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
			    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_abort(sc);
}

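/**
 * beiscsi_eh_device_reset - SCSI layer LUN reset handler
 * @sc: command that triggered the reset
 *
 * Walks the session's command array, collects the ICDs of all active
 * tasks on the LUN being reset into the invalidate table, asks the
 * adapter to invalidate them, then defers to iscsi_eh_device_reset().
 */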
static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
	struct iscsi_task *abrt_task;
	struct beiscsi_io_task *abrt_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct iscsi_cls_session *cls_session;
	struct invalidate_command_table *inv_tbl;
	struct be_dma_mem nonemb_cmd;
	unsigned int cid, tag, i, num_invalidate;

	/* invalidate iocbs */
	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;
	spin_lock_bh(&session->lock);
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
		spin_unlock_bh(&session->lock);
		return FAILED;
	}
	conn = session->leadconn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
	num_invalidate = 0;
	for (i = 0; i < conn->session->cmds_max; i++) {
		abrt_task = conn->session->cmds[i];
		abrt_io_task = abrt_task->dd_data;
		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
			continue;

		/* only invalidate commands on the LUN being reset */
		if (sc->device->lun != abrt_task->sc->device->lun)
			continue;

		inv_tbl->cid = cid;
		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
		num_invalidate++;
		inv_tbl++;
	}
	spin_unlock_bh(&session->lock);
	inv_tbl = phba->inv_tbl;

	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
				sizeof(struct invalidate_commands_params_in),
				&nonemb_cmd.dma);
	if (nonemb_cmd.va == NULL) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : Failed to allocate memory for "
			    "mgmt_invalidate_icds\n");
		return FAILED;
	}
	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
	memset(nonemb_cmd.va, 0, nonemb_cmd.size);
	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
				   cid, &nonemb_cmd);
	if (!tag) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : mgmt_invalidate_icds could not be"
			    " submitted\n");
		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
				    nonemb_cmd.va, nonemb_cmd.dma);
		return FAILED;
	}

	beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
			    nonemb_cmd.va, nonemb_cmd.dma);
	return iscsi_eh_device_reset(sc);
}

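/*
 * The beiscsi_show_boot_*_info() and *_get_attr_visibility() callbacks
 * below are the show/visibility hooks used with the iscsi_boot_sysfs
 * framework (<linux/iscsi_boot_sysfs.h>); they expose the firmware boot
 * target, initiator and ethernet parameters read from the adapter.
 */
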
static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	struct mgmt_session_info *boot_sess = &phba->boot_sess;
	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
		rc = sprintf(buf, "%.*s\n",
			     (int)strlen(boot_sess->target_name),
			     (char *)&boot_sess->target_name);
		break;
	case ISCSI_BOOT_TGT_IP_ADDR:
		if (boot_conn->dest_ipaddr.ip_type == 0x1)
			rc = sprintf(buf, "%pI4\n",
				     (char *)&boot_conn->dest_ipaddr.addr);
		else
			rc = sprintf(str, "%pI6\n",
				     (char *)&boot_conn->dest_ipaddr.addr);
		break;
	case ISCSI_BOOT_TGT_PORT:
		rc = sprintf(str, "%d\n", boot_conn->dest_port);
		break;

	case ISCSI_BOOT_TGT_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_chap_name);
		break;
	case ISCSI_BOOT_TGT_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     target_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.target_secret);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_chap_name_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_chap_name);
		break;
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
		rc = sprintf(str, "%.*s\n",
			     boot_conn->negotiated_login_options.auth_data.chap.
			     intr_secret_length,
			     (char *)&boot_conn->negotiated_login_options.
			     auth_data.chap.intr_secret);
		break;
	case ISCSI_BOOT_TGT_FLAGS:
		rc = sprintf(str, "2\n");
		break;
	case ISCSI_BOOT_TGT_NIC_ASSOC:
		rc = sprintf(str, "0\n");
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}

static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
{
	struct beiscsi_hba *phba = data;
	char *str = buf;
	int rc;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
		rc = sprintf(str, "2\n");
		break;
	case ISCSI_BOOT_ETH_INDEX:
		rc = sprintf(str, "0\n");
		break;
	case ISCSI_BOOT_ETH_MAC:
		rc = beiscsi_get_macaddr(str, phba);
		break;
	default:
		rc = -ENOSYS;
		break;
	}
	return rc;
}


static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
{
	umode_t rc;

	switch (type) {
	case ISCSI_BOOT_TGT_NAME:
	case ISCSI_BOOT_TGT_IP_ADDR:
	case ISCSI_BOOT_TGT_PORT:
	case ISCSI_BOOT_TGT_CHAP_NAME:
	case ISCSI_BOOT_TGT_CHAP_SECRET:
	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
	case ISCSI_BOOT_TGT_NIC_ASSOC:
	case ISCSI_BOOT_TGT_FLAGS:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
{
	umode_t rc;

	switch (type) {
	case ISCSI_BOOT_INI_INITIATOR_NAME:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}


static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
{
	umode_t rc;

	switch (type) {
	case ISCSI_BOOT_ETH_FLAGS:
	case ISCSI_BOOT_ETH_MAC:
	case ISCSI_BOOT_ETH_INDEX:
		rc = S_IRUGO;
		break;
	default:
		rc = 0;
		break;
	}
	return rc;
}

/*------------------- PCI Driver operations and data ----------------- */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);


static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.shost_attrs = beiscsi_attrs,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,

};

static struct scsi_transport_template *beiscsi_scsi_transport;

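/**
 * beiscsi_hba_alloc - allocate the Scsi_Host and driver private data
 * @pcidev: PCI device being probed
 *
 * Allocates a Scsi_Host with a beiscsi_hba hanging off it as host
 * private data, takes a reference on the PCI device and registers the
 * host with the SCSI midlayer.
 */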
static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev,
			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->dma_boundary = pcidev->dma_mask;
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);
	phba->interface_handle = 0xFFFFFFFF;

	if (iscsi_host_add(shost, &phba->pcidev->dev))
		goto free_devices;

	return phba;

free_devices:
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	return NULL;
}

static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}

static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);

	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));

	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}

static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - enable device failed\n");
		return ret;
	}

	pci_set_master(pcidev);
	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			pci_disable_device(pcidev);
			return ret;
		}
	}
	return 0;
}

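/*
 * be_ctrl_init - map the BARs and set up the bootstrap mailbox.  The
 * mailbox DMA buffer is over-allocated by 16 bytes so that the portion
 * actually handed to the adapter can be placed on a 16-byte boundary
 * with PTR_ALIGN() below.
 */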
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		return -ENOMEM;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);
	spin_lock_init(&phba->ctrl.mcc_cq_lock);

	return status;
}

/**
 * beiscsi_get_params()- Set the config parameters
 * @phba: ptr to the device private structure
 **/
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	uint32_t total_cid_count = 0;
	uint32_t total_icd_count = 0;
	uint8_t ulp_num = 0;

	total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
			  BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			total_icd_count = phba->fw_config.
					  iscsi_icd_count[ulp_num];
			break;
		}

	phba->params.ios_per_ctrl = (total_icd_count -
				    (total_cid_count +
				     BE2_TMFS + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = total_cid_count;
	phba->params.asyncpdus_per_ctrl = total_cid_count;
	phba->params.icds_per_ctrl = total_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.eq_timer = 64;
	phba->params.num_eq_entries = 1024;
	phba->params.num_cq_entries = 1024;
	phba->params.wrbs_per_cxn = 256;
}

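/*
 * hwi_ring_eq_db - ring an event-queue doorbell.  The 32-bit doorbell
 * word packs the EQ id, a "clear interrupt" bit, an "is event" bit, the
 * number of EQ entries popped and a re-arm bit, and is then written to
 * the DB_EQ_OFFSET doorbell register.
 */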
static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;
	val |= id & DB_EQ_RING_ID_MASK;
	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}

/**
 * be_isr_mcc - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba = pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	num_eq_processed = 0;

	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			pbe_eq->todo_mcc_cq = true;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		num_eq_processed++;
	}
	if (pbe_eq->todo_mcc_cq)
		queue_work(phba->wq, &pbe_eq->work_cqs);
	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

	return IRQ_HANDLED;
}

/**
 * be_isr_msix - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	cq = pbe_eq->cq;
	eqe = queue_tail_node(eq);

	phba = pbe_eq->phba;
	num_eq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
				blk_iopoll_sched(&pbe_eq->iopoll);

			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
	} else {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			pbe_eq->todo_cq = true;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}

		if (pbe_eq->todo_cq)
			queue_work(phba->wq, &pbe_eq->work_cqs);
	}

	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);

	return IRQ_HANDLED;
}

6733b39a
JK
860/**
861 * be_isr - The isr routine of the driver.
862 * @irq: Not used
863 * @dev_id: Pointer to host adapter structure
864 */
865static irqreturn_t be_isr(int irq, void *dev_id)
866{
867 struct beiscsi_hba *phba;
868 struct hwi_controller *phwi_ctrlr;
869 struct hwi_context_memory *phwi_context;
870 struct be_eq_entry *eqe = NULL;
871 struct be_queue_info *eq;
872 struct be_queue_info *cq;
bfead3b2 873 struct be_queue_info *mcc;
6733b39a 874 unsigned long flags, index;
bfead3b2 875 unsigned int num_mcceq_processed, num_ioeq_processed;
6733b39a 876 struct be_ctrl_info *ctrl;
bfead3b2 877 struct be_eq_obj *pbe_eq;
6733b39a
JK
878 int isr;
879
880 phba = dev_id;
6eab04a8 881 ctrl = &phba->ctrl;
bfead3b2
JK
882 isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
883 (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
884 if (!isr)
885 return IRQ_NONE;
6733b39a
JK
886
887 phwi_ctrlr = phba->phwi_ctrlr;
888 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2
JK
889 pbe_eq = &phwi_context->be_eq[0];
890
891 eq = &phwi_context->be_eq[0].q;
892 mcc = &phba->ctrl.mcc_obj.cq;
6733b39a
JK
893 index = 0;
894 eqe = queue_tail_node(eq);
6733b39a 895
bfead3b2
JK
896 num_ioeq_processed = 0;
897 num_mcceq_processed = 0;
6733b39a
JK
898 if (blk_iopoll_enabled) {
899 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
900 & EQE_VALID_MASK) {
bfead3b2
JK
901 if (((eqe->dw[offsetof(struct amap_eq_entry,
902 resource_id) / 32] &
903 EQE_RESID_MASK) >> 16) == mcc->id) {
904 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 905 pbe_eq->todo_mcc_cq = true;
bfead3b2
JK
906 spin_unlock_irqrestore(&phba->isr_lock, flags);
907 num_mcceq_processed++;
908 } else {
909 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
910 blk_iopoll_sched(&pbe_eq->iopoll);
911 num_ioeq_processed++;
912 }
6733b39a
JK
913 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
914 queue_tail_inc(eq);
915 eqe = queue_tail_node(eq);
6733b39a 916 }
bfead3b2 917 if (num_ioeq_processed || num_mcceq_processed) {
72fb46a9
JSJ
918 if (pbe_eq->todo_mcc_cq)
919 queue_work(phba->wq, &pbe_eq->work_cqs);
bfead3b2 920
756d29c8 921 if ((num_mcceq_processed) && (!num_ioeq_processed))
bfead3b2
JK
922 hwi_ring_eq_db(phba, eq->id, 0,
923 (num_ioeq_processed +
924 num_mcceq_processed) , 1, 1);
925 else
926 hwi_ring_eq_db(phba, eq->id, 0,
927 (num_ioeq_processed +
928 num_mcceq_processed), 0, 1);
929
6733b39a
JK
930 return IRQ_HANDLED;
931 } else
932 return IRQ_NONE;
933 } else {
bfead3b2 934 cq = &phwi_context->be_cq[0];
6733b39a
JK
935 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
936 & EQE_VALID_MASK) {
937
938 if (((eqe->dw[offsetof(struct amap_eq_entry,
939 resource_id) / 32] &
940 EQE_RESID_MASK) >> 16) != cq->id) {
941 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 942 pbe_eq->todo_mcc_cq = true;
6733b39a
JK
943 spin_unlock_irqrestore(&phba->isr_lock, flags);
944 } else {
945 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 946 pbe_eq->todo_cq = true;
6733b39a
JK
947 spin_unlock_irqrestore(&phba->isr_lock, flags);
948 }
949 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
950 queue_tail_inc(eq);
951 eqe = queue_tail_node(eq);
bfead3b2 952 num_ioeq_processed++;
6733b39a 953 }
72fb46a9
JSJ
954 if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq)
955 queue_work(phba->wq, &pbe_eq->work_cqs);
6733b39a 956
bfead3b2
JK
957 if (num_ioeq_processed) {
958 hwi_ring_eq_db(phba, eq->id, 0,
959 num_ioeq_processed, 1, 1);
6733b39a
JK
960 return IRQ_HANDLED;
961 } else
962 return IRQ_NONE;
963 }
964}
965
966static int beiscsi_init_irqs(struct beiscsi_hba *phba)
967{
968 struct pci_dev *pcidev = phba->pcidev;
bfead3b2
JK
969 struct hwi_controller *phwi_ctrlr;
970 struct hwi_context_memory *phwi_context;
4f5af07e 971 int ret, msix_vec, i, j;
6733b39a 972
bfead3b2
JK
973 phwi_ctrlr = phba->phwi_ctrlr;
974 phwi_context = phwi_ctrlr->phwi_ctxt;
975
976 if (phba->msix_enabled) {
977 for (i = 0; i < phba->num_cpus; i++) {
8fcfb210
JK
978 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
979 GFP_KERNEL);
980 if (!phba->msi_name[i]) {
981 ret = -ENOMEM;
982 goto free_msix_irqs;
983 }
984
985 sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
986 phba->shost->host_no, i);
bfead3b2 987 msix_vec = phba->msix_entries[i].vector;
8fcfb210
JK
988 ret = request_irq(msix_vec, be_isr_msix, 0,
989 phba->msi_name[i],
bfead3b2 990 &phwi_context->be_eq[i]);
4f5af07e 991 if (ret) {
99bc5d55
JSJ
992 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
993 "BM_%d : beiscsi_init_irqs-Failed to"
994 "register msix for i = %d\n",
995 i);
8fcfb210 996 kfree(phba->msi_name[i]);
4f5af07e
JK
997 goto free_msix_irqs;
998 }
bfead3b2 999 }
8fcfb210
JK
1000 phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
1001 if (!phba->msi_name[i]) {
1002 ret = -ENOMEM;
1003 goto free_msix_irqs;
1004 }
1005 sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
1006 phba->shost->host_no);
bfead3b2 1007 msix_vec = phba->msix_entries[i].vector;
8fcfb210 1008 ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
bfead3b2 1009 &phwi_context->be_eq[i]);
4f5af07e 1010 if (ret) {
99bc5d55
JSJ
1011 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT ,
1012 "BM_%d : beiscsi_init_irqs-"
1013 "Failed to register beiscsi_msix_mcc\n");
8fcfb210 1014 kfree(phba->msi_name[i]);
4f5af07e
JK
1015 goto free_msix_irqs;
1016 }
1017
bfead3b2
JK
1018 } else {
1019 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
1020 "beiscsi", phba);
1021 if (ret) {
99bc5d55
JSJ
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register irq\n");
bfead3b2
JK
1025 return ret;
1026 }
6733b39a
JK
1027 }
1028 return 0;
4f5af07e 1029free_msix_irqs:
8fcfb210
JK
1030 for (j = i - 1; j >= 0; j--) {
1031 kfree(phba->msi_name[j]);
1032 msix_vec = phba->msix_entries[j].vector;
4f5af07e 1033 free_irq(msix_vec, &phwi_context->be_eq[j]);
8fcfb210 1034 }
4f5af07e 1035 return ret;
6733b39a
JK
1036}
1037
1038static void hwi_ring_cq_db(struct beiscsi_hba *phba,
1039 unsigned int id, unsigned int num_processed,
1040 unsigned char rearm, unsigned char event)
1041{
1042 u32 val = 0;
1043 val |= id & DB_CQ_RING_ID_MASK;
1044 if (rearm)
1045 val |= 1 << DB_CQ_REARM_SHIFT;
1046 val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
1047 iowrite32(val, phba->db_va + DB_CQ_OFFSET);
1048}
1049
6733b39a
JK
1050static unsigned int
1051beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
1052 struct beiscsi_hba *phba,
6733b39a
JK
1053 struct pdu_base *ppdu,
1054 unsigned long pdu_len,
1055 void *pbuffer, unsigned long buf_len)
1056{
1057 struct iscsi_conn *conn = beiscsi_conn->conn;
1058 struct iscsi_session *session = conn->session;
bfead3b2
JK
1059 struct iscsi_task *task;
1060 struct beiscsi_io_task *io_task;
1061 struct iscsi_hdr *login_hdr;
6733b39a
JK
1062
1063 switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
1064 PDUBASE_OPCODE_MASK) {
1065 case ISCSI_OP_NOOP_IN:
1066 pbuffer = NULL;
1067 buf_len = 0;
1068 break;
1069 case ISCSI_OP_ASYNC_EVENT:
1070 break;
1071 case ISCSI_OP_REJECT:
1072 WARN_ON(!pbuffer);
1073 WARN_ON(!(buf_len == 48));
99bc5d55
JSJ
1074 beiscsi_log(phba, KERN_ERR,
1075 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1076 "BM_%d : In ISCSI_OP_REJECT\n");
6733b39a
JK
1077 break;
1078 case ISCSI_OP_LOGIN_RSP:
7bd6e25c 1079 case ISCSI_OP_TEXT_RSP:
bfead3b2
JK
1080 task = conn->login_task;
1081 io_task = task->dd_data;
1082 login_hdr = (struct iscsi_hdr *)ppdu;
1083 login_hdr->itt = io_task->libiscsi_itt;
6733b39a
JK
1084 break;
1085 default:
99bc5d55
JSJ
1086 beiscsi_log(phba, KERN_WARNING,
1087 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1088 "BM_%d : Unrecognized opcode 0x%x in async msg\n",
1089 (ppdu->
6733b39a 1090 dw[offsetof(struct amap_pdu_base, opcode) / 32]
99bc5d55 1091 & PDUBASE_OPCODE_MASK));
6733b39a
JK
1092 return 1;
1093 }
1094
1095 spin_lock_bh(&session->lock);
1096 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
1097 spin_unlock_bh(&session->lock);
1098 return 0;
1099}
1100
1101static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
1102{
1103 struct sgl_handle *psgl_handle;
1104
1105 if (phba->io_sgl_hndl_avbl) {
99bc5d55
JSJ
1106 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
1107 "BM_%d : In alloc_io_sgl_handle,"
1108 " io_sgl_alloc_index=%d\n",
1109 phba->io_sgl_alloc_index);
1110
6733b39a
JK
1111 psgl_handle = phba->io_sgl_hndl_base[phba->
1112 io_sgl_alloc_index];
1113 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
1114 phba->io_sgl_hndl_avbl--;
bfead3b2
JK
1115 if (phba->io_sgl_alloc_index == (phba->params.
1116 ios_per_ctrl - 1))
6733b39a
JK
1117 phba->io_sgl_alloc_index = 0;
1118 else
1119 phba->io_sgl_alloc_index++;
1120 } else
1121 psgl_handle = NULL;
1122 return psgl_handle;
1123}
1124
1125static void
1126free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1127{
99bc5d55
JSJ
1128 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
1129 "BM_%d : In free_,io_sgl_free_index=%d\n",
1130 phba->io_sgl_free_index);
1131
6733b39a
JK
1132 if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
1133 /*
1134 * this can happen if clean_task is called on a task that
1135 * failed in xmit_task or alloc_pdu.
1136 */
99bc5d55
JSJ
1137 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
1138 "BM_%d : Double Free in IO SGL io_sgl_free_index=%d,"
1139 "value there=%p\n", phba->io_sgl_free_index,
1140 phba->io_sgl_hndl_base
1141 [phba->io_sgl_free_index]);
6733b39a
JK
1142 return;
1143 }
1144 phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
1145 phba->io_sgl_hndl_avbl++;
1146 if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
1147 phba->io_sgl_free_index = 0;
1148 else
1149 phba->io_sgl_free_index++;
1150}
1151
1152/**
1153 * alloc_wrb_handle - To allocate a wrb handle
1154 * @phba: The hba pointer
1155 * @cid: The cid to use for allocation
6733b39a
JK
1156 *
1157 * This happens under session_lock until submission to chip
1158 */
d5431488 1159struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
6733b39a
JK
1160{
1161 struct hwi_wrb_context *pwrb_context;
1162 struct hwi_controller *phwi_ctrlr;
d5431488 1163 struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
a7909b39 1164 uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);
6733b39a
JK
1165
1166 phwi_ctrlr = phba->phwi_ctrlr;
a7909b39 1167 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
d5431488 1168 if (pwrb_context->wrb_handles_available >= 2) {
bfead3b2
JK
1169 pwrb_handle = pwrb_context->pwrb_handle_base[
1170 pwrb_context->alloc_index];
1171 pwrb_context->wrb_handles_available--;
bfead3b2
JK
1172 if (pwrb_context->alloc_index ==
1173 (phba->params.wrbs_per_cxn - 1))
1174 pwrb_context->alloc_index = 0;
1175 else
1176 pwrb_context->alloc_index++;
d5431488
JK
1177 pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
1178 pwrb_context->alloc_index];
1179 pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
bfead3b2
JK
1180 } else
1181 pwrb_handle = NULL;
6733b39a
JK
1182 return pwrb_handle;
1183}
1184
1185/**
1186 * free_wrb_handle - To free the wrb handle back to pool
1187 * @phba: The hba pointer
1188 * @pwrb_context: The context to free from
1189 * @pwrb_handle: The wrb_handle to free
1190 *
1191 * This happens under session_lock until submission to chip
1192 */
1193static void
1194free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
1195 struct wrb_handle *pwrb_handle)
1196{
32951dd8 1197 pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
bfead3b2
JK
1198 pwrb_context->wrb_handles_available++;
1199 if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
1200 pwrb_context->free_index = 0;
1201 else
1202 pwrb_context->free_index++;
1203
99bc5d55
JSJ
1204 beiscsi_log(phba, KERN_INFO,
1205 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1206 "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x"
1207 "wrb_handles_available=%d\n",
1208 pwrb_handle, pwrb_context->free_index,
1209 pwrb_context->wrb_handles_available);
6733b39a
JK
1210}
1211
1212static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
1213{
1214 struct sgl_handle *psgl_handle;
1215
1216 if (phba->eh_sgl_hndl_avbl) {
1217 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
1218 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
99bc5d55
JSJ
1219 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1220 "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
1221 phba->eh_sgl_alloc_index,
1222 phba->eh_sgl_alloc_index);
1223
6733b39a
JK
1224 phba->eh_sgl_hndl_avbl--;
1225 if (phba->eh_sgl_alloc_index ==
1226 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
1227 1))
1228 phba->eh_sgl_alloc_index = 0;
1229 else
1230 phba->eh_sgl_alloc_index++;
1231 } else
1232 psgl_handle = NULL;
1233 return psgl_handle;
1234}
1235
1236void
1237free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1238{
1239
99bc5d55
JSJ
1240 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
1241 "BM_%d : In free_mgmt_sgl_handle,"
1242 "eh_sgl_free_index=%d\n",
1243 phba->eh_sgl_free_index);
1244
6733b39a
JK
1245 if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
1246 /*
1247 * this can happen if clean_task is called on a task that
1248 * failed in xmit_task or alloc_pdu.
1249 */
99bc5d55
JSJ
1250 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
1251 "BM_%d : Double Free in eh SGL ,"
1252 "eh_sgl_free_index=%d\n",
1253 phba->eh_sgl_free_index);
6733b39a
JK
1254 return;
1255 }
1256 phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
1257 phba->eh_sgl_hndl_avbl++;
1258 if (phba->eh_sgl_free_index ==
1259 (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
1260 phba->eh_sgl_free_index = 0;
1261 else
1262 phba->eh_sgl_free_index++;
1263}
1264
1265static void
1266be_complete_io(struct beiscsi_conn *beiscsi_conn,
73133261
JSJ
1267 struct iscsi_task *task,
1268 struct common_sol_cqe *csol_cqe)
6733b39a
JK
1269{
1270 struct beiscsi_io_task *io_task = task->dd_data;
1271 struct be_status_bhs *sts_bhs =
1272 (struct be_status_bhs *)io_task->cmd_bhs;
1273 struct iscsi_conn *conn = beiscsi_conn->conn;
6733b39a
JK
1274 unsigned char *sense;
1275 u32 resid = 0, exp_cmdsn, max_cmdsn;
1276 u8 rsp, status, flags;
1277
73133261
JSJ
1278 exp_cmdsn = csol_cqe->exp_cmdsn;
1279 max_cmdsn = (csol_cqe->exp_cmdsn +
1280 csol_cqe->cmd_wnd - 1);
1281 rsp = csol_cqe->i_resp;
1282 status = csol_cqe->i_sts;
1283 flags = csol_cqe->i_flags;
1284 resid = csol_cqe->res_cnt;
1285
bd535451
JK
1286 if (!task->sc) {
1287 if (io_task->scsi_cmnd)
1288 scsi_dma_unmap(io_task->scsi_cmnd);
6733b39a 1289
bd535451
JK
1290 return;
1291 }
6733b39a
JK
1292 task->sc->result = (DID_OK << 16) | status;
1293 if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
1294 task->sc->result = DID_ERROR << 16;
1295 goto unmap;
1296 }
1297
1298 /* bidi not initially supported */
1299 if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
6733b39a
JK
1300 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
1301 task->sc->result = DID_ERROR << 16;
1302
1303 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
1304 scsi_set_resid(task->sc, resid);
1305 if (!status && (scsi_bufflen(task->sc) - resid <
1306 task->sc->underflow))
1307 task->sc->result = DID_ERROR << 16;
1308 }
1309 }
1310
1311 if (status == SAM_STAT_CHECK_CONDITION) {
4053a4be 1312 u16 sense_len;
bfead3b2 1313 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
4053a4be 1314
6733b39a 1315 sense = sts_bhs->sense_info + sizeof(unsigned short);
4053a4be 1316 sense_len = be16_to_cpu(*slen);
6733b39a
JK
1317 memcpy(task->sc->sense_buffer, sense,
1318 min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
1319 }
756d29c8 1320
73133261
JSJ
1321 if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
1322 conn->rxdata_octets += resid;
6733b39a
JK
1323unmap:
1324 scsi_dma_unmap(io_task->scsi_cmnd);
1325 iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
1326}
1327
1328static void
1329be_complete_logout(struct beiscsi_conn *beiscsi_conn,
73133261
JSJ
1330 struct iscsi_task *task,
1331 struct common_sol_cqe *csol_cqe)
6733b39a
JK
1332{
1333 struct iscsi_logout_rsp *hdr;
bfead3b2 1334 struct beiscsi_io_task *io_task = task->dd_data;
6733b39a
JK
1335 struct iscsi_conn *conn = beiscsi_conn->conn;
1336
1337 hdr = (struct iscsi_logout_rsp *)task->hdr;
7bd6e25c 1338 hdr->opcode = ISCSI_OP_LOGOUT_RSP;
6733b39a
JK
1339 hdr->t2wait = 5;
1340 hdr->t2retain = 0;
73133261
JSJ
1341 hdr->flags = csol_cqe->i_flags;
1342 hdr->response = csol_cqe->i_resp;
702dc5e8
JK
1343 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1344 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1345 csol_cqe->cmd_wnd - 1);
73133261 1346
7bd6e25c
JK
1347 hdr->dlength[0] = 0;
1348 hdr->dlength[1] = 0;
1349 hdr->dlength[2] = 0;
6733b39a 1350 hdr->hlength = 0;
bfead3b2 1351 hdr->itt = io_task->libiscsi_itt;
6733b39a
JK
1352 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1353}
1354
1355static void
1356be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
73133261
JSJ
1357 struct iscsi_task *task,
1358 struct common_sol_cqe *csol_cqe)
6733b39a
JK
1359{
1360 struct iscsi_tm_rsp *hdr;
1361 struct iscsi_conn *conn = beiscsi_conn->conn;
bfead3b2 1362 struct beiscsi_io_task *io_task = task->dd_data;
6733b39a
JK
1363
1364 hdr = (struct iscsi_tm_rsp *)task->hdr;
7bd6e25c 1365 hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
73133261
JSJ
1366 hdr->flags = csol_cqe->i_flags;
1367 hdr->response = csol_cqe->i_resp;
702dc5e8
JK
1368 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
1369 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1370 csol_cqe->cmd_wnd - 1);
73133261 1371
bfead3b2 1372 hdr->itt = io_task->libiscsi_itt;
6733b39a
JK
1373 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1374}
1375
1376static void
1377hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
1378 struct beiscsi_hba *phba, struct sol_cqe *psol)
1379{
1380 struct hwi_wrb_context *pwrb_context;
bfead3b2 1381 struct wrb_handle *pwrb_handle = NULL;
6733b39a 1382 struct hwi_controller *phwi_ctrlr;
bfead3b2
JK
1383 struct iscsi_task *task;
1384 struct beiscsi_io_task *io_task;
a7909b39 1385 uint16_t wrb_index, cid, cri_index;
6733b39a
JK
1386
1387 phwi_ctrlr = phba->phwi_ctrlr;
2c9dfd36
JK
1388 if (is_chip_be2_be3r(phba)) {
1389 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
73133261 1390 wrb_idx, psol);
2c9dfd36 1391 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
73133261
JSJ
1392 cid, psol);
1393 } else {
2c9dfd36 1394 wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
73133261 1395 wrb_idx, psol);
2c9dfd36 1396 cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
73133261
JSJ
1397 cid, psol);
1398 }
1399
a7909b39
JK
1400 cri_index = BE_GET_CRI_FROM_CID(cid);
1401 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
73133261 1402 pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
32951dd8 1403 task = pwrb_handle->pio_handle;
35e66019 1404
bfead3b2 1405 io_task = task->dd_data;
4a4a11b9
JK
1406 memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb));
1407 iscsi_put_task(task);
6733b39a
JK
1408}
1409
1410static void
1411be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
73133261
JSJ
1412 struct iscsi_task *task,
1413 struct common_sol_cqe *csol_cqe)
6733b39a
JK
1414{
1415 struct iscsi_nopin *hdr;
1416 struct iscsi_conn *conn = beiscsi_conn->conn;
bfead3b2 1417 struct beiscsi_io_task *io_task = task->dd_data;
6733b39a
JK
1418
1419 hdr = (struct iscsi_nopin *)task->hdr;
73133261
JSJ
1420 hdr->flags = csol_cqe->i_flags;
1421 hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
702dc5e8
JK
1422 hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
1423 csol_cqe->cmd_wnd - 1);
73133261 1424
6733b39a 1425 hdr->opcode = ISCSI_OP_NOOP_IN;
bfead3b2 1426 hdr->itt = io_task->libiscsi_itt;
6733b39a
JK
1427 __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1428}
1429
73133261
JSJ
1430static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
1431 struct sol_cqe *psol,
1432 struct common_sol_cqe *csol_cqe)
1433{
2c9dfd36
JK
1434 if (is_chip_be2_be3r(phba)) {
1435 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
1436 i_exp_cmd_sn, psol);
1437 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
1438 i_res_cnt, psol);
1439 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
1440 i_cmd_wnd, psol);
1441 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
1442 wrb_index, psol);
1443 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
1444 cid, psol);
1445 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
1446 hw_sts, psol);
1447 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
1448 i_resp, psol);
1449 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
1450 i_sts, psol);
1451 csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
1452 i_flags, psol);
1453 } else {
73133261
JSJ
1454 csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1455 i_exp_cmd_sn, psol);
1456 csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1457 i_res_cnt, psol);
1458 csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1459 wrb_index, psol);
1460 csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1461 cid, psol);
1462 csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1463 hw_sts, psol);
702dc5e8 1464 csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
73133261
JSJ
1465 i_cmd_wnd, psol);
1466 if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1467 cmd_cmpl, psol))
1468 csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1469 i_sts, psol);
1470 else
1471 csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1472 i_sts, psol);
1473 if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1474 u, psol))
1475 csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;
1476
1477 if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
1478 o, psol))
1479 csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
73133261
JSJ
1480 }
1481}
1482
1483
6733b39a
JK
1484static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1485 struct beiscsi_hba *phba, struct sol_cqe *psol)
1486{
1487 struct hwi_wrb_context *pwrb_context;
1488 struct wrb_handle *pwrb_handle;
1489 struct iscsi_wrb *pwrb = NULL;
1490 struct hwi_controller *phwi_ctrlr;
1491 struct iscsi_task *task;
bfead3b2 1492 unsigned int type;
6733b39a
JK
1493 struct iscsi_conn *conn = beiscsi_conn->conn;
1494 struct iscsi_session *session = conn->session;
73133261 1495 struct common_sol_cqe csol_cqe = {0};
a7909b39 1496 uint16_t cri_index = 0;
6733b39a
JK
1497
1498 phwi_ctrlr = phba->phwi_ctrlr;
73133261
JSJ
1499
1500 /* Copy the elements to a common structure */
1501 adapter_get_sol_cqe(phba, psol, &csol_cqe);
1502
a7909b39
JK
1503 cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
1504 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
73133261
JSJ
1505
1506 pwrb_handle = pwrb_context->pwrb_handle_basestd[
1507 csol_cqe.wrb_index];
1508
32951dd8
JK
1509 task = pwrb_handle->pio_handle;
1510 pwrb = pwrb_handle->pwrb;
73133261 1511 type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
32951dd8 1512
bfead3b2
JK
1513 spin_lock_bh(&session->lock);
1514 switch (type) {
6733b39a
JK
1515 case HWH_TYPE_IO:
1516 case HWH_TYPE_IO_RD:
1517 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
dafab8e0 1518 ISCSI_OP_NOOP_OUT)
73133261 1519 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
dafab8e0 1520 else
73133261 1521 be_complete_io(beiscsi_conn, task, &csol_cqe);
6733b39a
JK
1522 break;
1523
1524 case HWH_TYPE_LOGOUT:
dafab8e0 1525 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
73133261 1526 be_complete_logout(beiscsi_conn, task, &csol_cqe);
dafab8e0 1527 else
73133261 1528 be_complete_tmf(beiscsi_conn, task, &csol_cqe);
6733b39a
JK
1529 break;
1530
1531 case HWH_TYPE_LOGIN:
99bc5d55
JSJ
1532 beiscsi_log(phba, KERN_ERR,
1533 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1534 "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
1535 " hwi_complete_cmd- Solicited path\n");
6733b39a
JK
1536 break;
1537
6733b39a 1538 case HWH_TYPE_NOP:
73133261 1539 be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
6733b39a
JK
1540 break;
1541
1542 default:
99bc5d55
JSJ
1543 beiscsi_log(phba, KERN_WARNING,
1544 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1545 "BM_%d : In hwi_complete_cmd, unknown type = %d"
1546 "wrb_index 0x%x CID 0x%x\n", type,
73133261
JSJ
1547 csol_cqe.wrb_index,
1548 csol_cqe.cid);
6733b39a
JK
1549 break;
1550 }
35e66019 1551
6733b39a
JK
1552 spin_unlock_bh(&session->lock);
1553}
1554
1555static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1556 *pasync_ctx, unsigned int is_header,
1557 unsigned int host_write_ptr)
1558{
1559 if (is_header)
1560 return &pasync_ctx->async_entry[host_write_ptr].
1561 header_busy_list;
1562 else
1563 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1564}
1565
1566static struct async_pdu_handle *
1567hwi_get_async_handle(struct beiscsi_hba *phba,
1568 struct beiscsi_conn *beiscsi_conn,
1569 struct hwi_async_pdu_context *pasync_ctx,
1570 struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
1571{
1572 struct be_bus_address phys_addr;
1573 struct list_head *pbusy_list;
1574 struct async_pdu_handle *pasync_handle = NULL;
6733b39a 1575 unsigned char is_header = 0;
73133261
JSJ
1576 unsigned int index, dpl;
1577
2c9dfd36
JK
1578 if (is_chip_be2_be3r(phba)) {
1579 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
73133261 1580 dpl, pdpdu_cqe);
2c9dfd36 1581 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
73133261
JSJ
1582 index, pdpdu_cqe);
1583 } else {
2c9dfd36 1584 dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
73133261 1585 dpl, pdpdu_cqe);
2c9dfd36 1586 index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
73133261
JSJ
1587 index, pdpdu_cqe);
1588 }
6733b39a
JK
1589
1590 phys_addr.u.a32.address_lo =
73133261
JSJ
1591 (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1592 db_addr_lo) / 32] - dpl);
6733b39a 1593 phys_addr.u.a32.address_hi =
73133261
JSJ
1594 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1595 db_addr_hi) / 32];
6733b39a
JK
1596
1597 phys_addr.u.a64.address =
1598 *((unsigned long long *)(&phys_addr.u.a64.address));
1599
1600 switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1601 & PDUCQE_CODE_MASK) {
1602 case UNSOL_HDR_NOTIFY:
1603 is_header = 1;
1604
73133261
JSJ
1605 pbusy_list = hwi_get_async_busy_list(pasync_ctx,
1606 is_header, index);
6733b39a
JK
1607 break;
1608 case UNSOL_DATA_NOTIFY:
73133261
JSJ
1609 pbusy_list = hwi_get_async_busy_list(pasync_ctx,
1610 is_header, index);
6733b39a
JK
1611 break;
1612 default:
1613 pbusy_list = NULL;
99bc5d55
JSJ
1614 beiscsi_log(phba, KERN_WARNING,
1615 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1616 "BM_%d : Unexpected code=%d\n",
1617 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1618 code) / 32] & PDUCQE_CODE_MASK);
6733b39a
JK
1619 return NULL;
1620 }
1621
6733b39a
JK
1622 WARN_ON(list_empty(pbusy_list));
1623 list_for_each_entry(pasync_handle, pbusy_list, link) {
dc63aac6 1624 if (pasync_handle->pa.u.a64.address == phys_addr.u.a64.address)
6733b39a
JK
1625 break;
1626 }
1627
1628 WARN_ON(!pasync_handle);
1629
8a86e833
JK
1630 pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(
1631 beiscsi_conn->beiscsi_conn_cid);
6733b39a 1632 pasync_handle->is_header = is_header;
73133261
JSJ
1633 pasync_handle->buffer_len = dpl;
1634 *pcq_index = index;
6733b39a 1635
6733b39a
JK
1636 return pasync_handle;
1637}
1638
1639static unsigned int
99bc5d55
JSJ
1640hwi_update_async_writables(struct beiscsi_hba *phba,
1641 struct hwi_async_pdu_context *pasync_ctx,
1642 unsigned int is_header, unsigned int cq_index)
6733b39a
JK
1643{
1644 struct list_head *pbusy_list;
1645 struct async_pdu_handle *pasync_handle;
1646 unsigned int num_entries, writables = 0;
1647 unsigned int *pep_read_ptr, *pwritables;
1648
dc63aac6 1649 num_entries = pasync_ctx->num_entries;
6733b39a
JK
1650 if (is_header) {
1651 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1652 pwritables = &pasync_ctx->async_header.writables;
6733b39a
JK
1653 } else {
1654 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1655 pwritables = &pasync_ctx->async_data.writables;
6733b39a
JK
1656 }
1657
1658 while ((*pep_read_ptr) != cq_index) {
1659 (*pep_read_ptr)++;
1660 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1661
1662 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1663 *pep_read_ptr);
1664 if (writables == 0)
1665 WARN_ON(list_empty(pbusy_list));
1666
1667 if (!list_empty(pbusy_list)) {
1668 pasync_handle = list_entry(pbusy_list->next,
1669 struct async_pdu_handle,
1670 link);
1671 WARN_ON(!pasync_handle);
1672 pasync_handle->consumed = 1;
1673 }
1674
1675 writables++;
1676 }
1677
1678 if (!writables) {
99bc5d55
JSJ
1679 beiscsi_log(phba, KERN_ERR,
1680 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1681 "BM_%d : Duplicate notification received - index 0x%x!!\n",
1682 cq_index);
6733b39a
JK
1683 WARN_ON(1);
1684 }
1685
1686 *pwritables = *pwritables + writables;
1687 return 0;
1688}
1689
9728d8d0 1690static void hwi_free_async_msg(struct beiscsi_hba *phba,
8a86e833
JK
1691 struct hwi_async_pdu_context *pasync_ctx,
1692 unsigned int cri)
6733b39a 1693{
6733b39a
JK
1694 struct async_pdu_handle *pasync_handle, *tmp_handle;
1695 struct list_head *plist;
6733b39a 1696
6733b39a 1697 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
6733b39a
JK
1698 list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1699 list_del(&pasync_handle->link);
1700
9728d8d0 1701 if (pasync_handle->is_header) {
6733b39a
JK
1702 list_add_tail(&pasync_handle->link,
1703 &pasync_ctx->async_header.free_list);
1704 pasync_ctx->async_header.free_entries++;
6733b39a
JK
1705 } else {
1706 list_add_tail(&pasync_handle->link,
1707 &pasync_ctx->async_data.free_list);
1708 pasync_ctx->async_data.free_entries++;
6733b39a
JK
1709 }
1710 }
1711
1712 INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1713 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1714 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
6733b39a
JK
1715}
1716
1717static struct phys_addr *
1718hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1719 unsigned int is_header, unsigned int host_write_ptr)
1720{
1721 struct phys_addr *pasync_sge = NULL;
1722
1723 if (is_header)
1724 pasync_sge = pasync_ctx->async_header.ring_base;
1725 else
1726 pasync_sge = pasync_ctx->async_data.ring_base;
1727
1728 return pasync_sge + host_write_ptr;
1729}
1730
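/*
 * hwi_post_async_buffers() below replenishes the default PDU header or data
 * ring of the given ULP: it posts min(writables, free_entries) buffers,
 * rounded down to a multiple of 8, by writing each free handle's physical
 * address into the ring at host_write_ptr and moving the handle to the busy
 * list, then rings the doorbell with the number of entries posted.
 */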
1731static void hwi_post_async_buffers(struct beiscsi_hba *phba,
8a86e833 1732 unsigned int is_header, uint8_t ulp_num)
6733b39a
JK
1733{
1734 struct hwi_controller *phwi_ctrlr;
1735 struct hwi_async_pdu_context *pasync_ctx;
1736 struct async_pdu_handle *pasync_handle;
1737 struct list_head *pfree_link, *pbusy_list;
1738 struct phys_addr *pasync_sge;
1739 unsigned int ring_id, num_entries;
8a86e833 1740 unsigned int host_write_num, doorbell_offset;
6733b39a
JK
1741 unsigned int writables;
1742 unsigned int i = 0;
1743 u32 doorbell = 0;
1744
1745 phwi_ctrlr = phba->phwi_ctrlr;
8a86e833 1746 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
dc63aac6 1747 num_entries = pasync_ctx->num_entries;
6733b39a
JK
1748
1749 if (is_header) {
6733b39a
JK
1750 writables = min(pasync_ctx->async_header.writables,
1751 pasync_ctx->async_header.free_entries);
1752 pfree_link = pasync_ctx->async_header.free_list.next;
1753 host_write_num = pasync_ctx->async_header.host_write_ptr;
8a86e833
JK
1754 ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
1755 doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
1756 doorbell_offset;
6733b39a 1757 } else {
6733b39a
JK
1758 writables = min(pasync_ctx->async_data.writables,
1759 pasync_ctx->async_data.free_entries);
1760 pfree_link = pasync_ctx->async_data.free_list.next;
1761 host_write_num = pasync_ctx->async_data.host_write_ptr;
8a86e833
JK
1762 ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
1763 doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
1764 doorbell_offset;
6733b39a
JK
1765 }
1766
1767 writables = (writables / 8) * 8;
1768 if (writables) {
1769 for (i = 0; i < writables; i++) {
1770 pbusy_list =
1771 hwi_get_async_busy_list(pasync_ctx, is_header,
1772 host_write_num);
1773 pasync_handle =
1774 list_entry(pfree_link, struct async_pdu_handle,
1775 link);
1776 WARN_ON(!pasync_handle);
1777 pasync_handle->consumed = 0;
1778
1779 pfree_link = pfree_link->next;
1780
1781 pasync_sge = hwi_get_ring_address(pasync_ctx,
1782 is_header, host_write_num);
1783
1784 pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1785 pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1786
1787 list_move(&pasync_handle->link, pbusy_list);
1788
1789 host_write_num++;
1790 host_write_num = host_write_num % num_entries;
1791 }
1792
1793 if (is_header) {
1794 pasync_ctx->async_header.host_write_ptr =
1795 host_write_num;
1796 pasync_ctx->async_header.free_entries -= writables;
1797 pasync_ctx->async_header.writables -= writables;
1798 pasync_ctx->async_header.busy_entries += writables;
1799 } else {
1800 pasync_ctx->async_data.host_write_ptr = host_write_num;
1801 pasync_ctx->async_data.free_entries -= writables;
1802 pasync_ctx->async_data.writables -= writables;
1803 pasync_ctx->async_data.busy_entries += writables;
1804 }
1805
1806 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1807 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1808 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1809 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1810 << DB_DEF_PDU_CQPROC_SHIFT;
1811
8a86e833 1812 iowrite32(doorbell, phba->db_va + doorbell_offset);
6733b39a
JK
1813 }
1814}
1815
1816static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1817 struct beiscsi_conn *beiscsi_conn,
1818 struct i_t_dpdu_cqe *pdpdu_cqe)
1819{
1820 struct hwi_controller *phwi_ctrlr;
1821 struct hwi_async_pdu_context *pasync_ctx;
1822 struct async_pdu_handle *pasync_handle = NULL;
1823 unsigned int cq_index = -1;
8a86e833
JK
1824 uint16_t cri_index = BE_GET_CRI_FROM_CID(
1825 beiscsi_conn->beiscsi_conn_cid);
6733b39a
JK
1826
1827 phwi_ctrlr = phba->phwi_ctrlr;
8a86e833
JK
1828 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
1829 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1830 cri_index));
6733b39a
JK
1831
1832 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1833 pdpdu_cqe, &cq_index);
1834 BUG_ON(pasync_handle->is_header != 0);
1835 if (pasync_handle->consumed == 0)
99bc5d55
JSJ
1836 hwi_update_async_writables(phba, pasync_ctx,
1837 pasync_handle->is_header, cq_index);
6733b39a 1838
8a86e833
JK
1839 hwi_free_async_msg(phba, pasync_ctx, pasync_handle->cri);
1840 hwi_post_async_buffers(phba, pasync_handle->is_header,
1841 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1842 cri_index));
6733b39a
JK
1843}
1844
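/*
 * hwi_fwd_async_msg() below reassembles a received PDU for the given CRI:
 * the first handle on the wait queue carries the header, the remaining
 * handles carry data fragments that are copied back-to-back into the first
 * data buffer. The assembled header and payload are then handed to
 * beiscsi_process_async_pdu() and the handles are freed.
 */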
1845static unsigned int
1846hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1847 struct beiscsi_hba *phba,
1848 struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1849{
1850 struct list_head *plist;
1851 struct async_pdu_handle *pasync_handle;
1852 void *phdr = NULL;
1853 unsigned int hdr_len = 0, buf_len = 0;
1854 unsigned int status, index = 0, offset = 0;
1855 void *pfirst_buffer = NULL;
1856 unsigned int num_buf = 0;
1857
1858 plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1859
1860 list_for_each_entry(pasync_handle, plist, link) {
1861 if (index == 0) {
1862 phdr = pasync_handle->pbuffer;
1863 hdr_len = pasync_handle->buffer_len;
1864 } else {
1865 buf_len = pasync_handle->buffer_len;
1866 if (!num_buf) {
1867 pfirst_buffer = pasync_handle->pbuffer;
1868 num_buf++;
1869 }
1870 memcpy(pfirst_buffer + offset,
1871 pasync_handle->pbuffer, buf_len);
f2ba02b8 1872 offset += buf_len;
6733b39a
JK
1873 }
1874 index++;
1875 }
1876
1877 status = beiscsi_process_async_pdu(beiscsi_conn, phba,
7da50879 1878 phdr, hdr_len, pfirst_buffer,
f2ba02b8 1879 offset);
6733b39a 1880
8a86e833 1881 hwi_free_async_msg(phba, pasync_ctx, cri);
6733b39a
JK
1882 return 0;
1883}
1884
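/*
 * hwi_gather_async_pdu() below queues the completed handle on the per-CRI
 * wait queue. For a header completion it parses the expected data length
 * from the BHS and forwards the PDU immediately when no data is expected;
 * for a data completion it forwards the PDU once bytes_received reaches
 * bytes_needed.
 */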
1885static unsigned int
1886hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1887 struct beiscsi_hba *phba,
1888 struct async_pdu_handle *pasync_handle)
1889{
1890 struct hwi_async_pdu_context *pasync_ctx;
1891 struct hwi_controller *phwi_ctrlr;
1892 unsigned int bytes_needed = 0, status = 0;
1893 unsigned short cri = pasync_handle->cri;
1894 struct pdu_base *ppdu;
1895
1896 phwi_ctrlr = phba->phwi_ctrlr;
8a86e833
JK
1897 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
1898 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1899 BE_GET_CRI_FROM_CID(beiscsi_conn->
1900 beiscsi_conn_cid)));
6733b39a
JK
1901
1902 list_del(&pasync_handle->link);
1903 if (pasync_handle->is_header) {
1904 pasync_ctx->async_header.busy_entries--;
1905 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
8a86e833 1906 hwi_free_async_msg(phba, pasync_ctx, cri);
6733b39a
JK
1907 BUG();
1908 }
1909
1910 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1911 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1912 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1913 (unsigned short)pasync_handle->buffer_len;
1914 list_add_tail(&pasync_handle->link,
1915 &pasync_ctx->async_entry[cri].wait_queue.list);
1916
1917 ppdu = pasync_handle->pbuffer;
1918 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1919 data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1920 0xFFFF0000) | ((be16_to_cpu((ppdu->
1921 dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1922 & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1923
1924 if (status == 0) {
1925 pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1926 bytes_needed;
1927
1928 if (bytes_needed == 0)
1929 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1930 pasync_ctx, cri);
1931 }
1932 } else {
1933 pasync_ctx->async_data.busy_entries--;
1934 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1935 list_add_tail(&pasync_handle->link,
1936 &pasync_ctx->async_entry[cri].wait_queue.
1937 list);
1938 pasync_ctx->async_entry[cri].wait_queue.
1939 bytes_received +=
1940 (unsigned short)pasync_handle->buffer_len;
1941
1942 if (pasync_ctx->async_entry[cri].wait_queue.
1943 bytes_received >=
1944 pasync_ctx->async_entry[cri].wait_queue.
1945 bytes_needed)
1946 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1947 pasync_ctx, cri);
1948 }
1949 }
1950 return status;
1951}
1952
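/*
 * hwi_process_default_pdu_ring() below is the per-CQE entry point for
 * unsolicited header/data notifications: it resolves the async handle for
 * the CQE, updates the writable accounting if the buffer was not yet
 * consumed, gathers the PDU and reposts buffers to the default PDU ring.
 */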
1953static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1954 struct beiscsi_hba *phba,
1955 struct i_t_dpdu_cqe *pdpdu_cqe)
1956{
1957 struct hwi_controller *phwi_ctrlr;
1958 struct hwi_async_pdu_context *pasync_ctx;
1959 struct async_pdu_handle *pasync_handle = NULL;
1960 unsigned int cq_index = -1;
8a86e833
JK
1961 uint16_t cri_index = BE_GET_CRI_FROM_CID(
1962 beiscsi_conn->beiscsi_conn_cid);
6733b39a
JK
1963
1964 phwi_ctrlr = phba->phwi_ctrlr;
8a86e833
JK
1965 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr,
1966 BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr,
1967 cri_index));
1968
6733b39a
JK
1969 pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1970 pdpdu_cqe, &cq_index);
1971
1972 if (pasync_handle->consumed == 0)
99bc5d55
JSJ
1973 hwi_update_async_writables(phba, pasync_ctx,
1974 pasync_handle->is_header, cq_index);
1975
6733b39a 1976 hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
8a86e833
JK
1977 hwi_post_async_buffers(phba, pasync_handle->is_header,
1978 BEISCSI_GET_ULP_FROM_CRI(
1979 phwi_ctrlr, cri_index));
6733b39a
JK
1980}
1981
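/*
 * beiscsi_process_mcc_isr() below drains the MCC completion queue: async
 * entries are checked for link-state events, completed commands are handed
 * to be_mcc_compl_process_isr(), and the CQ doorbell is rung every 32
 * entries and once more with re-arm when no valid entries remain.
 */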
756d29c8
JK
1982static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1983{
1984 struct be_queue_info *mcc_cq;
1985 struct be_mcc_compl *mcc_compl;
1986 unsigned int num_processed = 0;
1987
1988 mcc_cq = &phba->ctrl.mcc_obj.cq;
1989 mcc_compl = queue_tail_node(mcc_cq);
1990 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1991 while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1992
1993 if (num_processed >= 32) {
1994 hwi_ring_cq_db(phba, mcc_cq->id,
1995 num_processed, 0, 0);
1996 num_processed = 0;
1997 }
1998 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1999 /* Interpret flags as an async trailer */
2000 if (is_link_state_evt(mcc_compl->flags))
 2001			/* Interpret compl as an async link evt */
2002 beiscsi_async_link_state_process(phba,
2003 (struct be_async_event_link_state *) mcc_compl);
2004 else
99bc5d55
JSJ
2005 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_MBOX,
2006 "BM_%d : Unsupported Async Event, flags"
2007 " = 0x%08x\n",
2008 mcc_compl->flags);
756d29c8
JK
2009 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
2010 be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
2011 atomic_dec(&phba->ctrl.mcc_obj.q.used);
2012 }
2013
2014 mcc_compl->flags = 0;
2015 queue_tail_inc(mcc_cq);
2016 mcc_compl = queue_tail_node(mcc_cq);
2017 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
2018 num_processed++;
2019 }
2020
2021 if (num_processed > 0)
2022 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
2023
2024}
bfead3b2 2025
6763daae
JSJ
2026/**
2027 * beiscsi_process_cq()- Process the Completion Queue
2028 * @pbe_eq: Event Q on which the Completion has come
2029 *
2030 * return
2031 * Number of Completion Entries processed.
2032 **/
bfead3b2 2033static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
6733b39a 2034{
6733b39a
JK
2035 struct be_queue_info *cq;
2036 struct sol_cqe *sol;
2037 struct dmsg_cqe *dmsg;
2038 unsigned int num_processed = 0;
2039 unsigned int tot_nump = 0;
0a513dd8 2040 unsigned short code = 0, cid = 0;
a7909b39 2041 uint16_t cri_index = 0;
6733b39a 2042 struct beiscsi_conn *beiscsi_conn;
c2462288
JK
2043 struct beiscsi_endpoint *beiscsi_ep;
2044 struct iscsi_endpoint *ep;
bfead3b2 2045 struct beiscsi_hba *phba;
6733b39a 2046
bfead3b2 2047 cq = pbe_eq->cq;
6733b39a 2048 sol = queue_tail_node(cq);
bfead3b2 2049 phba = pbe_eq->phba;
6733b39a
JK
2050
2051 while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
2052 CQE_VALID_MASK) {
2053 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
2054
73133261
JSJ
2055 code = (sol->dw[offsetof(struct amap_sol_cqe, code) /
2056 32] & CQE_CODE_MASK);
2057
2058 /* Get the CID */
2c9dfd36
JK
2059 if (is_chip_be2_be3r(phba)) {
2060 cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
2061 } else {
73133261
JSJ
2062 if ((code == DRIVERMSG_NOTIFY) ||
2063 (code == UNSOL_HDR_NOTIFY) ||
2064 (code == UNSOL_DATA_NOTIFY))
2065 cid = AMAP_GET_BITS(
2066 struct amap_i_t_dpdu_cqe_v2,
2067 cid, sol);
2068 else
2069 cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
2070 cid, sol);
2c9dfd36 2071 }
32951dd8 2072
a7909b39
JK
2073 cri_index = BE_GET_CRI_FROM_CID(cid);
2074 ep = phba->ep_array[cri_index];
c2462288
JK
2075 beiscsi_ep = ep->dd_data;
2076 beiscsi_conn = beiscsi_ep->conn;
756d29c8 2077
6733b39a 2078 if (num_processed >= 32) {
bfead3b2 2079 hwi_ring_cq_db(phba, cq->id,
6733b39a
JK
2080 num_processed, 0, 0);
2081 tot_nump += num_processed;
2082 num_processed = 0;
2083 }
2084
0a513dd8 2085 switch (code) {
6733b39a
JK
2086 case SOL_CMD_COMPLETE:
2087 hwi_complete_cmd(beiscsi_conn, phba, sol);
2088 break;
2089 case DRIVERMSG_NOTIFY:
99bc5d55
JSJ
2090 beiscsi_log(phba, KERN_INFO,
2091 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2092 "BM_%d : Received %s[%d] on CID : %d\n",
2093 cqe_desc[code], code, cid);
99bc5d55 2094
6733b39a
JK
2095 dmsg = (struct dmsg_cqe *)sol;
2096 hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
2097 break;
2098 case UNSOL_HDR_NOTIFY:
99bc5d55
JSJ
2099 beiscsi_log(phba, KERN_INFO,
2100 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2101 "BM_%d : Received %s[%d] on CID : %d\n",
2102 cqe_desc[code], code, cid);
99bc5d55 2103
8f09a3b9 2104 spin_lock_bh(&phba->async_pdu_lock);
bfead3b2
JK
2105 hwi_process_default_pdu_ring(beiscsi_conn, phba,
2106 (struct i_t_dpdu_cqe *)sol);
8f09a3b9 2107 spin_unlock_bh(&phba->async_pdu_lock);
bfead3b2 2108 break;
6733b39a 2109 case UNSOL_DATA_NOTIFY:
99bc5d55
JSJ
2110 beiscsi_log(phba, KERN_INFO,
2111 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
6763daae
JSJ
2112 "BM_%d : Received %s[%d] on CID : %d\n",
2113 cqe_desc[code], code, cid);
99bc5d55 2114
8f09a3b9 2115 spin_lock_bh(&phba->async_pdu_lock);
6733b39a
JK
2116 hwi_process_default_pdu_ring(beiscsi_conn, phba,
2117 (struct i_t_dpdu_cqe *)sol);
8f09a3b9 2118 spin_unlock_bh(&phba->async_pdu_lock);
6733b39a
JK
2119 break;
2120 case CXN_INVALIDATE_INDEX_NOTIFY:
2121 case CMD_INVALIDATED_NOTIFY:
2122 case CXN_INVALIDATE_NOTIFY:
99bc5d55
JSJ
2123 beiscsi_log(phba, KERN_ERR,
2124 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2125 "BM_%d : Ignoring %s[%d] on CID : %d\n",
2126 cqe_desc[code], code, cid);
6733b39a
JK
2127 break;
2128 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
2129 case CMD_KILLED_INVALID_STATSN_RCVD:
2130 case CMD_KILLED_INVALID_R2T_RCVD:
2131 case CMD_CXN_KILLED_LUN_INVALID:
2132 case CMD_CXN_KILLED_ICD_INVALID:
2133 case CMD_CXN_KILLED_ITT_INVALID:
2134 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
2135 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
99bc5d55
JSJ
2136 beiscsi_log(phba, KERN_ERR,
2137 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
6763daae
JSJ
2138 "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
2139 cqe_desc[code], code, cid);
6733b39a
JK
2140 break;
2141 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
99bc5d55
JSJ
2142 beiscsi_log(phba, KERN_ERR,
2143 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2144 "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
2145 cqe_desc[code], code, cid);
8f09a3b9 2146 spin_lock_bh(&phba->async_pdu_lock);
6733b39a
JK
2147 hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
2148 (struct i_t_dpdu_cqe *) sol);
8f09a3b9 2149 spin_unlock_bh(&phba->async_pdu_lock);
6733b39a
JK
2150 break;
2151 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
2152 case CXN_KILLED_BURST_LEN_MISMATCH:
2153 case CXN_KILLED_AHS_RCVD:
2154 case CXN_KILLED_HDR_DIGEST_ERR:
2155 case CXN_KILLED_UNKNOWN_HDR:
2156 case CXN_KILLED_STALE_ITT_TTT_RCVD:
2157 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
2158 case CXN_KILLED_TIMED_OUT:
2159 case CXN_KILLED_FIN_RCVD:
6763daae
JSJ
2160 case CXN_KILLED_RST_SENT:
2161 case CXN_KILLED_RST_RCVD:
6733b39a
JK
2162 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
2163 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
2164 case CXN_KILLED_OVER_RUN_RESIDUAL:
2165 case CXN_KILLED_UNDER_RUN_RESIDUAL:
2166 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
99bc5d55
JSJ
2167 beiscsi_log(phba, KERN_ERR,
2168 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2169 "BM_%d : Event %s[%d] received on CID : %d\n",
2170 cqe_desc[code], code, cid);
0a513dd8
JSJ
2171 if (beiscsi_conn)
2172 iscsi_conn_failure(beiscsi_conn->conn,
2173 ISCSI_ERR_CONN_FAILED);
6733b39a
JK
2174 break;
2175 default:
99bc5d55
JSJ
2176 beiscsi_log(phba, KERN_ERR,
2177 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
6763daae
JSJ
2178 "BM_%d : Invalid CQE Event Received Code : %d"
2179 "CID 0x%x...\n",
0a513dd8 2180 code, cid);
6733b39a
JK
2181 break;
2182 }
2183
2184 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
2185 queue_tail_inc(cq);
2186 sol = queue_tail_node(cq);
2187 num_processed++;
2188 }
2189
2190 if (num_processed > 0) {
2191 tot_nump += num_processed;
bfead3b2 2192 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
6733b39a
JK
2193 }
2194 return tot_nump;
2195}
2196
756d29c8 2197void beiscsi_process_all_cqs(struct work_struct *work)
6733b39a
JK
2198{
2199 unsigned long flags;
bfead3b2
JK
2200 struct hwi_controller *phwi_ctrlr;
2201 struct hwi_context_memory *phwi_context;
72fb46a9
JSJ
2202 struct beiscsi_hba *phba;
2203 struct be_eq_obj *pbe_eq =
2204 container_of(work, struct be_eq_obj, work_cqs);
6733b39a 2205
72fb46a9 2206 phba = pbe_eq->phba;
bfead3b2
JK
2207 phwi_ctrlr = phba->phwi_ctrlr;
2208 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2 2209
72fb46a9 2210 if (pbe_eq->todo_mcc_cq) {
6733b39a 2211 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 2212 pbe_eq->todo_mcc_cq = false;
6733b39a 2213 spin_unlock_irqrestore(&phba->isr_lock, flags);
756d29c8 2214 beiscsi_process_mcc_isr(phba);
6733b39a
JK
2215 }
2216
72fb46a9 2217 if (pbe_eq->todo_cq) {
6733b39a 2218 spin_lock_irqsave(&phba->isr_lock, flags);
72fb46a9 2219 pbe_eq->todo_cq = false;
6733b39a 2220 spin_unlock_irqrestore(&phba->isr_lock, flags);
bfead3b2 2221 beiscsi_process_cq(pbe_eq);
6733b39a 2222 }
72fb46a9
JSJ
2223
2224 /* rearm EQ for further interrupts */
2225 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
6733b39a
JK
2226}
2227
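/*
 * be_iopoll() below is the blk_iopoll handler: it processes the completion
 * queue of the EQ object and, when fewer entries than the budget were
 * consumed, completes the poll and re-arms the event queue.
 */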
2228static int be_iopoll(struct blk_iopoll *iop, int budget)
2229{
ad3f428e 2230 unsigned int ret;
6733b39a 2231 struct beiscsi_hba *phba;
bfead3b2 2232 struct be_eq_obj *pbe_eq;
6733b39a 2233
bfead3b2
JK
2234 pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
2235 ret = beiscsi_process_cq(pbe_eq);
6733b39a 2236 if (ret < budget) {
bfead3b2 2237 phba = pbe_eq->phba;
6733b39a 2238 blk_iopoll_complete(iop);
99bc5d55
JSJ
2239 beiscsi_log(phba, KERN_INFO,
2240 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2241 "BM_%d : rearm pbe_eq->q.id =%d\n",
2242 pbe_eq->q.id);
bfead3b2 2243 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
6733b39a
JK
2244 }
2245 return ret;
2246}
2247
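/*
 * hwi_write_sgl_v2() below fills a v2 WRB for an I/O task: the first two
 * scatterlist elements are placed inline in the WRB (sge0/sge1), the full
 * scatterlist is then written into the ICD SGL fragment with the first SGE
 * pointing at the BHS, and last_sge is set on the final entry.
 */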
09a1093a
JSJ
2248static void
2249hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2250 unsigned int num_sg, struct beiscsi_io_task *io_task)
2251{
2252 struct iscsi_sge *psgl;
2253 unsigned int sg_len, index;
2254 unsigned int sge_len = 0;
2255 unsigned long long addr;
2256 struct scatterlist *l_sg;
2257 unsigned int offset;
2258
2259 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb,
2260 io_task->bhs_pa.u.a32.address_lo);
2261 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb,
2262 io_task->bhs_pa.u.a32.address_hi);
2263
2264 l_sg = sg;
2265 for (index = 0; (index < num_sg) && (index < 2); index++,
2266 sg = sg_next(sg)) {
2267 if (index == 0) {
2268 sg_len = sg_dma_len(sg);
2269 addr = (u64) sg_dma_address(sg);
2270 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2271 sge0_addr_lo, pwrb,
2272 lower_32_bits(addr));
2273 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2274 sge0_addr_hi, pwrb,
2275 upper_32_bits(addr));
2276 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2277 sge0_len, pwrb,
2278 sg_len);
2279 sge_len = sg_len;
2280 } else {
2281 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset,
2282 pwrb, sge_len);
2283 sg_len = sg_dma_len(sg);
2284 addr = (u64) sg_dma_address(sg);
2285 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2286 sge1_addr_lo, pwrb,
2287 lower_32_bits(addr));
2288 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2289 sge1_addr_hi, pwrb,
2290 upper_32_bits(addr));
2291 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2292 sge1_len, pwrb,
2293 sg_len);
2294 }
2295 }
2296 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2297 memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2298
2299 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2300
2301 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2302 io_task->bhs_pa.u.a32.address_hi);
2303 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2304 io_task->bhs_pa.u.a32.address_lo);
2305
2306 if (num_sg == 1) {
2307 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2308 1);
2309 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2310 0);
2311 } else if (num_sg == 2) {
2312 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2313 0);
2314 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2315 1);
2316 } else {
2317 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2318 0);
2319 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2320 0);
2321 }
2322
2323 sg = l_sg;
2324 psgl++;
2325 psgl++;
2326 offset = 0;
2327 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
2328 sg_len = sg_dma_len(sg);
2329 addr = (u64) sg_dma_address(sg);
2330 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2331 lower_32_bits(addr));
2332 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2333 upper_32_bits(addr));
2334 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2335 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2336 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2337 offset += sg_len;
2338 }
2339 psgl--;
2340 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2341}
2342
6733b39a
JK
2343static void
2344hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2345 unsigned int num_sg, struct beiscsi_io_task *io_task)
2346{
2347 struct iscsi_sge *psgl;
58ff4bd0 2348 unsigned int sg_len, index;
6733b39a
JK
2349 unsigned int sge_len = 0;
2350 unsigned long long addr;
2351 struct scatterlist *l_sg;
2352 unsigned int offset;
2353
2354 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2355 io_task->bhs_pa.u.a32.address_lo);
2356 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2357 io_task->bhs_pa.u.a32.address_hi);
2358
2359 l_sg = sg;
48bd86cf
JK
2360 for (index = 0; (index < num_sg) && (index < 2); index++,
2361 sg = sg_next(sg)) {
6733b39a
JK
2362 if (index == 0) {
2363 sg_len = sg_dma_len(sg);
2364 addr = (u64) sg_dma_address(sg);
2365 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
457ff3b7 2366 ((u32)(addr & 0xFFFFFFFF)));
6733b39a 2367 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
457ff3b7 2368 ((u32)(addr >> 32)));
6733b39a
JK
2369 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2370 sg_len);
2371 sge_len = sg_len;
6733b39a 2372 } else {
6733b39a
JK
2373 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
2374 pwrb, sge_len);
2375 sg_len = sg_dma_len(sg);
2376 addr = (u64) sg_dma_address(sg);
2377 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
457ff3b7 2378 ((u32)(addr & 0xFFFFFFFF)));
6733b39a 2379 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
457ff3b7 2380 ((u32)(addr >> 32)));
6733b39a
JK
2381 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
2382 sg_len);
2383 }
2384 }
2385 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2386 memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2387
2388 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2389
2390 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2391 io_task->bhs_pa.u.a32.address_hi);
2392 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2393 io_task->bhs_pa.u.a32.address_lo);
2394
caf818f1
JK
2395 if (num_sg == 1) {
2396 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2397 1);
2398 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2399 0);
2400 } else if (num_sg == 2) {
2401 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2402 0);
2403 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2404 1);
2405 } else {
2406 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2407 0);
2408 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2409 0);
2410 }
6733b39a
JK
2411 sg = l_sg;
2412 psgl++;
2413 psgl++;
2414 offset = 0;
48bd86cf 2415 for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
6733b39a
JK
2416 sg_len = sg_dma_len(sg);
2417 addr = (u64) sg_dma_address(sg);
2418 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2419 (addr & 0xFFFFFFFF));
2420 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2421 (addr >> 32));
2422 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2423 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2424 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2425 offset += sg_len;
2426 }
2427 psgl--;
2428 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2429}
2430
d629c471
JSJ
2431/**
2432 * hwi_write_buffer()- Populate the WRB with task info
2433 * @pwrb: ptr to the WRB entry
2434 * @task: iscsi task which is to be executed
2435 **/
6733b39a
JK
2436static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2437{
2438 struct iscsi_sge *psgl;
6733b39a
JK
2439 struct beiscsi_io_task *io_task = task->dd_data;
2440 struct beiscsi_conn *beiscsi_conn = io_task->conn;
2441 struct beiscsi_hba *phba = beiscsi_conn->phba;
09a1093a 2442 uint8_t dsp_value = 0;
6733b39a
JK
2443
2444 io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
2445 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2446 io_task->bhs_pa.u.a32.address_lo);
2447 AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2448 io_task->bhs_pa.u.a32.address_hi);
2449
2450 if (task->data) {
09a1093a
JSJ
2451
2452 /* Check for the data_count */
2453 dsp_value = (task->data_count) ? 1 : 0;
2454
2c9dfd36
JK
2455 if (is_chip_be2_be3r(phba))
2456 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
09a1093a
JSJ
2457 pwrb, dsp_value);
2458 else
2c9dfd36 2459 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
09a1093a
JSJ
2460 pwrb, dsp_value);
2461
2462 /* Map addr only if there is data_count */
2463 if (dsp_value) {
d629c471
JSJ
2464 io_task->mtask_addr = pci_map_single(phba->pcidev,
2465 task->data,
2466 task->data_count,
2467 PCI_DMA_TODEVICE);
d629c471 2468 io_task->mtask_data_count = task->data_count;
09a1093a 2469 } else
d629c471 2470 io_task->mtask_addr = 0;
09a1093a 2471
6733b39a 2472 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
d629c471 2473 lower_32_bits(io_task->mtask_addr));
6733b39a 2474 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
d629c471 2475 upper_32_bits(io_task->mtask_addr));
6733b39a
JK
2476 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2477 task->data_count);
2478
2479 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
2480 } else {
2481 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
d629c471 2482 io_task->mtask_addr = 0;
6733b39a
JK
2483 }
2484
2485 psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2486
2487 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
2488
2489 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2490 io_task->bhs_pa.u.a32.address_hi);
2491 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2492 io_task->bhs_pa.u.a32.address_lo);
2493 if (task->data) {
2494 psgl++;
2495 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
2496 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
2497 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
2498 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
2499 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
2500 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2501
2502 psgl++;
2503 if (task->data) {
2504 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
d629c471 2505 lower_32_bits(io_task->mtask_addr));
6733b39a 2506 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
d629c471 2507 upper_32_bits(io_task->mtask_addr));
6733b39a
JK
2508 }
2509 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
2510 }
2511 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2512}
2513
843ae752
JK
2514/**
2515 * beiscsi_find_mem_req()- Compute the memory needed for each region
2516 * @phba: ptr to HBA struct
2517 **/
6733b39a
JK
2518static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2519{
8a86e833 2520 uint8_t mem_descr_index, ulp_num;
bfead3b2 2521 unsigned int num_cq_pages, num_async_pdu_buf_pages;
6733b39a
JK
2522 unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2523 unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2524
2525 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2526 sizeof(struct sol_cqe));
6733b39a
JK
2527
2528 phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
2529
2530 phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
2531 BE_ISCSI_PDU_HEADER_SIZE;
2532 phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
2533 sizeof(struct hwi_context_memory);
2534
6733b39a
JK
2535
2536 phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2537 * (phba->params.wrbs_per_cxn)
2538 * phba->params.cxns_per_ctrl;
2539 wrb_sz_per_cxn = sizeof(struct wrb_handle) *
2540 (phba->params.wrbs_per_cxn);
2541 phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2542 phba->params.cxns_per_ctrl);
2543
2544 phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2545 phba->params.icds_per_ctrl;
2546 phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2547 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
8a86e833
JK
2548 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2549 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
6733b39a 2550
8a86e833
JK
2551 num_async_pdu_buf_sgl_pages =
2552 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2553 phba, ulp_num) *
2554 sizeof(struct phys_addr));
2555
2556 num_async_pdu_buf_pages =
2557 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2558 phba, ulp_num) *
2559 phba->params.defpdu_hdr_sz);
2560
2561 num_async_pdu_data_pages =
2562 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2563 phba, ulp_num) *
2564 phba->params.defpdu_data_sz);
2565
2566 num_async_pdu_data_sgl_pages =
2567 PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2568 phba, ulp_num) *
2569 sizeof(struct phys_addr));
2570
a129d92f
JK
2571 mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 +
2572 (ulp_num * MEM_DESCR_OFFSET));
2573 phba->mem_req[mem_descr_index] =
2574 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2575 BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE;
2576
8a86e833
JK
2577 mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
2578 (ulp_num * MEM_DESCR_OFFSET));
2579 phba->mem_req[mem_descr_index] =
2580 num_async_pdu_buf_pages *
2581 PAGE_SIZE;
2582
2583 mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 +
2584 (ulp_num * MEM_DESCR_OFFSET));
2585 phba->mem_req[mem_descr_index] =
2586 num_async_pdu_data_pages *
2587 PAGE_SIZE;
2588
2589 mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 +
2590 (ulp_num * MEM_DESCR_OFFSET));
2591 phba->mem_req[mem_descr_index] =
2592 num_async_pdu_buf_sgl_pages *
2593 PAGE_SIZE;
2594
2595 mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 +
2596 (ulp_num * MEM_DESCR_OFFSET));
2597 phba->mem_req[mem_descr_index] =
2598 num_async_pdu_data_sgl_pages *
2599 PAGE_SIZE;
2600
2601 mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
2602 (ulp_num * MEM_DESCR_OFFSET));
2603 phba->mem_req[mem_descr_index] =
2604 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2605 sizeof(struct async_pdu_handle);
2606
2607 mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
2608 (ulp_num * MEM_DESCR_OFFSET));
2609 phba->mem_req[mem_descr_index] =
2610 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2611 sizeof(struct async_pdu_handle);
2612
2613 mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2614 (ulp_num * MEM_DESCR_OFFSET));
2615 phba->mem_req[mem_descr_index] =
2616 sizeof(struct hwi_async_pdu_context) +
2617 (BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2618 sizeof(struct hwi_async_entry));
2619 }
2620 }
6733b39a
JK
2621}
2622
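/*
 * beiscsi_alloc_mem() below allocates every region recorded in mem_req[] as
 * one or more DMA-coherent fragments: when an allocation fails, the request
 * is shrunk (rounded down to a power of two, or halved) and retried until it
 * would drop below BE_MIN_MEM_SIZE, at which point everything allocated so
 * far is freed and -ENOMEM is returned.
 */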
2623static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2624{
6733b39a 2625 dma_addr_t bus_add;
a7909b39
JK
2626 struct hwi_controller *phwi_ctrlr;
2627 struct be_mem_descriptor *mem_descr;
6733b39a
JK
2628 struct mem_array *mem_arr, *mem_arr_orig;
2629 unsigned int i, j, alloc_size, curr_alloc_size;
2630
3ec78271 2631 phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
6733b39a
JK
2632 if (!phba->phwi_ctrlr)
2633 return -ENOMEM;
2634
a7909b39
JK
2635 /* Allocate memory for wrb_context */
2636 phwi_ctrlr = phba->phwi_ctrlr;
2637 phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
2638 phba->params.cxns_per_ctrl,
2639 GFP_KERNEL);
2640 if (!phwi_ctrlr->wrb_context)
2641 return -ENOMEM;
2642
6733b39a
JK
2643 phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2644 GFP_KERNEL);
2645 if (!phba->init_mem) {
a7909b39 2646 kfree(phwi_ctrlr->wrb_context);
6733b39a
JK
2647 kfree(phba->phwi_ctrlr);
2648 return -ENOMEM;
2649 }
2650
2651 mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2652 GFP_KERNEL);
2653 if (!mem_arr_orig) {
2654 kfree(phba->init_mem);
a7909b39 2655 kfree(phwi_ctrlr->wrb_context);
6733b39a
JK
2656 kfree(phba->phwi_ctrlr);
2657 return -ENOMEM;
2658 }
2659
2660 mem_descr = phba->init_mem;
2661 for (i = 0; i < SE_MEM_MAX; i++) {
8a86e833
JK
2662 if (!phba->mem_req[i]) {
2663 mem_descr->mem_array = NULL;
2664 mem_descr++;
2665 continue;
2666 }
2667
6733b39a
JK
2668 j = 0;
2669 mem_arr = mem_arr_orig;
2670 alloc_size = phba->mem_req[i];
2671 memset(mem_arr, 0, sizeof(struct mem_array) *
2672 BEISCSI_MAX_FRAGS_INIT);
2673 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2674 do {
2675 mem_arr->virtual_address = pci_alloc_consistent(
2676 phba->pcidev,
2677 curr_alloc_size,
2678 &bus_add);
2679 if (!mem_arr->virtual_address) {
2680 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2681 goto free_mem;
2682 if (curr_alloc_size -
2683 rounddown_pow_of_two(curr_alloc_size))
2684 curr_alloc_size = rounddown_pow_of_two
2685 (curr_alloc_size);
2686 else
2687 curr_alloc_size = curr_alloc_size / 2;
2688 } else {
2689 mem_arr->bus_address.u.
2690 a64.address = (__u64) bus_add;
2691 mem_arr->size = curr_alloc_size;
2692 alloc_size -= curr_alloc_size;
2693 curr_alloc_size = min(be_max_phys_size *
2694 1024, alloc_size);
2695 j++;
2696 mem_arr++;
2697 }
2698 } while (alloc_size);
2699 mem_descr->num_elements = j;
2700 mem_descr->size_in_bytes = phba->mem_req[i];
2701 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2702 GFP_KERNEL);
2703 if (!mem_descr->mem_array)
2704 goto free_mem;
2705
2706 memcpy(mem_descr->mem_array, mem_arr_orig,
2707 sizeof(struct mem_array) * j);
2708 mem_descr++;
2709 }
2710 kfree(mem_arr_orig);
2711 return 0;
2712free_mem:
2713 mem_descr->num_elements = j;
2714 while ((i) || (j)) {
2715 for (j = mem_descr->num_elements; j > 0; j--) {
2716 pci_free_consistent(phba->pcidev,
2717 mem_descr->mem_array[j - 1].size,
2718 mem_descr->mem_array[j - 1].
2719 virtual_address,
457ff3b7
JK
2720 (unsigned long)mem_descr->
2721 mem_array[j - 1].
6733b39a
JK
2722 bus_address.u.a64.address);
2723 }
2724 if (i) {
2725 i--;
2726 kfree(mem_descr->mem_array);
2727 mem_descr--;
2728 }
2729 }
2730 kfree(mem_arr_orig);
2731 kfree(phba->init_mem);
a7909b39 2732 kfree(phba->phwi_ctrlr->wrb_context);
6733b39a
JK
2733 kfree(phba->phwi_ctrlr);
2734 return -ENOMEM;
2735}
2736
2737static int beiscsi_get_memory(struct beiscsi_hba *phba)
2738{
2739 beiscsi_find_mem_req(phba);
2740 return beiscsi_alloc_mem(phba);
2741}
2742
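/*
 * iscsi_init_global_templates() below initializes the two global PDU header
 * templates kept in ISCSI_MEM_GLOBAL_HEADER: a Data-Out header carrying the
 * IIOC_SCSI_DATA opcode and a NOP-Out header with TTT 0xFFFFFFFF and the
 * F bit set.
 */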
2743static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2744{
2745 struct pdu_data_out *pdata_out;
2746 struct pdu_nop_out *pnop_out;
2747 struct be_mem_descriptor *mem_descr;
2748
2749 mem_descr = phba->init_mem;
2750 mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2751 pdata_out =
2752 (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2753 memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2754
2755 AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2756 IIOC_SCSI_DATA);
2757
2758 pnop_out =
2759 (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2760 virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2761
2762 memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2763 AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2764 AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2765 AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2766}
2767
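/*
 * beiscsi_init_wrb_handle() below carves the HWI_MEM_WRBH and HWI_MEM_WRB
 * regions into per-connection WRB handle arrays and WRB rings: every
 * connection gets wrbs_per_cxn handles, and each handle is bound to its
 * iscsi_wrb entry in a second pass.
 */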
3ec78271 2768static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
6733b39a
JK
2769{
2770 struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
a7909b39 2771 struct hwi_context_memory *phwi_ctxt;
3ec78271 2772 struct wrb_handle *pwrb_handle = NULL;
6733b39a
JK
2773 struct hwi_controller *phwi_ctrlr;
2774 struct hwi_wrb_context *pwrb_context;
3ec78271
JK
2775 struct iscsi_wrb *pwrb = NULL;
2776 unsigned int num_cxn_wrbh = 0;
2777 unsigned int num_cxn_wrb = 0, j, idx = 0, index;
6733b39a
JK
2778
2779 mem_descr_wrbh = phba->init_mem;
2780 mem_descr_wrbh += HWI_MEM_WRBH;
2781
2782 mem_descr_wrb = phba->init_mem;
2783 mem_descr_wrb += HWI_MEM_WRB;
6733b39a
JK
2784 phwi_ctrlr = phba->phwi_ctrlr;
2785
a7909b39
JK
2786 /* Allocate memory for WRBQ */
2787 phwi_ctxt = phwi_ctrlr->phwi_ctxt;
2788 phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
843ae752 2789 phba->params.cxns_per_ctrl,
a7909b39
JK
2790 GFP_KERNEL);
2791 if (!phwi_ctxt->be_wrbq) {
2792 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2793 "BM_%d : WRBQ Mem Alloc Failed\n");
2794 return -ENOMEM;
2795 }
2796
2797 for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
6733b39a 2798 pwrb_context = &phwi_ctrlr->wrb_context[index];
6733b39a
JK
2799 pwrb_context->pwrb_handle_base =
2800 kzalloc(sizeof(struct wrb_handle *) *
2801 phba->params.wrbs_per_cxn, GFP_KERNEL);
3ec78271 2802 if (!pwrb_context->pwrb_handle_base) {
99bc5d55
JSJ
2803 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2804 "BM_%d : Mem Alloc Failed. Failing to load\n");
3ec78271
JK
2805 goto init_wrb_hndl_failed;
2806 }
6733b39a
JK
2807 pwrb_context->pwrb_handle_basestd =
2808 kzalloc(sizeof(struct wrb_handle *) *
2809 phba->params.wrbs_per_cxn, GFP_KERNEL);
3ec78271 2810 if (!pwrb_context->pwrb_handle_basestd) {
99bc5d55
JSJ
2811 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2812 "BM_%d : Mem Alloc Failed. Failing to load\n");
3ec78271
JK
2813 goto init_wrb_hndl_failed;
2814 }
2815 if (!num_cxn_wrbh) {
2816 pwrb_handle =
2817 mem_descr_wrbh->mem_array[idx].virtual_address;
2818 num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2819 ((sizeof(struct wrb_handle)) *
2820 phba->params.wrbs_per_cxn));
2821 idx++;
2822 }
2823 pwrb_context->alloc_index = 0;
2824 pwrb_context->wrb_handles_available = 0;
2825 pwrb_context->free_index = 0;
2826
6733b39a 2827 if (num_cxn_wrbh) {
6733b39a
JK
2828 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2829 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2830 pwrb_context->pwrb_handle_basestd[j] =
2831 pwrb_handle;
2832 pwrb_context->wrb_handles_available++;
bfead3b2 2833 pwrb_handle->wrb_index = j;
6733b39a
JK
2834 pwrb_handle++;
2835 }
6733b39a
JK
2836 num_cxn_wrbh--;
2837 }
2838 }
2839 idx = 0;
a7909b39 2840 for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
6733b39a 2841 pwrb_context = &phwi_ctrlr->wrb_context[index];
3ec78271 2842 if (!num_cxn_wrb) {
6733b39a 2843 pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
7c56533c 2844 num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
3ec78271
JK
2845 ((sizeof(struct iscsi_wrb) *
2846 phba->params.wrbs_per_cxn));
2847 idx++;
2848 }
2849
2850 if (num_cxn_wrb) {
6733b39a
JK
2851 for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2852 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2853 pwrb_handle->pwrb = pwrb;
2854 pwrb++;
2855 }
2856 num_cxn_wrb--;
2857 }
2858 }
3ec78271
JK
2859 return 0;
2860init_wrb_hndl_failed:
2861 for (j = index; j > 0; j--) {
2862 pwrb_context = &phwi_ctrlr->wrb_context[j];
2863 kfree(pwrb_context->pwrb_handle_base);
2864 kfree(pwrb_context->pwrb_handle_basestd);
2865 }
2866 return -ENOMEM;
6733b39a
JK
2867}
2868
a7909b39 2869static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
6733b39a 2870{
8a86e833 2871 uint8_t ulp_num;
6733b39a
JK
2872 struct hwi_controller *phwi_ctrlr;
2873 struct hba_parameters *p = &phba->params;
2874 struct hwi_async_pdu_context *pasync_ctx;
2875 struct async_pdu_handle *pasync_header_h, *pasync_data_h;
dc63aac6 2876 unsigned int index, idx, num_per_mem, num_async_data;
6733b39a
JK
2877 struct be_mem_descriptor *mem_descr;
2878
8a86e833
JK
2879 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2880 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
6733b39a 2881
8a86e833
JK
2882 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2883 mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2884 (ulp_num * MEM_DESCR_OFFSET));
2885
2886 phwi_ctrlr = phba->phwi_ctrlr;
2887 phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
2888 (struct hwi_async_pdu_context *)
2889 mem_descr->mem_array[0].virtual_address;
2890
2891 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
2892 memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2893
2894 pasync_ctx->async_entry =
2895 (struct hwi_async_entry *)
2896 ((long unsigned int)pasync_ctx +
2897 sizeof(struct hwi_async_pdu_context));
2898
2899 pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
2900 ulp_num);
2901 pasync_ctx->buffer_size = p->defpdu_hdr_sz;
2902
2903 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2904 mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
2905 (ulp_num * MEM_DESCR_OFFSET);
2906 if (mem_descr->mem_array[0].virtual_address) {
2907 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2908 "BM_%d : hwi_init_async_pdu_ctx"
2909 " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
2910 ulp_num,
2911 mem_descr->mem_array[0].
2912 virtual_address);
2913 } else
2914 beiscsi_log(phba, KERN_WARNING,
2915 BEISCSI_LOG_INIT,
2916 "BM_%d : No Virtual address for ULP : %d\n",
2917 ulp_num);
2918
2919 pasync_ctx->async_header.va_base =
6733b39a 2920 mem_descr->mem_array[0].virtual_address;
6733b39a 2921
8a86e833
JK
2922 pasync_ctx->async_header.pa_base.u.a64.address =
2923 mem_descr->mem_array[0].
2924 bus_address.u.a64.address;
6733b39a 2925
8a86e833
JK
2926 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2927 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
2928 (ulp_num * MEM_DESCR_OFFSET);
2929 if (mem_descr->mem_array[0].virtual_address) {
2930 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2931 "BM_%d : hwi_init_async_pdu_ctx"
2932 " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n",
2933 ulp_num,
2934 mem_descr->mem_array[0].
2935 virtual_address);
2936 } else
2937 beiscsi_log(phba, KERN_WARNING,
2938 BEISCSI_LOG_INIT,
2939 "BM_%d : No Virtual address for ULP : %d\n",
2940 ulp_num);
2941
2942 pasync_ctx->async_header.ring_base =
2943 mem_descr->mem_array[0].virtual_address;
6733b39a 2944
8a86e833
JK
2945 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2946 mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
2947 (ulp_num * MEM_DESCR_OFFSET);
2948 if (mem_descr->mem_array[0].virtual_address) {
2949 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2950 "BM_%d : hwi_init_async_pdu_ctx"
2951 " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n",
2952 ulp_num,
2953 mem_descr->mem_array[0].
2954 virtual_address);
2955 } else
2956 beiscsi_log(phba, KERN_WARNING,
2957 BEISCSI_LOG_INIT,
2958 "BM_%d : No Virtual address for ULP : %d\n",
2959 ulp_num);
2960
2961 pasync_ctx->async_header.handle_base =
2962 mem_descr->mem_array[0].virtual_address;
2963 pasync_ctx->async_header.writables = 0;
2964 INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2965
2966 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2967 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
2968 (ulp_num * MEM_DESCR_OFFSET);
2969 if (mem_descr->mem_array[0].virtual_address) {
2970 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2971 "BM_%d : hwi_init_async_pdu_ctx"
2972 " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n",
2973 ulp_num,
2974 mem_descr->mem_array[0].
2975 virtual_address);
2976 } else
2977 beiscsi_log(phba, KERN_WARNING,
2978 BEISCSI_LOG_INIT,
2979 "BM_%d : No Virtual address for ULP : %d\n",
2980 ulp_num);
2981
2982 pasync_ctx->async_data.ring_base =
2983 mem_descr->mem_array[0].virtual_address;
6733b39a 2984
8a86e833
JK
2985 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2986 mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
2987 (ulp_num * MEM_DESCR_OFFSET);
2988 if (!mem_descr->mem_array[0].virtual_address)
2989 beiscsi_log(phba, KERN_WARNING,
2990 BEISCSI_LOG_INIT,
2991 "BM_%d : No Virtual address for ULP : %d\n",
2992 ulp_num);
99bc5d55 2993
8a86e833
JK
2994 pasync_ctx->async_data.handle_base =
2995 mem_descr->mem_array[0].virtual_address;
2996 pasync_ctx->async_data.writables = 0;
2997 INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2998
2999 pasync_header_h =
3000 (struct async_pdu_handle *)
3001 pasync_ctx->async_header.handle_base;
3002 pasync_data_h =
3003 (struct async_pdu_handle *)
3004 pasync_ctx->async_data.handle_base;
3005
3006 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3007 mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
3008 (ulp_num * MEM_DESCR_OFFSET);
3009 if (mem_descr->mem_array[0].virtual_address) {
3010 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3011 "BM_%d : hwi_init_async_pdu_ctx"
3012 " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n",
3013 ulp_num,
3014 mem_descr->mem_array[0].
3015 virtual_address);
3016 } else
3017 beiscsi_log(phba, KERN_WARNING,
3018 BEISCSI_LOG_INIT,
3019 "BM_%d : No Virtual address for ULP : %d\n",
3020 ulp_num);
3021
3022 idx = 0;
dc63aac6
JK
3023 pasync_ctx->async_data.va_base =
3024 mem_descr->mem_array[idx].virtual_address;
3025 pasync_ctx->async_data.pa_base.u.a64.address =
3026 mem_descr->mem_array[idx].
3027 bus_address.u.a64.address;
3028
3029 num_async_data = ((mem_descr->mem_array[idx].size) /
3030 phba->params.defpdu_data_sz);
8a86e833 3031 num_per_mem = 0;
6733b39a 3032
8a86e833
JK
3033 for (index = 0; index < BEISCSI_GET_CID_COUNT
3034 (phba, ulp_num); index++) {
3035 pasync_header_h->cri = -1;
3036 pasync_header_h->index = (char)index;
3037 INIT_LIST_HEAD(&pasync_header_h->link);
3038 pasync_header_h->pbuffer =
3039 (void *)((unsigned long)
3040 (pasync_ctx->
3041 async_header.va_base) +
3042 (p->defpdu_hdr_sz * index));
3043
3044 pasync_header_h->pa.u.a64.address =
3045 pasync_ctx->async_header.pa_base.u.a64.
3046 address + (p->defpdu_hdr_sz * index);
3047
3048 list_add_tail(&pasync_header_h->link,
3049 &pasync_ctx->async_header.
3050 free_list);
3051 pasync_header_h++;
3052 pasync_ctx->async_header.free_entries++;
3053 pasync_ctx->async_header.writables++;
3054
3055 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
3056 wait_queue.list);
3057 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
3058 header_busy_list);
3059 pasync_data_h->cri = -1;
3060 pasync_data_h->index = (char)index;
3061 INIT_LIST_HEAD(&pasync_data_h->link);
3062
3063 if (!num_async_data) {
3064 num_per_mem = 0;
3065 idx++;
3066 pasync_ctx->async_data.va_base =
3067 mem_descr->mem_array[idx].
3068 virtual_address;
3069 pasync_ctx->async_data.pa_base.u.
3070 a64.address =
3071 mem_descr->mem_array[idx].
3072 bus_address.u.a64.address;
3073 num_async_data =
3074 ((mem_descr->mem_array[idx].
3075 size) /
3076 phba->params.defpdu_data_sz);
3077 }
3078 pasync_data_h->pbuffer =
3079 (void *)((unsigned long)
3080 (pasync_ctx->async_data.va_base) +
3081 (p->defpdu_data_sz * num_per_mem));
3082
3083 pasync_data_h->pa.u.a64.address =
3084 pasync_ctx->async_data.pa_base.u.a64.
3085 address + (p->defpdu_data_sz *
3086 num_per_mem);
3087 num_per_mem++;
3088 num_async_data--;
3089
3090 list_add_tail(&pasync_data_h->link,
3091 &pasync_ctx->async_data.
3092 free_list);
3093 pasync_data_h++;
3094 pasync_ctx->async_data.free_entries++;
3095 pasync_ctx->async_data.writables++;
3096
3097 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
3098 data_busy_list);
3099 }
6733b39a 3100
8a86e833
JK
3101 pasync_ctx->async_header.host_write_ptr = 0;
3102 pasync_ctx->async_header.ep_read_ptr = -1;
3103 pasync_ctx->async_data.host_write_ptr = 0;
3104 pasync_ctx->async_data.ep_read_ptr = -1;
3105 }
6733b39a
JK
3106 }
3107
a7909b39 3108 return 0;
6733b39a
JK
3109}
3110
3111static int
3112be_sgl_create_contiguous(void *virtual_address,
3113 u64 physical_address, u32 length,
3114 struct be_dma_mem *sgl)
3115{
3116 WARN_ON(!virtual_address);
3117 WARN_ON(!physical_address);
 3118	WARN_ON(length == 0);
3119 WARN_ON(!sgl);
3120
3121 sgl->va = virtual_address;
457ff3b7 3122 sgl->dma = (unsigned long)physical_address;
6733b39a
JK
3123 sgl->size = length;
3124
3125 return 0;
3126}
3127
3128static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
3129{
3130 memset(sgl, 0, sizeof(*sgl));
3131}
3132
3133static void
3134hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
3135 struct mem_array *pmem, struct be_dma_mem *sgl)
3136{
3137 if (sgl->va)
3138 be_sgl_destroy_contiguous(sgl);
3139
3140 be_sgl_create_contiguous(pmem->virtual_address,
3141 pmem->bus_address.u.a64.address,
3142 pmem->size, sgl);
3143}
3144
3145static void
3146hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
3147 struct mem_array *pmem, struct be_dma_mem *sgl)
3148{
3149 if (sgl->va)
3150 be_sgl_destroy_contiguous(sgl);
3151
3152 be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
3153 pmem->bus_address.u.a64.address,
3154 pmem->size, sgl);
3155}
3156
3157static int be_fill_queue(struct be_queue_info *q,
3158 u16 len, u16 entry_size, void *vaddress)
3159{
3160 struct be_dma_mem *mem = &q->dma_mem;
3161
3162 memset(q, 0, sizeof(*q));
3163 q->len = len;
3164 q->entry_size = entry_size;
3165 mem->size = len * entry_size;
3166 mem->va = vaddress;
3167 if (!mem->va)
3168 return -ENOMEM;
3169 memset(mem->va, 0, mem->size);
3170 return 0;
3171}
3172
bfead3b2 3173static int beiscsi_create_eqs(struct beiscsi_hba *phba,
6733b39a
JK
3174 struct hwi_context_memory *phwi_context)
3175{
bfead3b2 3176 unsigned int i, num_eq_pages;
99bc5d55 3177 int ret = 0, eq_for_mcc;
6733b39a
JK
3178 struct be_queue_info *eq;
3179 struct be_dma_mem *mem;
6733b39a 3180 void *eq_vaddress;
bfead3b2 3181 dma_addr_t paddr;
6733b39a 3182
bfead3b2
JK
3183 num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
3184 sizeof(struct be_eq_entry));
6733b39a 3185
bfead3b2
JK
3186 if (phba->msix_enabled)
3187 eq_for_mcc = 1;
3188 else
3189 eq_for_mcc = 0;
3190 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3191 eq = &phwi_context->be_eq[i].q;
3192 mem = &eq->dma_mem;
3193 phwi_context->be_eq[i].phba = phba;
3194 eq_vaddress = pci_alloc_consistent(phba->pcidev,
3195 num_eq_pages * PAGE_SIZE,
3196 &paddr);
3197 if (!eq_vaddress)
3198 goto create_eq_error;
3199
3200 mem->va = eq_vaddress;
3201 ret = be_fill_queue(eq, phba->params.num_eq_entries,
3202 sizeof(struct be_eq_entry), eq_vaddress);
3203 if (ret) {
99bc5d55
JSJ
3204 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3205 "BM_%d : be_fill_queue Failed for EQ\n");
bfead3b2
JK
3206 goto create_eq_error;
3207 }
6733b39a 3208
bfead3b2
JK
3209 mem->dma = paddr;
3210 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
3211 phwi_context->cur_eqd);
3212 if (ret) {
99bc5d55
JSJ
3213 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3214 "BM_%d : beiscsi_cmd_eq_create"
3215 "Failed for EQ\n");
bfead3b2
JK
3216 goto create_eq_error;
3217 }
99bc5d55
JSJ
3218
3219 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3220 "BM_%d : eqid = %d\n",
3221 phwi_context->be_eq[i].q.id);
6733b39a 3222 }
6733b39a 3223 return 0;
bfead3b2 3224create_eq_error:
107dfcba 3225 for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
bfead3b2
JK
3226 eq = &phwi_context->be_eq[i].q;
3227 mem = &eq->dma_mem;
3228 if (mem->va)
3229 pci_free_consistent(phba->pcidev, num_eq_pages
3230 * PAGE_SIZE,
3231 mem->va, mem->dma);
3232 }
3233 return ret;
6733b39a
JK
3234}
3235
bfead3b2 3236static int beiscsi_create_cqs(struct beiscsi_hba *phba,
6733b39a
JK
3237 struct hwi_context_memory *phwi_context)
3238{
bfead3b2 3239 unsigned int i, num_cq_pages;
99bc5d55 3240 int ret = 0;
6733b39a
JK
3241 struct be_queue_info *cq, *eq;
3242 struct be_dma_mem *mem;
bfead3b2 3243 struct be_eq_obj *pbe_eq;
6733b39a 3244 void *cq_vaddress;
bfead3b2 3245 dma_addr_t paddr;
6733b39a 3246
bfead3b2
JK
3247 num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
3248 sizeof(struct sol_cqe));
6733b39a 3249
bfead3b2
JK
3250 for (i = 0; i < phba->num_cpus; i++) {
3251 cq = &phwi_context->be_cq[i];
3252 eq = &phwi_context->be_eq[i].q;
3253 pbe_eq = &phwi_context->be_eq[i];
3254 pbe_eq->cq = cq;
3255 pbe_eq->phba = phba;
3256 mem = &cq->dma_mem;
3257 cq_vaddress = pci_alloc_consistent(phba->pcidev,
3258 num_cq_pages * PAGE_SIZE,
3259 &paddr);
3260 if (!cq_vaddress)
3261 goto create_cq_error;
7da50879 3262 ret = be_fill_queue(cq, phba->params.num_cq_entries,
bfead3b2
JK
3263 sizeof(struct sol_cqe), cq_vaddress);
3264 if (ret) {
99bc5d55
JSJ
3265 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3266 "BM_%d : be_fill_queue Failed "
3267 "for ISCSI CQ\n");
bfead3b2
JK
3268 goto create_cq_error;
3269 }
3270
3271 mem->dma = paddr;
3272 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
3273 false, 0);
3274 if (ret) {
99bc5d55
JSJ
3275 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3276 "BM_%d : beiscsi_cmd_eq_create"
3277 "Failed for ISCSI CQ\n");
bfead3b2
JK
3278 goto create_cq_error;
3279 }
99bc5d55
JSJ
3280 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3281 "BM_%d : iscsi cq_id is %d for eq_id %d\n"
3282 "iSCSI CQ CREATED\n", cq->id, eq->id);
6733b39a 3283 }
6733b39a 3284 return 0;
bfead3b2
JK
3285
3286create_cq_error:
3287 for (i = 0; i < phba->num_cpus; i++) {
3288 cq = &phwi_context->be_cq[i];
3289 mem = &cq->dma_mem;
3290 if (mem->va)
3291 pci_free_consistent(phba->pcidev, num_cq_pages
3292 * PAGE_SIZE,
3293 mem->va, mem->dma);
3294 }
3295 return ret;
3296
6733b39a
JK
3297}
3298
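/*
 * beiscsi_create_def_hdr() below builds the default PDU header ring for one
 * ULP from the HWI_MEM_ASYNC_HEADER_RING_ULP0 region, issues the
 * queue-create mailbox command and then posts the initial set of header
 * buffers to the new ring.
 */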
3299static int
3300beiscsi_create_def_hdr(struct beiscsi_hba *phba,
3301 struct hwi_context_memory *phwi_context,
3302 struct hwi_controller *phwi_ctrlr,
8a86e833 3303 unsigned int def_pdu_ring_sz, uint8_t ulp_num)
6733b39a
JK
3304{
3305 unsigned int idx;
3306 int ret;
3307 struct be_queue_info *dq, *cq;
3308 struct be_dma_mem *mem;
3309 struct be_mem_descriptor *mem_descr;
3310 void *dq_vaddress;
3311
3312 idx = 0;
8a86e833 3313 dq = &phwi_context->be_def_hdrq[ulp_num];
bfead3b2 3314 cq = &phwi_context->be_cq[0];
6733b39a
JK
3315 mem = &dq->dma_mem;
3316 mem_descr = phba->init_mem;
8a86e833
JK
3317 mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
3318 (ulp_num * MEM_DESCR_OFFSET);
6733b39a
JK
3319 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3320 ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
3321 sizeof(struct phys_addr),
3322 sizeof(struct phys_addr), dq_vaddress);
3323 if (ret) {
99bc5d55 3324 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
8a86e833
JK
3325 "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
3326 ulp_num);
3327
6733b39a
JK
3328 return ret;
3329 }
457ff3b7
JK
3330 mem->dma = (unsigned long)mem_descr->mem_array[idx].
3331 bus_address.u.a64.address;
6733b39a
JK
3332 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
3333 def_pdu_ring_sz,
8a86e833
JK
3334 phba->params.defpdu_hdr_sz,
3335 BEISCSI_DEFQ_HDR, ulp_num);
6733b39a 3336 if (ret) {
99bc5d55 3337 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
8a86e833
JK
3338 "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n",
3339 ulp_num);
3340
6733b39a
JK
3341 return ret;
3342 }
99bc5d55 3343
8a86e833
JK
3344 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3345 "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
3346 ulp_num,
3347 phwi_context->be_def_hdrq[ulp_num].id);
3348 hwi_post_async_buffers(phba, BEISCSI_DEFQ_HDR, ulp_num);
6733b39a
JK
3349 return 0;
3350}
3351
3352static int
3353beiscsi_create_def_data(struct beiscsi_hba *phba,
3354 struct hwi_context_memory *phwi_context,
3355 struct hwi_controller *phwi_ctrlr,
8a86e833 3356 unsigned int def_pdu_ring_sz, uint8_t ulp_num)
6733b39a
JK
3357{
3358 unsigned int idx;
3359 int ret;
3360 struct be_queue_info *dataq, *cq;
3361 struct be_dma_mem *mem;
3362 struct be_mem_descriptor *mem_descr;
3363 void *dq_vaddress;
3364
3365 idx = 0;
8a86e833 3366 dataq = &phwi_context->be_def_dataq[ulp_num];
bfead3b2 3367 cq = &phwi_context->be_cq[0];
6733b39a
JK
3368 mem = &dataq->dma_mem;
3369 mem_descr = phba->init_mem;
8a86e833
JK
3370 mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
3371 (ulp_num * MEM_DESCR_OFFSET);
6733b39a
JK
3372 dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3373 ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
3374 sizeof(struct phys_addr),
3375 sizeof(struct phys_addr), dq_vaddress);
3376 if (ret) {
99bc5d55 3377 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
8a86e833
JK
3378 "BM_%d : be_fill_queue Failed for DEF PDU "
3379 "DATA on ULP : %d\n",
3380 ulp_num);
3381
6733b39a
JK
3382 return ret;
3383 }
457ff3b7
JK
3384 mem->dma = (unsigned long)mem_descr->mem_array[idx].
3385 bus_address.u.a64.address;
6733b39a
JK
3386 ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
3387 def_pdu_ring_sz,
8a86e833
JK
3388 phba->params.defpdu_data_sz,
3389 BEISCSI_DEFQ_DATA, ulp_num);
6733b39a 3390 if (ret) {
99bc5d55
JSJ
3391 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3392 "BM_%d be_cmd_create_default_pdu_queue"
8a86e833
JK
3393 " Failed for DEF PDU DATA on ULP : %d\n",
3394 ulp_num);
6733b39a
JK
3395 return ret;
3396 }
8a86e833 3397
99bc5d55 3398 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
8a86e833
JK
3399 "BM_%d : iscsi def data id on ULP : %d is %d\n",
3400 ulp_num,
3401 phwi_context->be_def_dataq[ulp_num].id);
99bc5d55 3402
8a86e833 3403 hwi_post_async_buffers(phba, BEISCSI_DEFQ_DATA, ulp_num);
99bc5d55 3404 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
8a86e833
JK
3405 "BM_%d : DEFAULT PDU DATA RING CREATED"
3406 "on ULP : %d\n", ulp_num);
99bc5d55 3407
6733b39a
JK
3408 return 0;
3409}
3410
15a90fe0
JK
3411
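/**
 * beiscsi_post_template_hdr()- Post the template header pages
 * @phba: ptr to the priv structure
 *
 * For every supported ULP, build an SGL over the HWI_MEM_TEMPLATE_HDR
 * memory of that ULP and post it to the adapter.
 **/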
3412static int
3413beiscsi_post_template_hdr(struct beiscsi_hba *phba)
3414{
3415 struct be_mem_descriptor *mem_descr;
3416 struct mem_array *pm_arr;
3417 struct be_dma_mem sgl;
a129d92f 3418 int status, ulp_num;
15a90fe0 3419
a129d92f
JK
3420 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3421 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3422 mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3423 mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 +
3424 (ulp_num * MEM_DESCR_OFFSET);
3425 pm_arr = mem_descr->mem_array;
15a90fe0 3426
a129d92f
JK
3427 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3428 status = be_cmd_iscsi_post_template_hdr(
3429 &phba->ctrl, &sgl);
15a90fe0 3430
a129d92f
JK
3431 if (status != 0) {
3432 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3433 "BM_%d : Post Template HDR Failed for"
3434 "ULP_%d\n", ulp_num);
3435 return status;
3436 }
3437
3438 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3439 "BM_%d : Template HDR Pages Posted for"
3440 "ULP_%d\n", ulp_num);
15a90fe0
JK
3441 }
3442 }
15a90fe0
JK
3443 return 0;
3444}
3445
6733b39a
JK
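/**
 * beiscsi_post_pages()- Post the SGL pages to the adapter
 * @phba: ptr to the priv structure
 *
 * Post the HWI_MEM_SGE memory to the adapter, starting at the page
 * offset derived from the ICD start of the first supported ULP.
 **/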
3446static int
3447beiscsi_post_pages(struct beiscsi_hba *phba)
3448{
3449 struct be_mem_descriptor *mem_descr;
3450 struct mem_array *pm_arr;
3451 unsigned int page_offset, i;
3452 struct be_dma_mem sgl;
843ae752 3453 int status, ulp_num = 0;
6733b39a
JK
3454
3455 mem_descr = phba->init_mem;
3456 mem_descr += HWI_MEM_SGE;
3457 pm_arr = mem_descr->mem_array;
3458
90622db3
JK
3459 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3460 if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
3461 break;
3462
6733b39a 3463 page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
843ae752 3464 phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;
6733b39a
JK
3465 for (i = 0; i < mem_descr->num_elements; i++) {
3466 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3467 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
3468 page_offset,
3469 (pm_arr->size / PAGE_SIZE));
3470 page_offset += pm_arr->size / PAGE_SIZE;
3471 if (status != 0) {
99bc5d55
JSJ
3472 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3473 "BM_%d : post sgl failed.\n");
6733b39a
JK
3474 return status;
3475 }
3476 pm_arr++;
3477 }
99bc5d55
JSJ
3478 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3479 "BM_%d : POSTED PAGES\n");
6733b39a
JK
3480 return 0;
3481}
3482
bfead3b2
JK
3483static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
3484{
3485 struct be_dma_mem *mem = &q->dma_mem;
c8b25598 3486 if (mem->va) {
bfead3b2
JK
3487 pci_free_consistent(phba->pcidev, mem->size,
3488 mem->va, mem->dma);
c8b25598
JK
3489 mem->va = NULL;
3490 }
bfead3b2
JK
3491}
3492
3493static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
3494 u16 len, u16 entry_size)
3495{
3496 struct be_dma_mem *mem = &q->dma_mem;
3497
3498 memset(q, 0, sizeof(*q));
3499 q->len = len;
3500 q->entry_size = entry_size;
3501 mem->size = len * entry_size;
3502 mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
3503 if (!mem->va)
d3ad2bb3 3504 return -ENOMEM;
bfead3b2
JK
3505 memset(mem->va, 0, mem->size);
3506 return 0;
3507}
3508
6733b39a
JK
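/**
 * beiscsi_create_wrb_rings()- Create a WRB queue per connection
 * @phba: ptr to the priv structure
 * @phwi_context: ptr to HWI context memory
 * @phwi_ctrlr: ptr to HWI controller
 *
 * Carve the HWI_MEM_WRB memory into one ring of wrbs_per_cxn WRBs per
 * connection, spread the rings across the supported ULPs, issue the
 * WRBQ create commands and record the CID to CRI mapping.
 **/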
3509static int
3510beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3511 struct hwi_context_memory *phwi_context,
3512 struct hwi_controller *phwi_ctrlr)
3513{
3514 unsigned int wrb_mem_index, offset, size, num_wrb_rings;
3515 u64 pa_addr_lo;
4eea99d5 3516 unsigned int idx, num, i, ulp_num;
6733b39a
JK
3517 struct mem_array *pwrb_arr;
3518 void *wrb_vaddr;
3519 struct be_dma_mem sgl;
3520 struct be_mem_descriptor *mem_descr;
a7909b39 3521 struct hwi_wrb_context *pwrb_context;
6733b39a 3522 int status;
4eea99d5
JK
3523 uint8_t ulp_count = 0, ulp_base_num = 0;
3524 uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 };
6733b39a
JK
3525
3526 idx = 0;
3527 mem_descr = phba->init_mem;
3528 mem_descr += HWI_MEM_WRB;
3529 pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
3530 GFP_KERNEL);
3531 if (!pwrb_arr) {
99bc5d55
JSJ
3532 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3533 "BM_%d : Memory alloc failed in create wrb ring.\n");
6733b39a
JK
3534 return -ENOMEM;
3535 }
3536 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3537 pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
3538 num_wrb_rings = mem_descr->mem_array[idx].size /
3539 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
3540
3541 for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
3542 if (num_wrb_rings) {
3543 pwrb_arr[num].virtual_address = wrb_vaddr;
3544 pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
3545 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3546 sizeof(struct iscsi_wrb);
3547 wrb_vaddr += pwrb_arr[num].size;
3548 pa_addr_lo += pwrb_arr[num].size;
3549 num_wrb_rings--;
3550 } else {
3551 idx++;
3552 wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3553 pa_addr_lo = mem_descr->mem_array[idx].
3554 bus_address.u.a64.address;
3555 num_wrb_rings = mem_descr->mem_array[idx].size /
3556 (phba->params.wrbs_per_cxn *
3557 sizeof(struct iscsi_wrb));
3558 pwrb_arr[num].virtual_address = wrb_vaddr;
3559 pwrb_arr[num].bus_address.u.a64.address
3560 = pa_addr_lo;
3561 pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3562 sizeof(struct iscsi_wrb);
3563 wrb_vaddr += pwrb_arr[num].size;
3564 pa_addr_lo += pwrb_arr[num].size;
3565 num_wrb_rings--;
3566 }
3567 }
4eea99d5
JK
3568
3569 /* Get the ULP Count */
3570 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3571 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3572 ulp_count++;
3573 ulp_base_num = ulp_num;
3574 cid_count_ulp[ulp_num] =
3575 BEISCSI_GET_CID_COUNT(phba, ulp_num);
3576 }
3577
6733b39a
JK
3578 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3579 wrb_mem_index = 0;
3580 offset = 0;
3581 size = 0;
3582
4eea99d5
JK
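 /*
 * When both ULPs are active, hand out the WRB queues to the ULPs
 * in round-robin order, skipping a ULP whose CID quota is
 * already exhausted.
 */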
3583 if (ulp_count > 1) {
3584 ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;
3585
3586 if (!cid_count_ulp[ulp_base_num])
3587 ulp_base_num = (ulp_base_num + 1) %
3588 BEISCSI_ULP_COUNT;
3589
3590 cid_count_ulp[ulp_base_num]--;
3591 }
3592
3593
6733b39a
JK
3594 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
3595 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
4eea99d5
JK
3596 &phwi_context->be_wrbq[i],
3597 &phwi_ctrlr->wrb_context[i],
3598 ulp_base_num);
6733b39a 3599 if (status != 0) {
99bc5d55
JSJ
3600 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3601 "BM_%d : wrbq create failed.");
1462b8ff 3602 kfree(pwrb_arr);
6733b39a
JK
3603 return status;
3604 }
a7909b39 3605 pwrb_context = &phwi_ctrlr->wrb_context[i];
a7909b39 3606 BE_SET_CID_TO_CRI(i, pwrb_context->cid);
6733b39a
JK
3607 }
3608 kfree(pwrb_arr);
3609 return 0;
3610}
3611
3612static void free_wrb_handles(struct beiscsi_hba *phba)
3613{
3614 unsigned int index;
3615 struct hwi_controller *phwi_ctrlr;
3616 struct hwi_wrb_context *pwrb_context;
3617
3618 phwi_ctrlr = phba->phwi_ctrlr;
a7909b39 3619 for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
6733b39a
JK
3620 pwrb_context = &phwi_ctrlr->wrb_context[index];
3621 kfree(pwrb_context->pwrb_handle_base);
3622 kfree(pwrb_context->pwrb_handle_basestd);
3623 }
3624}
3625
bfead3b2
JK
3626static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3627{
3628 struct be_queue_info *q;
3629 struct be_ctrl_info *ctrl = &phba->ctrl;
3630
3631 q = &phba->ctrl.mcc_obj.q;
3632 if (q->created)
3633 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3634 be_queue_free(phba, q);
3635
3636 q = &phba->ctrl.mcc_obj.cq;
3637 if (q->created)
3638 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3639 be_queue_free(phba, q);
3640}
3641
6733b39a
JK
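/**
 * hwi_cleanup()- Destroy the adapter queues
 * @phba: ptr to the priv structure
 *
 * Remove the template header, tear down the WRB queues, the per-ULP
 * default PDU header and data queues, the SGL pages, the CQs and EQs
 * and the MCC queues, then send the FW uninit command.
 **/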
3642static void hwi_cleanup(struct beiscsi_hba *phba)
3643{
3644 struct be_queue_info *q;
3645 struct be_ctrl_info *ctrl = &phba->ctrl;
3646 struct hwi_controller *phwi_ctrlr;
3647 struct hwi_context_memory *phwi_context;
a7909b39 3648 struct hwi_async_pdu_context *pasync_ctx;
8a86e833 3649 int i, eq_num, ulp_num;
6733b39a
JK
3650
3651 phwi_ctrlr = phba->phwi_ctrlr;
3652 phwi_context = phwi_ctrlr->phwi_ctxt;
15a90fe0
JK
3653
3654 be_cmd_iscsi_remove_template_hdr(ctrl);
3655
6733b39a
JK
3656 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3657 q = &phwi_context->be_wrbq[i];
3658 if (q->created)
3659 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3660 }
a7909b39 3661 kfree(phwi_context->be_wrbq);
6733b39a
JK
3662 free_wrb_handles(phba);
3663
8a86e833
JK
3664 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3665 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
6733b39a 3666
8a86e833
JK
3667 q = &phwi_context->be_def_hdrq[ulp_num];
3668 if (q->created)
3669 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3670
3671 q = &phwi_context->be_def_dataq[ulp_num];
3672 if (q->created)
3673 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3674
3675 pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
3676 }
3677 }
6733b39a
JK
3678
3679 beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3680
bfead3b2
JK
3681 for (i = 0; i < (phba->num_cpus); i++) {
3682 q = &phwi_context->be_cq[i];
3683 if (q->created)
3684 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3685 }
3686 if (phba->msix_enabled)
3687 eq_num = 1;
3688 else
3689 eq_num = 0;
3690 for (i = 0; i < (phba->num_cpus + eq_num); i++) {
3691 q = &phwi_context->be_eq[i].q;
3692 if (q->created)
3693 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3694 }
3695 be_mcc_queues_destroy(phba);
0283fbb1 3696 be_cmd_fw_uninit(ctrl);
bfead3b2 3697}
6733b39a 3698
bfead3b2
JK
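/**
 * be_mcc_queues_create()- Create the MCC queue pair
 * @phba: ptr to the priv structure
 * @phwi_context: ptr to HWI context memory
 *
 * Allocate and create the MCC completion queue on the dedicated MCC EQ
 * when MSIx is enabled (EQ 0 otherwise), then allocate and create the
 * MCC queue itself.
 **/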
3699static int be_mcc_queues_create(struct beiscsi_hba *phba,
3700 struct hwi_context_memory *phwi_context)
3701{
3702 struct be_queue_info *q, *cq;
3703 struct be_ctrl_info *ctrl = &phba->ctrl;
3704
3705 /* Alloc MCC compl queue */
3706 cq = &phba->ctrl.mcc_obj.cq;
3707 if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
3708 sizeof(struct be_mcc_compl)))
3709 goto err;
3710 /* Ask BE to create MCC compl queue; */
3711 if (phba->msix_enabled) {
3712 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
3713 [phba->num_cpus].q, false, true, 0))
3714 goto mcc_cq_free;
3715 } else {
3716 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3717 false, true, 0))
3718 goto mcc_cq_free;
3719 }
3720
3721 /* Alloc MCC queue */
3722 q = &phba->ctrl.mcc_obj.q;
3723 if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3724 goto mcc_cq_destroy;
3725
3726 /* Ask BE to create MCC queue */
35e66019 3727 if (beiscsi_cmd_mccq_create(phba, q, cq))
bfead3b2
JK
3728 goto mcc_q_free;
3729
3730 return 0;
3731
3732mcc_q_free:
3733 be_queue_free(phba, q);
3734mcc_cq_destroy:
3735 beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
3736mcc_cq_free:
3737 be_queue_free(phba, cq);
3738err:
d3ad2bb3 3739 return -ENOMEM;
bfead3b2
JK
3740}
3741
107dfcba
JSJ
3742/**
3743 * find_num_cpus()- Get the CPU online count
3744 * @phba: ptr to priv structure
3745 *
3746 * CPU count is used for creating EQ.
3747 **/
3748static void find_num_cpus(struct beiscsi_hba *phba)
bfead3b2
JK
3749{
3750 int num_cpus = 0;
3751
3752 num_cpus = num_online_cpus();
bfead3b2 3753
22abeef0
JSJ
3754 switch (phba->generation) {
3755 case BE_GEN2:
3756 case BE_GEN3:
3757 phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ?
3758 BEISCSI_MAX_NUM_CPUS : num_cpus;
3759 break;
3760 case BE_GEN4:
68c26a3a
JK
3761 /*
3762 * If eqid_count == 1 fall back to
3763 * INTX mechanism
3764 **/
3765 if (phba->fw_config.eqid_count == 1) {
3766 enable_msix = 0;
3767 phba->num_cpus = 1;
3768 return;
3769 }
3770
3771 phba->num_cpus =
3772 (num_cpus > (phba->fw_config.eqid_count - 1)) ?
3773 (phba->fw_config.eqid_count - 1) : num_cpus;
22abeef0
JSJ
3774 break;
3775 default:
3776 phba->num_cpus = 1;
3777 }
6733b39a
JK
3778}
3779
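/**
 * hwi_init_port()- Bring up the adapter queues
 * @phba: ptr to the priv structure
 *
 * Initialize the FW, create the EQs, MCC queues and CQs, the per-ULP
 * default PDU header/data rings, post the SGL pages and template
 * headers, create the WRB rings and build the CID to async-CRI map.
 * On failure the queues created so far are torn down.
 **/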
3780static int hwi_init_port(struct beiscsi_hba *phba)
3781{
3782 struct hwi_controller *phwi_ctrlr;
3783 struct hwi_context_memory *phwi_context;
3784 unsigned int def_pdu_ring_sz;
3785 struct be_ctrl_info *ctrl = &phba->ctrl;
8a86e833 3786 int status, ulp_num;
6733b39a 3787
6733b39a 3788 phwi_ctrlr = phba->phwi_ctrlr;
6733b39a 3789 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2
JK
3790 phwi_context->max_eqd = 0;
3791 phwi_context->min_eqd = 0;
3792 phwi_context->cur_eqd = 64;
6733b39a 3793 be_cmd_fw_initialize(&phba->ctrl);
bfead3b2
JK
3794
3795 status = beiscsi_create_eqs(phba, phwi_context);
6733b39a 3796 if (status != 0) {
99bc5d55
JSJ
3797 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3798 "BM_%d : EQ not created\n");
6733b39a
JK
3799 goto error;
3800 }
3801
bfead3b2
JK
3802 status = be_mcc_queues_create(phba, phwi_context);
3803 if (status != 0)
3804 goto error;
3805
3806 status = mgmt_check_supported_fw(ctrl, phba);
6733b39a 3807 if (status != 0) {
99bc5d55
JSJ
3808 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3809 "BM_%d : Unsupported fw version\n");
6733b39a
JK
3810 goto error;
3811 }
3812
bfead3b2 3813 status = beiscsi_create_cqs(phba, phwi_context);
6733b39a 3814 if (status != 0) {
99bc5d55
JSJ
3815 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3816 "BM_%d : CQ not created\n");
6733b39a
JK
3817 goto error;
3818 }
3819
8a86e833
JK
3820 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3821 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
6733b39a 3822
8a86e833
JK
3823 def_pdu_ring_sz =
3824 BEISCSI_GET_CID_COUNT(phba, ulp_num) *
3825 sizeof(struct phys_addr);
3826
3827 status = beiscsi_create_def_hdr(phba, phwi_context,
3828 phwi_ctrlr,
3829 def_pdu_ring_sz,
3830 ulp_num);
3831 if (status != 0) {
3832 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3833 "BM_%d : Default Header not created for ULP : %d\n",
3834 ulp_num);
3835 goto error;
3836 }
3837
3838 status = beiscsi_create_def_data(phba, phwi_context,
3839 phwi_ctrlr,
3840 def_pdu_ring_sz,
3841 ulp_num);
3842 if (status != 0) {
3843 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3844 "BM_%d : Default Data not created for ULP : %d\n",
3845 ulp_num);
3846 goto error;
3847 }
3848 }
6733b39a
JK
3849 }
3850
3851 status = beiscsi_post_pages(phba);
3852 if (status != 0) {
99bc5d55
JSJ
3853 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3854 "BM_%d : Post SGL Pages Failed\n");
6733b39a
JK
3855 goto error;
3856 }
3857
15a90fe0
JK
3858 status = beiscsi_post_template_hdr(phba);
3859 if (status != 0) {
3860 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3861 "BM_%d : Template HDR Posting for CXN Failed\n");
3862 }
3863
6733b39a
JK
3864 status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
3865 if (status != 0) {
99bc5d55
JSJ
3866 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3867 "BM_%d : WRB Rings not created\n");
6733b39a
JK
3868 goto error;
3869 }
3870
8a86e833
JK
3871 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3872 uint16_t async_arr_idx = 0;
3873
3874 if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3875 uint16_t cri = 0;
3876 struct hwi_async_pdu_context *pasync_ctx;
3877
3878 pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
3879 phwi_ctrlr, ulp_num);
3880 for (cri = 0; cri <
3881 phba->params.cxns_per_ctrl; cri++) {
3882 if (ulp_num == BEISCSI_GET_ULP_FROM_CRI
3883 (phwi_ctrlr, cri))
3884 pasync_ctx->cid_to_async_cri_map[
3885 phwi_ctrlr->wrb_context[cri].cid] =
3886 async_arr_idx++;
3887 }
3888 }
3889 }
3890
99bc5d55
JSJ
3891 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3892 "BM_%d : hwi_init_port success\n");
6733b39a
JK
3893 return 0;
3894
3895error:
99bc5d55
JSJ
3896 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3897 "BM_%d : hwi_init_port failed");
6733b39a 3898 hwi_cleanup(phba);
a49e06d5 3899 return status;
6733b39a
JK
3900}
3901
6733b39a
JK
3902static int hwi_init_controller(struct beiscsi_hba *phba)
3903{
3904 struct hwi_controller *phwi_ctrlr;
3905
3906 phwi_ctrlr = phba->phwi_ctrlr;
3907 if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3908 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3909 init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
99bc5d55
JSJ
3910 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3911 "BM_%d : phwi_ctrlr->phwi_ctxt=%p\n",
3912 phwi_ctrlr->phwi_ctxt);
6733b39a 3913 } else {
99bc5d55
JSJ
3914 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3915 "BM_%d : HWI_MEM_ADDN_CONTEXT is more "
3916 "than one element.Failing to load\n");
6733b39a
JK
3917 return -ENOMEM;
3918 }
3919
3920 iscsi_init_global_templates(phba);
3ec78271
JK
3921 if (beiscsi_init_wrb_handle(phba))
3922 return -ENOMEM;
3923
a7909b39
JK
3924 if (hwi_init_async_pdu_ctx(phba)) {
3925 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3926 "BM_%d : hwi_init_async_pdu_ctx failed\n");
3927 return -ENOMEM;
3928 }
3929
6733b39a 3930 if (hwi_init_port(phba) != 0) {
99bc5d55
JSJ
3931 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3932 "BM_%d : hwi_init_controller failed\n");
3933
6733b39a
JK
3934 return -ENOMEM;
3935 }
3936 return 0;
3937}
3938
3939static void beiscsi_free_mem(struct beiscsi_hba *phba)
3940{
3941 struct be_mem_descriptor *mem_descr;
3942 int i, j;
3943
3944 mem_descr = phba->init_mem;
3945 i = 0;
3946 j = 0;
3947 for (i = 0; i < SE_MEM_MAX; i++) {
3948 for (j = mem_descr->num_elements; j > 0; j--) {
3949 pci_free_consistent(phba->pcidev,
3950 mem_descr->mem_array[j - 1].size,
3951 mem_descr->mem_array[j - 1].virtual_address,
457ff3b7
JK
3952 (unsigned long)mem_descr->mem_array[j - 1].
3953 bus_address.u.a64.address);
6733b39a 3954 }
8a86e833 3955
6733b39a
JK
3956 kfree(mem_descr->mem_array);
3957 mem_descr++;
3958 }
3959 kfree(phba->init_mem);
a7909b39 3960 kfree(phba->phwi_ctrlr->wrb_context);
6733b39a
JK
3961 kfree(phba->phwi_ctrlr);
3962}
3963
3964static int beiscsi_init_controller(struct beiscsi_hba *phba)
3965{
3966 int ret = -ENOMEM;
3967
3968 ret = beiscsi_get_memory(phba);
3969 if (ret < 0) {
99bc5d55
JSJ
3970 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3971 "BM_%d : beiscsi_dev_probe -"
3972 "Failed in beiscsi_alloc_memory\n");
6733b39a
JK
3973 return ret;
3974 }
3975
3976 ret = hwi_init_controller(phba);
3977 if (ret)
3978 goto free_init;
99bc5d55
JSJ
3979 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3980 "BM_%d : Return success from beiscsi_init_controller");
3981
6733b39a
JK
3982 return 0;
3983
3984free_init:
3985 beiscsi_free_mem(phba);
a49e06d5 3986 return ret;
6733b39a
JK
3987}
3988
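/**
 * beiscsi_init_sgl_handle()- Set up the IO and EH SGL handle pools
 * @phba: ptr to the priv structure
 *
 * Split the HWI_MEM_SGLH handles into an IO pool (ios_per_ctrl) and an
 * EH pool (the remaining ICDs), attach each handle to its iscsi_sge
 * fragment from HWI_MEM_SGE and index it from the ULP's ICD start.
 **/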
3989static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3990{
3991 struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
3992 struct sgl_handle *psgl_handle;
3993 struct iscsi_sge *pfrag;
90622db3
JK
3994 unsigned int arr_index, i, idx;
3995 unsigned int ulp_icd_start, ulp_num = 0;
6733b39a
JK
3996
3997 phba->io_sgl_hndl_avbl = 0;
3998 phba->eh_sgl_hndl_avbl = 0;
bfead3b2 3999
6733b39a
JK
4000 mem_descr_sglh = phba->init_mem;
4001 mem_descr_sglh += HWI_MEM_SGLH;
4002 if (1 == mem_descr_sglh->num_elements) {
4003 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
4004 phba->params.ios_per_ctrl,
4005 GFP_KERNEL);
4006 if (!phba->io_sgl_hndl_base) {
99bc5d55
JSJ
4007 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4008 "BM_%d : Mem Alloc Failed. Failing to load\n");
6733b39a
JK
4009 return -ENOMEM;
4010 }
4011 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
4012 (phba->params.icds_per_ctrl -
4013 phba->params.ios_per_ctrl),
4014 GFP_KERNEL);
4015 if (!phba->eh_sgl_hndl_base) {
4016 kfree(phba->io_sgl_hndl_base);
99bc5d55
JSJ
4017 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4018 "BM_%d : Mem Alloc Failed. Failing to load\n");
6733b39a
JK
4019 return -ENOMEM;
4020 }
4021 } else {
99bc5d55
JSJ
4022 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4023 "BM_%d : HWI_MEM_SGLH is more than one element."
4024 "Failing to load\n");
6733b39a
JK
4025 return -ENOMEM;
4026 }
4027
4028 arr_index = 0;
4029 idx = 0;
4030 while (idx < mem_descr_sglh->num_elements) {
4031 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
4032
4033 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
4034 sizeof(struct sgl_handle)); i++) {
4035 if (arr_index < phba->params.ios_per_ctrl) {
4036 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
4037 phba->io_sgl_hndl_avbl++;
4038 arr_index++;
4039 } else {
4040 phba->eh_sgl_hndl_base[arr_index -
4041 phba->params.ios_per_ctrl] =
4042 psgl_handle;
4043 arr_index++;
4044 phba->eh_sgl_hndl_avbl++;
4045 }
4046 psgl_handle++;
4047 }
4048 idx++;
4049 }
99bc5d55
JSJ
4050 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4051 "BM_%d : phba->io_sgl_hndl_avbl=%d"
4052 "phba->eh_sgl_hndl_avbl=%d\n",
4053 phba->io_sgl_hndl_avbl,
4054 phba->eh_sgl_hndl_avbl);
4055
6733b39a
JK
4056 mem_descr_sg = phba->init_mem;
4057 mem_descr_sg += HWI_MEM_SGE;
99bc5d55
JSJ
4058 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4059 "\n BM_%d : mem_descr_sg->num_elements=%d\n",
4060 mem_descr_sg->num_elements);
4061
90622db3
JK
4062 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
4063 if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
4064 break;
4065
4066 ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
4067
6733b39a
JK
4068 arr_index = 0;
4069 idx = 0;
4070 while (idx < mem_descr_sg->num_elements) {
4071 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
4072
4073 for (i = 0;
4074 i < (mem_descr_sg->mem_array[idx].size) /
4075 (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
4076 i++) {
4077 if (arr_index < phba->params.ios_per_ctrl)
4078 psgl_handle = phba->io_sgl_hndl_base[arr_index];
4079 else
4080 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
4081 phba->params.ios_per_ctrl];
4082 psgl_handle->pfrag = pfrag;
4083 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
4084 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
4085 pfrag += phba->params.num_sge_per_io;
90622db3 4086 psgl_handle->sgl_index = ulp_icd_start + arr_index++;
6733b39a
JK
4087 }
4088 idx++;
4089 }
4090 phba->io_sgl_free_index = 0;
4091 phba->io_sgl_alloc_index = 0;
4092 phba->eh_sgl_free_index = 0;
4093 phba->eh_sgl_alloc_index = 0;
4094 return 0;
4095}
4096
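/**
 * hba_setup_cid_tbls()- Allocate the CID, endpoint and connection tables
 * @phba: ptr to the priv structure
 *
 * Build a free-CID array for each supported ULP, allocate the ep_array
 * and conn_table for all connections and seed the CID arrays from the
 * WRB context of every connection.
 **/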
4097static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
4098{
0a3db7c0
JK
4099 int ret;
4100 uint16_t i, ulp_num;
4101 struct ulp_cid_info *ptr_cid_info = NULL;
6733b39a 4102
0a3db7c0
JK
4103 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4104 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4105 ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info),
4106 GFP_KERNEL);
4107
4108 if (!ptr_cid_info) {
4109 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4110 "BM_%d : Failed to allocate memory"
4111 "for ULP_CID_INFO for ULP : %d\n",
4112 ulp_num);
4113 ret = -ENOMEM;
4114 goto free_memory;
4115
4116 }
4117
4118 /* Allocate memory for CID array */
4119 ptr_cid_info->cid_array = kzalloc(sizeof(void *) *
4120 BEISCSI_GET_CID_COUNT(phba,
4121 ulp_num), GFP_KERNEL);
4122 if (!ptr_cid_info->cid_array) {
4123 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4124 "BM_%d : Failed to allocate memory"
4125 "for CID_ARRAY for ULP : %d\n",
4126 ulp_num);
4127 kfree(ptr_cid_info);
4128 ptr_cid_info = NULL;
4129 ret = -ENOMEM;
4130
4131 goto free_memory;
4132 }
4133 ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT(
4134 phba, ulp_num);
4135
4136 /* Save the cid_info_array ptr */
4137 phba->cid_array_info[ulp_num] = ptr_cid_info;
4138 }
6733b39a 4139 }
c2462288 4140 phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
a7909b39 4141 phba->params.cxns_per_ctrl, GFP_KERNEL);
6733b39a 4142 if (!phba->ep_array) {
99bc5d55
JSJ
4143 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4144 "BM_%d : Failed to allocate memory in "
4145 "hba_setup_cid_tbls\n");
0a3db7c0
JK
4146 ret = -ENOMEM;
4147
4148 goto free_memory;
6733b39a 4149 }
a7909b39
JK
4150
4151 phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) *
4152 phba->params.cxns_per_ctrl, GFP_KERNEL);
4153 if (!phba->conn_table) {
4154 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4155 "BM_%d : Failed to allocate memory in"
4156 "hba_setup_cid_tbls\n");
4157
a7909b39 4158 kfree(phba->ep_array);
a7909b39 4159 phba->ep_array = NULL;
0a3db7c0 4160 ret = -ENOMEM;
 goto free_memory;
6733b39a 4161 }
a7909b39 4162
0a3db7c0
JK
4163 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
4164 ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num;
4165
4166 ptr_cid_info = phba->cid_array_info[ulp_num];
4167 ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] =
4168 phba->phwi_ctrlr->wrb_context[i].cid;
4169
4170 }
4171
4172 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4173 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4174 ptr_cid_info = phba->cid_array_info[ulp_num];
a7909b39 4175
0a3db7c0
JK
4176 ptr_cid_info->cid_alloc = 0;
4177 ptr_cid_info->cid_free = 0;
4178 }
4179 }
6733b39a 4180 return 0;
0a3db7c0
JK
4181
4182free_memory:
4183 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4184 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4185 ptr_cid_info = phba->cid_array_info[ulp_num];
4186
4187 if (ptr_cid_info) {
4188 kfree(ptr_cid_info->cid_array);
4189 kfree(ptr_cid_info);
4190 phba->cid_array_info[ulp_num] = NULL;
4191 }
4192 }
4193 }
4194
4195 return ret;
6733b39a
JK
4196}
4197
238f6b72 4198static void hwi_enable_intr(struct beiscsi_hba *phba)
6733b39a
JK
4199{
4200 struct be_ctrl_info *ctrl = &phba->ctrl;
4201 struct hwi_controller *phwi_ctrlr;
4202 struct hwi_context_memory *phwi_context;
4203 struct be_queue_info *eq;
4204 u8 __iomem *addr;
bfead3b2 4205 u32 reg, i;
6733b39a
JK
4206 u32 enabled;
4207
4208 phwi_ctrlr = phba->phwi_ctrlr;
4209 phwi_context = phwi_ctrlr->phwi_ctxt;
4210
6733b39a
JK
4211 addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
4212 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
4213 reg = ioread32(addr);
6733b39a
JK
4214
4215 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4216 if (!enabled) {
4217 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
99bc5d55
JSJ
4218 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4219 "BM_%d : reg =x%08x addr=%p\n", reg, addr);
6733b39a 4220 iowrite32(reg, addr);
665d6d94
JK
4221 }
4222
4223 if (!phba->msix_enabled) {
4224 eq = &phwi_context->be_eq[0].q;
99bc5d55
JSJ
4225 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4226 "BM_%d : eq->id=%d\n", eq->id);
4227
665d6d94
JK
4228 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
4229 } else {
4230 for (i = 0; i <= phba->num_cpus; i++) {
4231 eq = &phwi_context->be_eq[i].q;
99bc5d55
JSJ
4232 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4233 "BM_%d : eq->id=%d\n", eq->id);
bfead3b2
JK
4234 hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
4235 }
c03af1ae 4236 }
6733b39a
JK
4237}
4238
4239static void hwi_disable_intr(struct beiscsi_hba *phba)
4240{
4241 struct be_ctrl_info *ctrl = &phba->ctrl;
4242
4243 u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
4244 u32 reg = ioread32(addr);
4245
4246 u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4247 if (enabled) {
4248 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4249 iowrite32(reg, addr);
4250 } else
99bc5d55
JSJ
4251 beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
4252 "BM_%d : In hwi_disable_intr, Already Disabled\n");
6733b39a
JK
4253}
4254
9aef4200
JSJ
4255/**
4256 * beiscsi_get_boot_info()- Get the boot session info
4257 * @phba: The device priv structure instance
4258 *
4259 * Get the boot target info and store in driver priv structure
4260 *
4261 * return values
4262 * Success: 0
4263 * Failure: Non-Zero Value
4264 **/
c7acc5b8
JK
4265static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
4266{
0e43895e 4267 struct be_cmd_get_session_resp *session_resp;
c7acc5b8 4268 struct be_dma_mem nonemb_cmd;
e175defe 4269 unsigned int tag;
9aef4200 4270 unsigned int s_handle;
f457a46f 4271 int ret = -ENOMEM;
c7acc5b8 4272
9aef4200
JSJ
4273 /* Get the session handle of the boot target */
4274 ret = be_mgmt_get_boot_shandle(phba, &s_handle);
4275 if (ret) {
99bc5d55
JSJ
4276 beiscsi_log(phba, KERN_ERR,
4277 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4278 "BM_%d : No boot session\n");
9aef4200 4279 return ret;
c7acc5b8 4280 }
c7acc5b8
JK
4281 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
4282 sizeof(*session_resp),
4283 &nonemb_cmd.dma);
4284 if (nonemb_cmd.va == NULL) {
99bc5d55
JSJ
4285 beiscsi_log(phba, KERN_ERR,
4286 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4287 "BM_%d : Failed to allocate memory for"
4288 "beiscsi_get_session_info\n");
4289
c7acc5b8
JK
4290 return -ENOMEM;
4291 }
4292
4293 memset(nonemb_cmd.va, 0, sizeof(*session_resp));
9aef4200 4294 tag = mgmt_get_session_info(phba, s_handle,
0e43895e 4295 &nonemb_cmd);
c7acc5b8 4296 if (!tag) {
99bc5d55
JSJ
4297 beiscsi_log(phba, KERN_ERR,
4298 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
4299 "BM_%d : beiscsi_get_session_info"
4300 " Failed\n");
4301
c7acc5b8 4302 goto boot_freemem;
e175defe 4303 }
c7acc5b8 4304
e175defe
JSJ
4305 ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
4306 if (ret) {
99bc5d55
JSJ
4307 beiscsi_log(phba, KERN_ERR,
4308 BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
e175defe 4309 "BM_%d : beiscsi_get_session_info Failed");
c7acc5b8
JK
4310 goto boot_freemem;
4311 }
e175defe 4312
c7acc5b8 4313 session_resp = nonemb_cmd.va ;
f457a46f 4314
c7acc5b8
JK
4315 memcpy(&phba->boot_sess, &session_resp->session_info,
4316 sizeof(struct mgmt_session_info));
f457a46f
MC
4317 ret = 0;
4318
c7acc5b8
JK
4319boot_freemem:
4320 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4321 nonemb_cmd.va, nonemb_cmd.dma);
f457a46f
MC
4322 return ret;
4323}
4324
4325static void beiscsi_boot_release(void *data)
4326{
4327 struct beiscsi_hba *phba = data;
4328
4329 scsi_host_put(phba->shost);
4330}
4331
4332static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
4333{
4334 struct iscsi_boot_kobj *boot_kobj;
4335
4336 /* get boot info using mgmt cmd */
4337 if (beiscsi_get_boot_info(phba))
4338 /* Try to see if we can carry on without this */
4339 return 0;
4340
4341 phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
4342 if (!phba->boot_kset)
4343 return -ENOMEM;
4344
4345 /* get a ref because the show function will ref the phba */
4346 if (!scsi_host_get(phba->shost))
4347 goto free_kset;
4348 boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
4349 beiscsi_show_boot_tgt_info,
4350 beiscsi_tgt_get_attr_visibility,
4351 beiscsi_boot_release);
4352 if (!boot_kobj)
4353 goto put_shost;
4354
4355 if (!scsi_host_get(phba->shost))
4356 goto free_kset;
4357 boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
4358 beiscsi_show_boot_ini_info,
4359 beiscsi_ini_get_attr_visibility,
4360 beiscsi_boot_release);
4361 if (!boot_kobj)
4362 goto put_shost;
4363
4364 if (!scsi_host_get(phba->shost))
4365 goto free_kset;
4366 boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
4367 beiscsi_show_boot_eth_info,
4368 beiscsi_eth_get_attr_visibility,
4369 beiscsi_boot_release);
4370 if (!boot_kobj)
4371 goto put_shost;
4372 return 0;
4373
4374put_shost:
4375 scsi_host_put(phba->shost);
4376free_kset:
4377 iscsi_boot_destroy_kset(phba->boot_kset);
c7acc5b8
JK
4378 return -ENOMEM;
4379}
4380
6733b39a
JK
4381static int beiscsi_init_port(struct beiscsi_hba *phba)
4382{
4383 int ret;
4384
4385 ret = beiscsi_init_controller(phba);
4386 if (ret < 0) {
99bc5d55
JSJ
4387 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4388 "BM_%d : beiscsi_dev_probe - Failed in"
4389 "beiscsi_init_controller\n");
6733b39a
JK
4390 return ret;
4391 }
4392 ret = beiscsi_init_sgl_handle(phba);
4393 if (ret < 0) {
99bc5d55
JSJ
4394 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4395 "BM_%d : beiscsi_dev_probe - Failed in"
4396 "beiscsi_init_sgl_handle\n");
6733b39a
JK
4397 goto do_cleanup_ctrlr;
4398 }
4399
4400 if (hba_setup_cid_tbls(phba)) {
99bc5d55
JSJ
4401 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4402 "BM_%d : Failed in hba_setup_cid_tbls\n");
6733b39a
JK
4403 kfree(phba->io_sgl_hndl_base);
4404 kfree(phba->eh_sgl_hndl_base);
4405 goto do_cleanup_ctrlr;
4406 }
4407
4408 return ret;
4409
4410do_cleanup_ctrlr:
4411 hwi_cleanup(phba);
4412 return ret;
4413}
4414
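/**
 * hwi_purge_eq()- Drain any valid entries left on the EQs
 * @phba: ptr to the priv structure
 *
 * Walk every event queue (including the MCC EQ when MSIx is enabled),
 * invalidate the pending entries and ring the EQ doorbell for the
 * processed entries.
 **/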
4415static void hwi_purge_eq(struct beiscsi_hba *phba)
4416{
4417 struct hwi_controller *phwi_ctrlr;
4418 struct hwi_context_memory *phwi_context;
4419 struct be_queue_info *eq;
4420 struct be_eq_entry *eqe = NULL;
bfead3b2 4421 int i, eq_msix;
756d29c8 4422 unsigned int num_processed;
6733b39a
JK
4423
4424 phwi_ctrlr = phba->phwi_ctrlr;
4425 phwi_context = phwi_ctrlr->phwi_ctxt;
bfead3b2
JK
4426 if (phba->msix_enabled)
4427 eq_msix = 1;
4428 else
4429 eq_msix = 0;
6733b39a 4430
bfead3b2
JK
4431 for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
4432 eq = &phwi_context->be_eq[i].q;
6733b39a 4433 eqe = queue_tail_node(eq);
756d29c8 4434 num_processed = 0;
bfead3b2
JK
4435 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
4436 & EQE_VALID_MASK) {
4437 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
4438 queue_tail_inc(eq);
4439 eqe = queue_tail_node(eq);
756d29c8 4440 num_processed++;
bfead3b2 4441 }
756d29c8
JK
4442
4443 if (num_processed)
4444 hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
6733b39a
JK
4445 }
4446}
4447
4448static void beiscsi_clean_port(struct beiscsi_hba *phba)
4449{
0a3db7c0
JK
4450 int mgmt_status, ulp_num;
4451 struct ulp_cid_info *ptr_cid_info = NULL;
6733b39a 4452
bd41c2bd
JK
4453 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4454 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4455 mgmt_status = mgmt_epfw_cleanup(phba, ulp_num);
4456 if (mgmt_status)
4457 beiscsi_log(phba, KERN_WARNING,
4458 BEISCSI_LOG_INIT,
4459 "BM_%d : mgmt_epfw_cleanup FAILED"
4460 " for ULP_%d\n", ulp_num);
4461 }
4462 }
756d29c8 4463
6733b39a 4464 hwi_purge_eq(phba);
756d29c8 4465 hwi_cleanup(phba);
6733b39a
JK
4466 kfree(phba->io_sgl_hndl_base);
4467 kfree(phba->eh_sgl_hndl_base);
6733b39a 4468 kfree(phba->ep_array);
a7909b39 4469 kfree(phba->conn_table);
0a3db7c0
JK
4470
4471 for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4472 if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4473 ptr_cid_info = phba->cid_array_info[ulp_num];
4474
4475 if (ptr_cid_info) {
4476 kfree(ptr_cid_info->cid_array);
4477 kfree(ptr_cid_info);
4478 phba->cid_array_info[ulp_num] = NULL;
4479 }
4480 }
4481 }
4482
6733b39a
JK
4483}
4484
43f388b0
JK
4485/**
4486 * beiscsi_free_mgmt_task_handles()- Free driver CXN resources
4487 * @beiscsi_conn: ptr to the conn to be cleaned up
4a4a11b9 4488 * @task: ptr to iscsi_task resource to be freed.
43f388b0
JK
4489 *
4490 * Free driver mgmt resources binded to CXN.
4491 **/
4492void
4a4a11b9
JK
4493beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
4494 struct iscsi_task *task)
43f388b0
JK
4495{
4496 struct beiscsi_io_task *io_task;
4497 struct beiscsi_hba *phba = beiscsi_conn->phba;
4498 struct hwi_wrb_context *pwrb_context;
4499 struct hwi_controller *phwi_ctrlr;
a7909b39
JK
4500 uint16_t cri_index = BE_GET_CRI_FROM_CID(
4501 beiscsi_conn->beiscsi_conn_cid);
43f388b0
JK
4502
4503 phwi_ctrlr = phba->phwi_ctrlr;
a7909b39
JK
4504 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4505
4a4a11b9 4506 io_task = task->dd_data;
43f388b0
JK
4507
4508 if (io_task->pwrb_handle) {
4509 memset(io_task->pwrb_handle->pwrb, 0,
4510 sizeof(struct iscsi_wrb));
4511 free_wrb_handle(phba, pwrb_context,
4512 io_task->pwrb_handle);
4513 io_task->pwrb_handle = NULL;
4514 }
4515
4516 if (io_task->psgl_handle) {
4517 spin_lock_bh(&phba->mgmt_sgl_lock);
4518 free_mgmt_sgl_handle(phba,
4519 io_task->psgl_handle);
43f388b0 4520 io_task->psgl_handle = NULL;
4a4a11b9 4521 spin_unlock_bh(&phba->mgmt_sgl_lock);
43f388b0
JK
4522 }
4523
4524 if (io_task->mtask_addr)
4525 pci_unmap_single(phba->pcidev,
4526 io_task->mtask_addr,
4527 io_task->mtask_data_count,
4528 PCI_DMA_TODEVICE);
4529}
4530
d629c471
JSJ
4531/**
4532 * beiscsi_cleanup_task()- Free driver resources of the task
4533 * @task: ptr to the iscsi task
4534 *
4535 **/
1282ab76
MC
4536static void beiscsi_cleanup_task(struct iscsi_task *task)
4537{
4538 struct beiscsi_io_task *io_task = task->dd_data;
4539 struct iscsi_conn *conn = task->conn;
4540 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4541 struct beiscsi_hba *phba = beiscsi_conn->phba;
4542 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4543 struct hwi_wrb_context *pwrb_context;
4544 struct hwi_controller *phwi_ctrlr;
a7909b39
JK
4545 uint16_t cri_index = BE_GET_CRI_FROM_CID(
4546 beiscsi_conn->beiscsi_conn_cid);
1282ab76
MC
4547
4548 phwi_ctrlr = phba->phwi_ctrlr;
a7909b39 4549 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
1282ab76
MC
4550
4551 if (io_task->cmd_bhs) {
4552 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4553 io_task->bhs_pa.u.a64.address);
4554 io_task->cmd_bhs = NULL;
4555 }
4556
4557 if (task->sc) {
4558 if (io_task->pwrb_handle) {
4559 free_wrb_handle(phba, pwrb_context,
4560 io_task->pwrb_handle);
4561 io_task->pwrb_handle = NULL;
4562 }
4563
4564 if (io_task->psgl_handle) {
4565 spin_lock(&phba->io_sgl_lock);
4566 free_io_sgl_handle(phba, io_task->psgl_handle);
4567 spin_unlock(&phba->io_sgl_lock);
4568 io_task->psgl_handle = NULL;
4569 }
4570 } else {
43f388b0 4571 if (!beiscsi_conn->login_in_progress)
4a4a11b9 4572 beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
1282ab76
MC
4573 }
4574}
4575
6733b39a
JK
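/**
 * beiscsi_offload_connection()- Offload the connection to the adapter
 * @beiscsi_conn: ptr to the connection being offloaded
 * @params: offload parameters for the connection
 *
 * Free the login task resources, build the target context update WRB
 * (v0 for BE2/BE3, v2 otherwise) and ring the doorbell to post it.
 **/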
4576void
4577beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
4578 struct beiscsi_offload_params *params)
4579{
4580 struct wrb_handle *pwrb_handle;
6733b39a 4581 struct beiscsi_hba *phba = beiscsi_conn->phba;
1282ab76
MC
4582 struct iscsi_task *task = beiscsi_conn->task;
4583 struct iscsi_session *session = task->conn->session;
6733b39a
JK
4584 u32 doorbell = 0;
4585
4586 /*
4587 * We can always use 0 here because it is reserved by libiscsi for
4588 * login/startup related tasks.
4589 */
1282ab76
MC
4590 beiscsi_conn->login_in_progress = 0;
4591 spin_lock_bh(&session->lock);
4592 beiscsi_cleanup_task(task);
4593 spin_unlock_bh(&session->lock);
4594
a7909b39 4595 pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid);
6733b39a 4596
acb9693c 4597 /* Check for the adapter family */
2c9dfd36 4598 if (is_chip_be2_be3r(phba))
acb9693c
JSJ
4599 beiscsi_offload_cxn_v0(params, pwrb_handle,
4600 phba->init_mem);
2c9dfd36
JK
4601 else
4602 beiscsi_offload_cxn_v2(params, pwrb_handle);
6733b39a 4603
acb9693c
JSJ
4604 be_dws_le_to_cpu(pwrb_handle->pwrb,
4605 sizeof(struct iscsi_target_context_update_wrb));
6733b39a
JK
4606
4607 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
32951dd8 4608 doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
bfead3b2 4609 << DB_DEF_PDU_WRB_INDEX_SHIFT;
6733b39a 4610 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
1e4be6ff
JK
4611 iowrite32(doorbell, phba->db_va +
4612 beiscsi_conn->doorbell_offset);
6733b39a
JK
4613}
4614
4615static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
4616 int *index, int *age)
4617{
bfead3b2 4618 *index = (int)itt;
6733b39a
JK
4619 if (age)
4620 *age = conn->session->age;
4621}
4622
4623/**
4624 * beiscsi_alloc_pdu - allocates pdu and related resources
4625 * @task: libiscsi task
4626 * @opcode: opcode of pdu for task
4627 *
4628 * This is called with the session lock held. It will allocate
4629 * the wrb and sgl if needed for the command. And it will prep
4630 * the pdu's itt. beiscsi_parse_pdu will later translate
4631 * the pdu itt to the libiscsi task itt.
4632 */
4633static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4634{
4635 struct beiscsi_io_task *io_task = task->dd_data;
4636 struct iscsi_conn *conn = task->conn;
4637 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4638 struct beiscsi_hba *phba = beiscsi_conn->phba;
4639 struct hwi_wrb_context *pwrb_context;
4640 struct hwi_controller *phwi_ctrlr;
4641 itt_t itt;
a7909b39 4642 uint16_t cri_index = 0;
2afc95bf
JK
4643 struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4644 dma_addr_t paddr;
6733b39a 4645
2afc95bf 4646 io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
bc7accec 4647 GFP_ATOMIC, &paddr);
2afc95bf
JK
4648 if (!io_task->cmd_bhs)
4649 return -ENOMEM;
2afc95bf 4650 io_task->bhs_pa.u.a64.address = paddr;
bfead3b2 4651 io_task->libiscsi_itt = (itt_t)task->itt;
6733b39a
JK
4652 io_task->conn = beiscsi_conn;
4653
4654 task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
4655 task->hdr_max = sizeof(struct be_cmd_bhs);
d2cecf0d 4656 io_task->psgl_handle = NULL;
3ec78271 4657 io_task->pwrb_handle = NULL;
6733b39a
JK
4658
4659 if (task->sc) {
4660 spin_lock(&phba->io_sgl_lock);
4661 io_task->psgl_handle = alloc_io_sgl_handle(phba);
4662 spin_unlock(&phba->io_sgl_lock);
8359c79b
JSJ
4663 if (!io_task->psgl_handle) {
4664 beiscsi_log(phba, KERN_ERR,
4665 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4666 "BM_%d : Alloc of IO_SGL_ICD Failed"
4667 "for the CID : %d\n",
4668 beiscsi_conn->beiscsi_conn_cid);
2afc95bf 4669 goto free_hndls;
8359c79b 4670 }
d2cecf0d 4671 io_task->pwrb_handle = alloc_wrb_handle(phba,
a7909b39 4672 beiscsi_conn->beiscsi_conn_cid);
8359c79b
JSJ
4673 if (!io_task->pwrb_handle) {
4674 beiscsi_log(phba, KERN_ERR,
4675 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4676 "BM_%d : Alloc of WRB_HANDLE Failed"
4677 "for the CID : %d\n",
4678 beiscsi_conn->beiscsi_conn_cid);
d2cecf0d 4679 goto free_io_hndls;
8359c79b 4680 }
6733b39a
JK
4681 } else {
4682 io_task->scsi_cmnd = NULL;
d7aea67b 4683 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
43f388b0 4684 beiscsi_conn->task = task;
6733b39a
JK
4685 if (!beiscsi_conn->login_in_progress) {
4686 spin_lock(&phba->mgmt_sgl_lock);
4687 io_task->psgl_handle = (struct sgl_handle *)
4688 alloc_mgmt_sgl_handle(phba);
4689 spin_unlock(&phba->mgmt_sgl_lock);
8359c79b
JSJ
4690 if (!io_task->psgl_handle) {
4691 beiscsi_log(phba, KERN_ERR,
4692 BEISCSI_LOG_IO |
4693 BEISCSI_LOG_CONFIG,
4694 "BM_%d : Alloc of MGMT_SGL_ICD Failed"
4695 "for the CID : %d\n",
4696 beiscsi_conn->
4697 beiscsi_conn_cid);
2afc95bf 4698 goto free_hndls;
8359c79b 4699 }
2afc95bf 4700
6733b39a
JK
4701 beiscsi_conn->login_in_progress = 1;
4702 beiscsi_conn->plogin_sgl_handle =
4703 io_task->psgl_handle;
d2cecf0d
JK
4704 io_task->pwrb_handle =
4705 alloc_wrb_handle(phba,
a7909b39 4706 beiscsi_conn->beiscsi_conn_cid);
8359c79b
JSJ
4707 if (!io_task->pwrb_handle) {
4708 beiscsi_log(phba, KERN_ERR,
4709 BEISCSI_LOG_IO |
4710 BEISCSI_LOG_CONFIG,
4711 "BM_%d : Alloc of WRB_HANDLE Failed"
4712 "for the CID : %d\n",
4713 beiscsi_conn->
4714 beiscsi_conn_cid);
4715 goto free_mgmt_hndls;
4716 }
d2cecf0d
JK
4717 beiscsi_conn->plogin_wrb_handle =
4718 io_task->pwrb_handle;
4719
6733b39a
JK
4720 } else {
4721 io_task->psgl_handle =
4722 beiscsi_conn->plogin_sgl_handle;
d2cecf0d
JK
4723 io_task->pwrb_handle =
4724 beiscsi_conn->plogin_wrb_handle;
6733b39a
JK
4725 }
4726 } else {
4727 spin_lock(&phba->mgmt_sgl_lock);
4728 io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
4729 spin_unlock(&phba->mgmt_sgl_lock);
8359c79b
JSJ
4730 if (!io_task->psgl_handle) {
4731 beiscsi_log(phba, KERN_ERR,
4732 BEISCSI_LOG_IO |
4733 BEISCSI_LOG_CONFIG,
4734 "BM_%d : Alloc of MGMT_SGL_ICD Failed"
4735 "for the CID : %d\n",
4736 beiscsi_conn->
4737 beiscsi_conn_cid);
2afc95bf 4738 goto free_hndls;
8359c79b 4739 }
d2cecf0d
JK
4740 io_task->pwrb_handle =
4741 alloc_wrb_handle(phba,
a7909b39 4742 beiscsi_conn->beiscsi_conn_cid);
8359c79b
JSJ
4743 if (!io_task->pwrb_handle) {
4744 beiscsi_log(phba, KERN_ERR,
4745 BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4746 "BM_%d : Alloc of WRB_HANDLE Failed"
4747 "for the CID : %d\n",
4748 beiscsi_conn->beiscsi_conn_cid);
d2cecf0d 4749 goto free_mgmt_hndls;
8359c79b 4750 }
d2cecf0d 4751
6733b39a
JK
4752 }
4753 }
bfead3b2
JK
4754 itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
4755 wrb_index << 16) | (unsigned int)
4756 (io_task->psgl_handle->sgl_index));
32951dd8 4757 io_task->pwrb_handle->pio_handle = task;
bfead3b2 4758
6733b39a
JK
4759 io_task->cmd_bhs->iscsi_hdr.itt = itt;
4760 return 0;
2afc95bf 4761
d2cecf0d
JK
4762free_io_hndls:
4763 spin_lock(&phba->io_sgl_lock);
4764 free_io_sgl_handle(phba, io_task->psgl_handle);
4765 spin_unlock(&phba->io_sgl_lock);
4766 goto free_hndls;
4767free_mgmt_hndls:
4768 spin_lock(&phba->mgmt_sgl_lock);
4769 free_mgmt_sgl_handle(phba, io_task->psgl_handle);
a7909b39 4770 io_task->psgl_handle = NULL;
d2cecf0d 4771 spin_unlock(&phba->mgmt_sgl_lock);
2afc95bf
JK
4772free_hndls:
4773 phwi_ctrlr = phba->phwi_ctrlr;
a7909b39
JK
4774 cri_index = BE_GET_CRI_FROM_CID(
4775 beiscsi_conn->beiscsi_conn_cid);
4776 pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
d2cecf0d
JK
4777 if (io_task->pwrb_handle)
4778 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
2afc95bf
JK
4779 io_task->pwrb_handle = NULL;
4780 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4781 io_task->bhs_pa.u.a64.address);
1282ab76 4782 io_task->cmd_bhs = NULL;
2afc95bf 4783 return -ENOMEM;
6733b39a 4784}
09a1093a
JSJ
4785int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
4786 unsigned int num_sg, unsigned int xferlen,
4787 unsigned int writedir)
4788{
4789
4790 struct beiscsi_io_task *io_task = task->dd_data;
4791 struct iscsi_conn *conn = task->conn;
4792 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4793 struct beiscsi_hba *phba = beiscsi_conn->phba;
4794 struct iscsi_wrb *pwrb = NULL;
4795 unsigned int doorbell = 0;
4796
4797 pwrb = io_task->pwrb_handle->pwrb;
09a1093a
JSJ
4798
4799 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
4800 io_task->bhs_len = sizeof(struct be_cmd_bhs);
4801
4802 if (writedir) {
4803 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
4804 INI_WR_CMD);
4805 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1);
4806 } else {
4807 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
4808 INI_RD_CMD);
4809 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0);
4810 }
4811
4812 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2,
4813 type, pwrb);
4814
4815 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb,
4816 cpu_to_be16(*(unsigned short *)
4817 &io_task->cmd_bhs->iscsi_hdr.lun));
4818 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen);
4819 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
4820 io_task->pwrb_handle->wrb_index);
4821 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
4822 be32_to_cpu(task->cmdsn));
4823 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
4824 io_task->psgl_handle->sgl_index);
4825
4826 hwi_write_sgl_v2(pwrb, sg, num_sg, io_task);
4827 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
4828 io_task->pwrb_handle->nxt_wrb_index);
4829
4830 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4831
4832 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4833 doorbell |= (io_task->pwrb_handle->wrb_index &
4834 DB_DEF_PDU_WRB_INDEX_MASK) <<
4835 DB_DEF_PDU_WRB_INDEX_SHIFT;
4836 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
1e4be6ff
JK
4837 iowrite32(doorbell, phba->db_va +
4838 beiscsi_conn->doorbell_offset);
09a1093a
JSJ
4839 return 0;
4840}
6733b39a 4841
6733b39a
JK
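/**
 * beiscsi_iotask()- Post a SCSI IO WRB
 * @task: iscsi task for the IO
 * @sg: scatter-gather list of the IO
 * @num_sg: number of SG elements
 * @xferlen: total transfer length
 * @writedir: 1 for a write command, 0 for a read
 *
 * Counterpart of beiscsi_iotask_v2() for the original WRB layout: fill
 * the WRB with the command parameters and SGL, convert it to LE and
 * ring the WRB doorbell for the connection.
 **/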
4842static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
4843 unsigned int num_sg, unsigned int xferlen,
4844 unsigned int writedir)
4845{
4846
4847 struct beiscsi_io_task *io_task = task->dd_data;
4848 struct iscsi_conn *conn = task->conn;
4849 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4850 struct beiscsi_hba *phba = beiscsi_conn->phba;
4851 struct iscsi_wrb *pwrb = NULL;
4852 unsigned int doorbell = 0;
4853
4854 pwrb = io_task->pwrb_handle->pwrb;
6733b39a
JK
4855 io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
4856 io_task->bhs_len = sizeof(struct be_cmd_bhs);
4857
4858 if (writedir) {
32951dd8
JK
4859 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4860 INI_WR_CMD);
6733b39a 4861 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
6733b39a 4862 } else {
32951dd8
JK
4863 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4864 INI_RD_CMD);
6733b39a
JK
4865 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
4866 }
6733b39a 4867
09a1093a
JSJ
4868 io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb,
4869 type, pwrb);
4870
6733b39a 4871 AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
dc63aac6
JK
4872 cpu_to_be16(*(unsigned short *)
4873 &io_task->cmd_bhs->iscsi_hdr.lun));
6733b39a
JK
4874 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
4875 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4876 io_task->pwrb_handle->wrb_index);
4877 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4878 be32_to_cpu(task->cmdsn));
4879 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4880 io_task->psgl_handle->sgl_index);
4881
4882 hwi_write_sgl(pwrb, sg, num_sg, io_task);
4883
4884 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4885 io_task->pwrb_handle->nxt_wrb_index);
4886 be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4887
4888 doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
32951dd8 4889 doorbell |= (io_task->pwrb_handle->wrb_index &
6733b39a
JK
4890 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4891 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4892
1e4be6ff
JK
4893 iowrite32(doorbell, phba->db_va +
4894 beiscsi_conn->doorbell_offset);
6733b39a
JK
4895 return 0;
4896}
4897
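/**
 * beiscsi_mtask()- Post a management task WRB
 * @task: iscsi task (login, nop-out, text, TMF or logout)
 *
 * Build the WRB in the layout matching the adapter family, set the WRB
 * type from the iSCSI opcode and ring the doorbell to post it.
 **/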
4898static int beiscsi_mtask(struct iscsi_task *task)
4899{
dafab8e0 4900 struct beiscsi_io_task *io_task = task->dd_data;
6733b39a
JK
4901 struct iscsi_conn *conn = task->conn;
4902 struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4903 struct beiscsi_hba *phba = beiscsi_conn->phba;
4904 struct iscsi_wrb *pwrb = NULL;
4905 unsigned int doorbell = 0;
dafab8e0 4906 unsigned int cid;
09a1093a 4907 unsigned int pwrb_typeoffset = 0;
6733b39a 4908
bfead3b2 4909 cid = beiscsi_conn->beiscsi_conn_cid;
6733b39a 4910 pwrb = io_task->pwrb_handle->pwrb;
caf818f1 4911 memset(pwrb, 0, sizeof(*pwrb));
09a1093a 4912
2c9dfd36 4913 if (is_chip_be2_be3r(phba)) {
09a1093a
JSJ
4914 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4915 be32_to_cpu(task->cmdsn));
4916 AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4917 io_task->pwrb_handle->wrb_index);
4918 AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4919 io_task->psgl_handle->sgl_index);
4920 AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
4921 task->data_count);
4922 AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4923 io_task->pwrb_handle->nxt_wrb_index);
4924 pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
2c9dfd36
JK
4925 } else {
4926 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
4927 be32_to_cpu(task->cmdsn));
4928 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
4929 io_task->pwrb_handle->wrb_index);
4930 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
4931 io_task->psgl_handle->sgl_index);
4932 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
4933 task->data_count);
4934 AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
4935 io_task->pwrb_handle->nxt_wrb_index);
4936 pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
09a1093a
JSJ
4937 }
4938
dafab8e0 4939
6733b39a
JK
4940 switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
4941 case ISCSI_OP_LOGIN:
6733b39a 4942 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
09a1093a 4943 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
6733b39a
JK
4944 hwi_write_buffer(pwrb, task);
4945 break;
4946 case ISCSI_OP_NOOP_OUT:
1390b01b 4947 if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
09a1093a 4948 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
2c9dfd36
JK
4949 if (is_chip_be2_be3r(phba))
4950 AMAP_SET_BITS(struct amap_iscsi_wrb,
09a1093a
JSJ
4951 dmsg, pwrb, 1);
4952 else
2c9dfd36 4953 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
09a1093a 4954 dmsg, pwrb, 1);
1390b01b 4955 } else {
09a1093a 4956 ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
2c9dfd36
JK
4957 if (is_chip_be2_be3r(phba))
4958 AMAP_SET_BITS(struct amap_iscsi_wrb,
09a1093a
JSJ
4959 dmsg, pwrb, 0);
4960 else
2c9dfd36 4961 AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
09a1093a 4962 dmsg, pwrb, 0);
1390b01b 4963 }
6733b39a
JK
4964 hwi_write_buffer(pwrb, task);
4965 break;
4966 case ISCSI_OP_TEXT:
09a1093a 4967 ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
6733b39a
JK
4968 hwi_write_buffer(pwrb, task);
4969 break;
4970 case ISCSI_OP_SCSI_TMFUNC:
09a1093a 4971 ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset);
6733b39a
JK
4972 hwi_write_buffer(pwrb, task);
4973 break;
4974 case ISCSI_OP_LOGOUT:
09a1093a 4975 ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset);
6733b39a
JK
4976 hwi_write_buffer(pwrb, task);
4977 break;
4978
4979 default:
99bc5d55
JSJ
4980 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4981 "BM_%d : opcode =%d Not supported\n",
4982 task->hdr->opcode & ISCSI_OPCODE_MASK);
4983
6733b39a
JK
4984 return -EINVAL;
4985 }
4986
09a1093a 4987 /* Set the task type */
2c9dfd36
JK
4988 io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
4989 AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
4990 AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);
6733b39a 4991
bfead3b2 4992 doorbell |= cid & DB_WRB_POST_CID_MASK;
32951dd8 4993 doorbell |= (io_task->pwrb_handle->wrb_index &
6733b39a
JK
4994 DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4995 doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
1e4be6ff
JK
4996 iowrite32(doorbell, phba->db_va +
4997 beiscsi_conn->doorbell_offset);
6733b39a
JK
4998 return 0;
4999}
5000
5001static int beiscsi_task_xmit(struct iscsi_task *task)
5002{
6733b39a
JK
5003 struct beiscsi_io_task *io_task = task->dd_data;
5004 struct scsi_cmnd *sc = task->sc;
09a1093a 5005 struct beiscsi_hba *phba = NULL;
6733b39a
JK
5006 struct scatterlist *sg;
5007 int num_sg;
5008 unsigned int writedir = 0, xferlen = 0;
5009
09a1093a
JSJ
5010 phba = ((struct beiscsi_conn *)task->conn->dd_data)->phba;
5011
6733b39a
JK
5012 if (!sc)
5013 return beiscsi_mtask(task);
5014
5015 io_task->scsi_cmnd = sc;
5016 num_sg = scsi_dma_map(sc);
5017 if (num_sg < 0) {
99bc5d55
JSJ
5022 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO,
5023 "BM_%d : scsi_dma_map Failed\n");
5024
6733b39a
JK
5025 return num_sg;
5026 }
6733b39a
JK
5027 xferlen = scsi_bufflen(sc);
5028 sg = scsi_sglist(sc);
99bc5d55 5029 if (sc->sc_data_direction == DMA_TO_DEVICE)
6733b39a 5030 writedir = 1;
99bc5d55 5031 else
6733b39a 5032 writedir = 0;
99bc5d55 5033
09a1093a 5034 return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
6733b39a
JK
5035}
5036
ffce3e2e
JK
5037/**
5038 * beiscsi_bsg_request - handle bsg request from ISCSI transport
5039 * @job: job to handle
5040 */
5041static int beiscsi_bsg_request(struct bsg_job *job)
5042{
5043 struct Scsi_Host *shost;
5044 struct beiscsi_hba *phba;
5045 struct iscsi_bsg_request *bsg_req = job->request;
5046 int rc = -EINVAL;
5047 unsigned int tag;
5048 struct be_dma_mem nonemb_cmd;
5049 struct be_cmd_resp_hdr *resp;
5050 struct iscsi_bsg_reply *bsg_reply = job->reply;
5051 unsigned short status, extd_status;
5052
5053 shost = iscsi_job_to_shost(job);
5054 phba = iscsi_host_priv(shost);
5055
5056 switch (bsg_req->msgcode) {
5057 case ISCSI_BSG_HST_VENDOR:
5058 nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
5059 job->request_payload.payload_len,
5060 &nonemb_cmd.dma);
5061 if (nonemb_cmd.va == NULL) {
99bc5d55
JSJ
5062 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
5063 "BM_%d : Failed to allocate memory for "
5064 "beiscsi_bsg_request\n");
8359c79b 5065 return -ENOMEM;
ffce3e2e
JK
 5066 		}
		/* record the allocation size for the pci_free_consistent() calls below */
		nonemb_cmd.size = job->request_payload.payload_len;
5067 tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
5068 &nonemb_cmd);
5069 if (!tag) {
99bc5d55 5070 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
8359c79b 5071 "BM_%d : MBX Tag Allocation Failed\n");
99bc5d55 5072
ffce3e2e
JK
5073 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
5074 nonemb_cmd.va, nonemb_cmd.dma);
5075 return -EAGAIN;
e175defe
JSJ
5076 }
5077
5078 rc = wait_event_interruptible_timeout(
5079 phba->ctrl.mcc_wait[tag],
5080 phba->ctrl.mcc_numtag[tag],
5081 msecs_to_jiffies(
5082 BEISCSI_HOST_MBX_TIMEOUT));
ffce3e2e
JK
5083 extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
5084 status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
5085 free_mcc_tag(&phba->ctrl, tag);
5086 resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
5087 sg_copy_from_buffer(job->reply_payload.sg_list,
5088 job->reply_payload.sg_cnt,
5089 nonemb_cmd.va, (resp->response_length
5090 + sizeof(*resp)));
5091 bsg_reply->reply_payload_rcv_len = resp->response_length;
5092 bsg_reply->result = status;
5093 bsg_job_done(job, bsg_reply->result,
5094 bsg_reply->reply_payload_rcv_len);
5095 pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
5096 nonemb_cmd.va, nonemb_cmd.dma);
5097 if (status || extd_status) {
99bc5d55 5098 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
8359c79b 5099 "BM_%d : MBX Cmd Failed"
99bc5d55
JSJ
5100 " status = %d extd_status = %d\n",
5101 status, extd_status);
5102
ffce3e2e 5103 return -EIO;
8359c79b
JSJ
5104 } else {
5105 rc = 0;
ffce3e2e
JK
5106 }
5107 break;
5108
5109 default:
99bc5d55
JSJ
5110 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
5111 "BM_%d : Unsupported bsg command: 0x%x\n",
5112 bsg_req->msgcode);
ffce3e2e
JK
5113 break;
5114 }
5115
5116 return rc;
5117}
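The ISCSI_BSG_HST_VENDOR path above waits on the MCC tag and then splits the completion word by hand. A minimal sketch of that decoding, assuming the layout used above (bits 15:8 carry the extended status, bits 7:0 the base status); the helper name is hypothetical:

static inline void beiscsi_mcc_decode_status(u32 numtag,
					     unsigned short *status,
					     unsigned short *extd_status)
{
	*extd_status = (numtag & 0x0000FF00) >> 8;	/* bits 15:8 */
	*status = numtag & 0x000000FF;			/* bits 7:0 */
}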
5118
99bc5d55
JSJ
5119void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
5120{
5121 /* Set the logging parameter */
5122 beiscsi_log_enable_init(phba, beiscsi_log_enable);
5123}
5124
4d4d1ef8
JSJ
 5125/**
 5126 * beiscsi_quiesce() - Clean up driver resources
 5127 * @phba: Instance Priv structure
 5128 *
 5129 * Free the OS and HW resources held by the driver
 5130 */
25602c97 5131static void beiscsi_quiesce(struct beiscsi_hba *phba)
6733b39a 5132{
bfead3b2
JK
5133 struct hwi_controller *phwi_ctrlr;
5134 struct hwi_context_memory *phwi_context;
5135 struct be_eq_obj *pbe_eq;
5136 unsigned int i, msix_vec;
6733b39a 5137
bfead3b2
JK
5138 phwi_ctrlr = phba->phwi_ctrlr;
5139 phwi_context = phwi_ctrlr->phwi_ctxt;
6733b39a 5140 hwi_disable_intr(phba);
bfead3b2
JK
5141 if (phba->msix_enabled) {
5142 for (i = 0; i <= phba->num_cpus; i++) {
5143 msix_vec = phba->msix_entries[i].vector;
5144 free_irq(msix_vec, &phwi_context->be_eq[i]);
8fcfb210 5145 kfree(phba->msi_name[i]);
bfead3b2
JK
5146 }
5147 } else
5148 if (phba->pcidev->irq)
5149 free_irq(phba->pcidev->irq, phba);
5150 pci_disable_msix(phba->pcidev);
6733b39a
JK
5151 destroy_workqueue(phba->wq);
5152 if (blk_iopoll_enabled)
bfead3b2
JK
5153 for (i = 0; i < phba->num_cpus; i++) {
5154 pbe_eq = &phwi_context->be_eq[i];
5155 blk_iopoll_disable(&pbe_eq->iopoll);
5156 }
6733b39a
JK
5157
5158 beiscsi_clean_port(phba);
5159 beiscsi_free_mem(phba);
e9b91193 5160
6733b39a
JK
5161 beiscsi_unmap_pci_function(phba);
5162 pci_free_consistent(phba->pcidev,
5163 phba->ctrl.mbox_mem_alloced.size,
5164 phba->ctrl.mbox_mem_alloced.va,
5165 phba->ctrl.mbox_mem_alloced.dma);
7a158003
JSJ
5166
5167 cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
25602c97
JK
5168}
5169
5170static void beiscsi_remove(struct pci_dev *pcidev)
5171{
5172
5173 struct beiscsi_hba *phba = NULL;
5174
5175 phba = pci_get_drvdata(pcidev);
5176 if (!phba) {
5177 dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
5178 return;
5179 }
5180
0e43895e 5181 beiscsi_destroy_def_ifaces(phba);
25602c97 5182 beiscsi_quiesce(phba);
9d045163 5183 iscsi_boot_destroy_kset(phba->boot_kset);
6733b39a
JK
5184 iscsi_host_remove(phba->shost);
5185 pci_dev_put(phba->pcidev);
5186 iscsi_host_free(phba->shost);
8dce69ff 5187 pci_disable_device(pcidev);
6733b39a
JK
5188}
5189
25602c97
JK
5190static void beiscsi_shutdown(struct pci_dev *pcidev)
5191{
5192
5193 struct beiscsi_hba *phba = NULL;
5194
 5195 	phba = pci_get_drvdata(pcidev);
5196 if (!phba) {
5197 dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
5198 return;
5199 }
5200
5201 beiscsi_quiesce(phba);
8dce69ff 5202 pci_disable_device(pcidev);
25602c97
JK
5203}
5204
bfead3b2
JK
5205static void beiscsi_msix_enable(struct beiscsi_hba *phba)
5206{
5207 int i, status;
5208
5209 for (i = 0; i <= phba->num_cpus; i++)
5210 phba->msix_entries[i].entry = i;
5211
5212 status = pci_enable_msix(phba->pcidev, phba->msix_entries,
5213 (phba->num_cpus + 1));
5214 if (!status)
5215 phba->msix_enabled = true;
5216
5217 return;
5218}
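beiscsi_msix_enable() requests num_cpus + 1 vectors; the probe and quiesce paths treat the first num_cpus event queues as per-CPU I/O queues and, when MSI-X is enabled, use the extra one for MCC handling. A minimal sketch of that assumed layout; the helper name is hypothetical:

/* Assumed EQ/vector layout, matching the loops in probe and quiesce:
 * entries 0 .. num_cpus - 1 -> per-CPU I/O event queues
 * entry   num_cpus          -> MCC (management) event queue
 */
static inline unsigned int beiscsi_mcc_eq_index(struct beiscsi_hba *phba)
{
	return phba->msix_enabled ? phba->num_cpus : 0;
}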
5219
7a158003
JSJ
 5220/**
 5221 * beiscsi_hw_health_check() - Check adapter health
 5222 * @work: work item to check HW health
 5223 *
 5224 * Check whether the adapter is in an unrecoverable state.
 5225 */
5226static void
5227beiscsi_hw_health_check(struct work_struct *work)
5228{
5229 struct beiscsi_hba *phba =
5230 container_of(work, struct beiscsi_hba,
5231 beiscsi_hw_check_task.work);
5232
5233 beiscsi_ue_detect(phba);
5234
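	/* Re-arm the check so it runs again in one second. */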
5235 schedule_delayed_work(&phba->beiscsi_hw_check_task,
5236 msecs_to_jiffies(1000));
5237}
5238
6f039790
GKH
5239static int beiscsi_dev_probe(struct pci_dev *pcidev,
5240 const struct pci_device_id *id)
6733b39a
JK
5241{
5242 struct beiscsi_hba *phba = NULL;
bfead3b2
JK
5243 struct hwi_controller *phwi_ctrlr;
5244 struct hwi_context_memory *phwi_context;
5245 struct be_eq_obj *pbe_eq;
107dfcba 5246 int ret, i;
6733b39a
JK
5247
5248 ret = beiscsi_enable_pci(pcidev);
5249 if (ret < 0) {
99bc5d55
JSJ
5250 dev_err(&pcidev->dev,
5251 "beiscsi_dev_probe - Failed to enable pci device\n");
6733b39a
JK
5252 return ret;
5253 }
5254
5255 phba = beiscsi_hba_alloc(pcidev);
5256 if (!phba) {
99bc5d55
JSJ
5257 dev_err(&pcidev->dev,
5258 "beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
6733b39a
JK
5259 goto disable_pci;
5260 }
5261
99bc5d55
JSJ
 5262 	/* Initialize driver configuration parameters */
5263 beiscsi_hba_attrs_init(phba);
5264
e175defe 5265 phba->fw_timeout = false;
6c83185a 5266 phba->mac_addr_set = false;
e175defe
JSJ
5267
5268
f98c96b0
JK
5269 switch (pcidev->device) {
5270 case BE_DEVICE_ID1:
5271 case OC_DEVICE_ID1:
5272 case OC_DEVICE_ID2:
5273 phba->generation = BE_GEN2;
09a1093a 5274 phba->iotask_fn = beiscsi_iotask;
f98c96b0
JK
5275 break;
5276 case BE_DEVICE_ID2:
5277 case OC_DEVICE_ID3:
5278 phba->generation = BE_GEN3;
09a1093a 5279 phba->iotask_fn = beiscsi_iotask;
f98c96b0 5280 break;
139a1b1e
JSJ
5281 case OC_SKH_ID1:
5282 phba->generation = BE_GEN4;
09a1093a 5283 phba->iotask_fn = beiscsi_iotask_v2;
bf9131cb 5284 break;
f98c96b0
JK
5285 default:
5286 phba->generation = 0;
5287 }
5288
6733b39a
JK
5289 ret = be_ctrl_init(phba, pcidev);
5290 if (ret) {
99bc5d55
JSJ
5291 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5292 "BM_%d : beiscsi_dev_probe-"
5293 "Failed in be_ctrl_init\n");
6733b39a
JK
5294 goto hba_free;
5295 }
5296
4d4d1ef8
JSJ
5297 ret = beiscsi_cmd_reset_function(phba);
5298 if (ret) {
5299 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
92665a66 5300 "BM_%d : Reset Failed\n");
4d4d1ef8
JSJ
5301 goto hba_free;
5302 }
5303 ret = be_chk_reset_complete(phba);
5304 if (ret) {
5305 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
92665a66 5306 "BM_%d : Failed to get out of reset.\n");
4d4d1ef8 5307 goto hba_free;
e9b91193
JK
5308 }
5309
6733b39a
JK
5310 spin_lock_init(&phba->io_sgl_lock);
5311 spin_lock_init(&phba->mgmt_sgl_lock);
5312 spin_lock_init(&phba->isr_lock);
8f09a3b9 5313 spin_lock_init(&phba->async_pdu_lock);
7da50879
JK
5314 ret = mgmt_get_fw_config(&phba->ctrl, phba);
5315 if (ret != 0) {
99bc5d55
JSJ
5316 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5317 "BM_%d : Error getting fw config\n");
7da50879
JK
5318 goto free_port;
5319 }
68c26a3a
JK
5320
5321 if (enable_msix)
5322 find_num_cpus(phba);
5323 else
5324 phba->num_cpus = 1;
5325
5326 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
5327 "BM_%d : num_cpus = %d\n",
5328 phba->num_cpus);
5329
5330 if (enable_msix) {
5331 beiscsi_msix_enable(phba);
5332 if (!phba->msix_enabled)
5333 phba->num_cpus = 1;
5334 }
5335
843ae752 5336 phba->shost->max_id = phba->params.cxns_per_ctrl;
6733b39a 5337 beiscsi_get_params(phba);
aa874f07 5338 phba->shost->can_queue = phba->params.ios_per_ctrl;
6733b39a
JK
5339 ret = beiscsi_init_port(phba);
5340 if (ret < 0) {
99bc5d55
JSJ
5341 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5342 "BM_%d : beiscsi_dev_probe-"
5343 "Failed in beiscsi_init_port\n");
6733b39a
JK
5344 goto free_port;
5345 }
5346
756d29c8
JK
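	/* Pre-populate the MCC tag pool: tags 1..MAX_MCC_CMD start out available
	 * with no completion recorded against them.
	 */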
 5347 	for (i = 0; i < MAX_MCC_CMD; i++) {
5348 init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
5349 phba->ctrl.mcc_tag[i] = i + 1;
5350 phba->ctrl.mcc_numtag[i + 1] = 0;
5351 phba->ctrl.mcc_tag_available++;
5352 }
5353
5354 phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
5355
72fb46a9 5356 snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq",
6733b39a 5357 phba->shost->host_no);
d8537548 5358 phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, phba->wq_name);
6733b39a 5359 if (!phba->wq) {
99bc5d55
JSJ
5360 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5361 "BM_%d : beiscsi_dev_probe-"
5362 "Failed to allocate work queue\n");
6733b39a
JK
5363 goto free_twq;
5364 }
5365
7a158003
JSJ
5366 INIT_DELAYED_WORK(&phba->beiscsi_hw_check_task,
5367 beiscsi_hw_health_check);
6733b39a 5368
bfead3b2
JK
5369 phwi_ctrlr = phba->phwi_ctrlr;
5370 phwi_context = phwi_ctrlr->phwi_ctxt;
72fb46a9 5371
6733b39a 5372 if (blk_iopoll_enabled) {
bfead3b2
JK
5373 for (i = 0; i < phba->num_cpus; i++) {
5374 pbe_eq = &phwi_context->be_eq[i];
5375 blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
5376 be_iopoll);
5377 blk_iopoll_enable(&pbe_eq->iopoll);
5378 }
72fb46a9
JSJ
5379
5380 i = (phba->msix_enabled) ? i : 0;
5381 /* Work item for MCC handling */
5382 pbe_eq = &phwi_context->be_eq[i];
5383 INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
5384 } else {
5385 if (phba->msix_enabled) {
5386 for (i = 0; i <= phba->num_cpus; i++) {
5387 pbe_eq = &phwi_context->be_eq[i];
5388 INIT_WORK(&pbe_eq->work_cqs,
5389 beiscsi_process_all_cqs);
5390 }
5391 } else {
5392 pbe_eq = &phwi_context->be_eq[0];
5393 INIT_WORK(&pbe_eq->work_cqs,
5394 beiscsi_process_all_cqs);
5395 }
6733b39a 5396 }
72fb46a9 5397
6733b39a
JK
5398 ret = beiscsi_init_irqs(phba);
5399 if (ret < 0) {
99bc5d55
JSJ
5400 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5401 "BM_%d : beiscsi_dev_probe-"
5402 "Failed to beiscsi_init_irqs\n");
6733b39a
JK
5403 goto free_blkenbld;
5404 }
238f6b72 5405 hwi_enable_intr(phba);
f457a46f
MC
5406
5407 if (beiscsi_setup_boot_info(phba))
5408 /*
5409 * log error but continue, because we may not be using
5410 * iscsi boot.
5411 */
99bc5d55
JSJ
5412 beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5413 "BM_%d : Could not set up "
5414 "iSCSI boot info.\n");
f457a46f 5415
0e43895e 5416 beiscsi_create_def_ifaces(phba);
7a158003
JSJ
5417 schedule_delayed_work(&phba->beiscsi_hw_check_task,
5418 msecs_to_jiffies(1000));
5419
99bc5d55
JSJ
5420 beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
5421 "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
6733b39a
JK
5422 return 0;
5423
6733b39a
JK
5424free_blkenbld:
5425 destroy_workqueue(phba->wq);
5426 if (blk_iopoll_enabled)
bfead3b2
JK
5427 for (i = 0; i < phba->num_cpus; i++) {
5428 pbe_eq = &phwi_context->be_eq[i];
5429 blk_iopoll_disable(&pbe_eq->iopoll);
5430 }
6733b39a
JK
5431free_twq:
5432 beiscsi_clean_port(phba);
5433 beiscsi_free_mem(phba);
5434free_port:
5435 pci_free_consistent(phba->pcidev,
5436 phba->ctrl.mbox_mem_alloced.size,
5437 phba->ctrl.mbox_mem_alloced.va,
5438 phba->ctrl.mbox_mem_alloced.dma);
5439 beiscsi_unmap_pci_function(phba);
5440hba_free:
238f6b72
JK
5441 if (phba->msix_enabled)
5442 pci_disable_msix(phba->pcidev);
6733b39a
JK
5443 iscsi_host_remove(phba->shost);
5444 pci_dev_put(phba->pcidev);
5445 iscsi_host_free(phba->shost);
5446disable_pci:
5447 pci_disable_device(pcidev);
5448 return ret;
5449}
5450
5451struct iscsi_transport beiscsi_iscsi_transport = {
5452 .owner = THIS_MODULE,
5453 .name = DRV_NAME,
9db0fb3a 5454 .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
6733b39a 5455 CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
6733b39a
JK
5456 .create_session = beiscsi_session_create,
5457 .destroy_session = beiscsi_session_destroy,
5458 .create_conn = beiscsi_conn_create,
5459 .bind_conn = beiscsi_conn_bind,
5460 .destroy_conn = iscsi_conn_teardown,
3128c6c7 5461 .attr_is_visible = be2iscsi_attr_is_visible,
0e43895e
MC
5462 .set_iface_param = be2iscsi_iface_set_param,
5463 .get_iface_param = be2iscsi_iface_get_param,
6733b39a 5464 .set_param = beiscsi_set_param,
c7f7fd5b 5465 .get_conn_param = iscsi_conn_get_param,
6733b39a
JK
5466 .get_session_param = iscsi_session_get_param,
5467 .get_host_param = beiscsi_get_host_param,
5468 .start_conn = beiscsi_conn_start,
fa95d206 5469 .stop_conn = iscsi_conn_stop,
6733b39a
JK
5470 .send_pdu = iscsi_conn_send_pdu,
5471 .xmit_task = beiscsi_task_xmit,
5472 .cleanup_task = beiscsi_cleanup_task,
5473 .alloc_pdu = beiscsi_alloc_pdu,
5474 .parse_pdu_itt = beiscsi_parse_pdu,
5475 .get_stats = beiscsi_conn_get_stats,
c7f7fd5b 5476 .get_ep_param = beiscsi_ep_get_param,
6733b39a
JK
5477 .ep_connect = beiscsi_ep_connect,
5478 .ep_poll = beiscsi_ep_poll,
5479 .ep_disconnect = beiscsi_ep_disconnect,
5480 .session_recovery_timedout = iscsi_session_recovery_timedout,
ffce3e2e 5481 .bsg_request = beiscsi_bsg_request,
6733b39a
JK
5482};
5483
5484static struct pci_driver beiscsi_pci_driver = {
5485 .name = DRV_NAME,
5486 .probe = beiscsi_dev_probe,
5487 .remove = beiscsi_remove,
25602c97 5488 .shutdown = beiscsi_shutdown,
6733b39a
JK
5489 .id_table = beiscsi_pci_id_table
5490};
5491
bfead3b2 5492
6733b39a
JK
5493static int __init beiscsi_module_init(void)
5494{
5495 int ret;
5496
5497 beiscsi_scsi_transport =
5498 iscsi_register_transport(&beiscsi_iscsi_transport);
5499 if (!beiscsi_scsi_transport) {
99bc5d55
JSJ
5500 printk(KERN_ERR
5501 "beiscsi_module_init - Unable to register beiscsi transport.\n");
f55a24f2 5502 return -ENOMEM;
6733b39a 5503 }
99bc5d55
JSJ
5504 printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
5505 &beiscsi_iscsi_transport);
6733b39a
JK
5506
5507 ret = pci_register_driver(&beiscsi_pci_driver);
5508 if (ret) {
99bc5d55
JSJ
5509 printk(KERN_ERR
5510 "beiscsi_module_init - Unable to register beiscsi pci driver.\n");
6733b39a
JK
5511 goto unregister_iscsi_transport;
5512 }
5513 return 0;
5514
5515unregister_iscsi_transport:
5516 iscsi_unregister_transport(&beiscsi_iscsi_transport);
5517 return ret;
5518}
5519
5520static void __exit beiscsi_module_exit(void)
5521{
5522 pci_unregister_driver(&beiscsi_pci_driver);
5523 iscsi_unregister_transport(&beiscsi_iscsi_transport);
5524}
5525
5526module_init(beiscsi_module_init);
5527module_exit(beiscsi_module_exit);