/*
 * Universal Flash Storage Host controller driver Core
 *
 * This code is based on drivers/scsi/ufs/ufshcd.c
 * Copyright (C) 2011-2013 Samsung India Software Operations
 *
 * Authors:
 *	Santosh Yaraganavi <santosh.sy@samsung.com>
 *	Vinayak Holikatti <h.vinayak@samsung.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 * See the COPYING file in the top-level directory or visit
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This program is provided "AS IS" and "WITH ALL FAULTS" and
 * without warranty of any kind. You are solely responsible for
 * determining the appropriateness of using and distributing
 * the program and assume all risks associated with your exercise
 * of rights with respect to the program, including but not limited
 * to infringement of third party rights, the risks and costs of
 * program errors, damage to or loss of data, programs or equipment,
 * and unavailability or interruption of operations. Under no
 * circumstances will the contributor of this Program be liable for
 * any damages of any kind arising from your use or distribution of
 * this program.
 */

#include <linux/async.h>

#include "ufshcd.h"
#include "unipro.h"

#define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
				 UTP_TASK_REQ_COMPL |\
				 UIC_POWER_MODE |\
				 UFSHCD_ERROR_MASK)
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT	500

/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES    10
/* Timeout after 30 msecs if NOP OUT hangs without response */
#define NOP_OUT_TIMEOUT    30 /* msecs */

/* Query request retries */
#define QUERY_REQ_RETRIES 10
/* Query request timeout */
#define QUERY_REQ_TIMEOUT 30 /* msec */

/* Task management command timeout */
#define TM_CMD_TIMEOUT	100 /* msecs */

/* Expose the flag value from utp_upiu_query.value */
#define MASK_QUERY_UPIU_FLAG_LOC 0xFF

/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO	0x02

enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
	UFSHCD_MAX_LUNS		= 8,
	UFSHCD_CMD_PER_LUN	= 32,
	UFSHCD_CAN_QUEUE	= 32,
};

/* UFSHCD states */
enum {
	UFSHCD_STATE_RESET,
	UFSHCD_STATE_ERROR,
	UFSHCD_STATE_OPERATIONAL,
};

/* UFSHCD error handling flags */
enum {
	UFSHCD_EH_IN_PROGRESS = (1 << 0),
};

/* UFSHCD UIC layer error flags */
enum {
	UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
	UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
	UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport layer error */
	UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
};

/* Interrupt configuration options */
enum {
	UFSHCD_INT_DISABLE,
	UFSHCD_INT_ENABLE,
	UFSHCD_INT_CLEAR,
};

#define ufshcd_set_eh_in_progress(h) \
	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
	(h->eh_flags & UFSHCD_EH_IN_PROGRESS)
#define ufshcd_clear_eh_in_progress(h) \
	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)

static void ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba,
				struct scsi_device *sdev);

/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba - per-adapter interface
 * @reg - mmio register offset
 * @mask - mask to apply to read register value
 * @val - wait condition
 * @interval_us - polling interval in microsecs
 * @timeout_ms - timeout in millisecs
 *
 * Returns -ETIMEDOUT on error, zero on success
 */
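/*
 * Example use (see ufshcd_clear_cmd() below): poll the transfer request
 * doorbell until a cleared tag's bit reads back as 0, sampling every
 * 1000us for at most 1000ms.
 */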
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
		u32 val, unsigned long interval_us, unsigned long timeout_ms)
{
	int err = 0;
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		/* wakeup within 50us of expiry */
		usleep_range(interval_us, interval_us + 50);

		if (time_after(jiffies, timeout)) {
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}

/**
 * ufshcd_get_intr_mask - Get the interrupt bit mask
 * @hba - Pointer to adapter instance
 *
 * Returns interrupt bit mask per version
 */
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
	if (hba->ufs_version == UFSHCI_VERSION_10)
		return INTERRUPT_MASK_ALL_VER_10;
	else
		return INTERRUPT_MASK_ALL_VER_11;
}

/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 * @hba - Pointer to adapter instance
 *
 * Returns UFSHCI version supported by the controller
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UFS_VERSION);
}

/**
 * ufshcd_is_device_present - Check if any device is connected to
 *			      the host controller
 * @reg_hcs - host controller status register value
 *
 * Returns 1 if device present, 0 if no device detected
 */
static inline int ufshcd_is_device_present(u32 reg_hcs)
{
	return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
}

/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 * @lrbp: pointer to local command reference block
 *
 * This function is used to get the OCS field from UTRD
 * Returns the OCS field in the UTRD
 */
static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
{
	return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
 * @task_req_descp: pointer to utp_task_req_desc structure
 *
 * This function is used to get the OCS field from UTMRD
 * Returns the OCS field in the UTMRD
 */
static inline int
ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
{
	return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
}

/**
 * ufshcd_get_tm_free_slot - get a free slot for task management request
 * @hba: per adapter instance
 * @free_slot: pointer to variable with available slot value
 *
 * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
 * Returns false if a free slot is not available, else returns true with
 * the tag value in @free_slot.
 */
static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

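	/*
	 * The search and the claim are not atomic, so retry until
	 * test_and_set_bit_lock() actually wins the slot that
	 * find_first_zero_bit() reported as free.
	 */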
	do {
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit_unlock(slot, &hba->tm_slots_in_use);
}

/**
 * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
 * @hba: per adapter instance
 * @pos: position of the bit to be cleared
 */
static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
{
	ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
}

/**
 * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
 * @reg: Register value of host controller status
 *
 * Returns 0 on success and a positive value on failure
 */
static inline int ufshcd_get_lists_status(u32 reg)
{
	/*
	 * The mask 0xFF is for the following HCS register bits
	 * Bit		Description
	 *  0		Device Present
	 *  1		UTRLRDY
	 *  2		UTMRLRDY
	 *  3		UCRDY
	 *  4		HEI
	 *  5		DEI
	 * 6-7		reserved
	 */
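	/*
	 * Shifting out the Device Present bit leaves UTRLRDY, UTMRLRDY and
	 * UCRDY in bits 0-2; XOR with 0x07 is therefore 0 only when all
	 * three ready bits are set and HEI/DEI are clear.
	 */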
	return (((reg) & (0xFF)) >> 1) ^ (0x07);
}

/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 * @hba: Pointer to adapter instance
 *
 * This function gets the result of UIC command completion
 * Returns 0 on success, non zero value on error
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}

/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 * @hba: Pointer to adapter instance
 *
 * Returns the value of UIC command argument3, which holds the attribute
 * value read back from the DME.
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}

/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 * @ucd_rsp_ptr: pointer to response UPIU
 */
static inline int
ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
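	/* The transaction type is the first byte of the UPIU (bits 31:24 of dword_0) */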
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}

/**
 * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * This function gets the response status and scsi_status from response UPIU
 * Returns the response result code.
 */
static inline int
ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}

/*
 * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
 *				from response UPIU
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * Return the data segment length.
 */
static inline unsigned int
ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
		MASK_RSP_UPIU_DATA_SEG_LEN;
}

/**
 * ufshcd_is_exception_event - Check if the device raised an exception event
 * @ucd_rsp_ptr: pointer to response UPIU
 *
 * The function checks if the device raised an exception event indicated in
 * the Device Information field of response UPIU.
 *
 * Returns true if exception is raised, false otherwise.
 */
static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
			MASK_RSP_EXCEPTION_EVENT ? true : false;
}

/**
 * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
 * @hba: per adapter instance
 */
static inline void
ufshcd_reset_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE |
		      INT_AGGR_COUNTER_AND_TIMER_RESET,
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
 * @hba: per adapter instance
 * @cnt: Interrupt aggregation counter threshold
 * @tmout: Interrupt aggregation timeout value
 */
static inline void
ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
{
	ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
		      INT_AGGR_COUNTER_THLD_VAL(cnt) |
		      INT_AGGR_TIMEOUT_VAL(tmout),
		      REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}

/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers.
 * When the run-stop registers are set to 1, it indicates to the
 * host controller that it can process requests
 * @hba: per adapter instance
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}

/**
 * ufshcd_hba_start - Start controller initialization sequence
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}

/**
 * ufshcd_is_hba_active - Get controller state
 * @hba: per adapter instance
 *
 * Returns zero if controller is active, 1 otherwise
 */
static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}

/**
 * ufshcd_send_command - Send SCSI or device management commands
 * @hba: per adapter instance
 * @task_tag: Task tag of the command
 */
static inline
void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
	__set_bit(task_tag, &hba->outstanding_reqs);
	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
}

/**
 * ufshcd_copy_sense_data - Copy sense data in case of check condition
 * @lrb - pointer to local reference block
 */
static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
	int len;
	if (lrbp->sense_buffer &&
	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
		memcpy(lrbp->sense_buffer,
			lrbp->ucd_rsp_ptr->sr.sense_data,
			min_t(int, len, SCSI_SENSE_BUFFERSIZE));
	}
}

/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 * @hba: per adapter instance
 * @lrb - pointer to local reference block
 */
static
int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
				GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
						MASK_QUERY_DATA_SEG_LEN;
		buf_len = hba->dev_cmd.query.request.upiu_req.length;
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				"%s: Response size is bigger than buffer",
				__func__);
			return -EINVAL;
		}
	}

	return 0;
}

/**
 * ufshcd_hba_capabilities - Read controller capabilities
 * @hba: per adapter instance
 */
static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
{
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);

	/* nutrs and nutmrs are 0 based values */
	hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
	hba->nutmrs =
	((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
}

/**
 * ufshcd_ready_for_uic_cmd - Check if controller is ready
 *                            to accept UIC commands
 * @hba: per adapter instance
 * Return true on success, else false
 */
static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
{
	if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
		return true;
	else
		return false;
}

/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 * @hba: Pointer to adapter instance
 *
 * This function gets the UPMCRS field of HCS register
 * Returns value of UPMCRS field
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
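	/* UPMCRS is bits 10:8 of the host controller status register */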
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}

/**
 * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Mutex must be held.
 */
static inline void
ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	WARN_ON(hba->active_uic_cmd);

	hba->active_uic_cmd = uic_cmd;

	/* Write Args */
	ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
	ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
	ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);

	/* Write UIC Cmd */
	ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
		      REG_UIC_COMMAND);
}

/**
 * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
 * @hba: per adapter instance
 * @uic_command: UIC command
 *
 * Must be called with mutex held.
 * Returns 0 only if success.
 */
static int
ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (wait_for_completion_timeout(&uic_cmd->done,
					msecs_to_jiffies(UIC_CMD_TIMEOUT)))
		ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
	else
		ret = -ETIMEDOUT;

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->active_uic_cmd = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

/**
 * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
 * @hba: per adapter instance
 * @uic_cmd: UIC command
 *
 * Identical to ufshcd_send_uic_cmd() except that the caller must already
 * hold the mutex.
 * Returns 0 only if success.
 */
static int
__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
{
	int ret;
	unsigned long flags;

	if (!ufshcd_ready_for_uic_cmd(hba)) {
		dev_err(hba->dev,
			"Controller not ready to accept UIC commands\n");
		return -EIO;
	}

	init_completion(&uic_cmd->done);

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_dispatch_uic_cmd(hba, uic_cmd);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);

	return ret;
}

603
604/**
605 * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
606 * @hba: per adapter instance
607 * @uic_cmd: UIC command
608 *
609 * Returns 0 only if success.
610 */
611static int
612ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
613{
614 int ret;
615
616 mutex_lock(&hba->uic_cmd_mutex);
617 ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
618 mutex_unlock(&hba->uic_cmd_mutex);
619
620 return ret;
621}
622
/**
 * ufshcd_map_sg - Map scatter-gather list to prdt
 * @lrbp - pointer to local reference block
 *
 * Returns 0 in case of success, non-zero value in case of failure
 */
static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
{
	struct ufshcd_sg_entry *prd_table;
	struct scatterlist *sg;
	struct scsi_cmnd *cmd;
	int sg_segments;
	int i;

	cmd = lrbp->cmd;
	sg_segments = scsi_dma_map(cmd);
	if (sg_segments < 0)
		return sg_segments;

	if (sg_segments) {
		lrbp->utr_descriptor_ptr->prd_table_length =
			cpu_to_le16((u16) (sg_segments));

		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;

		scsi_for_each_sg(cmd, sg, sg_segments, i) {
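			/* The PRDT data byte count field is a 0-based value */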
			prd_table[i].size =
				cpu_to_le32(((u32) sg_dma_len(sg))-1);
			prd_table[i].base_addr =
				cpu_to_le32(lower_32_bits(sg->dma_address));
			prd_table[i].upper_addr =
				cpu_to_le32(upper_32_bits(sg->dma_address));
		}
	} else {
		lrbp->utr_descriptor_ptr->prd_table_length = 0;
	}

	return 0;
}

/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

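	/*
	 * For UFSHCI 1.0 only the INTERRUPT_MASK_RW_VER_10 bits are plain
	 * read/write; keep those and OR in just the requested bits that
	 * are not already set ((set ^ intrs) & intrs == intrs & ~set).
	 */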
	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_disable_intr - disable interrupts
 * @hba: per adapter instance
 * @intrs: interrupt bits
 */
static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);

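	/*
	 * For UFSHCI 1.0, clear the requested bits among the read/write
	 * interrupt-enable bits; the requested bits outside that mask are
	 * written back only where they are currently set.
	 */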
	if (hba->ufs_version == UFSHCI_VERSION_10) {
		u32 rw;
		rw = (set & INTERRUPT_MASK_RW_VER_10) &
			~(intrs & INTERRUPT_MASK_RW_VER_10);
		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);

	} else {
		set &= ~intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}

/**
 * ufshcd_prepare_req_desc_hdr() - Fills the request's header
 * descriptor according to the request
 * @lrbp: pointer to local reference block
 * @upiu_flags: flags required in the header
 * @cmd_dir: request's data direction
 */
static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
		u32 *upiu_flags, enum dma_data_direction cmd_dir)
{
	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
	u32 data_direction;
	u32 dword_0;

	if (cmd_dir == DMA_FROM_DEVICE) {
		data_direction = UTP_DEVICE_TO_HOST;
		*upiu_flags = UPIU_CMD_FLAGS_READ;
	} else if (cmd_dir == DMA_TO_DEVICE) {
		data_direction = UTP_HOST_TO_DEVICE;
		*upiu_flags = UPIU_CMD_FLAGS_WRITE;
	} else {
		data_direction = UTP_NO_DATA_TRANSFER;
		*upiu_flags = UPIU_CMD_FLAGS_NONE;
	}

	dword_0 = data_direction | (lrbp->command_type
				<< UPIU_COMMAND_TYPE_OFFSET);
	if (lrbp->intr_cmd)
		dword_0 |= UTP_REQ_DESC_INT_CMD;

	/* Transfer request descriptor header fields */
	req_desc->header.dword_0 = cpu_to_le32(dword_0);

	/*
	 * assigning invalid value for command status. Controller
	 * updates OCS on command completion, with the command
	 * status
	 */
	req_desc->header.dword_2 =
		cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
}

/**
 * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
 * for scsi commands
 * @lrbp - local reference block pointer
 * @upiu_flags - flags
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
				UPIU_TRANSACTION_COMMAND, upiu_flags,
				lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
				UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len =
		cpu_to_be32(lrbp->cmd->sdb.length);

	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
		(min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
}

/**
 * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
 * for query requests
 * @hba: UFS hba
 * @lrbp: local reference block pointer
 * @upiu_flags: flags
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
				struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);
	u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;

	/* Query request header */
	ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
			lrbp->lun, lrbp->task_tag);
	ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
			0, query->request.query_func, 0, 0);

	/* Data segment length */
	ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
			0, 0, len >> 8, (u8)len);

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
			QUERY_OSF_SIZE);

	/* Copy the Descriptor */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		memcpy(descp, query->descriptor, len);

}

static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
{
	struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(
			UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
}

/**
 * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
 * @hba - per adapter instance
 * @lrb - pointer to local reference block
 */
static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	u32 upiu_flags;
	int ret = 0;

	switch (lrbp->command_type) {
	case UTP_CMD_TYPE_SCSI:
		if (likely(lrbp->cmd)) {
			ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
					lrbp->cmd->sc_data_direction);
			ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
		} else {
			ret = -EINVAL;
		}
		break;
	case UTP_CMD_TYPE_DEV_MANAGE:
		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
		if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
			ufshcd_prepare_utp_query_req_upiu(
					hba, lrbp, upiu_flags);
		else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
			ufshcd_prepare_utp_nop_upiu(lrbp);
		else
			ret = -EINVAL;
		break;
	case UTP_CMD_TYPE_UFS:
		/* For UFS native command implementation */
		ret = -ENOTSUPP;
		dev_err(hba->dev, "%s: UFS native commands are not supported\n",
			__func__);
		break;
	default:
		ret = -ENOTSUPP;
		dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
				__func__, lrbp->command_type);
		break;
	} /* end of switch */

	return ret;
}

/**
 * ufshcd_queuecommand - main entry point for SCSI requests
 * @host: SCSI host pointer
 * @cmd: command from SCSI Midlayer
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
{
	struct ufshcd_lrb *lrbp;
	struct ufs_hba *hba;
	unsigned long flags;
	int tag;
	int err = 0;

	hba = shost_priv(host);

	tag = cmd->request->tag;

	spin_lock_irqsave(hba->host->host_lock, flags);
	switch (hba->ufshcd_state) {
	case UFSHCD_STATE_OPERATIONAL:
		break;
	case UFSHCD_STATE_RESET:
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out_unlock;
	case UFSHCD_STATE_ERROR:
		set_host_byte(cmd, DID_ERROR);
		cmd->scsi_done(cmd);
		goto out_unlock;
	default:
		dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
				__func__, hba->ufshcd_state);
		set_host_byte(cmd, DID_BAD_TARGET);
		cmd->scsi_done(cmd);
		goto out_unlock;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* acquire the tag to make sure device cmds don't use it */
	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
		/*
		 * Dev manage command in progress, requeue the command.
		 * Requeuing the command helps in cases where the request *may*
		 * find different tag instead of waiting for dev manage command
		 * completion.
		 */
		err = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	lrbp = &hba->lrb[tag];

	WARN_ON(lrbp->cmd);
	lrbp->cmd = cmd;
	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	lrbp->sense_buffer = cmd->sense_buffer;
	lrbp->task_tag = tag;
	lrbp->lun = cmd->device->lun;
	lrbp->intr_cmd = false;
	lrbp->command_type = UTP_CMD_TYPE_SCSI;

	/* form UPIU before issuing the command */
	ufshcd_compose_upiu(hba, lrbp);
	err = ufshcd_map_sg(lrbp);
	if (err) {
		lrbp->cmd = NULL;
		clear_bit_unlock(tag, &hba->lrb_in_use);
		goto out;
	}

	/* issue command to the controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
out_unlock:
	spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
	return err;
}

948
5a0b0cb9
SRT
949static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
950 struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
951{
952 lrbp->cmd = NULL;
953 lrbp->sense_bufflen = 0;
954 lrbp->sense_buffer = NULL;
955 lrbp->task_tag = tag;
956 lrbp->lun = 0; /* device management cmd is not specific to any LUN */
957 lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
958 lrbp->intr_cmd = true; /* No interrupt aggregation */
959 hba->dev_cmd.type = cmd_type;
960
961 return ufshcd_compose_upiu(hba, lrbp);
962}
963
static int
ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	unsigned long flags;
	u32 mask = 1 << tag;

	/* clear outstanding transaction before retry */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_utrl_clear(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/*
	 * wait for h/w to clear corresponding bit in door-bell.
	 * max. wait is 1 sec.
	 */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TRANSFER_REQ_DOOR_BELL,
			mask, ~mask, 1000, 1000);

	return err;
}

static int
ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
				UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}

/**
 * ufshcd_dev_cmd_completion() - handles device management command responses
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block
 */
static int
ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int resp;
	int err = 0;

	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
			err = -EINVAL;
			dev_err(hba->dev, "%s: unexpected response %x\n",
					__func__, resp);
		}
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba, lrbp);
		if (!err)
			err = ufshcd_copy_query_response(hba, lrbp);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
				__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
				__func__, resp);
		break;
	}

	return err;
}

static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
		struct ufshcd_lrb *lrbp, int max_timeout)
{
	int err = 0;
	unsigned long time_left;
	unsigned long flags;

	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
			msecs_to_jiffies(max_timeout));

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->dev_cmd.complete = NULL;
	if (likely(time_left)) {
		err = ufshcd_get_tr_ocs(lrbp);
		if (!err)
			err = ufshcd_dev_cmd_completion(hba, lrbp);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (!time_left) {
		err = -ETIMEDOUT;
		if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
			/* successfully cleared the command, retry if needed */
			err = -EAGAIN;
	}

	return err;
}

/**
 * ufshcd_get_dev_cmd_tag - Get device management command tag
 * @hba: per-adapter instance
 * @tag_out: pointer to variable with available slot value
 *
 * Get a free slot and lock it until device management command
 * completes.
 *
 * Returns false if free slot is unavailable for locking, else
 * return true with tag value in @tag_out.
 */
static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
{
	int tag;
	bool ret = false;
	unsigned long tmp;

	if (!tag_out)
		goto out;

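	/*
	 * Scan from the top of the tag space (find_last_bit() on the
	 * complement of lrb_in_use), which makes collisions with SCSI
	 * request tags, generally handed out from the low end, less likely.
	 */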
	do {
		tmp = ~hba->lrb_in_use;
		tag = find_last_bit(&tmp, hba->nutrs);
		if (tag >= hba->nutrs)
			goto out;
	} while (test_and_set_bit_lock(tag, &hba->lrb_in_use));

	*tag_out = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
{
	clear_bit_unlock(tag, &hba->lrb_in_use);
}

/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 * @hba - UFS hba
 * @cmd_type - specifies the type (NOP, Query...)
 * @timeout - timeout in milliseconds
 *
 * NOTE: Since there is only one available tag for device management commands,
 * it is expected you hold the hba->dev_cmd.lock mutex.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
		enum dev_cmd_type cmd_type, int timeout)
{
	struct ufshcd_lrb *lrbp;
	int err;
	int tag;
	struct completion wait;
	unsigned long flags;

	/*
	 * Get free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by SCSI request timeout.
	 */
	wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));

	init_completion(&wait);
	lrbp = &hba->lrb[tag];
	WARN_ON(lrbp->cmd);
	err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
	if (unlikely(err))
		goto out_put_tag;

	hba->dev_cmd.complete = &wait;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_send_command(hba, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);

out_put_tag:
	ufshcd_put_dev_cmd_tag(hba, tag);
	wake_up(&hba->dev_cmd.tag_wq);
	return err;
}

/**
 * ufshcd_init_query() - init the query response and request parameters
 * @hba: per-adapter instance
 * @request: address of the request pointer to be initialized
 * @response: address of the response pointer to be initialized
 * @opcode: operation to perform
 * @idn: flag idn to access
 * @index: LU number to access
 * @selector: query/flag/descriptor further identification
 */
static inline void ufshcd_init_query(struct ufs_hba *hba,
		struct ufs_query_req **request, struct ufs_query_res **response,
		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
	*request = &hba->dev_cmd.query.request;
	*response = &hba->dev_cmd.query.response;
	memset(*request, 0, sizeof(struct ufs_query_req));
	memset(*response, 0, sizeof(struct ufs_query_res));
	(*request)->upiu_req.opcode = opcode;
	(*request)->upiu_req.idn = idn;
	(*request)->upiu_req.index = index;
	(*request)->upiu_req.selector = selector;
}

/**
 * ufshcd_query_flag() - API function for sending flag query requests
 * @hba: per-adapter instance
 * @opcode: flag query to perform
 * @idn: flag idn to access
 * @flag_res: the flag value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
			enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, index = 0, selector = 0;

	BUG_ON(!hba);

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
					__func__);
			err = -EINVAL;
			goto out_unlock;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out_unlock;
	}

	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
				MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
	return err;
}

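/*
 * A typical call (shown here only for illustration; ufshcd_complete_dev_init()
 * below issues exactly this query to read the fDeviceInit flag):
 *
 *	bool flag_res;
 *	int err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				    QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 */
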
/**
 * ufshcd_query_attr - API function for sending attribute requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @attr_val: the attribute value after the query request completes
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
			enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	if (!attr_val) {
		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		request->upiu_req.value = cpu_to_be32(*attr_val);
		break;
	case UPIU_QUERY_OPCODE_READ_ATTR:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
				__func__, opcode, idn, err);
		goto out_unlock;
	}

	*attr_val = be32_to_cpu(response->upiu_res.value);

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
out:
	return err;
}

/**
 * ufshcd_query_descriptor - API function for sending descriptor requests
 * @hba: per-adapter instance
 * @opcode: attribute opcode
 * @idn: attribute idn to access
 * @index: index field
 * @selector: selector field
 * @desc_buf: the buffer that contains the descriptor
 * @buf_len: length parameter passed to the device
 *
 * Returns 0 for success, non-zero in case of failure.
 * The buf_len parameter will contain, on return, the length parameter
 * received on the response.
 */
int ufshcd_query_descriptor(struct ufs_hba *hba,
			enum query_opcode opcode, enum desc_idn idn, u8 index,
			u8 selector, u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	BUG_ON(!hba);

	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
				__func__, *buf_len);
		err = -EINVAL;
		goto out;
	}

	mutex_lock(&hba->dev_cmd.lock);
	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			selector);
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = *buf_len;

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev,
				"%s: Expected query descriptor opcode but got = 0x%.2x\n",
				__func__, opcode);
		err = -EINVAL;
		goto out_unlock;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
				__func__, opcode, idn, err);
		goto out_unlock;
	}

	hba->dev_cmd.query.descriptor = NULL;
	*buf_len = response->upiu_res.length;

out_unlock:
	mutex_unlock(&hba->dev_cmd.lock);
out:
	return err;
}

/**
 * ufshcd_memory_alloc - allocate memory for host memory space data structures
 * @hba: per adapter instance
 *
 * 1. Allocate DMA memory for Command Descriptor array
 *	Each command descriptor consist of Command UPIU, Response UPIU and PRDT
 * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
 * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
 *	(UTMRDL)
 * 4. Allocate memory for local reference block(lrb).
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_memory_alloc(struct ufs_hba *hba)
{
	size_t utmrdl_size, utrdl_size, ucdl_size;

	/* Allocate memory for UTP command descriptors */
	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
						  ucdl_size,
						  &hba->ucdl_dma_addr,
						  GFP_KERNEL);

	/*
	 * UFSHCI requires UTP command descriptor to be 128 byte aligned.
	 * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
	 * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
	 * be aligned to 128 bytes as well
	 */
	if (!hba->ucdl_base_addr ||
	    WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Command Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Transfer descriptors
	 * UFSHCI requires 1024 byte alignment of UTRD
	 */
	utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
	hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
						   utrdl_size,
						   &hba->utrdl_dma_addr,
						   GFP_KERNEL);
	if (!hba->utrdl_base_addr ||
	    WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Transfer Descriptor Memory allocation failed\n");
		goto out;
	}

	/*
	 * Allocate memory for UTP Task Management descriptors
	 * UFSHCI requires 1024 byte alignment of UTMRD
	 */
	utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
	hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
						    utmrdl_size,
						    &hba->utmrdl_dma_addr,
						    GFP_KERNEL);
	if (!hba->utmrdl_base_addr ||
	    WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
		dev_err(hba->dev,
			"Task Management Descriptor Memory allocation failed\n");
		goto out;
	}

	/* Allocate memory for local reference block */
	hba->lrb = devm_kzalloc(hba->dev,
				hba->nutrs * sizeof(struct ufshcd_lrb),
				GFP_KERNEL);
	if (!hba->lrb) {
		dev_err(hba->dev, "LRB Memory allocation failed\n");
		goto out;
	}
	return 0;
out:
	return -ENOMEM;
}

/**
 * ufshcd_host_memory_configure - configure local reference block with
 *				memory offsets
 * @hba: per adapter instance
 *
 * Configure Host memory space
 * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
 *    address.
 * 2. Update each UTRD with Response UPIU offset, Response UPIU length
 *    and PRDT offset.
 * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
 *    into local reference block.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_cmd_desc *cmd_descp;
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	dma_addr_t cmd_desc_element_addr;
	u16 response_offset;
	u16 prdt_offset;
	int cmd_desc_size;
	int i;

	utrdlp = hba->utrdl_base_addr;
	cmd_descp = hba->ucdl_base_addr;

	response_offset =
		offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset =
		offsetof(struct utp_transfer_cmd_desc, prd_table);

	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
	cmd_desc_dma_addr = hba->ucdl_dma_addr;

	for (i = 0; i < hba->nutrs; i++) {
		/* Configure UTRD with command descriptor base address */
		cmd_desc_element_addr =
				(cmd_desc_dma_addr + (cmd_desc_size * i));
		utrdlp[i].command_desc_base_addr_lo =
				cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
		utrdlp[i].command_desc_base_addr_hi =
				cpu_to_le32(upper_32_bits(cmd_desc_element_addr));

		/* Response upiu and prdt offset should be in double words */
		utrdlp[i].response_upiu_offset =
				cpu_to_le16((response_offset >> 2));
		utrdlp[i].prd_table_offset =
				cpu_to_le16((prdt_offset >> 2));
		utrdlp[i].response_upiu_length =
				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);

		hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
		hba->lrb[i].ucd_req_ptr =
			(struct utp_upiu_req *)(cmd_descp + i);
		hba->lrb[i].ucd_rsp_ptr =
			(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
		hba->lrb[i].ucd_prdt_ptr =
			(struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
	}
}

/**
 * ufshcd_dme_link_startup - Notify Unipro to perform link startup
 * @hba: per adapter instance
 *
 * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
 * in order to initialize the Unipro link startup procedure.
 * Once the Unipro links are up, the device connected to the controller
 * is detected.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_dme_link_startup(struct ufs_hba *hba)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev,
			"dme-link-startup: error code %d\n", ret);
	return ret;
}

/**
 * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @attr_set: attribute set type as uic command argument2
 * @mib_val: setting value as uic command argument3
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			u8 attr_set, u32 mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-set",
		"dme-peer-set"
	};
	const char *set = action[!!peer];
	int ret;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
	uic_cmd.argument1 = attr_sel;
	uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
	uic_cmd.argument3 = mib_val;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret)
		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
			set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);

/**
 * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
 * @hba: per adapter instance
 * @attr_sel: uic command argument1
 * @mib_val: the value of the attribute as returned by the UIC command
 * @peer: indicate whether peer or local
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			u32 *mib_val, u8 peer)
{
	struct uic_command uic_cmd = {0};
	static const char *const action[] = {
		"dme-get",
		"dme-peer-get"
	};
	const char *get = action[!!peer];
	int ret;

	uic_cmd.command = peer ?
		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret) {
		dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
			get, UIC_GET_ATTR_ID(attr_sel), ret);
		goto out;
	}

	if (mib_val)
		*mib_val = uic_cmd.argument3;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);

/**
 * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
 *				using DME_SET primitives.
 * @hba: per adapter instance
 * @mode: power mode value
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
{
	struct uic_command uic_cmd = {0};
	struct completion pwr_done;
	unsigned long flags;
	u8 status;
	int ret;

	uic_cmd.command = UIC_CMD_DME_SET;
	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
	uic_cmd.argument3 = mode;
	init_completion(&pwr_done);

	mutex_lock(&hba->uic_cmd_mutex);

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->pwr_done = &pwr_done;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	ret = __ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (ret) {
		dev_err(hba->dev,
			"pwr mode change with mode 0x%x uic error %d\n",
			mode, ret);
		goto out;
	}

	if (!wait_for_completion_timeout(hba->pwr_done,
					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
		dev_err(hba->dev,
			"pwr mode change with mode 0x%x completion timeout\n",
			mode);
		ret = -ETIMEDOUT;
		goto out;
	}

	status = ufshcd_get_upmcrs(hba);
	if (status != PWR_LOCAL) {
		dev_err(hba->dev,
			"pwr mode change failed, host upmcrs:0x%x\n",
			status);
		ret = (status != PWR_OK) ? status : -1;
	}
out:
	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->pwr_done = NULL;
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	mutex_unlock(&hba->uic_cmd_mutex);
	return ret;
}

/**
 * ufshcd_config_max_pwr_mode - Set & Change power mode with
 *				maximum capability attribute information.
 * @hba: per adapter instance
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
{
	enum {RX = 0, TX = 1};
	u32 lanes[] = {1, 1};
	u32 gear[] = {1, 1};
	u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE};
	int ret;

	/* Get the connected lane count */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]);
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]);

	/*
	 * First, get the maximum gears of HS speed.
	 * If a zero value, it means there is no HSGEAR capability.
	 * Then, get the maximum gears of PWM speed.
	 */
	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]);
	if (!gear[RX]) {
		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]);
		pwr[RX] = SLOWAUTO_MODE;
	}

	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]);
	if (!gear[TX]) {
		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
				    &gear[TX]);
		pwr[TX] = SLOWAUTO_MODE;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]);
	if (pwr[RX] == FASTAUTO_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]);
	if (pwr[TX] == FASTAUTO_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);

	if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);

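	/* PA_PWRMODE takes the RX mode in bits 7:4 and the TX mode in bits 3:0 */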
	ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]);
	if (ret)
		dev_err(hba->dev,
			"pwr_mode: power mode change failed %d\n", ret);

	return ret;
}

/**
 * ufshcd_complete_dev_init() - checks device readiness
 * @hba: per-adapter instance
 *
 * Set fDeviceInit flag and poll until device toggles it.
 */
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
	int i, retries, err = 0;
	bool flag_res = 1;

	for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
		/* Set the fDeviceInit flag */
		err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
					QUERY_FLAG_IDN_FDEVICEINIT, NULL);
		if (!err || err == -ETIMEDOUT)
			break;
		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}
	if (err) {
		dev_err(hba->dev,
			"%s setting fDeviceInit flag failed with error %d\n",
			__func__, err);
		goto out;
	}

	/* poll for max. 100 iterations for fDeviceInit flag to clear */
	for (i = 0; i < 100 && !err && flag_res; i++) {
		for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
			err = ufshcd_query_flag(hba,
					UPIU_QUERY_OPCODE_READ_FLAG,
					QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
			if (!err || err == -ETIMEDOUT)
				break;
			dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
					err);
		}
	}
	if (err)
		dev_err(hba->dev,
			"%s reading fDeviceInit flag failed with error %d\n",
			__func__, err);
	else if (flag_res)
		dev_err(hba->dev,
			"%s fDeviceInit was not cleared by the device\n",
			__func__);

out:
	return err;
}

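/*
 * Illustrative sketch (not part of the driver): ufshcd_query_flag() is the
 * generic accessor behind the fDeviceInit handshake above. Setting a flag
 * passes a NULL result pointer, reading one passes a bool to fill in; e.g.
 * reading back fDeviceInit on its own would look like:
 *
 *	bool flag_res = false;
 *	int err;
 *
 *	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 *	if (!err && flag_res)
 *		dev_dbg(hba->dev, "device initialization still in progress\n");
 */
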
/**
 * ufshcd_make_hba_operational - Make UFS controller operational
 * @hba: per adapter instance
 *
 * To bring UFS host controller to operational state,
 * 1. Check if device is present
 * 2. Enable required interrupts
 * 3. Configure interrupt aggregation
 * 4. Program UTRL and UTMRL base address
 * 5. Configure run-stop-registers
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_make_hba_operational(struct ufs_hba *hba)
{
	int err = 0;
	u32 reg;

	/* check if device present */
	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	if (!ufshcd_is_device_present(reg)) {
		dev_err(hba->dev, "cc: Device not present\n");
		err = -ENXIO;
		goto out;
	}

	/* Enable required interrupts */
	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);

	/* Configure interrupt aggregation */
	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);

	/* Configure UTRL and UTMRL base address registers */
	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
			REG_UTP_TRANSFER_REQ_LIST_BASE_H);
	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_L);
	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
			REG_UTP_TASK_REQ_LIST_BASE_H);

	/*
	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
	 * DEI, HEI bits must be 0
	 */
	if (!(ufshcd_get_lists_status(reg))) {
		ufshcd_enable_run_stop_reg(hba);
	} else {
		dev_err(hba->dev,
			"Host controller not ready to process requests");
		err = -EIO;
		goto out;
	}

out:
	return err;
}

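/*
 * Illustrative sketch (not part of the driver): the request list base
 * registers form a 64-bit pair, so a DMA address is split with the kernel's
 * lower_32_bits()/upper_32_bits() helpers before being written. For a
 * hypothetical list at some dma_addr_t "list_dma", the pattern is:
 *
 *	ufshcd_writel(hba, lower_32_bits(list_dma),
 *		      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
 *	ufshcd_writel(hba, upper_32_bits(list_dma),
 *		      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
 *
 * On a 32-bit DMA configuration the high half simply ends up zero.
 */
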
/**
 * ufshcd_hba_enable - initialize the controller
 * @hba: per adapter instance
 *
 * The controller resets itself and controller firmware initialization
 * sequence kicks off. When controller is ready it will set
 * the Host Controller Enable bit to 1.
 *
 * Returns 0 on success, non-zero value on failure
 */
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	/*
	 * The msleep(1) and msleep(5) calls below may stretch up to ~20 ms
	 * each due to timer granularity, but they were necessary to put the
	 * UFS FPGA into reset mode during development and testing of this
	 * driver. msleep can be changed to mdelay and the retry count can
	 * be reduced based on the controller.
	 */
	if (!ufshcd_is_hba_active(hba)) {

		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

		/*
		 * This delay is based on the testing done with UFS host
		 * controller FPGA. The delay can be changed based on the
		 * host controller used.
		 */
		msleep(5);
	}

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * Without this delay, the HCE = 1 value set by the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	msleep(1);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev,
				"Controller enable failed\n");
			return -EIO;
		}
		msleep(5);
	}
	return 0;
}

/**
 * ufshcd_link_startup - Initialize unipro link startup
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero in case of failure
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);

	ret = ufshcd_dme_link_startup(hba);
	if (ret)
		goto out;

	ret = ufshcd_make_hba_operational(hba);

out:
	if (ret)
		dev_err(hba->dev, "link startup failed %d\n", ret);
	return ret;
}

/**
 * ufshcd_verify_dev_init() - Verify device initialization
 * @hba: per-adapter instance
 *
 * Send NOP OUT UPIU and wait for NOP IN response to check whether the
 * device Transport Protocol (UTP) layer is ready after a reset.
 * If the UTP layer at the device side is not initialized, it may
 * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
 * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int err = 0;
	int retries;

	mutex_lock(&hba->dev_cmd.lock);
	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					  NOP_OUT_TIMEOUT);

		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}
	mutex_unlock(&hba->dev_cmd.lock);

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
	return err;
}

/**
 * ufshcd_slave_alloc - handle initial SCSI device configurations
 * @sdev: pointer to SCSI device
 *
 * Returns 0 (success)
 */
static int ufshcd_slave_alloc(struct scsi_device *sdev)
{
	struct ufs_hba *hba;
	int lun_qdepth;

	hba = shost_priv(sdev->host);
	sdev->tagged_supported = 1;

	/* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
	sdev->use_10_for_ms = 1;
	scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);

	/* allow SCSI layer to restart the device in case of errors */
	sdev->allow_restart = 1;

	/* REPORT SUPPORTED OPERATION CODES is not supported */
	sdev->no_report_opcodes = 1;

	lun_qdepth = ufshcd_read_sdev_qdepth(hba, sdev);
	if (lun_qdepth <= 0)
		/* eventually, we can figure out the real queue depth */
		lun_qdepth = hba->nutrs;
	else
		lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);

	dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
			__func__, lun_qdepth);
	scsi_activate_tcq(sdev, lun_qdepth);

	return 0;
}

/**
 * ufshcd_change_queue_depth - change queue depth
 * @sdev: pointer to SCSI device
 * @depth: required depth to set
 * @reason: reason for changing the depth
 *
 * Change queue depth according to the reason and make sure
 * the max. limits are not crossed.
 */
int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth, int reason)
{
	struct ufs_hba *hba = shost_priv(sdev->host);

	if (depth > hba->nutrs)
		depth = hba->nutrs;

	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:
	case SCSI_QDEPTH_RAMP_UP:
		if (!sdev->tagged_supported)
			depth = 1;
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
		break;
	case SCSI_QDEPTH_QFULL:
		scsi_track_queue_full(sdev, depth);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return depth;
}

/**
 * ufshcd_slave_configure - adjust SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static int ufshcd_slave_configure(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;

	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
	blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);

	return 0;
}

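/*
 * Illustrative note (not part of the driver): the data byte count of each
 * PRDT entry has both a granularity and a maximum value, so the request
 * queue is constrained accordingly: blk_queue_update_dma_pad() makes the
 * block layer pad transfer lengths up to the PRDT granularity, and
 * blk_queue_max_segment_size() keeps any single scatter-gather segment
 * within what one PRDT entry can describe. With hypothetical values of a
 * 4-byte granularity and a 256 KiB entry limit, this would read:
 *
 *	blk_queue_update_dma_pad(q, 4 - 1);
 *	blk_queue_max_segment_size(q, 256 * 1024);
 */
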
/**
 * ufshcd_slave_destroy - remove SCSI device configurations
 * @sdev: pointer to SCSI device
 */
static void ufshcd_slave_destroy(struct scsi_device *sdev)
{
	struct ufs_hba *hba;

	hba = shost_priv(sdev->host);
	scsi_deactivate_tcq(sdev, hba->nutrs);
}

/**
 * ufshcd_task_req_compl - handle task management request completion
 * @hba: per adapter instance
 * @index: index of the completed request
 * @resp: task management service response
 *
 * Returns non-zero value on error, zero on success
 */
static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_rsp *task_rsp_upiup;
	unsigned long flags;
	int ocs_value;
	int task_result;

	spin_lock_irqsave(hba->host->host_lock, flags);

	/* Clear completed tasks from outstanding_tasks */
	__clear_bit(index, &hba->outstanding_tasks);

	task_req_descp = hba->utmrdl_base_addr;
	ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);

	if (ocs_value == OCS_SUCCESS) {
		task_rsp_upiup = (struct utp_upiu_task_rsp *)
				task_req_descp[index].task_rsp_upiu;
		task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
		task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
		if (resp)
			*resp = (u8)task_result;
	} else {
		dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
				__func__, ocs_value);
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ocs_value;
}

/**
 * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
 * @lrbp: pointer to local reference block of completed command
 * @scsi_status: SCSI command status
 *
 * Returns value based on SCSI command status
 */
static inline int
ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
{
	int result = 0;

	switch (scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		ufshcd_copy_sense_data(lrbp);
		/* fallthrough */
	case SAM_STAT_GOOD:
		result |= DID_OK << 16 |
			  COMMAND_COMPLETE << 8 |
			  scsi_status;
		break;
	case SAM_STAT_TASK_SET_FULL:
	case SAM_STAT_BUSY:
	case SAM_STAT_TASK_ABORTED:
		ufshcd_copy_sense_data(lrbp);
		result |= scsi_status;
		break;
	default:
		result |= DID_ERROR << 16;
		break;
	} /* end of switch */

	return result;
}

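/*
 * Illustrative note (not part of the driver): cmd->result is a packed word
 * the SCSI midlayer decodes byte-wise - host byte in bits 16..23, message
 * byte in bits 8..15, SCSI status byte in bits 0..7. The DID_OK case above
 * therefore builds, for a CHECK CONDITION, a value equivalent to:
 *
 *	int result = (DID_OK << 16) |
 *		     (COMMAND_COMPLETE << 8) |
 *		     SAM_STAT_CHECK_CONDITION;
 */
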
/**
 * ufshcd_transfer_rsp_status - Get overall status of the response
 * @hba: per adapter instance
 * @lrbp: pointer to local reference block of completed command
 *
 * Returns result of the command to notify SCSI midlayer
 */
static inline int
ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	int result = 0;
	int scsi_status;
	int ocs;

	/* overall command status of utrd */
	ocs = ufshcd_get_tr_ocs(lrbp);

	switch (ocs) {
	case OCS_SUCCESS:
		result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);

		switch (result) {
		case UPIU_TRANSACTION_RESPONSE:
			/*
			 * get the response UPIU result to extract
			 * the SCSI command status
			 */
			result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);

			/*
			 * get the result based on SCSI status response
			 * to notify the SCSI midlayer of the command status
			 */
			scsi_status = result & MASK_SCSI_STATUS;
			result = ufshcd_scsi_cmd_status(lrbp, scsi_status);

			if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
				schedule_work(&hba->eeh_work);
			break;
		case UPIU_TRANSACTION_REJECT_UPIU:
			/* TODO: handle Reject UPIU Response */
			result = DID_ERROR << 16;
			dev_err(hba->dev,
				"Reject UPIU not fully implemented\n");
			break;
		default:
			/* log the response code before overwriting it */
			dev_err(hba->dev,
				"Unexpected request response code = %x\n",
				result);
			result = DID_ERROR << 16;
			break;
		}
		break;
	case OCS_ABORTED:
		result |= DID_ABORT << 16;
		break;
	case OCS_INVALID_COMMAND_STATUS:
		result |= DID_REQUEUE << 16;
		break;
	case OCS_INVALID_CMD_TABLE_ATTR:
	case OCS_INVALID_PRDT_ATTR:
	case OCS_MISMATCH_DATA_BUF_SIZE:
	case OCS_MISMATCH_RESP_UPIU_SIZE:
	case OCS_PEER_COMM_FAILURE:
	case OCS_FATAL_ERROR:
	default:
		result |= DID_ERROR << 16;
		dev_err(hba->dev,
			"OCS error from controller = %x\n", ocs);
		break;
	} /* end of switch */

	return result;
}

2219
6ccf44fe
SJ
2220/**
2221 * ufshcd_uic_cmd_compl - handle completion of uic command
2222 * @hba: per adapter instance
53b3d9c3 2223 * @intr_status: interrupt status generated by the controller
6ccf44fe 2224 */
53b3d9c3 2225static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
6ccf44fe 2226{
53b3d9c3 2227 if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
6ccf44fe
SJ
2228 hba->active_uic_cmd->argument2 |=
2229 ufshcd_get_uic_cmd_result(hba);
12b4fdb4
SJ
2230 hba->active_uic_cmd->argument3 =
2231 ufshcd_get_dme_attr_val(hba);
6ccf44fe
SJ
2232 complete(&hba->active_uic_cmd->done);
2233 }
53b3d9c3
SJ
2234
2235 if ((intr_status & UIC_POWER_MODE) && hba->pwr_done)
2236 complete(hba->pwr_done);
6ccf44fe
SJ
2237}
2238
/**
 * ufshcd_transfer_req_compl - handle SCSI and query command completion
 * @hba: per adapter instance
 */
static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
	struct ufshcd_lrb *lrbp;
	struct scsi_cmnd *cmd;
	unsigned long completed_reqs;
	u32 tr_doorbell;
	int result;
	int index;

	/*
	 * Resetting interrupt aggregation counters first and reading the
	 * DOOR_BELL afterward allows us to handle all the completed requests.
	 * In order to prevent other interrupts starvation the DB is read once
	 * after reset. The down side of this solution is the possibility of
	 * false interrupt if device completes another request after resetting
	 * aggregation and before reading the DB.
	 */
	ufshcd_reset_intr_aggr(hba);

	tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	completed_reqs = tr_doorbell ^ hba->outstanding_reqs;

	for_each_set_bit(index, &completed_reqs, hba->nutrs) {
		lrbp = &hba->lrb[index];
		cmd = lrbp->cmd;
		if (cmd) {
			result = ufshcd_transfer_rsp_status(hba, lrbp);
			scsi_dma_unmap(cmd);
			cmd->result = result;
			/* Mark completed command as NULL in LRB */
			lrbp->cmd = NULL;
			clear_bit_unlock(index, &hba->lrb_in_use);
			/* Do not touch lrbp after scsi done */
			cmd->scsi_done(cmd);
		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
			if (hba->dev_cmd.complete)
				complete(hba->dev_cmd.complete);
		}
	}

	/* clear corresponding bits of completed commands */
	hba->outstanding_reqs ^= completed_reqs;

	/* we might have free'd some tags above */
	wake_up(&hba->dev_cmd.tag_wq);
}

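/*
 * Illustrative note (not part of the driver): the doorbell register has a
 * bit set for every still-outstanding request, so XOR-ing it against the
 * driver's own outstanding mask isolates exactly the bits that completed
 * since the last pass. For a hypothetical snapshot:
 *
 *	unsigned long outstanding = 0x0000000f;	// tags 0-3 issued
 *	u32 doorbell = 0x00000005;		// tags 0 and 2 still pending
 *	unsigned long completed = doorbell ^ outstanding;	// = 0x0000000a
 *
 * i.e. tags 1 and 3 are the ones to complete back to the midlayer.
 */
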
/**
 * ufshcd_disable_ee - disable exception event
 * @hba: per-adapter instance
 * @mask: exception event to disable
 *
 * Disables exception event in the device so that the EVENT_ALERT
 * bit is not set.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
{
	int err = 0;
	u32 val;

	if (!(hba->ee_ctrl_mask & mask))
		goto out;

	val = hba->ee_ctrl_mask & ~mask;
	val &= 0xFFFF; /* 2 bytes */
	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
		hba->ee_ctrl_mask &= ~mask;
out:
	return err;
}

/**
 * ufshcd_enable_ee - enable exception event
 * @hba: per-adapter instance
 * @mask: exception event to enable
 *
 * Enable corresponding exception event in the device to allow
 * device to alert host in critical scenarios.
 *
 * Returns zero on success, non-zero error value on failure.
 */
static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
{
	int err = 0;
	u32 val;

	if (hba->ee_ctrl_mask & mask)
		goto out;

	val = hba->ee_ctrl_mask | mask;
	val &= 0xFFFF; /* 2 bytes */
	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
			QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
	if (!err)
		hba->ee_ctrl_mask |= mask;
out:
	return err;
}

/**
 * ufshcd_enable_auto_bkops - Allow device managed BKOPS
 * @hba: per-adapter instance
 *
 * Allow device to manage background operations on its own. Enabling
 * this might lead to inconsistent latencies during normal data transfers
 * as the device is allowed to manage its own way of handling background
 * operations.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (hba->auto_bkops_enabled)
		goto out;

	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable bkops %d\n",
				__func__, err);
		goto out;
	}

	hba->auto_bkops_enabled = true;

	/* No need of URGENT_BKOPS exception from the device */
	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err)
		dev_err(hba->dev, "%s: failed to disable exception event %d\n",
				__func__, err);
out:
	return err;
}

/**
 * ufshcd_disable_auto_bkops - block device from doing background operations
 * @hba: per-adapter instance
 *
 * Disabling background operations improves command response latency but
 * has the drawback of moving the device into a critical state where it is
 * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
 * host is idle so that BKOPS are managed effectively without any negative
 * impacts.
 *
 * Returns zero on success, non-zero on failure.
 */
static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
{
	int err = 0;

	if (!hba->auto_bkops_enabled)
		goto out;

	/*
	 * If host assisted BKOPs is to be enabled, make sure
	 * urgent bkops exception is allowed.
	 */
	err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
	if (err) {
		dev_err(hba->dev, "%s: failed to enable exception event %d\n",
				__func__, err);
		goto out;
	}

	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
			QUERY_FLAG_IDN_BKOPS_EN, NULL);
	if (err) {
		dev_err(hba->dev, "%s: failed to disable bkops %d\n",
				__func__, err);
		ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
		goto out;
	}

	hba->auto_bkops_enabled = false;
out:
	return err;
}

/**
 * ufshcd_force_reset_auto_bkops - force enable of auto bkops
 * @hba: per adapter instance
 *
 * After a device reset the device may toggle the BKOPS_EN flag
 * to default value. The s/w tracking variables should be updated
 * as well. Do this by forcing enable of auto bkops.
 */
static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
{
	hba->auto_bkops_enabled = false;
	hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
	ufshcd_enable_auto_bkops(hba);
}

static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}

/**
 * ufshcd_urgent_bkops - handle urgent bkops exception event
 * @hba: per-adapter instance
 *
 * Enable fBackgroundOpsEn flag in the device to permit background
 * operations.
 */
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
	int err;
	u32 status = 0;

	err = ufshcd_get_bkops_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
				__func__, err);
		goto out;
	}

	status = status & 0xF;

	/* handle only if status indicates performance impact or critical */
	if (status >= BKOPS_STATUS_PERF_IMPACT)
		err = ufshcd_enable_auto_bkops(hba);
out:
	return err;
}

static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
			QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}

/**
 * ufshcd_exception_event_handler - handle exceptions raised by device
 * @work: pointer to work data
 *
 * Read bExceptionEventStatus attribute from the device and handle the
 * exception event accordingly.
 */
static void ufshcd_exception_event_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	int err;
	u32 status = 0;

	hba = container_of(work, struct ufs_hba, eeh_work);

	pm_runtime_get_sync(hba->dev);
	err = ufshcd_get_ee_status(hba, &status);
	if (err) {
		dev_err(hba->dev, "%s: failed to get exception status %d\n",
				__func__, err);
		goto out;
	}

	status &= hba->ee_ctrl_mask;
	if (status & MASK_EE_URGENT_BKOPS) {
		err = ufshcd_urgent_bkops(hba);
		if (err)
			dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
					__func__, err);
	}
out:
	pm_runtime_put_sync(hba->dev);
	return;
}

/**
 * ufshcd_err_handler - handle UFS errors that require s/w attention
 * @work: pointer to work structure
 */
static void ufshcd_err_handler(struct work_struct *work)
{
	struct ufs_hba *hba;
	unsigned long flags;
	u32 err_xfer = 0;
	u32 err_tm = 0;
	int err = 0;
	int tag;

	hba = container_of(work, struct ufs_hba, eh_work);

	pm_runtime_get_sync(hba->dev);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		goto out;
	}

	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);

	/* Complete requests that have door-bell cleared by h/w */
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* Clear pending transfer requests */
	for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
		if (ufshcd_clear_cmd(hba, tag))
			err_xfer |= 1 << tag;

	/* Clear pending task management requests */
	for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
		if (ufshcd_clear_tm_cmd(hba, tag))
			err_tm |= 1 << tag;

	/* Complete the requests that are cleared by s/w */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* Fatal errors need reset */
	if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
			((hba->saved_err & UIC_ERROR) &&
			(hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
		err = ufshcd_reset_and_restore(hba);
		if (err) {
			dev_err(hba->dev, "%s: reset and restore failed\n",
					__func__);
			hba->ufshcd_state = UFSHCD_STATE_ERROR;
		}
		/*
		 * Inform scsi mid-layer that we did reset and allow to handle
		 * Unit Attention properly.
		 */
		scsi_report_bus_reset(hba->host, 0);
		hba->saved_err = 0;
		hba->saved_uic_err = 0;
	}
	ufshcd_clear_eh_in_progress(hba);

out:
	scsi_unblock_requests(hba->host);
	pm_runtime_put_sync(hba->dev);
}

/**
 * ufshcd_update_uic_error - check and set fatal UIC error flags.
 * @hba: per-adapter instance
 */
static void ufshcd_update_uic_error(struct ufs_hba *hba)
{
	u32 reg;

	/* PA_INIT_ERROR is fatal and needs UIC reset */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
		hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;

	/* UIC NL/TL/DME errors need software retry */
	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
	if (reg)
		hba->uic_error |= UFSHCD_UIC_NL_ERROR;

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
	if (reg)
		hba->uic_error |= UFSHCD_UIC_TL_ERROR;

	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
	if (reg)
		hba->uic_error |= UFSHCD_UIC_DME_ERROR;

	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
			__func__, hba->uic_error);
}

/**
 * ufshcd_check_errors - Check for errors that need s/w attention
 * @hba: per-adapter instance
 */
static void ufshcd_check_errors(struct ufs_hba *hba)
{
	bool queue_eh_work = false;

	if (hba->errors & INT_FATAL_ERRORS)
		queue_eh_work = true;

	if (hba->errors & UIC_ERROR) {
		hba->uic_error = 0;
		ufshcd_update_uic_error(hba);
		if (hba->uic_error)
			queue_eh_work = true;
	}

	if (queue_eh_work) {
		/* handle fatal errors only when link is functional */
		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
			/* block commands from scsi mid-layer */
			scsi_block_requests(hba->host);

			/* transfer error masks to sticky bits */
			hba->saved_err |= hba->errors;
			hba->saved_uic_err |= hba->uic_error;

			hba->ufshcd_state = UFSHCD_STATE_ERROR;
			schedule_work(&hba->eh_work);
		}
	}
	/*
	 * if (!queue_eh_work) -
	 * Other errors are either non-fatal where host recovers
	 * itself without s/w intervention or errors that will be
	 * handled by the SCSI core layer.
	 */
}

/**
 * ufshcd_tmc_handler - handle task management function completion
 * @hba: per adapter instance
 */
static void ufshcd_tmc_handler(struct ufs_hba *hba)
{
	u32 tm_doorbell;

	tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
	hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
	wake_up(&hba->tm_wq);
}

/**
 * ufshcd_sl_intr - Interrupt service routine
 * @hba: per adapter instance
 * @intr_status: contains interrupts generated by the controller
 */
static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
	hba->errors = UFSHCD_ERROR_MASK & intr_status;
	if (hba->errors)
		ufshcd_check_errors(hba);

	if (intr_status & UFSHCD_UIC_MASK)
		ufshcd_uic_cmd_compl(hba, intr_status);

	if (intr_status & UTP_TASK_REQ_COMPL)
		ufshcd_tmc_handler(hba);

	if (intr_status & UTP_TRANSFER_REQ_COMPL)
		ufshcd_transfer_req_compl(hba);
}

/**
 * ufshcd_intr - Main interrupt service routine
 * @irq: irq number
 * @__hba: pointer to adapter instance
 *
 * Returns IRQ_HANDLED - If interrupt is valid
 *		IRQ_NONE - If invalid interrupt
 */
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
	u32 intr_status;
	irqreturn_t retval = IRQ_NONE;
	struct ufs_hba *hba = __hba;

	spin_lock(hba->host->host_lock);
	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);

	if (intr_status) {
		ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
		ufshcd_sl_intr(hba, intr_status);
		retval = IRQ_HANDLED;
	}
	spin_unlock(hba->host->host_lock);
	return retval;
}

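/*
 * Illustrative note (not part of the driver): the interrupt status register
 * is write-one-to-clear, so writing back the value just read acknowledges
 * exactly the interrupts about to be serviced without racing new ones:
 *
 *	intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 *	ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
 *
 * Any bit that sets after the read stays pending and re-raises the IRQ.
 */
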
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
{
	int err = 0;
	u32 mask = 1 << tag;
	unsigned long flags;

	if (!test_bit(tag, &hba->outstanding_tasks))
		goto out;

	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	/* poll for max. 1 sec to clear door bell register by h/w */
	err = ufshcd_wait_for_register(hba,
			REG_UTP_TASK_REQ_DOOR_BELL,
			mask, 0, 1000, 1000);
out:
	return err;
}

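/*
 * Illustrative note (not part of the driver): ufshcd_wait_for_register()
 * above polls a register until the masked bits reach the requested value.
 * Reading the call as (hba, offset, mask, expected value, poll interval in
 * us, timeout in ms) - an assumption about the helper's parameter order
 * based on this call site - the equivalent open-coded loop would be:
 *
 *	unsigned long timeout = jiffies + msecs_to_jiffies(1000);
 *
 *	while (ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL) & mask) {
 *		if (time_after(jiffies, timeout))
 *			return -ETIMEDOUT;
 *		usleep_range(1000, 2000);
 *	}
 */
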
/**
 * ufshcd_issue_tm_cmd - issues task management commands to controller
 * @hba: per adapter instance
 * @lun_id: LUN ID to which TM command is sent
 * @task_id: task ID to which the TM command is applicable
 * @tm_function: task management function opcode
 * @tm_response: task management service response return value
 *
 * Returns non-zero value on error, zero on success.
 */
static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
		u8 tm_function, u8 *tm_response)
{
	struct utp_task_req_desc *task_req_descp;
	struct utp_upiu_task_req *task_req_upiup;
	struct Scsi_Host *host;
	unsigned long flags;
	int free_slot;
	int err;
	int task_tag;

	host = hba->host;

	/*
	 * Get free slot, sleep if slots are unavailable.
	 * Even though we use wait_event() which sleeps indefinitely,
	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
	 */
	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));

	spin_lock_irqsave(host->host_lock, flags);
	task_req_descp = hba->utmrdl_base_addr;
	task_req_descp += free_slot;

	/* Configure task request descriptor */
	task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
	task_req_descp->header.dword_2 =
			cpu_to_le32(OCS_INVALID_COMMAND_STATUS);

	/* Configure task request UPIU */
	task_req_upiup =
		(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
	task_tag = hba->nutrs + free_slot;
	task_req_upiup->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
				  lun_id, task_tag);
	task_req_upiup->header.dword_1 =
		UPIU_HEADER_DWORD(0, tm_function, 0, 0);

	task_req_upiup->input_param1 = cpu_to_be32(lun_id);
	task_req_upiup->input_param2 = cpu_to_be32(task_id);

	/* send command to the controller */
	__set_bit(free_slot, &hba->outstanding_tasks);
	ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);

	spin_unlock_irqrestore(host->host_lock, flags);

	/* wait until the task management command is completed */
	err = wait_event_timeout(hba->tm_wq,
			test_bit(free_slot, &hba->tm_condition),
			msecs_to_jiffies(TM_CMD_TIMEOUT));
	if (!err) {
		dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
				__func__, tm_function);
		if (ufshcd_clear_tm_cmd(hba, free_slot))
			dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
					__func__, free_slot);
		err = -ETIMEDOUT;
	} else {
		err = ufshcd_task_req_compl(hba, free_slot, tm_response);
	}

	clear_bit(free_slot, &hba->tm_condition);
	ufshcd_put_tm_slot(hba, free_slot);
	wake_up(&hba->tm_tag_wq);

	return err;
}

/**
 * ufshcd_eh_device_reset_handler - device reset handler registered to
 *                                  scsi layer.
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned int tag;
	u32 pos;
	int err;
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;
	unsigned long flags;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	lrbp = &hba->lrb[tag];
	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp;
		goto out;
	}

	/* clear the commands that were pending for corresponding LUN */
	for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
		if (hba->lrb[pos].lun == lrbp->lun) {
			err = ufshcd_clear_cmd(hba, pos);
			if (err)
				break;
		}
	}
	spin_lock_irqsave(host->host_lock, flags);
	ufshcd_transfer_req_compl(hba);
	spin_unlock_irqrestore(host->host_lock, flags);
out:
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}
	return err;
}

/**
 * ufshcd_abort - abort a specific command
 * @cmd: SCSI command pointer
 *
 * Abort the pending command in device by sending UFS_ABORT_TASK task management
 * command, and in host controller by clearing the door-bell register. There can
 * be a race between the controller sending the command to the device and the
 * abort being issued. To avoid that, first issue UFS_QUERY_TASK to check if the
 * command is really issued and then try to abort it.
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	unsigned long flags;
	unsigned int tag;
	int err = 0;
	int poll_cnt;
	u8 resp = 0xF;
	struct ufshcd_lrb *lrbp;
	u32 reg;

	host = cmd->device->host;
	hba = shost_priv(host);
	tag = cmd->request->tag;

	/* If command is already aborted/completed, return SUCCESS */
	if (!(test_bit(tag, &hba->outstanding_reqs)))
		goto out;

	reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
	if (!(reg & (1 << tag))) {
		dev_err(hba->dev,
		"%s: cmd was completed, but without a notifying intr, tag = %d",
		__func__, tag);
	}

	lrbp = &hba->lrb[tag];
	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
				UFS_QUERY_TASK, &resp);
		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
			/* cmd pending in the device */
			break;
		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
			/*
			 * cmd not pending in the device, check if it is
			 * in transition.
			 */
			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
			if (reg & (1 << tag)) {
				/* sleep for max. 200us to stabilize */
				usleep_range(100, 200);
				continue;
			}
			/* command completed already */
			goto out;
		} else {
			if (!err)
				err = resp; /* service response error */
			goto out;
		}
	}

	if (!poll_cnt) {
		err = -EBUSY;
		goto out;
	}

	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
			UFS_ABORT_TASK, &resp);
	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
		if (!err)
			err = resp; /* service response error */
		goto out;
	}

	err = ufshcd_clear_cmd(hba, tag);
	if (err)
		goto out;

	scsi_dma_unmap(cmd);

	spin_lock_irqsave(host->host_lock, flags);
	__clear_bit(tag, &hba->outstanding_reqs);
	hba->lrb[tag].cmd = NULL;
	spin_unlock_irqrestore(host->host_lock, flags);

	clear_bit_unlock(tag, &hba->lrb_in_use);
	wake_up(&hba->dev_cmd.tag_wq);
out:
	if (!err) {
		err = SUCCESS;
	} else {
		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
		err = FAILED;
	}

	return err;
}

/**
 * ufshcd_host_reset_and_restore - reset and restore host controller
 * @hba: per-adapter instance
 *
 * Note that host controller reset may issue DME_RESET to
 * local and remote (device) Uni-Pro stack and the attributes
 * are reset to default state.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
{
	int err;
	async_cookie_t cookie;
	unsigned long flags;

	/* Reset the host controller */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_hba_stop(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_hba_enable(hba);
	if (err)
		goto out;

	/* Establish the link again and restore the device */
	cookie = async_schedule(ufshcd_async_scan, hba);
	/* wait for async scan to be completed */
	async_synchronize_cookie(++cookie);
	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
		err = -EIO;
out:
	if (err)
		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);

	return err;
}

/**
 * ufshcd_reset_and_restore - reset and re-initialize host/device
 * @hba: per-adapter instance
 *
 * Reset and recover device, host and re-establish link. This
 * is helpful to recover the communication in fatal error conditions.
 *
 * Returns zero on success, non-zero on failure
 */
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
	int err = 0;
	unsigned long flags;

	err = ufshcd_host_reset_and_restore(hba);

	/*
	 * After reset the door-bell might be cleared, complete
	 * outstanding requests in s/w here.
	 */
	spin_lock_irqsave(hba->host->host_lock, flags);
	ufshcd_transfer_req_compl(hba);
	ufshcd_tmc_handler(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}

/**
 * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
 * @cmd: SCSI command pointer
 *
 * Returns SUCCESS/FAILED
 */
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	int err;
	unsigned long flags;
	struct ufs_hba *hba;

	hba = shost_priv(cmd->device->host);

	/*
	 * Check if there is any race with fatal error handling.
	 * If so, wait for it to complete. Even though fatal error
	 * handling does reset and restore in some cases, don't assume
	 * anything out of it. We are just avoiding race here.
	 */
	do {
		spin_lock_irqsave(hba->host->host_lock, flags);
		if (!(work_pending(&hba->eh_work) ||
				hba->ufshcd_state == UFSHCD_STATE_RESET))
			break;
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
		flush_work(&hba->eh_work);
	} while (1);

	hba->ufshcd_state = UFSHCD_STATE_RESET;
	ufshcd_set_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	err = ufshcd_reset_and_restore(hba);

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!err) {
		err = SUCCESS;
		hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
	} else {
		err = FAILED;
		hba->ufshcd_state = UFSHCD_STATE_ERROR;
	}
	ufshcd_clear_eh_in_progress(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return err;
}

/**
 * ufshcd_read_sdev_qdepth - read the lun command queue depth
 * @hba: Pointer to adapter instance
 * @sdev: pointer to SCSI device
 *
 * Returns the LUN's queue depth on success, an error code otherwise.
 */
static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba,
		struct scsi_device *sdev)
{
	int ret;
	int buff_len = UNIT_DESC_MAX_SIZE;
	u8 desc_buf[UNIT_DESC_MAX_SIZE];

	ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
			QUERY_DESC_IDN_UNIT, sdev->lun, 0, desc_buf, &buff_len);

	if (ret || (buff_len < UNIT_DESC_PARAM_LU_Q_DEPTH)) {
		dev_err(hba->dev,
			"%s: Failed reading unit descriptor. len = %d ret = %d\n",
			__func__, buff_len, ret);
		if (!ret)
			ret = -EINVAL;

		goto out;
	}

	ret = desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH] & 0xFF;
out:
	return ret;
}

/**
 * ufshcd_async_scan - asynchronous execution for link startup
 * @data: data pointer to pass to this function
 * @cookie: cookie data
 */
static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
	struct ufs_hba *hba = (struct ufs_hba *)data;
	int ret;

	ret = ufshcd_link_startup(hba);
	if (ret)
		goto out;

	ufshcd_config_max_pwr_mode(hba);

	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		goto out;

	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		goto out;

	ufshcd_force_reset_auto_bkops(hba);
	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;

	/* If we are in error handling context no need to scan the host */
	if (!ufshcd_eh_in_progress(hba)) {
		scsi_scan_host(hba->host);
		pm_runtime_put_sync(hba->dev);
	}
out:
	return;
}

static struct scsi_host_template ufshcd_driver_template = {
	.module			= THIS_MODULE,
	.name			= UFSHCD,
	.proc_name		= UFSHCD,
	.queuecommand		= ufshcd_queuecommand,
	.slave_alloc		= ufshcd_slave_alloc,
	.slave_configure	= ufshcd_slave_configure,
	.slave_destroy		= ufshcd_slave_destroy,
	.change_queue_depth	= ufshcd_change_queue_depth,
	.eh_abort_handler	= ufshcd_abort,
	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
	.eh_host_reset_handler	= ufshcd_eh_host_reset_handler,
	.this_id		= -1,
	.sg_tablesize		= SG_ALL,
	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
	.can_queue		= UFSHCD_CAN_QUEUE,
};

/**
 * ufshcd_suspend - suspend power management function
 * @hba: per adapter instance
 * @state: power state
 *
 * Returns -ENOSYS
 */
int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
{
	/*
	 * TODO:
	 * 1. Block SCSI requests from SCSI midlayer
	 * 2. Change the internal driver state to non operational
	 * 3. Set UTRLRSR and UTMRLRSR bits to zero
	 * 4. Wait until outstanding commands are completed
	 * 5. Set HCE to zero to send the UFS host controller to reset state
	 */

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(ufshcd_suspend);
7a3e97b0
SY
3195
3196/**
3197 * ufshcd_resume - resume power management function
3b1d0580 3198 * @hba: per adapter instance
7a3e97b0
SY
3199 *
3200 * Returns -ENOSYS
3201 */
3b1d0580 3202int ufshcd_resume(struct ufs_hba *hba)
7a3e97b0
SY
3203{
3204 /*
3205 * TODO:
3206 * 1. Set HCE to 1, to start the UFS host controller
3207 * initialization process
3208 * 2. Set UTRLRSR and UTMRLRSR bits to 1
3209 * 3. Change the internal driver state to operational
3210 * 4. Unblock SCSI requests from SCSI midlayer
3211 */
3212
3213 return -ENOSYS;
3214}
3b1d0580
VH
3215EXPORT_SYMBOL_GPL(ufshcd_resume);
3216
66ec6d59
SRT
3217int ufshcd_runtime_suspend(struct ufs_hba *hba)
3218{
3219 if (!hba)
3220 return 0;
3221
3222 /*
3223 * The device is idle with no requests in the queue,
3224 * allow background operations.
3225 */
3226 return ufshcd_enable_auto_bkops(hba);
3227}
3228EXPORT_SYMBOL(ufshcd_runtime_suspend);
3229
3230int ufshcd_runtime_resume(struct ufs_hba *hba)
3231{
3232 if (!hba)
3233 return 0;
3234
3235 return ufshcd_disable_auto_bkops(hba);
3236}
3237EXPORT_SYMBOL(ufshcd_runtime_resume);
3238
3239int ufshcd_runtime_idle(struct ufs_hba *hba)
3240{
3241 return 0;
3242}
3243EXPORT_SYMBOL(ufshcd_runtime_idle);
3244
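/*
 * Illustrative sketch (not part of the driver): a bus glue driver is
 * expected to wire the helpers above into its own dev_pm_ops. The names
 * below (my_ufs_runtime_suspend, my_hba and the analogous resume/idle
 * wrappers) are hypothetical, standing in for whatever the glue layer
 * keeps in its drvdata:
 *
 *	static int my_ufs_runtime_suspend(struct device *dev)
 *	{
 *		struct ufs_hba *my_hba = dev_get_drvdata(dev);
 *
 *		return ufshcd_runtime_suspend(my_hba);
 *	}
 *
 *	static const struct dev_pm_ops my_ufs_pm_ops = {
 *		SET_RUNTIME_PM_OPS(my_ufs_runtime_suspend,
 *				   my_ufs_runtime_resume,
 *				   my_ufs_runtime_idle)
 *	};
 */
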
/**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 * data structure memory
 * @hba: per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
	scsi_remove_host(hba->host);
	/* disable interrupts */
	ufshcd_disable_intr(hba, hba->intr_mask);
	ufshcd_hba_stop(hba);

	scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);

/**
 * ufshcd_init - Driver initialization routine
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 *
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
		void __iomem *mmio_base, unsigned int irq)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err;

	if (!dev) {
		dev_err(dev,
		"Invalid memory reference for dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	if (!mmio_base) {
		dev_err(dev,
		"Invalid memory reference for mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
				sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	hba->mmio_base = mmio_base;
	hba->irq = irq;

	/* Read capabilities registers */
	ufshcd_hba_capabilities(hba);

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

	host->can_queue = hba->nutrs;
	host->cmd_per_lun = hba->nutrs;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFSHCD_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = MAX_CDB_SIZE;

	/* Initialize wait queue for task management */
	init_waitqueue_head(&hba->tm_wq);
	init_waitqueue_head(&hba->tm_tag_wq);

	/* Initialize work queues */
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	/* Initialize device management tag acquire wait queue */
	init_waitqueue_head(&hba->dev_cmd.tag_wq);

	/* IRQ registration */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto out_disable;
	}

	/* Enable SCSI tag mapping */
	err = scsi_init_shared_tag_map(host, host->can_queue);
	if (err) {
		dev_err(hba->dev, "init shared queue failed\n");
		goto out_disable;
	}

	err = scsi_add_host(host, hba->dev);
	if (err) {
		dev_err(hba->dev, "scsi_add_host failed\n");
		goto out_disable;
	}

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		goto out_remove_scsi_host;
	}

	*hba_handle = hba;

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);

	async_schedule(ufshcd_async_scan, hba);

	return 0;

out_remove_scsi_host:
	scsi_remove_host(hba->host);
out_disable:
	scsi_host_put(host);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);

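/*
 * Illustrative sketch (not part of the driver): a glue driver probe is
 * expected to map the controller's registers, then hand them to
 * ufshcd_init(). The resource handling below ("res", "irq") is a
 * hypothetical placeholder for whatever the bus (PCI, platform) provides:
 *
 *	struct ufs_hba *hba;
 *	void __iomem *mmio_base = devm_ioremap_resource(dev, res);
 *	int err;
 *
 *	if (IS_ERR(mmio_base))
 *		return PTR_ERR(mmio_base);
 *
 *	err = ufshcd_init(dev, &hba, mmio_base, irq);
 *	if (err)
 *		return err;
 *
 * and the matching remove path calls ufshcd_remove(hba).
 */
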
MODULE_AUTHOR("Santosh Yaraganavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);