]>
Commit | Line | Data |
---|---|---|
7feafb0a FA |
1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /** | |
b2d01681 | 3 | * ufs.c - Universal Flash Storage (UFS) driver |
7feafb0a FA |
4 | * |
5 | * Taken from Linux Kernel v5.2 (drivers/scsi/ufs/ufshcd.c) and ported | |
6 | * to u-boot. | |
7 | * | |
a94a4071 | 8 | * Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com |
7feafb0a FA |
9 | */ |
10 | ||
91913a1a | 11 | #include <bouncebuf.h> |
7feafb0a | 12 | #include <charset.h> |
d678a59d | 13 | #include <common.h> |
7feafb0a | 14 | #include <dm.h> |
f7ae49fc | 15 | #include <log.h> |
336d4615 | 16 | #include <dm/device_compat.h> |
61b29b82 | 17 | #include <dm/devres.h> |
7feafb0a FA |
18 | #include <dm/lists.h> |
19 | #include <dm/device-internal.h> | |
20 | #include <malloc.h> | |
21 | #include <hexdump.h> | |
22 | #include <scsi.h> | |
98eb4ce5 SG |
23 | #include <asm/io.h> |
24 | #include <asm/dma-mapping.h> | |
cd93d625 | 25 | #include <linux/bitops.h> |
c05ed00a | 26 | #include <linux/delay.h> |
9d86b89c | 27 | #include <linux/dma-mapping.h> |
7feafb0a FA |
28 | |
29 | #include "ufs.h" | |
30 | ||
31 | #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ | |
32 | UTP_TASK_REQ_COMPL |\ | |
33 | UFSHCD_ERROR_MASK) | |
34 | /* maximum number of link-startup retries */ | |
35 | #define DME_LINKSTARTUP_RETRIES 3 | |
36 | ||
37 | /* maximum number of retries for a general UIC command */ | |
38 | #define UFS_UIC_COMMAND_RETRIES 3 | |
39 | ||
40 | /* Query request retries */ | |
41 | #define QUERY_REQ_RETRIES 3 | |
42 | /* Query request timeout */ | |
43 | #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */ | |
44 | ||
45 | /* maximum timeout in ms for a general UIC command */ | |
46 | #define UFS_UIC_CMD_TIMEOUT 1000 | |
47 | /* NOP OUT retries waiting for NOP IN response */ | |
48 | #define NOP_OUT_RETRIES 10 | |
49 | /* Timeout after 30 msecs if NOP OUT hangs without response */ | |
50 | #define NOP_OUT_TIMEOUT 30 /* msecs */ | |
51 | ||
52 | /* Only use one Task Tag for all requests */ | |
53 | #define TASK_TAG 0 | |
54 | ||
55 | /* Expose the flag value from utp_upiu_query.value */ | |
56 | #define MASK_QUERY_UPIU_FLAG_LOC 0xFF | |
57 | ||
58 | #define MAX_PRDT_ENTRY 262144 | |
59 | ||
60 | /* maximum bytes per request */ | |
61 | #define UFS_MAX_BYTES (128 * 256 * 1024) | |
62 | ||
63 | static inline bool ufshcd_is_hba_active(struct ufs_hba *hba); | |
64 | static inline void ufshcd_hba_stop(struct ufs_hba *hba); | |
65 | static int ufshcd_hba_enable(struct ufs_hba *hba); | |
66 | ||
/*
 * ufshcd_wait_for_register - wait for register value to change
 * @hba: per-adapter interface
 * @reg: MMIO register offset
 * @mask: bits to consider; all other bits are ignored
 * @val: expected value (only the bits under @mask are compared)
 * @timeout_ms: how long to keep polling, in milliseconds
 *
 * Busy-polls @reg until (reg & mask) == (val & mask) or @timeout_ms
 * elapses. There is no delay between reads.
 *
 * Return: 0 on success, -ETIMEDOUT if the value never matched in time.
 */
static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				    u32 val, unsigned long timeout_ms)
{
	int err = 0;
	unsigned long start = get_timer(0);

	/* ignore bits that we don't intend to wait on */
	val = val & mask;

	while ((ufshcd_readl(hba, reg) & mask) != val) {
		if (get_timer(start) > timeout_ms) {
			/*
			 * Re-read once after the deadline so a match that
			 * lands exactly as the timer expires is not reported
			 * as a timeout.
			 */
			if ((ufshcd_readl(hba, reg) & mask) != val)
				err = -ETIMEDOUT;
			break;
		}
	}

	return err;
}
89 | ||
90 | /** | |
91 | * ufshcd_init_pwr_info - setting the POR (power on reset) | |
92 | * values in hba power info | |
93 | */ | |
94 | static void ufshcd_init_pwr_info(struct ufs_hba *hba) | |
95 | { | |
96 | hba->pwr_info.gear_rx = UFS_PWM_G1; | |
97 | hba->pwr_info.gear_tx = UFS_PWM_G1; | |
98 | hba->pwr_info.lane_rx = 1; | |
99 | hba->pwr_info.lane_tx = 1; | |
100 | hba->pwr_info.pwr_rx = SLOWAUTO_MODE; | |
101 | hba->pwr_info.pwr_tx = SLOWAUTO_MODE; | |
102 | hba->pwr_info.hs_rate = 0; | |
103 | } | |
104 | ||
/**
 * ufshcd_print_pwr_info - print power params as saved in hba
 * power info
 *
 * Emits the current gear, lane count, power mode and rate for both
 * directions via dev_err(). The names[] table is indexed by the
 * pwr_rx/pwr_tx mode values (FAST_MODE..SLOWAUTO_MODE); unused slots
 * are labelled "INVALID MODE".
 */
static void ufshcd_print_pwr_info(struct ufs_hba *hba)
{
	static const char * const names[] = {
		"INVALID MODE",
		"FAST MODE",
		"SLOW_MODE",
		"INVALID MODE",
		"FASTAUTO_MODE",
		"SLOWAUTO_MODE",
		"INVALID MODE",
	};

	dev_err(hba->dev, "[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
		hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
		hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
		names[hba->pwr_info.pwr_rx],
		names[hba->pwr_info.pwr_tx],
		hba->pwr_info.hs_rate);
}
128 | ||
129 | /** | |
130 | * ufshcd_ready_for_uic_cmd - Check if controller is ready | |
131 | * to accept UIC commands | |
132 | */ | |
133 | static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) | |
134 | { | |
135 | if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY) | |
136 | return true; | |
137 | else | |
138 | return false; | |
139 | } | |
140 | ||
/**
 * ufshcd_get_uic_cmd_result - Get the UIC command result
 *
 * Reads the result code field of UIC command argument register 2,
 * valid after a UIC command has completed.
 */
static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
	       MASK_UIC_COMMAND_RESULT;
}
149 | ||
/**
 * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
 *
 * Returns the raw contents of UIC command argument register 3, which
 * holds the attribute value after a DME_GET/DME_PEER_GET completes.
 */
static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
}
157 | ||
158 | /** | |
159 | * ufshcd_is_device_present - Check if any device connected to | |
160 | * the host controller | |
161 | */ | |
162 | static inline bool ufshcd_is_device_present(struct ufs_hba *hba) | |
163 | { | |
164 | return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & | |
165 | DEVICE_PRESENT) ? true : false; | |
166 | } | |
167 | ||
168 | /** | |
169 | * ufshcd_send_uic_cmd - UFS Interconnect layer command API | |
170 | * | |
171 | */ | |
172 | static int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) | |
173 | { | |
174 | unsigned long start = 0; | |
175 | u32 intr_status; | |
176 | u32 enabled_intr_status; | |
177 | ||
178 | if (!ufshcd_ready_for_uic_cmd(hba)) { | |
179 | dev_err(hba->dev, | |
180 | "Controller not ready to accept UIC commands\n"); | |
181 | return -EIO; | |
182 | } | |
183 | ||
184 | debug("sending uic command:%d\n", uic_cmd->command); | |
185 | ||
186 | /* Write Args */ | |
187 | ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1); | |
188 | ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2); | |
189 | ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3); | |
190 | ||
191 | /* Write UIC Cmd */ | |
192 | ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK, | |
193 | REG_UIC_COMMAND); | |
194 | ||
195 | start = get_timer(0); | |
196 | do { | |
197 | intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); | |
198 | enabled_intr_status = intr_status & hba->intr_mask; | |
199 | ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); | |
200 | ||
201 | if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) { | |
202 | dev_err(hba->dev, | |
203 | "Timedout waiting for UIC response\n"); | |
204 | ||
205 | return -ETIMEDOUT; | |
206 | } | |
207 | ||
208 | if (enabled_intr_status & UFSHCD_ERROR_MASK) { | |
209 | dev_err(hba->dev, "Error in status:%08x\n", | |
210 | enabled_intr_status); | |
211 | ||
212 | return -1; | |
213 | } | |
214 | } while (!(enabled_intr_status & UFSHCD_UIC_MASK)); | |
215 | ||
216 | uic_cmd->argument2 = ufshcd_get_uic_cmd_result(hba); | |
217 | uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba); | |
218 | ||
219 | debug("Sent successfully\n"); | |
220 | ||
221 | return 0; | |
222 | } | |
223 | ||
224 | /** | |
225 | * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET | |
226 | * | |
227 | */ | |
228 | int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, u8 attr_set, | |
229 | u32 mib_val, u8 peer) | |
230 | { | |
231 | struct uic_command uic_cmd = {0}; | |
232 | static const char *const action[] = { | |
233 | "dme-set", | |
234 | "dme-peer-set" | |
235 | }; | |
236 | const char *set = action[!!peer]; | |
237 | int ret; | |
238 | int retries = UFS_UIC_COMMAND_RETRIES; | |
239 | ||
240 | uic_cmd.command = peer ? | |
241 | UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; | |
242 | uic_cmd.argument1 = attr_sel; | |
243 | uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); | |
244 | uic_cmd.argument3 = mib_val; | |
245 | ||
246 | do { | |
247 | /* for peer attributes we retry upon failure */ | |
248 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); | |
249 | if (ret) | |
250 | dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", | |
251 | set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); | |
252 | } while (ret && peer && --retries); | |
253 | ||
254 | if (ret) | |
255 | dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n", | |
256 | set, UIC_GET_ATTR_ID(attr_sel), mib_val, | |
257 | UFS_UIC_COMMAND_RETRIES - retries); | |
258 | ||
259 | return ret; | |
260 | } | |
261 | ||
262 | /** | |
263 | * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET | |
264 | * | |
265 | */ | |
266 | int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, | |
267 | u32 *mib_val, u8 peer) | |
268 | { | |
269 | struct uic_command uic_cmd = {0}; | |
270 | static const char *const action[] = { | |
271 | "dme-get", | |
272 | "dme-peer-get" | |
273 | }; | |
274 | const char *get = action[!!peer]; | |
275 | int ret; | |
276 | int retries = UFS_UIC_COMMAND_RETRIES; | |
277 | ||
278 | uic_cmd.command = peer ? | |
279 | UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; | |
280 | uic_cmd.argument1 = attr_sel; | |
281 | ||
282 | do { | |
283 | /* for peer attributes we retry upon failure */ | |
284 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); | |
285 | if (ret) | |
286 | dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n", | |
287 | get, UIC_GET_ATTR_ID(attr_sel), ret); | |
288 | } while (ret && peer && --retries); | |
289 | ||
290 | if (ret) | |
291 | dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n", | |
292 | get, UIC_GET_ATTR_ID(attr_sel), | |
293 | UFS_UIC_COMMAND_RETRIES - retries); | |
294 | ||
295 | if (mib_val && !ret) | |
296 | *mib_val = uic_cmd.argument3; | |
297 | ||
298 | return ret; | |
299 | } | |
300 | ||
301 | static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) | |
302 | { | |
303 | u32 tx_lanes, i, err = 0; | |
304 | ||
305 | if (!peer) | |
306 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), | |
307 | &tx_lanes); | |
308 | else | |
309 | ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), | |
310 | &tx_lanes); | |
311 | for (i = 0; i < tx_lanes; i++) { | |
312 | if (!peer) | |
313 | err = ufshcd_dme_set(hba, | |
314 | UIC_ARG_MIB_SEL(TX_LCC_ENABLE, | |
315 | UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), | |
316 | 0); | |
317 | else | |
318 | err = ufshcd_dme_peer_set(hba, | |
319 | UIC_ARG_MIB_SEL(TX_LCC_ENABLE, | |
320 | UIC_ARG_MPHY_TX_GEN_SEL_INDEX(i)), | |
321 | 0); | |
322 | if (err) { | |
1b3dab2d | 323 | dev_err(hba->dev, "%s: TX LCC Disable failed, peer = %d, lane = %d, err = %d\n", |
7feafb0a FA |
324 | __func__, peer, i, err); |
325 | break; | |
326 | } | |
327 | } | |
328 | ||
329 | return err; | |
330 | } | |
331 | ||
/* Disable TX LCC on the device (peer) side of the link */
static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
	return ufshcd_disable_tx_lcc(hba, true);
}
336 | ||
337 | /** | |
338 | * ufshcd_dme_link_startup - Notify Unipro to perform link startup | |
339 | * | |
340 | */ | |
341 | static int ufshcd_dme_link_startup(struct ufs_hba *hba) | |
342 | { | |
343 | struct uic_command uic_cmd = {0}; | |
344 | int ret; | |
345 | ||
346 | uic_cmd.command = UIC_CMD_DME_LINK_STARTUP; | |
347 | ||
348 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); | |
349 | if (ret) | |
350 | dev_dbg(hba->dev, | |
351 | "dme-link-startup: error code %d\n", ret); | |
352 | return ret; | |
353 | } | |
354 | ||
/**
 * ufshcd_disable_intr_aggr - Disables interrupt aggregation.
 *
 * Writes 0 to the UTRIACR register so every transfer request completion
 * raises its own interrupt status bit immediately.
 */
static inline void ufshcd_disable_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_writel(hba, 0, REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
}
363 | ||
364 | /** | |
365 | * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY | |
366 | */ | |
367 | static inline int ufshcd_get_lists_status(u32 reg) | |
368 | { | |
369 | return !((reg & UFSHCD_STATUS_READY) == UFSHCD_STATUS_READY); | |
370 | } | |
371 | ||
/**
 * ufshcd_enable_run_stop_reg - Enable run-stop registers,
 * When run-stop registers are set to 1, it indicates the
 * host controller that it can process the requests
 *
 * Sets the run-stop bit for both the task management request list and
 * the transfer request list.
 */
static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
{
	ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TASK_REQ_LIST_RUN_STOP);
	ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
		      REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
}
384 | ||
/**
 * ufshcd_enable_intr - enable interrupts
 * @hba: per-adapter instance
 * @intrs: interrupt bits to enable in addition to those already set
 *
 * Updates the interrupt enable register and caches the resulting mask
 * in hba->intr_mask (used by the polling loops to filter status bits).
 */
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
	u32 rw;

	if (hba->version == UFSHCI_VERSION_10) {
		/*
		 * On UFSHCI 1.0 only the RW bits may be carried over;
		 * the rest of the register is rebuilt from @intrs.
		 */
		rw = set & INTERRUPT_MASK_RW_VER_10;
		set = rw | ((set ^ intrs) & intrs);
	} else {
		set |= intrs;
	}

	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);

	hba->intr_mask = set;
}
404 | ||
405 | /** | |
406 | * ufshcd_make_hba_operational - Make UFS controller operational | |
407 | * | |
408 | * To bring UFS host controller to operational state, | |
409 | * 1. Enable required interrupts | |
410 | * 2. Configure interrupt aggregation | |
411 | * 3. Program UTRL and UTMRL base address | |
412 | * 4. Configure run-stop-registers | |
413 | * | |
414 | */ | |
415 | static int ufshcd_make_hba_operational(struct ufs_hba *hba) | |
416 | { | |
417 | int err = 0; | |
418 | u32 reg; | |
419 | ||
420 | /* Enable required interrupts */ | |
421 | ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); | |
422 | ||
423 | /* Disable interrupt aggregation */ | |
424 | ufshcd_disable_intr_aggr(hba); | |
425 | ||
426 | /* Configure UTRL and UTMRL base address registers */ | |
427 | ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utrdl), | |
428 | REG_UTP_TRANSFER_REQ_LIST_BASE_L); | |
429 | ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utrdl), | |
430 | REG_UTP_TRANSFER_REQ_LIST_BASE_H); | |
431 | ufshcd_writel(hba, lower_32_bits((dma_addr_t)hba->utmrdl), | |
432 | REG_UTP_TASK_REQ_LIST_BASE_L); | |
433 | ufshcd_writel(hba, upper_32_bits((dma_addr_t)hba->utmrdl), | |
434 | REG_UTP_TASK_REQ_LIST_BASE_H); | |
435 | ||
436 | /* | |
437 | * UCRDY, UTMRLDY and UTRLRDY bits must be 1 | |
438 | */ | |
439 | reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS); | |
440 | if (!(ufshcd_get_lists_status(reg))) { | |
441 | ufshcd_enable_run_stop_reg(hba); | |
442 | } else { | |
443 | dev_err(hba->dev, | |
1b3dab2d | 444 | "Host controller not ready to process requests\n"); |
7feafb0a FA |
445 | err = -EIO; |
446 | goto out; | |
447 | } | |
448 | ||
449 | out: | |
450 | return err; | |
451 | } | |
452 | ||
/**
 * ufshcd_link_startup - Initialize unipro link startup
 *
 * Repeatedly attempts DME link startup (up to DME_LINKSTARTUP_RETRIES
 * per pass), resetting the local UniPro stack between failed attempts.
 * The whole sequence is deliberately executed twice (see
 * link_startup_again) before the link is considered up.
 *
 * Return: 0 on success, -ENXIO if no device is detected, or the error
 * from any of the startup/notify steps.
 */
static int ufshcd_link_startup(struct ufs_hba *hba)
{
	int ret;
	int retries = DME_LINKSTARTUP_RETRIES;
	bool link_startup_again = true;

link_startup:
	do {
		ufshcd_ops_link_startup_notify(hba, PRE_CHANGE);

		ret = ufshcd_dme_link_startup(hba);

		/* check if device is detected by inter-connect layer */
		if (!ret && !ufshcd_is_device_present(hba)) {
			dev_err(hba->dev, "%s: Device not present\n", __func__);
			ret = -ENXIO;
			goto out;
		}

		/*
		 * DME link lost indication is only received when link is up,
		 * but we can't be sure if the link is up until link startup
		 * succeeds. So reset the local Uni-Pro and try again.
		 */
		if (ret && ufshcd_hba_enable(hba))
			goto out;
	} while (ret && retries--);

	if (ret)
		/* failed to get the link up... retire */
		goto out;

	/* run the full startup sequence one more time before proceeding */
	if (link_startup_again) {
		link_startup_again = false;
		retries = DME_LINKSTARTUP_RETRIES;
		goto link_startup;
	}

	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
	ufshcd_init_pwr_info(hba);

	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
		ret = ufshcd_disable_device_tx_lcc(hba);
		if (ret)
			goto out;
	}

	/* Include any host controller configuration via UIC commands */
	ret = ufshcd_ops_link_startup_notify(hba, POST_CHANGE);
	if (ret)
		goto out;

	ret = ufshcd_make_hba_operational(hba);
out:
	if (ret)
		dev_err(hba->dev, "link startup failed %d\n", ret);

	return ret;
}
515 | ||
/**
 * ufshcd_hba_stop - Send controller to reset state
 *
 * Clears the host controller enable (HCE) bit and waits up to 10 ms
 * for the controller to acknowledge the disable.
 */
static inline void ufshcd_hba_stop(struct ufs_hba *hba)
{
	int err;

	ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
	err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
				       CONTROLLER_ENABLE, CONTROLLER_DISABLE,
				       10);
	if (err)
		dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
}
530 | ||
531 | /** | |
532 | * ufshcd_is_hba_active - Get controller state | |
533 | */ | |
534 | static inline bool ufshcd_is_hba_active(struct ufs_hba *hba) | |
535 | { | |
536 | return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & CONTROLLER_ENABLE) | |
537 | ? false : true; | |
538 | } | |
539 | ||
/**
 * ufshcd_hba_start - Start controller initialization sequence
 *
 * Sets the HCE bit; the controller then runs its internal init.
 */
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
}
547 | ||
/**
 * ufshcd_hba_enable - initialize the controller
 *
 * Puts the controller through its HCE reset/enable cycle, then waits
 * (up to ~50 ms in 5 ms steps) for it to report active before enabling
 * the UIC interrupts.
 *
 * Return: 0 on success, -EIO if the controller never became active.
 */
static int ufshcd_hba_enable(struct ufs_hba *hba)
{
	int retry;

	if (!ufshcd_is_hba_active(hba))
		/* change controller state to "reset state" */
		ufshcd_hba_stop(hba);

	ufshcd_ops_hce_enable_notify(hba, PRE_CHANGE);

	/* start controller initialization sequence */
	ufshcd_hba_start(hba);

	/*
	 * To initialize a UFS host controller HCE bit must be set to 1.
	 * During initialization the HCE bit value changes from 1->0->1.
	 * When the host controller completes initialization sequence
	 * it sets the value of HCE bit to 1. The same HCE bit is read back
	 * to check if the controller has completed initialization sequence.
	 * So without this delay the value HCE = 1, set in the previous
	 * instruction might be read back.
	 * This delay can be changed based on the controller.
	 */
	mdelay(1);

	/* wait for the host controller to complete initialization */
	retry = 10;
	while (ufshcd_is_hba_active(hba)) {
		if (retry) {
			retry--;
		} else {
			dev_err(hba->dev, "Controller enable failed\n");
			return -EIO;
		}
		mdelay(5);
	}

	/* enable UIC related interrupts */
	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);

	ufshcd_ops_hce_enable_notify(hba, POST_CHANGE);

	return 0;
}
595 | ||
/**
 * ufshcd_host_memory_configure - configure local reference block with
 * memory offsets
 *
 * Points the single transfer request descriptor at the command
 * descriptor, fills in the response/PRDT offsets (in 32-bit words, as
 * required by the descriptor format), and caches convenience pointers
 * to the request UPIU, response UPIU and PRD table inside hba.
 */
static void ufshcd_host_memory_configure(struct ufs_hba *hba)
{
	struct utp_transfer_req_desc *utrdlp;
	dma_addr_t cmd_desc_dma_addr;
	u16 response_offset;
	u16 prdt_offset;

	utrdlp = hba->utrdl;
	cmd_desc_dma_addr = (dma_addr_t)hba->ucdl;

	utrdlp->command_desc_base_addr_lo =
		cpu_to_le32(lower_32_bits(cmd_desc_dma_addr));
	utrdlp->command_desc_base_addr_hi =
		cpu_to_le32(upper_32_bits(cmd_desc_dma_addr));

	response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu);
	prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);

	/* offsets/lengths are expressed in dwords, hence the >> 2 */
	utrdlp->response_upiu_offset = cpu_to_le16(response_offset >> 2);
	utrdlp->prd_table_offset = cpu_to_le16(prdt_offset >> 2);
	utrdlp->response_upiu_length = cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);

	hba->ucd_req_ptr = (struct utp_upiu_req *)hba->ucdl;
	hba->ucd_rsp_ptr =
		(struct utp_upiu_rsp *)&hba->ucdl->response_upiu;
	hba->ucd_prdt_ptr =
		(struct ufshcd_sg_entry *)&hba->ucdl->prd_table;
}
628 | ||
629 | /** | |
630 | * ufshcd_memory_alloc - allocate memory for host memory space data structures | |
631 | */ | |
632 | static int ufshcd_memory_alloc(struct ufs_hba *hba) | |
633 | { | |
634 | /* Allocate one Transfer Request Descriptor | |
635 | * Should be aligned to 1k boundary. | |
636 | */ | |
637 | hba->utrdl = memalign(1024, sizeof(struct utp_transfer_req_desc)); | |
638 | if (!hba->utrdl) { | |
639 | dev_err(hba->dev, "Transfer Descriptor memory allocation failed\n"); | |
640 | return -ENOMEM; | |
641 | } | |
642 | ||
643 | /* Allocate one Command Descriptor | |
644 | * Should be aligned to 1k boundary. | |
645 | */ | |
646 | hba->ucdl = memalign(1024, sizeof(struct utp_transfer_cmd_desc)); | |
647 | if (!hba->ucdl) { | |
648 | dev_err(hba->dev, "Command descriptor memory allocation failed\n"); | |
649 | return -ENOMEM; | |
650 | } | |
651 | ||
652 | return 0; | |
653 | } | |
654 | ||
655 | /** | |
656 | * ufshcd_get_intr_mask - Get the interrupt bit mask | |
657 | */ | |
658 | static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba) | |
659 | { | |
660 | u32 intr_mask = 0; | |
661 | ||
662 | switch (hba->version) { | |
663 | case UFSHCI_VERSION_10: | |
664 | intr_mask = INTERRUPT_MASK_ALL_VER_10; | |
665 | break; | |
666 | case UFSHCI_VERSION_11: | |
667 | case UFSHCI_VERSION_20: | |
668 | intr_mask = INTERRUPT_MASK_ALL_VER_11; | |
669 | break; | |
670 | case UFSHCI_VERSION_21: | |
671 | default: | |
672 | intr_mask = INTERRUPT_MASK_ALL_VER_21; | |
673 | break; | |
674 | } | |
675 | ||
676 | return intr_mask; | |
677 | } | |
678 | ||
/**
 * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
 *
 * Returns the raw contents of the VER register.
 */
static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
{
	return ufshcd_readl(hba, REG_UFS_VERSION);
}
686 | ||
/**
 * ufshcd_get_upmcrs - Get the power mode change request status
 *
 * Extracts the 3-bit UPMCRS field (bits 10:8 of the controller status
 * register, per the UFSHCI register layout).
 */
static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
{
	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
}
694 | ||
c5b3e5cd MV |
695 | /** |
696 | * ufshcd_cache_flush_and_invalidate - Flush and invalidate cache | |
697 | * | |
698 | * Flush and invalidate cache in aligned address..address+size range. | |
699 | * The invalidation is in place to avoid stale data in cache. | |
700 | */ | |
701 | static void ufshcd_cache_flush_and_invalidate(void *addr, unsigned long size) | |
702 | { | |
703 | uintptr_t aaddr = (uintptr_t)addr & ~(ARCH_DMA_MINALIGN - 1); | |
704 | unsigned long asize = ALIGN(size, ARCH_DMA_MINALIGN); | |
705 | ||
706 | flush_dcache_range(aaddr, aaddr + asize); | |
707 | invalidate_dcache_range(aaddr, aaddr + asize); | |
708 | } | |
709 | ||
7feafb0a FA |
710 | /** |
711 | * ufshcd_prepare_req_desc_hdr() - Fills the requests header | |
712 | * descriptor according to request | |
713 | */ | |
7f26fcbe | 714 | static void ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, |
7feafb0a FA |
715 | u32 *upiu_flags, |
716 | enum dma_data_direction cmd_dir) | |
717 | { | |
7f26fcbe | 718 | struct utp_transfer_req_desc *req_desc = hba->utrdl; |
7feafb0a FA |
719 | u32 data_direction; |
720 | u32 dword_0; | |
721 | ||
722 | if (cmd_dir == DMA_FROM_DEVICE) { | |
723 | data_direction = UTP_DEVICE_TO_HOST; | |
724 | *upiu_flags = UPIU_CMD_FLAGS_READ; | |
725 | } else if (cmd_dir == DMA_TO_DEVICE) { | |
726 | data_direction = UTP_HOST_TO_DEVICE; | |
727 | *upiu_flags = UPIU_CMD_FLAGS_WRITE; | |
728 | } else { | |
729 | data_direction = UTP_NO_DATA_TRANSFER; | |
730 | *upiu_flags = UPIU_CMD_FLAGS_NONE; | |
731 | } | |
732 | ||
733 | dword_0 = data_direction | (0x1 << UPIU_COMMAND_TYPE_OFFSET); | |
734 | ||
735 | /* Enable Interrupt for command */ | |
736 | dword_0 |= UTP_REQ_DESC_INT_CMD; | |
737 | ||
738 | /* Transfer request descriptor header fields */ | |
739 | req_desc->header.dword_0 = cpu_to_le32(dword_0); | |
740 | /* dword_1 is reserved, hence it is set to 0 */ | |
741 | req_desc->header.dword_1 = 0; | |
742 | /* | |
743 | * assigning invalid value for command status. Controller | |
744 | * updates OCS on command completion, with the command | |
745 | * status | |
746 | */ | |
747 | req_desc->header.dword_2 = | |
748 | cpu_to_le32(OCS_INVALID_COMMAND_STATUS); | |
749 | /* dword_3 is reserved, hence it is set to 0 */ | |
750 | req_desc->header.dword_3 = 0; | |
751 | ||
752 | req_desc->prd_table_length = 0; | |
c5b3e5cd MV |
753 | |
754 | ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc)); | |
7feafb0a FA |
755 | } |
756 | ||
/*
 * ufshcd_prepare_utp_query_req_upiu - build a Query Request UPIU in the
 * command descriptor from hba->dev_cmd.query, zero the response UPIU,
 * and flush both to memory for the controller.
 */
static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
					      u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
	struct ufs_query *query = &hba->dev_cmd.query;
	u16 len = be16_to_cpu(query->request.upiu_req.length);

	/* Query request header */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_QUERY_REQ,
				  upiu_flags, 0, TASK_TAG);
	ucd_req_ptr->header.dword_1 =
		UPIU_HEADER_DWORD(0, query->request.query_func,
				  0, 0);

	/* Data segment length only need for WRITE_DESC */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
		ucd_req_ptr->header.dword_2 =
			UPIU_HEADER_DWORD(0, 0, (len >> 8), (u8)len);
	else
		ucd_req_ptr->header.dword_2 = 0;

	/* Copy the Query Request buffer as is */
	memcpy(&ucd_req_ptr->qr, &query->request.upiu_req, QUERY_OSF_SIZE);

	/*
	 * Copy the Descriptor. For WRITE_DESC the descriptor follows the
	 * UPIU, so the flushed region is doubled to cover it.
	 */
	if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC) {
		memcpy(ucd_req_ptr + 1, query->descriptor, len);
		ufshcd_cache_flush_and_invalidate(ucd_req_ptr, 2 * sizeof(*ucd_req_ptr));
	} else {
		ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
	}

	/* Clear the response UPIU the device will fill in */
	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
	ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
}
793 | ||
/*
 * ufshcd_prepare_utp_nop_upiu - build a NOP OUT UPIU in the command
 * descriptor, zero the response UPIU, and flush both for the controller.
 */
static inline void ufshcd_prepare_utp_nop_upiu(struct ufs_hba *hba)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;

	memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
		UPIU_HEADER_DWORD(UPIU_TRANSACTION_NOP_OUT, 0, 0, TASK_TAG);
	/* clear rest of the fields of basic header */
	ucd_req_ptr->header.dword_1 = 0;
	ucd_req_ptr->header.dword_2 = 0;

	/* Clear the response UPIU the device will fill in */
	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));

	ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
	ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
}
812 | ||
813 | /** | |
814 | * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU) | |
815 | * for Device Management Purposes | |
816 | */ | |
817 | static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, | |
818 | enum dev_cmd_type cmd_type) | |
819 | { | |
820 | u32 upiu_flags; | |
821 | int ret = 0; | |
7feafb0a FA |
822 | |
823 | hba->dev_cmd.type = cmd_type; | |
824 | ||
7f26fcbe | 825 | ufshcd_prepare_req_desc_hdr(hba, &upiu_flags, DMA_NONE); |
7feafb0a FA |
826 | switch (cmd_type) { |
827 | case DEV_CMD_TYPE_QUERY: | |
828 | ufshcd_prepare_utp_query_req_upiu(hba, upiu_flags); | |
829 | break; | |
830 | case DEV_CMD_TYPE_NOP: | |
831 | ufshcd_prepare_utp_nop_upiu(hba); | |
832 | break; | |
833 | default: | |
834 | ret = -EINVAL; | |
835 | } | |
836 | ||
837 | return ret; | |
838 | } | |
839 | ||
840 | static int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) | |
841 | { | |
842 | unsigned long start; | |
843 | u32 intr_status; | |
844 | u32 enabled_intr_status; | |
845 | ||
846 | ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); | |
847 | ||
848 | start = get_timer(0); | |
849 | do { | |
850 | intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); | |
851 | enabled_intr_status = intr_status & hba->intr_mask; | |
852 | ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); | |
853 | ||
854 | if (get_timer(start) > QUERY_REQ_TIMEOUT) { | |
855 | dev_err(hba->dev, | |
856 | "Timedout waiting for UTP response\n"); | |
857 | ||
858 | return -ETIMEDOUT; | |
859 | } | |
860 | ||
861 | if (enabled_intr_status & UFSHCD_ERROR_MASK) { | |
862 | dev_err(hba->dev, "Error in status:%08x\n", | |
863 | enabled_intr_status); | |
864 | ||
865 | return -1; | |
866 | } | |
867 | } while (!(enabled_intr_status & UTP_TRANSFER_REQ_COMPL)); | |
868 | ||
869 | return 0; | |
870 | } | |
871 | ||
/**
 * ufshcd_get_req_rsp - returns the TR response transaction type
 *
 * The transaction type lives in the top byte of the (big-endian)
 * response UPIU header dword 0.
 */
static inline int ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}
879 | ||
/**
 * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
 *
 * Reads the OCS field from the transfer request descriptor that the
 * controller updates on completion. 0 means success.
 */
static inline int ufshcd_get_tr_ocs(struct ufs_hba *hba)
{
	struct utp_transfer_req_desc *req_desc = hba->utrdl;

	return le32_to_cpu(req_desc->header.dword_2) & MASK_OCS;
}
890 | ||
/* Extract the result field from the response UPIU header (dword 1) */
static inline int ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
{
	return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
}
895 | ||
/*
 * ufshcd_check_query_response - extract and cache the query response
 * code from the response UPIU.
 *
 * Return: the response code; 0 indicates success.
 */
static int ufshcd_check_query_response(struct ufs_hba *hba)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	/* Get the UPIU response */
	query_res->response = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr) >>
			      UPIU_RSP_CODE_OFFSET;
	return query_res->response;
}
905 | ||
/**
 * ufshcd_copy_query_response() - Copy the Query Response and the data
 * descriptor
 *
 * Copies the query response OSF area into hba->dev_cmd.query.response
 * and, for READ_DESC with a caller-supplied buffer, copies the
 * descriptor that follows the response UPIU.
 *
 * Return: 0 on success, -EINVAL if the device returned more descriptor
 * bytes than the caller's buffer can hold.
 */
static int ufshcd_copy_query_response(struct ufs_hba *hba)
{
	struct ufs_query_res *query_res = &hba->dev_cmd.query.response;

	memcpy(&query_res->upiu_res, &hba->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);

	/* Get the descriptor */
	if (hba->dev_cmd.query.descriptor &&
	    hba->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
		/* descriptor data starts right after the response UPIU */
		u8 *descp = (u8 *)hba->ucd_rsp_ptr +
			    GENERAL_UPIU_REQUEST_SIZE;
		u16 resp_len;
		u16 buf_len;

		/* data segment length */
		resp_len = be32_to_cpu(hba->ucd_rsp_ptr->header.dword_2) &
			   MASK_QUERY_DATA_SEG_LEN;
		buf_len =
			be16_to_cpu(hba->dev_cmd.query.request.upiu_req.length);
		if (likely(buf_len >= resp_len)) {
			memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
		} else {
			dev_warn(hba->dev,
				 "%s: Response size is bigger than buffer\n",
				 __func__);
			return -EINVAL;
		}
	}

	return 0;
}
941 | ||
/**
 * ufshcd_exec_dev_cmd - API for sending device management requests
 *
 * Builds a device-management UPIU of @cmd_type (NOP or query), issues it
 * with the single shared TASK_TAG, checks the Overall Command Status and
 * then dispatches on the response transaction type.
 *
 * NOTE(review): the @timeout parameter is currently unused in this port —
 * callers still pass it (e.g. QUERY_REQ_TIMEOUT / NOP_OUT_TIMEOUT); the
 * actual wait presumably happens inside ufshcd_send_command() — confirm.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type,
			       int timeout)
{
	int err;
	int resp;

	err = ufshcd_comp_devman_upiu(hba, cmd_type);
	if (err)
		return err;

	err = ufshcd_send_command(hba, TASK_TAG);
	if (err)
		return err;

	/* Non-zero OCS means the controller reported a transfer problem */
	err = ufshcd_get_tr_ocs(hba);
	if (err) {
		dev_err(hba->dev, "Error in OCS:%d\n", err);
		return -EINVAL;
	}

	resp = ufshcd_get_req_rsp(hba->ucd_rsp_ptr);
	switch (resp) {
	case UPIU_TRANSACTION_NOP_IN:
		break;
	case UPIU_TRANSACTION_QUERY_RSP:
		err = ufshcd_check_query_response(hba);
		if (!err)
			err = ufshcd_copy_query_response(hba);
		break;
	case UPIU_TRANSACTION_REJECT_UPIU:
		/* TODO: handle Reject UPIU Response */
		err = -EPERM;
		dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
			__func__);
		break;
	default:
		err = -EINVAL;
		dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
			__func__, resp);
	}

	return err;
}
988 | ||
989 | /** | |
990 | * ufshcd_init_query() - init the query response and request parameters | |
991 | */ | |
992 | static inline void ufshcd_init_query(struct ufs_hba *hba, | |
993 | struct ufs_query_req **request, | |
994 | struct ufs_query_res **response, | |
995 | enum query_opcode opcode, | |
996 | u8 idn, u8 index, u8 selector) | |
997 | { | |
998 | *request = &hba->dev_cmd.query.request; | |
999 | *response = &hba->dev_cmd.query.response; | |
1000 | memset(*request, 0, sizeof(struct ufs_query_req)); | |
1001 | memset(*response, 0, sizeof(struct ufs_query_res)); | |
1002 | (*request)->upiu_req.opcode = opcode; | |
1003 | (*request)->upiu_req.idn = idn; | |
1004 | (*request)->upiu_req.index = index; | |
1005 | (*request)->upiu_req.selector = selector; | |
1006 | } | |
1007 | ||
/**
 * ufshcd_query_flag() - API function for sending flag query requests
 *
 * Issues a SET/CLEAR/TOGGLE/READ flag query for @idn. For READ_FLAG the
 * caller must supply @flag_res; on success the flag value (bit 0 of the
 * response value field) is stored there.
 *
 * Return: 0 on success, -EINVAL for bad arguments or opcode, otherwise the
 * error from the device command execution.
 */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
		      enum flag_idn idn, bool *flag_res)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err, index = 0, selector = 0;
	int timeout = QUERY_REQ_TIMEOUT;

	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			  selector);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_SET_FLAG:
	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_FLAG:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		if (!flag_res) {
			/* No dummy reads */
			dev_err(hba->dev, "%s: Invalid argument for read request\n",
				__func__);
			err = -EINVAL;
			goto out;
		}
		break;
	default:
		dev_err(hba->dev,
			"%s: Expected query flag opcode but got = %d\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);

	if (err) {
		dev_err(hba->dev,
			"%s: Sending flag query for idn %d failed, err = %d\n",
			__func__, idn, err);
		goto out;
	}

	/* Flag value is carried in bit 0 of the big-endian value field */
	if (flag_res)
		*flag_res = (be32_to_cpu(response->upiu_res.value) &
			     MASK_QUERY_UPIU_FLAG_LOC) & 0x1;

out:
	return err;
}
1062 | ||
1063 | static int ufshcd_query_flag_retry(struct ufs_hba *hba, | |
1064 | enum query_opcode opcode, | |
1065 | enum flag_idn idn, bool *flag_res) | |
1066 | { | |
1067 | int ret; | |
1068 | int retries; | |
1069 | ||
1070 | for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) { | |
1071 | ret = ufshcd_query_flag(hba, opcode, idn, flag_res); | |
1072 | if (ret) | |
1073 | dev_dbg(hba->dev, | |
1074 | "%s: failed with error %d, retries %d\n", | |
1075 | __func__, ret, retries); | |
1076 | else | |
1077 | break; | |
1078 | } | |
1079 | ||
1080 | if (ret) | |
1081 | dev_err(hba->dev, | |
1082 | "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n", | |
1083 | __func__, opcode, idn, ret, retries); | |
1084 | return ret; | |
1085 | } | |
1086 | ||
/**
 * __ufshcd_query_descriptor - issue a single READ_DESC/WRITE_DESC query
 *
 * Validates @desc_buf and *@buf_len, registers the buffer so that
 * ufshcd_copy_query_response() copies the payload into it, executes the
 * query, and on success writes the actual descriptor length back through
 * @buf_len.
 *
 * NOTE(review): on the error path hba->dev_cmd.query.descriptor is left
 * pointing at @desc_buf (it is only cleared on success) — presumably
 * harmless because every query re-runs ufshcd_init_query(), but verify.
 *
 * Return: 0 on success, -EINVAL for bad arguments/opcode, otherwise the
 * error from the device command execution.
 */
static int __ufshcd_query_descriptor(struct ufs_hba *hba,
				     enum query_opcode opcode,
				     enum desc_idn idn, u8 index, u8 selector,
				     u8 *desc_buf, int *buf_len)
{
	struct ufs_query_req *request = NULL;
	struct ufs_query_res *response = NULL;
	int err;

	if (!desc_buf) {
		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
			__func__, *buf_len);
		err = -EINVAL;
		goto out;
	}

	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
			  selector);
	/* Register the destination so the response copy lands in desc_buf */
	hba->dev_cmd.query.descriptor = desc_buf;
	request->upiu_req.length = cpu_to_be16(*buf_len);

	switch (opcode) {
	case UPIU_QUERY_OPCODE_WRITE_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
		break;
	case UPIU_QUERY_OPCODE_READ_DESC:
		request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
		break;
	default:
		dev_err(hba->dev, "%s: Expected query descriptor opcode but got = 0x%.2x\n",
			__func__, opcode);
		err = -EINVAL;
		goto out;
	}

	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);

	if (err) {
		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
			__func__, opcode, idn, index, err);
		goto out;
	}

	hba->dev_cmd.query.descriptor = NULL;
	/* Report the length the device actually returned */
	*buf_len = be16_to_cpu(response->upiu_res.length);

out:
	return err;
}
1143 | ||
1144 | /** | |
1145 | * ufshcd_query_descriptor_retry - API function for sending descriptor requests | |
1146 | */ | |
1147 | int ufshcd_query_descriptor_retry(struct ufs_hba *hba, enum query_opcode opcode, | |
1148 | enum desc_idn idn, u8 index, u8 selector, | |
1149 | u8 *desc_buf, int *buf_len) | |
1150 | { | |
1151 | int err; | |
1152 | int retries; | |
1153 | ||
1154 | for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { | |
1155 | err = __ufshcd_query_descriptor(hba, opcode, idn, index, | |
1156 | selector, desc_buf, buf_len); | |
1157 | if (!err || err == -EINVAL) | |
1158 | break; | |
1159 | } | |
1160 | ||
1161 | return err; | |
1162 | } | |
1163 | ||
/**
 * ufshcd_read_desc_length - read the specified descriptor length from header
 *
 * Reads only the QUERY_DESC_HDR_SIZE-byte header of descriptor @desc_id /
 * @desc_index and extracts its advertised total length into @desc_length.
 * Also sanity-checks that the descriptor type byte in the header matches
 * the requested @desc_id.
 *
 * Return: 0 on success, -EINVAL on bad id or header mismatch, otherwise
 * the query error. *@desc_length is written even on header mismatch.
 */
static int ufshcd_read_desc_length(struct ufs_hba *hba, enum desc_idn desc_id,
				   int desc_index, int *desc_length)
{
	int ret;
	u8 header[QUERY_DESC_HDR_SIZE];
	int header_len = QUERY_DESC_HDR_SIZE;

	if (desc_id >= QUERY_DESC_IDN_MAX)
		return -EINVAL;

	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
					    desc_id, desc_index, 0, header,
					    &header_len);

	if (ret) {
		dev_err(hba->dev, "%s: Failed to get descriptor header id %d\n",
			__func__, desc_id);
		return ret;
	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch\n",
			 __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
			 desc_id);
		ret = -EINVAL;
	}

	/* Total descriptor length as advertised by the device */
	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];

	return ret;
}
1196 | ||
1197 | static void ufshcd_init_desc_sizes(struct ufs_hba *hba) | |
1198 | { | |
1199 | int err; | |
1200 | ||
1201 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0, | |
1202 | &hba->desc_size.dev_desc); | |
1203 | if (err) | |
1204 | hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; | |
1205 | ||
1206 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0, | |
1207 | &hba->desc_size.pwr_desc); | |
1208 | if (err) | |
1209 | hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; | |
1210 | ||
1211 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0, | |
1212 | &hba->desc_size.interc_desc); | |
1213 | if (err) | |
1214 | hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; | |
1215 | ||
1216 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0, | |
1217 | &hba->desc_size.conf_desc); | |
1218 | if (err) | |
1219 | hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; | |
1220 | ||
1221 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0, | |
1222 | &hba->desc_size.unit_desc); | |
1223 | if (err) | |
1224 | hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; | |
1225 | ||
1226 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0, | |
1227 | &hba->desc_size.geom_desc); | |
1228 | if (err) | |
1229 | hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; | |
1230 | ||
1231 | err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0, | |
1232 | &hba->desc_size.hlth_desc); | |
1233 | if (err) | |
1234 | hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; | |
1235 | } | |
1236 | ||
1237 | /** | |
1238 | * ufshcd_map_desc_id_to_length - map descriptor IDN to its length | |
1239 | * | |
1240 | */ | |
1241 | int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, | |
1242 | int *desc_len) | |
1243 | { | |
1244 | switch (desc_id) { | |
1245 | case QUERY_DESC_IDN_DEVICE: | |
1246 | *desc_len = hba->desc_size.dev_desc; | |
1247 | break; | |
1248 | case QUERY_DESC_IDN_POWER: | |
1249 | *desc_len = hba->desc_size.pwr_desc; | |
1250 | break; | |
1251 | case QUERY_DESC_IDN_GEOMETRY: | |
1252 | *desc_len = hba->desc_size.geom_desc; | |
1253 | break; | |
1254 | case QUERY_DESC_IDN_CONFIGURATION: | |
1255 | *desc_len = hba->desc_size.conf_desc; | |
1256 | break; | |
1257 | case QUERY_DESC_IDN_UNIT: | |
1258 | *desc_len = hba->desc_size.unit_desc; | |
1259 | break; | |
1260 | case QUERY_DESC_IDN_INTERCONNECT: | |
1261 | *desc_len = hba->desc_size.interc_desc; | |
1262 | break; | |
1263 | case QUERY_DESC_IDN_STRING: | |
1264 | *desc_len = QUERY_DESC_MAX_SIZE; | |
1265 | break; | |
1266 | case QUERY_DESC_IDN_HEALTH: | |
1267 | *desc_len = hba->desc_size.hlth_desc; | |
1268 | break; | |
1269 | case QUERY_DESC_IDN_RFU_0: | |
1270 | case QUERY_DESC_IDN_RFU_1: | |
1271 | *desc_len = 0; | |
1272 | break; | |
1273 | default: | |
1274 | *desc_len = 0; | |
1275 | return -EINVAL; | |
1276 | } | |
1277 | return 0; | |
1278 | } | |
1279 | EXPORT_SYMBOL(ufshcd_map_desc_id_to_length); | |
1280 | ||
1281 | /** | |
1282 | * ufshcd_read_desc_param - read the specified descriptor parameter | |
1283 | * | |
1284 | */ | |
1285 | int ufshcd_read_desc_param(struct ufs_hba *hba, enum desc_idn desc_id, | |
1286 | int desc_index, u8 param_offset, u8 *param_read_buf, | |
1287 | u8 param_size) | |
1288 | { | |
1289 | int ret; | |
1290 | u8 *desc_buf; | |
1291 | int buff_len; | |
1292 | bool is_kmalloc = true; | |
1293 | ||
1294 | /* Safety check */ | |
1295 | if (desc_id >= QUERY_DESC_IDN_MAX || !param_size) | |
1296 | return -EINVAL; | |
1297 | ||
1298 | /* Get the max length of descriptor from structure filled up at probe | |
1299 | * time. | |
1300 | */ | |
1301 | ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len); | |
1302 | ||
1303 | /* Sanity checks */ | |
1304 | if (ret || !buff_len) { | |
1b3dab2d | 1305 | dev_err(hba->dev, "%s: Failed to get full descriptor length\n", |
7feafb0a FA |
1306 | __func__); |
1307 | return ret; | |
1308 | } | |
1309 | ||
1310 | /* Check whether we need temp memory */ | |
1311 | if (param_offset != 0 || param_size < buff_len) { | |
1312 | desc_buf = kmalloc(buff_len, GFP_KERNEL); | |
1313 | if (!desc_buf) | |
1314 | return -ENOMEM; | |
1315 | } else { | |
1316 | desc_buf = param_read_buf; | |
1317 | is_kmalloc = false; | |
1318 | } | |
1319 | ||
1320 | /* Request for full descriptor */ | |
1321 | ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, | |
1322 | desc_id, desc_index, 0, desc_buf, | |
1323 | &buff_len); | |
1324 | ||
1325 | if (ret) { | |
1b3dab2d | 1326 | dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d\n", |
7feafb0a FA |
1327 | __func__, desc_id, desc_index, param_offset, ret); |
1328 | goto out; | |
1329 | } | |
1330 | ||
1331 | /* Sanity check */ | |
1332 | if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) { | |
1b3dab2d | 1333 | dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header\n", |
7feafb0a FA |
1334 | __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]); |
1335 | ret = -EINVAL; | |
1336 | goto out; | |
1337 | } | |
1338 | ||
1339 | /* Check wherher we will not copy more data, than available */ | |
1340 | if (is_kmalloc && param_size > buff_len) | |
1341 | param_size = buff_len; | |
1342 | ||
1343 | if (is_kmalloc) | |
1344 | memcpy(param_read_buf, &desc_buf[param_offset], param_size); | |
1345 | out: | |
1346 | if (is_kmalloc) | |
1347 | kfree(desc_buf); | |
1348 | return ret; | |
1349 | } | |
1350 | ||
/* Replace a non-printable or non-ASCII byte with a space, in place. */
static inline void ufshcd_remove_non_printable(uint8_t *val)
{
	bool printable;

	if (!val)
		return;

	printable = (*val >= 0x20 && *val <= 0x7e);
	if (!printable)
		*val = ' ';
}
1360 | ||
/**
 * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
 * state) and waits for it to take effect.
 *
 * Sends @cmd and then polls the host's UPMCRS field until it reports
 * PWR_LOCAL, or until UFS_UIC_CMD_TIMEOUT ms elapse.
 *
 * Return: 0 on success; the UIC command error on send failure; on timeout,
 * the last non-PWR_OK status value, or -1 if the status was PWR_OK.
 */
static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
{
	unsigned long start = 0;
	u8 status;
	int ret;

	ret = ufshcd_send_uic_cmd(hba, cmd);
	if (ret) {
		dev_err(hba->dev,
			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
			cmd->command, cmd->argument3, ret);

		return ret;
	}

	/* Busy-poll UPMCRS; get_timer() gives elapsed ms since 'start' */
	start = get_timer(0);
	do {
		status = ufshcd_get_upmcrs(hba);
		if (get_timer(start) > UFS_UIC_CMD_TIMEOUT) {
			dev_err(hba->dev,
				"pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
				cmd->command, status);
			ret = (status != PWR_OK) ? status : -1;
			break;
		}
	} while (status != PWR_LOCAL);

	return ret;
}
1395 | ||
1396 | /** | |
1397 | * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change | |
1398 | * using DME_SET primitives. | |
1399 | */ | |
1400 | static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) | |
1401 | { | |
1402 | struct uic_command uic_cmd = {0}; | |
1403 | int ret; | |
1404 | ||
1405 | uic_cmd.command = UIC_CMD_DME_SET; | |
1406 | uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); | |
1407 | uic_cmd.argument3 = mode; | |
1408 | ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd); | |
1409 | ||
1410 | return ret; | |
1411 | } | |
1412 | ||
/*
 * Fill the command UPIU for a SCSI request: header, expected transfer
 * length, and the CDB (truncated/zero-padded to UFS_CDB_SIZE). The request
 * and response UPIU areas are then flushed/invalidated so the controller's
 * DMA sees the request and the CPU later sees a fresh response.
 */
static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufs_hba *hba,
				      struct scsi_cmd *pccb, u32 upiu_flags)
{
	struct utp_upiu_req *ucd_req_ptr = hba->ucd_req_ptr;
	unsigned int cdb_len;

	/* command descriptor fields */
	ucd_req_ptr->header.dword_0 =
			UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND, upiu_flags,
					  pccb->lun, TASK_TAG);
	ucd_req_ptr->header.dword_1 =
			UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);

	/* Total EHS length and Data segment length will be zero */
	ucd_req_ptr->header.dword_2 = 0;

	ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(pccb->datalen);

	/* Zero-pad then copy so short CDBs leave no stale bytes behind */
	cdb_len = min_t(unsigned short, pccb->cmdlen, UFS_CDB_SIZE);
	memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE);
	memcpy(ucd_req_ptr->sc.cdb, pccb->cmd, cdb_len);

	memset(hba->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
	ufshcd_cache_flush_and_invalidate(ucd_req_ptr, sizeof(*ucd_req_ptr));
	ufshcd_cache_flush_and_invalidate(hba->ucd_rsp_ptr, sizeof(*hba->ucd_rsp_ptr));
}
1440 | ||
/*
 * Fill one PRDT scatter-gather entry. Callers pass @len as the byte count
 * minus one (length-1 encoding); ORing GENMASK(1, 0) forces the low two
 * bits of the size field set — presumably per the UFSHCI PRDT dword-
 * granularity rule, confirm against the spec.
 */
static inline void prepare_prdt_desc(struct ufshcd_sg_entry *entry,
				     unsigned char *buf, ulong len)
{
	entry->size = cpu_to_le32(len) | GENMASK(1, 0);
	entry->base_addr = cpu_to_le32(lower_32_bits((unsigned long)buf));
	entry->upper_addr = cpu_to_le32(upper_32_bits((unsigned long)buf));
}
1448 | ||
/*
 * Build the PRDT for the data phase of @pccb: perform cache maintenance on
 * the data buffer, split it into MAX_PRDT_ENTRY-sized entries, and record
 * the entry count in the transfer request descriptor.
 */
static void prepare_prdt_table(struct ufs_hba *hba, struct scsi_cmd *pccb)
{
	struct utp_transfer_req_desc *req_desc = hba->utrdl;
	struct ufshcd_sg_entry *prd_table = hba->ucd_prdt_ptr;
	/* Data address rounded down to a cacheline boundary for cache ops */
	uintptr_t aaddr = (uintptr_t)(pccb->pdata) & ~(ARCH_DMA_MINALIGN - 1);
	ulong datalen = pccb->datalen;
	int table_length;
	u8 *buf;
	int i;

	if (!datalen) {
		/* No data phase: publish an empty PRDT and stop */
		req_desc->prd_table_length = 0;
		ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
		return;
	}

	if (pccb->dma_dir == DMA_TO_DEVICE) {	/* Write to device */
		flush_dcache_range(aaddr, aaddr +
				   ALIGN(datalen, ARCH_DMA_MINALIGN));
	}

	/* In any case, invalidate cache to avoid stale data in it. */
	invalidate_dcache_range(aaddr, aaddr +
				ALIGN(datalen, ARCH_DMA_MINALIGN));

	table_length = DIV_ROUND_UP(pccb->datalen, MAX_PRDT_ENTRY);
	buf = pccb->pdata;
	i = table_length;
	/*
	 * Fill all but the last entry with full MAX_PRDT_ENTRY chunks
	 * (lengths are passed in length-1 encoding, hence the "- 1").
	 * The loop runs table_length - 1 times and leaves i == 0.
	 */
	while (--i) {
		prepare_prdt_desc(&prd_table[table_length - i - 1], buf,
				  MAX_PRDT_ENTRY - 1);
		buf += MAX_PRDT_ENTRY;
		datalen -= MAX_PRDT_ENTRY;
	}

	/* Last entry holds whatever remains (again length-1 encoded) */
	prepare_prdt_desc(&prd_table[table_length - i - 1], buf, datalen - 1);

	req_desc->prd_table_length = table_length;
	ufshcd_cache_flush_and_invalidate(prd_table, sizeof(*prd_table) * table_length);
	ufshcd_cache_flush_and_invalidate(req_desc, sizeof(*req_desc));
}
1490 | ||
1491 | static int ufs_scsi_exec(struct udevice *scsi_dev, struct scsi_cmd *pccb) | |
1492 | { | |
1493 | struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent); | |
7feafb0a FA |
1494 | u32 upiu_flags; |
1495 | int ocs, result = 0; | |
1496 | u8 scsi_status; | |
1497 | ||
7f26fcbe | 1498 | ufshcd_prepare_req_desc_hdr(hba, &upiu_flags, pccb->dma_dir); |
7feafb0a FA |
1499 | ufshcd_prepare_utp_scsi_cmd_upiu(hba, pccb, upiu_flags); |
1500 | prepare_prdt_table(hba, pccb); | |
1501 | ||
1502 | ufshcd_send_command(hba, TASK_TAG); | |
1503 | ||
1504 | ocs = ufshcd_get_tr_ocs(hba); | |
1505 | switch (ocs) { | |
1506 | case OCS_SUCCESS: | |
1507 | result = ufshcd_get_req_rsp(hba->ucd_rsp_ptr); | |
1508 | switch (result) { | |
1509 | case UPIU_TRANSACTION_RESPONSE: | |
1510 | result = ufshcd_get_rsp_upiu_result(hba->ucd_rsp_ptr); | |
1511 | ||
1512 | scsi_status = result & MASK_SCSI_STATUS; | |
1513 | if (scsi_status) | |
1514 | return -EINVAL; | |
1515 | ||
1516 | break; | |
1517 | case UPIU_TRANSACTION_REJECT_UPIU: | |
1518 | /* TODO: handle Reject UPIU Response */ | |
1519 | dev_err(hba->dev, | |
1520 | "Reject UPIU not fully implemented\n"); | |
1521 | return -EINVAL; | |
1522 | default: | |
1523 | dev_err(hba->dev, | |
1524 | "Unexpected request response code = %x\n", | |
1525 | result); | |
1526 | return -EINVAL; | |
1527 | } | |
1528 | break; | |
1529 | default: | |
1530 | dev_err(hba->dev, "OCS error from controller = %x\n", ocs); | |
1531 | return -EINVAL; | |
1532 | } | |
1533 | ||
1534 | return 0; | |
1535 | } | |
1536 | ||
1537 | static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id, | |
1538 | int desc_index, u8 *buf, u32 size) | |
1539 | { | |
1540 | return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); | |
1541 | } | |
1542 | ||
1543 | static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) | |
1544 | { | |
1545 | return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); | |
1546 | } | |
1547 | ||
/**
 * ufshcd_read_string_desc - read string descriptor
 *
 * Reads string descriptor @desc_index into @buf. When @ascii is true, the
 * UTF-16 payload is converted to UTF-8 in place (after the header), with
 * non-printable/non-ASCII bytes replaced by spaces, and the length byte is
 * updated to the converted length.
 *
 * Return: 0 on success, -ENOMEM if @size is too small for the converted
 * string or allocation fails, otherwise the query error.
 */
int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index,
			    u8 *buf, u32 size, bool ascii)
{
	int err = 0;

	err = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, desc_index, buf,
			       size);

	if (err) {
		dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
			__func__, QUERY_REQ_RETRIES, err);
		goto out;
	}

	if (ascii) {
		int desc_len;
		int ascii_len;
		int i;
		u8 *buff_ascii;

		/* buf[0] is the descriptor's own length byte */
		desc_len = buf[0];
		/* remove header and divide by 2 to move from UTF16 to UTF8 */
		ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
		if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
			dev_err(hba->dev, "%s: buffer allocated size is too small\n",
				__func__);
			err = -ENOMEM;
			goto out;
		}

		buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
		if (!buff_ascii) {
			err = -ENOMEM;
			goto out;
		}

		/*
		 * the descriptor contains string in UTF16 format
		 * we need to convert to utf-8 so it can be displayed
		 */
		utf16_to_utf8(buff_ascii,
			      (uint16_t *)&buf[QUERY_DESC_HDR_SIZE], ascii_len);

		/* replace non-printable or non-ASCII characters with spaces */
		for (i = 0; i < ascii_len; i++)
			ufshcd_remove_non_printable(&buff_ascii[i]);

		/* Clear the old UTF-16 payload, then drop in the UTF-8 copy */
		memset(buf + QUERY_DESC_HDR_SIZE, 0,
		       size - QUERY_DESC_HDR_SIZE);
		memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
		buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
		kfree(buff_ascii);
	}
out:
	return err;
}
1608 | ||
/*
 * Read the Device descriptor and fill @dev_desc with the manufacturer ID
 * and the product-name string (fetched as a separate string descriptor and
 * converted to ASCII).
 */
static int ufs_get_device_desc(struct ufs_hba *hba,
			       struct ufs_dev_desc *dev_desc)
{
	int err;
	size_t buff_len;
	u8 model_index;
	u8 *desc_buf;

	/* +1 leaves room for the NUL terminator written below */
	buff_len = max_t(size_t, hba->desc_size.dev_desc,
			 QUERY_DESC_MAX_SIZE + 1);
	desc_buf = kmalloc(buff_len, GFP_KERNEL);
	if (!desc_buf) {
		err = -ENOMEM;
		goto out;
	}

	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
		goto out;
	}

	/*
	 * getting vendor (manufacturerID) and Bank Index in big endian
	 * format
	 */
	dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];

	/* Index of the string descriptor holding the product name */
	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];

	/* Zero-pad entire buffer for string termination. */
	memset(desc_buf, 0, buff_len);

	err = ufshcd_read_string_desc(hba, model_index, desc_buf,
				      QUERY_DESC_MAX_SIZE, true/*ASCII*/);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}

	desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
	strlcpy(dev_desc->model, (char *)(desc_buf + QUERY_DESC_HDR_SIZE),
		min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET],
		      MAX_MODEL_LEN));

	/* Null terminate the model string */
	dev_desc->model[MAX_MODEL_LEN] = '\0';

out:
	kfree(desc_buf);
	return err;
}
1664 | ||
1665 | /** | |
1666 | * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device | |
1667 | */ | |
1668 | static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba) | |
1669 | { | |
1670 | struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info; | |
1671 | ||
1672 | if (hba->max_pwr_info.is_valid) | |
1673 | return 0; | |
1674 | ||
f430151e MV |
1675 | if (hba->quirks & UFSHCD_QUIRK_HIBERN_FASTAUTO) { |
1676 | pwr_info->pwr_tx = FASTAUTO_MODE; | |
1677 | pwr_info->pwr_rx = FASTAUTO_MODE; | |
1678 | } else { | |
1679 | pwr_info->pwr_tx = FAST_MODE; | |
1680 | pwr_info->pwr_rx = FAST_MODE; | |
1681 | } | |
7feafb0a FA |
1682 | pwr_info->hs_rate = PA_HS_MODE_B; |
1683 | ||
1684 | /* Get the connected lane count */ | |
1685 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), | |
1686 | &pwr_info->lane_rx); | |
1687 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), | |
1688 | &pwr_info->lane_tx); | |
1689 | ||
1690 | if (!pwr_info->lane_rx || !pwr_info->lane_tx) { | |
1691 | dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n", | |
1692 | __func__, pwr_info->lane_rx, pwr_info->lane_tx); | |
1693 | return -EINVAL; | |
1694 | } | |
1695 | ||
1696 | /* | |
1697 | * First, get the maximum gears of HS speed. | |
1698 | * If a zero value, it means there is no HSGEAR capability. | |
1699 | * Then, get the maximum gears of PWM speed. | |
1700 | */ | |
1701 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx); | |
1702 | if (!pwr_info->gear_rx) { | |
1703 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), | |
1704 | &pwr_info->gear_rx); | |
1705 | if (!pwr_info->gear_rx) { | |
1706 | dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n", | |
1707 | __func__, pwr_info->gear_rx); | |
1708 | return -EINVAL; | |
1709 | } | |
1710 | pwr_info->pwr_rx = SLOW_MODE; | |
1711 | } | |
1712 | ||
1713 | ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), | |
1714 | &pwr_info->gear_tx); | |
1715 | if (!pwr_info->gear_tx) { | |
1716 | ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), | |
1717 | &pwr_info->gear_tx); | |
1718 | if (!pwr_info->gear_tx) { | |
1719 | dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n", | |
1720 | __func__, pwr_info->gear_tx); | |
1721 | return -EINVAL; | |
1722 | } | |
1723 | pwr_info->pwr_tx = SLOW_MODE; | |
1724 | } | |
1725 | ||
1726 | hba->max_pwr_info.is_valid = true; | |
1727 | return 0; | |
1728 | } | |
1729 | ||
/*
 * Program the UniPro PA-layer attributes for @pwr_mode (gears, active
 * lanes, termination, HS series) and then trigger the actual power-mode
 * change. Skips everything if the requested mode is already active. On
 * success the new mode is cached in hba->pwr_info.
 *
 * Return: 0 on success or no-op, otherwise the power-mode-change error.
 */
static int ufshcd_change_power_mode(struct ufs_hba *hba,
				    struct ufs_pa_layer_attr *pwr_mode)
{
	int ret;

	/* if already configured to the requested pwr_mode */
	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
		return 0;
	}

	/*
	 * Configure attributes for power mode change with below.
	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
	 * - PA_HSSERIES
	 */
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
		       pwr_mode->lane_rx);
	/* Termination is required only for the HS (FAST/FASTAUTO) modes */
	if (pwr_mode->pwr_rx == FASTAUTO_MODE || pwr_mode->pwr_rx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);

	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
		       pwr_mode->lane_tx);
	if (pwr_mode->pwr_tx == FASTAUTO_MODE || pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
	else
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);

	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
	    pwr_mode->pwr_rx == FAST_MODE ||
	    pwr_mode->pwr_tx == FAST_MODE)
		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
			       pwr_mode->hs_rate);

	/* RX mode in the high nibble, TX mode in the low nibble */
	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 |
					 pwr_mode->pwr_tx);

	if (ret) {
		dev_err(hba->dev,
			"%s: power mode change failed %d\n", __func__, ret);

		return ret;
	}

	/* Copy new Power Mode to power info */
	memcpy(&hba->pwr_info, pwr_mode, sizeof(struct ufs_pa_layer_attr));

	return ret;
}
1791 | ||
/**
 * ufshcd_verify_dev_init() - Verify device initialization
 * @hba: per-adapter instance
 *
 * Issue a NOP OUT UPIU and wait for the NOP IN response to confirm the
 * device's transport layer is responsive after link startup. Retries up
 * to NOP_OUT_RETRIES times; a timeout (-ETIMEDOUT) is treated as final
 * and ends the loop immediately, other errors are retried.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ufshcd_verify_dev_init(struct ufs_hba *hba)
{
	int retries;
	int err;

	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
					  NOP_OUT_TIMEOUT);
		/* success or hard timeout: stop retrying */
		if (!err || err == -ETIMEDOUT)
			break;

		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
	}

	if (err)
		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);

	return err;
}
1815 | ||
1816 | /** | |
1817 | * ufshcd_complete_dev_init() - checks device readiness | |
1818 | */ | |
1819 | static int ufshcd_complete_dev_init(struct ufs_hba *hba) | |
1820 | { | |
1821 | int i; | |
1822 | int err; | |
1823 | bool flag_res = 1; | |
1824 | ||
1825 | err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG, | |
1826 | QUERY_FLAG_IDN_FDEVICEINIT, NULL); | |
1827 | if (err) { | |
1828 | dev_err(hba->dev, | |
1829 | "%s setting fDeviceInit flag failed with error %d\n", | |
1830 | __func__, err); | |
1831 | goto out; | |
1832 | } | |
1833 | ||
1834 | /* poll for max. 1000 iterations for fDeviceInit flag to clear */ | |
1835 | for (i = 0; i < 1000 && !err && flag_res; i++) | |
1836 | err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, | |
1837 | QUERY_FLAG_IDN_FDEVICEINIT, | |
1838 | &flag_res); | |
1839 | ||
1840 | if (err) | |
1841 | dev_err(hba->dev, | |
1842 | "%s reading fDeviceInit flag failed with error %d\n", | |
1843 | __func__, err); | |
1844 | else if (flag_res) | |
1845 | dev_err(hba->dev, | |
1846 | "%s fDeviceInit was not cleared by the device\n", | |
1847 | __func__); | |
1848 | ||
1849 | out: | |
1850 | return err; | |
1851 | } | |
1852 | ||
1853 | static void ufshcd_def_desc_sizes(struct ufs_hba *hba) | |
1854 | { | |
1855 | hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; | |
1856 | hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; | |
1857 | hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; | |
1858 | hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; | |
1859 | hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; | |
1860 | hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; | |
1861 | hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; | |
1862 | } | |
1863 | ||
/**
 * ufs_start() - bring the UFS device to an operational state
 * @hba: per-adapter instance
 *
 * Post-reset bring-up sequence: start the UniPro link, verify the device
 * answers NOP, complete device initialization (fDeviceInit), read the
 * descriptor sizes and device descriptor, then switch both sides to the
 * maximum supported power mode. A failure to query the max power mode is
 * reported but not fatal; the link stays up in the current mode.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ufs_start(struct ufs_hba *hba)
{
	struct ufs_dev_desc card = {0};
	int ret;

	ret = ufshcd_link_startup(hba);
	if (ret)
		return ret;

	ret = ufshcd_verify_dev_init(hba);
	if (ret)
		return ret;

	ret = ufshcd_complete_dev_init(hba);
	if (ret)
		return ret;

	/* Init check for device descriptor sizes */
	ufshcd_init_desc_sizes(hba);

	ret = ufs_get_device_desc(hba, &card);
	if (ret) {
		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
			__func__, ret);

		return ret;
	}

	if (ufshcd_get_max_pwr_mode(hba)) {
		/* non-fatal: stay in the current (slow) power mode */
		dev_err(hba->dev,
			"%s: Failed getting max supported power mode\n",
			__func__);
	} else {
		ret = ufshcd_change_power_mode(hba, &hba->max_pwr_info.info);
		if (ret) {
			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
				__func__, ret);

			return ret;
		}

		/* Device is usable; report negotiated gear/rate */
		printf("Device at %s up at:", hba->dev->name);
		ufshcd_print_pwr_info(hba);
	}

	return 0;
}
1911 | ||
/**
 * ufshcd_probe() - common probe for UFS host controller drivers
 * @ufs_dev: UFS controller udevice
 * @hba_ops: platform-specific host controller callbacks
 *
 * Maps the controller registers (PCI BAR0 or device tree), reads the
 * capabilities and version registers, allocates and configures host
 * memory structures, enables the host controller, and finally starts
 * the attached UFS device via ufs_start().
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ufshcd_probe(struct udevice *ufs_dev, struct ufs_hba_ops *hba_ops)
{
	struct ufs_hba *hba = dev_get_uclass_priv(ufs_dev);
	struct scsi_plat *scsi_plat;
	struct udevice *scsi_dev;
	void __iomem *mmio_base;
	int err;

	/* A SCSI child must already be bound (see ufs_scsi_bind()) */
	device_find_first_child(ufs_dev, &scsi_dev);
	if (!scsi_dev)
		return -ENODEV;

	scsi_plat = dev_get_uclass_plat(scsi_dev);
	scsi_plat->max_id = UFSHCD_MAX_ID;
	scsi_plat->max_lun = UFS_MAX_LUNS;
	scsi_plat->max_bytes_per_req = UFS_MAX_BYTES;

	hba->dev = ufs_dev;
	hba->ops = hba_ops;

	/* Register space comes from BAR0 on PCI hosts, from DT otherwise */
	if (device_is_on_pci_bus(ufs_dev)) {
		mmio_base = dm_pci_map_bar(ufs_dev, PCI_BASE_ADDRESS_0, 0, 0,
					   PCI_REGION_TYPE, PCI_REGION_MEM);
	} else {
		mmio_base = dev_read_addr_ptr(ufs_dev);
	}
	hba->mmio_base = mmio_base;

	/* Set descriptor lengths to specification defaults */
	ufshcd_def_desc_sizes(hba);

	/* Platform init hook; may set hba->quirks consumed just below */
	ufshcd_ops_init(hba);

	/* Read capabilities registers */
	hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
	if (hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS)
		hba->capabilities &= ~MASK_64_ADDRESSING_SUPPORT;

	/* Get UFS version supported by the controller */
	hba->version = ufshcd_get_ufs_version(hba);
	if (hba->version != UFSHCI_VERSION_10 &&
	    hba->version != UFSHCI_VERSION_11 &&
	    hba->version != UFSHCI_VERSION_20 &&
	    hba->version != UFSHCI_VERSION_21 &&
	    hba->version != UFSHCI_VERSION_30 &&
	    hba->version != UFSHCI_VERSION_31)
		dev_err(hba->dev, "invalid UFS version 0x%x\n",
			hba->version);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		return err;
	}

	/* Configure Local data structures */
	ufshcd_host_memory_configure(hba);

	/*
	 * In order to avoid any spurious interrupt immediately after
	 * registering UFS controller interrupt handler, clear any pending UFS
	 * interrupt status and disable all the UFS interrupts.
	 */
	ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
		      REG_INTERRUPT_STATUS);
	ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);

	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		return err;
	}

	err = ufs_start(hba);
	if (err)
		return err;

	return 0;
}
1995 | ||
/**
 * ufs_scsi_bind() - bind the "ufs_scsi" driver beneath a UFS controller
 * @ufs_dev: parent UFS controller device
 * @scsi_devp: returns the newly bound SCSI child device
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ufs_scsi_bind(struct udevice *ufs_dev, struct udevice **scsi_devp)
{
	return device_bind_driver(ufs_dev, "ufs_scsi", "ufs_scsi",
				  scsi_devp);
}
2003 | ||
91913a1a MV |
#if IS_ENABLED(CONFIG_BOUNCE_BUFFER)
/**
 * ufs_scsi_buffer_aligned() - decide whether a transfer buffer is DMA-able
 * @scsi_dev: SCSI device issuing the transfer
 * @state: bounce buffer state describing the user buffer
 *
 * On controllers with the broken-64-bit-address quirk, buffers that lie
 * above or cross the 32-bit boundary cannot be DMA'd directly and must
 * be bounced.
 *
 * Return: 1 if the buffer may be used directly, 0 if it must be bounced.
 */
static int ufs_scsi_buffer_aligned(struct udevice *scsi_dev, struct bounce_buffer *state)
{
#ifdef CONFIG_PHYS_64BIT
	struct ufs_hba *hba = dev_get_uclass_priv(scsi_dev->parent);
	uintptr_t ubuf = (uintptr_t)state->user_buffer;
	size_t len = state->len_aligned;

	/*
	 * Check if below 32bit boundary.
	 * NOTE(review): the ">> 32" shifts assume uintptr_t is 64-bit;
	 * on a 32-bit build with CONFIG_PHYS_64BIT they would be UB —
	 * confirm this path is only compiled for 64-bit targets.
	 */
	if ((hba->quirks & UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS) &&
	    ((ubuf >> 32) || (ubuf + len) >> 32)) {
		dev_dbg(scsi_dev, "Buffer above 32bit boundary %lx-%lx\n",
			ubuf, ubuf + len);
		return 0;
	}
#endif
	return 1;
}
#endif /* CONFIG_BOUNCE_BUFFER */
2023 | ||
7feafb0a FA |
/* SCSI uclass operations for the SCSI host layered on this UFS driver */
static struct scsi_ops ufs_ops = {
	.exec = ufs_scsi_exec,
#if IS_ENABLED(CONFIG_BOUNCE_BUFFER)
	.buffer_aligned = ufs_scsi_buffer_aligned,
#endif /* CONFIG_BOUNCE_BUFFER */
};
2030 | ||
/**
 * ufs_probe_dev() - probe a single UFS controller by uclass index
 * @index: index of the controller in the UFS uclass
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ufs_probe_dev(int index)
{
	struct udevice *dev;

	return uclass_get_device(UCLASS_UFS, index, &dev);
}
2037 | ||
2038 | int ufs_probe(void) | |
2039 | { | |
2040 | struct udevice *dev; | |
2041 | int ret, i; | |
2042 | ||
2043 | for (i = 0;; i++) { | |
2044 | ret = uclass_get_device(UCLASS_UFS, i, &dev); | |
2045 | if (ret == -ENODEV) | |
2046 | break; | |
2047 | } | |
2048 | ||
2049 | return 0; | |
2050 | } | |
2051 | ||
/* Driver entry for the SCSI host bound on top of the UFS controller */
U_BOOT_DRIVER(ufs_scsi) = {
	.id = UCLASS_SCSI,
	.name = "ufs_scsi",
	.ops = &ufs_ops,
};