From 1d479f5b345e0c3650fec4dddeef9fc6fab30c8b Mon Sep 17 00:00:00 2001
From: Md Sadre Alam <quic_mdalam@quicinc.com>
Date: Wed, 20 Nov 2024 14:45:01 +0530
Subject: [PATCH 2/4] mtd: rawnand: qcom: Add qcom prefix to common api

Add the qcom prefix to all the APIs that will be commonly
used by the SPI NAND driver and the raw NAND driver.

Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
Signed-off-by: Md Sadre Alam <quic_mdalam@quicinc.com>
Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
---
 drivers/mtd/nand/raw/qcom_nandc.c | 320 +++++++++++++++---------------
 1 file changed, 160 insertions(+), 160 deletions(-)

--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -53,7 +53,7 @@
 #define NAND_READ_LOCATION_LAST_CW_2 0xf48
 #define NAND_READ_LOCATION_LAST_CW_3 0xf4c

-/* dummy register offsets, used by write_reg_dma */
+/* dummy register offsets, used by qcom_write_reg_dma */
 #define NAND_DEV_CMD1_RESTORE 0xdead
 #define NAND_DEV_CMD_VLD_RESTORE 0xbeef

@@ -211,7 +211,7 @@

 /*
  * Flags used in DMA descriptor preparation helper functions
- * (i.e. read_reg_dma/write_reg_dma/read_data_dma/write_data_dma)
+ * (i.e. qcom_read_reg_dma/qcom_write_reg_dma/qcom_read_data_dma/qcom_write_data_dma)
  */
 /* Don't set the EOT in current tx BAM sgl */
 #define NAND_BAM_NO_EOT BIT(0)
@@ -550,7 +550,7 @@ struct qcom_nandc_props {
 };

 /* Frees the BAM transaction memory */
-static void free_bam_transaction(struct qcom_nand_controller *nandc)
+static void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
 {
 struct bam_transaction *bam_txn = nandc->bam_txn;

@@ -559,7 +559,7 @@ static void free_bam_transaction(struct

 /* Allocates and Initializes the BAM transaction */
 static struct bam_transaction *
-alloc_bam_transaction(struct qcom_nand_controller *nandc)
+qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
 {
 struct bam_transaction *bam_txn;
 size_t bam_txn_size;
@@ -595,7 +595,7 @@ alloc_bam_transaction(struct qcom_nand_c
 }

 /* Clears the BAM transaction indexes */
-static void clear_bam_transaction(struct qcom_nand_controller *nandc)
+static void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
 {
 struct bam_transaction *bam_txn = nandc->bam_txn;

@@ -614,7 +614,7 @@ static void clear_bam_transaction(struct
 }

 /* Callback for DMA descriptor completion */
-static void qpic_bam_dma_done(void *data)
+static void qcom_qpic_bam_dma_done(void *data)
 {
 struct bam_transaction *bam_txn = data;

@@ -644,7 +644,7 @@ static void nandc_write(struct qcom_nand
 iowrite32(val, nandc->base + offset);
 }

-static void nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
+static void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
 {
 if (!nandc->props->supports_bam)
 return;
@@ -824,9 +824,9 @@ static void update_rw_regs(struct qcom_n
  * for BAM. This descriptor will be added in the NAND DMA descriptor queue
  * which will be submitted to DMA engine.
  */
-static int prepare_bam_async_desc(struct qcom_nand_controller *nandc,
- struct dma_chan *chan,
- unsigned long flags)
+static int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
+ struct dma_chan *chan,
+ unsigned long flags)
 {
 struct desc_info *desc;
 struct scatterlist *sgl;
@@ -903,9 +903,9 @@ static int prepare_bam_async_desc(struct
  * NAND_BAM_NEXT_SGL will be used for starting the separate SGL
  * after the current command element.
  */
-static int prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
- int reg_off, const void *vaddr,
- int size, unsigned int flags)
+static int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr,
+ int size, unsigned int flags)
 {
 int bam_ce_size;
 int i, ret;
@@ -943,9 +943,9 @@ static int prep_bam_dma_desc_cmd(struct
 bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

 if (flags & NAND_BAM_NWD) {
- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
- DMA_PREP_FENCE |
- DMA_PREP_CMD);
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_FENCE |
+ DMA_PREP_CMD);
 if (ret)
 return ret;
 }
@@ -958,9 +958,8 @@ static int prep_bam_dma_desc_cmd(struct
  * Prepares the data descriptor for BAM DMA which will be used for NAND
  * data reads and writes.
  */
-static int prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
- const void *vaddr,
- int size, unsigned int flags)
+static int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
+ const void *vaddr, int size, unsigned int flags)
 {
 int ret;
 struct bam_transaction *bam_txn = nandc->bam_txn;
@@ -979,8 +978,8 @@ static int prep_bam_dma_desc_data(struct
  * is not set, form the DMA descriptor
  */
 if (!(flags & NAND_BAM_NO_EOT)) {
- ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
- DMA_PREP_INTERRUPT);
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+ DMA_PREP_INTERRUPT);
 if (ret)
 return ret;
 }
@@ -989,9 +988,9 @@ static int prep_bam_dma_desc_data(struct
 return 0;
 }

-static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
- int reg_off, const void *vaddr, int size,
- bool flow_control)
+static int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
+ int reg_off, const void *vaddr, int size,
+ bool flow_control)
 {
 struct desc_info *desc;
 struct dma_async_tx_descriptor *dma_desc;
@@ -1069,15 +1068,15 @@ err:
 }

 /*
- * read_reg_dma: prepares a descriptor to read a given number of
+ * qcom_read_reg_dma: prepares a descriptor to read a given number of
  * contiguous registers to the reg_read_buf pointer
  *
  * @first: offset of the first register in the contiguous block
  * @num_regs: number of registers to read
  * @flags: flags to control DMA descriptor preparation
  */
-static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
- int num_regs, unsigned int flags)
+static int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
+ int num_regs, unsigned int flags)
 {
 bool flow_control = false;
 void *vaddr;
@@ -1089,18 +1088,18 @@ static int read_reg_dma(struct qcom_nand
 first = dev_cmd_reg_addr(nandc, first);

 if (nandc->props->supports_bam)
- return prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
+ return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
 num_regs, flags);

 if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
 flow_control = true;

- return prep_adm_dma_desc(nandc, true, first, vaddr,
+ return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
 num_regs * sizeof(u32), flow_control);
 }

 /*
- * write_reg_dma: prepares a descriptor to write a given number of
+ * qcom_write_reg_dma: prepares a descriptor to write a given number of
  * contiguous registers
  *
  * @vaddr: contiguous memory from where register value will
@@ -1109,8 +1108,8 @@ static int read_reg_dma(struct qcom_nand
  * @num_regs: number of registers to write
  * @flags: flags to control DMA descriptor preparation
  */
-static int write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
- int first, int num_regs, unsigned int flags)
+static int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
+ int first, int num_regs, unsigned int flags)
 {
 bool flow_control = false;

@@ -1124,18 +1123,18 @@ static int write_reg_dma(struct qcom_nan
 first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

 if (nandc->props->supports_bam)
- return prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
+ return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
 num_regs, flags);

 if (first == NAND_FLASH_CMD)
 flow_control = true;

- return prep_adm_dma_desc(nandc, false, first, vaddr,
+ return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
 num_regs * sizeof(u32), flow_control);
 }

 /*
- * read_data_dma: prepares a DMA descriptor to transfer data from the
+ * qcom_read_data_dma: prepares a DMA descriptor to transfer data from the
  * controller's internal buffer to the buffer 'vaddr'
  *
  * @reg_off: offset within the controller's data buffer
@@ -1143,17 +1142,17 @@ static int write_reg_dma(struct qcom_nan
  * @size: DMA transaction size in bytes
  * @flags: flags to control DMA descriptor preparation
  */
-static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- const u8 *vaddr, int size, unsigned int flags)
+static int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
 {
 if (nandc->props->supports_bam)
- return prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
+ return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

- return prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
+ return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
 }

 /*
- * write_data_dma: prepares a DMA descriptor to transfer data from
+ * qcom_write_data_dma: prepares a DMA descriptor to transfer data from
  * 'vaddr' to the controller's internal buffer
  *
  * @reg_off: offset within the controller's data buffer
@@ -1161,13 +1160,13 @@ static int read_data_dma(struct qcom_nan
  * @size: DMA transaction size in bytes
  * @flags: flags to control DMA descriptor preparation
  */
-static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
- const u8 *vaddr, int size, unsigned int flags)
+static int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+ const u8 *vaddr, int size, unsigned int flags)
 {
 if (nandc->props->supports_bam)
- return prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
+ return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

- return prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
+ return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
 }

 /*
@@ -1178,14 +1177,14 @@ static void config_nand_page_read(struct
 {
 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

- write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
- write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
 if (!nandc->props->qpic_version2)
- write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
- write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
- NAND_ERASED_CW_DETECT_CFG, 1, 0);
- write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
- NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_clr,
+ NAND_ERASED_CW_DETECT_CFG, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->erased_cw_detect_cfg_set,
+ NAND_ERASED_CW_DETECT_CFG, 1, NAND_ERASED_CW_SET | NAND_BAM_NEXT_SGL);
 }

 /*
@@ -1204,17 +1203,17 @@ config_nand_cw_read(struct nand_chip *ch
 reg = &nandc->regs->read_location_last0;

 if (nandc->props->supports_bam)
- write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, reg, NAND_READ_LOCATION_0, 4, NAND_BAM_NEXT_SGL);

- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

 if (use_ecc) {
- read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
- read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
- NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 2, 0);
+ qcom_read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
 } else {
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
 }
 }

@@ -1238,11 +1237,11 @@ static void config_nand_page_write(struc
 {
 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

- write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
- write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->addr0, NAND_ADDR0, 2, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 3, 0);
 if (!nandc->props->qpic_version2)
- write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
- NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->ecc_buf_cfg, NAND_EBI2_ECC_BUF_CFG, 1,
+ NAND_BAM_NEXT_SGL);
 }

 /*
@@ -1253,17 +1252,18 @@ static void config_nand_cw_write(struct
 {
 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);

- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

- write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
- write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->clrflashstatus, NAND_FLASH_STATUS, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->clrreadstatus, NAND_READ_STATUS, 1,
+ NAND_BAM_NEXT_SGL);
 }

 /* helpers to submit/free our list of dma descriptors */
-static int submit_descs(struct qcom_nand_controller *nandc)
+static int qcom_submit_descs(struct qcom_nand_controller *nandc)
 {
 struct desc_info *desc, *n;
 dma_cookie_t cookie = 0;
@@ -1272,21 +1272,21 @@ static int submit_descs(struct qcom_nand

 if (nandc->props->supports_bam) {
 if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
 if (ret)
 goto err_unmap_free_desc;
 }

 if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->tx_chan,
- DMA_PREP_INTERRUPT);
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
+ DMA_PREP_INTERRUPT);
 if (ret)
 goto err_unmap_free_desc;
 }

 if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
- ret = prepare_bam_async_desc(nandc, nandc->cmd_chan,
- DMA_PREP_CMD);
+ ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
+ DMA_PREP_CMD);
 if (ret)
 goto err_unmap_free_desc;
 }
@@ -1296,7 +1296,7 @@ static int submit_descs(struct qcom_nand
 cookie = dmaengine_submit(desc->dma_desc);

 if (nandc->props->supports_bam) {
- bam_txn->last_cmd_desc->callback = qpic_bam_dma_done;
+ bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
 bam_txn->last_cmd_desc->callback_param = bam_txn;

 dma_async_issue_pending(nandc->tx_chan);
@@ -1314,7 +1314,7 @@ static int submit_descs(struct qcom_nand
 err_unmap_free_desc:
 /*
  * Unmap the dma sg_list and free the desc allocated by both
- * prepare_bam_async_desc() and prep_adm_dma_desc() functions.
+ * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
  */
 list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
 list_del(&desc->node);
@@ -1333,10 +1333,10 @@ err_unmap_free_desc:
 }

 /* reset the register read buffer for next NAND operation */
-static void clear_read_regs(struct qcom_nand_controller *nandc)
+static void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
 {
 nandc->reg_read_pos = 0;
- nandc_dev_to_mem(nandc, false);
+ qcom_nandc_dev_to_mem(nandc, false);
 }

 /*
@@ -1400,7 +1400,7 @@ static int check_flash_errors(struct qco
 struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
 int i;

- nandc_dev_to_mem(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);

 for (i = 0; i < cw_cnt; i++) {
 u32 flash = le32_to_cpu(nandc->reg_read_buf[i]);
@@ -1427,13 +1427,13 @@ qcom_nandc_read_cw_raw(struct mtd_info *
 nand_read_page_op(chip, page, 0, NULL, 0);
 nandc->buf_count = 0;
 nandc->buf_start = 0;
- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);
 host->use_ecc = false;

 if (nandc->props->qpic_version2)
 raw_cw = ecc->steps - 1;

- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
 set_address(host, host->cw_size * cw, page);
 update_rw_regs(host, 1, true, raw_cw);
 config_nand_page_read(chip);
@@ -1466,18 +1466,18 @@ qcom_nandc_read_cw_raw(struct mtd_info *

 config_nand_cw_read(chip, false, raw_cw);

- read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
+ qcom_read_data_dma(nandc, reg_off, data_buf, data_size1, 0);
 reg_off += data_size1;

- read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
+ qcom_read_data_dma(nandc, reg_off, oob_buf, oob_size1, 0);
 reg_off += oob_size1;

- read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
+ qcom_read_data_dma(nandc, reg_off, data_buf + data_size1, data_size2, 0);
 reg_off += data_size2;

- read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);
+ qcom_read_data_dma(nandc, reg_off, oob_buf + oob_size1, oob_size2, 0);

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure to read raw cw %d\n", cw);
 return ret;
@@ -1575,7 +1575,7 @@ static int parse_read_errors(struct qcom
 u8 *data_buf_start = data_buf, *oob_buf_start = oob_buf;

 buf = (struct read_stats *)nandc->reg_read_buf;
- nandc_dev_to_mem(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);

 for (i = 0; i < ecc->steps; i++, buf++) {
 u32 flash, buffer, erased_cw;
@@ -1704,8 +1704,8 @@ static int read_page_ecc(struct qcom_nan
 config_nand_cw_read(chip, true, i);

 if (data_buf)
- read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
- data_size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
+ data_size, 0);

 /*
  * when ecc is enabled, the controller doesn't read the real
@@ -1720,8 +1720,8 @@ static int read_page_ecc(struct qcom_nan
 for (j = 0; j < host->bbm_size; j++)
 *oob_buf++ = 0xff;

- read_data_dma(nandc, FLASH_BUF_ACC + data_size,
- oob_buf, oob_size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
 }

 if (data_buf)
@@ -1730,7 +1730,7 @@ static int read_page_ecc(struct qcom_nan
 oob_buf += oob_size;
 }

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure to read page/oob\n");
 return ret;
@@ -1751,7 +1751,7 @@ static int copy_last_cw(struct qcom_nand
 int size;
 int ret;

- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);

 size = host->use_ecc ? host->cw_data : host->cw_size;

@@ -1763,9 +1763,9 @@ static int copy_last_cw(struct qcom_nand

 config_nand_single_cw_page_read(chip, host->use_ecc, ecc->steps - 1);

- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size, 0);

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret)
 dev_err(nandc->dev, "failed to copy last codeword\n");

@@ -1851,14 +1851,14 @@ static int qcom_nandc_read_page(struct n
 nandc->buf_count = 0;
 nandc->buf_start = 0;
 host->use_ecc = true;
- clear_read_regs(nandc);
+ qcom_clear_read_regs(nandc);
 set_address(host, 0, page);
 update_rw_regs(host, ecc->steps, true, 0);

 data_buf = buf;
 oob_buf = oob_required ? chip->oob_poi : NULL;

- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);

 return read_page_ecc(host, data_buf, oob_buf, page);
 }
@@ -1899,8 +1899,8 @@ static int qcom_nandc_read_oob(struct na
 if (host->nr_boot_partitions)
 qcom_nandc_codeword_fixup(host, page);

- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);

 host->use_ecc = true;
 set_address(host, 0, page);
@@ -1927,8 +1927,8 @@ static int qcom_nandc_write_page(struct
 set_address(host, 0, page);
 nandc->buf_count = 0;
 nandc->buf_start = 0;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);

 data_buf = (u8 *)buf;
 oob_buf = chip->oob_poi;
@@ -1949,8 +1949,8 @@ static int qcom_nandc_write_page(struct
 oob_size = ecc->bytes;
 }

- write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
- i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size,
+ i == (ecc->steps - 1) ? NAND_BAM_NO_EOT : 0);

 /*
  * when ECC is enabled, we don't really need to write anything
@@ -1962,8 +1962,8 @@ static int qcom_nandc_write_page(struct
 if (qcom_nandc_is_last_cw(ecc, i)) {
 oob_buf += host->bbm_size;

- write_data_dma(nandc, FLASH_BUF_ACC + data_size,
- oob_buf, oob_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC + data_size,
+ oob_buf, oob_size, 0);
 }

 config_nand_cw_write(chip);
@@ -1972,7 +1972,7 @@ static int qcom_nandc_write_page(struct
 oob_buf += oob_size;
 }

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure to write page\n");
 return ret;
@@ -1997,8 +1997,8 @@ static int qcom_nandc_write_page_raw(str
 qcom_nandc_codeword_fixup(host, page);

 nand_prog_page_begin_op(chip, page, 0, NULL, 0);
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);

 data_buf = (u8 *)buf;
 oob_buf = chip->oob_poi;
@@ -2024,28 +2024,28 @@ static int qcom_nandc_write_page_raw(str
 oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
 }

- write_data_dma(nandc, reg_off, data_buf, data_size1,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, data_buf, data_size1,
+ NAND_BAM_NO_EOT);
 reg_off += data_size1;
 data_buf += data_size1;

- write_data_dma(nandc, reg_off, oob_buf, oob_size1,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size1,
+ NAND_BAM_NO_EOT);
 reg_off += oob_size1;
 oob_buf += oob_size1;

- write_data_dma(nandc, reg_off, data_buf, data_size2,
- NAND_BAM_NO_EOT);
+ qcom_write_data_dma(nandc, reg_off, data_buf, data_size2,
+ NAND_BAM_NO_EOT);
 reg_off += data_size2;
 data_buf += data_size2;

- write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
+ qcom_write_data_dma(nandc, reg_off, oob_buf, oob_size2, 0);
 oob_buf += oob_size2;

 config_nand_cw_write(chip);
 }

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure to write raw page\n");
 return ret;
@@ -2075,7 +2075,7 @@ static int qcom_nandc_write_oob(struct n
 qcom_nandc_codeword_fixup(host, page);

 host->use_ecc = true;
- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);

 /* calculate the data and oob size for the last codeword/step */
 data_size = ecc->size - ((ecc->steps - 1) << 2);
@@ -2090,11 +2090,11 @@ static int qcom_nandc_write_oob(struct n
 update_rw_regs(host, 1, false, 0);

 config_nand_page_write(chip);
- write_data_dma(nandc, FLASH_BUF_ACC,
- nandc->data_buffer, data_size + oob_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC,
+ nandc->data_buffer, data_size + oob_size, 0);
 config_nand_cw_write(chip);

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure to write oob\n");
 return ret;
@@ -2121,7 +2121,7 @@ static int qcom_nandc_block_bad(struct n
  */
 host->use_ecc = false;

- clear_bam_transaction(nandc);
+ qcom_clear_bam_transaction(nandc);
 ret = copy_last_cw(host, page);
 if (ret)
 goto err;
@@ -2148,8 +2148,8 @@ static int qcom_nandc_block_markbad(stru
 struct nand_ecc_ctrl *ecc = &chip->ecc;
 int page, ret;

- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);

 /*
  * to mark the BBM as bad, we flash the entire last codeword with 0s.
@@ -2166,11 +2166,11 @@ static int qcom_nandc_block_markbad(stru
 update_rw_regs(host, 1, false, ecc->steps - 1);

 config_nand_page_write(chip);
- write_data_dma(nandc, FLASH_BUF_ACC,
- nandc->data_buffer, host->cw_size, 0);
+ qcom_write_data_dma(nandc, FLASH_BUF_ACC,
+ nandc->data_buffer, host->cw_size, 0);
 config_nand_cw_write(chip);

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure to update BBM\n");
 return ret;
@@ -2410,14 +2410,14 @@ static int qcom_nand_attach_chip(struct
 mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
 /* Free the initially allocated BAM transaction for reading the ONFI params */
 if (nandc->props->supports_bam)
- free_bam_transaction(nandc);
+ qcom_free_bam_transaction(nandc);

 nandc->max_cwperpage = max_t(unsigned int, nandc->max_cwperpage,
 cwperpage);

 /* Now allocate the BAM transaction based on updated max_cwperpage */
 if (nandc->props->supports_bam) {
- nandc->bam_txn = alloc_bam_transaction(nandc);
+ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
 if (!nandc->bam_txn) {
 dev_err(nandc->dev,
 "failed to allocate bam transaction\n");
@@ -2617,7 +2617,7 @@ static int qcom_wait_rdy_poll(struct nan
 unsigned long start = jiffies + msecs_to_jiffies(time_ms);
 u32 flash;

- nandc_dev_to_mem(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);

 do {
 flash = le32_to_cpu(nandc->reg_read_buf[0]);
@@ -2657,23 +2657,23 @@ static int qcom_read_status_exec(struct
 nandc->buf_start = 0;
 host->use_ecc = false;

- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);

 nandc->regs->cmd = q_op.cmd_reg;
 nandc->regs->exec = cpu_to_le32(1);

- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure in submitting status descriptor\n");
 goto err_out;
 }

- nandc_dev_to_mem(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);

 for (i = 0; i < num_cw; i++) {
 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
@@ -2714,8 +2714,8 @@ static int qcom_read_id_type_exec(struct
 nandc->buf_start = 0;
 host->use_ecc = false;

- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);

 nandc->regs->cmd = q_op.cmd_reg;
 nandc->regs->addr0 = q_op.addr1_reg;
@@ -2723,12 +2723,12 @@ static int qcom_read_id_type_exec(struct
 nandc->regs->chip_sel = cpu_to_le32(nandc->props->supports_bam ? 0 : DM_EN);
 nandc->regs->exec = cpu_to_le32(1);

- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, 4, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);

- read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_READ_ID, 1, NAND_BAM_NEXT_SGL);

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure in submitting read id descriptor\n");
 goto err_out;
@@ -2738,7 +2738,7 @@ static int qcom_read_id_type_exec(struct
 op_id = q_op.data_instr_idx;
 len = nand_subop_get_data_len(subop, op_id);

- nandc_dev_to_mem(nandc, true);
+ qcom_nandc_dev_to_mem(nandc, true);
 memcpy(instr->ctx.data.buf.in, nandc->reg_read_buf, len);

 err_out:
@@ -2774,20 +2774,20 @@ static int qcom_misc_cmd_type_exec(struc
 nandc->buf_start = 0;
 host->use_ecc = false;

- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);

 nandc->regs->cmd = q_op.cmd_reg;
 nandc->regs->exec = cpu_to_le32(1);

- write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd, NAND_FLASH_CMD, instrs, NAND_BAM_NEXT_SGL);
 if (q_op.cmd_reg == cpu_to_le32(OP_BLOCK_ERASE))
- write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->cfg0, NAND_DEV0_CFG0, 2, NAND_BAM_NEXT_SGL);

- write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
- read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->exec, NAND_EXEC_CMD, 1, NAND_BAM_NEXT_SGL);
+ qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure in submitting misc descriptor\n");
 goto err_out;
@@ -2820,8 +2820,8 @@ static int qcom_param_page_type_exec(str
 nandc->buf_count = 0;
 nandc->buf_start = 0;
 host->use_ecc = false;
- clear_read_regs(nandc);
- clear_bam_transaction(nandc);
+ qcom_clear_read_regs(nandc);
+ qcom_clear_bam_transaction(nandc);

 nandc->regs->cmd = q_op.cmd_reg;
 nandc->regs->addr0 = 0;
@@ -2864,8 +2864,8 @@ static int qcom_param_page_type_exec(str
 nandc_set_read_loc(chip, 0, 0, 0, len, 1);

 if (!nandc->props->qpic_version2) {
- write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
- write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->vld, NAND_DEV_CMD_VLD, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->cmd1, NAND_DEV_CMD1, 1, NAND_BAM_NEXT_SGL);
 }

 nandc->buf_count = len;
@@ -2873,17 +2873,17 @@ static int qcom_param_page_type_exec(str

 config_nand_single_cw_page_read(chip, false, 0);

- read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
- nandc->buf_count, 0);
+ qcom_read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+ nandc->buf_count, 0);

 /* restore CMD1 and VLD regs */
 if (!nandc->props->qpic_version2) {
- write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
- write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
- NAND_BAM_NEXT_SGL);
+ qcom_write_reg_dma(nandc, &nandc->regs->orig_cmd1, NAND_DEV_CMD1_RESTORE, 1, 0);
+ qcom_write_reg_dma(nandc, &nandc->regs->orig_vld, NAND_DEV_CMD_VLD_RESTORE, 1,
+ NAND_BAM_NEXT_SGL);
 }

- ret = submit_descs(nandc);
+ ret = qcom_submit_descs(nandc);
 if (ret) {
 dev_err(nandc->dev, "failure in submitting param page descriptor\n");
 goto err_out;
@@ -3067,7 +3067,7 @@ static int qcom_nandc_alloc(struct qcom_
  * maximum codeword size
  */
 nandc->max_cwperpage = 1;
- nandc->bam_txn = alloc_bam_transaction(nandc);
+ nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
 if (!nandc->bam_txn) {
 dev_err(nandc->dev,
 "failed to allocate bam transaction\n");