1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Image manipulator for Marvell SoCs
4 * supports Kirkwood, Dove, Armada 370, Armada XP, Armada 375, Armada 38x and
5 * Armada 39x
6 *
7 * (C) Copyright 2013 Thomas Petazzoni
8 * <thomas.petazzoni@free-electrons.com>
9 *
10 * (C) Copyright 2022 Pali Rohár <pali@kernel.org>
11 */
12
13 #define OPENSSL_API_COMPAT 0x10101000L
14
15 #include "imagetool.h"
16 #include <limits.h>
17 #include <image.h>
18 #include <stdarg.h>
19 #include <stdint.h>
20 #include "kwbimage.h"
21
22 #include <openssl/bn.h>
23 #include <openssl/rsa.h>
24 #include <openssl/pem.h>
25 #include <openssl/err.h>
26 #include <openssl/evp.h>
27
28 #if OPENSSL_VERSION_NUMBER < 0x10100000L || \
29 (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x2070000fL)
30 static void RSA_get0_key(const RSA *r,
31 const BIGNUM **n, const BIGNUM **e, const BIGNUM **d)
32 {
33 if (n != NULL)
34 *n = r->n;
35 if (e != NULL)
36 *e = r->e;
37 if (d != NULL)
38 *d = r->d;
39 }
40
41 #elif !defined(LIBRESSL_VERSION_NUMBER)
42 void EVP_MD_CTX_cleanup(EVP_MD_CTX *ctx)
43 {
44 EVP_MD_CTX_reset(ctx);
45 }
46 #endif
47
48 /* fls4 - find last (most-significant) bit set in a 4-bit integer */
49 static inline int fls4(int num)
50 {
51 if (num & 0x8)
52 return 4;
53 else if (num & 0x4)
54 return 3;
55 else if (num & 0x2)
56 return 2;
57 else if (num & 0x1)
58 return 1;
59 else
60 return 0;
61 }
62
63 static struct image_cfg_element *image_cfg;
64 static int cfgn;
65 static int verbose_mode;
66
67 struct boot_mode {
68 unsigned int id;
69 const char *name;
70 };
71
72 /*
73 * SHA2-256 hash
74 */
75 struct hash_v1 {
76 uint8_t hash[32];
77 };
78
79 struct boot_mode boot_modes[] = {
80 { IBR_HDR_I2C_ID, "i2c" },
81 { IBR_HDR_SPI_ID, "spi" },
82 { IBR_HDR_NAND_ID, "nand" },
83 { IBR_HDR_SATA_ID, "sata" },
84 { IBR_HDR_PEX_ID, "pex" },
85 { IBR_HDR_UART_ID, "uart" },
86 { IBR_HDR_SDIO_ID, "sdio" },
87 {},
88 };
89
90 struct nand_ecc_mode {
91 unsigned int id;
92 const char *name;
93 };
94
95 struct nand_ecc_mode nand_ecc_modes[] = {
96 { IBR_HDR_ECC_DEFAULT, "default" },
97 { IBR_HDR_ECC_FORCED_HAMMING, "hamming" },
98 { IBR_HDR_ECC_FORCED_RS, "rs" },
99 { IBR_HDR_ECC_DISABLED, "disabled" },
100 {},
101 };
102
103 /* Used to identify an undefined execution or destination address */
104 #define ADDR_INVALID ((uint32_t)-1)
105
106 #define BINARY_MAX_ARGS 255
107
108 /* In-memory representation of a line of the configuration file */
109
110 enum image_cfg_type {
111 IMAGE_CFG_VERSION = 0x1,
112 IMAGE_CFG_BOOT_FROM,
113 IMAGE_CFG_DEST_ADDR,
114 IMAGE_CFG_EXEC_ADDR,
115 IMAGE_CFG_NAND_BLKSZ,
116 IMAGE_CFG_NAND_BADBLK_LOCATION,
117 IMAGE_CFG_NAND_ECC_MODE,
118 IMAGE_CFG_NAND_PAGESZ,
119 IMAGE_CFG_SATA_BLKSZ,
120 IMAGE_CFG_CPU,
121 IMAGE_CFG_BINARY,
122 IMAGE_CFG_DATA,
123 IMAGE_CFG_DATA_DELAY,
124 IMAGE_CFG_BAUDRATE,
125 IMAGE_CFG_UART_PORT,
126 IMAGE_CFG_UART_MPP,
127 IMAGE_CFG_DEBUG,
128 IMAGE_CFG_KAK,
129 IMAGE_CFG_CSK,
130 IMAGE_CFG_CSK_INDEX,
131 IMAGE_CFG_JTAG_DELAY,
132 IMAGE_CFG_BOX_ID,
133 IMAGE_CFG_FLASH_ID,
134 IMAGE_CFG_SEC_COMMON_IMG,
135 IMAGE_CFG_SEC_SPECIALIZED_IMG,
136 IMAGE_CFG_SEC_BOOT_DEV,
137 IMAGE_CFG_SEC_FUSE_DUMP,
138
139 IMAGE_CFG_COUNT
140 } type;
141
142 static const char * const id_strs[] = {
143 [IMAGE_CFG_VERSION] = "VERSION",
144 [IMAGE_CFG_BOOT_FROM] = "BOOT_FROM",
145 [IMAGE_CFG_DEST_ADDR] = "DEST_ADDR",
146 [IMAGE_CFG_EXEC_ADDR] = "EXEC_ADDR",
147 [IMAGE_CFG_NAND_BLKSZ] = "NAND_BLKSZ",
148 [IMAGE_CFG_NAND_BADBLK_LOCATION] = "NAND_BADBLK_LOCATION",
149 [IMAGE_CFG_NAND_ECC_MODE] = "NAND_ECC_MODE",
150 [IMAGE_CFG_NAND_PAGESZ] = "NAND_PAGE_SIZE",
151 [IMAGE_CFG_SATA_BLKSZ] = "SATA_BLKSZ",
152 [IMAGE_CFG_CPU] = "CPU",
153 [IMAGE_CFG_BINARY] = "BINARY",
154 [IMAGE_CFG_DATA] = "DATA",
155 [IMAGE_CFG_DATA_DELAY] = "DATA_DELAY",
156 [IMAGE_CFG_BAUDRATE] = "BAUDRATE",
157 [IMAGE_CFG_UART_PORT] = "UART_PORT",
158 [IMAGE_CFG_UART_MPP] = "UART_MPP",
159 [IMAGE_CFG_DEBUG] = "DEBUG",
160 [IMAGE_CFG_KAK] = "KAK",
161 [IMAGE_CFG_CSK] = "CSK",
162 [IMAGE_CFG_CSK_INDEX] = "CSK_INDEX",
163 [IMAGE_CFG_JTAG_DELAY] = "JTAG_DELAY",
164 [IMAGE_CFG_BOX_ID] = "BOX_ID",
165 [IMAGE_CFG_FLASH_ID] = "FLASH_ID",
166 [IMAGE_CFG_SEC_COMMON_IMG] = "SEC_COMMON_IMG",
167 [IMAGE_CFG_SEC_SPECIALIZED_IMG] = "SEC_SPECIALIZED_IMG",
168 [IMAGE_CFG_SEC_BOOT_DEV] = "SEC_BOOT_DEV",
169 [IMAGE_CFG_SEC_FUSE_DUMP] = "SEC_FUSE_DUMP"
170 };
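
/*
 * For reference, a minimal kwbimage configuration file using the keywords
 * above might look like the following sketch; the addresses, register
 * values and file name are purely illustrative:
 *
 *   VERSION    1
 *   BOOT_FROM  spi
 *   BINARY     binary.0 0000005b 00000068
 *   DATA       0xd0013104 0x00000000
 *   DATA_DELAY SDRAM_SETUP
 *
 * DATA takes a register address and value (both hex), BINARY takes a file
 * name followed by hex arguments (optionally terminated by LOAD_ADDRESS
 * <addr>), and DATA_DELAY takes either SDRAM_SETUP or a delay in ms.
 */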
171
172 struct image_cfg_element {
173 enum image_cfg_type type;
174 union {
175 unsigned int version;
176 unsigned int cpu_sheeva;
177 unsigned int bootfrom;
178 struct {
179 const char *file;
180 unsigned int loadaddr;
181 unsigned int args[BINARY_MAX_ARGS];
182 unsigned int nargs;
183 } binary;
184 unsigned int dstaddr;
185 unsigned int execaddr;
186 unsigned int nandblksz;
187 unsigned int nandbadblklocation;
188 unsigned int nandeccmode;
189 unsigned int nandpagesz;
190 unsigned int satablksz;
191 struct ext_hdr_v0_reg regdata;
192 unsigned int regdata_delay;
193 unsigned int baudrate;
194 unsigned int uart_port;
195 unsigned int uart_mpp;
196 unsigned int debug;
197 const char *key_name;
198 int csk_idx;
199 uint8_t jtag_delay;
200 uint32_t boxid;
201 uint32_t flashid;
202 bool sec_specialized_img;
203 unsigned int sec_boot_dev;
204 const char *name;
205 };
206 };
207
208 #define IMAGE_CFG_ELEMENT_MAX 256
209
210 /*
211 * Utility functions to manipulate boot modes and ECC modes (convert
212 * them back and forth between description strings and the
213 * corresponding numerical identifiers).
214 */
215
216 static const char *image_boot_mode_name(unsigned int id)
217 {
218 int i;
219
220 for (i = 0; boot_modes[i].name; i++)
221 if (boot_modes[i].id == id)
222 return boot_modes[i].name;
223 return NULL;
224 }
225
226 static int image_boot_mode_id(const char *boot_mode_name)
227 {
228 int i;
229
230 for (i = 0; boot_modes[i].name; i++)
231 if (!strcmp(boot_modes[i].name, boot_mode_name))
232 return boot_modes[i].id;
233
234 return -1;
235 }
236
237 static const char *image_nand_ecc_mode_name(unsigned int id)
238 {
239 int i;
240
241 for (i = 0; nand_ecc_modes[i].name; i++)
242 if (nand_ecc_modes[i].id == id)
243 return nand_ecc_modes[i].name;
244
245 return NULL;
246 }
247
248 static int image_nand_ecc_mode_id(const char *nand_ecc_mode_name)
249 {
250 int i;
251
252 for (i = 0; nand_ecc_modes[i].name; i++)
253 if (!strcmp(nand_ecc_modes[i].name, nand_ecc_mode_name))
254 return nand_ecc_modes[i].id;
255 return -1;
256 }
257
258 static struct image_cfg_element *
259 image_find_option(unsigned int optiontype)
260 {
261 int i;
262
263 for (i = 0; i < cfgn; i++) {
264 if (image_cfg[i].type == optiontype)
265 return &image_cfg[i];
266 }
267
268 return NULL;
269 }
270
271 static unsigned int
272 image_count_options(unsigned int optiontype)
273 {
274 int i;
275 unsigned int count = 0;
276
277 for (i = 0; i < cfgn; i++)
278 if (image_cfg[i].type == optiontype)
279 count++;
280
281 return count;
282 }
283
284 static int image_get_csk_index(void)
285 {
286 struct image_cfg_element *e;
287
288 e = image_find_option(IMAGE_CFG_CSK_INDEX);
289 if (!e)
290 return -1;
291
292 return e->csk_idx;
293 }
294
295 static bool image_get_spezialized_img(void)
296 {
297 struct image_cfg_element *e;
298
299 e = image_find_option(IMAGE_CFG_SEC_SPECIALIZED_IMG);
300 if (!e)
301 return false;
302
303 return e->sec_specialized_img;
304 }
305
306 static int image_get_bootfrom(void)
307 {
308 struct image_cfg_element *e;
309
310 e = image_find_option(IMAGE_CFG_BOOT_FROM);
311 if (!e)
312 /* fall back to SPI if BOOT_FROM is not provided */
313 return IBR_HDR_SPI_ID;
314
315 return e->bootfrom;
316 }
317
318 static int image_is_cpu_sheeva(void)
319 {
320 struct image_cfg_element *e;
321
322 e = image_find_option(IMAGE_CFG_CPU);
323 if (!e)
324 return 0;
325
326 return e->cpu_sheeva;
327 }
328
329 /*
330 * Compute an 8-bit checksum of a memory area. This algorithm follows
331 * the requirements of the Marvell SoC BootROM specifications.
332 */
333 static uint8_t image_checksum8(void *start, uint32_t len)
334 {
335 uint8_t csum = 0;
336 uint8_t *p = start;
337
338 /* check len and return zero checksum if invalid */
339 if (!len)
340 return 0;
341
342 do {
343 csum += *p;
344 p++;
345 } while (--len);
346
347 return csum;
348 }
349
350 /*
351 * Verify checksum over a complete header that includes the checksum field.
352 * Return 1 when OK, otherwise 0.
353 */
354 static int main_hdr_checksum_ok(void *hdr)
355 {
356 /* Offsets of checksum in v0 and v1 headers are the same */
357 struct main_hdr_v0 *main_hdr = (struct main_hdr_v0 *)hdr;
358 uint8_t checksum;
359
360 checksum = image_checksum8(hdr, kwbheader_size_for_csum(hdr));
361 /* Calculated checksum includes the header checksum field. Compensate
362 * for that.
363 */
364 checksum -= main_hdr->checksum;
365
366 return checksum == main_hdr->checksum;
367 }
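
/*
 * A short worked example of the compensation above, with hypothetical
 * numbers: if the sum of all header bytes excluding the checksum field is
 * 0x5A (mod 256) and the stored checksum byte is also 0x5A, then
 * image_checksum8() over the whole header returns 0xB4; subtracting the
 * stored 0x5A yields 0x5A again, which matches and the header is accepted.
 */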
368
369 static uint32_t image_checksum32(void *start, uint32_t len)
370 {
371 uint32_t csum = 0;
372 uint32_t *p = start;
373
374 /* check len and return zero checksum if invalid */
375 if (!len)
376 return 0;
377
378 if (len % sizeof(uint32_t)) {
379 fprintf(stderr, "Length %d is not a multiple of %zu\n",
380 len, sizeof(uint32_t));
381 return 0;
382 }
383
384 do {
385 csum += *p;
386 p++;
387 len -= sizeof(uint32_t);
388 } while (len > 0);
389
390 return csum;
391 }
392
393 static unsigned int options_to_baudrate(uint8_t options)
394 {
395 switch (options & 0x7) {
396 case MAIN_HDR_V1_OPT_BAUD_2400:
397 return 2400;
398 case MAIN_HDR_V1_OPT_BAUD_4800:
399 return 4800;
400 case MAIN_HDR_V1_OPT_BAUD_9600:
401 return 9600;
402 case MAIN_HDR_V1_OPT_BAUD_19200:
403 return 19200;
404 case MAIN_HDR_V1_OPT_BAUD_38400:
405 return 38400;
406 case MAIN_HDR_V1_OPT_BAUD_57600:
407 return 57600;
408 case MAIN_HDR_V1_OPT_BAUD_115200:
409 return 115200;
410 case MAIN_HDR_V1_OPT_BAUD_DEFAULT:
411 default:
412 return 0;
413 }
414 }
415
416 static uint8_t baudrate_to_option(unsigned int baudrate)
417 {
418 switch (baudrate) {
419 case 2400:
420 return MAIN_HDR_V1_OPT_BAUD_2400;
421 case 4800:
422 return MAIN_HDR_V1_OPT_BAUD_4800;
423 case 9600:
424 return MAIN_HDR_V1_OPT_BAUD_9600;
425 case 19200:
426 return MAIN_HDR_V1_OPT_BAUD_19200;
427 case 38400:
428 return MAIN_HDR_V1_OPT_BAUD_38400;
429 case 57600:
430 return MAIN_HDR_V1_OPT_BAUD_57600;
431 case 115200:
432 return MAIN_HDR_V1_OPT_BAUD_115200;
433 default:
434 return MAIN_HDR_V1_OPT_BAUD_DEFAULT;
435 }
436 }
437
438 static void kwb_msg(const char *fmt, ...)
439 {
440 if (verbose_mode) {
441 va_list ap;
442
443 va_start(ap, fmt);
444 vfprintf(stdout, fmt, ap);
445 va_end(ap);
446 }
447 }
448
449 static int openssl_err(const char *msg)
450 {
451 unsigned long ssl_err = ERR_get_error();
452
453 fprintf(stderr, "%s", msg);
454 fprintf(stderr, ": %s\n",
455 ERR_error_string(ssl_err, 0));
456
457 return -1;
458 }
459
460 static int kwb_load_rsa_key(const char *keydir, const char *name, RSA **p_rsa)
461 {
462 char path[PATH_MAX];
463 RSA *rsa;
464 FILE *f;
465
466 if (!keydir)
467 keydir = ".";
468
469 snprintf(path, sizeof(path), "%s/%s.key", keydir, name);
470 f = fopen(path, "r");
471 if (!f) {
472 fprintf(stderr, "Couldn't open RSA private key: '%s': %s\n",
473 path, strerror(errno));
474 return -ENOENT;
475 }
476
477 rsa = PEM_read_RSAPrivateKey(f, 0, NULL, "");
478 if (!rsa) {
479 openssl_err("Failure reading private key");
480 fclose(f);
481 return -EPROTO;
482 }
483 fclose(f);
484 *p_rsa = rsa;
485
486 return 0;
487 }
488
489 static int kwb_load_cfg_key(struct image_tool_params *params,
490 unsigned int cfg_option, const char *key_name,
491 RSA **p_key)
492 {
493 struct image_cfg_element *e_key;
494 RSA *key;
495 int res;
496
497 *p_key = NULL;
498
499 e_key = image_find_option(cfg_option);
500 if (!e_key) {
501 fprintf(stderr, "%s not configured\n", key_name);
502 return -ENOENT;
503 }
504
505 res = kwb_load_rsa_key(params->keydir, e_key->key_name, &key);
506 if (res < 0) {
507 fprintf(stderr, "Failed to load %s\n", key_name);
508 return -ENOENT;
509 }
510
511 *p_key = key;
512
513 return 0;
514 }
515
516 static int kwb_load_kak(struct image_tool_params *params, RSA **p_kak)
517 {
518 return kwb_load_cfg_key(params, IMAGE_CFG_KAK, "KAK", p_kak);
519 }
520
521 static int kwb_load_csk(struct image_tool_params *params, RSA **p_csk)
522 {
523 return kwb_load_cfg_key(params, IMAGE_CFG_CSK, "CSK", p_csk);
524 }
525
526 static int kwb_compute_pubkey_hash(struct pubkey_der_v1 *pk,
527 struct hash_v1 *hash)
528 {
529 EVP_MD_CTX *ctx;
530 unsigned int key_size;
531 unsigned int hash_size;
532 int ret = 0;
533
534 if (!pk || !hash || pk->key[0] != 0x30 || pk->key[1] != 0x82)
535 return -EINVAL;
536
537 key_size = (pk->key[2] << 8) + pk->key[3] + 4;
538
539 ctx = EVP_MD_CTX_create();
540 if (!ctx)
541 return openssl_err("EVP context creation failed");
542
543 EVP_MD_CTX_init(ctx);
544 if (!EVP_DigestInit(ctx, EVP_sha256())) {
545 ret = openssl_err("Digest setup failed");
546 goto hash_err_ctx;
547 }
548
549 if (!EVP_DigestUpdate(ctx, pk->key, key_size)) {
550 ret = openssl_err("Hashing data failed");
551 goto hash_err_ctx;
552 }
553
554 if (!EVP_DigestFinal(ctx, hash->hash, &hash_size)) {
555 ret = openssl_err("Could not obtain hash");
556 goto hash_err_ctx;
557 }
558
559 EVP_MD_CTX_cleanup(ctx);
560
561 hash_err_ctx:
562 EVP_MD_CTX_destroy(ctx);
563 return ret;
564 }
565
566 static int kwb_import_pubkey(RSA **key, struct pubkey_der_v1 *src, char *keyname)
567 {
568 RSA *rsa;
569 const unsigned char *ptr;
570
571 if (!key || !src)
572 goto fail;
573
574 ptr = src->key;
575 rsa = d2i_RSAPublicKey(key, &ptr, sizeof(src->key));
576 if (!rsa) {
577 openssl_err("error decoding public key");
578 goto fail;
579 }
580
581 return 0;
582 fail:
583 fprintf(stderr, "Failed to decode %s pubkey\n", keyname);
584 return -EINVAL;
585 }
586
587 static int kwb_export_pubkey(RSA *key, struct pubkey_der_v1 *dst, FILE *hashf,
588 char *keyname)
589 {
590 int size_exp, size_mod, size_seq;
591 const BIGNUM *key_e, *key_n;
592 uint8_t *cur;
593 char *errmsg = "Failed to encode %s\n";
594
595 RSA_get0_key(key, NULL, &key_e, NULL);
596 RSA_get0_key(key, &key_n, NULL, NULL);
597
598 if (!key || !key_e || !key_n || !dst) {
599 fprintf(stderr, "export pk failed: (%p, %p, %p, %p)",
600 key, key_e, key_n, dst);
601 fprintf(stderr, errmsg, keyname);
602 return -EINVAL;
603 }
604
605 /*
606 * According to the specs, the key should be PKCS#1 DER encoded.
607 * Unfortunately the encoding actually required seems to be different:
608 * it violates DER, but still conforms to BER. (The length is always in
609 * long form with a 2-byte length code, and there is no leading zero
610 * when the MSB of the first byte is set.)
611 * So we cannot use the encoding functions provided by OpenSSL and have
612 * to do the encoding manually.
613 */
614
615 size_exp = BN_num_bytes(key_e);
616 size_mod = BN_num_bytes(key_n);
617 size_seq = 4 + size_mod + 4 + size_exp;
618
619 if (size_mod > 256) {
620 fprintf(stderr, "export pk failed: wrong mod size: %d\n",
621 size_mod);
622 fprintf(stderr, errmsg, keyname);
623 return -EINVAL;
624 }
625
626 if (4 + size_seq > sizeof(dst->key)) {
627 fprintf(stderr, "export pk failed: seq too large (%d, %zu)\n",
628 4 + size_seq, sizeof(dst->key));
629 fprintf(stderr, errmsg, keyname);
630 return -ENOBUFS;
631 }
632
633 cur = dst->key;
634
635 /* PKCS#1 (RFC3447) RSAPublicKey structure */
636 *cur++ = 0x30; /* SEQUENCE */
637 *cur++ = 0x82;
638 *cur++ = (size_seq >> 8) & 0xFF;
639 *cur++ = size_seq & 0xFF;
640 /* Modulus */
641 *cur++ = 0x02; /* INTEGER */
642 *cur++ = 0x82;
643 *cur++ = (size_mod >> 8) & 0xFF;
644 *cur++ = size_mod & 0xFF;
645 BN_bn2bin(key_n, cur);
646 cur += size_mod;
647 /* Exponent */
648 *cur++ = 0x02; /* INTEGER */
649 *cur++ = 0x82;
650 *cur++ = (size_exp >> 8) & 0xFF;
651 *cur++ = size_exp & 0xFF;
652 BN_bn2bin(key_e, cur);
653
654 if (hashf) {
655 struct hash_v1 pk_hash;
656 int i;
657 int ret = 0;
658
659 ret = kwb_compute_pubkey_hash(dst, &pk_hash);
660 if (ret < 0) {
661 fprintf(stderr, errmsg, keyname);
662 return ret;
663 }
664
665 fprintf(hashf, "SHA256 = ");
666 for (i = 0 ; i < sizeof(pk_hash.hash); ++i)
667 fprintf(hashf, "%02X", pk_hash.hash[i]);
668 fprintf(hashf, "\n");
669 }
670
671 return 0;
672 }
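
/*
 * For illustration, with a hypothetical 2048-bit modulus (size_mod = 256)
 * and a 3-byte exponent (size_exp = 3), size_seq = 4 + 256 + 4 + 3 = 267 =
 * 0x10B, and the bytes emitted above are laid out as:
 *
 *   30 82 01 0b                  SEQUENCE, length 267 (long form)
 *   02 82 01 00 <256 bytes>      INTEGER, modulus
 *   02 82 00 03 <3 bytes>        INTEGER, exponent
 */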
673
674 static int kwb_sign(RSA *key, void *data, int datasz, struct sig_v1 *sig,
675 char *signame)
676 {
677 EVP_PKEY *evp_key;
678 EVP_MD_CTX *ctx;
679 unsigned int sig_size;
680 int size;
681 int ret = 0;
682
683 evp_key = EVP_PKEY_new();
684 if (!evp_key)
685 return openssl_err("EVP_PKEY object creation failed");
686
687 if (!EVP_PKEY_set1_RSA(evp_key, key)) {
688 ret = openssl_err("EVP key setup failed");
689 goto err_key;
690 }
691
692 size = EVP_PKEY_size(evp_key);
693 if (size > sizeof(sig->sig)) {
694 fprintf(stderr, "Buffer too small for signature (%d bytes)\n",
695 size);
696 ret = -ENOBUFS;
697 goto err_key;
698 }
699
700 ctx = EVP_MD_CTX_create();
701 if (!ctx) {
702 ret = openssl_err("EVP context creation failed");
703 goto err_key;
704 }
705 EVP_MD_CTX_init(ctx);
706 if (!EVP_SignInit(ctx, EVP_sha256())) {
707 ret = openssl_err("Signer setup failed");
708 goto err_ctx;
709 }
710
711 if (!EVP_SignUpdate(ctx, data, datasz)) {
712 ret = openssl_err("Signing data failed");
713 goto err_ctx;
714 }
715
716 if (!EVP_SignFinal(ctx, sig->sig, &sig_size, evp_key)) {
717 ret = openssl_err("Could not obtain signature");
718 goto err_ctx;
719 }
720
721 EVP_MD_CTX_cleanup(ctx);
722 EVP_MD_CTX_destroy(ctx);
723 EVP_PKEY_free(evp_key);
724
725 return 0;
726
727 err_ctx:
728 EVP_MD_CTX_destroy(ctx);
729 err_key:
730 EVP_PKEY_free(evp_key);
731 fprintf(stderr, "Failed to create %s signature\n", signame);
732 return ret;
733 }
734
735 static int kwb_verify(RSA *key, void *data, int datasz, struct sig_v1 *sig,
736 char *signame)
737 {
738 EVP_PKEY *evp_key;
739 EVP_MD_CTX *ctx;
740 int size;
741 int ret = 0;
742
743 evp_key = EVP_PKEY_new();
744 if (!evp_key)
745 return openssl_err("EVP_PKEY object creation failed");
746
747 if (!EVP_PKEY_set1_RSA(evp_key, key)) {
748 ret = openssl_err("EVP key setup failed");
749 goto err_key;
750 }
751
752 size = EVP_PKEY_size(evp_key);
753 if (size > sizeof(sig->sig)) {
754 fprintf(stderr, "Invalid signature size (%d bytes)\n",
755 size);
756 ret = -EINVAL;
757 goto err_key;
758 }
759
760 ctx = EVP_MD_CTX_create();
761 if (!ctx) {
762 ret = openssl_err("EVP context creation failed");
763 goto err_key;
764 }
765 EVP_MD_CTX_init(ctx);
766 if (!EVP_VerifyInit(ctx, EVP_sha256())) {
767 ret = openssl_err("Verifier setup failed");
768 goto err_ctx;
769 }
770
771 if (!EVP_VerifyUpdate(ctx, data, datasz)) {
772 ret = openssl_err("Hashing data failed");
773 goto err_ctx;
774 }
775
776 if (EVP_VerifyFinal(ctx, sig->sig, sizeof(sig->sig), evp_key) != 1) {
777 ret = openssl_err("Could not verify signature");
778 goto err_ctx;
779 }
780
781 EVP_MD_CTX_cleanup(ctx);
782 EVP_MD_CTX_destroy(ctx);
783 EVP_PKEY_free(evp_key);
784
785 return 0;
786
787 err_ctx:
788 EVP_MD_CTX_destroy(ctx);
789 err_key:
790 EVP_PKEY_free(evp_key);
791 fprintf(stderr, "Failed to verify %s signature\n", signame);
792 return ret;
793 }
794
795 static int kwb_sign_and_verify(RSA *key, void *data, int datasz,
796 struct sig_v1 *sig, char *signame)
797 {
798 if (kwb_sign(key, data, datasz, sig, signame) < 0)
799 return -1;
800
801 if (kwb_verify(key, data, datasz, sig, signame) < 0)
802 return -1;
803
804 return 0;
805 }
806
807
808 static int kwb_dump_fuse_cmds_38x(FILE *out, struct secure_hdr_v1 *sec_hdr)
809 {
810 struct hash_v1 kak_pub_hash;
811 struct image_cfg_element *e;
812 unsigned int fuse_line;
813 int i, idx;
814 uint8_t *ptr;
815 uint32_t val;
816 int ret = 0;
817
818 if (!out || !sec_hdr)
819 return -EINVAL;
820
821 ret = kwb_compute_pubkey_hash(&sec_hdr->kak, &kak_pub_hash);
822 if (ret < 0)
823 goto done;
824
825 fprintf(out, "# burn KAK pub key hash\n");
826 ptr = kak_pub_hash.hash;
827 for (fuse_line = 26; fuse_line <= 30; ++fuse_line) {
828 fprintf(out, "fuse prog -y %u 0 ", fuse_line);
829
830 for (i = 4; i-- > 0;)
831 fprintf(out, "%02hx", (ushort)ptr[i]);
832 ptr += 4;
833 fprintf(out, " 00");
834
835 if (fuse_line < 30) {
836 for (i = 3; i-- > 0;)
837 fprintf(out, "%02hx", (ushort)ptr[i]);
838 ptr += 3;
839 } else {
840 fprintf(out, "000000");
841 }
842
843 fprintf(out, " 1\n");
844 }
845
846 fprintf(out, "# burn CSK selection\n");
847
848 idx = image_get_csk_index();
849 if (idx < 0 || idx > 15) {
850 ret = -EINVAL;
851 goto done;
852 }
853 if (idx > 0) {
854 for (fuse_line = 31; fuse_line < 31 + idx; ++fuse_line)
855 fprintf(out, "fuse prog -y %u 0 00000001 00000000 1\n",
856 fuse_line);
857 } else {
858 fprintf(out, "# CSK index is 0; no mods needed\n");
859 }
860
861 e = image_find_option(IMAGE_CFG_BOX_ID);
862 if (e) {
863 fprintf(out, "# set box ID\n");
864 fprintf(out, "fuse prog -y 48 0 %08x 00000000 1\n", e->boxid);
865 }
866
867 e = image_find_option(IMAGE_CFG_FLASH_ID);
868 if (e) {
869 fprintf(out, "# set flash ID\n");
870 fprintf(out, "fuse prog -y 47 0 %08x 00000000 1\n", e->flashid);
871 }
872
873 fprintf(out, "# enable secure mode ");
874 fprintf(out, "(must be the last fuse line written)\n");
875
876 val = 1;
877 e = image_find_option(IMAGE_CFG_SEC_BOOT_DEV);
878 if (!e) {
879 fprintf(stderr, "ERROR: secured mode boot device not given\n");
880 ret = -EINVAL;
881 goto done;
882 }
883
884 if (e->sec_boot_dev > 0xff) {
885 fprintf(stderr, "ERROR: secured mode boot device invalid\n");
886 ret = -EINVAL;
887 goto done;
888 }
889
890 val |= (e->sec_boot_dev << 8);
891
892 fprintf(out, "fuse prog -y 24 0 %08x 0103e0a9 1\n", val);
893
894 fprintf(out, "# lock (unused) fuse lines (0-23)\n");
895 for (fuse_line = 0; fuse_line < 24; ++fuse_line)
896 fprintf(out, "fuse prog -y %u 2 1\n", fuse_line);
897
898 fprintf(out, "# OK, that's all :-)\n");
899
900 done:
901 return ret;
902 }
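
/*
 * The generated script consists of U-Boot "fuse prog" commands. As a rough
 * sketch with hypothetical values (KAK hash bytes 00 01 02 03 04 05 06 ...,
 * CSK index 1, BOX_ID 0x12345678, SEC_BOOT_DEV 0x34), the output begins:
 *
 *   # burn KAK pub key hash
 *   fuse prog -y 26 0 03020100 00060504 1
 *   ...
 *   # burn CSK selection
 *   fuse prog -y 31 0 00000001 00000000 1
 *   # set box ID
 *   fuse prog -y 48 0 12345678 00000000 1
 *   # enable secure mode (must be the last fuse line written)
 *   fuse prog -y 24 0 00003401 0103e0a9 1
 */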
903
904 static int kwb_dump_fuse_cmds(struct secure_hdr_v1 *sec_hdr)
905 {
906 int ret = 0;
907 struct image_cfg_element *e;
908
909 e = image_find_option(IMAGE_CFG_SEC_FUSE_DUMP);
910 if (!e)
911 return 0;
912
913 if (!strcmp(e->name, "a38x")) {
914 FILE *out = fopen("kwb_fuses_a38x.txt", "w+");
915
916 if (!out) {
917 fprintf(stderr, "Couldn't open eFuse settings: '%s': %s\n",
918 "kwb_fuses_a38x.txt", strerror(errno));
919 return -ENOENT;
920 }
921
922 kwb_dump_fuse_cmds_38x(out, sec_hdr);
923 fclose(out);
924 goto done;
925 }
926
927 ret = -ENOSYS;
928
929 done:
930 return ret;
931 }
932
933 static int image_fill_xip_header(void *image, struct image_tool_params *params)
934 {
935 struct main_hdr_v1 *main_hdr = image; /* kwbimage v0 and v1 have same XIP members */
936 int version = kwbimage_version(image);
937 uint32_t srcaddr = le32_to_cpu(main_hdr->srcaddr);
938 uint32_t startaddr = 0;
939
940 if (main_hdr->blockid != IBR_HDR_SPI_ID) {
941 fprintf(stderr, "XIP is supported only for SPI images\n");
942 return 0;
943 }
944
945 if (version == 0 &&
946 params->addr >= 0xE8000000 && params->addr < 0xEFFFFFFF &&
947 params->ep >= 0xE8000000 && params->ep < 0xEFFFFFFF) {
948 /* Load and Execute address is in SPI address space (kwbimage v0) */
949 startaddr = 0xE8000000;
950 } else if (version != 0 &&
951 params->addr >= 0xD4000000 && params->addr < 0xD7FFFFFF &&
952 params->ep >= 0xD4000000 && params->ep < 0xD7FFFFFF) {
953 /* Load and Execute address is in SPI address space (kwbimage v1) */
954 startaddr = 0xD4000000;
955 } else if (version != 0 &&
956 params->addr >= 0xD8000000 && params->addr < 0xDFFFFFFF &&
957 params->ep >= 0xD8000000 && params->ep < 0xDFFFFFFF) {
958 /* Load and Execute address is in Device bus space (kwbimage v1) */
959 startaddr = 0xD8000000;
960 } else if (params->addr != 0x0) {
961 /* Load address is non-zero */
962 if (version == 0)
963 fprintf(stderr, "XIP Load Address or XIP Entry Point is not in SPI address space\n");
964 else
965 fprintf(stderr, "XIP Load Address or XIP Entry Point is not in SPI nor in Device bus address space\n");
966 return 0;
967 }
968
969 /*
970 * For XIP destaddr must be set to 0xFFFFFFFF and
971 * execaddr relative to the start of XIP memory address space.
972 */
973 main_hdr->destaddr = cpu_to_le32(0xFFFFFFFF);
974
975 if (startaddr == 0) {
976 /*
977 * mkimage's --load-address 0x0 means that the binary is Position
978 * Independent, and in this case mkimage's --entry-point address
979 * is a relative offset from the beginning of the data part of the image.
980 */
981 main_hdr->execaddr = cpu_to_le32(srcaddr + params->ep);
982 } else {
983 /* The lowest possible load address is after the header at srcaddr. */
984 if (params->addr - startaddr < srcaddr) {
985 fprintf(stderr,
986 "Invalid XIP Load Address 0x%08x.\n"
987 "The lowest address for this configuration is 0x%08x.\n",
988 params->addr, (unsigned)(startaddr + srcaddr));
989 return 0;
990 }
991 main_hdr->srcaddr = cpu_to_le32(params->addr - startaddr);
992 main_hdr->execaddr = cpu_to_le32(params->ep - startaddr);
993 }
994
995 return 1;
996 }
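
/*
 * Address math sketch for the non-zero load address case above, with
 * hypothetical values for a v1 SPI image: with startaddr = 0xD4000000,
 * --load-address 0xD4100000 and --entry-point 0xD4100200, the header
 * stores srcaddr = 0x00100000 and execaddr = 0x00100200, i.e. both as
 * offsets from the start of the XIP window, while destaddr is 0xFFFFFFFF.
 */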
997
998 static unsigned int image_get_satablksz(void)
999 {
1000 struct image_cfg_element *e;
1001 e = image_find_option(IMAGE_CFG_SATA_BLKSZ);
1002 return e ? e->satablksz : 512;
1003 }
1004
1005 static size_t image_headersz_align(size_t headersz, uint8_t blockid)
1006 {
1007 /*
1008 * Header needs to be 4-byte aligned, which is already ensured by code
1009 * above. Moreover UART images must have header aligned to 128 bytes
1010 * (xmodem block size), NAND images to 256 bytes (ECC calculation),
1011 * SDIO images to 512 bytes (SDHC/SDXC fixed block size) and SATA
1012 * images to specified storage block size (default 512 bytes).
1013 * Note that SPI images do not have to have header size aligned
1014 * to 256 bytes because it is possible to read from SPI storage from
1015 * any offset (read offset does not have to be aligned to block size).
1016 */
1017 if (blockid == IBR_HDR_UART_ID)
1018 return ALIGN(headersz, 128);
1019 else if (blockid == IBR_HDR_NAND_ID)
1020 return ALIGN(headersz, 256);
1021 else if (blockid == IBR_HDR_SDIO_ID)
1022 return ALIGN(headersz, 512);
1023 else if (blockid == IBR_HDR_SATA_ID)
1024 return ALIGN(headersz, image_get_satablksz());
1025 else
1026 return headersz;
1027 }
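
/*
 * For example, a 480 (0x1E0) byte header is rounded up to 512 bytes for
 * UART, NAND, SDIO and SATA (with the default 512-byte block size) images,
 * while for SPI it is left at 480 bytes.
 */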
1028
1029 static size_t image_headersz_v0(int *hasext)
1030 {
1031 size_t headersz;
1032
1033 headersz = sizeof(struct main_hdr_v0);
1034 if (image_count_options(IMAGE_CFG_DATA) > 0) {
1035 headersz += sizeof(struct ext_hdr_v0);
1036 if (hasext)
1037 *hasext = 1;
1038 }
1039
1040 return headersz;
1041 }
1042
1043 static void *image_create_v0(size_t *dataoff, struct image_tool_params *params,
1044 int payloadsz)
1045 {
1046 struct image_cfg_element *e;
1047 size_t headersz;
1048 struct main_hdr_v0 *main_hdr;
1049 uint8_t *image;
1050 int has_ext = 0;
1051
1052 /*
1053 * Calculate the size of the header and the offset of the
1054 * payload
1055 */
1056 headersz = image_headersz_v0(&has_ext);
1057 *dataoff = image_headersz_align(headersz, image_get_bootfrom());
1058
1059 image = malloc(headersz);
1060 if (!image) {
1061 fprintf(stderr, "Cannot allocate memory for image\n");
1062 return NULL;
1063 }
1064
1065 memset(image, 0, headersz);
1066
1067 main_hdr = (struct main_hdr_v0 *)image;
1068
1069 /* Fill in the main header */
1070 main_hdr->blocksize =
1071 cpu_to_le32(payloadsz);
1072 main_hdr->srcaddr = cpu_to_le32(*dataoff);
1073 main_hdr->ext = has_ext;
1074 main_hdr->version = 0;
1075 main_hdr->destaddr = cpu_to_le32(params->addr);
1076 main_hdr->execaddr = cpu_to_le32(params->ep);
1077 main_hdr->blockid = image_get_bootfrom();
1078
1079 e = image_find_option(IMAGE_CFG_NAND_ECC_MODE);
1080 if (e)
1081 main_hdr->nandeccmode = e->nandeccmode;
1082 e = image_find_option(IMAGE_CFG_NAND_BLKSZ);
1083 if (e)
1084 main_hdr->nandblocksize = e->nandblksz / (64 * 1024);
1085 e = image_find_option(IMAGE_CFG_NAND_PAGESZ);
1086 if (e)
1087 main_hdr->nandpagesize = cpu_to_le16(e->nandpagesz);
1088 e = image_find_option(IMAGE_CFG_NAND_BADBLK_LOCATION);
1089 if (e)
1090 main_hdr->nandbadblklocation = e->nandbadblklocation;
1091
1092 /* For SATA srcaddr is specified in number of sectors. */
1093 if (main_hdr->blockid == IBR_HDR_SATA_ID) {
1094 params->bl_len = image_get_satablksz();
1095 main_hdr->srcaddr = cpu_to_le32(le32_to_cpu(main_hdr->srcaddr) / params->bl_len);
1096 }
1097
1098 /* For PCIe srcaddr is not used and must be set to 0xFFFFFFFF. */
1099 if (main_hdr->blockid == IBR_HDR_PEX_ID)
1100 main_hdr->srcaddr = cpu_to_le32(0xFFFFFFFF);
1101
1102 if (params->xflag) {
1103 if (!image_fill_xip_header(main_hdr, params)) {
1104 free(image);
1105 return NULL;
1106 }
1107 *dataoff = le32_to_cpu(main_hdr->srcaddr);
1108 }
1109
1110 /* Generate the ext header */
1111 if (has_ext) {
1112 struct ext_hdr_v0 *ext_hdr;
1113 int cfgi, datai;
1114
1115 ext_hdr = (struct ext_hdr_v0 *)
1116 (image + sizeof(struct main_hdr_v0));
1117 ext_hdr->offset = cpu_to_le32(0x40);
1118
1119 for (cfgi = 0, datai = 0; cfgi < cfgn; cfgi++) {
1120 e = &image_cfg[cfgi];
1121 if (e->type != IMAGE_CFG_DATA)
1122 continue;
1123
1124 ext_hdr->rcfg[datai].raddr =
1125 cpu_to_le32(e->regdata.raddr);
1126 ext_hdr->rcfg[datai].rdata =
1127 cpu_to_le32(e->regdata.rdata);
1128 datai++;
1129 }
1130
1131 ext_hdr->checksum = image_checksum8(ext_hdr,
1132 sizeof(struct ext_hdr_v0));
1133 }
1134
1135 main_hdr->checksum = image_checksum8(image,
1136 sizeof(struct main_hdr_v0));
1137
1138 return image;
1139 }
1140
1141 static size_t image_headersz_v1(int *hasext)
1142 {
1143 struct image_cfg_element *e;
1144 unsigned int count;
1145 size_t headersz;
1146 int cpu_sheeva;
1147 struct stat s;
1148 int cfgi;
1149 int ret;
1150
1151 headersz = sizeof(struct main_hdr_v1);
1152
1153 if (image_get_csk_index() >= 0) {
1154 headersz += sizeof(struct secure_hdr_v1);
1155 if (hasext)
1156 *hasext = 1;
1157 }
1158
1159 cpu_sheeva = image_is_cpu_sheeva();
1160
1161 count = 0;
1162 for (cfgi = 0; cfgi < cfgn; cfgi++) {
1163 e = &image_cfg[cfgi];
1164
1165 if (e->type == IMAGE_CFG_DATA)
1166 count++;
1167
1168 if (e->type == IMAGE_CFG_DATA_DELAY ||
1169 (e->type == IMAGE_CFG_BINARY && count > 0)) {
1170 headersz += sizeof(struct register_set_hdr_v1) + 8 * count + 4;
1171 count = 0;
1172 }
1173
1174 if (e->type != IMAGE_CFG_BINARY)
1175 continue;
1176
1177 ret = stat(e->binary.file, &s);
1178 if (ret < 0) {
1179 char cwd[PATH_MAX];
1180 char *dir = cwd;
1181
1182 memset(cwd, 0, sizeof(cwd));
1183 if (!getcwd(cwd, sizeof(cwd))) {
1184 dir = "current working directory";
1185 perror("getcwd() failed");
1186 }
1187
1188 fprintf(stderr,
1189 "Didn't find the file '%s' in '%s' which is mandatory to generate the image\n"
1190 "This file generally contains the DDR3 training code, and should be extracted from an existing bootable\n"
1191 "image for your board. Use 'dumpimage -T kwbimage -p 1' to extract it from an existing image.\n",
1192 e->binary.file, dir);
1193 return 0;
1194 }
1195
1196 headersz += sizeof(struct opt_hdr_v1) + sizeof(uint32_t) +
1197 (e->binary.nargs) * sizeof(uint32_t);
1198
1199 if (e->binary.loadaddr) {
1200 /*
1201 * BootROM loads kwbimage header (in which the
1202 * executable code is also stored) to address
1203 * 0x40004000 or 0x40000000. Thus there is a
1204 * restriction on the load address of the N-th
1205 * BINARY image.
1206 */
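/*
 * As a concrete (hypothetical) example for a non-Sheeva CPU: with
 * base_addr 0x40000000, headersz so far 0x2000 and two arguments,
 * low_addr = 0x40002000 and
 * high_addr = 0x40002000 + (255 - 2) * 4 = 0x400023F4.
 */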
1207 unsigned int base_addr, low_addr, high_addr;
1208
1209 base_addr = cpu_sheeva ? 0x40004000 : 0x40000000;
1210 low_addr = base_addr + headersz;
1211 high_addr = low_addr +
1212 (BINARY_MAX_ARGS - e->binary.nargs) * sizeof(uint32_t);
1213
1214 if (cpu_sheeva && e->binary.loadaddr % 16) {
1215 fprintf(stderr,
1216 "Invalid LOAD_ADDRESS 0x%08x for BINARY %s with %d args.\n"
1217 "Address for CPU SHEEVA must be 16-byte aligned.\n",
1218 e->binary.loadaddr, e->binary.file, e->binary.nargs);
1219 return 0;
1220 }
1221
1222 if (e->binary.loadaddr % 4 || e->binary.loadaddr < low_addr ||
1223 e->binary.loadaddr > high_addr) {
1224 fprintf(stderr,
1225 "Invalid LOAD_ADDRESS 0x%08x for BINARY %s with %d args.\n"
1226 "Address must be 4-byte aligned and in range 0x%08x-0x%08x.\n",
1227 e->binary.loadaddr, e->binary.file,
1228 e->binary.nargs, low_addr, high_addr);
1229 return 0;
1230 }
1231 headersz = e->binary.loadaddr - base_addr;
1232 } else if (cpu_sheeva) {
1233 headersz = ALIGN(headersz, 16);
1234 } else {
1235 headersz = ALIGN(headersz, 4);
1236 }
1237
1238 headersz += ALIGN(s.st_size, 4) + sizeof(uint32_t);
1239 if (hasext)
1240 *hasext = 1;
1241 }
1242
1243 if (count > 0)
1244 headersz += sizeof(struct register_set_hdr_v1) + 8 * count + 4;
1245
1246 /*
1247 * For all images except UART, the headersz stored in the header itself
1248 * should contain the header size without padding. For UART images the
1249 * BootROM rounds headersz down to a multiple of 128 bytes. Therefore
1250 * align the UART headersz to a multiple of 128 bytes to ensure that the
1251 * remaining UART header bytes are not ignored by the BootROM.
1252 */
1253 if (image_get_bootfrom() == IBR_HDR_UART_ID)
1254 headersz = ALIGN(headersz, 128);
1255
1256 return headersz;
1257 }
1258
1259 static int add_binary_header_v1(uint8_t **cur, uint8_t **next_ext,
1260 struct image_cfg_element *binarye,
1261 struct main_hdr_v1 *main_hdr)
1262 {
1263 struct opt_hdr_v1 *hdr = (struct opt_hdr_v1 *)*cur;
1264 uint32_t base_addr;
1265 uint32_t add_args;
1266 uint32_t offset;
1267 uint32_t *args;
1268 size_t binhdrsz;
1269 int cpu_sheeva;
1270 struct stat s;
1271 int argi;
1272 FILE *bin;
1273 int ret;
1274
1275 hdr->headertype = OPT_HDR_V1_BINARY_TYPE;
1276
1277 bin = fopen(binarye->binary.file, "r");
1278 if (!bin) {
1279 fprintf(stderr, "Cannot open binary file %s\n",
1280 binarye->binary.file);
1281 return -1;
1282 }
1283
1284 if (fstat(fileno(bin), &s)) {
1285 fprintf(stderr, "Cannot stat binary file %s\n",
1286 binarye->binary.file);
1287 goto err_close;
1288 }
1289
1290 *cur += sizeof(struct opt_hdr_v1);
1291
1292 args = (uint32_t *)*cur;
1293 *args = cpu_to_le32(binarye->binary.nargs);
1294 args++;
1295 for (argi = 0; argi < binarye->binary.nargs; argi++)
1296 args[argi] = cpu_to_le32(binarye->binary.args[argi]);
1297
1298 *cur += (binarye->binary.nargs + 1) * sizeof(uint32_t);
1299
1300 /*
1301 * ARM executable code inside the BIN header on platforms with Sheeva
1302 * CPU (A370 and AXP) must always be aligned to a 128-bit boundary.
1303 * When this code is not position independent (e.g. ARM
1304 * SPL), it must be placed at a fixed load and execute address.
1305 * This requirement can be met by inserting dummy arguments into
1306 * the BIN header, if needed.
1307 */
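/*
 * A hypothetical example of the padding computed below: on a Sheeva CPU
 * with no LOAD_ADDRESS given and the executable code currently at offset
 * 0x1A2C (0x1A2C % 16 == 12), add_args becomes ((16 - 12) % 16) / 4 = 1,
 * i.e. one dummy argument is inserted so the code starts at the 16-byte
 * aligned offset 0x1A30.
 */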
1308 cpu_sheeva = image_is_cpu_sheeva();
1309 base_addr = cpu_sheeva ? 0x40004000 : 0x40000000;
1310 offset = *cur - (uint8_t *)main_hdr;
1311 if (binarye->binary.loadaddr)
1312 add_args = (binarye->binary.loadaddr - base_addr - offset) / sizeof(uint32_t);
1313 else if (cpu_sheeva)
1314 add_args = ((16 - offset % 16) % 16) / sizeof(uint32_t);
1315 else
1316 add_args = 0;
1317 if (add_args) {
1318 *(args - 1) = cpu_to_le32(binarye->binary.nargs + add_args);
1319 *cur += add_args * sizeof(uint32_t);
1320 }
1321
1322 ret = fread(*cur, s.st_size, 1, bin);
1323 if (ret != 1) {
1324 fprintf(stderr,
1325 "Could not read binary image %s\n",
1326 binarye->binary.file);
1327 goto err_close;
1328 }
1329
1330 fclose(bin);
1331
1332 *cur += ALIGN(s.st_size, 4);
1333
1334 *((uint32_t *)*cur) = 0x00000000;
1335 **next_ext = 1;
1336 *next_ext = *cur;
1337
1338 *cur += sizeof(uint32_t);
1339
1340 binhdrsz = sizeof(struct opt_hdr_v1) +
1341 (binarye->binary.nargs + add_args + 2) * sizeof(uint32_t) +
1342 ALIGN(s.st_size, 4);
1343 hdr->headersz_lsb = cpu_to_le16(binhdrsz & 0xFFFF);
1344 hdr->headersz_msb = (binhdrsz & 0xFFFF0000) >> 16;
1345
1346 return 0;
1347
1348 err_close:
1349 fclose(bin);
1350
1351 return -1;
1352 }
1353
1354 static int export_pub_kak_hash(RSA *kak, struct secure_hdr_v1 *secure_hdr)
1355 {
1356 FILE *hashf;
1357 int res;
1358
1359 hashf = fopen("pub_kak_hash.txt", "w");
1360 if (!hashf) {
1361 fprintf(stderr, "Couldn't open hash file: '%s': %s\n",
1362 "pub_kak_hash.txt", strerror(errno));
1363 return 1;
1364 }
1365
1366 res = kwb_export_pubkey(kak, &secure_hdr->kak, hashf, "KAK");
1367
1368 fclose(hashf);
1369
1370 return res < 0 ? 1 : 0;
1371 }
1372
1373 static int kwb_sign_csk_with_kak(struct image_tool_params *params,
1374 struct secure_hdr_v1 *secure_hdr, RSA *csk)
1375 {
1376 RSA *kak = NULL;
1377 RSA *kak_pub = NULL;
1378 int csk_idx = image_get_csk_index();
1379 struct sig_v1 tmp_sig;
1380
1381 if (csk_idx < 0 || csk_idx > 15) {
1382 fprintf(stderr, "Invalid CSK index %d\n", csk_idx);
1383 return 1;
1384 }
1385
1386 if (kwb_load_kak(params, &kak) < 0)
1387 return 1;
1388
1389 if (export_pub_kak_hash(kak, secure_hdr))
1390 return 1;
1391
1392 if (kwb_import_pubkey(&kak_pub, &secure_hdr->kak, "KAK") < 0)
1393 return 1;
1394
1395 if (kwb_export_pubkey(csk, &secure_hdr->csk[csk_idx], NULL, "CSK") < 0)
1396 return 1;
1397
1398 if (kwb_sign_and_verify(kak, &secure_hdr->csk,
1399 sizeof(secure_hdr->csk) +
1400 sizeof(secure_hdr->csksig),
1401 &tmp_sig, "CSK") < 0)
1402 return 1;
1403
1404 if (kwb_verify(kak_pub, &secure_hdr->csk,
1405 sizeof(secure_hdr->csk) +
1406 sizeof(secure_hdr->csksig),
1407 &tmp_sig, "CSK (2)") < 0)
1408 return 1;
1409
1410 secure_hdr->csksig = tmp_sig;
1411
1412 return 0;
1413 }
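
/*
 * In short, the chain of trust built above and in add_secure_header_v1()
 * is: the KAK public key hash is burned into fuses, the KAK signs the CSK
 * table in the secure header, and the selected CSK then signs both the
 * image payload and the header itself.
 */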
1414
1415 static int add_secure_header_v1(struct image_tool_params *params, uint8_t *image_ptr,
1416 size_t image_size, uint8_t *header_ptr, size_t headersz,
1417 struct secure_hdr_v1 *secure_hdr)
1418 {
1419 struct image_cfg_element *e_jtagdelay;
1420 struct image_cfg_element *e_boxid;
1421 struct image_cfg_element *e_flashid;
1422 RSA *csk = NULL;
1423 struct sig_v1 tmp_sig;
1424 bool specialized_img = image_get_spezialized_img();
1425
1426 kwb_msg("Create secure header content\n");
1427
1428 e_jtagdelay = image_find_option(IMAGE_CFG_JTAG_DELAY);
1429 e_boxid = image_find_option(IMAGE_CFG_BOX_ID);
1430 e_flashid = image_find_option(IMAGE_CFG_FLASH_ID);
1431
1432 if (kwb_load_csk(params, &csk) < 0)
1433 return 1;
1434
1435 secure_hdr->headertype = OPT_HDR_V1_SECURE_TYPE;
1436 secure_hdr->headersz_msb = 0;
1437 secure_hdr->headersz_lsb = cpu_to_le16(sizeof(struct secure_hdr_v1));
1438 if (e_jtagdelay)
1439 secure_hdr->jtag_delay = e_jtagdelay->jtag_delay;
1440 if (e_boxid && specialized_img)
1441 secure_hdr->boxid = cpu_to_le32(e_boxid->boxid);
1442 if (e_flashid && specialized_img)
1443 secure_hdr->flashid = cpu_to_le32(e_flashid->flashid);
1444
1445 if (kwb_sign_csk_with_kak(params, secure_hdr, csk))
1446 return 1;
1447
1448 if (kwb_sign_and_verify(csk, image_ptr, image_size - 4,
1449 &secure_hdr->imgsig, "image") < 0)
1450 return 1;
1451
1452 if (kwb_sign_and_verify(csk, header_ptr, headersz, &tmp_sig, "header") < 0)
1453 return 1;
1454
1455 secure_hdr->hdrsig = tmp_sig;
1456
1457 kwb_dump_fuse_cmds(secure_hdr);
1458
1459 return 0;
1460 }
1461
1462 static void finish_register_set_header_v1(uint8_t **cur, uint8_t **next_ext,
1463 struct register_set_hdr_v1 *register_set_hdr,
1464 int *datai, uint8_t delay)
1465 {
1466 int size = sizeof(struct register_set_hdr_v1) + 8 * (*datai) + 4;
1467
1468 register_set_hdr->headertype = OPT_HDR_V1_REGISTER_TYPE;
1469 register_set_hdr->headersz_lsb = cpu_to_le16(size & 0xFFFF);
1470 register_set_hdr->headersz_msb = size >> 16;
1471 register_set_hdr->data[*datai].last_entry.delay = delay;
1472 *cur += size;
1473 **next_ext = 1;
1474 *next_ext = &register_set_hdr->data[*datai].last_entry.next;
1475 *datai = 0;
1476 }
1477
1478 static void *image_create_v1(size_t *dataoff, struct image_tool_params *params,
1479 uint8_t *ptr, int payloadsz)
1480 {
1481 struct image_cfg_element *e;
1482 struct main_hdr_v1 *main_hdr;
1483 struct register_set_hdr_v1 *register_set_hdr;
1484 struct secure_hdr_v1 *secure_hdr = NULL;
1485 size_t headersz;
1486 uint8_t *image, *cur;
1487 int hasext = 0;
1488 uint8_t *next_ext = NULL;
1489 int cfgi, datai;
1490 uint8_t delay;
1491
1492 /*
1493 * Calculate the size of the header and the offset of the
1494 * payload
1495 */
1496 headersz = image_headersz_v1(&hasext);
1497 if (headersz == 0)
1498 return NULL;
1499 *dataoff = image_headersz_align(headersz, image_get_bootfrom());
1500
1501 image = malloc(headersz);
1502 if (!image) {
1503 fprintf(stderr, "Cannot allocate memory for image\n");
1504 return NULL;
1505 }
1506
1507 memset(image, 0, headersz);
1508
1509 main_hdr = (struct main_hdr_v1 *)image;
1510 cur = image;
1511 cur += sizeof(struct main_hdr_v1);
1512 next_ext = &main_hdr->ext;
1513
1514 /* Fill the main header */
1515 main_hdr->blocksize =
1516 cpu_to_le32(payloadsz);
1517 main_hdr->headersz_lsb = cpu_to_le16(headersz & 0xFFFF);
1518 main_hdr->headersz_msb = (headersz & 0xFFFF0000) >> 16;
1519 main_hdr->destaddr = cpu_to_le32(params->addr);
1520 main_hdr->execaddr = cpu_to_le32(params->ep);
1521 main_hdr->srcaddr = cpu_to_le32(*dataoff);
1522 main_hdr->ext = hasext;
1523 main_hdr->version = 1;
1524 main_hdr->blockid = image_get_bootfrom();
1525
1526 e = image_find_option(IMAGE_CFG_NAND_BLKSZ);
1527 if (e)
1528 main_hdr->nandblocksize = e->nandblksz / (64 * 1024);
1529 e = image_find_option(IMAGE_CFG_NAND_PAGESZ);
1530 if (e)
1531 main_hdr->nandpagesize = cpu_to_le16(e->nandpagesz);
1532 e = image_find_option(IMAGE_CFG_NAND_BADBLK_LOCATION);
1533 if (e)
1534 main_hdr->nandbadblklocation = e->nandbadblklocation;
1535 e = image_find_option(IMAGE_CFG_BAUDRATE);
1536 if (e)
1537 main_hdr->options |= baudrate_to_option(e->baudrate);
1538 e = image_find_option(IMAGE_CFG_UART_PORT);
1539 if (e)
1540 main_hdr->options |= (e->uart_port & 3) << 3;
1541 e = image_find_option(IMAGE_CFG_UART_MPP);
1542 if (e)
1543 main_hdr->options |= (e->uart_mpp & 7) << 5;
1544 e = image_find_option(IMAGE_CFG_DEBUG);
1545 if (e)
1546 main_hdr->flags = e->debug ? 0x1 : 0;
1547
1548 /* For SATA srcaddr is specified in number of sectors. */
1549 if (main_hdr->blockid == IBR_HDR_SATA_ID) {
1550 params->bl_len = image_get_satablksz();
1551 main_hdr->srcaddr = cpu_to_le32(le32_to_cpu(main_hdr->srcaddr) / params->bl_len);
1552 }
1553
1554 /* For PCIe srcaddr is not used and must be set to 0xFFFFFFFF. */
1555 if (main_hdr->blockid == IBR_HDR_PEX_ID)
1556 main_hdr->srcaddr = cpu_to_le32(0xFFFFFFFF);
1557
1558 if (params->xflag) {
1559 if (!image_fill_xip_header(main_hdr, params)) {
1560 free(image);
1561 return NULL;
1562 }
1563 *dataoff = le32_to_cpu(main_hdr->srcaddr);
1564 }
1565
1566 if (image_get_csk_index() >= 0) {
1567 /*
1568 * only reserve the space here; we fill the header later since
1569 * we need the header to be complete to compute the signatures
1570 */
1571 secure_hdr = (struct secure_hdr_v1 *)cur;
1572 cur += sizeof(struct secure_hdr_v1);
1573 *next_ext = 1;
1574 next_ext = &secure_hdr->next;
1575 }
1576
1577 datai = 0;
1578 for (cfgi = 0; cfgi < cfgn; cfgi++) {
1579 e = &image_cfg[cfgi];
1580 if (e->type != IMAGE_CFG_DATA &&
1581 e->type != IMAGE_CFG_DATA_DELAY &&
1582 e->type != IMAGE_CFG_BINARY)
1583 continue;
1584
1585 if (datai == 0)
1586 register_set_hdr = (struct register_set_hdr_v1 *)cur;
1587
1588 /* If delay is not specified, use the smallest possible value. */
1589 if (e->type == IMAGE_CFG_DATA_DELAY)
1590 delay = e->regdata_delay;
1591 else
1592 delay = REGISTER_SET_HDR_OPT_DELAY_MS(0);
1593
1594 /*
1595 * The DATA_DELAY command is the last entry in a register set
1596 * header, and the BINARY command inserts a new binary header.
1597 * Therefore the BINARY command requires finishing the register set
1598 * header if some DATA command was specified, and the DATA_DELAY
1599 * command automatically finishes the register set header even when
1600 * there was no DATA command.
1601 */
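/*
 * For example (hypothetical .cfg order), the sequence DATA, DATA,
 * DATA_DELAY, BINARY produces one register set header with two
 * address/value entries plus the delay terminator, followed by one
 * binary header; a trailing DATA without DATA_DELAY is closed by the
 * code after this loop with the minimal delay.
 */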
1602 if (e->type == IMAGE_CFG_DATA_DELAY ||
1603 (e->type == IMAGE_CFG_BINARY && datai != 0))
1604 finish_register_set_header_v1(&cur, &next_ext, register_set_hdr,
1605 &datai, delay);
1606
1607 if (e->type == IMAGE_CFG_DATA) {
1608 register_set_hdr->data[datai].entry.address =
1609 cpu_to_le32(e->regdata.raddr);
1610 register_set_hdr->data[datai].entry.value =
1611 cpu_to_le32(e->regdata.rdata);
1612 datai++;
1613 }
1614
1615 if (e->type == IMAGE_CFG_BINARY) {
1616 if (add_binary_header_v1(&cur, &next_ext, e, main_hdr))
1617 return NULL;
1618 }
1619 }
1620 if (datai != 0) {
1621 /* Set delay to the smallest possible value. */
1622 delay = REGISTER_SET_HDR_OPT_DELAY_MS(0);
1623 finish_register_set_header_v1(&cur, &next_ext, register_set_hdr,
1624 &datai, delay);
1625 }
1626
1627 if (secure_hdr && add_secure_header_v1(params, ptr + *dataoff, payloadsz,
1628 image, headersz, secure_hdr))
1629 return NULL;
1630
1631 /* Calculate and set the header checksum */
1632 main_hdr->checksum = image_checksum8(main_hdr, headersz);
1633
1634 return image;
1635 }
1636
1637 static int recognize_keyword(char *keyword)
1638 {
1639 int kw_id;
1640
1641 for (kw_id = 1; kw_id < IMAGE_CFG_COUNT; ++kw_id)
1642 if (!strcmp(keyword, id_strs[kw_id]))
1643 return kw_id;
1644
1645 return 0;
1646 }
1647
1648 static int image_create_config_parse_oneline(char *line,
1649 struct image_cfg_element *el)
1650 {
1651 char *keyword, *saveptr, *value1, *value2;
1652 char delimiters[] = " \t";
1653 int keyword_id, ret, argi;
1654 char *unknown_msg = "Ignoring unknown line '%s'\n";
1655
1656 keyword = strtok_r(line, delimiters, &saveptr);
1657 keyword_id = recognize_keyword(keyword);
1658
1659 if (!keyword_id) {
1660 fprintf(stderr, unknown_msg, line);
1661 return 0;
1662 }
1663
1664 el->type = keyword_id;
1665
1666 value1 = strtok_r(NULL, delimiters, &saveptr);
1667
1668 if (!value1) {
1669 fprintf(stderr, "Parameter missing in line '%s'\n", line);
1670 return -1;
1671 }
1672
1673 switch (keyword_id) {
1674 case IMAGE_CFG_VERSION:
1675 el->version = atoi(value1);
1676 break;
1677 case IMAGE_CFG_CPU:
1678 if (strcmp(value1, "FEROCEON") == 0)
1679 el->cpu_sheeva = 0;
1680 else if (strcmp(value1, "SHEEVA") == 0)
1681 el->cpu_sheeva = 1;
1682 else if (strcmp(value1, "A9") == 0)
1683 el->cpu_sheeva = 0;
1684 else {
1685 fprintf(stderr, "Invalid CPU %s\n", value1);
1686 return -1;
1687 }
1688 break;
1689 case IMAGE_CFG_BOOT_FROM:
1690 ret = image_boot_mode_id(value1);
1691
1692 if (ret < 0) {
1693 fprintf(stderr, "Invalid boot media '%s'\n", value1);
1694 return -1;
1695 }
1696 el->bootfrom = ret;
1697 break;
1698 case IMAGE_CFG_NAND_BLKSZ:
1699 el->nandblksz = strtoul(value1, NULL, 16);
1700 break;
1701 case IMAGE_CFG_NAND_BADBLK_LOCATION:
1702 el->nandbadblklocation = strtoul(value1, NULL, 16);
1703 break;
1704 case IMAGE_CFG_NAND_ECC_MODE:
1705 ret = image_nand_ecc_mode_id(value1);
1706
1707 if (ret < 0) {
1708 fprintf(stderr, "Invalid NAND ECC mode '%s'\n", value1);
1709 return -1;
1710 }
1711 el->nandeccmode = ret;
1712 break;
1713 case IMAGE_CFG_NAND_PAGESZ:
1714 el->nandpagesz = strtoul(value1, NULL, 16);
1715 break;
1716 case IMAGE_CFG_SATA_BLKSZ:
1717 el->satablksz = strtoul(value1, NULL, 0);
1718 if (el->satablksz & (el->satablksz-1)) {
1719 fprintf(stderr, "Invalid SATA block size '%s'\n", value1);
1720 return -1;
1721 }
1722 break;
1723 case IMAGE_CFG_BINARY:
1724 argi = 0;
1725
1726 el->binary.file = strdup(value1);
1727 while (1) {
1728 char *value = strtok_r(NULL, delimiters, &saveptr);
1729 char *endptr;
1730
1731 if (!value)
1732 break;
1733
1734 if (!strcmp(value, "LOAD_ADDRESS")) {
1735 value = strtok_r(NULL, delimiters, &saveptr);
1736 if (!value) {
1737 fprintf(stderr,
1738 "Missing address argument for BINARY LOAD_ADDRESS\n");
1739 return -1;
1740 }
1741 el->binary.loadaddr = strtoul(value, &endptr, 16);
1742 if (*endptr) {
1743 fprintf(stderr,
1744 "Invalid argument '%s' for BINARY LOAD_ADDRESS\n",
1745 value);
1746 return -1;
1747 }
1748 value = strtok_r(NULL, delimiters, &saveptr);
1749 if (value) {
1750 fprintf(stderr,
1751 "Unexpected argument '%s' after BINARY LOAD_ADDRESS\n",
1752 value);
1753 return -1;
1754 }
1755 break;
1756 }
1757
1758 el->binary.args[argi] = strtoul(value, &endptr, 16);
1759 if (*endptr) {
1760 fprintf(stderr, "Invalid argument '%s' for BINARY\n", value);
1761 return -1;
1762 }
1763 argi++;
1764 if (argi >= BINARY_MAX_ARGS) {
1765 fprintf(stderr,
1766 "Too many arguments for BINARY\n");
1767 return -1;
1768 }
1769 }
1770 el->binary.nargs = argi;
1771 break;
1772 case IMAGE_CFG_DATA:
1773 value2 = strtok_r(NULL, delimiters, &saveptr);
1774
1775 if (!value1 || !value2) {
1776 fprintf(stderr,
1777 "Invalid number of arguments for DATA\n");
1778 return -1;
1779 }
1780
1781 el->regdata.raddr = strtoul(value1, NULL, 16);
1782 el->regdata.rdata = strtoul(value2, NULL, 16);
1783 break;
1784 case IMAGE_CFG_DATA_DELAY:
1785 if (!strcmp(value1, "SDRAM_SETUP"))
1786 el->regdata_delay = REGISTER_SET_HDR_OPT_DELAY_SDRAM_SETUP;
1787 else
1788 el->regdata_delay = REGISTER_SET_HDR_OPT_DELAY_MS(strtoul(value1, NULL, 10));
1789 if (el->regdata_delay > 255) {
1790 fprintf(stderr, "Maximum DATA_DELAY is 255\n");
1791 return -1;
1792 }
1793 break;
1794 case IMAGE_CFG_BAUDRATE:
1795 el->baudrate = strtoul(value1, NULL, 10);
1796 break;
1797 case IMAGE_CFG_UART_PORT:
1798 el->uart_port = strtoul(value1, NULL, 16);
1799 break;
1800 case IMAGE_CFG_UART_MPP:
1801 el->uart_mpp = strtoul(value1, NULL, 16);
1802 break;
1803 case IMAGE_CFG_DEBUG:
1804 el->debug = strtoul(value1, NULL, 10);
1805 break;
1806 case IMAGE_CFG_KAK:
1807 el->key_name = strdup(value1);
1808 break;
1809 case IMAGE_CFG_CSK:
1810 el->key_name = strdup(value1);
1811 break;
1812 case IMAGE_CFG_CSK_INDEX:
1813 el->csk_idx = strtol(value1, NULL, 0);
1814 break;
1815 case IMAGE_CFG_JTAG_DELAY:
1816 el->jtag_delay = strtoul(value1, NULL, 0);
1817 break;
1818 case IMAGE_CFG_BOX_ID:
1819 el->boxid = strtoul(value1, NULL, 0);
1820 break;
1821 case IMAGE_CFG_FLASH_ID:
1822 el->flashid = strtoul(value1, NULL, 0);
1823 break;
1824 case IMAGE_CFG_SEC_SPECIALIZED_IMG:
1825 el->sec_specialized_img = true;
1826 break;
1827 case IMAGE_CFG_SEC_COMMON_IMG:
1828 el->sec_specialized_img = false;
1829 break;
1830 case IMAGE_CFG_SEC_BOOT_DEV:
1831 el->sec_boot_dev = strtoul(value1, NULL, 0);
1832 break;
1833 case IMAGE_CFG_SEC_FUSE_DUMP:
1834 el->name = strdup(value1);
1835 break;
1836 default:
1837 fprintf(stderr, unknown_msg, line);
1838 }
1839
1840 return 0;
1841 }
1842
1843 /*
1844 * Parse the configuration file 'fcfg' into the array of configuration
1845 * elements 'image_cfg', and return the number of configuration
1846 * elements in 'cfgn'.
1847 */
1848 static int image_create_config_parse(FILE *fcfg)
1849 {
1850 int ret;
1851 int cfgi = 0;
1852
1853 /* Parse the configuration file */
1854 while (!feof(fcfg)) {
1855 char *line;
1856 char buf[256];
1857
1858 /* Read the current line */
1859 memset(buf, 0, sizeof(buf));
1860 line = fgets(buf, sizeof(buf), fcfg);
1861 if (!line)
1862 break;
1863
1864 /* Ignore useless lines */
1865 if (line[0] == '\n' || line[0] == '#')
1866 continue;
1867
1868 /* Strip final newline */
1869 if (line[strlen(line) - 1] == '\n')
1870 line[strlen(line) - 1] = 0;
1871
1872 /* Parse the current line */
1873 ret = image_create_config_parse_oneline(line,
1874 &image_cfg[cfgi]);
1875 if (ret)
1876 return ret;
1877
1878 cfgi++;
1879
1880 if (cfgi >= IMAGE_CFG_ELEMENT_MAX) {
1881 fprintf(stderr,
1882 "Too many configuration elements in .cfg file\n");
1883 return -1;
1884 }
1885 }
1886
1887 cfgn = cfgi;
1888 return 0;
1889 }
1890
1891 static int image_get_version(void)
1892 {
1893 struct image_cfg_element *e;
1894
1895 e = image_find_option(IMAGE_CFG_VERSION);
1896 if (!e)
1897 return -1;
1898
1899 return e->version;
1900 }
1901
1902 static void kwbimage_set_header(void *ptr, struct stat *sbuf, int ifd,
1903 struct image_tool_params *params)
1904 {
1905 FILE *fcfg;
1906 void *image = NULL;
1907 int version;
1908 size_t dataoff = 0;
1909 size_t datasz;
1910 uint32_t checksum;
1911 struct stat s;
1912 int ret;
1913
1914 params->bl_len = 1;
1915
1916 /*
1917 * Do not use sbuf->st_size as it contains the size with padding.
1918 * We need the original image data size, so stat the original file.
1919 */
1920 if (params->skipcpy) {
1921 s.st_size = 0;
1922 } else if (stat(params->datafile, &s)) {
1923 fprintf(stderr, "Could not stat data file %s: %s\n",
1924 params->datafile, strerror(errno));
1925 exit(EXIT_FAILURE);
1926 }
1927 datasz = ALIGN(s.st_size, 4);
1928
1929 fcfg = fopen(params->imagename, "r");
1930 if (!fcfg) {
1931 fprintf(stderr, "Could not open input file %s\n",
1932 params->imagename);
1933 exit(EXIT_FAILURE);
1934 }
1935
1936 image_cfg = malloc(IMAGE_CFG_ELEMENT_MAX *
1937 sizeof(struct image_cfg_element));
1938 if (!image_cfg) {
1939 fprintf(stderr, "Cannot allocate memory\n");
1940 fclose(fcfg);
1941 exit(EXIT_FAILURE);
1942 }
1943
1944 memset(image_cfg, 0,
1945 IMAGE_CFG_ELEMENT_MAX * sizeof(struct image_cfg_element));
1946 rewind(fcfg);
1947
1948 ret = image_create_config_parse(fcfg);
1949 fclose(fcfg);
1950 if (ret) {
1951 free(image_cfg);
1952 exit(EXIT_FAILURE);
1953 }
1954
1955 version = image_get_version();
1956 switch (version) {
1957 /*
1958 * Fallback to version 0 if no version is provided in the
1959 * cfg file
1960 */
1961 case -1:
1962 case 0:
1963 image = image_create_v0(&dataoff, params, datasz + 4);
1964 break;
1965
1966 case 1:
1967 image = image_create_v1(&dataoff, params, ptr, datasz + 4);
1968 break;
1969
1970 default:
1971 fprintf(stderr, "Unsupported version %d\n", version);
1972 free(image_cfg);
1973 exit(EXIT_FAILURE);
1974 }
1975
1976 if (!image) {
1977 fprintf(stderr, "Could not create image\n");
1978 free(image_cfg);
1979 exit(EXIT_FAILURE);
1980 }
1981
1982 free(image_cfg);
1983
1984 /* Build and add image data checksum */
1985 checksum = cpu_to_le32(image_checksum32((uint8_t *)ptr + dataoff,
1986 datasz));
1987 memcpy((uint8_t *)ptr + dataoff + datasz, &checksum, sizeof(uint32_t));
1988
1989 /* Finally copy the header into the image area */
1990 memcpy(ptr, image, kwbheader_size(image));
1991
1992 free(image);
1993 }
1994
1995 static void kwbimage_print_header(const void *ptr, struct image_tool_params *params)
1996 {
1997 struct main_hdr_v0 *mhdr = (struct main_hdr_v0 *)ptr;
1998 struct bin_hdr_v0 *bhdr;
1999 struct opt_hdr_v1 *ohdr;
2000
2001 printf("Image Type: MVEBU Boot from %s Image\n",
2002 image_boot_mode_name(mhdr->blockid));
2003 printf("Image version:%d\n", kwbimage_version(ptr));
2004
2005 for_each_opt_hdr_v1 (ohdr, mhdr) {
2006 if (ohdr->headertype == OPT_HDR_V1_BINARY_TYPE) {
2007 printf("BIN Img Size: ");
2008 genimg_print_size(opt_hdr_v1_size(ohdr) - 12 -
2009 4 * ohdr->data[0]);
2010 printf("BIN Img Offs: ");
2011 genimg_print_size(((uint8_t *)ohdr - (uint8_t *)mhdr) +
2012 8 + 4 * ohdr->data[0]);
2013 }
2014 }
2015
2016 for_each_bin_hdr_v0(bhdr, mhdr) {
2017 printf("BIN Img Size: ");
2018 genimg_print_size(le32_to_cpu(bhdr->size));
2019 printf("BIN Img Addr: %08x\n", le32_to_cpu(bhdr->destaddr));
2020 printf("BIN Img Entr: %08x\n", le32_to_cpu(bhdr->execaddr));
2021 }
2022
2023 printf("Data Size: ");
2024 genimg_print_size(le32_to_cpu(mhdr->blocksize) - sizeof(uint32_t));
2025 printf("Data Offset: ");
2026 if (mhdr->blockid == IBR_HDR_SATA_ID)
2027 printf("%u Sector%s (LBA) = ", le32_to_cpu(mhdr->srcaddr),
2028 le32_to_cpu(mhdr->srcaddr) != 1 ? "s" : "");
2029 genimg_print_size(le32_to_cpu(mhdr->srcaddr) * params->bl_len);
2030 if (mhdr->blockid == IBR_HDR_SATA_ID)
2031 printf("Sector Size: %u Bytes\n", params->bl_len);
2032 if (mhdr->blockid == IBR_HDR_SPI_ID && le32_to_cpu(mhdr->destaddr) == 0xFFFFFFFF) {
2033 printf("Load Address: XIP\n");
2034 printf("Execute Offs: %08x\n", le32_to_cpu(mhdr->execaddr));
2035 } else {
2036 printf("Load Address: %08x\n", le32_to_cpu(mhdr->destaddr));
2037 printf("Entry Point: %08x\n", le32_to_cpu(mhdr->execaddr));
2038 }
2039 }
2040
2041 static int kwbimage_check_image_types(uint8_t type)
2042 {
2043 if (type == IH_TYPE_KWBIMAGE)
2044 return EXIT_SUCCESS;
2045
2046 return EXIT_FAILURE;
2047 }
2048
2049 static int kwbimage_verify_header(unsigned char *ptr, int image_size,
2050 struct image_tool_params *params)
2051 {
2052 size_t header_size = kwbheader_size(ptr);
2053 uint8_t blockid;
2054 uint32_t offset;
2055 uint32_t size;
2056 uint8_t csum;
2057 int blksz;
2058
2059 if (header_size > 192*1024)
2060 return -FDT_ERR_BADSTRUCTURE;
2061
2062 if (header_size > image_size)
2063 return -FDT_ERR_BADSTRUCTURE;
2064
2065 if (!main_hdr_checksum_ok(ptr))
2066 return -FDT_ERR_BADSTRUCTURE;
2067
2068 /* Only version 0 extended header has checksum */
2069 if (kwbimage_version(ptr) == 0) {
2070 struct main_hdr_v0 *mhdr = (struct main_hdr_v0 *)ptr;
2071 struct ext_hdr_v0 *ext_hdr;
2072 struct bin_hdr_v0 *bhdr;
2073
2074 for_each_ext_hdr_v0(ext_hdr, ptr) {
2075 csum = image_checksum8(ext_hdr, sizeof(*ext_hdr) - 1);
2076 if (csum != ext_hdr->checksum)
2077 return -FDT_ERR_BADSTRUCTURE;
2078 }
2079
2080 for_each_bin_hdr_v0(bhdr, ptr) {
2081 csum = image_checksum8(bhdr, (uint8_t *)&bhdr->checksum - (uint8_t *)bhdr - 1);
2082 if (csum != bhdr->checksum)
2083 return -FDT_ERR_BADSTRUCTURE;
2084
2085 if (bhdr->offset > sizeof(*bhdr) || bhdr->offset % 4 != 0)
2086 return -FDT_ERR_BADSTRUCTURE;
2087
2088 if (bhdr->offset + bhdr->size + 4 > sizeof(*bhdr) || bhdr->size % 4 != 0)
2089 return -FDT_ERR_BADSTRUCTURE;
2090
2091 if (image_checksum32((uint8_t *)bhdr + bhdr->offset, bhdr->size) !=
2092 *(uint32_t *)((uint8_t *)bhdr + bhdr->offset + bhdr->size))
2093 return -FDT_ERR_BADSTRUCTURE;
2094 }
2095
2096 blockid = mhdr->blockid;
2097 offset = le32_to_cpu(mhdr->srcaddr);
2098 size = le32_to_cpu(mhdr->blocksize);
2099 } else if (kwbimage_version(ptr) == 1) {
2100 struct main_hdr_v1 *mhdr = (struct main_hdr_v1 *)ptr;
2101 const uint8_t *mhdr_end;
2102 struct opt_hdr_v1 *ohdr;
2103
2104 mhdr_end = (uint8_t *)mhdr + header_size;
2105 for_each_opt_hdr_v1 (ohdr, ptr)
2106 if (!opt_hdr_v1_valid_size(ohdr, mhdr_end))
2107 return -FDT_ERR_BADSTRUCTURE;
2108
2109 blockid = mhdr->blockid;
2110 offset = le32_to_cpu(mhdr->srcaddr);
2111 size = le32_to_cpu(mhdr->blocksize);
2112 } else {
2113 return -FDT_ERR_BADSTRUCTURE;
2114 }
2115
2116 if (size < 4 || size % 4 != 0)
2117 return -FDT_ERR_BADSTRUCTURE;
2118
2119 /*
2120 * For SATA, srcaddr is specified as a number of sectors.
2121 * Try all possible sector sizes which are a power of two,
2122 * from 512 bytes up to 32 kB.
2123 */
2124 if (blockid == IBR_HDR_SATA_ID) {
2125 for (blksz = 512; blksz < 0x10000; blksz *= 2) {
2126 if (offset * blksz > image_size || offset * blksz + size > image_size)
2127 break;
2128
2129 if (image_checksum32(ptr + offset * blksz, size - 4) ==
2130 *(uint32_t *)(ptr + offset * blksz + size - 4)) {
2131 params->bl_len = blksz;
2132 return 0;
2133 }
2134 }
2135
2136 return -FDT_ERR_BADSTRUCTURE;
2137 }
2138
2139 /*
2140 * For PCIe, srcaddr is always set to 0xFFFFFFFF.
2141 * In that case the data is expected to start right after all headers.
2142 */
2143 if (blockid == IBR_HDR_PEX_ID && offset == 0xFFFFFFFF)
2144 offset = header_size;
2145
2146 if (offset % 4 != 0 || offset > image_size || offset + size > image_size)
2147 return -FDT_ERR_BADSTRUCTURE;
2148
2149 if (image_checksum32(ptr + offset, size - 4) !=
2150 *(uint32_t *)(ptr + offset + size - 4))
2151 return -FDT_ERR_BADSTRUCTURE;
2152
2153 params->bl_len = 1;
2154 return 0;
2155 }
2156
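/*
 * Pre-create the kwbimage header before the image body is assembled:
 * parse the cfg file passed as the mkimage image name to determine the
 * header version and boot device, allocate a zeroed header buffer of the
 * aligned size in tparams, and return how many extra bytes must follow
 * the data file so that header + payload + 4-byte checksum meet the
 * alignment required by the boot device.
 *
 * Illustrative invocation (file names and addresses are examples only):
 *   mkimage -n kwbimage.cfg -T kwbimage -a 0x00800000 -e 0x00800000 \
 *           -d u-boot-spl.bin u-boot-spl.kwb
 */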
2157 static int kwbimage_generate(struct image_tool_params *params,
2158 struct image_type_params *tparams)
2159 {
2160 FILE *fcfg;
2161 struct stat s;
2162 int alloc_len;
2163 int bootfrom;
2164 int version;
2165 void *hdr;
2166 int ret;
2167 int align, size;
2168 unsigned int satablksz;
2169
2170 fcfg = fopen(params->imagename, "r");
2171 if (!fcfg) {
2172 fprintf(stderr, "Could not open input file %s\n",
2173 params->imagename);
2174 exit(EXIT_FAILURE);
2175 }
2176
2177 if (params->skipcpy) {
2178 s.st_size = 0;
2179 } else if (stat(params->datafile, &s)) {
2180 fprintf(stderr, "Could not stat data file %s: %s\n",
2181 params->datafile, strerror(errno));
2182 exit(EXIT_FAILURE);
2183 }
2184
2185 image_cfg = malloc(IMAGE_CFG_ELEMENT_MAX *
2186 sizeof(struct image_cfg_element));
2187 if (!image_cfg) {
2188 fprintf(stderr, "Cannot allocate memory\n");
2189 fclose(fcfg);
2190 exit(EXIT_FAILURE);
2191 }
2192
2193 memset(image_cfg, 0,
2194 IMAGE_CFG_ELEMENT_MAX * sizeof(struct image_cfg_element));
2195 rewind(fcfg);
2196
2197 ret = image_create_config_parse(fcfg);
2198 fclose(fcfg);
2199 if (ret) {
2200 free(image_cfg);
2201 exit(EXIT_FAILURE);
2202 }
2203
2204 bootfrom = image_get_bootfrom();
2205 version = image_get_version();
2206 satablksz = image_get_satablksz();
2207 switch (version) {
2208 /*
2209 * Fall back to version 0 if no version is provided in the
2210 * cfg file.
2211 */
2212 case -1:
2213 case 0:
2214 alloc_len = image_headersz_v0(NULL);
2215 break;
2216
2217 case 1:
2218 alloc_len = image_headersz_v1(NULL);
2219 if (!alloc_len) {
2220 free(image_cfg);
2221 exit(EXIT_FAILURE);
2222 }
2223 if (alloc_len > 192*1024) {
2224 fprintf(stderr, "Header is too big (%u bytes), maximal kwbimage header size is %u bytes\n", alloc_len, 192*1024);
2225 free(image_cfg);
2226 exit(EXIT_FAILURE);
2227 }
2228 break;
2229
2230 default:
2231 fprintf(stderr, "Unsupported version %d\n", version);
2232 free(image_cfg);
2233 exit(EXIT_FAILURE);
2234 }
2235
2236 alloc_len = image_headersz_align(alloc_len, image_get_bootfrom());
2237
2238 free(image_cfg);
2239
2240 hdr = malloc(alloc_len);
2241 if (!hdr) {
2242 fprintf(stderr, "%s: malloc return failure: %s\n",
2243 params->cmdname, strerror(errno));
2244 exit(EXIT_FAILURE);
2245 }
2246
2247 memset(hdr, 0, alloc_len);
2248 tparams->header_size = alloc_len;
2249 tparams->hdr = hdr;
2250
2251 /*
2252 * Final SATA images must be aligned to disk block size.
2253 * Final SDIO images must be aligned to 512 bytes.
2254 * Final SPI and NAND images must be aligned to 256 bytes.
2255 * Final UART image must be aligned to 128 bytes.
2256 */
2257 if (bootfrom == IBR_HDR_SATA_ID)
2258 align = satablksz;
2259 else if (bootfrom == IBR_HDR_SDIO_ID)
2260 align = 512;
2261 else if (bootfrom == IBR_HDR_SPI_ID || bootfrom == IBR_HDR_NAND_ID)
2262 align = 256;
2263 else if (bootfrom == IBR_HDR_UART_ID)
2264 align = 128;
2265 else
2266 align = 4;
2267
2268 /*
2269 * The resulting image needs to be 4-byte aligned; at least
2270 * the Marvell hdrparser tool complains if it is unaligned.
2271 * A 4-byte checksum is stored after the image data.
2272 */
2273 size = 4 + (align - (alloc_len + s.st_size + 4) % align) % align;
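/*
 * Example: with align = 512, alloc_len = 512 and a 1000 byte data file,
 * (512 + 1000 + 4) % 512 = 492, so size = 4 + 20 = 24 and the final
 * image (512 + 1000 + 24 = 1536 bytes) is a multiple of 512.
 */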
2274
2275 /*
2276 * This function should return the aligned size of the datafile.
2277 * When skipcpy is set (the datafile is skipped) the return value of this
2278 * function is ignored, so the required kwbimage alignment has to be put
2279 * into the preallocated header size.
2280 */
2281 if (params->skipcpy) {
2282 tparams->header_size += size;
2283 return 0;
2284 } else {
2285 return size;
2286 }
2287 }
2288
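/*
 * Reverse operation for '-p -1': reconstruct a kwbimage cfg file from an
 * existing image so it can be fed back to mkimage. Known fields become
 * regular cfg directives (VERSION, BOOT_FROM, NAND_*, BINARY, DATA, ...);
 * values that normally come from the mkimage command line, or that cannot
 * be expressed in a cfg file, are emitted as '#' comment lines.
 */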
2289 static int kwbimage_generate_config(void *ptr, struct image_tool_params *params)
2290 {
2291 struct main_hdr_v0 *mhdr0 = (struct main_hdr_v0 *)ptr;
2292 struct main_hdr_v1 *mhdr = (struct main_hdr_v1 *)ptr;
2293 size_t header_size = kwbheader_size(ptr);
2294 struct register_set_hdr_v1 *regset_hdr;
2295 struct ext_hdr_v0_reg *regdata;
2296 struct ext_hdr_v0 *ehdr0;
2297 struct bin_hdr_v0 *bhdr0;
2298 struct opt_hdr_v1 *ohdr;
2299 int regset_count;
2300 int params_count;
2301 unsigned offset;
2302 int is_v0_ext;
2303 int cur_idx;
2304 int version;
2305 FILE *f;
2306 int i;
2307
2308 f = fopen(params->outfile, "w");
2309 if (!f) {
2310 fprintf(stderr, "Can't open \"%s\": %s\n", params->outfile, strerror(errno));
2311 return -1;
2312 }
2313
2314 version = kwbimage_version(ptr);
2315
2316 is_v0_ext = 0;
2317 if (version == 0) {
2318 if (mhdr0->ext > 1 || mhdr0->bin ||
2319 ((ehdr0 = ext_hdr_v0_first(ptr)) &&
2320 (ehdr0->match_addr || ehdr0->match_mask || ehdr0->match_value)))
2321 is_v0_ext = 1;
2322 }
2323
2324 if (version != 0)
2325 fprintf(f, "VERSION %d\n", version);
2326
2327 fprintf(f, "BOOT_FROM %s\n", image_boot_mode_name(mhdr->blockid) ?: "<unknown>");
2328
2329 if (version == 0 && mhdr->blockid == IBR_HDR_NAND_ID)
2330 fprintf(f, "NAND_ECC_MODE %s\n", image_nand_ecc_mode_name(mhdr0->nandeccmode));
2331
2332 if (mhdr->blockid == IBR_HDR_NAND_ID)
2333 fprintf(f, "NAND_PAGE_SIZE 0x%x\n", (unsigned)le16_to_cpu(mhdr->nandpagesize));
2334
2335 if (mhdr->blockid == IBR_HDR_NAND_ID && (version != 0 || is_v0_ext || mhdr->nandblocksize != 0)) {
2336 if (mhdr->nandblocksize != 0) /* block size explicitly set in 64 kB units */
2337 fprintf(f, "NAND_BLKSZ 0x%x\n", (unsigned)mhdr->nandblocksize * 64*1024);
2338 else if (le16_to_cpu(mhdr->nandpagesize) > 512)
2339 fprintf(f, "NAND_BLKSZ 0x10000\n"); /* large page NAND flash = 64 kB block size */
2340 else
2341 fprintf(f, "NAND_BLKSZ 0x4000\n"); /* small page NAND flash = 16 kB block size */
2342 }
2343
2344 if (mhdr->blockid == IBR_HDR_NAND_ID && (version != 0 || is_v0_ext))
2345 fprintf(f, "NAND_BADBLK_LOCATION 0x%x\n", (unsigned)mhdr->nandbadblklocation);
2346
2347 if (version == 0 && mhdr->blockid == IBR_HDR_SATA_ID)
2348 fprintf(f, "SATA_PIO_MODE %u\n", (unsigned)mhdr0->satapiomode);
2349
2350 if (mhdr->blockid == IBR_HDR_SATA_ID)
2351 fprintf(f, "SATA_BLKSZ %u\n", params->bl_len);
2352
2353 /*
2354 * Addresses and sizes which are specified by mkimage command line
2355 * arguments and not in the kwbimage config file
2356 */
2357
2358 if (version != 0)
2359 fprintf(f, "#HEADER_SIZE 0x%x\n",
2360 ((unsigned)mhdr->headersz_msb << 8) | le16_to_cpu(mhdr->headersz_lsb));
2361
2362 fprintf(f, "#SRC_ADDRESS 0x%x\n", le32_to_cpu(mhdr->srcaddr));
2363 fprintf(f, "#BLOCK_SIZE 0x%x\n", le32_to_cpu(mhdr->blocksize));
2364 fprintf(f, "#DEST_ADDRESS 0x%08x\n", le32_to_cpu(mhdr->destaddr));
2365 fprintf(f, "#EXEC_ADDRESS 0x%08x\n", le32_to_cpu(mhdr->execaddr));
2366
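/*
 * Decode the v1 options field: the UART port index is stored in bits 3-4,
 * the UART MPP configuration in bits 5-7, and the baudrate is recovered
 * via options_to_baudrate().
 */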
2367 if (version != 0) {
2368 if (options_to_baudrate(mhdr->options))
2369 fprintf(f, "BAUDRATE %u\n", options_to_baudrate(mhdr->options));
2370 if (options_to_baudrate(mhdr->options) ||
2371 ((mhdr->options >> 3) & 0x3) || ((mhdr->options >> 5) & 0x7)) {
2372 fprintf(f, "UART_PORT %u\n", (unsigned)((mhdr->options >> 3) & 0x3));
2373 fprintf(f, "UART_MPP 0x%x\n", (unsigned)((mhdr->options >> 5) & 0x7));
2374 }
2375 if (mhdr->flags & 0x1)
2376 fprintf(f, "DEBUG 1\n");
2377 }
2378
2379 cur_idx = 1;
2380 for_each_opt_hdr_v1(ohdr, ptr) {
2381 if (ohdr->headertype == OPT_HDR_V1_SECURE_TYPE) {
2382 fprintf(f, "#SECURE_HEADER\n");
2383 } else if (ohdr->headertype == OPT_HDR_V1_BINARY_TYPE) {
2384 fprintf(f, "BINARY binary%d.bin", cur_idx);
2385 for (i = 0; i < ohdr->data[0]; i++)
2386 fprintf(f, " 0x%x", le32_to_cpu(((uint32_t *)ohdr->data)[i + 1]));
2387 offset = (unsigned)((uint8_t *)ohdr - (uint8_t *)mhdr) + 8 + 4 * ohdr->data[0];
2388 fprintf(f, " LOAD_ADDRESS 0x%08x\n", 0x40000000 + offset);
2389 fprintf(f, " # for CPU SHEEVA: LOAD_ADDRESS 0x%08x\n", 0x40004000 + offset);
2390 cur_idx++;
2391 } else if (ohdr->headertype == OPT_HDR_V1_REGISTER_TYPE) {
2392 regset_hdr = (struct register_set_hdr_v1 *)ohdr;
2393 if (opt_hdr_v1_size(ohdr) > sizeof(*ohdr))
2394 regset_count = (opt_hdr_v1_size(ohdr) - sizeof(*ohdr)) /
2395 sizeof(regset_hdr->data[0].entry);
2396 else
2397 regset_count = 0;
2398 for (i = 0; i < regset_count; i++)
2399 fprintf(f, "DATA 0x%08x 0x%08x\n",
2400 le32_to_cpu(regset_hdr->data[i].entry.address),
2401 le32_to_cpu(regset_hdr->data[i].entry.value));
2402 if (regset_count > 0) {
2403 if (regset_hdr->data[regset_count-1].last_entry.delay !=
2404 REGISTER_SET_HDR_OPT_DELAY_SDRAM_SETUP)
2405 fprintf(f, "DATA_DELAY %u\n",
2406 (unsigned)regset_hdr->data[regset_count-1].last_entry.delay);
2407 else
2408 fprintf(f, "DATA_DELAY SDRAM_SETUP\n");
2409 }
2410 }
2411 }
2412
2413 if (version == 0 && !is_v0_ext && le16_to_cpu(mhdr0->ddrinitdelay))
2414 fprintf(f, "DDR_INIT_DELAY %u\n", (unsigned)le16_to_cpu(mhdr0->ddrinitdelay));
2415
2416 for_each_ext_hdr_v0(ehdr0, ptr) {
2417 if (is_v0_ext) {
2418 fprintf(f, "\nMATCH ADDRESS 0x%08x MASK 0x%08x VALUE 0x%08x\n",
2419 le32_to_cpu(ehdr0->match_addr),
2420 le32_to_cpu(ehdr0->match_mask),
2421 le32_to_cpu(ehdr0->match_value));
2422 if (ehdr0->rsvd1[0] || ehdr0->rsvd1[1] || ehdr0->rsvd1[2] ||
2423 ehdr0->rsvd1[3] || ehdr0->rsvd1[4] || ehdr0->rsvd1[5] ||
2424 ehdr0->rsvd1[6] || ehdr0->rsvd1[7])
2425 fprintf(f, "#DDR_RSVD1 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
2426 ehdr0->rsvd1[0], ehdr0->rsvd1[1], ehdr0->rsvd1[2],
2427 ehdr0->rsvd1[3], ehdr0->rsvd1[4], ehdr0->rsvd1[5],
2428 ehdr0->rsvd1[6], ehdr0->rsvd1[7]);
2429 if (ehdr0->rsvd2[0] || ehdr0->rsvd2[1] || ehdr0->rsvd2[2] ||
2430 ehdr0->rsvd2[3] || ehdr0->rsvd2[4] || ehdr0->rsvd2[5] ||
2431 ehdr0->rsvd2[6])
2432 fprintf(f, "#DDR_RSVD2 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
2433 ehdr0->rsvd2[0], ehdr0->rsvd2[1], ehdr0->rsvd2[2],
2434 ehdr0->rsvd2[3], ehdr0->rsvd2[4], ehdr0->rsvd2[5],
2435 ehdr0->rsvd2[6]);
2436 if (ehdr0->ddrwritetype)
2437 fprintf(f, "DDR_WRITE_TYPE %u\n", (unsigned)ehdr0->ddrwritetype);
2438 if (ehdr0->ddrresetmpp)
2439 fprintf(f, "DDR_RESET_MPP 0x%x\n", (unsigned)ehdr0->ddrresetmpp);
2440 if (ehdr0->ddrclkenmpp)
2441 fprintf(f, "DDR_CLKEN_MPP 0x%x\n", (unsigned)ehdr0->ddrclkenmpp);
2442 if (ehdr0->ddrinitdelay)
2443 fprintf(f, "DDR_INIT_DELAY %u\n", (unsigned)ehdr0->ddrinitdelay);
2444 }
2445
2446 if (ehdr0->offset) {
2447 for (regdata = (struct ext_hdr_v0_reg *)((uint8_t *)ptr + ehdr0->offset);
2448 (uint8_t *)regdata < (uint8_t *)ptr + header_size &&
2449 (regdata->raddr || regdata->rdata);
2450 regdata++)
2451 fprintf(f, "DATA 0x%08x 0x%08x\n", le32_to_cpu(regdata->raddr),
2452 le32_to_cpu(regdata->rdata));
2453 if ((uint8_t *)regdata != (uint8_t *)ptr + ehdr0->offset)
2454 fprintf(f, "DATA 0x0 0x0\n");
2455 }
2456
2457 if (le32_to_cpu(ehdr0->enddelay))
2458 fprintf(f, "DATA_DELAY %u\n", le32_to_cpu(ehdr0->enddelay));
2459 else if (is_v0_ext)
2460 fprintf(f, "DATA_DELAY SDRAM_SETUP\n");
2461 }
2462
2463 cur_idx = 1;
2464 for_each_bin_hdr_v0(bhdr0, ptr) {
2465 fprintf(f, "\nMATCH ADDRESS 0x%08x MASK 0x%08x VALUE 0x%08x\n",
2466 le32_to_cpu(bhdr0->match_addr),
2467 le32_to_cpu(bhdr0->match_mask),
2468 le32_to_cpu(bhdr0->match_value));
2469
2470 fprintf(f, "BINARY binary%d.bin", cur_idx);
2471 params_count = fls4(bhdr0->params_flags & 0xF);
2472 for (i = 0; i < params_count; i++)
2473 fprintf(f, " 0x%x", (bhdr0->params[i] & (1 << i)) ? bhdr0->params[i] : 0);
2474 fprintf(f, " LOAD_ADDRESS 0x%08x", le32_to_cpu(bhdr0->destaddr));
2475 fprintf(f, " EXEC_ADDRESS 0x%08x", le32_to_cpu(bhdr0->execaddr));
2476 fprintf(f, "\n");
2477
2478 fprintf(f, "#BINARY_OFFSET 0x%x\n", le32_to_cpu(bhdr0->offset));
2479 fprintf(f, "#BINARY_SIZE 0x%x\n", le32_to_cpu(bhdr0->size));
2480
2481 if (bhdr0->rsvd1)
2482 fprintf(f, "#BINARY_RSVD1 0x%x\n", (unsigned)bhdr0->rsvd1);
2483 if (bhdr0->rsvd2)
2484 fprintf(f, "#BINARY_RSVD2 0x%x\n", (unsigned)bhdr0->rsvd2);
2485
2486 cur_idx++;
2487 }
2488
2489 /* Undocumented reserved fields */
2490
2491 if (version == 0 && (mhdr0->rsvd1[0] || mhdr0->rsvd1[1] || mhdr0->rsvd1[2]))
2492 fprintf(f, "#RSVD1 0x%x 0x%x 0x%x\n", (unsigned)mhdr0->rsvd1[0],
2493 (unsigned)mhdr0->rsvd1[1], (unsigned)mhdr0->rsvd1[2]);
2494
2495 if (version == 0 && le16_to_cpu(mhdr0->rsvd2))
2496 fprintf(f, "#RSVD2 0x%x\n", (unsigned)le16_to_cpu(mhdr0->rsvd2));
2497
2498 if (version != 0 && mhdr->reserved4)
2499 fprintf(f, "#RESERVED4 0x%x\n", (unsigned)mhdr->reserved4);
2500
2501 if (version != 0 && mhdr->reserved5)
2502 fprintf(f, "#RESERVED5 0x%x\n", (unsigned)le16_to_cpu(mhdr->reserved5));
2503
2504 fclose(f);
2505
2506 return 0;
2507 }
2508
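/*
 * Extract a part of an existing kwbimage, selected via '-p':
 *   -p -1  regenerate the kwbimage cfg file (kwbimage_generate_config)
 *   -p 0   extract the data image referenced by the main header
 *   -p N   extract the Nth binary executable from a v1 binary optional
 *          header or a v0 binary header
 *
 * Illustrative dumpimage call (file names are examples only):
 *   dumpimage -T kwbimage -p 0 -o data.bin u-boot.kwb
 */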
2509 static int kwbimage_extract_subimage(void *ptr, struct image_tool_params *params)
2510 {
2511 struct main_hdr_v1 *mhdr = (struct main_hdr_v1 *)ptr;
2512 size_t header_size = kwbheader_size(ptr);
2513 struct bin_hdr_v0 *bhdr;
2514 struct opt_hdr_v1 *ohdr;
2515 int idx = params->pflag;
2516 int cur_idx;
2517 uint32_t offset;
2518 ulong image;
2519 ulong size;
2520
2521 /* Generate kwbimage config file when '-p -1' is specified */
2522 if (idx == -1)
2523 return kwbimage_generate_config(ptr, params);
2524
2525 image = 0;
2526 size = 0;
2527
2528 if (idx == 0) {
2529 /* Extract data image when -p is not specified or when '-p 0' is specified */
2530 offset = le32_to_cpu(mhdr->srcaddr);
2531
2532 if (mhdr->blockid == IBR_HDR_SATA_ID)
2533 offset *= params->bl_len;
2534
2535 if (mhdr->blockid == IBR_HDR_PEX_ID && offset == 0xFFFFFFFF)
2536 offset = header_size;
2537
2538 image = (ulong)((uint8_t *)ptr + offset);
2539 size = le32_to_cpu(mhdr->blocksize) - 4;
2540 } else {
2541 /* Extract the N-th binary header executable image when some other '-p N' is specified */
2542 cur_idx = 1;
2543 for_each_opt_hdr_v1(ohdr, ptr) {
2544 if (ohdr->headertype != OPT_HDR_V1_BINARY_TYPE)
2545 continue;
2546
2547 if (idx == cur_idx) {
2548 image = (ulong)&ohdr->data[4 + 4 * ohdr->data[0]];
2549 size = opt_hdr_v1_size(ohdr) - 12 - 4 * ohdr->data[0];
2550 break;
2551 }
2552
2553 ++cur_idx;
2554 }
2555 for_each_bin_hdr_v0(bhdr, ptr) {
2556 if (idx == cur_idx) {
2557 image = (ulong)bhdr + bhdr->offset;
2558 size = bhdr->size;
2559 break;
2560 }
2561 ++cur_idx;
2562 }
2563
2564 if (!image) {
2565 fprintf(stderr, "Argument -p %d is invalid\n", idx);
2566 fprintf(stderr, "Available subimages:\n");
2567 fprintf(stderr, " -p -1 - kwbimage config file\n");
2568 fprintf(stderr, " -p 0 - data image\n");
2569 if (cur_idx - 1 > 0)
2570 fprintf(stderr, " -p N - Nth binary header image (totally: %d)\n",
2571 cur_idx - 1);
2572 return -1;
2573 }
2574 }
2575
2576 return imagetool_save_subimage(params->outfile, image, size);
2577 }
2578
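/*
 * Sanity-check the mkimage parameters: creating a kwbimage requires a cfg
 * file passed as the image name, and unsupported combinations of the data
 * file, fflag, list and skipcpy options are rejected (nonzero means error).
 */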
2579 static int kwbimage_check_params(struct image_tool_params *params)
2580 {
2581 if (!params->lflag && !params->iflag && !params->pflag &&
2582 (!params->imagename || !strlen(params->imagename))) {
2583 char *msg = "Configuration file for kwbimage creation omitted";
2584
2585 fprintf(stderr, "Error:%s - %s\n", params->cmdname, msg);
2586 return 1;
2587 }
2588
2589 return (params->dflag && (params->fflag || params->lflag || params->skipcpy)) ||
2590 (params->fflag) ||
2591 (params->lflag && (params->dflag || params->fflag));
2592 }
2593
2594 /*
2595 * kwbimage type parameters definition
2596 */
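/*
 * The NULL entries are slots this image type leaves unused; the trailing
 * kwbimage_generate entry is the hook that pre-builds the header buffer
 * and computes the required padding before the image body is written.
 */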
2597 U_BOOT_IMAGE_TYPE(
2598 kwbimage,
2599 "Marvell MVEBU Boot Image support",
2600 0,
2601 NULL,
2602 kwbimage_check_params,
2603 kwbimage_verify_header,
2604 kwbimage_print_header,
2605 kwbimage_set_header,
2606 kwbimage_extract_subimage,
2607 kwbimage_check_image_types,
2608 NULL,
2609 kwbimage_generate
2610 );