From: Tobias Stoeckmann
Date: Wed, 16 Oct 2024 14:55:43 +0000 (+0200)
Subject: libkmod: Allow better optimization
X-Git-Tag: v34~205
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=557e79c360a304ab22c2a81daff440f4e052a1a0;p=thirdparty%2Fkmod.git

libkmod: Allow better optimization

If we have native endianness, i.e. parsing modules for the running
system, assist the compiler to note that it is really much faster to
move a word/qword etc. instead of actually running through a loop.

Reduces library instructions on x86_64 by 1.4 % and binary
instructions by 3 % with default configuration.

Signed-off-by: Tobias Stoeckmann
Reviewed-by: Emil Velikov
Link: https://github.com/kmod-project/kmod/pull/187
Signed-off-by: Lucas De Marchi
---

diff --git a/libkmod/libkmod-elf.c b/libkmod/libkmod-elf.c
index 9660bcd6..cbdcd75a 100644
--- a/libkmod/libkmod-elf.c
+++ b/libkmod/libkmod-elf.c
@@ -5,6 +5,7 @@
 #include <assert.h>
 #include <elf.h>
+#include <endian.h>
 #include <errno.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -110,7 +111,6 @@ static inline uint64_t elf_get_uint(const struct kmod_elf *elf, uint64_t offset,
 {
 	const uint8_t *p;
 	uint64_t ret = 0;
-	size_t i;
 
 	assert(size <= sizeof(uint64_t));
 	assert(offset + size <= elf->size);
@@ -123,12 +123,13 @@ static inline uint64_t elf_get_uint(const struct kmod_elf *elf, uint64_t offset,
 	}
 
 	p = elf->memory + offset;
+
 	if (elf->msb) {
-		for (i = 0; i < size; i++)
-			ret = (ret << 8) | p[i];
+		memcpy((char *)&ret + sizeof(ret) - size, p, size);
+		ret = be64toh(ret);
 	} else {
-		for (i = 1; i <= size; i++)
-			ret = (ret << 8) | p[size - i];
+		memcpy(&ret, p, size);
+		ret = le64toh(ret);
 	}
 
 	ELFDBG(elf, "size=%" PRIu16 " offset=%" PRIu64 " value=%" PRIu64 "\n", size,