/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/export.h>
#include <linux/extable.h>
#include <linux/moduleloader.h>
#include <linux/trace_events.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <linux/set_memory.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/livepatch.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/dynamic_debug.h>
#include <linux/audit.h>
#include <uapi/linux/module.h>
#include "module-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>
#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_STRICT_MODULE_RWX=y
 */
#ifdef CONFIG_STRICT_MODULE_RWX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif

/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
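/*
 * layout_sections() below records each section's offset in sh_entsize and
 * ORs in INIT_OFFSET_MASK for init sections; e.g. an init-section offset
 * of 0x200 is stored as (0x200 | INIT_OFFSET_MASK), so later stages can
 * tell whether a section was placed in the core or the init layout.
 */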
/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete and add uses RCU list operations).
 */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
#ifdef CONFIG_MODULES_TREE_LOOKUP

/*
 * Use a latched RB-tree for __module_address(); this allows us to use
 * RCU-sched lookups of the address from any context.
 *
 * This is conditional on PERF_EVENTS || TRACING because those can really hit
 * __module_address() hard by doing a lot of stack unwinding; potentially from
 * NMI context.
 */

static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
{
        struct module_layout *layout = container_of(n, struct module_layout, mtn.node);

        return (unsigned long)layout->base;
}
static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
{
        struct module_layout *layout = container_of(n, struct module_layout, mtn.node);

        return (unsigned long)layout->size;
}
static __always_inline bool
mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
        return __mod_tree_val(a) < __mod_tree_val(b);
}
static __always_inline int
mod_tree_comp(void *key, struct latch_tree_node *n)
{
        unsigned long val = (unsigned long)key;
        unsigned long start, end;

        start = __mod_tree_val(n);
        if (val < start)
                return -1;

        end = start + __mod_tree_size(n);
        if (val >= end)
                return 1;

        return 0;
}
static const struct latch_tree_ops mod_tree_ops = {
        .less = mod_tree_less,
        .comp = mod_tree_comp,
};
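/*
 * With these ops, latch_tree_find() treats each module_layout as the
 * half-open interval [base, base + size): mod_tree_comp() returns -1 for an
 * address below base, 1 for an address at or above base + size, and 0 when
 * the address falls inside the layout, i.e. inside that module.
 */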
static struct mod_tree_root {
        struct latch_tree_root root;
        unsigned long addr_min;
        unsigned long addr_max;
} mod_tree __cacheline_aligned = {
        .addr_min = -1UL,
};

#define module_addr_min mod_tree.addr_min
#define module_addr_max mod_tree.addr_max
static noinline void __mod_tree_insert(struct mod_tree_node *node)
{
        latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
}

static void __mod_tree_remove(struct mod_tree_node *node)
{
        latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
}
/*
 * These modifications: insert, remove_init and remove; are serialized by the
 * module_mutex.
 */
static void mod_tree_insert(struct module *mod)
{
        mod->core_layout.mtn.mod = mod;
        mod->init_layout.mtn.mod = mod;

        __mod_tree_insert(&mod->core_layout.mtn);
        if (mod->init_layout.size)
                __mod_tree_insert(&mod->init_layout.mtn);
}

static void mod_tree_remove_init(struct module *mod)
{
        if (mod->init_layout.size)
                __mod_tree_remove(&mod->init_layout.mtn);
}

static void mod_tree_remove(struct module *mod)
{
        __mod_tree_remove(&mod->core_layout.mtn);
        mod_tree_remove_init(mod);
}
static struct module *mod_find(unsigned long addr)
{
        struct latch_tree_node *ltn;

        ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
        if (!ltn)
                return NULL;

        return container_of(ltn, struct mod_tree_node, node)->mod;
}
#else /* MODULES_TREE_LOOKUP */

static unsigned long module_addr_min = -1UL, module_addr_max = 0;

static void mod_tree_insert(struct module *mod) { }
static void mod_tree_remove_init(struct module *mod) { }
static void mod_tree_remove(struct module *mod) { }

static struct module *mod_find(unsigned long addr)
{
        struct module *mod;

        list_for_each_entry_rcu(mod, &modules, list) {
                if (within_module(addr, mod))
                        return mod;
        }

        return NULL;
}

#endif /* MODULES_TREE_LOOKUP */
/*
 * Bounds of module text, for speeding up __module_address.
 * Protected by module_mutex.
 */
static void __mod_update_bounds(void *base, unsigned int size)
{
        unsigned long min = (unsigned long)base;
        unsigned long max = min + size;

        if (min < module_addr_min)
                module_addr_min = min;
        if (max > module_addr_max)
                module_addr_max = max;
}
static void mod_update_bounds(struct module *mod)
{
        __mod_update_bounds(mod->core_layout.base, mod->core_layout.size);
        if (mod->init_layout.size)
                __mod_update_bounds(mod->init_layout.base, mod->init_layout.size);
}
#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */
static void module_assert_mutex(void)
{
        lockdep_assert_held(&module_mutex);
}

static void module_assert_mutex_or_preempt(void)
{
#ifdef CONFIG_LOCKDEP
        if (unlikely(!debug_locks))
                return;

        WARN_ON_ONCE(!rcu_read_lock_sched_held() &&
                !lockdep_is_held(&module_mutex));
#endif
}
static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
#ifndef CONFIG_MODULE_SIG_FORCE
module_param(sig_enforce, bool_enable_only, 0644);
#endif /* !CONFIG_MODULE_SIG_FORCE */
/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);
int register_module_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);
struct load_info {
        const char *name;
        Elf_Ehdr *hdr;
        unsigned long len;
        Elf_Shdr *sechdrs;
        char *secstrings, *strtab;
        unsigned long symoffs, stroffs;
        struct _ddebug *debug;
        unsigned int num_debug;
        bool sig_ok;
#ifdef CONFIG_KALLSYMS
        unsigned long mod_kallsyms_init_off;
#endif
        struct {
                unsigned int sym, str, mod, vers, info, pcpu;
        } index;
};
/*
 * We require a truly strong try_module_get(): 0 means success.
 * Otherwise an error is returned due to ongoing or failed
 * initialization etc.
 */
static inline int strong_try_module_get(struct module *mod)
{
        BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
        if (mod && mod->state == MODULE_STATE_COMING)
                return -EBUSY;
        if (try_module_get(mod))
                return 0;
        else
                return -ENOENT;
}
static inline void add_taint_module(struct module *mod, unsigned flag,
                                    enum lockdep_ok lockdep_ok)
{
        add_taint(flag, lockdep_ok);
        set_bit(flag, &mod->taints);
}
/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __noreturn __module_put_and_exit(struct module *mod, long code)
{
        module_put(mod);
        do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);
/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
        unsigned int i;

        for (i = 1; i < info->hdr->e_shnum; i++) {
                Elf_Shdr *shdr = &info->sechdrs[i];
                /* Alloc bit cleared means "ignore it." */
                if ((shdr->sh_flags & SHF_ALLOC)
                    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
                        return i;
        }
        return 0;
}
/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
        /* Section 0 has sh_addr 0. */
        return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}
/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
                          const char *name,
                          size_t object_size,
                          unsigned int *num)
{
        unsigned int sec = find_sec(info, name);

        /* Section 0 has sh_addr 0 and sh_size 0. */
        *num = info->sechdrs[sec].sh_size / object_size;
        return (void *)info->sechdrs[sec].sh_addr;
}
/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const s32 __start___kcrctab[];
extern const s32 __start___kcrctab_gpl[];
extern const s32 __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const s32 __start___kcrctab_unused[];
extern const s32 __start___kcrctab_unused_gpl[];
#endif

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
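/*
 * i.e. with CONFIG_MODVERSIONS=y, symversion(mod->crcs, 3) yields
 * &mod->crcs[3] (or NULL when the exporter supplied no crc array);
 * without it the whole expression compiles away to NULL.
 */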
static bool each_symbol_in_section(const struct symsearch *arr,
                                   unsigned int arrsize,
                                   struct module *owner,
                                   bool (*fn)(const struct symsearch *syms,
                                              struct module *owner,
                                              void *data),
                                   void *data)
{
        unsigned int j;

        for (j = 0; j < arrsize; j++) {
                if (fn(&arr[j], owner, data))
                        return true;
        }

        return false;
}
/* Returns true as soon as fn returns true, otherwise false. */
static bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
                                    struct module *owner,
                                    void *data),
                         void *data)
{
        struct module *mod;
        static const struct symsearch arr[] = {
                { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
                  NOT_GPL_ONLY, false },
                { __start___ksymtab_gpl, __stop___ksymtab_gpl,
                  __start___kcrctab_gpl,
                  GPL_ONLY, false },
                { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
                  __start___kcrctab_gpl_future,
                  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
                { __start___ksymtab_unused, __stop___ksymtab_unused,
                  __start___kcrctab_unused,
                  NOT_GPL_ONLY, true },
                { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
                  __start___kcrctab_unused_gpl,
                  GPL_ONLY, true },
#endif
        };

        module_assert_mutex_or_preempt();

        if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
                return true;

        list_for_each_entry_rcu(mod, &modules, list) {
                struct symsearch arr[] = {
                        { mod->syms, mod->syms + mod->num_syms, mod->crcs,
                          NOT_GPL_ONLY, false },
                        { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
                          mod->gpl_crcs,
                          GPL_ONLY, false },
                        { mod->gpl_future_syms,
                          mod->gpl_future_syms + mod->num_gpl_future_syms,
                          mod->gpl_future_crcs,
                          WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
                        { mod->unused_syms,
                          mod->unused_syms + mod->num_unused_syms,
                          mod->unused_crcs,
                          NOT_GPL_ONLY, true },
                        { mod->unused_gpl_syms,
                          mod->unused_gpl_syms + mod->num_unused_gpl_syms,
                          mod->unused_gpl_crcs,
                          GPL_ONLY, true },
#endif
                };

                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;

                if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
                        return true;
        }
        return false;
}
struct find_symbol_arg {
        /* Input */
        const char *name;
        bool gplok;
        bool warn;

        /* Output */
        struct module *owner;
        const s32 *crc;
        const struct kernel_symbol *sym;
        enum mod_license license;
};
static bool check_symbol(const struct symsearch *syms,
                         struct module *owner,
                         unsigned int symnum, void *data)
{
        struct find_symbol_arg *fsa = data;

        if (!fsa->gplok) {
                if (syms->license == GPL_ONLY)
                        return false;
                if (syms->license == WILL_BE_GPL_ONLY && fsa->warn) {
                        pr_warn("Symbol %s is being used by a non-GPL module, "
                                "which will not be allowed in the future\n",
                                fsa->name);
                }
        }

#ifdef CONFIG_UNUSED_SYMBOLS
        if (syms->unused && fsa->warn) {
                pr_warn("Symbol %s is marked as UNUSED, however this module is "
                        "using it.\n", fsa->name);
                pr_warn("This symbol will go away in the future.\n");
                pr_warn("Please evaluate if this is the right api to use and "
                        "if it really is, submit a report to the linux kernel "
                        "mailing list together with submitting your code for "
                        "inclusion.\n");
        }
#endif

        fsa->owner = owner;
        fsa->crc = symversion(syms->crcs, symnum);
        fsa->sym = &syms->start[symnum];
        fsa->license = syms->license;
        return true;
}
static int cmp_name(const void *va, const void *vb)
{
        const char *a;
        const struct kernel_symbol *b;

        a = va; b = vb;

        return strcmp(a, b->name);
}
static bool find_symbol_in_section(const struct symsearch *syms,
                                   struct module *owner,
                                   void *data)
{
        struct find_symbol_arg *fsa = data;
        struct kernel_symbol *sym;

        sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
                        sizeof(struct kernel_symbol), cmp_name);

        if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
                return true;

        return false;
}
/* Find a symbol and return it, along with, (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
static const struct kernel_symbol *find_symbol(const char *name,
                                        struct module **owner,
                                        const s32 **crc,
                                        enum mod_license *license,
                                        bool gplok,
                                        bool warn)
{
        struct find_symbol_arg fsa;

        fsa.name = name;
        fsa.gplok = gplok;
        fsa.warn = warn;

        if (each_symbol_section(find_symbol_in_section, &fsa)) {
                if (owner)
                        *owner = fsa.owner;
                if (crc)
                        *crc = fsa.crc;
                if (license)
                        *license = fsa.license;
                return fsa.sym;
        }

        pr_debug("Failed to find symbol %s\n", name);
        return NULL;
}
/*
 * Search for module by name: must hold module_mutex (or preempt disabled
 * for read-only access).
 */
static struct module *find_module_all(const char *name, size_t len,
                                      bool even_unformed)
{
        struct module *mod;

        module_assert_mutex_or_preempt();

        list_for_each_entry_rcu(mod, &modules, list) {
                if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
                        continue;
                if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
                        return mod;
        }
        return NULL;
}

struct module *find_module(const char *name)
{
        module_assert_mutex();
        return find_module_all(name, strlen(name), false);
}
EXPORT_SYMBOL_GPL(find_module);
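/*
 * For example, delete_module() below only looks a module up after taking
 * module_mutex:
 *	mutex_lock_interruptible(&module_mutex);
 *	mod = find_module(name);
 * so the returned module cannot disappear until the mutex is dropped.
 */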
#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
        return mod->percpu;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
        Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
        unsigned long align = pcpusec->sh_addralign;

        if (!pcpusec->sh_size)
                return 0;

        if (align > PAGE_SIZE) {
                pr_warn("%s: per-cpu alignment %li > %li\n",
                        mod->name, align, PAGE_SIZE);
                align = PAGE_SIZE;
        }

        mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
        if (!mod->percpu) {
                pr_warn("%s: Could not allocate %lu bytes percpu data\n",
                        mod->name, (unsigned long)pcpusec->sh_size);
                return -ENOMEM;
        }
        mod->percpu_size = pcpusec->sh_size;
        return 0;
}
static void percpu_modfree(struct module *mod)
{
        free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
        return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
                           const void *from, unsigned long size)
{
        int cpu;

        for_each_possible_cpu(cpu)
                memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}
bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
        struct module *mod;
        unsigned int cpu;

        preempt_disable();

        list_for_each_entry_rcu(mod, &modules, list) {
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
                if (!mod->percpu_size)
                        continue;
                for_each_possible_cpu(cpu) {
                        void *start = per_cpu_ptr(mod->percpu, cpu);
                        void *va = (void *)addr;

                        if (va >= start && va < start + mod->percpu_size) {
                                if (can_addr) {
                                        *can_addr = (unsigned long) (va - start);
                                        *can_addr += (unsigned long)
                                                per_cpu_ptr(mod->percpu,
                                                            get_boot_cpu_id());
                                }
                                preempt_enable();
                                return true;
                        }
                }
        }

        preempt_enable();
        return false;
}
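/*
 * Note that the value returned via @can_addr above is @addr expressed as an
 * offset into the module's per-cpu area, rebased onto one fixed CPU's copy,
 * so two aliases of the same static per-cpu variable compare equal no matter
 * which CPU's copy they were taken from.
 */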
/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * RETURNS:
 * %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
        return __is_module_percpu_address(addr, NULL);
}
#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
        return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
        /* UP modules shouldn't have this section: ENOMEM isn't quite right */
        if (info->sechdrs[info->index.pcpu].sh_size != 0)
                return -ENOMEM;
        return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
        return 0;
}
static inline void percpu_modcopy(struct module *mod,
                                  const void *from, unsigned long size)
{
        /* pcpusec should be 0, and size of that section should be 0. */
        BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
        return false;
}

bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
{
        return false;
}

#endif /* CONFIG_SMP */
#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,  \
			struct module_kobject *mk, char *buffer)      \
{                                                                     \
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)              \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                 \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                   \
	.attr = { .name = __stringify(field), .mode = 0444 },        \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};
MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
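/*
 * Each MODINFO_ATTR(x) use above thus generates setup_modinfo_x(),
 * show_modinfo_x(), modinfo_x_exists(), free_modinfo_x() and the
 * module_attribute "modinfo_x"; for "version" that attribute is what backs
 * /sys/module/<name>/version.
 */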
static char last_unloaded_module[MODULE_NAME_LEN+1];
#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count by kmodule loader. */
#define MODULE_REF_BASE	1
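/*
 * The loader's own reference is folded into mod->refcnt, so a live module
 * with two users has refcnt == MODULE_REF_BASE + 2 == 3, while an unused one
 * sits at exactly MODULE_REF_BASE; try_release_module_ref() below succeeds
 * only in the latter case.
 */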
/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
        /*
         * Initialize reference counter to MODULE_REF_BASE.
         * refcnt == 0 means module is going.
         */
        atomic_set(&mod->refcnt, MODULE_REF_BASE);

        INIT_LIST_HEAD(&mod->source_list);
        INIT_LIST_HEAD(&mod->target_list);

        /* Hold reference count during initialization. */
        atomic_inc(&mod->refcnt);

        return 0;
}
/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
        struct module_use *use;

        list_for_each_entry(use, &b->source_list, source_list) {
                if (use->source == a) {
                        pr_debug("%s uses %s!\n", a->name, b->name);
                        return 1;
                }
        }
        pr_debug("%s does not use %s!\n", a->name, b->name);
        return 0;
}
/*
 * Module use:
 * - we add 'a' as a "source", 'b' as a "target" of module use
 * - the module_use is added to the list of 'b' sources (so
 *   'b' can walk the list to see who sourced them), and of 'a'
 *   targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
        struct module_use *use;

        pr_debug("Allocating new usage for %s.\n", a->name);
        use = kmalloc(sizeof(*use), GFP_ATOMIC);
        if (!use) {
                pr_warn("%s: out of memory loading\n", a->name);
                return -ENOMEM;
        }

        use->source = a;
        use->target = b;
        list_add(&use->source_list, &b->source_list);
        list_add(&use->target_list, &a->target_list);
        return 0;
}
/* Module a uses b: caller needs module_mutex() */
static int ref_module(struct module *a, struct module *b)
{
        int err;

        if (b == NULL || already_uses(a, b))
                return 0;

        /* If module isn't available, we fail. */
        err = strong_try_module_get(b);
        if (err)
                return err;

        err = add_module_usage(a, b);
        if (err) {
                module_put(b);
                return err;
        }
        return 0;
}
877 static void module_unload_free(struct module
*mod
)
879 struct module_use
*use
, *tmp
;
881 mutex_lock(&module_mutex
);
882 list_for_each_entry_safe(use
, tmp
, &mod
->target_list
, target_list
) {
883 struct module
*i
= use
->target
;
884 pr_debug("%s unusing %s\n", mod
->name
, i
->name
);
886 list_del(&use
->source_list
);
887 list_del(&use
->target_list
);
890 mutex_unlock(&module_mutex
);
#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
        int ret = (flags & O_TRUNC);
        if (ret)
                add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
        return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
        return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */
/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
        int ret;

        /* Try to decrement refcnt which we set at loading */
        ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
        BUG_ON(ret < 0);
        if (ret)
                /* Someone can put this right now, recover with checking */
                ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

        return ret;
}
static int try_stop_module(struct module *mod, int flags, int *forced)
{
        /* If it's not unused, quit unless we're forcing. */
        if (try_release_module_ref(mod) != 0) {
                *forced = try_force_unload(flags);
                if (!(*forced))
                        return -EWOULDBLOCK;
        }

        /* Mark it as dying. */
        mod->state = MODULE_STATE_GOING;

        return 0;
}
/**
 * module_refcount - return the refcount or -1 if unloading
 *
 * @mod: the module we're checking
 *
 * Returns:
 *	-1 if the module is in the process of unloading
 *	otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
{
        return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);
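/*
 * module_refcount() is what show_refcnt() below exposes through
 * /sys/module/<name>/refcnt and what print_unload_info() prints into the
 * per-module line of /proc/modules.
 */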
953 /* This exists whether we can unload or not */
954 static void free_module(struct module
*mod
);
956 SYSCALL_DEFINE2(delete_module
, const char __user
*, name_user
,
960 char name
[MODULE_NAME_LEN
];
963 if (!capable(CAP_SYS_MODULE
) || modules_disabled
)
966 if (strncpy_from_user(name
, name_user
, MODULE_NAME_LEN
-1) < 0)
968 name
[MODULE_NAME_LEN
-1] = '\0';
970 audit_log_kern_module(name
);
972 if (mutex_lock_interruptible(&module_mutex
) != 0)
975 mod
= find_module(name
);
981 if (!list_empty(&mod
->source_list
)) {
982 /* Other modules depend on us: get rid of them first. */
987 /* Doing init or already dying? */
988 if (mod
->state
!= MODULE_STATE_LIVE
) {
989 /* FIXME: if (force), slam module count damn the torpedoes */
990 pr_debug("%s already dying\n", mod
->name
);
995 /* If it has an init func, it must have an exit func to unload */
996 if (mod
->init
&& !mod
->exit
) {
997 forced
= try_force_unload(flags
);
999 /* This module can't be removed */
1005 /* Stop the machine so refcounts can't move and disable module. */
1006 ret
= try_stop_module(mod
, flags
, &forced
);
1010 mutex_unlock(&module_mutex
);
1011 /* Final destruction now no one is using it. */
1012 if (mod
->exit
!= NULL
)
1014 blocking_notifier_call_chain(&module_notify_list
,
1015 MODULE_STATE_GOING
, mod
);
1016 klp_module_going(mod
);
1017 ftrace_release_mod(mod
);
1019 async_synchronize_full();
1021 /* Store the name of the last unloaded module for diagnostic purposes */
1022 strlcpy(last_unloaded_module
, mod
->name
, sizeof(last_unloaded_module
));
1025 /* someone could wait for the module in add_unformed_module() */
1026 wake_up_all(&module_wq
);
1029 mutex_unlock(&module_mutex
);
1033 static inline void print_unload_info(struct seq_file
*m
, struct module
*mod
)
1035 struct module_use
*use
;
1036 int printed_something
= 0;
1038 seq_printf(m
, " %i ", module_refcount(mod
));
1041 * Always include a trailing , so userspace can differentiate
1042 * between this and the old multi-field proc format.
1044 list_for_each_entry(use
, &mod
->source_list
, source_list
) {
1045 printed_something
= 1;
1046 seq_printf(m
, "%s,", use
->source
->name
);
1049 if (mod
->init
!= NULL
&& mod
->exit
== NULL
) {
1050 printed_something
= 1;
1051 seq_puts(m
, "[permanent],");
1054 if (!printed_something
)
1058 void __symbol_put(const char *symbol
)
1060 struct module
*owner
;
1063 if (!find_symbol(symbol
, &owner
, NULL
, NULL
, true, false))
1068 EXPORT_SYMBOL(__symbol_put
);
1070 /* Note this assumes addr is a function, which it currently always is. */
1071 void symbol_put_addr(void *addr
)
1073 struct module
*modaddr
;
1074 unsigned long a
= (unsigned long)dereference_function_descriptor(addr
);
1076 if (core_kernel_text(a
))
1080 * Even though we hold a reference on the module; we still need to
1081 * disable preemption in order to safely traverse the data structure.
1084 modaddr
= __module_text_address(a
);
1086 module_put(modaddr
);
1089 EXPORT_SYMBOL_GPL(symbol_put_addr
);
1091 static ssize_t
show_refcnt(struct module_attribute
*mattr
,
1092 struct module_kobject
*mk
, char *buffer
)
1094 return sprintf(buffer
, "%i\n", module_refcount(mk
->mod
));
1097 static struct module_attribute modinfo_refcnt
=
1098 __ATTR(refcnt
, 0444, show_refcnt
, NULL
);
1100 void __module_get(struct module
*module
)
1104 atomic_inc(&module
->refcnt
);
1105 trace_module_get(module
, _RET_IP_
);
1109 EXPORT_SYMBOL(__module_get
);
1111 bool try_module_get(struct module
*module
)
1117 /* Note: here, we can fail to get a reference */
1118 if (likely(module_is_live(module
) &&
1119 atomic_inc_not_zero(&module
->refcnt
) != 0))
1120 trace_module_get(module
, _RET_IP_
);
1128 EXPORT_SYMBOL(try_module_get
);
1130 void module_put(struct module
*module
)
1136 ret
= atomic_dec_if_positive(&module
->refcnt
);
1137 WARN_ON(ret
< 0); /* Failed to put refcount */
1138 trace_module_put(module
, _RET_IP_
);
1142 EXPORT_SYMBOL(module_put
);
1144 #else /* !CONFIG_MODULE_UNLOAD */
1145 static inline void print_unload_info(struct seq_file
*m
, struct module
*mod
)
1147 /* We don't know the usage count, or what modules are using. */
1148 seq_puts(m
, " - -");
1151 static inline void module_unload_free(struct module
*mod
)
1155 static int ref_module(struct module
*a
, struct module
*b
)
1157 return strong_try_module_get(b
);
1160 static inline int module_unload_init(struct module
*mod
)
1164 #endif /* CONFIG_MODULE_UNLOAD */
1166 static size_t module_flags_taint(struct module
*mod
, char *buf
)
1171 for (i
= 0; i
< TAINT_FLAGS_COUNT
; i
++) {
1172 if (taint_flags
[i
].module
&& test_bit(i
, &mod
->taints
))
1173 buf
[l
++] = taint_flags
[i
].c_true
;
1179 static ssize_t
show_initstate(struct module_attribute
*mattr
,
1180 struct module_kobject
*mk
, char *buffer
)
1182 const char *state
= "unknown";
1184 switch (mk
->mod
->state
) {
1185 case MODULE_STATE_LIVE
:
1188 case MODULE_STATE_COMING
:
1191 case MODULE_STATE_GOING
:
1197 return sprintf(buffer
, "%s\n", state
);
1200 static struct module_attribute modinfo_initstate
=
1201 __ATTR(initstate
, 0444, show_initstate
, NULL
);
1203 static ssize_t
store_uevent(struct module_attribute
*mattr
,
1204 struct module_kobject
*mk
,
1205 const char *buffer
, size_t count
)
1209 rc
= kobject_synth_uevent(&mk
->kobj
, buffer
, count
);
1210 return rc
? rc
: count
;
1213 struct module_attribute module_uevent
=
1214 __ATTR(uevent
, 0200, NULL
, store_uevent
);
1216 static ssize_t
show_coresize(struct module_attribute
*mattr
,
1217 struct module_kobject
*mk
, char *buffer
)
1219 return sprintf(buffer
, "%u\n", mk
->mod
->core_layout
.size
);
1222 static struct module_attribute modinfo_coresize
=
1223 __ATTR(coresize
, 0444, show_coresize
, NULL
);
1225 static ssize_t
show_initsize(struct module_attribute
*mattr
,
1226 struct module_kobject
*mk
, char *buffer
)
1228 return sprintf(buffer
, "%u\n", mk
->mod
->init_layout
.size
);
1231 static struct module_attribute modinfo_initsize
=
1232 __ATTR(initsize
, 0444, show_initsize
, NULL
);
1234 static ssize_t
show_taint(struct module_attribute
*mattr
,
1235 struct module_kobject
*mk
, char *buffer
)
1239 l
= module_flags_taint(mk
->mod
, buffer
);
1244 static struct module_attribute modinfo_taint
=
1245 __ATTR(taint
, 0444, show_taint
, NULL
);
1247 static struct module_attribute
*modinfo_attrs
[] = {
1250 &modinfo_srcversion
,
1255 #ifdef CONFIG_MODULE_UNLOAD
1261 static const char vermagic
[] = VERMAGIC_STRING
;
1263 static int try_to_force_load(struct module
*mod
, const char *reason
)
1265 #ifdef CONFIG_MODULE_FORCE_LOAD
1266 if (!test_taint(TAINT_FORCED_MODULE
))
1267 pr_warn("%s: %s: kernel tainted.\n", mod
->name
, reason
);
1268 add_taint_module(mod
, TAINT_FORCED_MODULE
, LOCKDEP_NOW_UNRELIABLE
);
1275 #ifdef CONFIG_MODVERSIONS
1277 static u32
resolve_rel_crc(const s32
*crc
)
1279 return *(u32
*)((void *)crc
+ *crc
);
1282 static int check_version(const struct load_info
*info
,
1283 const char *symname
,
1287 Elf_Shdr
*sechdrs
= info
->sechdrs
;
1288 unsigned int versindex
= info
->index
.vers
;
1289 unsigned int i
, num_versions
;
1290 struct modversion_info
*versions
;
1292 /* Exporting module didn't supply crcs? OK, we're already tainted. */
1296 /* No versions at all? modprobe --force does this. */
1298 return try_to_force_load(mod
, symname
) == 0;
1300 versions
= (void *) sechdrs
[versindex
].sh_addr
;
1301 num_versions
= sechdrs
[versindex
].sh_size
1302 / sizeof(struct modversion_info
);
1304 for (i
= 0; i
< num_versions
; i
++) {
1307 if (strcmp(versions
[i
].name
, symname
) != 0)
1310 if (IS_ENABLED(CONFIG_MODULE_REL_CRCS
))
1311 crcval
= resolve_rel_crc(crc
);
1314 if (versions
[i
].crc
== crcval
)
1316 pr_debug("Found checksum %X vs module %lX\n",
1317 crcval
, versions
[i
].crc
);
1321 /* Broken toolchain. Warn once, then let it go.. */
1322 pr_warn_once("%s: no symbol version for %s\n", info
->name
, symname
);
1326 pr_warn("%s: disagrees about version of symbol %s\n",
1327 info
->name
, symname
);
1331 static inline int check_modstruct_version(const struct load_info
*info
,
1337 * Since this should be found in kernel (which can't be removed), no
1338 * locking is necessary -- use preempt_disable() to placate lockdep.
1341 if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout
), NULL
,
1342 &crc
, NULL
, true, false)) {
1347 return check_version(info
, VMLINUX_SYMBOL_STR(module_layout
),
1351 /* First part is kernel version, which we ignore if module has crcs. */
1352 static inline int same_magic(const char *amagic
, const char *bmagic
,
1356 amagic
+= strcspn(amagic
, " ");
1357 bmagic
+= strcspn(bmagic
, " ");
1359 return strcmp(amagic
, bmagic
) == 0;
1362 static inline int check_version(const struct load_info
*info
,
1363 const char *symname
,
1370 static inline int check_modstruct_version(const struct load_info
*info
,
1376 static inline int same_magic(const char *amagic
, const char *bmagic
,
1379 return strcmp(amagic
, bmagic
) == 0;
1381 #endif /* CONFIG_MODVERSIONS */
1383 static bool inherit_taint(struct module
*mod
, struct module
*owner
)
1385 if (!owner
|| !test_bit(TAINT_PROPRIETARY_MODULE
, &owner
->taints
))
1388 if (mod
->using_gplonly_symbols
) {
1389 pr_err("%s: module using GPL-only symbols uses symbols from proprietary module %s.\n",
1390 mod
->name
, owner
->name
);
1394 if (!test_bit(TAINT_PROPRIETARY_MODULE
, &mod
->taints
)) {
1395 pr_warn("%s: module uses symbols from proprietary module %s, inheriting taint.\n",
1396 mod
->name
, owner
->name
);
1397 set_bit(TAINT_PROPRIETARY_MODULE
, &mod
->taints
);
1402 /* Resolve a symbol for this module. I.e. if we find one, record usage. */
1403 static const struct kernel_symbol
*resolve_symbol(struct module
*mod
,
1404 const struct load_info
*info
,
1408 struct module
*owner
;
1409 const struct kernel_symbol
*sym
;
1411 enum mod_license license
;
1415 * The module_mutex should not be a heavily contended lock;
1416 * if we get the occasional sleep here, we'll go an extra iteration
1417 * in the wait_event_interruptible(), which is harmless.
1419 sched_annotate_sleep();
1420 mutex_lock(&module_mutex
);
1421 sym
= find_symbol(name
, &owner
, &crc
, &license
,
1422 !(mod
->taints
& (1 << TAINT_PROPRIETARY_MODULE
)), true);
1426 if (license
== GPL_ONLY
)
1427 mod
->using_gplonly_symbols
= true;
1429 if (!inherit_taint(mod
, owner
)) {
1434 if (!check_version(info
, name
, mod
, crc
)) {
1435 sym
= ERR_PTR(-EINVAL
);
1439 err
= ref_module(mod
, owner
);
1446 /* We must make copy under the lock if we failed to get ref. */
1447 strncpy(ownername
, module_name(owner
), MODULE_NAME_LEN
);
1449 mutex_unlock(&module_mutex
);
1453 static const struct kernel_symbol
*
1454 resolve_symbol_wait(struct module
*mod
,
1455 const struct load_info
*info
,
1458 const struct kernel_symbol
*ksym
;
1459 char owner
[MODULE_NAME_LEN
];
1461 if (wait_event_interruptible_timeout(module_wq
,
1462 !IS_ERR(ksym
= resolve_symbol(mod
, info
, name
, owner
))
1463 || PTR_ERR(ksym
) != -EBUSY
,
1465 pr_warn("%s: gave up waiting for init of module %s.\n",
1472 * /sys/module/foo/sections stuff
1473 * J. Corbet <corbet@lwn.net>
1477 #ifdef CONFIG_KALLSYMS
1478 static inline bool sect_empty(const Elf_Shdr
*sect
)
1480 return !(sect
->sh_flags
& SHF_ALLOC
) || sect
->sh_size
== 0;
1483 struct module_sect_attr
{
1484 struct module_attribute mattr
;
1486 unsigned long address
;
1489 struct module_sect_attrs
{
1490 struct attribute_group grp
;
1491 unsigned int nsections
;
1492 struct module_sect_attr attrs
[0];
1495 static ssize_t
module_sect_show(struct module_attribute
*mattr
,
1496 struct module_kobject
*mk
, char *buf
)
1498 struct module_sect_attr
*sattr
=
1499 container_of(mattr
, struct module_sect_attr
, mattr
);
1500 return sprintf(buf
, "0x%pK\n", (void *)sattr
->address
);
1503 static void free_sect_attrs(struct module_sect_attrs
*sect_attrs
)
1505 unsigned int section
;
1507 for (section
= 0; section
< sect_attrs
->nsections
; section
++)
1508 kfree(sect_attrs
->attrs
[section
].name
);
1512 static void add_sect_attrs(struct module
*mod
, const struct load_info
*info
)
1514 unsigned int nloaded
= 0, i
, size
[2];
1515 struct module_sect_attrs
*sect_attrs
;
1516 struct module_sect_attr
*sattr
;
1517 struct attribute
**gattr
;
1519 /* Count loaded sections and allocate structures */
1520 for (i
= 0; i
< info
->hdr
->e_shnum
; i
++)
1521 if (!sect_empty(&info
->sechdrs
[i
]))
1523 size
[0] = ALIGN(sizeof(*sect_attrs
)
1524 + nloaded
* sizeof(sect_attrs
->attrs
[0]),
1525 sizeof(sect_attrs
->grp
.attrs
[0]));
1526 size
[1] = (nloaded
+ 1) * sizeof(sect_attrs
->grp
.attrs
[0]);
1527 sect_attrs
= kzalloc(size
[0] + size
[1], GFP_KERNEL
);
1528 if (sect_attrs
== NULL
)
1531 /* Setup section attributes. */
1532 sect_attrs
->grp
.name
= "sections";
1533 sect_attrs
->grp
.attrs
= (void *)sect_attrs
+ size
[0];
1535 sect_attrs
->nsections
= 0;
	sattr = &sect_attrs->attrs[0];
	gattr = &sect_attrs->grp.attrs[0];
1538 for (i
= 0; i
< info
->hdr
->e_shnum
; i
++) {
1539 Elf_Shdr
*sec
= &info
->sechdrs
[i
];
1540 if (sect_empty(sec
))
1542 sattr
->address
= sec
->sh_addr
;
1543 sattr
->name
= kstrdup(info
->secstrings
+ sec
->sh_name
,
1545 if (sattr
->name
== NULL
)
1547 sect_attrs
->nsections
++;
1548 sysfs_attr_init(&sattr
->mattr
.attr
);
1549 sattr
->mattr
.show
= module_sect_show
;
1550 sattr
->mattr
.store
= NULL
;
1551 sattr
->mattr
.attr
.name
= sattr
->name
;
1552 sattr
->mattr
.attr
.mode
= S_IRUGO
;
1553 *(gattr
++) = &(sattr
++)->mattr
.attr
;
	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
		goto out;
1560 mod
->sect_attrs
= sect_attrs
;
1563 free_sect_attrs(sect_attrs
);
1566 static void remove_sect_attrs(struct module
*mod
)
1568 if (mod
->sect_attrs
) {
1569 sysfs_remove_group(&mod
->mkobj
.kobj
,
1570 &mod
->sect_attrs
->grp
);
1571 /* We are positive that no one is using any sect attrs
1572 * at this point. Deallocate immediately. */
1573 free_sect_attrs(mod
->sect_attrs
);
1574 mod
->sect_attrs
= NULL
;
1579 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
1582 struct module_notes_attrs
{
1583 struct kobject
*dir
;
1585 struct bin_attribute attrs
[0];
1588 static ssize_t
module_notes_read(struct file
*filp
, struct kobject
*kobj
,
1589 struct bin_attribute
*bin_attr
,
1590 char *buf
, loff_t pos
, size_t count
)
1593 * The caller checked the pos and count against our size.
1595 memcpy(buf
, bin_attr
->private + pos
, count
);
static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
			     unsigned int i)
{
	if (notes_attrs->dir) {
		while (i-- > 0)
			sysfs_remove_bin_file(notes_attrs->dir,
					      &notes_attrs->attrs[i]);
		kobject_put(notes_attrs->dir);
	}
	kfree(notes_attrs);
}
1611 static void add_notes_attrs(struct module
*mod
, const struct load_info
*info
)
1613 unsigned int notes
, loaded
, i
;
1614 struct module_notes_attrs
*notes_attrs
;
1615 struct bin_attribute
*nattr
;
1617 /* failed to create section attributes, so can't create notes */
1618 if (!mod
->sect_attrs
)
1621 /* Count notes sections and allocate structures. */
1623 for (i
= 0; i
< info
->hdr
->e_shnum
; i
++)
1624 if (!sect_empty(&info
->sechdrs
[i
]) &&
1625 (info
->sechdrs
[i
].sh_type
== SHT_NOTE
))
1631 notes_attrs
= kzalloc(sizeof(*notes_attrs
)
1632 + notes
* sizeof(notes_attrs
->attrs
[0]),
1634 if (notes_attrs
== NULL
)
1637 notes_attrs
->notes
= notes
;
	nattr = &notes_attrs->attrs[0];
1639 for (loaded
= i
= 0; i
< info
->hdr
->e_shnum
; ++i
) {
1640 if (sect_empty(&info
->sechdrs
[i
]))
1642 if (info
->sechdrs
[i
].sh_type
== SHT_NOTE
) {
1643 sysfs_bin_attr_init(nattr
);
1644 nattr
->attr
.name
= mod
->sect_attrs
->attrs
[loaded
].name
;
1645 nattr
->attr
.mode
= S_IRUGO
;
1646 nattr
->size
= info
->sechdrs
[i
].sh_size
;
1647 nattr
->private = (void *) info
->sechdrs
[i
].sh_addr
;
1648 nattr
->read
= module_notes_read
;
1654 notes_attrs
->dir
= kobject_create_and_add("notes", &mod
->mkobj
.kobj
);
1655 if (!notes_attrs
->dir
)
1658 for (i
= 0; i
< notes
; ++i
)
		if (sysfs_create_bin_file(notes_attrs->dir,
					  &notes_attrs->attrs[i]))
			goto out;
1663 mod
->notes_attrs
= notes_attrs
;
1667 free_notes_attrs(notes_attrs
, i
);
1670 static void remove_notes_attrs(struct module
*mod
)
1672 if (mod
->notes_attrs
)
1673 free_notes_attrs(mod
->notes_attrs
, mod
->notes_attrs
->notes
);
1678 static inline void add_sect_attrs(struct module
*mod
,
1679 const struct load_info
*info
)
1683 static inline void remove_sect_attrs(struct module
*mod
)
1687 static inline void add_notes_attrs(struct module
*mod
,
1688 const struct load_info
*info
)
1692 static inline void remove_notes_attrs(struct module
*mod
)
1695 #endif /* CONFIG_KALLSYMS */
1697 static void del_usage_links(struct module
*mod
)
1699 #ifdef CONFIG_MODULE_UNLOAD
1700 struct module_use
*use
;
1702 mutex_lock(&module_mutex
);
1703 list_for_each_entry(use
, &mod
->target_list
, target_list
)
1704 sysfs_remove_link(use
->target
->holders_dir
, mod
->name
);
1705 mutex_unlock(&module_mutex
);
1709 static int add_usage_links(struct module
*mod
)
1712 #ifdef CONFIG_MODULE_UNLOAD
1713 struct module_use
*use
;
1715 mutex_lock(&module_mutex
);
1716 list_for_each_entry(use
, &mod
->target_list
, target_list
) {
1717 ret
= sysfs_create_link(use
->target
->holders_dir
,
1718 &mod
->mkobj
.kobj
, mod
->name
);
1722 mutex_unlock(&module_mutex
);
1724 del_usage_links(mod
);
1729 static void module_remove_modinfo_attrs(struct module
*mod
, int end
);
1731 static int module_add_modinfo_attrs(struct module
*mod
)
1733 struct module_attribute
*attr
;
1734 struct module_attribute
*temp_attr
;
1738 mod
->modinfo_attrs
= kzalloc((sizeof(struct module_attribute
) *
1739 (ARRAY_SIZE(modinfo_attrs
) + 1)),
1741 if (!mod
->modinfo_attrs
)
1744 temp_attr
= mod
->modinfo_attrs
;
1745 for (i
= 0; (attr
= modinfo_attrs
[i
]); i
++) {
1746 if (!attr
->test
|| attr
->test(mod
)) {
1747 memcpy(temp_attr
, attr
, sizeof(*temp_attr
));
1748 sysfs_attr_init(&temp_attr
->attr
);
1749 error
= sysfs_create_file(&mod
->mkobj
.kobj
,
1761 module_remove_modinfo_attrs(mod
, --i
);
1763 kfree(mod
->modinfo_attrs
);
1767 static void module_remove_modinfo_attrs(struct module
*mod
, int end
)
1769 struct module_attribute
*attr
;
1772 for (i
= 0; (attr
= &mod
->modinfo_attrs
[i
]); i
++) {
1773 if (end
>= 0 && i
> end
)
1775 /* pick a field to test for end of list */
1776 if (!attr
->attr
.name
)
1778 sysfs_remove_file(&mod
->mkobj
.kobj
, &attr
->attr
);
1782 kfree(mod
->modinfo_attrs
);
1785 static void mod_kobject_put(struct module
*mod
)
1787 DECLARE_COMPLETION_ONSTACK(c
);
1788 mod
->mkobj
.kobj_completion
= &c
;
1789 kobject_put(&mod
->mkobj
.kobj
);
1790 wait_for_completion(&c
);
1793 static int mod_sysfs_init(struct module
*mod
)
1796 struct kobject
*kobj
;
1798 if (!module_sysfs_initialized
) {
1799 pr_err("%s: module sysfs not initialized\n", mod
->name
);
1804 kobj
= kset_find_obj(module_kset
, mod
->name
);
1806 pr_err("%s: module is already loaded\n", mod
->name
);
1812 mod
->mkobj
.mod
= mod
;
1814 memset(&mod
->mkobj
.kobj
, 0, sizeof(mod
->mkobj
.kobj
));
1815 mod
->mkobj
.kobj
.kset
= module_kset
;
1816 err
= kobject_init_and_add(&mod
->mkobj
.kobj
, &module_ktype
, NULL
,
1819 mod_kobject_put(mod
);
1825 static int mod_sysfs_setup(struct module
*mod
,
1826 const struct load_info
*info
,
1827 struct kernel_param
*kparam
,
1828 unsigned int num_params
)
1832 err
= mod_sysfs_init(mod
);
1836 mod
->holders_dir
= kobject_create_and_add("holders", &mod
->mkobj
.kobj
);
1837 if (!mod
->holders_dir
) {
1842 err
= module_param_sysfs_setup(mod
, kparam
, num_params
);
1844 goto out_unreg_holders
;
1846 err
= module_add_modinfo_attrs(mod
);
1848 goto out_unreg_param
;
1850 err
= add_usage_links(mod
);
1852 goto out_unreg_modinfo_attrs
;
1854 add_sect_attrs(mod
, info
);
1855 add_notes_attrs(mod
, info
);
1859 out_unreg_modinfo_attrs
:
1860 module_remove_modinfo_attrs(mod
, -1);
1862 module_param_sysfs_remove(mod
);
1864 kobject_put(mod
->holders_dir
);
1866 mod_kobject_put(mod
);
1871 static void mod_sysfs_fini(struct module
*mod
)
1873 remove_notes_attrs(mod
);
1874 remove_sect_attrs(mod
);
1875 mod_kobject_put(mod
);
1878 static void init_param_lock(struct module
*mod
)
1880 mutex_init(&mod
->param_lock
);
1882 #else /* !CONFIG_SYSFS */
1884 static int mod_sysfs_setup(struct module
*mod
,
1885 const struct load_info
*info
,
1886 struct kernel_param
*kparam
,
1887 unsigned int num_params
)
1892 static void mod_sysfs_fini(struct module
*mod
)
1896 static void module_remove_modinfo_attrs(struct module
*mod
, int end
)
1900 static void del_usage_links(struct module
*mod
)
1904 static void init_param_lock(struct module
*mod
)
1907 #endif /* CONFIG_SYSFS */
1909 static void mod_sysfs_teardown(struct module
*mod
)
1911 del_usage_links(mod
);
1912 module_remove_modinfo_attrs(mod
, -1);
1913 module_param_sysfs_remove(mod
);
1914 kobject_put(mod
->mkobj
.drivers_dir
);
1915 kobject_put(mod
->holders_dir
);
1916 mod_sysfs_fini(mod
);
#ifdef CONFIG_STRICT_MODULE_RWX
/*
 * LKM RO/NX protection: protect module's text/ro-data
 * from modification and any data from execution.
 *
 * General layout of module is:
 *          [text] [read-only-data] [ro-after-init] [writable data]
 * text_size -----^                ^               ^               ^
 * ro_size ------------------------|               |               |
 * ro_after_init_size -----------------------------|               |
 * size -----------------------------------------------------------|
 *
 * These values are always page-aligned (as is base)
 */
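/*
 * For example, a module whose text fills one page, rodata a second and
 * writable data a third ends up with text_size == PAGE_SIZE,
 * ro_size == ro_after_init_size == 2 * PAGE_SIZE and size == 3 * PAGE_SIZE,
 * so frob_rodata() below touches exactly the pages between text_size and
 * ro_size, and frob_writable_data() the pages after ro_after_init_size.
 */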
1933 static void frob_text(const struct module_layout
*layout
,
1934 int (*set_memory
)(unsigned long start
, int num_pages
))
1936 BUG_ON((unsigned long)layout
->base
& (PAGE_SIZE
-1));
1937 BUG_ON((unsigned long)layout
->text_size
& (PAGE_SIZE
-1));
1938 set_memory((unsigned long)layout
->base
,
1939 layout
->text_size
>> PAGE_SHIFT
);
1942 static void frob_rodata(const struct module_layout
*layout
,
1943 int (*set_memory
)(unsigned long start
, int num_pages
))
1945 BUG_ON((unsigned long)layout
->base
& (PAGE_SIZE
-1));
1946 BUG_ON((unsigned long)layout
->text_size
& (PAGE_SIZE
-1));
1947 BUG_ON((unsigned long)layout
->ro_size
& (PAGE_SIZE
-1));
1948 set_memory((unsigned long)layout
->base
+ layout
->text_size
,
1949 (layout
->ro_size
- layout
->text_size
) >> PAGE_SHIFT
);
1952 static void frob_ro_after_init(const struct module_layout
*layout
,
1953 int (*set_memory
)(unsigned long start
, int num_pages
))
1955 BUG_ON((unsigned long)layout
->base
& (PAGE_SIZE
-1));
1956 BUG_ON((unsigned long)layout
->ro_size
& (PAGE_SIZE
-1));
1957 BUG_ON((unsigned long)layout
->ro_after_init_size
& (PAGE_SIZE
-1));
1958 set_memory((unsigned long)layout
->base
+ layout
->ro_size
,
1959 (layout
->ro_after_init_size
- layout
->ro_size
) >> PAGE_SHIFT
);
1962 static void frob_writable_data(const struct module_layout
*layout
,
1963 int (*set_memory
)(unsigned long start
, int num_pages
))
1965 BUG_ON((unsigned long)layout
->base
& (PAGE_SIZE
-1));
1966 BUG_ON((unsigned long)layout
->ro_after_init_size
& (PAGE_SIZE
-1));
1967 BUG_ON((unsigned long)layout
->size
& (PAGE_SIZE
-1));
1968 set_memory((unsigned long)layout
->base
+ layout
->ro_after_init_size
,
1969 (layout
->size
- layout
->ro_after_init_size
) >> PAGE_SHIFT
);
1972 /* livepatching wants to disable read-only so it can frob module. */
1973 void module_disable_ro(const struct module
*mod
)
1975 if (!rodata_enabled
)
1978 frob_text(&mod
->core_layout
, set_memory_rw
);
1979 frob_rodata(&mod
->core_layout
, set_memory_rw
);
1980 frob_ro_after_init(&mod
->core_layout
, set_memory_rw
);
1981 frob_text(&mod
->init_layout
, set_memory_rw
);
1982 frob_rodata(&mod
->init_layout
, set_memory_rw
);
1985 void module_enable_ro(const struct module
*mod
, bool after_init
)
1987 if (!rodata_enabled
)
1990 frob_text(&mod
->core_layout
, set_memory_ro
);
1991 frob_rodata(&mod
->core_layout
, set_memory_ro
);
1992 frob_text(&mod
->init_layout
, set_memory_ro
);
1993 frob_rodata(&mod
->init_layout
, set_memory_ro
);
1996 frob_ro_after_init(&mod
->core_layout
, set_memory_ro
);
1999 static void module_enable_nx(const struct module
*mod
)
2001 frob_rodata(&mod
->core_layout
, set_memory_nx
);
2002 frob_ro_after_init(&mod
->core_layout
, set_memory_nx
);
2003 frob_writable_data(&mod
->core_layout
, set_memory_nx
);
2004 frob_rodata(&mod
->init_layout
, set_memory_nx
);
2005 frob_writable_data(&mod
->init_layout
, set_memory_nx
);
2008 static void module_disable_nx(const struct module
*mod
)
2010 frob_rodata(&mod
->core_layout
, set_memory_x
);
2011 frob_ro_after_init(&mod
->core_layout
, set_memory_x
);
2012 frob_writable_data(&mod
->core_layout
, set_memory_x
);
2013 frob_rodata(&mod
->init_layout
, set_memory_x
);
2014 frob_writable_data(&mod
->init_layout
, set_memory_x
);
2017 /* Iterate through all modules and set each module's text as RW */
2018 void set_all_modules_text_rw(void)
2022 if (!rodata_enabled
)
2025 mutex_lock(&module_mutex
);
2026 list_for_each_entry_rcu(mod
, &modules
, list
) {
2027 if (mod
->state
== MODULE_STATE_UNFORMED
)
2030 frob_text(&mod
->core_layout
, set_memory_rw
);
2031 frob_text(&mod
->init_layout
, set_memory_rw
);
2033 mutex_unlock(&module_mutex
);
2036 /* Iterate through all modules and set each module's text as RO */
2037 void set_all_modules_text_ro(void)
2041 if (!rodata_enabled
)
2044 mutex_lock(&module_mutex
);
2045 list_for_each_entry_rcu(mod
, &modules
, list
) {
2047 * Ignore going modules since it's possible that ro
2048 * protection has already been disabled, otherwise we'll
2049 * run into protection faults at module deallocation.
2051 if (mod
->state
== MODULE_STATE_UNFORMED
||
2052 mod
->state
== MODULE_STATE_GOING
)
2055 frob_text(&mod
->core_layout
, set_memory_ro
);
2056 frob_text(&mod
->init_layout
, set_memory_ro
);
2058 mutex_unlock(&module_mutex
);
2061 static void disable_ro_nx(const struct module_layout
*layout
)
2063 if (rodata_enabled
) {
2064 frob_text(layout
, set_memory_rw
);
2065 frob_rodata(layout
, set_memory_rw
);
2066 frob_ro_after_init(layout
, set_memory_rw
);
2068 frob_rodata(layout
, set_memory_x
);
2069 frob_ro_after_init(layout
, set_memory_x
);
2070 frob_writable_data(layout
, set_memory_x
);
2074 static void disable_ro_nx(const struct module_layout
*layout
) { }
2075 static void module_enable_nx(const struct module
*mod
) { }
2076 static void module_disable_nx(const struct module
*mod
) { }
2079 #ifdef CONFIG_LIVEPATCH
2081 * Persist Elf information about a module. Copy the Elf header,
2082 * section header table, section string table, and symtab section
2083 * index from info to mod->klp_info.
2085 static int copy_module_elf(struct module
*mod
, struct load_info
*info
)
2087 unsigned int size
, symndx
;
2090 size
= sizeof(*mod
->klp_info
);
2091 mod
->klp_info
= kmalloc(size
, GFP_KERNEL
);
2092 if (mod
->klp_info
== NULL
)
2096 size
= sizeof(mod
->klp_info
->hdr
);
2097 memcpy(&mod
->klp_info
->hdr
, info
->hdr
, size
);
2099 /* Elf section header table */
2100 size
= sizeof(*info
->sechdrs
) * info
->hdr
->e_shnum
;
2101 mod
->klp_info
->sechdrs
= kmalloc(size
, GFP_KERNEL
);
2102 if (mod
->klp_info
->sechdrs
== NULL
) {
2106 memcpy(mod
->klp_info
->sechdrs
, info
->sechdrs
, size
);
2108 /* Elf section name string table */
2109 size
= info
->sechdrs
[info
->hdr
->e_shstrndx
].sh_size
;
2110 mod
->klp_info
->secstrings
= kmalloc(size
, GFP_KERNEL
);
2111 if (mod
->klp_info
->secstrings
== NULL
) {
2115 memcpy(mod
->klp_info
->secstrings
, info
->secstrings
, size
);
2117 /* Elf symbol section index */
2118 symndx
= info
->index
.sym
;
2119 mod
->klp_info
->symndx
= symndx
;
2122 * For livepatch modules, core_kallsyms.symtab is a complete
2123 * copy of the original symbol table. Adjust sh_addr to point
2124 * to core_kallsyms.symtab since the copy of the symtab in module
2125 * init memory is freed at the end of do_init_module().
2127 mod
->klp_info
->sechdrs
[symndx
].sh_addr
= \
2128 (unsigned long) mod
->core_kallsyms
.symtab
;
2133 kfree(mod
->klp_info
->sechdrs
);
2135 kfree(mod
->klp_info
);
2139 static void free_module_elf(struct module
*mod
)
2141 kfree(mod
->klp_info
->sechdrs
);
2142 kfree(mod
->klp_info
->secstrings
);
2143 kfree(mod
->klp_info
);
2145 #else /* !CONFIG_LIVEPATCH */
2146 static int copy_module_elf(struct module
*mod
, struct load_info
*info
)
2151 static void free_module_elf(struct module
*mod
)
2154 #endif /* CONFIG_LIVEPATCH */
2156 void __weak
module_memfree(void *module_region
)
2158 vfree(module_region
);
2161 void __weak
module_arch_cleanup(struct module
*mod
)
2165 void __weak
module_arch_freeing_init(struct module
*mod
)
2169 /* Free a module, remove from lists, etc. */
2170 static void free_module(struct module
*mod
)
2172 trace_module_free(mod
);
2174 mod_sysfs_teardown(mod
);
2176 /* We leave it in list to prevent duplicate loads, but make sure
2177 * that noone uses it while it's being deconstructed. */
2178 mutex_lock(&module_mutex
);
2179 mod
->state
= MODULE_STATE_UNFORMED
;
2180 mutex_unlock(&module_mutex
);
2182 /* Remove dynamic debug info */
2183 ddebug_remove_module(mod
->name
);
2185 /* Arch-specific cleanup. */
2186 module_arch_cleanup(mod
);
2188 /* Module unload stuff */
2189 module_unload_free(mod
);
2191 /* Free any allocated parameters. */
2192 destroy_params(mod
->kp
, mod
->num_kp
);
2194 if (is_livepatch_module(mod
))
2195 free_module_elf(mod
);
2197 /* Now we can delete it from the lists */
2198 mutex_lock(&module_mutex
);
2199 /* Unlink carefully: kallsyms could be walking list. */
2200 list_del_rcu(&mod
->list
);
2201 mod_tree_remove(mod
);
2202 /* Remove this module from bug list, this uses list_del_rcu */
2203 module_bug_cleanup(mod
);
2204 /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
2205 synchronize_sched();
2206 mutex_unlock(&module_mutex
);
2208 /* This may be empty, but that's OK */
2209 disable_ro_nx(&mod
->init_layout
);
2210 module_arch_freeing_init(mod
);
2211 module_memfree(mod
->init_layout
.base
);
2213 percpu_modfree(mod
);
2215 /* Free lock-classes; relies on the preceding sync_rcu(). */
2216 lockdep_free_key_range(mod
->core_layout
.base
, mod
->core_layout
.size
);
2218 /* Finally, free the core (containing the module structure) */
2219 disable_ro_nx(&mod
->core_layout
);
2220 module_memfree(mod
->core_layout
.base
);
2223 update_protections(current
->mm
);
2227 void *__symbol_get(const char *symbol
)
2229 struct module
*owner
;
2230 enum mod_license license
;
2231 const struct kernel_symbol
*sym
;
2234 sym
= find_symbol(symbol
, &owner
, NULL
, &license
, true, true);
2237 if (license
!= GPL_ONLY
) {
2238 pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n",
2242 if (strong_try_module_get(owner
))
2246 return sym
? (void *)sym
->value
: NULL
;
2251 EXPORT_SYMBOL_GPL(__symbol_get
);
2254 * Ensure that an exported symbol [global namespace] does not already exist
2255 * in the kernel or in some other module's exported symbol table.
2257 * You must hold the module_mutex.
2259 static int verify_export_symbols(struct module
*mod
)
2262 struct module
*owner
;
2263 const struct kernel_symbol
*s
;
2265 const struct kernel_symbol
*sym
;
2268 { mod
->syms
, mod
->num_syms
},
2269 { mod
->gpl_syms
, mod
->num_gpl_syms
},
2270 { mod
->gpl_future_syms
, mod
->num_gpl_future_syms
},
2271 #ifdef CONFIG_UNUSED_SYMBOLS
2272 { mod
->unused_syms
, mod
->num_unused_syms
},
2273 { mod
->unused_gpl_syms
, mod
->num_unused_gpl_syms
},
2277 for (i
= 0; i
< ARRAY_SIZE(arr
); i
++) {
2278 for (s
= arr
[i
].sym
; s
< arr
[i
].sym
+ arr
[i
].num
; s
++) {
2279 if (find_symbol(s
->name
, &owner
, NULL
, NULL
, true, false)) {
2280 pr_err("%s: exports duplicate symbol %s"
2282 mod
->name
, s
->name
, module_name(owner
));
2290 static bool ignore_undef_symbol(Elf_Half emachine
, const char *name
)
2293 * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as
2294 * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64.
2295 * i386 has a similar problem but may not deserve a fix.
2297 * If we ever have to ignore many symbols, consider refactoring the code to
2298 * only warn if referenced by a relocation.
2300 if (emachine
== EM_386
|| emachine
== EM_X86_64
)
2301 return !strcmp(name
, "_GLOBAL_OFFSET_TABLE_");
2305 /* Change all symbols so that st_value encodes the pointer directly. */
2306 static int simplify_symbols(struct module
*mod
, const struct load_info
*info
)
2308 Elf_Shdr
*symsec
= &info
->sechdrs
[info
->index
.sym
];
2309 Elf_Sym
*sym
= (void *)symsec
->sh_addr
;
2310 unsigned long secbase
;
2313 const struct kernel_symbol
*ksym
;
2315 for (i
= 1; i
< symsec
->sh_size
/ sizeof(Elf_Sym
); i
++) {
2316 const char *name
= info
->strtab
+ sym
[i
].st_name
;
2318 switch (sym
[i
].st_shndx
) {
2320 /* Ignore common symbols */
2321 if (!strncmp(name
, "__gnu_lto", 9))
2324 /* We compiled with -fno-common. These are not
2325 supposed to happen. */
2326 pr_debug("Common symbol: %s\n", name
);
2327 pr_warn("%s: please compile with -fno-common\n",
2333 /* Don't need to do anything */
2334 pr_debug("Absolute symbol: 0x%08lx\n",
2335 (long)sym
[i
].st_value
);
2339 /* Livepatch symbols are resolved by livepatch */
2343 ksym
= resolve_symbol_wait(mod
, info
, name
);
2344 /* Ok if resolved. */
2345 if (ksym
&& !IS_ERR(ksym
)) {
2346 sym
[i
].st_value
= ksym
->value
;
2350 /* Ok if weak or ignored. */
2352 (ELF_ST_BIND(sym
[i
].st_info
) == STB_WEAK
||
2353 ignore_undef_symbol(info
->hdr
->e_machine
, name
)))
2356 pr_warn("%s: Unknown symbol %s (err %li)\n",
2357 mod
->name
, name
, PTR_ERR(ksym
));
2358 ret
= PTR_ERR(ksym
) ?: -ENOENT
;
2362 /* Divert to percpu allocation if a percpu var. */
2363 if (sym
[i
].st_shndx
== info
->index
.pcpu
)
2364 secbase
= (unsigned long)mod_percpu(mod
);
2366 secbase
= info
->sechdrs
[sym
[i
].st_shndx
].sh_addr
;
2367 sym
[i
].st_value
+= secbase
;
2375 static int apply_relocations(struct module
*mod
, const struct load_info
*info
)
2380 /* Now do relocations. */
2381 for (i
= 1; i
< info
->hdr
->e_shnum
; i
++) {
2382 unsigned int infosec
= info
->sechdrs
[i
].sh_info
;
2384 /* Not a valid relocation section? */
2385 if (infosec
>= info
->hdr
->e_shnum
)
2388 /* Don't bother with non-allocated sections */
2389 if (!(info
->sechdrs
[infosec
].sh_flags
& SHF_ALLOC
))
2392 /* Livepatch relocation sections are applied by livepatch */
2393 if (info
->sechdrs
[i
].sh_flags
& SHF_RELA_LIVEPATCH
)
2396 if (info
->sechdrs
[i
].sh_type
== SHT_REL
)
2397 err
= apply_relocate(info
->sechdrs
, info
->strtab
,
2398 info
->index
.sym
, i
, mod
);
2399 else if (info
->sechdrs
[i
].sh_type
== SHT_RELA
)
2400 err
= apply_relocate_add(info
->sechdrs
, info
->strtab
,
2401 info
->index
.sym
, i
, mod
);
/* Additional bytes needed by arch in front of individual sections */
unsigned int __weak arch_mod_section_prepend(struct module *mod,
					     unsigned int section)
{
	/* default implementation just returns zero */
	return 0;
}

/* Update size with this section: return offset. */
static long get_offset(struct module *mod, unsigned int *size,
		       Elf_Shdr *sechdr, unsigned int section)
{
	long ret;

	*size += arch_mod_section_prepend(mod, section);
	ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
	*size = ret + sechdr->sh_size;
	return ret;
}
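
/*
 * Worked example (illustrative numbers): if *size is currently 100, the arch
 * prepend is 0, sh_addralign is 16 and sh_size is 24, the section is placed
 * at offset ALIGN(100, 16) == 112 and *size becomes 112 + 24 == 136.
 */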
2428 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
2429 might -- code, read-only data, read-write data, small data. Tally
2430 sizes, and place the offsets into sh_entsize fields: high bit means it
2432 static void layout_sections(struct module
*mod
, struct load_info
*info
)
2434 static unsigned long const masks
[][2] = {
2435 /* NOTE: all executable code must be the first section
2436 * in this array; otherwise modify the text_size
2437 * finder in the two loops below */
2438 { SHF_EXECINSTR
| SHF_ALLOC
, ARCH_SHF_SMALL
},
2439 { SHF_ALLOC
, SHF_WRITE
| ARCH_SHF_SMALL
},
2440 { SHF_RO_AFTER_INIT
| SHF_ALLOC
, ARCH_SHF_SMALL
},
2441 { SHF_WRITE
| SHF_ALLOC
, ARCH_SHF_SMALL
},
2442 { ARCH_SHF_SMALL
| SHF_ALLOC
, 0 }
2446 for (i
= 0; i
< info
->hdr
->e_shnum
; i
++)
2447 info
->sechdrs
[i
].sh_entsize
= ~0UL;
2449 pr_debug("Core section allocation order:\n");
2450 for (m
= 0; m
< ARRAY_SIZE(masks
); ++m
) {
2451 for (i
= 0; i
< info
->hdr
->e_shnum
; ++i
) {
2452 Elf_Shdr
*s
= &info
->sechdrs
[i
];
2453 const char *sname
= info
->secstrings
+ s
->sh_name
;
2455 if ((s
->sh_flags
& masks
[m
][0]) != masks
[m
][0]
2456 || (s
->sh_flags
& masks
[m
][1])
2457 || s
->sh_entsize
!= ~0UL
2458 || strstarts(sname
, ".init"))
2460 s
->sh_entsize
= get_offset(mod
, &mod
->core_layout
.size
, s
, i
);
2461 pr_debug("\t%s\n", sname
);
2464 case 0: /* executable */
2465 mod
->core_layout
.size
= debug_align(mod
->core_layout
.size
);
2466 mod
->core_layout
.text_size
= mod
->core_layout
.size
;
2468 case 1: /* RO: text and ro-data */
2469 mod
->core_layout
.size
= debug_align(mod
->core_layout
.size
);
2470 mod
->core_layout
.ro_size
= mod
->core_layout
.size
;
2472 case 2: /* RO after init */
2473 mod
->core_layout
.size
= debug_align(mod
->core_layout
.size
);
2474 mod
->core_layout
.ro_after_init_size
= mod
->core_layout
.size
;
2476 case 4: /* whole core */
2477 mod
->core_layout
.size
= debug_align(mod
->core_layout
.size
);
2482 pr_debug("Init section allocation order:\n");
2483 for (m
= 0; m
< ARRAY_SIZE(masks
); ++m
) {
2484 for (i
= 0; i
< info
->hdr
->e_shnum
; ++i
) {
2485 Elf_Shdr
*s
= &info
->sechdrs
[i
];
2486 const char *sname
= info
->secstrings
+ s
->sh_name
;
2488 if ((s
->sh_flags
& masks
[m
][0]) != masks
[m
][0]
2489 || (s
->sh_flags
& masks
[m
][1])
2490 || s
->sh_entsize
!= ~0UL
2491 || !strstarts(sname
, ".init"))
2493 s
->sh_entsize
= (get_offset(mod
, &mod
->init_layout
.size
, s
, i
)
2494 | INIT_OFFSET_MASK
);
2495 pr_debug("\t%s\n", sname
);
2498 case 0: /* executable */
2499 mod
->init_layout
.size
= debug_align(mod
->init_layout
.size
);
2500 mod
->init_layout
.text_size
= mod
->init_layout
.size
;
2502 case 1: /* RO: text and ro-data */
2503 mod
->init_layout
.size
= debug_align(mod
->init_layout
.size
);
2504 mod
->init_layout
.ro_size
= mod
->init_layout
.size
;
2508 * RO after init doesn't apply to init_layout (only
2509 * core_layout), so it just takes the value of ro_size.
2511 mod
->init_layout
.ro_after_init_size
= mod
->init_layout
.ro_size
;
2513 case 4: /* whole init */
2514 mod
->init_layout
.size
= debug_align(mod
->init_layout
.size
);
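
/*
 * The net effect for each region is a monotonically growing layout:
 * text_size <= ro_size <= ro_after_init_size <= size, with every boundary
 * page-aligned by debug_align() when CONFIG_STRICT_MODULE_RWX=y, so the
 * text/RO/RW split can later be enforced with page permissions.
 */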
static void set_license(struct module *mod, const char *license)
{
	if (!license)
		license = "unspecified";

	if (!license_is_gpl_compatible(license)) {
		if (!test_taint(TAINT_PROPRIETARY_MODULE))
			pr_warn("%s: module license '%s' taints kernel.\n",
				mod->name, license);
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
				 LOCKDEP_NOW_UNRELIABLE);
	}
}
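
/*
 * The license string normally comes from the MODULE_LICENSE() tag in the
 * module source: e.g. MODULE_LICENSE("GPL") or "GPL v2" is GPL-compatible,
 * while "Proprietary" (or a missing tag, treated as "unspecified") sets
 * TAINT_PROPRIETARY_MODULE and denies the module EXPORT_SYMBOL_GPL() symbols.
 */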
/* Parse tag=value strings from .modinfo section */
static char *next_string(char *string, unsigned long *secsize)
{
	/* Skip non-zero chars */
	while (string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}

	/* Skip any zero padding. */
	while (!string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}
	return string;
}

static char *get_modinfo(struct load_info *info, const char *tag)
{
	char *p;
	unsigned int taglen = strlen(tag);
	Elf_Shdr *infosec = &info->sechdrs[info->index.info];
	unsigned long size = infosec->sh_size;

	for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
			return p + taglen + 1;
	}
	return NULL;
}
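
/*
 * .modinfo is just a series of NUL-terminated "tag=value" strings, e.g.
 * (illustrative) "license=GPL\0author=Jane Doe\0name=mymod\0".  For that
 * data, get_modinfo(info, "license") returns a pointer to "GPL", and
 * next_string() steps over the terminator and any zero padding to reach the
 * following entry.
 */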
2567 static void setup_modinfo(struct module
*mod
, struct load_info
*info
)
2569 struct module_attribute
*attr
;
2572 for (i
= 0; (attr
= modinfo_attrs
[i
]); i
++) {
2574 attr
->setup(mod
, get_modinfo(info
, attr
->attr
.name
));
2578 static void free_modinfo(struct module
*mod
)
2580 struct module_attribute
*attr
;
2583 for (i
= 0; (attr
= modinfo_attrs
[i
]); i
++) {
2589 #ifdef CONFIG_KALLSYMS
2591 /* lookup symbol in given range of kernel_symbols */
2592 static const struct kernel_symbol
*lookup_symbol(const char *name
,
2593 const struct kernel_symbol
*start
,
2594 const struct kernel_symbol
*stop
)
2596 return bsearch(name
, start
, stop
- start
,
2597 sizeof(struct kernel_symbol
), cmp_name
);
2600 static int is_exported(const char *name
, unsigned long value
,
2601 const struct module
*mod
)
2603 const struct kernel_symbol
*ks
;
2605 ks
= lookup_symbol(name
, __start___ksymtab
, __stop___ksymtab
);
2607 ks
= lookup_symbol(name
, mod
->syms
, mod
->syms
+ mod
->num_syms
);
2608 return ks
!= NULL
&& ks
->value
== value
;
2612 static char elf_type(const Elf_Sym
*sym
, const struct load_info
*info
)
2614 const Elf_Shdr
*sechdrs
= info
->sechdrs
;
2616 if (ELF_ST_BIND(sym
->st_info
) == STB_WEAK
) {
2617 if (ELF_ST_TYPE(sym
->st_info
) == STT_OBJECT
)
2622 if (sym
->st_shndx
== SHN_UNDEF
)
2624 if (sym
->st_shndx
== SHN_ABS
|| sym
->st_shndx
== info
->index
.pcpu
)
2626 if (sym
->st_shndx
>= SHN_LORESERVE
)
2628 if (sechdrs
[sym
->st_shndx
].sh_flags
& SHF_EXECINSTR
)
2630 if (sechdrs
[sym
->st_shndx
].sh_flags
& SHF_ALLOC
2631 && sechdrs
[sym
->st_shndx
].sh_type
!= SHT_NOBITS
) {
2632 if (!(sechdrs
[sym
->st_shndx
].sh_flags
& SHF_WRITE
))
2634 else if (sechdrs
[sym
->st_shndx
].sh_flags
& ARCH_SHF_SMALL
)
2639 if (sechdrs
[sym
->st_shndx
].sh_type
== SHT_NOBITS
) {
2640 if (sechdrs
[sym
->st_shndx
].sh_flags
& ARCH_SHF_SMALL
)
2645 if (strstarts(info
->secstrings
+ sechdrs
[sym
->st_shndx
].sh_name
,
2652 static bool is_core_symbol(const Elf_Sym
*src
, const Elf_Shdr
*sechdrs
,
2653 unsigned int shnum
, unsigned int pcpundx
)
2655 const Elf_Shdr
*sec
;
2657 if (src
->st_shndx
== SHN_UNDEF
2658 || src
->st_shndx
>= shnum
2662 #ifdef CONFIG_KALLSYMS_ALL
2663 if (src
->st_shndx
== pcpundx
)
2667 sec
= sechdrs
+ src
->st_shndx
;
2668 if (!(sec
->sh_flags
& SHF_ALLOC
)
2669 #ifndef CONFIG_KALLSYMS_ALL
2670 || !(sec
->sh_flags
& SHF_EXECINSTR
)
2672 || (sec
->sh_entsize
& INIT_OFFSET_MASK
))
2679 * We only allocate and copy the strings needed by the parts of symtab
2680 * we keep. This is simple, but has the effect of making multiple
2681 * copies of duplicates. We could be more sophisticated, see
2682 * linux-kernel thread starting with
2683 * <73defb5e4bca04a6431392cc341112b1@localhost>.
2685 static void layout_symtab(struct module
*mod
, struct load_info
*info
)
2687 Elf_Shdr
*symsect
= info
->sechdrs
+ info
->index
.sym
;
2688 Elf_Shdr
*strsect
= info
->sechdrs
+ info
->index
.str
;
2690 unsigned int i
, nsrc
, ndst
, strtab_size
= 0;
2692 /* Put symbol section at end of init part of module. */
2693 symsect
->sh_flags
|= SHF_ALLOC
;
2694 symsect
->sh_entsize
= get_offset(mod
, &mod
->init_layout
.size
, symsect
,
2695 info
->index
.sym
) | INIT_OFFSET_MASK
;
2696 pr_debug("\t%s\n", info
->secstrings
+ symsect
->sh_name
);
2698 src
= (void *)info
->hdr
+ symsect
->sh_offset
;
2699 nsrc
= symsect
->sh_size
/ sizeof(*src
);
2701 /* Compute total space required for the core symbols' strtab. */
2702 for (ndst
= i
= 0; i
< nsrc
; i
++) {
2703 if (i
== 0 || is_livepatch_module(mod
) ||
2704 is_core_symbol(src
+i
, info
->sechdrs
, info
->hdr
->e_shnum
,
2705 info
->index
.pcpu
)) {
2706 strtab_size
+= strlen(&info
->strtab
[src
[i
].st_name
])+1;
2711 /* Append room for core symbols at end of core part. */
2712 info
->symoffs
= ALIGN(mod
->core_layout
.size
, symsect
->sh_addralign
?: 1);
2713 info
->stroffs
= mod
->core_layout
.size
= info
->symoffs
+ ndst
* sizeof(Elf_Sym
);
2714 mod
->core_layout
.size
+= strtab_size
;
2715 mod
->core_layout
.size
= debug_align(mod
->core_layout
.size
);
2717 /* Put string table section at end of init part of module. */
2718 strsect
->sh_flags
|= SHF_ALLOC
;
2719 strsect
->sh_entsize
= get_offset(mod
, &mod
->init_layout
.size
, strsect
,
2720 info
->index
.str
) | INIT_OFFSET_MASK
;
2721 pr_debug("\t%s\n", info
->secstrings
+ strsect
->sh_name
);
2723 /* We'll tack temporary mod_kallsyms on the end. */
2724 mod
->init_layout
.size
= ALIGN(mod
->init_layout
.size
,
2725 __alignof__(struct mod_kallsyms
));
2726 info
->mod_kallsyms_init_off
= mod
->init_layout
.size
;
2727 mod
->init_layout
.size
+= sizeof(struct mod_kallsyms
);
2728 mod
->init_layout
.size
= debug_align(mod
->init_layout
.size
);
2732 * We use the full symtab and strtab which layout_symtab arranged to
2733 * be appended to the init section. Later we switch to the cut-down
2736 static void add_kallsyms(struct module
*mod
, const struct load_info
*info
)
2738 unsigned int i
, ndst
;
2742 Elf_Shdr
*symsec
= &info
->sechdrs
[info
->index
.sym
];
2744 /* Set up to point into init section. */
2745 mod
->kallsyms
= mod
->init_layout
.base
+ info
->mod_kallsyms_init_off
;
2747 mod
->kallsyms
->symtab
= (void *)symsec
->sh_addr
;
2748 mod
->kallsyms
->num_symtab
= symsec
->sh_size
/ sizeof(Elf_Sym
);
2749 /* Make sure we get permanent strtab: don't use info->strtab. */
2750 mod
->kallsyms
->strtab
= (void *)info
->sechdrs
[info
->index
.str
].sh_addr
;
2752 /* Set types up while we still have access to sections. */
2753 for (i
= 0; i
< mod
->kallsyms
->num_symtab
; i
++)
2754 mod
->kallsyms
->symtab
[i
].st_info
2755 = elf_type(&mod
->kallsyms
->symtab
[i
], info
);
2757 /* Now populate the cut down core kallsyms for after init. */
2758 mod
->core_kallsyms
.symtab
= dst
= mod
->core_layout
.base
+ info
->symoffs
;
2759 mod
->core_kallsyms
.strtab
= s
= mod
->core_layout
.base
+ info
->stroffs
;
2760 src
= mod
->kallsyms
->symtab
;
2761 for (ndst
= i
= 0; i
< mod
->kallsyms
->num_symtab
; i
++) {
2762 if (i
== 0 || is_livepatch_module(mod
) ||
2763 is_core_symbol(src
+i
, info
->sechdrs
, info
->hdr
->e_shnum
,
2764 info
->index
.pcpu
)) {
2766 dst
[ndst
++].st_name
= s
- mod
->core_kallsyms
.strtab
;
2767 s
+= strlcpy(s
, &mod
->kallsyms
->strtab
[src
[i
].st_name
],
2771 mod
->core_kallsyms
.num_symtab
= ndst
;
2774 static inline void layout_symtab(struct module
*mod
, struct load_info
*info
)
2778 static void add_kallsyms(struct module
*mod
, const struct load_info
*info
)
2781 #endif /* CONFIG_KALLSYMS */
2783 static void dynamic_debug_setup(struct module
*mod
, struct _ddebug
*debug
, unsigned int num
)
2787 #ifdef CONFIG_DYNAMIC_DEBUG
2788 if (ddebug_add_module(debug
, num
, mod
->name
))
2789 pr_err("dynamic debug error adding module: %s\n",
2794 static void dynamic_debug_remove(struct module
*mod
, struct _ddebug
*debug
)
2797 ddebug_remove_module(mod
->name
);
void * __weak module_alloc(unsigned long size)
{
	return vmalloc_exec(size);
}
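
/*
 * Note: most architectures override this weak default so that module text
 * lands in a dedicated region within relative-branch range of the kernel
 * image (e.g. MODULES_VADDR..MODULES_END); the generic fallback just takes
 * executable vmalloc space.
 */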
2805 #ifdef CONFIG_DEBUG_KMEMLEAK
2806 static void kmemleak_load_module(const struct module
*mod
,
2807 const struct load_info
*info
)
2811 /* only scan the sections containing data */
2812 kmemleak_scan_area(mod
, sizeof(struct module
), GFP_KERNEL
);
2814 for (i
= 1; i
< info
->hdr
->e_shnum
; i
++) {
2815 /* Scan all writable sections that's not executable */
2816 if (!(info
->sechdrs
[i
].sh_flags
& SHF_ALLOC
) ||
2817 !(info
->sechdrs
[i
].sh_flags
& SHF_WRITE
) ||
2818 (info
->sechdrs
[i
].sh_flags
& SHF_EXECINSTR
))
2821 kmemleak_scan_area((void *)info
->sechdrs
[i
].sh_addr
,
2822 info
->sechdrs
[i
].sh_size
, GFP_KERNEL
);
2826 static inline void kmemleak_load_module(const struct module
*mod
,
2827 const struct load_info
*info
)
2832 #ifdef CONFIG_MODULE_SIG
2833 static int module_sig_check(struct load_info
*info
, int flags
)
2836 const unsigned long markerlen
= sizeof(MODULE_SIG_STRING
) - 1;
2837 const void *mod
= info
->hdr
;
2840 * Require flags == 0, as a module with version information
2841 * removed is no longer the module that was signed
2844 info
->len
> markerlen
&&
2845 memcmp(mod
+ info
->len
- markerlen
, MODULE_SIG_STRING
, markerlen
) == 0) {
2846 /* We truncate the module to discard the signature */
2847 info
->len
-= markerlen
;
2848 err
= mod_verify_sig(mod
, &info
->len
);
2852 info
->sig_ok
= true;
2856 /* Not having a signature is only an error if we're strict. */
2857 if (err
== -ENOKEY
&& !sig_enforce
)
2862 #else /* !CONFIG_MODULE_SIG */
2863 static int module_sig_check(struct load_info
*info
, int flags
)
2867 #endif /* !CONFIG_MODULE_SIG */
2869 /* Sanity checks against invalid binaries, wrong arch, weird elf version. */
2870 static int elf_header_check(struct load_info
*info
)
2872 if (info
->len
< sizeof(*(info
->hdr
)))
2875 if (memcmp(info
->hdr
->e_ident
, ELFMAG
, SELFMAG
) != 0
2876 || info
->hdr
->e_type
!= ET_REL
2877 || !elf_check_arch(info
->hdr
)
2878 || info
->hdr
->e_shentsize
!= sizeof(Elf_Shdr
))
2881 if (info
->hdr
->e_shoff
>= info
->len
2882 || (info
->hdr
->e_shnum
* sizeof(Elf_Shdr
) >
2883 info
->len
- info
->hdr
->e_shoff
))
#define COPY_CHUNK_SIZE (16*PAGE_SIZE)

static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
{
	do {
		unsigned long n = min(len, COPY_CHUNK_SIZE);

		if (copy_from_user(dst, usrc, n) != 0)
			return -EFAULT;
		cond_resched();
		dst += n;
		usrc += n;
		len -= n;
	} while (len);
	return 0;
}
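
/*
 * Copying in 16-page chunks keeps each copy_from_user() bounded and lets us
 * reschedule between chunks, so a large module image cannot hog the CPU in
 * one long uninterruptible copy.  E.g. with 4 KiB pages a 1 MiB image is
 * pulled in as 16 chunks of 64 KiB each.
 */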
2906 #ifdef CONFIG_LIVEPATCH
2907 static int check_modinfo_livepatch(struct module
*mod
, struct load_info
*info
)
2909 if (get_modinfo(info
, "livepatch")) {
2911 add_taint_module(mod
, TAINT_LIVEPATCH
, LOCKDEP_STILL_OK
);
2912 pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n",
2918 #else /* !CONFIG_LIVEPATCH */
2919 static int check_modinfo_livepatch(struct module
*mod
, struct load_info
*info
)
2921 if (get_modinfo(info
, "livepatch")) {
2922 pr_err("%s: module is marked as livepatch module, but livepatch support is disabled",
2929 #endif /* CONFIG_LIVEPATCH */
2931 static void check_modinfo_retpoline(struct module
*mod
, struct load_info
*info
)
2933 if (retpoline_module_ok(get_modinfo(info
, "retpoline")))
2936 pr_warn("%s: loading module not compiled with retpoline compiler.\n",
2940 /* Sets info->hdr and info->len. */
2941 static int copy_module_from_user(const void __user
*umod
, unsigned long len
,
2942 struct load_info
*info
)
2947 if (info
->len
< sizeof(*(info
->hdr
)))
2950 err
= security_kernel_read_file(NULL
, READING_MODULE
);
2954 /* Suck in entire file: we'll want most of it. */
2955 info
->hdr
= __vmalloc(info
->len
,
2956 GFP_KERNEL
| __GFP_NOWARN
, PAGE_KERNEL
);
2960 if (copy_chunked_from_user(info
->hdr
, umod
, info
->len
) != 0) {
2968 static void free_copy(struct load_info
*info
)
2973 static int rewrite_section_headers(struct load_info
*info
, int flags
)
2977 /* This should always be true, but let's be sure. */
2978 info
->sechdrs
[0].sh_addr
= 0;
2980 for (i
= 1; i
< info
->hdr
->e_shnum
; i
++) {
2981 Elf_Shdr
*shdr
= &info
->sechdrs
[i
];
2982 if (shdr
->sh_type
!= SHT_NOBITS
2983 && info
->len
< shdr
->sh_offset
+ shdr
->sh_size
) {
2984 pr_err("Module len %lu truncated\n", info
->len
);
2988 /* Mark all sections sh_addr with their address in the
2990 shdr
->sh_addr
= (size_t)info
->hdr
+ shdr
->sh_offset
;
2992 #ifndef CONFIG_MODULE_UNLOAD
2993 /* Don't load .exit sections */
2994 if (strstarts(info
->secstrings
+shdr
->sh_name
, ".exit"))
2995 shdr
->sh_flags
&= ~(unsigned long)SHF_ALLOC
;
2999 /* Track but don't keep modinfo and version sections. */
3000 if (flags
& MODULE_INIT_IGNORE_MODVERSIONS
)
3001 info
->index
.vers
= 0; /* Pretend no __versions section! */
3003 info
->index
.vers
= find_sec(info
, "__versions");
3004 info
->sechdrs
[info
->index
.vers
].sh_flags
&= ~(unsigned long)SHF_ALLOC
;
3006 info
->index
.info
= find_sec(info
, ".modinfo");
3007 if (!info
->index
.info
)
3008 info
->name
= "(missing .modinfo section)";
3010 info
->name
= get_modinfo(info
, "name");
3011 info
->sechdrs
[info
->index
.info
].sh_flags
&= ~(unsigned long)SHF_ALLOC
;
3017 * Set up our basic convenience variables (pointers to section headers,
3018 * search for module section index etc), and do some basic section
3021 * Return the temporary module pointer (we'll replace it with the final
3022 * one when we move the module sections around).
3024 static struct module
*setup_load_info(struct load_info
*info
, int flags
)
3030 /* Set up the convenience variables */
3031 info
->sechdrs
= (void *)info
->hdr
+ info
->hdr
->e_shoff
;
3032 info
->secstrings
= (void *)info
->hdr
3033 + info
->sechdrs
[info
->hdr
->e_shstrndx
].sh_offset
;
3035 err
= rewrite_section_headers(info
, flags
);
3037 return ERR_PTR(err
);
3039 /* Find internal symbols and strings. */
3040 for (i
= 1; i
< info
->hdr
->e_shnum
; i
++) {
3041 if (info
->sechdrs
[i
].sh_type
== SHT_SYMTAB
) {
3042 info
->index
.sym
= i
;
3043 info
->index
.str
= info
->sechdrs
[i
].sh_link
;
3044 info
->strtab
= (char *)info
->hdr
3045 + info
->sechdrs
[info
->index
.str
].sh_offset
;
3050 info
->index
.mod
= find_sec(info
, ".gnu.linkonce.this_module");
3051 if (!info
->index
.mod
) {
3052 pr_warn("%s: No module found in object\n",
3053 info
->name
?: "(missing .modinfo name field)");
3054 return ERR_PTR(-ENOEXEC
);
3056 /* This is temporary: point mod into copy of data. */
3057 mod
= (void *)info
->sechdrs
[info
->index
.mod
].sh_addr
;
3060 * If we didn't load the .modinfo 'name' field, fall back to
3061 * on-disk struct mod 'name' field.
3064 info
->name
= mod
->name
;
3066 if (info
->index
.sym
== 0) {
3067 pr_warn("%s: module has no symbols (stripped?)\n", info
->name
);
3068 return ERR_PTR(-ENOEXEC
);
3071 info
->index
.pcpu
= find_pcpusec(info
);
3073 /* Check module struct version now, before we try to use module. */
3074 if (!check_modstruct_version(info
, mod
))
3075 return ERR_PTR(-ENOEXEC
);
3080 static int check_modinfo(struct module
*mod
, struct load_info
*info
, int flags
)
3082 const char *modmagic
= get_modinfo(info
, "vermagic");
3085 if (flags
& MODULE_INIT_IGNORE_VERMAGIC
)
3088 /* This is allowed: modprobe --force will invalidate it. */
3090 err
= try_to_force_load(mod
, "bad vermagic");
3093 } else if (!same_magic(modmagic
, vermagic
, info
->index
.vers
)) {
3094 pr_err("%s: version magic '%s' should be '%s'\n",
3095 info
->name
, modmagic
, vermagic
);
3099 if (!get_modinfo(info
, "intree")) {
3100 if (!test_taint(TAINT_OOT_MODULE
))
3101 pr_warn("%s: loading out-of-tree module taints kernel.\n",
3103 add_taint_module(mod
, TAINT_OOT_MODULE
, LOCKDEP_STILL_OK
);
3106 check_modinfo_retpoline(mod
, info
);
3108 if (get_modinfo(info
, "staging")) {
3109 add_taint_module(mod
, TAINT_CRAP
, LOCKDEP_STILL_OK
);
3110 pr_warn("%s: module is from the staging directory, the quality "
3111 "is unknown, you have been warned.\n", mod
->name
);
3114 err
= check_modinfo_livepatch(mod
, info
);
3118 /* Set up license info based on the info section */
3119 set_license(mod
, get_modinfo(info
, "license"));
3124 static int find_module_sections(struct module
*mod
, struct load_info
*info
)
3126 mod
->kp
= section_objs(info
, "__param",
3127 sizeof(*mod
->kp
), &mod
->num_kp
);
3128 mod
->syms
= section_objs(info
, "__ksymtab",
3129 sizeof(*mod
->syms
), &mod
->num_syms
);
3130 mod
->crcs
= section_addr(info
, "__kcrctab");
3131 mod
->gpl_syms
= section_objs(info
, "__ksymtab_gpl",
3132 sizeof(*mod
->gpl_syms
),
3133 &mod
->num_gpl_syms
);
3134 mod
->gpl_crcs
= section_addr(info
, "__kcrctab_gpl");
3135 mod
->gpl_future_syms
= section_objs(info
,
3136 "__ksymtab_gpl_future",
3137 sizeof(*mod
->gpl_future_syms
),
3138 &mod
->num_gpl_future_syms
);
3139 mod
->gpl_future_crcs
= section_addr(info
, "__kcrctab_gpl_future");
3141 #ifdef CONFIG_UNUSED_SYMBOLS
3142 mod
->unused_syms
= section_objs(info
, "__ksymtab_unused",
3143 sizeof(*mod
->unused_syms
),
3144 &mod
->num_unused_syms
);
3145 mod
->unused_crcs
= section_addr(info
, "__kcrctab_unused");
3146 mod
->unused_gpl_syms
= section_objs(info
, "__ksymtab_unused_gpl",
3147 sizeof(*mod
->unused_gpl_syms
),
3148 &mod
->num_unused_gpl_syms
);
3149 mod
->unused_gpl_crcs
= section_addr(info
, "__kcrctab_unused_gpl");
3151 #ifdef CONFIG_CONSTRUCTORS
3152 mod
->ctors
= section_objs(info
, ".ctors",
3153 sizeof(*mod
->ctors
), &mod
->num_ctors
);
3155 mod
->ctors
= section_objs(info
, ".init_array",
3156 sizeof(*mod
->ctors
), &mod
->num_ctors
);
3157 else if (find_sec(info
, ".init_array")) {
3159 * This shouldn't happen with same compiler and binutils
3160 * building all parts of the module.
3162 pr_warn("%s: has both .ctors and .init_array.\n",
3168 #ifdef CONFIG_TRACEPOINTS
3169 mod
->tracepoints_ptrs
= section_objs(info
, "__tracepoints_ptrs",
3170 sizeof(*mod
->tracepoints_ptrs
),
3171 &mod
->num_tracepoints
);
3173 #ifdef HAVE_JUMP_LABEL
3174 mod
->jump_entries
= section_objs(info
, "__jump_table",
3175 sizeof(*mod
->jump_entries
),
3176 &mod
->num_jump_entries
);
3178 #ifdef CONFIG_EVENT_TRACING
3179 mod
->trace_events
= section_objs(info
, "_ftrace_events",
3180 sizeof(*mod
->trace_events
),
3181 &mod
->num_trace_events
);
3182 mod
->trace_evals
= section_objs(info
, "_ftrace_eval_map",
3183 sizeof(*mod
->trace_evals
),
3184 &mod
->num_trace_evals
);
3186 #ifdef CONFIG_TRACING
3187 mod
->trace_bprintk_fmt_start
= section_objs(info
, "__trace_printk_fmt",
3188 sizeof(*mod
->trace_bprintk_fmt_start
),
3189 &mod
->num_trace_bprintk_fmt
);
3191 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
3192 /* sechdrs[0].sh_size is always zero */
3193 mod
->ftrace_callsites
= section_objs(info
, "__mcount_loc",
3194 sizeof(*mod
->ftrace_callsites
),
3195 &mod
->num_ftrace_callsites
);
3198 mod
->extable
= section_objs(info
, "__ex_table",
3199 sizeof(*mod
->extable
), &mod
->num_exentries
);
3201 if (section_addr(info
, "__obsparm"))
3202 pr_warn("%s: Ignoring obsolete parameters\n", mod
->name
);
3204 info
->debug
= section_objs(info
, "__verbose",
3205 sizeof(*info
->debug
), &info
->num_debug
);
3210 static int move_module(struct module
*mod
, struct load_info
*info
)
3215 /* Do the allocs. */
3216 ptr
= module_alloc(mod
->core_layout
.size
);
3218 * The pointer to this block is stored in the module structure
3219 * which is inside the block. Just mark it as not being a
3222 kmemleak_not_leak(ptr
);
3226 memset(ptr
, 0, mod
->core_layout
.size
);
3227 mod
->core_layout
.base
= ptr
;
3229 if (mod
->init_layout
.size
) {
3230 ptr
= module_alloc(mod
->init_layout
.size
);
3232 * The pointer to this block is stored in the module structure
3233 * which is inside the block. This block doesn't need to be
3234 * scanned as it contains data and code that will be freed
3235 * after the module is initialized.
3237 kmemleak_ignore(ptr
);
3239 module_memfree(mod
->core_layout
.base
);
3242 memset(ptr
, 0, mod
->init_layout
.size
);
3243 mod
->init_layout
.base
= ptr
;
3245 mod
->init_layout
.base
= NULL
;
3247 /* Transfer each section which specifies SHF_ALLOC */
3248 pr_debug("final section addresses:\n");
3249 for (i
= 0; i
< info
->hdr
->e_shnum
; i
++) {
3251 Elf_Shdr
*shdr
= &info
->sechdrs
[i
];
3253 if (!(shdr
->sh_flags
& SHF_ALLOC
))
3256 if (shdr
->sh_entsize
& INIT_OFFSET_MASK
)
3257 dest
= mod
->init_layout
.base
3258 + (shdr
->sh_entsize
& ~INIT_OFFSET_MASK
);
3260 dest
= mod
->core_layout
.base
+ shdr
->sh_entsize
;
3262 if (shdr
->sh_type
!= SHT_NOBITS
)
3263 memcpy(dest
, (void *)shdr
->sh_addr
, shdr
->sh_size
);
3264 /* Update sh_addr to point to copy in image. */
3265 shdr
->sh_addr
= (unsigned long)dest
;
3266 pr_debug("\t0x%lx %s\n",
3267 (long)shdr
->sh_addr
, info
->secstrings
+ shdr
->sh_name
);
3273 static int check_module_license_and_versions(struct module
*mod
)
3275 int prev_taint
= test_taint(TAINT_PROPRIETARY_MODULE
);
3278 * ndiswrapper is under GPL by itself, but loads proprietary modules.
3279 * Don't use add_taint_module(), as it would prevent ndiswrapper from
3280 * using GPL-only symbols it needs.
3282 if (strcmp(mod
->name
, "ndiswrapper") == 0)
3283 add_taint(TAINT_PROPRIETARY_MODULE
, LOCKDEP_NOW_UNRELIABLE
);
3285 /* driverloader was caught wrongly pretending to be under GPL */
3286 if (strcmp(mod
->name
, "driverloader") == 0)
3287 add_taint_module(mod
, TAINT_PROPRIETARY_MODULE
,
3288 LOCKDEP_NOW_UNRELIABLE
);
3290 /* lve claims to be GPL but upstream won't provide source */
3291 if (strcmp(mod
->name
, "lve") == 0)
3292 add_taint_module(mod
, TAINT_PROPRIETARY_MODULE
,
3293 LOCKDEP_NOW_UNRELIABLE
);
3295 if (!prev_taint
&& test_taint(TAINT_PROPRIETARY_MODULE
))
3296 pr_warn("%s: module license taints kernel.\n", mod
->name
);
3298 #ifdef CONFIG_MODVERSIONS
3299 if ((mod
->num_syms
&& !mod
->crcs
)
3300 || (mod
->num_gpl_syms
&& !mod
->gpl_crcs
)
3301 || (mod
->num_gpl_future_syms
&& !mod
->gpl_future_crcs
)
3302 #ifdef CONFIG_UNUSED_SYMBOLS
3303 || (mod
->num_unused_syms
&& !mod
->unused_crcs
)
3304 || (mod
->num_unused_gpl_syms
&& !mod
->unused_gpl_crcs
)
3307 return try_to_force_load(mod
,
3308 "no versions for exported symbols");
3314 static void flush_module_icache(const struct module
*mod
)
3316 mm_segment_t old_fs
;
3318 /* flush the icache in correct context */
3323 * Flush the instruction cache, since we've played with text.
3324 * Do it before processing of module parameters, so the module
3325 * can provide parameter accessor functions of its own.
3327 if (mod
->init_layout
.base
)
3328 flush_icache_range((unsigned long)mod
->init_layout
.base
,
3329 (unsigned long)mod
->init_layout
.base
3330 + mod
->init_layout
.size
);
3331 flush_icache_range((unsigned long)mod
->core_layout
.base
,
3332 (unsigned long)mod
->core_layout
.base
+ mod
->core_layout
.size
);
3337 int __weak
module_frob_arch_sections(Elf_Ehdr
*hdr
,
/* module_blacklist is a comma-separated list of module names */
static char *module_blacklist;
static bool blacklisted(const char *module_name)
{
	const char *p;
	size_t len;

	if (!module_blacklist)
		return false;

	for (p = module_blacklist; *p; p += len) {
		len = strcspn(p, ",");
		if (strlen(module_name) == len && !memcmp(module_name, p, len))
			return true;
		if (p[len] == ',')
			len++;
	}
	return false;
}
core_param(module_blacklist, module_blacklist, charp, 0400);
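
/*
 * Example (module names illustrative): booting with
 * "module_blacklist=evbug,pcspkr" makes layout_and_allocate() below reject
 * those loads with -EPERM, so neither modprobe nor an in-kernel
 * request_module() can bring them in.
 */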
3366 static struct module
*layout_and_allocate(struct load_info
*info
, int flags
)
3368 /* Module within temporary copy. */
3373 mod
= setup_load_info(info
, flags
);
3377 if (blacklisted(info
->name
))
3378 return ERR_PTR(-EPERM
);
3380 err
= check_modinfo(mod
, info
, flags
);
3382 return ERR_PTR(err
);
3384 /* Allow arches to frob section contents and sizes. */
3385 err
= module_frob_arch_sections(info
->hdr
, info
->sechdrs
,
3386 info
->secstrings
, mod
);
3388 return ERR_PTR(err
);
3390 /* We will do a special allocation for per-cpu sections later. */
3391 info
->sechdrs
[info
->index
.pcpu
].sh_flags
&= ~(unsigned long)SHF_ALLOC
;
3394 * Mark ro_after_init section with SHF_RO_AFTER_INIT so that
3395 * layout_sections() can put it in the right place.
3396 * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
3398 ndx
= find_sec(info
, ".data..ro_after_init");
3400 info
->sechdrs
[ndx
].sh_flags
|= SHF_RO_AFTER_INIT
;
3402 /* Determine total sizes, and put offsets in sh_entsize. For now
3403 this is done generically; there doesn't appear to be any
3404 special cases for the architectures. */
3405 layout_sections(mod
, info
);
3406 layout_symtab(mod
, info
);
3408 /* Allocate and move to the final place */
3409 err
= move_module(mod
, info
);
3411 return ERR_PTR(err
);
3413 /* Module has been copied to its final place now: return it. */
3414 mod
= (void *)info
->sechdrs
[info
->index
.mod
].sh_addr
;
3415 kmemleak_load_module(mod
, info
);
3419 /* mod is no longer valid after this! */
3420 static void module_deallocate(struct module
*mod
, struct load_info
*info
)
3422 percpu_modfree(mod
);
3423 module_arch_freeing_init(mod
);
3424 module_memfree(mod
->init_layout
.base
);
3425 module_memfree(mod
->core_layout
.base
);
3428 int __weak
module_finalize(const Elf_Ehdr
*hdr
,
3429 const Elf_Shdr
*sechdrs
,
3435 static int post_relocation(struct module
*mod
, const struct load_info
*info
)
3437 /* Sort exception table now relocations are done. */
3438 sort_extable(mod
->extable
, mod
->extable
+ mod
->num_exentries
);
3440 /* Copy relocated percpu area over. */
3441 percpu_modcopy(mod
, (void *)info
->sechdrs
[info
->index
.pcpu
].sh_addr
,
3442 info
->sechdrs
[info
->index
.pcpu
].sh_size
);
3444 /* Setup kallsyms-specific fields. */
3445 add_kallsyms(mod
, info
);
3447 /* Arch-specific module finalizing. */
3448 return module_finalize(info
->hdr
, info
->sechdrs
, mod
);
3451 /* Is this module of this name done loading? No locks held. */
3452 static bool finished_loading(const char *name
)
3458 * The module_mutex should not be a heavily contended lock;
3459 * if we get the occasional sleep here, we'll go an extra iteration
3460 * in the wait_event_interruptible(), which is harmless.
3462 sched_annotate_sleep();
3463 mutex_lock(&module_mutex
);
3464 mod
= find_module_all(name
, strlen(name
), true);
3465 ret
= !mod
|| mod
->state
== MODULE_STATE_LIVE
3466 || mod
->state
== MODULE_STATE_GOING
;
3467 mutex_unlock(&module_mutex
);
3472 /* Call module constructors. */
3473 static void do_mod_ctors(struct module
*mod
)
3475 #ifdef CONFIG_CONSTRUCTORS
3478 for (i
= 0; i
< mod
->num_ctors
; i
++)
3483 /* For freeing module_init on success, in case kallsyms traversing */
3484 struct mod_initfree
{
3485 struct rcu_head rcu
;
3489 static void do_free_init(struct rcu_head
*head
)
3491 struct mod_initfree
*m
= container_of(head
, struct mod_initfree
, rcu
);
3492 module_memfree(m
->module_init
);
3497 * This is where the real work happens.
3499 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
3500 * helper command 'lx-symbols'.
3502 static noinline
int do_init_module(struct module
*mod
)
3505 struct mod_initfree
*freeinit
;
3507 freeinit
= kmalloc(sizeof(*freeinit
), GFP_KERNEL
);
3512 freeinit
->module_init
= mod
->init_layout
.base
;
3515 /* Start the module */
3516 if (mod
->init
!= NULL
)
3517 ret
= do_one_initcall(mod
->init
);
3519 goto fail_free_freeinit
;
3522 pr_warn("%s: '%s'->init suspiciously returned %d, it should "
3523 "follow 0/-E convention\n"
3524 "%s: loading module anyway...\n",
3525 __func__
, mod
->name
, ret
, __func__
);
3529 /* Now it's a first class citizen! */
3530 mod
->state
= MODULE_STATE_LIVE
;
3531 blocking_notifier_call_chain(&module_notify_list
,
3532 MODULE_STATE_LIVE
, mod
);
3534 /* Delay uevent until module has finished its init routine */
3535 kobject_uevent(&mod
->mkobj
.kobj
, KOBJ_ADD
);
3538 * We need to finish all async code before the module init sequence
3539 * is done. This has potential to deadlock if synchronous module
3540 * loading is requested from async (which is not allowed!).
3542 * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous
3543 * request_module() from async workers") for more details.
3545 if (!mod
->async_probe_requested
)
3546 async_synchronize_full();
3548 mutex_lock(&module_mutex
);
3549 /* Drop initial reference. */
3551 trim_init_extable(mod
);
3552 #ifdef CONFIG_KALLSYMS
3553 /* Switch to core kallsyms now init is done: kallsyms may be walking! */
3554 rcu_assign_pointer(mod
->kallsyms
, &mod
->core_kallsyms
);
3556 module_enable_ro(mod
, true);
3557 mod_tree_remove_init(mod
);
3558 disable_ro_nx(&mod
->init_layout
);
3559 module_arch_freeing_init(mod
);
3560 mod
->init_layout
.base
= NULL
;
3561 mod
->init_layout
.size
= 0;
3562 mod
->init_layout
.ro_size
= 0;
3563 mod
->init_layout
.ro_after_init_size
= 0;
3564 mod
->init_layout
.text_size
= 0;
3566 * We want to free module_init, but be aware that kallsyms may be
3567 * walking this with preempt disabled. In all the failure paths, we
3568 * call synchronize_sched(), but we don't want to slow down the success
3569 * path, so use actual RCU here.
3570 * Note that module_alloc() on most architectures creates W+X page
3571 * mappings which won't be cleaned up until do_free_init() runs. Any
3572 * code such as mark_rodata_ro() which depends on those mappings to
3573 * be cleaned up needs to sync with the queued work - ie
3574 * rcu_barrier_sched()
3576 call_rcu_sched(&freeinit
->rcu
, do_free_init
);
3577 mutex_unlock(&module_mutex
);
3578 wake_up_all(&module_wq
);
3585 /* Try to protect us from buggy refcounters. */
3586 mod
->state
= MODULE_STATE_GOING
;
3587 synchronize_sched();
3589 blocking_notifier_call_chain(&module_notify_list
,
3590 MODULE_STATE_GOING
, mod
);
3591 klp_module_going(mod
);
3592 ftrace_release_mod(mod
);
3594 wake_up_all(&module_wq
);
3598 static int may_init_module(void)
3600 if (!capable(CAP_SYS_MODULE
) || modules_disabled
)
3607 * We try to place it in the list now to make sure it's unique before
3608 * we dedicate too many resources. In particular, temporary percpu
3609 * memory exhaustion.
3611 static int add_unformed_module(struct module
*mod
)
3616 mod
->state
= MODULE_STATE_UNFORMED
;
3618 mutex_lock(&module_mutex
);
3619 old
= find_module_all(mod
->name
, strlen(mod
->name
), true);
3621 if (old
->state
== MODULE_STATE_COMING
3622 || old
->state
== MODULE_STATE_UNFORMED
) {
3623 /* Wait in case it fails to load. */
3624 mutex_unlock(&module_mutex
);
3625 err
= wait_event_interruptible(module_wq
,
3626 finished_loading(mod
->name
));
3630 /* The module might have gone in the meantime. */
3631 mutex_lock(&module_mutex
);
3632 old
= find_module_all(mod
->name
, strlen(mod
->name
),
3637 * We are here only when the same module was being loaded. Do
3638 * not try to load it again right now. It prevents long delays
3639 * caused by serialized module load failures. It might happen
3640 * when more devices of the same type trigger load of
3641 * a particular module.
3643 if (old
&& old
->state
== MODULE_STATE_LIVE
)
3649 mod_update_bounds(mod
);
3650 list_add_rcu(&mod
->list
, &modules
);
3651 mod_tree_insert(mod
);
3655 mutex_unlock(&module_mutex
);
3660 static int complete_formation(struct module
*mod
, struct load_info
*info
)
3664 mutex_lock(&module_mutex
);
3666 /* Find duplicate symbols (must be called under lock). */
3667 err
= verify_export_symbols(mod
);
3671 /* This relies on module_mutex for list integrity. */
3672 module_bug_finalize(info
->hdr
, info
->sechdrs
, mod
);
3674 module_enable_ro(mod
, false);
3675 module_enable_nx(mod
);
3677 /* Mark state as coming so strong_try_module_get() ignores us,
3678 * but kallsyms etc. can see us. */
3679 mod
->state
= MODULE_STATE_COMING
;
3680 mutex_unlock(&module_mutex
);
3685 mutex_unlock(&module_mutex
);
3689 static int prepare_coming_module(struct module
*mod
)
3693 ftrace_module_enable(mod
);
3694 err
= klp_module_coming(mod
);
3698 blocking_notifier_call_chain(&module_notify_list
,
3699 MODULE_STATE_COMING
, mod
);
static int unknown_module_param_cb(char *param, char *val, const char *modname,
				   void *arg)
{
	struct module *mod = arg;
	int ret;

	if (strcmp(param, "async_probe") == 0) {
		mod->async_probe_requested = true;
		return 0;
	}

	/* Check for magic 'dyndbg' arg */
	ret = ddebug_dyndbg_module_param_cb(param, val, modname);
	if (ret != 0)
		pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
	return 0;
}
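
/*
 * Example (illustrative): "modprobe mymod async_probe dyndbg=+p foo=1" --
 * "async_probe" is consumed here, "dyndbg" is handed off to dynamic debug,
 * and the unknown "foo" only triggers the warning above; the load itself
 * still succeeds.
 */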
3721 /* Allocate and load the module: note that size of section 0 is always
3722 zero, and we rely on this for optional sections. */
3723 static int load_module(struct load_info
*info
, const char __user
*uargs
,
3730 err
= module_sig_check(info
, flags
);
3734 err
= elf_header_check(info
);
3738 /* Figure out module layout, and allocate all the memory. */
3739 mod
= layout_and_allocate(info
, flags
);
3745 audit_log_kern_module(mod
->name
);
3747 /* Reserve our place in the list. */
3748 err
= add_unformed_module(mod
);
3752 #ifdef CONFIG_MODULE_SIG
3753 mod
->sig_ok
= info
->sig_ok
;
3755 pr_notice_once("%s: module verification failed: signature "
3756 "and/or required key missing - tainting "
3757 "kernel\n", mod
->name
);
3758 add_taint_module(mod
, TAINT_UNSIGNED_MODULE
, LOCKDEP_STILL_OK
);
3762 /* To avoid stressing percpu allocator, do this once we're unique. */
3763 err
= percpu_modalloc(mod
, info
);
3767 /* Now module is in final location, initialize linked lists, etc. */
3768 err
= module_unload_init(mod
);
3772 init_param_lock(mod
);
3774 /* Now we've got everything in the final locations, we can
3775 * find optional sections. */
3776 err
= find_module_sections(mod
, info
);
3780 err
= check_module_license_and_versions(mod
);
3784 /* Set up MODINFO_ATTR fields */
3785 setup_modinfo(mod
, info
);
3787 /* Fix up syms, so that st_value is a pointer to location. */
3788 err
= simplify_symbols(mod
, info
);
3792 err
= apply_relocations(mod
, info
);
3796 err
= post_relocation(mod
, info
);
3800 flush_module_icache(mod
);
3802 /* Now copy in args */
3803 mod
->args
= strndup_user(uargs
, ~0UL >> 1);
3804 if (IS_ERR(mod
->args
)) {
3805 err
= PTR_ERR(mod
->args
);
3806 goto free_arch_cleanup
;
3809 dynamic_debug_setup(mod
, info
->debug
, info
->num_debug
);
3811 /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
3812 ftrace_module_init(mod
);
3814 /* Finally it's fully formed, ready to start executing. */
3815 err
= complete_formation(mod
, info
);
3817 goto ddebug_cleanup
;
3819 err
= prepare_coming_module(mod
);
3823 /* Module is ready to execute: parsing args may do that. */
3824 after_dashes
= parse_args(mod
->name
, mod
->args
, mod
->kp
, mod
->num_kp
,
3826 unknown_module_param_cb
);
3827 if (IS_ERR(after_dashes
)) {
3828 err
= PTR_ERR(after_dashes
);
3829 goto coming_cleanup
;
3830 } else if (after_dashes
) {
3831 pr_warn("%s: parameters '%s' after `--' ignored\n",
3832 mod
->name
, after_dashes
);
3835 /* Link in to sysfs. */
3836 err
= mod_sysfs_setup(mod
, info
, mod
->kp
, mod
->num_kp
);
3838 goto coming_cleanup
;
3840 if (is_livepatch_module(mod
)) {
3841 err
= copy_module_elf(mod
, info
);
3846 /* Get rid of temporary copy. */
3850 trace_module_load(mod
);
3852 return do_init_module(mod
);
3855 mod_sysfs_teardown(mod
);
3857 mod
->state
= MODULE_STATE_GOING
;
3858 destroy_params(mod
->kp
, mod
->num_kp
);
3859 blocking_notifier_call_chain(&module_notify_list
,
3860 MODULE_STATE_GOING
, mod
);
3861 klp_module_going(mod
);
3863 mod
->state
= MODULE_STATE_GOING
;
3864 /* module_bug_cleanup needs module_mutex protection */
3865 mutex_lock(&module_mutex
);
3866 module_bug_cleanup(mod
);
3867 mutex_unlock(&module_mutex
);
3869 /* we can't deallocate the module until we clear memory protection */
3870 module_disable_ro(mod
);
3871 module_disable_nx(mod
);
3874 dynamic_debug_remove(mod
, info
->debug
);
3875 synchronize_sched();
3878 module_arch_cleanup(mod
);
3882 module_unload_free(mod
);
3884 mutex_lock(&module_mutex
);
3885 /* Unlink carefully: kallsyms could be walking list. */
3886 list_del_rcu(&mod
->list
);
3887 mod_tree_remove(mod
);
3888 wake_up_all(&module_wq
);
3889 /* Wait for RCU-sched synchronizing before releasing mod->list. */
3890 synchronize_sched();
3891 mutex_unlock(&module_mutex
);
3894 * Ftrace needs to clean up what it initialized.
3895 * This does nothing if ftrace_module_init() wasn't called,
3896 * but it must be called outside of module_mutex.
3898 ftrace_release_mod(mod
);
3899 /* Free lock-classes; relies on the preceding sync_rcu() */
3900 lockdep_free_key_range(mod
->core_layout
.base
, mod
->core_layout
.size
);
3902 module_deallocate(mod
, info
);
3908 SYSCALL_DEFINE3(init_module
, void __user
*, umod
,
3909 unsigned long, len
, const char __user
*, uargs
)
3912 struct load_info info
= { };
3914 err
= may_init_module();
3918 pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
3921 err
= copy_module_from_user(umod
, len
, &info
);
3925 return load_module(&info
, uargs
, 0);
SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
{
	struct load_info info = { };
	loff_t size;
	void *hdr;
	int err;

	err = may_init_module();
	if (err)
		return err;

	pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);

	if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
		      |MODULE_INIT_IGNORE_VERMAGIC))
		return -EINVAL;

	err = kernel_read_file_from_fd(fd, &hdr, &size, INT_MAX,
				       READING_MODULE);
	if (err)
		return err;
	info.hdr = hdr;
	info.len = size;

	return load_module(&info, uargs, flags);
}
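
/*
 * Minimal userspace counterpart (illustrative, error handling omitted,
 * requires CAP_SYS_MODULE):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("./mymod.ko", O_RDONLY | O_CLOEXEC);
 *
 *		return syscall(SYS_finit_module, fd, "", 0);
 *	}
 */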
static inline int within(unsigned long addr, void *start, unsigned long size)
{
	return ((void *)addr >= start && (void *)addr < start + size);
}

#ifdef CONFIG_KALLSYMS
/*
 * This ignores the intensely annoying "mapping symbols" found
 * in ARM ELF files: $a, $t and $d.
 */
static inline int is_arm_mapping_symbol(const char *str)
{
	if (str[0] == '.' && str[1] == 'L')
		return true;
	return str[0] == '$' && strchr("axtd", str[1])
	       && (str[2] == '\0' || str[2] == '.');
}
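
/*
 * E.g. "$a", "$d" and "$t.2" (and ".L"-prefixed local labels) are all
 * filtered out here, so they never win the closest-preceding-symbol search
 * in get_ksymbol() below.
 */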
static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
{
	return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
}
3978 static const char *get_ksymbol(struct module
*mod
,
3980 unsigned long *size
,
3981 unsigned long *offset
)
3983 unsigned int i
, best
= 0;
3984 unsigned long nextval
;
3985 struct mod_kallsyms
*kallsyms
= rcu_dereference_sched(mod
->kallsyms
);
3987 /* At worse, next value is at end of module */
3988 if (within_module_init(addr
, mod
))
3989 nextval
= (unsigned long)mod
->init_layout
.base
+mod
->init_layout
.text_size
;
3991 nextval
= (unsigned long)mod
->core_layout
.base
+mod
->core_layout
.text_size
;
3993 /* Scan for closest preceding symbol, and next symbol. (ELF
3994 starts real symbols at 1). */
3995 for (i
= 1; i
< kallsyms
->num_symtab
; i
++) {
3996 if (kallsyms
->symtab
[i
].st_shndx
== SHN_UNDEF
)
3999 /* We ignore unnamed symbols: they're uninformative
4000 * and inserted at a whim. */
4001 if (*symname(kallsyms
, i
) == '\0'
4002 || is_arm_mapping_symbol(symname(kallsyms
, i
)))
4005 if (kallsyms
->symtab
[i
].st_value
<= addr
4006 && kallsyms
->symtab
[i
].st_value
> kallsyms
->symtab
[best
].st_value
)
4008 if (kallsyms
->symtab
[i
].st_value
> addr
4009 && kallsyms
->symtab
[i
].st_value
< nextval
)
4010 nextval
= kallsyms
->symtab
[i
].st_value
;
4017 *size
= nextval
- kallsyms
->symtab
[best
].st_value
;
4019 *offset
= addr
- kallsyms
->symtab
[best
].st_value
;
4020 return symname(kallsyms
, best
);
4023 /* For kallsyms to ask for address resolution. NULL means not found. Careful
4024 * not to lock to avoid deadlock on oopses, simply disable preemption. */
4025 const char *module_address_lookup(unsigned long addr
,
4026 unsigned long *size
,
4027 unsigned long *offset
,
4031 const char *ret
= NULL
;
4035 mod
= __module_address(addr
);
4038 *modname
= mod
->name
;
4039 ret
= get_ksymbol(mod
, addr
, size
, offset
);
4041 /* Make a copy in here where it's safe */
4043 strncpy(namebuf
, ret
, KSYM_NAME_LEN
- 1);
4051 int lookup_module_symbol_name(unsigned long addr
, char *symname
)
4056 list_for_each_entry_rcu(mod
, &modules
, list
) {
4057 if (mod
->state
== MODULE_STATE_UNFORMED
)
4059 if (within_module(addr
, mod
)) {
4062 sym
= get_ksymbol(mod
, addr
, NULL
, NULL
);
4065 strlcpy(symname
, sym
, KSYM_NAME_LEN
);
4075 int lookup_module_symbol_attrs(unsigned long addr
, unsigned long *size
,
4076 unsigned long *offset
, char *modname
, char *name
)
4081 list_for_each_entry_rcu(mod
, &modules
, list
) {
4082 if (mod
->state
== MODULE_STATE_UNFORMED
)
4084 if (within_module(addr
, mod
)) {
4087 sym
= get_ksymbol(mod
, addr
, size
, offset
);
4091 strlcpy(modname
, mod
->name
, MODULE_NAME_LEN
);
4093 strlcpy(name
, sym
, KSYM_NAME_LEN
);
4103 int module_get_kallsym(unsigned int symnum
, unsigned long *value
, char *type
,
4104 char *name
, char *module_name
, int *exported
)
4109 list_for_each_entry_rcu(mod
, &modules
, list
) {
4110 struct mod_kallsyms
*kallsyms
;
4112 if (mod
->state
== MODULE_STATE_UNFORMED
)
4114 kallsyms
= rcu_dereference_sched(mod
->kallsyms
);
4115 if (symnum
< kallsyms
->num_symtab
) {
4116 *value
= kallsyms
->symtab
[symnum
].st_value
;
4117 *type
= kallsyms
->symtab
[symnum
].st_info
;
4118 strlcpy(name
, symname(kallsyms
, symnum
), KSYM_NAME_LEN
);
4119 strlcpy(module_name
, mod
->name
, MODULE_NAME_LEN
);
4120 *exported
= is_exported(name
, *value
, mod
);
4124 symnum
-= kallsyms
->num_symtab
;
4130 static unsigned long mod_find_symname(struct module
*mod
, const char *name
)
4133 struct mod_kallsyms
*kallsyms
= rcu_dereference_sched(mod
->kallsyms
);
4135 for (i
= 0; i
< kallsyms
->num_symtab
; i
++)
4136 if (strcmp(name
, symname(kallsyms
, i
)) == 0 &&
4137 kallsyms
->symtab
[i
].st_shndx
!= SHN_UNDEF
)
4138 return kallsyms
->symtab
[i
].st_value
;
4142 /* Look for this name: can be of form module:name. */
4143 unsigned long module_kallsyms_lookup_name(const char *name
)
4147 unsigned long ret
= 0;
4149 /* Don't lock: we're in enough trouble already. */
4151 if ((colon
= strnchr(name
, MODULE_NAME_LEN
, ':')) != NULL
) {
4152 if ((mod
= find_module_all(name
, colon
- name
, false)) != NULL
)
4153 ret
= mod_find_symname(mod
, colon
+1);
4155 list_for_each_entry_rcu(mod
, &modules
, list
) {
4156 if (mod
->state
== MODULE_STATE_UNFORMED
)
4158 if ((ret
= mod_find_symname(mod
, name
)) != 0)
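
/*
 * E.g. (names illustrative) module_kallsyms_lookup_name("mymod:my_func")
 * searches only "mymod", while a bare "my_func" walks every live module.
 */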
4166 int module_kallsyms_on_each_symbol(int (*fn
)(void *, const char *,
4167 struct module
*, unsigned long),
4174 module_assert_mutex();
4176 list_for_each_entry(mod
, &modules
, list
) {
4177 /* We hold module_mutex: no need for rcu_dereference_sched */
4178 struct mod_kallsyms
*kallsyms
= mod
->kallsyms
;
4180 if (mod
->state
== MODULE_STATE_UNFORMED
)
4182 for (i
= 0; i
< kallsyms
->num_symtab
; i
++) {
4184 if (kallsyms
->symtab
[i
].st_shndx
== SHN_UNDEF
)
4187 ret
= fn(data
, symname(kallsyms
, i
),
4188 mod
, kallsyms
->symtab
[i
].st_value
);
4195 #endif /* CONFIG_KALLSYMS */
4197 /* Maximum number of characters written by module_flags() */
4198 #define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4)
4200 /* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
4201 static char *module_flags(struct module
*mod
, char *buf
)
4205 BUG_ON(mod
->state
== MODULE_STATE_UNFORMED
);
4207 mod
->state
== MODULE_STATE_GOING
||
4208 mod
->state
== MODULE_STATE_COMING
) {
4210 bx
+= module_flags_taint(mod
, buf
+ bx
);
4211 /* Show a - for module-is-being-unloaded */
4212 if (mod
->state
== MODULE_STATE_GOING
)
4214 /* Show a + for module-is-being-loaded */
4215 if (mod
->state
== MODULE_STATE_COMING
)
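
/*
 * E.g. an out-of-tree, unsigned module still in the COMING state shows up
 * as "(OE+)"; a fully loaded in-tree module with no taints gets an empty
 * flags string.
 */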
4224 #ifdef CONFIG_PROC_FS
4225 /* Called by the /proc file system to return a list of modules. */
4226 static void *m_start(struct seq_file
*m
, loff_t
*pos
)
4228 mutex_lock(&module_mutex
);
4229 return seq_list_start(&modules
, *pos
);
4232 static void *m_next(struct seq_file
*m
, void *p
, loff_t
*pos
)
4234 return seq_list_next(p
, &modules
, pos
);
4237 static void m_stop(struct seq_file
*m
, void *p
)
4239 mutex_unlock(&module_mutex
);
4242 static int m_show(struct seq_file
*m
, void *p
)
4244 struct module
*mod
= list_entry(p
, struct module
, list
);
4245 char buf
[MODULE_FLAGS_BUF_SIZE
];
4247 /* We always ignore unformed modules. */
4248 if (mod
->state
== MODULE_STATE_UNFORMED
)
4251 seq_printf(m
, "%s %u",
4252 mod
->name
, mod
->init_layout
.size
+ mod
->core_layout
.size
);
4253 print_unload_info(m
, mod
);
4255 /* Informative for users. */
4256 seq_printf(m
, " %s",
4257 mod
->state
== MODULE_STATE_GOING
? "Unloading" :
4258 mod
->state
== MODULE_STATE_COMING
? "Loading" :
4260 /* Used by oprofile and other similar tools. */
4261 seq_printf(m
, " 0x%pK", mod
->core_layout
.base
);
4265 seq_printf(m
, " %s", module_flags(mod
, buf
));
4271 /* Format: modulename size refcount deps address
4273 Where refcount is a number or -, and deps is a comma-separated list
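   Example (illustrative): "mymod 16384 2 dep_a,dep_b, Live 0x0000000000000000"
   -- the address is printed with %pK, so unprivileged readers may see zeroes
   depending on kptr_restrict.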
4276 static const struct seq_operations modules_op
= {
4283 static int modules_open(struct inode
*inode
, struct file
*file
)
4285 return seq_open(file
, &modules_op
);
4288 static const struct file_operations proc_modules_operations
= {
4289 .open
= modules_open
,
4291 .llseek
= seq_lseek
,
4292 .release
= seq_release
,
4295 static int __init
proc_modules_init(void)
4297 proc_create("modules", 0, NULL
, &proc_modules_operations
);
4300 module_init(proc_modules_init
);
4303 /* Given an address, look for it in the module exception tables. */
4304 const struct exception_table_entry
*search_module_extables(unsigned long addr
)
4306 const struct exception_table_entry
*e
= NULL
;
4310 mod
= __module_address(addr
);
4314 if (!mod
->num_exentries
)
4317 e
= search_extable(mod
->extable
,
4324 * Now, if we found one, we are running inside it now, hence
4325 * we cannot unload the module, hence no refcnt needed.
4331 * is_module_address - is this address inside a module?
4332 * @addr: the address to check.
4334 * See is_module_text_address() if you simply want to see if the address
4335 * is code (not data).
4337 bool is_module_address(unsigned long addr
)
4342 ret
= __module_address(addr
) != NULL
;
4349 * __module_address - get the module which contains an address.
4350 * @addr: the address.
4352 * Must be called with preempt disabled or module mutex held so that
4353 * module doesn't get freed during this.
4355 struct module
*__module_address(unsigned long addr
)
4359 if (addr
< module_addr_min
|| addr
> module_addr_max
)
4362 module_assert_mutex_or_preempt();
4364 mod
= mod_find(addr
);
4366 BUG_ON(!within_module(addr
, mod
));
4367 if (mod
->state
== MODULE_STATE_UNFORMED
)
4374 * is_module_text_address - is this address inside module code?
4375 * @addr: the address to check.
4377 * See is_module_address() if you simply want to see if the address is
4378 * anywhere in a module. See kernel_text_address() for testing if an
4379 * address corresponds to kernel or module code.
4381 bool is_module_text_address(unsigned long addr
)
4386 ret
= __module_text_address(addr
) != NULL
;
4393 * __module_text_address - get the module whose code contains an address.
4394 * @addr: the address.
4396 * Must be called with preempt disabled or module mutex held so that
4397 * module doesn't get freed during this.
4399 struct module
*__module_text_address(unsigned long addr
)
4401 struct module
*mod
= __module_address(addr
);
4403 /* Make sure it's within the text section. */
4404 if (!within(addr
, mod
->init_layout
.base
, mod
->init_layout
.text_size
)
4405 && !within(addr
, mod
->core_layout
.base
, mod
->core_layout
.text_size
))
4411 /* Don't grab lock, we're oopsing. */
4412 void print_modules(void)
4415 char buf
[MODULE_FLAGS_BUF_SIZE
];
4417 printk(KERN_DEFAULT
"Modules linked in:");
4418 /* Most callers should already have preempt disabled, but make sure */
4420 list_for_each_entry_rcu(mod
, &modules
, list
) {
4421 if (mod
->state
== MODULE_STATE_UNFORMED
)
4423 pr_cont(" %s%s", mod
->name
, module_flags(mod
, buf
));
4426 if (last_unloaded_module
[0])
4427 pr_cont(" [last unloaded: %s]", last_unloaded_module
);
4431 #ifdef CONFIG_MODVERSIONS
4432 /* Generate the signature for all relevant module structures here.
4433 * If these change, we don't want to try to parse the module. */
4434 void module_layout(struct module
*mod
,
4435 struct modversion_info
*ver
,
4436 struct kernel_param
*kp
,
4437 struct kernel_symbol
*ks
,
4438 struct tracepoint
* const *tp
)
4441 EXPORT_SYMBOL(module_layout
);