// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/proc/vmcore.c - Interface for accessing the crash
 * dump from the system's previous life.
 * Heavily borrowed from fs/proc/kcore.c
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */
#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/cc_platform.h>
#include <asm/io.h>
#include "internal.h"
/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;
/* Size of all notes minus the device dump notes */
static size_t elfnotes_orig_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/* Device Dump list and mutex to synchronize access to list */
static LIST_HEAD(vmcoredd_list);
static DEFINE_MUTEX(vmcoredd_mutex);

static bool vmcoredd_disabled;
core_param(novmcoredd, vmcoredd_disabled, bool, 0);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

/* Device Dump Size */
static size_t vmcoredd_orig_sz;
static DEFINE_SPINLOCK(vmcore_cb_lock);
DEFINE_STATIC_SRCU(vmcore_cb_srcu);
/* List of registered vmcore callbacks. */
static LIST_HEAD(vmcore_cb_list);
/* Whether the vmcore has been opened once. */
static bool vmcore_opened;
void register_vmcore_cb(struct vmcore_cb *cb)
{
	INIT_LIST_HEAD(&cb->next);
	spin_lock(&vmcore_cb_lock);
	list_add_tail(&cb->next, &vmcore_cb_list);
	/*
	 * Registering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., manual driver loading).
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback registration\n");
	spin_unlock(&vmcore_cb_lock);
}
EXPORT_SYMBOL_GPL(register_vmcore_cb);
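/*
 * Example usage (a hypothetical driver, not part of this file; the mydrv_*
 * names are illustrative only): a driver that knows certain old-kernel pfns
 * are no longer backed by RAM can register a callback so reads of those
 * pages return zeroes instead of stale data.
 *
 *	static bool mydrv_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn)
 *	{
 *		return !mydrv_page_was_reused(pfn);
 *	}
 *	static struct vmcore_cb mydrv_vmcore_cb = {
 *		.pfn_is_ram = mydrv_pfn_is_ram,
 *	};
 *	register_vmcore_cb(&mydrv_vmcore_cb);
 */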
void unregister_vmcore_cb(struct vmcore_cb *cb)
{
	spin_lock(&vmcore_cb_lock);
	list_del_rcu(&cb->next);
	/*
	 * Unregistering a vmcore callback after the vmcore was opened is
	 * very unusual (e.g., forced driver removal), but we cannot stop
	 * unregistering.
	 */
	if (vmcore_opened)
		pr_warn_once("Unexpected vmcore callback unregistration\n");
	spin_unlock(&vmcore_cb_lock);

	synchronize_srcu(&vmcore_cb_srcu);
}
EXPORT_SYMBOL_GPL(unregister_vmcore_cb);
static bool pfn_is_ram(unsigned long pfn)
{
	struct vmcore_cb *cb;
	bool ret = true;

	list_for_each_entry_srcu(cb, &vmcore_cb_list, next,
				 srcu_read_lock_held(&vmcore_cb_srcu)) {
		if (unlikely(!cb->pfn_is_ram))
			continue;
		ret = cb->pfn_is_ram(cb, pfn);
		if (!ret)
			break;
	}

	return ret;
}
static int open_vmcore(struct inode *inode, struct file *file)
{
	spin_lock(&vmcore_cb_lock);
	vmcore_opened = true;
	spin_unlock(&vmcore_cb_lock);

	return 0;
}
/* Reads a page from the oldmem device from given offset. */
ssize_t read_from_oldmem(struct iov_iter *iter, size_t count,
			 u64 *ppos, bool encrypted)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;
	int idx;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);
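	/*
	 * Example: with 4 KiB pages, *ppos == 0x1234 yields pfn == 1 and
	 * offset == 0x234; the loop below then copies page by page,
	 * resetting offset to 0 after the first (possibly partial) page.
	 */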
	idx = srcu_read_lock(&vmcore_cb_srcu);
	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (!pfn_is_ram(pfn)) {
			tmp = iov_iter_zero(nr_bytes, iter);
		} else {
			if (encrypted)
				tmp = copy_oldmem_page_encrypted(iter, pfn,
								 nr_bytes,
								 offset);
			else
				tmp = copy_oldmem_page(iter, pfn, nr_bytes,
						       offset);
		}
		if (tmp < nr_bytes) {
			srcu_read_unlock(&vmcore_cb_srcu, idx);
			return -EFAULT;
		}

		*ppos += nr_bytes;
		count -= nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);
	srcu_read_unlock(&vmcore_cb_srcu, idx);

	return read;
}
/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}
/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}
/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	struct kvec kvec = { .iov_base = buf, .iov_len = count };
	struct iov_iter iter;

	iov_iter_kvec(&iter, READ, &kvec, 1, count);

	return read_from_oldmem(&iter, count, ppos, false);
}
/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	struct kvec kvec = { .iov_base = buf, .iov_len = count };
	struct iov_iter iter;

	iov_iter_kvec(&iter, READ, &kvec, 1, count);

	return read_from_oldmem(&iter, count, ppos,
			cc_platform_has(CC_ATTR_MEM_ENCRYPT));
}
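/*
 * Note on the 'encrypted' argument: elfcorehdr_read() passes false because
 * the ELF core header was prepared for the capture kernel and is readable
 * as plain text, while the note segments read above still live in the old
 * kernel's memory, which may be memory-encrypted (hence the
 * cc_platform_has(CC_ATTR_MEM_ENCRYPT) check). This rationale is inferred
 * from the two call sites rather than stated in this file.
 */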
/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	prot = pgprot_encrypted(prot);
	return remap_pfn_range(vma, from, pfn, size, prot);
}
/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak copy_oldmem_page_encrypted(struct iov_iter *iter,
					  unsigned long pfn, size_t csize,
					  unsigned long offset)
{
	return copy_oldmem_page(iter, pfn, csize, offset);
}
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
static int vmcoredd_copy_dumps(struct iov_iter *iter, u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (copy_to_iter(buf, tsz, iter) < tsz) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#ifdef CONFIG_MMU
static int vmcoredd_mmap_dumps(struct vm_area_struct *vma, unsigned long dst,
			       u64 start, size_t size)
{
	struct vmcoredd_node *dump;
	u64 offset = 0;
	int ret = 0;
	size_t tsz;
	char *buf;

	mutex_lock(&vmcoredd_mutex);
	list_for_each_entry(dump, &vmcoredd_list, list) {
		if (start < offset + dump->size) {
			tsz = min(offset + (u64)dump->size - start, (u64)size);
			buf = dump->buf + start - offset;
			if (remap_vmalloc_range_partial(vma, dst, buf, 0,
							tsz)) {
				ret = -EFAULT;
				goto out_unlock;
			}

			size -= tsz;
			start += tsz;
			dst += tsz;

			/* Leave now if buffer filled already */
			if (!size)
				goto out_unlock;
		}
		offset += dump->size;
	}

out_unlock:
	mutex_unlock(&vmcoredd_mutex);
	return ret;
}
#endif /* CONFIG_MMU */
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
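/*
 * Sketch of the /proc/vmcore file layout served below (offsets are page
 * aligned):
 *
 *	[ ELF header + program headers ]  elfcorebuf, elfcorebuf_sz
 *	[ device dump notes            ]  vmcoredd_orig_sz bytes (if any)
 *	[ remaining ELF notes          ]  elfnotes_buf, up to elfnotes_sz
 *	[ old memory, one chunk per    ]  vmcore_list, fetched with
 *	[   PT_LOAD entry              ]  read_from_oldmem()
 */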
/* Read from the ELF header and then the crash dump. Returns the number of
 * bytes read on success, a negative value on error.
 */
static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (!iov_iter_count(iter) || *fpos >= vmcore_size)
		return 0;

	iov_iter_truncate(iter, vmcore_size - *fpos);

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, iov_iter_count(iter));
		if (copy_to_iter(elfcorebuf + *fpos, tsz, iter) < tsz)
			return -EFAULT;
		*fpos += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (!iov_iter_count(iter))
			return acc;
	}

	/* Read Elf note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (*fpos < elfcorebuf_sz + vmcoredd_orig_sz) {
			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)*fpos, iov_iter_count(iter));
			start = *fpos - elfcorebuf_sz;
			if (vmcoredd_copy_dumps(iter, start, tsz))
				return -EFAULT;

			*fpos += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!iov_iter_count(iter))
				return acc;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos,
			  iov_iter_count(iter));
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz - vmcoredd_orig_sz;
		if (copy_to_iter(kaddr, tsz, iter) < tsz)
			return -EFAULT;

		*fpos += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (!iov_iter_count(iter))
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - *fpos,
					    iov_iter_count(iter));
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(iter, tsz, &start,
					cc_platform_has(CC_ATTR_MEM_ENCRYPT));
			if (tmp < 0)
				return tmp;
			*fpos += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (!iov_iter_count(iter))
				return acc;
		}
	}

	return acc;
}
static ssize_t read_vmcore(struct kiocb *iocb, struct iov_iter *iter)
{
	return __read_vmcore(iter, &iocb->ki_pos);
}
/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct iov_iter iter;
	struct kvec kvec;
	struct page *page;
	loff_t offset;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_SHIFT;
		kvec.iov_base = page_address(page);
		kvec.iov_len = PAGE_SIZE;
		iov_iter_kvec(&iter, READ, &kvec, 1, PAGE_SIZE);

		rc = __read_vmcore(&iter, &offset);
		if (rc < 0) {
			unlock_page(page);
			put_page(page);
			return vmf_error(rc);
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}
static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};
/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *vmcore_alloc_buf(size_t size)
{
#ifdef CONFIG_MMU
	return vmalloc_user(size);
#else
	return vzalloc(size);
#endif
}
/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU

/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range() replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len, NULL);
	return -EAGAIN;
}
static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	int ret, idx;

	/*
	 * Check if a callback was registered to avoid looping over all
	 * pages without a reason.
	 */
	idx = srcu_read_lock(&vmcore_cb_srcu);
	if (!list_empty(&vmcore_cb_list))
		ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
	srcu_read_unlock(&vmcore_cb_srcu, idx);
	return ret;
}
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		/* We add device dumps before other elf notes because the
		 * other elf notes may not fill the elf notes buffer
		 * completely and we will end up with zero-filled data
		 * between the elf notes and the device dumps. Tools will
		 * then try to decode this zero-filled data as valid notes
		 * and we don't want that. Hence, adding device dumps before
		 * the other elf notes ensures that zero-filled data can be
		 * avoided. This also ensures that the device dumps and
		 * other elf notes can be properly mmaped at page aligned
		 * addresses.
		 */
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
		/* Read device dumps */
		if (start < elfcorebuf_sz + vmcoredd_orig_sz) {
			u64 start_off;

			tsz = min(elfcorebuf_sz + vmcoredd_orig_sz -
				  (size_t)start, size);
			start_off = start - elfcorebuf_sz;
			if (vmcoredd_mmap_dumps(vma, vma->vm_start + len,
						start_off, tsz))
				goto fail;

			size -= tsz;
			start += tsz;
			len += tsz;

			/* leave now if filled buffer already */
			if (!size)
				return 0;
		}
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */

		/* Read remaining elf notes */
		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz - vmcoredd_orig_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, 0, tsz))
			goto fail;

		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = (size_t)min_t(unsigned long long,
					    m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len, NULL);
	return -EAGAIN;
}
#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif
static const struct proc_ops vmcore_proc_ops = {
	.proc_open	= open_vmcore,
	.proc_read_iter	= read_vmcore,
	.proc_lseek	= default_llseek,
	.proc_mmap	= mmap_vmcore,
};
static struct vmcore * __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}
static u64 get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
			   struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}
/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}
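/*
 * Worked example for the note-size arithmetic above: a note with
 * n_namesz == 5 and n_descsz == 10 occupies
 * sizeof(Elf64_Nhdr) + 8 + 12 bytes, because name and descriptor are
 * each padded to the next multiple of four.
 */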
/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}
/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type   = PT_NOTE;
	phdr.p_flags  = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr  = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align  = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}
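/*
 * Example of the merge above (sketch): three PT_NOTE entries of 100, 200
 * and 50 bytes collapse into a single PT_NOTE with p_memsz == 350;
 * e_phnum shrinks by two, and the note data itself now lives in the
 * page-aligned *notes_buf rather than in the old kernel's memory.
 */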
/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to real size of ELF
 * note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}
/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}
/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that
 * size of the buffer @notes_buf is equal to or larger than sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and each of the PT_NOTE program headers has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = vmcore_alloc_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type   = PT_NOTE;
	phdr.p_flags  = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr  = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align  = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	/* Store the size of all notes. We need this to update the note
	 * header when device dumps are added.
	 */
	elfnotes_orig_sz = phdr.p_memsz;

	return 0;
}
/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						       size_t elfsz,
						       size_t elfnotes_sz,
						       struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}
static int __init process_ptload_program_headers_elf32(char *elfptr,
						       size_t elfsz,
						       size_t elfnotes_sz,
						       struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}
/* Sets offset fields of vmcore elements. */
static void set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
				    struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}
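/*
 * Example: with a 4 KiB header block (elfsz), 8 KiB of notes and two
 * memory chunks of 16 KiB and 4 KiB, the chunks are assigned file
 * offsets 12 KiB and 28 KiB respectively; the regions simply follow
 * one another in the exported file.
 */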
static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}
static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    (ehdr.e_type != ET_CORE) ||
	    !vmcore_elf64_check_arch(&ehdr) ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
	    ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr.e_version != EV_CURRENT ||
	    ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
	    ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
	    ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}
static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
	    (ehdr.e_type != ET_CORE) ||
	    !vmcore_elf32_check_arch(&ehdr) ||
	    ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
	    ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
	    ehdr.e_version != EV_CURRENT ||
	    ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
	    ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
	    ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}
static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}
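/*
 * Initialization flow (sketch): vmcore_init() calls
 * parse_crash_elf_headers(), which dispatches to the ELFCLASS64 or
 * ELFCLASS32 variant; that in turn merges the PT_NOTE headers, builds
 * vmcore_list from the PT_LOAD entries and computes the file offsets
 * used by the read/mmap paths above.
 */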
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
/**
 * vmcoredd_write_header - Write vmcore device dump header at the
 * beginning of the dump's buffer.
 * @buf: Output buffer where the note is written
 * @data: Dump info
 * @size: Size of the dump
 *
 * Fills beginning of the dump's buffer with vmcore device dump header.
 */
static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
				  u32 size)
{
	struct vmcoredd_header *vdd_hdr = (struct vmcoredd_header *)buf;

	vdd_hdr->n_namesz = sizeof(vdd_hdr->name);
	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
	vdd_hdr->n_type = NT_VMCOREDD;

	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
		sizeof(vdd_hdr->name));
	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
}
/**
 * vmcoredd_update_program_headers - Update all Elf program headers
 * @elfptr: Pointer to elf header
 * @elfnotesz: Size of elf notes aligned to page size
 * @vmcoreddsz: Size of device dumps to be added to elf note header
 *
 * Determine type of Elf header (Elf64 or Elf32) and update the elf note size.
 * Also update the offsets of all the program headers after the elf note header.
 */
static void vmcoredd_update_program_headers(char *elfptr, size_t elfnotesz,
					    size_t vmcoreddsz)
{
	unsigned char *e_ident = (unsigned char *)elfptr;
	u64 start, end, size;
	loff_t vmcore_off;
	u32 i;

	vmcore_off = elfcorebuf_sz + elfnotesz;

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		Elf64_Ehdr *ehdr = (Elf64_Ehdr *)elfptr;
		Elf64_Phdr *phdr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off = vmcore_off + size;
		}
	} else {
		Elf32_Ehdr *ehdr = (Elf32_Ehdr *)elfptr;
		Elf32_Phdr *phdr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));

		/* Update all program headers */
		for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
			if (phdr->p_type == PT_NOTE) {
				/* Update note size */
				phdr->p_memsz = elfnotes_orig_sz + vmcoreddsz;
				phdr->p_filesz = phdr->p_memsz;
				continue;
			}

			start = rounddown(phdr->p_offset, PAGE_SIZE);
			end = roundup(phdr->p_offset + phdr->p_memsz,
				      PAGE_SIZE);
			size = end - start;
			phdr->p_offset = vmcore_off + (phdr->p_offset - start);
			vmcore_off = vmcore_off + size;
		}
	}
}
/**
 * vmcoredd_update_size - Update the total size of the device dumps and update
 * Elf header
 * @dump_size: Size of the current device dump to be added to total size
 *
 * Update the total size of all the device dumps and update the Elf program
 * headers. Calculate the new offsets for the vmcore list and update the
 * total vmcore size.
 */
static void vmcoredd_update_size(size_t dump_size)
{
	vmcoredd_orig_sz += dump_size;
	elfnotes_sz = roundup(elfnotes_orig_sz, PAGE_SIZE) + vmcoredd_orig_sz;
	vmcoredd_update_program_headers(elfcorebuf, elfnotes_sz,
					vmcoredd_orig_sz);

	/* Update vmcore list offsets */
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);

	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);
	proc_vmcore->size = vmcore_size;
}
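/*
 * Worked example for vmcoredd_update_size(): with
 * elfnotes_orig_sz == 0x1300 and 0x2000 bytes of device dumps so far,
 * elfnotes_sz becomes roundup(0x1300, 0x1000) + 0x2000 == 0x4000
 * (assuming 4 KiB pages).
 */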
/**
 * vmcore_add_device_dump - Add a buffer containing device dump to vmcore
 * @data: dump info.
 *
 * Allocate a buffer and invoke the calling driver's dump collect routine.
 * Write Elf note at the beginning of the buffer to indicate vmcore device
 * dump and add the dump to global list.
 */
int vmcore_add_device_dump(struct vmcoredd_data *data)
{
	struct vmcoredd_node *dump;
	void *buf = NULL;
	size_t data_size;
	int ret;

	if (vmcoredd_disabled) {
		pr_err_once("Device dump is disabled\n");
		return -EINVAL;
	}

	if (!data || !strlen(data->dump_name) ||
	    !data->vmcoredd_callback || !data->size)
		return -EINVAL;

	dump = vzalloc(sizeof(*dump));
	if (!dump) {
		ret = -ENOMEM;
		goto out_err;
	}

	/* Keep size of the buffer page aligned so that it can be mmaped */
	data_size = roundup(sizeof(struct vmcoredd_header) + data->size,
			    PAGE_SIZE);

	/* Allocate buffer for drivers to write their dumps */
	buf = vmcore_alloc_buf(data_size);
	if (!buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	vmcoredd_write_header(buf, data, data_size -
			      sizeof(struct vmcoredd_header));

	/* Invoke the driver's dump collection routine */
	ret = data->vmcoredd_callback(data, buf +
				      sizeof(struct vmcoredd_header));
	if (ret)
		goto out_err;

	dump->buf = buf;
	dump->size = data_size;

	/* Add the dump to driver sysfs list */
	mutex_lock(&vmcoredd_mutex);
	list_add_tail(&dump->list, &vmcoredd_list);
	mutex_unlock(&vmcoredd_mutex);

	vmcoredd_update_size(data_size);
	return 0;

out_err:
	vfree(buf);
	vfree(dump);

	return ret;
}
EXPORT_SYMBOL(vmcore_add_device_dump);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
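/*
 * Example usage of vmcore_add_device_dump() (a hypothetical driver; the
 * mydrv_* names are illustrative only): collect a device/firmware snapshot
 * into the vmcore as an NT_VMCOREDD note.
 *
 *	static int mydrv_collect(struct vmcoredd_data *data, void *buf)
 *	{
 *		memcpy(buf, mydrv_fw_state, data->size);
 *		return 0;
 *	}
 *
 *	static struct vmcoredd_data mydrv_dump_data;
 *	strscpy(mydrv_dump_data.dump_name, "mydrv", VMCOREDD_MAX_NAME_BYTES);
 *	mydrv_dump_data.size = MYDRV_FW_STATE_SIZE;
 *	mydrv_dump_data.vmcoredd_callback = mydrv_collect;
 *	vmcore_add_device_dump(&mydrv_dump_data);
 */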
/* Free all dumps in vmcore device dump list */
static void vmcore_free_device_dumps(void)
{
#ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
	mutex_lock(&vmcoredd_mutex);
	while (!list_empty(&vmcoredd_list)) {
		struct vmcoredd_node *dump;

		dump = list_first_entry(&vmcoredd_list, struct vmcoredd_node,
					list);
		list_del(&dump->list);
		vfree(dump->buf);
		vfree(dump);
	}
	mutex_unlock(&vmcoredd_mutex);
#endif /* CONFIG_PROC_VMCORE_DEVICE_DUMP */
}
/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &vmcore_proc_ops);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);
/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	while (!list_empty(&vmcore_list)) {
		struct vmcore *m;

		m = list_first_entry(&vmcore_list, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();

	/* clear vmcore device dump list */
	vmcore_free_device_dumps();
}