struct kexec_info {
    struct kexec_segment *segment;
    int nr_segments;
    struct memory_range *memory_range;
    int memory_ranges;
    struct memory_range *crash_range;
    int nr_crash_ranges;
    void *entry;
    struct mem_ehdr rhdr;
    unsigned long backup_start;
    unsigned long kexec_flags;
    unsigned long backup_src_start;
    unsigned long backup_src_size;
    /* Set to 1 if we are using kexec file syscall */
    unsigned long file_mode :1;

    /* Filled by kernel image processing code */
    int initrd_fd;
    char *command_line;
    int command_line_len;
};
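Each element of the segment array uses the layout that kexec-tools shares with the kernel's uapi header (include/uapi/linux/kexec.h):

/* include/uapi/linux/kexec.h */
struct kexec_segment {
    const void *buf;    /* source buffer in the calling process */
    size_t bufsz;       /* size of the source buffer */
    const void *mem;    /* physical destination address */
    size_t memsz;       /* destination size; page-aligned and >= bufsz */
};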
static void __init reserve_crashkernel(void)
{
    unsigned long long crash_base, crash_size;
    int ret;

    ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
                &crash_size, &crash_base);
    /* no crashkernel= or invalid value specified */
    if (ret || !crash_size)
        return;

    crash_size = PAGE_ALIGN(crash_size);

    if (crash_base == 0) {
        /* Current arm64 boot protocol requires 2MB alignment */
        crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
                        crash_size, SZ_2M);
        if (crash_base == 0) {
            pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
                crash_size);
            return;
        }
    } else {
        /* User specifies base address explicitly. */
        if (!memblock_is_region_memory(crash_base, crash_size)) {
            pr_warn("cannot reserve crashkernel: region is not memory\n");
            return;
        }

        if (memblock_is_region_reserved(crash_base, crash_size)) {
            pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
            return;
        }

        if (!IS_ALIGNED(crash_base, SZ_2M)) {
            pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
            return;
        }
    }
    memblock_reserve(crash_base, crash_size);

    crashk_res.start = crash_base;
    crashk_res.end = crash_base + crash_size - 1;
}
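The reservation is driven entirely by the crashkernel= boot parameter parsed above. The two shapes handled by the if/else branches look like this (values illustrative; the syntax is crashkernel=size[@offset]):

    crashkernel=256M              # kernel picks a 2MB-aligned base below ARCH_LOW_ADDRESS_LIMIT
    crashkernel=256M@0x60000000   # caller-chosen base; must be RAM, unreserved, and 2MB-aligned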
    /* We only trust the superuser with rebooting the system. */
    if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
        return -EPERM;

    /*
     * Verify we have a legal set of flags
     * This leaves us room for future extensions.
     */
    if ((flags & KEXEC_FLAGS) != (flags & ~KEXEC_ARCH_MASK))
        return -EINVAL;

    /* Verify we are on the appropriate architecture */
    if (((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH) &&
        ((flags & KEXEC_ARCH_MASK) != KEXEC_ARCH_DEFAULT))
        return -EINVAL;

    /* Put an artificial cap on the number
     * of segments passed to kexec_load.
     */
    if (nr_segments > KEXEC_SEGMENT_MAX)
        return -EINVAL;

    /* Because we write directly to the reserved memory
     * region when loading crash kernels we need a mutex here to
     * prevent multiple crash kernels from attempting to load
     * simultaneously, and to prevent a crash kernel from loading
     * over the top of an in-use crash kernel.
     *
     * KISS: always take the mutex.
     */
    if (!mutex_trylock(&kexec_mutex))
        return -EBUSY;

    result = do_kexec_load(entry, nr_segments, segments, flags);

    mutex_unlock(&kexec_mutex);

    return result;
}
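For context, the checks above run in the kexec_load(2) syscall entry point, declared in kernel/kexec.c as:

SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
        struct kexec_segment __user *, segments, unsigned long, flags)

The heavy lifting is delegated to do_kexec_load(), shown next.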
    if (flags & KEXEC_ON_CRASH) {
        dest_image = &kexec_crash_image;
        if (kexec_crash_image)
            arch_kexec_unprotect_crashkres();
    } else {
        dest_image = &kexec_image;
    }

    if (nr_segments == 0) {
        /* Uninstall image */
        kimage_free(xchg(dest_image, NULL));
        return 0;
    }
    if (flags & KEXEC_ON_CRASH) {
        /*
         * Loading another kernel to switch to if this one
         * crashes. Free any current crash dump kernel before
         * we corrupt it.
         */
        kimage_free(xchg(&kexec_crash_image, NULL));
    }

    ret = kimage_alloc_init(&image, entry, nr_segments, segments, flags);
    if (ret)
        return ret;

    if (flags & KEXEC_PRESERVE_CONTEXT)
        image->preserve_context = 1;

    ret = machine_kexec_prepare(image);
    if (ret)
        goto out;

    for (i = 0; i < nr_segments; i++) {
        ret = kimage_load_segment(image, &image->segment[i]);
        if (ret)
            goto out;
    }

    kimage_terminate(image);

    /* Install the new kernel and uninstall the old */
    image = xchg(dest_image, image);

out:
    if ((flags & KEXEC_ON_CRASH) && kexec_crash_image)
        arch_kexec_protect_crashkres();

    kimage_free(image);
    return ret;
}
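From userspace, the two dest_image targets correspond to the ordinary and panic variants of kexec(8) (illustrative invocations; paths and append strings depend on the distribution):

    # stage a normal reboot kernel (kexec_image)
    kexec -l /boot/vmlinuz --initrd=/boot/initrd.img --reuse-cmdline
    # stage a crash kernel into the crashkernel= region (kexec_crash_image)
    kexec -p /boot/vmlinuz --initrd=/boot/initrd.img --append="root=... irqpoll maxcpus=1"
    # unload; reaches the nr_segments == 0 path above (add -p for the crash kernel)
    kexec -u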
/*
 * No panic_cpu check version of crash_kexec(). This function is called
 * only when panic_cpu holds the current CPU number; this is the only CPU
 * which processes crash_kexec routines.
 */
void __crash_kexec(struct pt_regs *regs)
{
    /* Take the kexec_mutex here to prevent sys_kexec_load
     * running on one cpu from replacing the crash kernel
     * we are using after a panic on a different cpu.
     *
     * If the crash kernel was not located in a fixed area
     * of memory the xchg(&kexec_crash_image) would be
     * sufficient. But since I reuse the memory...
     */
    if (mutex_trylock(&kexec_mutex)) {
        if (kexec_crash_image) {
            struct pt_regs fixed_regs;

            crash_setup_regs(&fixed_regs, regs);
            crash_save_vmcoreinfo();
            machine_crash_shutdown(&fixed_regs);
            machine_kexec(kexec_crash_image);
        }
        mutex_unlock(&kexec_mutex);
    }
}
/**
 * machine_kexec - Do the kexec reboot.
 *
 * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
 */
void machine_kexec(struct kimage *kimage)
{
    phys_addr_t reboot_code_buffer_phys;
    void *reboot_code_buffer;
    bool in_kexec_crash = (kimage == kexec_crash_image);
    bool stuck_cpus = cpus_are_stuck_in_kernel();

    clear_abnormal_magic();

    /*
     * New cpus may have become stuck_in_kernel after we loaded the image.
     */
    BUG_ON(!in_kexec_crash && (stuck_cpus || (num_online_cpus() > 1)));
    WARN(in_kexec_crash && (stuck_cpus || smp_crash_stop_failed()),
        "Some CPUs may be stale, kdump will be unreliable.\n");

    reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
    reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);

    /*
     * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
     * after the kernel is shut down.
     */
    memcpy(reboot_code_buffer, arm64_relocate_new_kernel,
        arm64_relocate_new_kernel_size);

    /* Flush the reboot_code_buffer in preparation for its execution. */
    __flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
    flush_icache_range((uintptr_t)reboot_code_buffer,
        (uintptr_t)reboot_code_buffer + arm64_relocate_new_kernel_size);

    /* Flush the kimage list and its buffers. */
    kexec_list_flush(kimage);

    /* Flush the new image if already in place. */
    if ((kimage != kexec_crash_image) && (kimage->head & IND_DONE))
        kexec_segment_flush(kimage);

    /*
     * cpu_soft_restart will shutdown the MMU, disable data caches, then
     * transfer control to the reboot_code_buffer which contains a copy of
     * the arm64_relocate_new_kernel routine. arm64_relocate_new_kernel
     * uses physical addressing to relocate the new image to its final
     * position and transfers control to the image entry point when the
     * relocation is complete.
     */
    cpu_soft_restart(kimage != kexec_crash_image, reboot_code_buffer_phys,
        kimage->head, kimage->start, 0);

    BUG_ON(1); /* Should never get here. */
}
/*
 * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
 * cpu_soft_restart.
 *
 * @el2_switch: Flag to indicate a switch to EL2 is needed.
 * @entry: Location to jump to for soft reset.
 * arg0: First argument passed to @entry.
 * arg1: Second argument passed to @entry.
 * arg2: Third argument passed to @entry.
 *
 * Put the CPU into the same state as it would be if it had been reset, and
 * branch to what would be the reset vector. It must be executed with the
 * flat identity mapping.
 */
ENTRY(__cpu_soft_restart)
    /* Clear sctlr_el1 flags. */
    mrs x12, sctlr_el1
    ldr x13, =SCTLR_ELx_FLAGS
    bic x12, x12, x13
    msr sctlr_el1, x12
    isb

    cbz x0, 1f          // el2_switch?
    mov x0, #HVC_SOFT_RESTART
    hvc #0              // no return

1:  mov x18, x1         // entry
    mov x0, x2          // arg0
    mov x1, x3          // arg1
    mov x2, x4          // arg2
    br  x18
ENDPROC(__cpu_soft_restart)
/*
 * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
 *
 * The memory that the old kernel occupies may be overwritten when copying the
 * new image to its final location. To assure that the
 * arm64_relocate_new_kernel routine which does that copy is not overwritten,
 * all code and data needed by arm64_relocate_new_kernel must be between the
 * symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end. The
 * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec
 * control_code_page, a special page which has been set up to be preserved
 * during the copy operation.
 */
ENTRY(arm64_relocate_new_kernel)
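The routine's body is omitted here; conceptually it walks the kimage->head entry list that kexec_list_flush() cleaned above, copying source pages to their final physical destinations. A C model of that walk follows (an illustrative sketch, not the actual assembly; the IND_* flag values are the real ones from include/linux/kexec.h):

/* Illustrative C model of the relocation loop; not the real asm. */
#include <string.h>

#define IND_DESTINATION 0x1 /* set the copy destination */
#define IND_INDIRECTION 0x2 /* switch to another list page */
#define IND_DONE        0x4 /* end of list */
#define IND_SOURCE      0x8 /* copy one source page to the destination */

static void relocate_model(unsigned long *entry, unsigned long page_size)
{
    unsigned char *dest = NULL;

    while (!(*entry & IND_DONE)) {
        unsigned long addr = *entry & ~(page_size - 1);

        if (*entry & IND_DESTINATION) {
            dest = (unsigned char *)addr;
            entry++;
        } else if (*entry & IND_INDIRECTION) {
            entry = (unsigned long *)addr;  /* jump to the next list page */
        } else if (*entry & IND_SOURCE) {
            memcpy(dest, (void *)addr, page_size);
            dest += page_size;
            entry++;
        } else {
            entry++;
        }
    }
}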
/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
    int rc = 0;

    /* Allow architectures to allocate ELF header in 2nd kernel */
    rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
    if (rc)
        return rc;
    /*
     * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
     * then capture the dump.
     */
    if (!(is_vmcore_usable()))
        return rc;
    rc = parse_crash_elf_headers();
    if (rc) {
        pr_warn("Kdump: vmcore not initialized\n");
        return rc;
    }
    elfcorehdr_free(elfcorehdr_addr);
    elfcorehdr_addr = ELFCORE_ADDR_ERR;

    proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
    if (proc_vmcore)
        proc_vmcore->size = vmcore_size;
    return 0;
}
/*
 * is_kdump_kernel() checks whether this kernel is booting after a panic of
 * the previous kernel or not. This is determined by checking if the previous
 * kernel has passed the elf core header address on the command line.
 *
 * This is not just a test of whether CONFIG_CRASH_DUMP is enabled or not. It
 * returns 1 only if CONFIG_CRASH_DUMP=y and the kernel is booting after a
 * panic of the previous kernel.
 */
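The check itself is a one-liner, as in include/linux/crash_dump.h of the kernels quoted in this walkthrough:

static inline int is_kdump_kernel(void)
{
    return (elfcorehdr_addr != ELFCORE_ADDR_MAX);
}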
/*
 * is_vmcore_usable() checks if the kernel is booting after a panic and
 * the vmcore region is usable.
 *
 * This makes use of the fact that due to alignment -2ULL is not
 * a valid pointer, much in the vein of IS_ERR(), except
 * dealing directly with an unsigned long long rather than a pointer.
 */
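Concretely, again from include/linux/crash_dump.h, where ELFCORE_ADDR_ERR is the -2ULL sentinel mentioned above:

static inline int is_vmcore_usable(void)
{
    return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR ? 1 : 0;
}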
/*
 * Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
                 int userbuf)
{
    ssize_t acc = 0, tmp;
    size_t tsz;
    u64 start;
    struct vmcore *m = NULL;

    if (buflen == 0 || *fpos >= vmcore_size)
        return 0;

    /* trim buflen to not go beyond EOF */
    if (buflen > vmcore_size - *fpos)
        buflen = vmcore_size - *fpos;
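In the capture kernel this code is only ever reached through reads of /proc/vmcore. A minimal userspace sketch of the typical consumer follows (illustrative; real tools such as makedumpfile or a plain cp do the same thing, with filtering or larger buffers):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    char buf[4096];
    ssize_t n;
    int in = open("/proc/vmcore", O_RDONLY);
    int out = open("/var/crash/vmcore", O_WRONLY | O_CREAT | O_TRUNC, 0600);

    if (in < 0 || out < 0) {
        perror("open");
        return 1;
    }
    /* Each read() here lands in __read_vmcore() above. */
    while ((n = read(in, buf, sizeof(buf))) > 0) {
        if (write(out, buf, n) != n)
            return 1;
    }
    close(in);
    close(out);
    return 0;
}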
crash 7.2.5
Copyright (C) 2002-2019  Red Hat, Inc.
Copyright (C) 2004, 2005, 2006, 2010  IBM Corporation
Copyright (C) 1999-2006  Hewlett-Packard Co
Copyright (C) 2005, 2006, 2011, 2012  Fujitsu Limited
Copyright (C) 2006, 2007  VA Linux Systems Japan K.K.
Copyright (C) 2005, 2011  NEC Corporation
Copyright (C) 1999, 2002, 2007  Silicon Graphics, Inc.
Copyright (C) 1999, 2000, 2001, 2002  Mission Critical Linux, Inc.
This program is free software, covered by the GNU General Public License,
and you are welcome to change it and/or distribute copies of it under
certain conditions.  Enter "help copying" to see the conditions.
This program has absolutely no warranty.  Enter "help warranty" for details.

GNU gdb (GDB) 7.6
Copyright (C) 2013 Free Software Foundation, Inc.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.  Type "show copying"
and "show warranty" for details.
This GDB was configured as "--host=x86_64-unknown-linux-gnu --target=aarch64-elf-linux"...

Redefine command "pstring"?  (y or n) [answered Y; input not from terminal]
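A banner like the one above is printed when crash is launched against a kdump-produced dump; a typical invocation looks like this (paths illustrative; the vmlinux must carry debug info matching the crashed kernel):

    $ crash /usr/lib/debug/lib/modules/$(uname -r)/vmlinux /var/crash/vmcore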