/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */


#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include <asm/nops.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>

/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */
#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
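
/*
 * l4_index() extracts bits 39-47 of a virtual address, i.e. the PGD slot
 * used by 4-level paging; pud_index() likewise yields the PUD slot.  The
 * constants below locate the fixed kernel mappings in those tables.
 */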

L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL = l4_index(__START_KERNEL_map)

L3_START_KERNEL = pud_index(__START_KERNEL_map)

	.text
	__HEAD
	.code64
SYM_CODE_START_NOALIGN(startup_64)
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot, nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at we first fixup the physical addresses in our page
	 * tables and then reload them.
	 */

	/* Set up the stack for verify_cpu(), similar to initial_stack below */
	leaq	(__end_init_task - SIZEOF_PTREGS)(%rip), %rsp

	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	startup_64_setup_env
	popq	%rsi

	/* Now switch to __KERNEL_CS so IRET works reliably */
	pushq	$__KERNEL_CS
	leaq	.Lon_kernel_cs(%rip), %rax
	pushq	%rax
	lretq
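	/*
	 * The lretq pops the pushed address into %rip and __KERNEL_CS into
	 * %cs; a far return like this is the standard way to reload %cs in
	 * 64-bit mode, where a direct far jump is not available.
	 */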

.Lon_kernel_cs:
	UNWIND_HINT_EMPTY

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * Perform pagetable fixups.  Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	pushq	%rsi
	call	__startup_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax
	jmp	1f
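	/*
	 * The 1: label targeted here is inside secondary_startup_64 below;
	 * from that point on, the boot CPU shares the rest of the bringup
	 * path with the secondary CPUs.
	 */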
SYM_CODE_END(startup_64)

SYM_CODE_START(secondary_startup_64)
	UNWIND_HINT_EMPTY
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * The secondary_startup_64_no_verify entry point is only used by
	 * SEV-ES guests.  In those guests the call to verify_cpu() would cause
	 * #VC exceptions which cannot be handled at this stage of secondary
	 * CPU bringup.
	 *
	 * All non-SEV-ES systems, especially Intel systems, need to execute
	 * verify_cpu() above to make sure NX is enabled.
	 */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	UNWIND_HINT_EMPTY

	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
	pushq	%rsi
	call	__startup_secondary_64
	popq	%rsi

	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE mode, PGE and LA57 */
	movl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testl	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	movq	%rcx, %cr4

	/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax

	/*
	 * For SEV guests: Verify that the C-bit is correct.  A malicious
	 * hypervisor could lie about the C-bit position to perform a ROP
	 * attack on the guest by writing to the unencrypted stack and
	 * waiting for the next RET instruction.
	 * %rsi carries a pointer to the realmode data and is callee-clobbered.
	 * Save and restore it.
	 */
	pushq	%rsi
	movq	%rax, %rdi
	call	sev_verify_cbit
	popq	%rsi

	/* Switch to new page-table */
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	UNWIND_HINT_EMPTY
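	/*
	 * The movq $1f above used the label's link-time (virtual) address,
	 * so the indirect jump moved %rip out of the identity mapping and
	 * into the kernel's high virtual mapping.
	 */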

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses we're currently running at. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl	%eax, %eax
	movl	%eax, %ds
	movl	%eax, %ss
	movl	%eax, %es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax, %fs
	movl	%eax, %gs

	/*
	 * Set up %gs.
	 *
	 * The base of %gs always points to fixed_percpu_data.  If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot cpu uses init data section until
	 * the per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE, %ecx
	movl	initial_gs(%rip), %eax
	movl	initial_gs+4(%rip), %edx
	wrmsr
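	/*
	 * wrmsr writes %edx:%eax to the MSR selected by %ecx; here that
	 * installs the 64-bit GS base assembled from the two 32-bit halves
	 * of initial_gs.
	 */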

	/*
	 * Set up a boot time stack - any secondary CPU will have lost its
	 * stack by now because the cr3 switch above unmaps the real-mode
	 * stack.
	 */
	movq	initial_stack(%rip), %rsp

	/* Set up and load the IDT */
	pushq	%rsi
	call	early_setup_idt
	popq	%rsi

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx, %edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20, %edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX, early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */
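	/*
	 * Bit 20 of CPUID 0x80000001 %edx (saved in %edi above) is the NX
	 * feature flag, so EFER.NX only gets set on CPUs that advertise
	 * No-Execute support.
	 */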

	/* Setup cr0 */
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/*
	 * %rsi is a pointer to the real mode structure with interesting
	 * info; pass it to C.
	 */
	movq	%rsi, %rdi

.Ljump_to_C_code:
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address, this is only possible as indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
SYM_CODE_END(secondary_startup_64)

 #include " verify_cpu.S "
 #include " sev_verify_cbit.S "

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point.  It's called from play_dead().  Everything has
 * been set up already except the stack.  We just set up the stack here,
 * then call start_secondary() via .Ljump_to_C_code.
 */
SYM_CODE_START(start_cpu0)
	UNWIND_HINT_EMPTY
	movq	initial_stack(%rip), %rsp
	jmp	.Ljump_to_C_code
SYM_CODE_END(start_cpu0)
#endif

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	/* Pure iret required here - don't use INTERRUPT_RETURN */
	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
SYM_DATA(initial_gs,	.quad INIT_PER_CPU_VAR(fixed_percpu_data))
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
#endif

/*
 * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder
 * reliably detect the end of the stack.
 */
SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - SIZEOF_PTREGS)
	__FINITDATA

	__INIT
SYM_CODE_START(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
	UNWIND_HINT_IRET_REGS offset=16
SYM_CODE_END(early_idt_handler_array)
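
/*
 * Each stub emitted above is padded with 0xcc (int3) bytes up to
 * EARLY_IDT_HANDLER_SIZE, so the stub for vector i can be found at
 * early_idt_handler_array + i * EARLY_IDT_HANDLER_SIZE.
 */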

SYM_CODE_START_LOCAL(early_idt_handler_common)
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl	early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq	%rsi				/* pt_regs->si */
	movq	8(%rsp), %rsi			/* RSI = vector number */
	movq	%rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq	%rdx				/* pt_regs->dx */
	pushq	%rcx				/* pt_regs->cx */
	pushq	%rax				/* pt_regs->ax */
	pushq	%r8				/* pt_regs->r8 */
	pushq	%r9				/* pt_regs->r9 */
	pushq	%r10				/* pt_regs->r10 */
	pushq	%r11				/* pt_regs->r11 */
	pushq	%rbx				/* pt_regs->bx */
	pushq	%rbp				/* pt_regs->bp */
	pushq	%r12				/* pt_regs->r12 */
	pushq	%r13				/* pt_regs->r13 */
	pushq	%r14				/* pt_regs->r14 */
	pushq	%r15				/* pt_regs->r15 */
	UNWIND_HINT_REGS

	movq	%rsp, %rdi	/* RDI = pt_regs; RSI is already trapnr */
	call	do_early_exception

	decl	early_recursion_flag(%rip)
	jmp	restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during very early boot.  The
 * early_idt_handler_array can't be used because it returns via the
 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
 *
 * This handler will end up in the .init.text section and not be
 * available to boot secondary CPUs.
 */
SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8

	/* Build pt_regs */
	PUSH_AND_CLEAR_REGS

	/* Call C handler */
	movq	%rsp, %rdi
	movq	ORIG_RAX(%rsp), %rsi
	call	do_vc_no_ghcb

	/* Unwind pt_regs */
	POP_REGS

	/* Remove Error Code */
	addq	$8, %rsp

	/* Pure iret required here - don't use INTERRUPT_RETURN */
	iretq
SYM_CODE_END(vc_no_ghcb)
#endif

#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif
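
/*
 * With PTI, the user-space copy of each PGD occupies the second 4k page,
 * so switching between kernel and user page tables only has to toggle a
 * single address bit (bit 12, i.e. PAGE_SHIFT) in CR3.
 */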

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
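
/*
 * For example, PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 4) expands to four
 * .quad entries mapping 0, 2M, 4M and 6M (PMD_SHIFT is 21), each carrying
 * the given permission bits.
 */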

	__INITDATA
	.balign 4

SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)

	.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether
	 * the CPU supports it or whether it is enabled.  But,
	 * the CPU should ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif

#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif

SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)

SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * Kernel high mapping.
	 *
	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
	 * 512 MiB otherwise.
	 *
	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
	 *
	 * This table is eventually used by the kernel during normal runtime.
	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
	 * or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)
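
/*
 * KERNEL_IMAGE_SIZE/PMD_SIZE is the number of 2 MiB entries required:
 * 256 for the default 512 MiB image, 512 when RANDOMIZE_BASE raises the
 * limit to 1 GiB.
 */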

SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad	level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)

SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)

#undef PMDS

	.data
	.align 16

SYM_DATA(early_gdt_descr,		.word GDT_ENTRIES*8-1)
SYM_DATA_LOCAL(early_gdt_descr_base,	.quad INIT_PER_CPU_VAR(gdt_page))
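
/*
 * Together these two form the 10-byte descriptor consumed by the lgdt in
 * secondary_startup_64: a 16-bit limit (table size in bytes minus one,
 * with 8 bytes per entry) followed by the 64-bit base address of the
 * boot CPU's gdt_page.
 */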

	.align 16
/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip	PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)