startup.S
start64:
// gshen: configure the exception vector tables for each ELx
//
// program the VBARs
//
ldr x1, =el1_vectors
msr VBAR_EL1, x1
ldr x1, =el2_vectors
msr VBAR_EL2, x1
ldr x1, =el3_vectors
msr VBAR_EL3, x1
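//
// (each vector table must be 2KB-aligned, since VBAR_ELx
// bits [10:0] are treated as zero)
//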
//SCR_EL3, Secure Configuration Register
msr SCR_EL3, xzr // Ensure NS bit is initially clear, so secure copy of ICC_SRE_EL1 can be configured
isb
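//
// #15 = SRE | DFB | DIB | Enable (bits 0-3): select the GICv3
// system register interface, disable IRQ and FIQ bypass, and (at
// EL3/EL2) allow the lower ELs to access their ICC_SRE copies
//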
mov x0, #15
msr ICC_SRE_EL3, x0
isb
msr ICC_SRE_EL1, x0 // Secure copy of ICC_SRE_EL1
//
// set the lower exception levels to Non-secure, with no access
// back to EL2 or EL3, and make them AArch64 capable
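//
// SCR_EL3.RW (bit 10)  - EL2 and below are AArch64
// SCR_EL3.SMD (bit 7)  - the SMC instruction is disabled
// SCR_EL3.NS (bit 0)   - EL0/EL1 are Non-secure
// (the SCR_EL3_* macros are assumed to carry these architectural
// bit positions, defined in an included header)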
//
mov x3, #(SCR_EL3_RW | \
SCR_EL3_SMD | \
SCR_EL3_NS) // Set NS bit, to access Non-secure registers
msr SCR_EL3, x3
isb
mov x0, #15
msr ICC_SRE_EL2, x0
isb
msr ICC_SRE_EL1, x0 // Non-secure copy of ICC_SRE_EL1
//
// no traps or VM modifications from the Hypervisor, EL1 is AArch64
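// (HCR_EL2.RW is bit 31; the HCR_EL2_RW macro is assumed to be
// defined to that bit in an included header)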
//
mov x2, #HCR_EL2_RW
msr HCR_EL2, x2
//
// VMID is still significant, even when virtualisation is not
// being used, so ensure VTTBR_EL2 is properly initialised
//
msr VTTBR_EL2, xzr
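// (a zero VTTBR_EL2 also gives VMID 0, so Non-secure EL1&0 TLB
// entries are tagged with a known value)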
//
// VMPIDR_EL2 holds the Virtualization Multiprocessor ID: the
// value returned by Non-secure EL1 reads of MPIDR_EL1.
// VPIDR_EL2 holds the Virtualization Processor ID: the value
// returned by Non-secure EL1 reads of MIDR_EL1.
// Both registers are architecturally UNKNOWN at reset, so they
// must be initialised (even if EL2/virtualization is not being
// used); otherwise Non-secure EL1 reads of MPIDR_EL1/MIDR_EL1
// could return garbage values.
//
// keep MPIDR_EL1.Aff0 (i.e. the CPU no. on Cortex-A cores) in
// x19 (defined by the AAPCS as callee-saved), so we can re-use
// the number later
//
mrs x0, MPIDR_EL1
ubfx x19, x0, #MPIDR_EL1_AFF0_LSB, #MPIDR_EL1_AFF_WIDTH
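// (assuming MPIDR_EL1_AFF0_LSB is 0 and MPIDR_EL1_AFF_WIDTH is 8
// in the included header, this extracts MPIDR_EL1[7:0], e.g.
// 0..3 on a quad-core Cortex-A57 cluster)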
msr VMPIDR_EL2, x0
mrs x0, MIDR_EL1
msr VPIDR_EL2, x0
//
// neither EL3 nor EL2 trap floating point or accesses to CPACR
//
msr CPTR_EL3, xzr
msr CPTR_EL2, xzr
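// (zero clears CPTR_ELx.TCPAC (bit 31) and CPTR_ELx.TFP (bit 10),
// so CPACR_EL1 and FP/SIMD accesses are not trapped)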
//
// SCTLR_ELx may come out of reset with UNKNOWN values, so we
// set the fields to 0 except, possibly, the endianness field(s).
// Note that setting SCTLR_EL2 or the EL0-related fields of
// SCTLR_EL1 is not strictly needed, since we're never in EL2 or EL0
//
#ifdef __ARM_BIG_ENDIAN
mov x0, #(SCTLR_ELx_EE | SCTLR_EL1_E0E)
#else
mov x0, #0
#endif
msr SCTLR_EL3, x0
msr SCTLR_EL2, x0
msr SCTLR_EL1, x0
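// (SCTLR_ELx_EE and SCTLR_EL1_E0E are assumed to be bits 25 and
// 24 in the included header: EE selects big-endian data accesses
// at ELx, E0E does the same for EL0)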
#ifdef CORTEXA
//
// Configure ACTLR_EL[23]
// ----------------------
//
// These bits are IMPLEMENTATION DEFINED, so are different for
// different processors
//
// For Cortex-A57, the controls we set are:
//
// Enable lower level access to CPUACTLR_EL1
// Enable lower level access to CPUECTLR_EL1
// Enable lower level access to L2CTLR_EL1
// Enable lower level access to L2ECTLR_EL1
// Enable lower level access to L2ACTLR_EL1
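//
// (these map to bits 0, 1, 4, 5 and 6 of ACTLR_EL[23]
// respectively, per the Cortex-A57 TRM)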
//
mov x0, #((1 << 0) | \
(1 << 1) | \
(1 << 4) | \
(1 << 5) | \
(1 << 6))
msr ACTLR_EL3, x0
msr ACTLR_EL2, x0
//
// configure CPUECTLR_EL1
//
// These bits are IMP DEF, so may need to be different for
// different processors
//
// SMPEN - bit 6 - Enables the processor to receive cache
// and TLB maintenance operations
//
// Note: For Cortex-A57/53 SMPEN should be set before enabling
// the caches and MMU, or performing any cache and TLB
// maintenance operations.
//
// This register has a defined reset value, so we use a
// read-modify-write sequence to set SMPEN
//
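// S3_1_c15_c2_1 is the op0=3, op1=1, CRn=c15, CRm=c2, op2=1
// encoding of CPUECTLR_EL1, used where the assembler does not
// know this Cortex-A57/A53 register by name
//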
mrs x0, S3_1_c15_c2_1 // Read EL1 CPU Extended Control Register
orr x0, x0, #(1 << 6) // Set the SMPEN bit
msr S3_1_c15_c2_1, x0 // Write EL1 CPU Extended Control Register
isb
#endif
//
// That's the last of the control settings for now
//
// Note: no ISB after all these changes, as registers won't be
// accessed until after an exception return, which is itself a
// context synchronisation event
//
//
// Setup some EL3 stack space, ready for calling some subroutines, below.
//
// Stack space allocation is CPU-specific, so use CPU
// number already held in x19
//
// 2^12 bytes per CPU for the EL3 stacks
//
ldr x0, =__el3_stack
sub x0, x0, x19, lsl #12
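//
// e.g. CPU 0 (x19 == 0) keeps its stack top at __el3_stack,
// CPU 1 at __el3_stack - 0x1000, CPU n at __el3_stack - (n << 12)
//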