Interrupt Handling - Assembly Part
Interrupt Vector Table
For details on interrupt initialization and configuration, see:
https://blog.youkuaiyun.com/weixin_47191387/article/details/131833648
《Xen-hypervisor中SPI中断虚拟化的实现-gic的初始化》
Because the HCR_IMO bit of the HCR_EL2 register is set, physical IRQs are routed to EL2 (Xen), and the CPU vectors into Xen's exception vector table.
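For context, the sketch below shows how such routing bits get set. It assumes Xen's READ_SYSREG/WRITE_SYSREG accessors and the HCR_* bit definitions; route_interrupts_to_el2 is a hypothetical name, and the real configuration happens during Xen's trap initialization and sets more bits than shown:
/* Route physical IRQ/FIQ/SError to EL2, so interrupts taken while
 * a guest runs land in Xen's hyp_traps_vector. */
static void route_interrupts_to_el2(void)
{
    register_t hcr = READ_SYSREG(HCR_EL2);

    hcr |= HCR_IMO  /* physical IRQ -> EL2 */
         | HCR_FMO  /* physical FIQ -> EL2 */
         | HCR_AMO; /* SError/abort -> EL2 */
    WRITE_SYSREG(hcr, HCR_EL2);
    isb(); /* make sure the new routing takes effect */
}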
Xen's exception vector table is as follows:
/*
* Exception vectors.
*/
.macro ventry label
.align 7
b \label
.endm
.align 11
ENTRY(hyp_traps_vector)
//Vectors for exceptions taken from EL2t (current EL, using SP_EL0)
ventry hyp_sync_invalid /* Synchronous EL2t */
ventry hyp_irq_invalid /* IRQ EL2t */
ventry hyp_fiq_invalid /* FIQ EL2t */
ventry hyp_error_invalid /* Error EL2t */
//Vectors for exceptions taken from EL2h (current EL, using SP_EL2)
ventry hyp_sync /* Synchronous EL2h */
ventry hyp_irq /* IRQ EL2h */
ventry hyp_fiq_invalid /* FIQ EL2h */
ventry hyp_error /* Error EL2h */
//Vectors for exceptions taken from 64-bit EL0/EL1 (guest)
ventry guest_sync /* Synchronous 64-bit EL0/EL1 */
ventry guest_irq /* IRQ 64-bit EL0/EL1 */
ventry guest_fiq_invalid /* FIQ 64-bit EL0/EL1 */
ventry guest_error /* Error 64-bit EL0/EL1 */
//Vectors for exceptions taken from 32-bit EL0/EL1 (guest compat)
ventry guest_sync_compat /* Synchronous 32-bit EL0/EL1 */
ventry guest_irq_compat /* IRQ 32-bit EL0/EL1 */
ventry guest_fiq_invalid_compat /* FIQ 32-bit EL0/EL1 */
ventry guest_error_compat /* Error 32-bit EL0/EL1 */
- ventry is a macro that emits a 128-byte-aligned branch to the given label. When the CPU is running at EL0/EL1 and an IRQ arrives, execution jumps to the guest_irq label.
- Xen's vector table handles the Synchronous exception, SError, IRQ, and FIQ vectors for each of EL2t, EL2h, EL0 & EL1 (guest), and EL0 & EL1 compat (guest compat). FIQs are treated as BAD_FIQ and trigger a panic. The table layout follows the fixed architectural offsets, as sketched below.
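Each ventry slot is 128 bytes (.align 7) and the table base is 2KB-aligned (.align 11), as VBAR_EL2 requires; the CPU picks an entry by source state (four groups of four entries) and exception type. The following helper is purely illustrative (not Xen code) and shows how an entry address is derived:
#include <stdint.h>

/* Source-state groups, in the order they appear in the table. */
enum vec_group {
    CUR_EL_SP0  = 0, /* EL2t: current EL, using SP_EL0 */
    CUR_EL_SPX  = 1, /* EL2h: current EL, using SP_EL2 */
    LOWER_EL_64 = 2, /* 64-bit EL0/EL1 (guest) */
    LOWER_EL_32 = 3, /* 32-bit EL0/EL1 (guest compat) */
};

/* Exception types within each group, in table order. */
enum vec_type { VEC_SYNC = 0, VEC_IRQ = 1, VEC_FIQ = 2, VEC_SERROR = 3 };

/* Address the CPU branches to for a given exception. */
static inline uint64_t vector_entry(uint64_t vbar_el2,
                                    enum vec_group g, enum vec_type t)
{
    return vbar_el2 + (uint64_t)g * 0x200 + (uint64_t)t * 0x80;
}
For example, an IRQ taken from a 64-bit guest enters at VBAR_EL2 + 0x480, which is exactly the ventry guest_irq slot above.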
guest_irq
guest_irq handles IRQs taken while the CPU is running at EL0/EL1. It is implemented as follows:
guest_irq:
guest_vector compat=0, iflags=IFLAGS__A__, trap=irq
/*
* Generate a guest vector.
*
* iflags: Correspond to the list of interrupts to unmask
* save_x0_x1: See the description on top of the macro 'entry'
*/
.macro guest_vector compat, iflags, trap, save_x0_x1=1
entry hyp=0, compat=\compat, save_x0_x1=\save_x0_x1
alternative_if_not SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
bl check_pending_guest_serror
alternative_else_nop_endif
bl enter_hypervisor_from_guest_preirq
msr daifclr, \iflags
bl enter_hypervisor_from_guest
alternative_if SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
cbnz x19, 1f
mov x0, sp
bl do_trap_\trap
1:
alternative_else
nop
mov x0, sp
bl do_trap_\trap
alternative_endif
exit hyp=0, compat=\compat
.endm
- guest_irq simply invokes the guest_vector macro with compat=0 (a 64-bit guest), iflags=IFLAGS__A__ (unmask aborts/SError only), and trap=irq. save_x0_x1 keeps its default of 1, so x0/x1 are saved as well.
- guest_vector first calls the entry macro to save the CPU registers on the stack.
- check_pending_guest_serror checks for and handles a pending guest SError.
- enter_hypervisor_from_guest_preirq re-enables the SSBD workaround if the guest had turned it off.
- msr daifclr, \iflags then clears the given DAIF bits, unmasking those exception classes.
- enter_hypervisor_from_guest updates each vIRQ's pending_irq state from the List Registers.
- do_trap_\trap (here, do_trap_irq) is called to handle the interrupt (see the sketch after this list).
- Finally, guest_vector invokes exit, which restores the registers saved on entry and executes eret to return to the guest.
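For reference, do_trap_irq itself is only a thin wrapper in arch/arm/traps.c that hands the interrupt to the GIC driver; roughly (sketched from the upstream sources, details may differ across Xen versions):
/* arch/arm/traps.c (sketch) */
void do_trap_irq(struct cpu_user_regs *regs)
{
    gic_interrupt(regs, 0); /* second argument: 0 = IRQ */
}

void do_trap_fiq(struct cpu_user_regs *regs)
{
    gic_interrupt(regs, 1); /* 1 = FIQ */
}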
entry macro
The on-stack vCPU register frame is defined below. On entry, the guest vCPU registers are pushed into this frame; on exit they are popped again to restore the guest vCPU state.
/* On stack VCPU state */
struct cpu_user_regs
{
/*
* The mapping AArch64 <-> AArch32 is based on D1.20.1 in ARM DDI
* 0487A.d.
*
* AArch64 AArch32
*/
__DECL_REG(x0, r0/*_usr*/);
__DECL_REG(x1, r1/*_usr*/);
__DECL_REG(x2, r2/*_usr*/);
__DECL_REG(x3, r3/*_usr*/);
__DECL_REG(x4, r4/*_usr*/);
__DECL_REG(x5, r5/*_usr*/);
__DECL_REG(x6, r6/*_usr*/);
__DECL_REG(x7, r7/*_usr*/);
__DECL_REG(x8, r8/*_usr*/);
__DECL_REG(x9, r9/*_usr*/);
__DECL_REG(x10, r10/*_usr*/);
__DECL_REG(x11, r11/*_usr*/);
__DECL_REG(x12, r12/*_usr*/);
__DECL_REG(x13, /* r13_usr */ sp_usr);
__DECL_REG(x14, /* r14_usr */ lr_usr);
__DECL_REG(x15, /* r13_hyp */ __unused_sp_hyp);
__DECL_REG(x16, /* r14_irq */ lr_irq);
__DECL_REG(x17, /* r13_irq */ sp_irq);
__DECL_REG(x18, /* r14_svc */ lr_svc);
__DECL_REG(x19, /* r13_svc */ sp_svc);
__DECL_REG(x20, /* r14_abt */ lr_abt);
__DECL_REG(x21, /* r13_abt */ sp_abt);
__DECL_REG(x22, /* r14_und */ lr_und);
__DECL_REG(x23, /* r13_und */ sp_und);
__DECL_REG(x24, r8_fiq);
__DECL_REG(x25, r9_fiq);
__DECL_REG(x26, r10_fiq);
__DECL_REG(x27, r11_fiq);
__DECL_REG(x28, r12_fiq);
__DECL_REG(/* x29 */ fp, /* r13_fiq */ sp_fiq);
__DECL_REG(/* x30 */ lr, /* r14_fiq */ lr_fiq);
register_t sp; /* Valid for hypervisor frames */
/* Return address and mode */
__DECL_REG(pc, pc32); /* ELR_EL2 */
uint64_t cpsr; /* SPSR_EL2 */
uint64_t hsr; /* ESR_EL2 */
/* The kernel frame should be 16-byte aligned. */
uint64_t pad0;
/* Outer guest frame only from here on... */
union {
uint64_t spsr_el1; /* AArch64 */
uint32_t spsr_svc; /* AArch32 */
};
/* AArch32 guests only */
uint32_t spsr_fiq, spsr_irq, spsr_und, spsr_abt;
/* AArch64 guests only */
uint64_t sp_el0;
uint64_t sp_el1, elr_el1;
};
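The __DECL_REG macro overlays the AArch64 name and the AArch32 name of the same register slot in an anonymous union. A simplified sketch of its definition (based on Xen's public arch-arm header; the guest-facing variant differs slightly):
/* One 64-bit slot, addressable by its AArch64 or AArch32 name. */
#define __DECL_REG(n64, n32) union {    \
    uint64_t n64;                       \
    uint32_t n32;                       \
}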
entry is a macro that pushes the general-purpose registers (x0-x29 and lr) onto the stack, together with elr_el2, spsr_el2, and esr_el2. It is implemented as follows:
/*
* Save state on entry to hypervisor, restore on exit
*
* save_x0_x1: Does the macro need to save x0/x1? Defaults to 1.
* If 0, we rely on x0/x1 having already been saved at the correct
* position on the stack.
*/
.macro entry, hyp, compat, save_x0_x1=1
sub sp, sp, #(UREGS_SPSR_el1 - UREGS_LR) /* CPSR, PC, SP, LR */
.if \hyp == 0 /* Guest mode */
clobber_gp_top_halves compat=\compat, save_x0_x1=\save_x0_x1
.endif
push x28, x29
push x26, x27
push x24, x25
push x22, x23
push x20, x21
push x18, x19
push x16, x17
push x14, x15
push x12, x13
push x10, x11
push x8, x9
push x6, x7
push x4, x5
push x2, x3
/*
* The caller may already have saved x0/x1 on the stack at the
* correct address and corrupt them with another value. Only
* save them if save_x0_x1 == 1.
*/
.if \save_x0_x1 == 1
push x0, x1
.else
sub sp, sp, #16
.endif
.if \hyp == 1 /* Hypervisor mode */
add x21, sp, #UREGS_kernel_sizeof
.else /* Guest mode */
entry_guest \compat
mov x21, ~0 /* sp only valid for hyp frame XXX */
.endif
stp lr, x21, [sp, #UREGS_LR]
mrs x21, elr_el2
str x21, [sp, #UREGS_PC]
add x21, sp, #UREGS_CPSR
mrs x22, spsr_el2
mrs x23, esr_el2
stp x22, x23, [x21]
.endm
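The UREGS_* constants used above (UREGS_LR, UREGS_PC, UREGS_CPSR, UREGS_SPSR_el1, UREGS_kernel_sizeof, ...) are byte offsets into struct cpu_user_regs, generated at build time by the asm-offsets mechanism in arch/arm/arm64/asm-offsets.c. The sketch below mimics that mechanism with printf for illustration only (the real DEFINE() emits assembler markers that the build scrapes into a generated header; struct cpu_user_regs is assumed to be the one shown above):
#include <stdio.h>
#include <stddef.h>

/* Illustration only: print the offsets instead of emitting markers. */
#define DEFINE(sym, val)      printf("#define %-20s %zu\n", #sym, (size_t)(val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(str, mem))

int main(void)
{
    OFFSET(UREGS_LR,       struct cpu_user_regs, lr);
    OFFSET(UREGS_SP,       struct cpu_user_regs, sp);
    OFFSET(UREGS_PC,       struct cpu_user_regs, pc);
    OFFSET(UREGS_CPSR,     struct cpu_user_regs, cpsr);
    OFFSET(UREGS_SPSR_el1, struct cpu_user_regs, spsr_el1);
    DEFINE(UREGS_kernel_sizeof, sizeof(struct cpu_user_regs));
    return 0;
}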
entry_guest saves the guest's SPSR_el1 plus, for AArch64 guests, SP_el0, SP_el1, and ELR_el1 (for AArch32 guests, the banked SPSR_fiq/irq/und/abt instead); exit_guest later pops them to restore the guest vCPU registers.
/*
* Save/restore guest mode specific state, outer stack frame
*/
.macro entry_guest, compat
add x21, sp, #UREGS_SPSR_el1
mrs x23, SPSR_el1
str x23, [x21]
.if \compat == 0 /* Aarch64 mode */
add x21, sp, #UREGS_SP_el0
mrs x22, SP_el0
str x22, [x21]
add x21, sp, #UREGS_SP_el1
mrs x22, SP_el1
mrs x23, ELR_el1
stp x22, x23, [x21]
.else /* Aarch32 mode */
add x21, sp, #UREGS_SPSR_fiq
mrs x22, SPSR_fiq
mrs x23, SPSR_irq
stp w22, w23, [x21]
add x21, sp, #UREGS_SPSR_und
mrs x22, SPSR_und
mrs x23, SPSR_abt
stp w22, w23, [x21]
.endif
.endm
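For an AArch64 guest, the effect of entry_guest corresponds to the following C (illustrative only; save_guest_el1_state is a hypothetical name, and READ_SYSREG is Xen's system-register accessor):
/* C equivalent of the AArch64 branch of entry_guest. */
static void save_guest_el1_state(struct cpu_user_regs *regs)
{
    regs->spsr_el1 = READ_SYSREG(SPSR_EL1);
    regs->sp_el0   = READ_SYSREG(SP_EL0);
    regs->sp_el1   = READ_SYSREG(SP_EL1);
    regs->elr_el1  = READ_SYSREG(ELR_EL1);
}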
exit macro
The exit macro is implemented as follows:
.macro exit, hyp, compat
.if \hyp == 0 /* Guest mode */
bl leave_hypervisor_to_guest /* Mask IRQ on return */
exit_guest \compat
.endif
b return_from_trap
.endm
- leave_hypervisor_to_guest performs the final work needed before the guest context is restored (detailed below).
- exit_guest pops the saved SPSR_el1, SP_el0, SP_el1, and ELR_el1 from the stack to restore the guest vCPU registers.
- return_from_trap restores the guest vCPU registers x0-x29 and lr, writes the saved PC and CPSR back into elr_el2 and spsr_el2, and finally executes eret to jump back into the guest context.
leave_hypervisor_to_guest is implemented as follows:
/*
* Actions that needs to be done before entering the guest. This is the
* last thing executed before the guest context is fully restored.
*
* The function will return with IRQ masked.
*/
void leave_hypervisor_to_guest(void)
{
local_irq_disable();
/*
* check_for_vcpu_work() may return true if there is more work to do before
* the vCPU can safely resume. This gives us an opportunity to deschedule
* the vCPU if needed.
*/
while ( check_for_vcpu_work() ) // not covered in this article
check_for_pcpu_work(); // handled via do_softirq
check_for_pcpu_work();
vgic_sync_to_lrs(); // mainly adds pending IRQs to the List Registers
/*
* If the SErrors handle option is "DIVERSE", we have to prevent
* slipping the hypervisor SError to guest. In this option, before
* returning from trap, we have to synchronize SErrors to guarantee
* that the pending SError would be caught in hypervisor.
*
* If option is NOT "DIVERSE", SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
* will be set to cpu_hwcaps. This means we can use the alternative
* to skip synchronizing SErrors for other SErrors handle options.
*/
SYNCHRONIZE_SERROR(SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT);
/*
* The hypervisor runs with the workaround always present.
* If the guest wants it disabled, so be it...
*/
if ( needs_ssbd_flip(current) )
arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2_FID, 0, NULL);
}
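SYNCHRONIZE_SERROR expands to a dsb sy; isb pair that forces any in-flight SError to be taken now; when the SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT capability is set, the alternatives framework patches it into NOPs. Roughly (a from-memory sketch; consult Xen's ARM headers for the exact definition):
/* Synchronize pending SErrors unless the feature says to skip it. */
#define SYNCHRONIZE_SERROR(feat)                        \
    do {                                                \
        ASSERT(local_abort_is_enabled());               \
        asm volatile(ALTERNATIVE("dsb sy; isb",         \
                                 "nop; nop", feat)      \
                     : : : "memory");                   \
    } while ( 0 )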
Finally, return_from_trap restores the guest vCPU registers x0-x29, reloads elr_el2 and spsr_el2, and executes eret to jump back into the guest context:
return_from_trap:
msr daifset, #IFLAGS___I_ /* Mask interrupts */
ldr x21, [sp, #UREGS_PC] /* load ELR */
ldr x22, [sp, #UREGS_CPSR] /* load SPSR */
pop x0, x1
pop x2, x3
pop x4, x5
pop x6, x7
pop x8, x9
msr elr_el2, x21 /* set up the return data */
msr spsr_el2, x22
pop x10, x11
pop x12, x13
pop x14, x15
pop x16, x17
pop x18, x19
pop x20, x21
pop x22, x23
pop x24, x25
pop x26, x27
pop x28, x29
ldr lr, [sp], #(UREGS_SPSR_el1 - UREGS_LR) /* CPSR, PC, SP, LR */
eret /* return to the guest */
sb /* speculation barrier: prevent speculation past the eret */