Interrupt Initialization
The ARM architecture defines seven exception types, each with its own fixed entry in the exception vector table; when an exception is taken, the processor automatically jumps to the corresponding entry. For ARMv4 and later, the base of the vector table is selected by the V bit (bit 13) of the coprocessor 15 (CP15) control register c1: when V=0 the table starts at 0x00000000, and when V=1 it starts at 0xffff0000. Suppose we have established that the vector table lives at 0xffff0000, so the IRQ entry is at 0xffff0018; then whenever an IRQ exception is taken, the processor jumps to the virtual address 0xffff0018, which holds a single branch instruction: W(b) vector_irq + stubs_offset
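As a quick orientation, here is a minimal C sketch (not kernel code; the slot names and the assumption V=1 are mine) of how each exception's entry address follows from the vector base selected by the V bit:

#include <stdio.h>

#define CR_V (1UL << 13) /* SCTLR.V: high vectors when set */

static unsigned long vector_base(unsigned long sctlr)
{
	return (sctlr & CR_V) ? 0xffff0000UL : 0x00000000UL;
}

int main(void)
{
	static const char *slots[] = {
		"reset", "undef", "swi", "pabt",
		"dabt", "addrexcptn", "irq", "fiq",
	};
	unsigned long base = vector_base(CR_V); /* assume V = 1 */
	int i;

	/* each slot holds one 4-byte instruction, so IRQ sits at base + 0x18 */
	for (i = 0; i < 8; i++)
		printf("%-10s -> 0x%08lx\n", slots[i], base + 4UL * i);
	return 0;
}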
asmlinkage void __init
start_kernel(void)
+-- setup_arch(&command_line);
|   +-- paging_init(mdesc);
|       +-- devicemaps_init(mdesc);
|           +-- vectors = early_alloc(PAGE_SIZE);
|           +-- early_trap_init(vectors);
+-- early_irq_init();
+-- init_IRQ();
+-- ...
+-- softirq_init();
+-- if (!irqs_disabled())
|       printk(KERN_CRIT "start_kernel(): bug: interrupts were "
|              "enabled early\n");
+-- early_boot_irqs_disabled = false;
+-- local_irq_enable();
//The exception vector table is copied into a region starting at vectors. In older
//kernel versions that address was given by CONFIG_VECTORS_BASE; in recent kernels
//it is determined by bit 13 of a variable named cr_alignment, saved by the kernel boot code:
void __init
early_trap_init(void *vectors_base)
{
//vectors_base may be 0xffff0000 or 0x00000000, depending on whether vectors_high() is true
//@arch/arm/mm/mmu.c
//#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
//vectors_high() in turn is defined in cp15.h:
//@arch/arm/include/asm/cp15.h
/*
* #define CR_V (1 << 13) // Vectors relocated to 0xffff0000
*
* #if __LINUX_ARM_ARCH__ >= 4
* #define vectors_high() (cr_alignment & CR_V)
* #else
* #define vectors_high() (0)
* #endif
*/
unsigned long vectors = (unsigned long)vectors_base;
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
extern char __kuser_helper_start[], __kuser_helper_end[];
int kuser_sz = __kuser_helper_end - __kuser_helper_start;
/*
* Copy the vectors, stubs and kuser helpers (in entry-armv.S)
* into the vector page, mapped at 0xffff0000, and ensure these
* are visible to the instruction stream.
*/
//Copy the various tables into the vectors region:
memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start); //the exception vector table
memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start); //the dispatch stubs
memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz); //the kuser helpers
/*
* Copy signal return handlers into the vector page, and
* set sigreturn to be a pointer to these.
*/
memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes, sizeof(sigreturn_codes));
memcpy((void *)KERN_RESTART_CODE, syscall_restart_code, sizeof(syscall_restart_code));
flush_icache_range(vectors, vectors + PAGE_SIZE);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}
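The three memcpy() calls give the vector page a fixed internal layout. A hedged sketch of that layout (the kuser size below is a placeholder; the real one comes from the linker symbols):

#include <stdio.h>

int main(void)
{
	unsigned long vectors = 0xffff0000UL; /* assumes high vectors */
	unsigned long kuser_sz = 0x60; /* placeholder for __kuser_helper_end - __kuser_helper_start */

	printf("0x%08lx  exception vectors (__vectors_start.._end, 8 slots)\n", vectors);
	printf("0x%08lx  dispatch stubs    (__stubs_start.._end)\n", vectors + 0x200);
	printf("0x%08lx  kuser helpers     (end flush with top of page)\n",
	       vectors + 0x1000 - kuser_sz);
	return 0;
}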
/*
* Set up the device mappings. Since we clear out the page tables for all
* mappings above VMALLOC_START, we will remove any debug device mappings.
* This means you have to be careful how you debug this function, or any
* called function. This means you can't use any function or debugging
* method which may touch any device, otherwise the kernel _will_ crash.
*/
static void __init devicemaps_init(struct machine_desc *mdesc)
{
struct map_desc map;
unsigned long addr;
void *vectors;
//Allocate the vector page early.
vectors = early_alloc(PAGE_SIZE);
early_trap_init(vectors);
for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
//Map the kernel if it is XIP. It is always first in the module area.
#ifdef CONFIG_XIP_KERNEL
map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
map.virtual = MODULES_VADDR;
map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
map.type = MT_ROM;
create_mapping(&map);
#endif
//Map the cache flushing regions.
#ifdef FLUSH_BASE
map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
map.virtual = FLUSH_BASE;
map.length = SZ_1M;
map.type = MT_CACHECLEAN;
create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
map.virtual = FLUSH_BASE_MINICACHE;
map.length = SZ_1M;
map.type = MT_MINICLEAN;
create_mapping(&map);
#endif
//Create a mapping for the machine vectors at the high-vectors
//location (0xffff0000). If we aren't using high-vectors, also
//create a mapping at the low-vectors virtual address.
map.pfn = __phys_to_pfn(virt_to_phys(vectors));
map.virtual = 0xffff0000; //by default the vector table is mapped at 0xffff0000
map.length = PAGE_SIZE;
map.type = MT_HIGH_VECTORS;
create_mapping(&map);
if (!vectors_high()) {
map.virtual = 0; //if vectors_high() is false, also map the vector table at 0x00000000
map.type = MT_LOW_VECTORS;
create_mapping(&map);
}
/*
* Ask the machine support to map in the statically mapped devices.
*/
if (mdesc->map_io)
mdesc->map_io();
/*
* Finally flush the caches and tlb to ensure that we're in a
* consistent state wrt the writebuffer. This also ensures that
* any write-allocated cache lines in the vector page are written
* back. After this point, we can start to touch devices again.
*/
local_flush_tlb_all();
flush_cache_all();
}
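Each create_mapping() call above is driven by a struct map_desc describing one region; abridged from arch/arm/include/asm/map.h (the field comments are mine):

//@arch/arm/include/asm/map.h
struct map_desc {
	unsigned long virtual; /* virtual address the region should appear at */
	unsigned long pfn;     /* physical page frame backing it */
	unsigned long length;  /* size of the region */
	unsigned int type;     /* MT_* memory type, e.g. MT_HIGH_VECTORS */
};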
#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high() (cr_alignment & CR_V)
#else
#define vectors_high() (0)
#endif
So for ARMv4 and later ARM chips vectors_high() depends on cr_alignment, while on
pre-ARMv4 chips vectors_high() is the constant 0, i.e. the vector table is always
mapped at 0x00000000. So where does cr_alignment come from?
//The exception dispatch stubs
//__stubs_start@arch/arm/kernel/entry-armv.S:
__stubs_start:
/*
* Interrupt dispatcher
*/
//vector_stub is a macro, described later
vector_stub irq, IRQ_MODE, 4
.long __irq_usr @ 0 (USR_26 / USR_32)
.long __irq_invalid @ 1 (FIQ_26 / FIQ_32)
.long __irq_invalid @ 2 (IRQ_26 / IRQ_32)
.long __irq_svc @ 3 (SVC_26 / SVC_32)
.long __irq_invalid @ 4
.long __irq_invalid @ 5
.long __irq_invalid @ 6
...
.word vector_swi
.globl __stubs_end
__stubs_end:
//__vectors_start@arch/arm/kernel/entry-armv.S:
.globl __vectors_start
__vectors_start:
ARM( swi SYS_ERROR0 ) //reset: __vectors_start + 0x00
THUMB( svc #0 ) //Thumb-2 encoding of the same reset entry
THUMB( nop )
W(b) vector_und + stubs_offset //undefined instruction: __vectors_start + 0x04
W(ldr) pc, .LCvswi + stubs_offset //software interrupt: __vectors_start + 0x08
W(b) vector_pabt + stubs_offset //prefetch abort: __vectors_start + 0x0c
W(b) vector_dabt + stubs_offset //data abort: __vectors_start + 0x10
W(b) vector_addrexcptn + stubs_offset //address exception: __vectors_start + 0x14
W(b) vector_irq + stubs_offset //IRQ: __vectors_start + 0x18
W(b) vector_fiq + stubs_offset //FIQ: __vectors_start + 0x1c
.globl __vectors_end
__vectors_end:
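The W(b) vector_* + stubs_offset branches above are assembled at their link-time addresses but execute from the vector page, which is why entry-armv.S defines .equ stubs_offset, __vectors_start + 0x200 - __stubs_start. A b instruction encodes a pc-relative displacement, so adding stubs_offset at link time produces exactly the stub's run-time offset once early_trap_init() has copied the vectors to the page base and the stubs to base + 0x200. A worked sketch (the link-time addresses are made up for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long vectors_start = 0xc0020000UL;    /* __vectors_start */
	unsigned long stubs_start = 0xc0021000UL;      /* __stubs_start */
	unsigned long vector_irq = stubs_start + 0x80; /* somewhere inside the stubs */

	/* .equ stubs_offset, __vectors_start + 0x200 - __stubs_start */
	long stubs_offset = (long)(vectors_start + 0x200 - stubs_start);

	/* the displacement the assembler encodes at the IRQ slot... */
	long link_delta = (long)(vector_irq + stubs_offset) - (long)(vectors_start + 0x18);
	/* ...equals the run-time distance from 0xffff0018 to the copied stub */
	long run_delta = (long)(0xffff0200UL + (vector_irq - stubs_start)) -
			 (long)(0xffff0000UL + 0x18);

	printf("link-time delta 0x%lx, run-time delta 0x%lx\n", link_delta, run_delta);
	return 0;
}

Immediately after __vectors_end, still in entry-armv.S, comes the definition of cr_alignment itself: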
.data
.globl cr_alignment
.globl cr_no_alignment
cr_alignment:
.space 4
cr_no_alignment:
.space 4
//cr_alignment is also consulted and updated at run time, for example here:
//@arch/arm/mm/alignment.c
static int __init alignment_init(void)
{
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *res;
res = proc_create("cpu/alignment", S_IWUSR | S_IRUGO, NULL,
&alignment_proc_fops);
if (!res)
return -ENOMEM;
#endif
if (cpu_is_v6_unaligned()) {
cr_alignment &= ~CR_A;
cr_no_alignment &= ~CR_A;
set_cr(cr_alignment);
ai_usermode = safe_usermode(ai_usermode, false);
}
hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
"alignment exception");
/*
* ARMv6K and ARMv7 use fault status 3 (0b00011) as Access Flag section
* fault, not as alignment error.
*
* TODO: handle ARMv6K properly. Runtime check for 'K' extension is
* needed.
*/
if (cpu_architecture() <= CPU_ARCH_ARMv6) {
hook_fault_code(3, do_alignment, SIGBUS, BUS_ADRALN,
"alignment exception");
}
return 0;
}
fs_initcall(alignment_init);
//@arch/arm/include/asm/cp15.h
extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
extern unsigned long cr_alignment; /* defined in entry-armv.S */
//@arch/arm/kernel/head.S
ENTRY(secondary_startup)
/*
* Common entry point for secondary CPUs.
*
* Ensure that we're in SVC mode, and IRQs are disabled. Lookup
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
mrc p15, 0, r9, c0, c0 @ get processor id //read the processor id from CP15 c0 into r9
bl __lookup_processor_type //returns the matching proc_info_list pointer in r5 (0 if none matches)
movs r10, r5 @ invalid processor? //r5 ==> r10, so r10 now points to the struct proc_info_list
moveq r0, #'p' @ yes, error 'p'
THUMB( it eq ) @ force fixup-able long branch encoding
beq __error_p
/*
* Use the page tables supplied from __cpu_up.
*/
adr r4, __secondary_data
ldmia r4, {r5, r7, r12} @ address to jump to after
sub lr, r4, r5 @ mmu has been enabled
ldr r4, [r7, lr] @ get secondary_data.pgdir
add r7, r7, #4
ldr r8, [r7, lr] @ get secondary_data.swapper_pg_dir
adr lr, BSYM(__enable_mmu) @ return address
mov r13, r12 @ __secondary_switched address
//So pc is set to struct proc_info_list.__cpu_flush;
//for __arm1020_proc_info that means jumping to the "b __arm1020_setup" instruction
ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor
@ (return control reg)
THUMB( add r12, r10, #PROCINFO_INITFUNC )
THUMB( mov pc, r12 )
ENDPROC(secondary_startup)
//@arch/arm/kernel/head-common.S
__lookup_processor_type:
adr r3, __lookup_processor_type_data
ldmia r3, {r4 - r6} @ r5:__proc_info_begin r6:__proc_info_end
sub r3, r3, r4 @ get offset between virt&phys
add r5, r5, r3 @ convert virt addresses to
add r6, r6, r3 @ physical address space
1: ldmia r5, {r3, r4} @ value, mask
and r4, r4, r9 @ mask wanted bits
teq r3, r4
beq 2f
add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list) //advance r5 to the next struct proc_info_list
cmp r5, r6
blo 1b
mov r5, #0 @ unknown processor
2: mov pc, lr
ENDPROC(__lookup_processor_type)
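In C terms the loop above is roughly the following runnable sketch with a stand-in table (the real code walks the linker-collected .proc.info.init entries after converting their virtual addresses to physical, since the MMU is still off at this point):

#include <stdio.h>

/* reduced stand-in for struct proc_info_list (full definition below) */
struct proc_info_list {
	unsigned int cpu_val;
	unsigned int cpu_mask;
	const char *cpu_name;
};

/* stand-in for the table the linker collects into .proc.info.init;
 * the value/mask pair is the one from __arm1020_proc_info below */
static struct proc_info_list proc_info[] = {
	{ 0x4104a200, 0xff0ffff0, "arm1020" },
};

static struct proc_info_list *lookup_processor_type(unsigned int cpuid)
{
	unsigned int n = sizeof(proc_info) / sizeof(proc_info[0]);
	unsigned int i;

	for (i = 0; i < n; i++)
		if ((cpuid & proc_info[i].cpu_mask) == proc_info[i].cpu_val)
			return &proc_info[i];
	return NULL; /* unknown processor: the asm returns r5 = 0 */
}

int main(void)
{
	/* a revision variant that the mask tolerates still matches */
	struct proc_info_list *p = lookup_processor_type(0x4114a200);

	printf("%s\n", p ? p->cpu_name : "unknown");
	return 0;
}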
//struct proc_info_list@arch/arm/include/asm/procinfo.h
/*
* Note! struct processor is always defined if we're
* using MULTI_CPU, otherwise this entry is unused,
* but still exists.
*
* NOTE! The following structure is defined by assembly
* language, NOT C code. For more information, check:
* arch/arm/mm/proc-*.S and arch/arm/kernel/head.S
*/
struct proc_info_list {
unsigned int cpu_val;
unsigned int cpu_mask;
unsigned long __cpu_mm_mmu_flags; /* used by head.S */
unsigned long __cpu_io_mmu_flags; /* used by head.S */
unsigned long __cpu_flush; /* used by head.S */
const char *arch_name;
const char *elf_name;
unsigned int elf_hwcap;
const char *cpu_name;
struct processor *proc;
struct cpu_tlb_fns *tlb;
struct cpu_user_fns *user;
struct cpu_cache_fns *cache;
};
//@arch/arm/kernel/asm-offsets.c
DEFINE(PROCINFO_INITFUNC, offsetof(struct proc_info_list, __cpu_flush));
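PROCINFO_INITFUNC is how head.S learns a C struct offset without including C headers: asm-offsets.c is compiled to assembly, and the DEFINE() macro from include/linux/kbuild.h emits a marker line that the build system turns into a #define in the generated asm-offsets.h:

//@include/linux/kbuild.h
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
//The build greps "->PROCINFO_INITFUNC <value>" out of the generated assembly
//and writes "#define PROCINFO_INITFUNC <value>" into asm-offsets.h,
//so head.S can use "add pc, r10, #PROCINFO_INITFUNC".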
//@arch/arm/kernel/head-common.S
.align 2
.type __lookup_processor_type_data, %object
__lookup_processor_type_data:
.long .
.long __proc_info_begin
.long __proc_info_end
.size __lookup_processor_type_data, . - __lookup_processor_type_data
//@arch/arm/kernel/vmlinux.lds.S
#define PROC_INFO \
. = ALIGN(4); \
VMLINUX_SYMBOL(__proc_info_begin) = .; \
*(.proc.info.init) \
VMLINUX_SYMBOL(__proc_info_end) = .;
//@arch/arm/mm/proc-arm1020.S
.align
.section ".proc.info.init", #alloc, #execinstr
.type __arm1020_proc_info,#object
__arm1020_proc_info: //__arm1020_proc_info is placed in the ".proc.info.init" section
.long 0x4104a200 @ ARM 1020T (Architecture v5T)
.long 0xff0ffff0
.long PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
.long PMD_TYPE_SECT | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
b __arm1020_setup //jump to __arm1020_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
.long cpu_arm1020_name
.long arm1020_processor_functions
.long v4wbi_tlb_fns
.long v4wb_user_fns
.long arm1020_cache_fns
.size __arm1020_proc_info, . - __arm1020_proc_info
//The setup routine does CPU-specific initialization and returns the control
//register value in r0. It is shown here for the ARM920 (@arch/arm/mm/proc-arm920.S);
//the __arm1020_setup routine referenced above is analogous:
.type __arm920_setup, #function
__arm920_setup:
mov r0, #0
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4
#endif
adr r5, arm920_crval
ldmia r5, {r5, r6}
mrc p15, 0, r0, c1, c0 @ get control register v4
bic r0, r0, r5
orr r0, r0, r6
mov pc, lr
.size __arm920_setup, . - __arm920_setup
/*
* R
* .RVI ZFRS BLDP WCAM
* ..11 0001 ..11 0101
*
*/
.type arm920_crval, #object
arm920_crval:
crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130
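The adr/ldmia/bic/orr sequence above just rewrites the control register value using the crval pair; as a C sketch (apply_crval is a made-up name):

/* what __arm920_setup leaves in r0 for __enable_mmu: clear the bits in
 * "clear", then set the bits in "mmuset" (ucset is the no-MMU variant) */
unsigned long apply_crval(unsigned long cr)
{
	const unsigned long clear = 0x00003f3f;
	const unsigned long mmuset = 0x00003135;

	return (cr & ~clear) | mmuset;
}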
//@arch/arm/kernel/head.S
/*
* Kernel startup entry point.
* ---------------------------
*
* This is normally called from the decompressor code. The requirements
* are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
* r1 = machine nr, r2 = atags or dtb pointer.
*
* This code is mostly position independent, so if you link the kernel at
* 0xc0008000, you call this at __pa(0xc0008000).
*
* See linux/arch/arm/tools/mach-types for the complete list of machine
* numbers for r1.
*
* We're trying to keep crap to a minimum; DO NOT add any machine specific
* crap here - that's what the boot loader (or in extreme, well justified
* circumstances, zImage) is for.
*/
.arm
__HEAD
ENTRY(stext)
THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM.
THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
THUMB( .thumb ) @ switch to Thumb now.
THUMB(1: )
setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
@ and irqs disabled
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type @ r5=procinfo r9=cpuid
movs r10, r5 @ invalid processor (r5=0)?
THUMB( it eq ) @ force fixup-able long branch encoding
beq __error_p @ yes, error 'p'
#ifndef CONFIG_XIP_KERNEL
adr r3, 2f
ldmia r3, {r4, r8}
sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
add r8, r8, r4 @ PHYS_OFFSET
#else
ldr r8, =PHYS_OFFSET @ always constant in this case
#endif
/*
* r1 = machine no, r2 = atags or dtb,
* r8 = phys_offset, r9 = cpuid, r10 = procinfo
*/
bl __vet_atags
#ifdef CONFIG_SMP_ON_UP
bl __fixup_smp
#endif
#ifdef CONFIG_ARM_PATCH_PHYS_VIRT
bl __fixup_pv_table
#endif
bl __create_page_tables
/*
* The following calls CPU specific code in a position independent
* manner. See arch/arm/mm/proc-*.S for details. r10 = base of
* xxx_proc_info structure selected by __lookup_processor_type
* above. On return, the CPU will be ready for the MMU to be
* turned on, and r0 will hold the CPU control register value.
*/
//Load the address of __mmap_switched into r13; at the end of the __enable_mmu path
//(in __turn_mmu_on) the code will jump to the address held in r13,
//and __mmap_switched finally branches to start_kernel()
ldr r13, =__mmap_switched @ address to jump to after
@ mmu has been enabled
adr lr, BSYM(1f) @ return (PIC) address
mov r8, r4 @ set TTBR1 to swapper_pg_dir
ARM( add pc, r10, #PROCINFO_INITFUNC ) @//r10 was set earlier to the start of the struct proc_info_list
THUMB( add r12, r10, #PROCINFO_INITFUNC ) @//so r10 + #PROCINFO_INITFUNC is struct proc_info_list.__cpu_flush
THUMB( mov pc, r12 ) @//for __arm1020_proc_info that is the "b __arm1020_setup" instruction
1: b __enable_mmu @//__arm1020_setup ends with "mov pc, lr",
ENDPROC(stext) @//and lr was set to BSYM(1f), i.e. this "b __enable_mmu",
.ltorg @//so execution continues at __enable_mmu
#ifndef CONFIG_XIP_KERNEL
2: .long .
.long PAGE_OFFSET
#endif
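The 2: .long . / .long PAGE_OFFSET pair makes the PHYS_OFFSET computation position independent: adr yields the run-time (physical) address of label 2, while the stored .long . holds its link-time (virtual) address, so their difference is the phys-to-virt delta. A C sketch with illustrative addresses:

#include <stdio.h>

int main(void)
{
	unsigned int r3 = 0x60008040u; /* adr r3, 2f: run-time (physical) address of "2:" */
	unsigned int r4 = 0xc0008040u; /* ".long .": link-time (virtual) address of "2:" */
	unsigned int r8 = 0xc0000000u; /* ".long PAGE_OFFSET" */

	unsigned int delta = r3 - r4; /* PHYS_OFFSET - PAGE_OFFSET (wraps mod 2^32) */
	r8 += delta;                  /* r8 = PHYS_OFFSET */

	printf("PHYS_OFFSET = 0x%08x\n", r8); /* 0x60000000 with these numbers */
	return 0;
}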
//__enable_mmu@arch/arm/kernel/head.S
/*
* Setup common bits before finally enabling the MMU. Essentially
* this is just loading the page table pointer and domain access
* registers.
*
* r0 = cp#15 control register
* r1 = machine ID
* r2 = atags or dtb pointer
* r4 = page table pointer
* r9 = processor ID
* r13 = *virtual* address to jump to upon completion
*/
__enable_mmu:
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
orr r0, r0, #CR_A
#else
bic r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
bic r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #CR_I
#endif
#ifdef CONFIG_ARM_LPAE
mov r5, #0
mcrr p15, 0, r4, r5, c2 @ load TTBR0
#else
mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT))
mcr p15, 0, r5, c3, c0, 0 @ load domain access register
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
#endif
b __turn_mmu_on @//execution continues at __turn_mmu_on
ENDPROC(__enable_mmu)
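The DACR value loaded above is assembled with domain_val(), which places a 2-bit access type in the field belonging to each domain; abridged from arch/arm/include/asm/domain.h:

//@arch/arm/include/asm/domain.h (abridged)
#define DOMAIN_KERNEL	0
#define DOMAIN_TABLE	0
#define DOMAIN_USER	1
#define DOMAIN_IO	2

#define DOMAIN_NOACCESS	0	/* every access faults */
#define DOMAIN_CLIENT	1	/* accesses checked against the page tables */
#define DOMAIN_MANAGER	3	/* accesses never checked */

#define domain_val(dom, type)	((type) << (2 * (dom)))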
//__turn_mmu_on@arch/arm/kernel/head.S
/*
* Enable the MMU. This completely changes the structure of the visible
* memory space. You will not be able to trace execution through this.
* If you have an enquiry about this, *please* check the linux-arm-kernel
* mailing list archives BEFORE sending another post to the list.
*
* r0 = cp#15 control register
* r1 = machine ID
* r2 = atags or dtb pointer
* r9 = processor ID
* r13 = *virtual* address to jump to upon completion
*
* other registers depend on the function called upon completion
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(__turn_mmu_on)
mov r0, r0
instr_sync
mcr p15, 0, r0, c1, c0, 0 @ write control reg
mrc p15, 0, r3, c0, c0, 0 @ read id reg
instr_sync
mov r3, r3
mov r3, r13 @//near the end of stext the address of __mmap_switched was loaded into r13,
mov pc, r3 @//so setting pc from r13 jumps to __mmap_switched
__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
.popsection
//__mmap_switched@arch/arm/kernel/head.S
/*
* The following fragment of code is executed with the MMU on in MMU mode,
* and uses absolute addresses; this is not position independent.
*
* r0 = cp#15 control register
* r1 = machine ID
* r2 = atags/dtb pointer
* r9 = processor ID
*/
__INIT
__mmap_switched:
adr r3, __mmap_switched_data
ldmia r3!, {r4, r5, r6, r7} @//load the addresses of __data_loc, _sdata, __bss_start and _end
cmp r4, r5 @ Copy data segment if needed //does __data_loc equal _sdata?
1: cmpne r5, r6 @//if not, copy the data segment into place
ldrne fp, [r4], #4
strne fp, [r5], #4
bne 1b
mov fp, #0 @ Clear BSS (and zero fp)
1: cmp r6, r7 @//zero the BSS
strcc fp, [r6],#4
bcc 1b
ARM( ldmia r3, {r4, r5, r6, r7, sp}) @//r3 now points at the processor_id entry of __mmap_switched_data
THUMB( ldmia r3, {r4, r5, r6, r7} )
THUMB( ldr sp, [r3, #16] )
str r9, [r4] @ Save processor ID
str r1, [r5] @ Save machine type
str r2, [r6] @ Save atags pointer
bic r4, r0, #CR_A @ Clear 'A' bit
stmia r7, {r0, r4} @ Save control register values //r7 = &cr_alignment: r0 is stored to cr_alignment, r4 to cr_no_alignment
b start_kernel @//hand over to the C kernel initialization -- start_kernel
ENDPROC(__mmap_switched)
.align 2
.type __mmap_switched_data, %object
__mmap_switched_data:
.long __data_loc @ r4
.long _sdata @ r5
.long __bss_start @ r6
.long _end @ r7
.long processor_id @ r4
.long __machine_arch_type @ r5
.long __atags_pointer @ r6
.long cr_alignment @ r7 //".long sym" emits a 32-bit word initialized with the
.long init_thread_union + THREAD_START_SP @ sp //link-time address of sym, so the r7 entry holds
.size __mmap_switched_data, . - __mmap_switched_data //the address of the cr_alignment variable
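So the stmia r7, {r0, r4} in __mmap_switched is where cr_alignment is actually filled in; in C terms (a sketch; CR_A is bit 1 of the control register):

extern unsigned long cr_alignment, cr_no_alignment; /* adjacent words in .data */

#define CR_A (1UL << 1) /* alignment-fault enable bit */

static void save_control_regs(unsigned long r0 /* CP15 c1 value */)
{
	cr_alignment = r0;            /* first word stored by stmia r7, {r0, r4} */
	cr_no_alignment = r0 & ~CR_A; /* second word: r4 = r0 with the A bit cleared */
}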
//So where does cr_alignment's link-time address come from? That depends on where it is defined:
//cr_alignment@arch/arm/kernel/entry-armv.S
.data
.globl cr_alignment @//declare a global symbol named cr_alignment
.globl cr_no_alignment
cr_alignment:
.space 4 @//reserve 4 bytes for the variable; the label cr_alignment marks its link-time address
cr_no_alignment:
.space 4
So cr_alignment is just a word in .data: __mmap_switched fills it with the CP15 control register value saved at boot, and vectors_high() later tests its CR_V bit (bit 13) to decide whether the exception vector page is mapped at 0xffff0000 or 0x00000000.