bl cache_on跳转的返回

本文深入探讨了ARM处理器的缓存控制机制,详细解析了cache_on函数如何根据不同的处理器ID调用相应的缓存开启、关闭及刷新函数。通过分析处理器特性寄存器和缓存类型表,展示了如何匹配处理器ID并执行对应的缓存操作。
bl	cache_on                              	@ bl跳转会将返回地址(下一条指令的地址)保存到lr
转载地址:https://blog.csdn.net/coldsnow33/article/details/37727331
  1. cache_on: mov r3, #8 @ cache_on function //偏移为8,对应cache_on

  2. b call_cache_fn @ b跳转不会将返回地址保存到lr

http://blog.csdn.net/coldsnow33/article/details/37727621

 
  1. call_cache_fn: adr r12, proc_types

  2. #ifdef CONFIG_CPU_CP15

  3. mrc p15, 0, r9, c0, c0 @ get processor ID

  4. #else

  5. ldr r9, =CONFIG_PROCESSOR_ID

  6. #endif

  7. 1: ldr r1, [r12, #0] @ get value

  8. ldr r2, [r12, #4] @ get mask

  9. eor r1, r1, r9 @ (real ^ match)

  10. tst r1, r2 @ & mask

  11. ARM( addeq pc, r12, r3 ) @ call cache function

  12. THUMB( addeq r12, r3 )

  13. THUMB( moveq pc, r12 ) @ call cache function//match到ID后就跳转到对应架构的cache_on了

  14. add r12, r12, #PROC_ENTRY_SIZE @ 没有match到ID,PROC_ENTRY_SIZE为4*5,每条指令4byte,proc_types表中每个架构对应5个条目

  15. b 1b @ 再跳转回tag1时, r12保存了下一个架构的条目,继续match

 
  1. .align 2

  2. .type proc_types,#object

  3. proc_types:

  4. .word 0x41000000 @ old ARM ID

  5. .word 0xff00f000

  6. mov pc, lr

  7. THUMB( nop )

  8. mov pc, lr

  9. THUMB( nop )

  10. mov pc, lr

  11. THUMB( nop )

  12.  
  13. .word 0x41007000 @ ARM7/710

  14. .word 0xfff8fe00

  15. mov pc, lr

  16. THUMB( nop )

  17. mov pc, lr

  18. THUMB( nop )

  19. mov pc, lr

  20. THUMB( nop )

  21.  
  22. .word 0x41807200 @ ARM720T (writethrough)

  23. .word 0xffffff00

  24. W(b) __armv4_mmu_cache_on

  25. W(b) __armv4_mmu_cache_off

  26. mov pc, lr

  27. THUMB( nop )

  28.  
  29. .word 0x41007400 @ ARM74x

  30. .word 0xff00ff00

  31. W(b) __armv3_mpu_cache_on

  32. W(b) __armv3_mpu_cache_off

  33. W(b) __armv3_mpu_cache_flush

  34.  
  35. .word 0x41009400 @ ARM94x

  36. .word 0xff00ff00

  37. W(b) __armv4_mpu_cache_on

  38. W(b) __armv4_mpu_cache_off

  39. W(b) __armv4_mpu_cache_flush

  40.  
  41. .word 0x41069260 @ ARM926EJ-S (v5TEJ)

  42. .word 0xff0ffff0

  43. W(b) __arm926ejs_mmu_cache_on

  44. W(b) __armv4_mmu_cache_off

  45. W(b) __armv5tej_mmu_cache_flush

  46.  
  47. .word 0x00007000 @ ARM7 IDs

  48. .word 0x0000f000

  49. mov pc, lr

  50. THUMB( nop )

  51. mov pc, lr

  52. THUMB( nop )

  53. mov pc, lr

  54. THUMB( nop )

  55.  
  56. @ Everything from here on will be the new ID system.

  57.  
  58. .word 0x4401a100 @ sa110 / sa1100

  59. .word 0xffffffe0

  60. W(b) __armv4_mmu_cache_on

  61. W(b) __armv4_mmu_cache_off

  62. W(b) __armv4_mmu_cache_flush

  63.  
  64. .word 0x6901b110 @ sa1110

  65. .word 0xfffffff0

  66. W(b) __armv4_mmu_cache_on

  67. W(b) __armv4_mmu_cache_off

  68. W(b) __armv4_mmu_cache_flush

  69.  
  70. .word 0x56056900

  71. .word 0xffffff00 @ PXA9xx

  72. W(b) __armv4_mmu_cache_on

  73. W(b) __armv4_mmu_cache_off

  74. W(b) __armv4_mmu_cache_flush

  75.  
  76. .word 0x56158000 @ PXA168

  77. .word 0xfffff000

  78. W(b) __armv4_mmu_cache_on

  79. W(b) __armv4_mmu_cache_off

  80. W(b) __armv5tej_mmu_cache_flush

  81.  
  82. .word 0x56050000 @ Feroceon

  83. .word 0xff0f0000

  84. W(b) __armv4_mmu_cache_on

  85. W(b) __armv4_mmu_cache_off

  86. W(b) __armv5tej_mmu_cache_flush

  87.  
  88. #ifdef CONFIG_CPU_FEROCEON_OLD_ID

  89. /* this conflicts with the standard ARMv5TE entry */

  90. .long 0x41009260 @ Old Feroceon

  91. .long 0xff00fff0

  92. b __armv4_mmu_cache_on

  93. b __armv4_mmu_cache_off

  94. b __armv5tej_mmu_cache_flush

  95. #endif

  96.  
  97. .word 0x66015261 @ FA526

  98. .word 0xff01fff1

  99. W(b) __fa526_cache_on

  100. W(b) __armv4_mmu_cache_off

  101. W(b) __fa526_cache_flush

  102.  
  103. @ These match on the architecture ID

  104.  
  105. .word 0x00020000 @ ARMv4T

  106. .word 0x000f0000

  107. W(b) __armv4_mmu_cache_on

  108. W(b) __armv4_mmu_cache_off

  109. W(b) __armv4_mmu_cache_flush

  110.  
  111. .word 0x00050000 @ ARMv5TE

  112. .word 0x000f0000

  113. W(b) __armv4_mmu_cache_on

  114. W(b) __armv4_mmu_cache_off

  115. W(b) __armv4_mmu_cache_flush

  116.  
  117. .word 0x00060000 @ ARMv5TEJ

  118. .word 0x000f0000

  119. W(b) __armv4_mmu_cache_on

  120. W(b) __armv4_mmu_cache_off

  121. W(b) __armv5tej_mmu_cache_flush

  122.  
  123. .word 0x0007b000 @ ARMv6

  124. .word 0x000ff000

  125. W(b) __armv6_mmu_cache_on

  126. W(b) __armv4_mmu_cache_off

  127. W(b) __armv6_mmu_cache_flush

  128.  
  129. .word 0x000f0000 @ new CPU Id

  130. .word 0x000f0000

  131. W(b) __armv7_mmu_cache_on

  132. W(b) __armv7_mmu_cache_off

  133. W(b) __armv7_mmu_cache_flush

  134.  
  135. .word 0 @ unrecognised type

  136. .word 0

  137. mov pc, lr

  138. THUMB( nop )

  139. mov pc, lr

  140. THUMB( nop )

  141. mov pc, lr

  142. THUMB( nop )

  143.  
  144. .size proc_types, . - proc_types

  145.  

如果处理器ID一直没有match,最终会查到proc_types表中的最后一个条目:该条目的ID值和mask均为0,`tst r1, r2`必定匹配,于是执行其中的mov pc, lr,不做任何cache操作,直接返回到bl cache_on的下一条指令。

 
  1. __armv7_mmu_cache_on:

  2. mov r12, lr @ lr保存的是bl cache_on的下一条指令,下面还有tag的跳转,会重写lr,需要保存lr到其他寄存器
  #ifdef CONFIG_MMU

  3. mrc p15, 0, r11, c0, c1, 4 @ read ID_MMFR0

  4. tst r11, #0xf @ VMSA

  5. movne r6, #CB_BITS | 0x02 @ !XN

  6. blne __setup_mmu @ bl跳转会保存返回地址到lr

  7. mov r0, #0

  8. mcr p15, 0, r0, c7, c10, 4 @ drain write buffer

  9. tst r11, #0xf @ VMSA

  10. mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs

  11. #endif

  12. mrc p15, 0, r0, c1, c0, 0 @ read control reg

  13. bic r0, r0, #1 << 28 @ clear SCTLR.TRE

  14. orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement

  15. orr r0, r0, #0x003c @ write buffer

  16. bic r0, r0, #2 @ A (no unaligned access fault)

  17. orr r0, r0, #1 << 22 @ U (v6 unaligned access model)

  18. @ (needed for ARM1176)

  19. #ifdef CONFIG_MMU

  20. ARM_BE8( orr r0, r0, #1 << 25 ) @ big-endian page tables

  21. mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg

  22. orrne r0, r0, #1 @ MMU enabled

  23. movne r1, #0xfffffffd @ domain 0 = client

  24. bic r6, r6, #1 << 31 @ 32-bit translation system

  25. bic r6, r6, #3 << 0 @ use only ttbr0

  26. mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer

  27. mcrne p15, 0, r1, c3, c0, 0 @ load domain access control

  28. mcrne p15, 0, r6, c2, c0, 2 @ load ttb control

  29. #endif

  30. mcr p15, 0, r0, c7, c5, 4 @ ISB

  31. mcr p15, 0, r0, c1, c0, 0 @ load control register

  32. mrc p15, 0, r0, c1, c0, 0 @ and read it back

  33. mov r0, #0

  34. mcr p15, 0, r0, c7, c5, 4 @ ISB

  35. mov pc, r12 @ 返回到bl cache_on的下一条指令

/* * linux/arch/arm/kernel/head.S * * Copyright (C) 1994-2002 Russell King * Copyright (c) 2003 ARM Limited * All Rights Reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Kernel startup code for all 32-bit CPUs */ #include <linux/linkage.h> #include <linux/init.h> #include <asm/assembler.h> #include <asm/cp15.h> #include <asm/domain.h> #include <asm/ptrace.h> #include <asm/asm-offsets.h> #include <asm/memory.h> #include <asm/thread_info.h> #include <asm/pgtable.h> #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING) #include CONFIG_DEBUG_LL_INCLUDE #endif /* * swapper_pg_dir is the virtual address of the initial page table. * We place the page tables 16K below KERNEL_RAM_VADDR. Therefore, we must * make sure that KERNEL_RAM_VADDR is correctly set. Currently, we expect * the least significant 16 bits to be 0x8000, but we could probably * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000. */ #define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) #if (KERNEL_RAM_VADDR & 0xffff) != 0x8000 #error KERNEL_RAM_VADDR must start at 0xXXXX8000 #endif #ifdef CONFIG_ARM_LPAE /* LPAE requires an additional page for the PGD */ #define PG_DIR_SIZE 0x5000 #define PMD_ORDER 3 #else #define PG_DIR_SIZE 0x4000 #define PMD_ORDER 2 #endif .globl swapper_pg_dir .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE .macro pgtbl, rd, phys add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE .endm /* * Kernel startup entry point. * --------------------------- * * This is normally called from the decompressor code. The requirements * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0, * r1 = machine nr, r2 = atags or dtb pointer. * * This code is mostly position independent, so if you link the kernel at * 0xc0008000, you call this at __pa(0xc0008000). 
* * See linux/arch/arm/tools/mach-types for the complete list of machine * numbers for r1. * * We're trying to keep crap to a minimum; DO NOT add any machine specific * crap here - that's what the boot loader (or in extreme, well justified * circumstances, zImage) is for. */ .arm __HEAD ENTRY(stext) THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM. THUMB( bx r9 ) @ If this is a Thumb-2 kernel, THUMB( .thumb ) @ switch to Thumb now. THUMB(1: ) #ifdef CONFIG_ARM_VIRT_EXT bl __hyp_stub_install #endif @ ensure svc mode and all interrupts masked safe_svcmode_maskall r9 mrc p15, 0, r9, c0, c0 @ get processor id bl __lookup_processor_type @ r5=procinfo r9=cpuid movs r10, r5 @ invalid processor (r5=0)? THUMB( it eq ) @ force fixup-able long branch encoding beq __error_p @ yes, error 'p' #ifdef CONFIG_ARM_LPAE mrc p15, 0, r3, c0, c1, 4 @ read ID_MMFR0 and r3, r3, #0xf @ extract VMSA support cmp r3, #5 @ long-descriptor translation table format? THUMB( it lo ) @ force fixup-able long branch encoding blo __error_p @ only classic page table format #endif #ifndef CONFIG_XIP_KERNEL adr r3, 2f ldmia r3, {r4, r8} sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET) add r8, r8, r4 @ PHYS_OFFSET #else ldr r8, =PHYS_OFFSET @ always constant in this case #endif /* * r1 = machine no, r2 = atags or dtb, * r8 = phys_offset, r9 = cpuid, r10 = procinfo */ bl __vet_atags #ifdef CONFIG_SMP_ON_UP bl __fixup_smp #endif #ifdef CONFIG_ARM_PATCH_PHYS_VIRT bl __fixup_pv_table #endif bl __create_page_tables /* * The following calls CPU specific code in a position independent * manner. See arch/arm/mm/proc-*.S for details. r10 = base of * xxx_proc_info structure selected by __lookup_processor_type * above. On return, the CPU will be ready for the MMU to be * turned on, and r0 will hold the CPU control register value. 
*/ ldr r13, =__mmap_switched @ address to jump to after @ mmu has been enabled adr lr, BSYM(1f) @ return (PIC) address mov r8, r4 @ set TTBR1 to swapper_pg_dir ARM( add pc, r10, #PROCINFO_INITFUNC ) THUMB( add r12, r10, #PROCINFO_INITFUNC ) THUMB( mov pc, r12 ) 1: b __enable_mmu ENDPROC(stext) .ltorg #ifndef CONFIG_XIP_KERNEL 2: .long . .long PAGE_OFFSET #endif /* * Setup the initial page tables. We only setup the barest * amount which are required to get the kernel running, which * generally means mapping in the kernel code. * * r8 = phys_offset, r9 = cpuid, r10 = procinfo * * Returns: * r0, r3, r5-r7 corrupted * r4 = physical page table address */ __create_page_tables: pgtbl r4, r8 @ page table address /* * Clear the swapper page table */ mov r0, r4 mov r3, #0 add r6, r0, #PG_DIR_SIZE 1: str r3, [r0], #4 str r3, [r0], #4 str r3, [r0], #4 str r3, [r0], #4 teq r0, r6 bne 1b #ifdef CONFIG_ARM_LPAE /* * Build the PGD table (first level) to point to the PMD table. A PGD * entry is 64-bit wide. */ mov r0, r4 add r3, r4, #0x1000 @ first PMD table address orr r3, r3, #3 @ PGD block type mov r6, #4 @ PTRS_PER_PGD mov r7, #1 << (55 - 32) @ L_PGD_SWAPPER 1: #ifdef CONFIG_CPU_ENDIAN_BE8 str r7, [r0], #4 @ set top PGD entry bits str r3, [r0], #4 @ set bottom PGD entry bits #else str r3, [r0], #4 @ set bottom PGD entry bits str r7, [r0], #4 @ set top PGD entry bits #endif add r3, r3, #0x1000 @ next PMD table subs r6, r6, #1 bne 1b add r4, r4, #0x1000 @ point to the PMD tables #ifdef CONFIG_CPU_ENDIAN_BE8 add r4, r4, #4 @ we only write the bottom word #endif #endif ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags /* * Create identity mapping to cater for __enable_mmu. * This identity mapping will be removed by paging_init(). 
*/ adr r0, __turn_mmu_on_loc ldmia r0, {r3, r5, r6} sub r0, r0, r3 @ virt->phys offset add r5, r5, r0 @ phys __turn_mmu_on add r6, r6, r0 @ phys __turn_mmu_on_end mov r5, r5, lsr #SECTION_SHIFT mov r6, r6, lsr #SECTION_SHIFT 1: orr r3, r7, r5, lsl #SECTION_SHIFT @ flags + kernel base str r3, [r4, r5, lsl #PMD_ORDER] @ identity mapping cmp r5, r6 addlo r5, r5, #1 @ next section blo 1b /* * Map our RAM from the start to the end of the kernel .bss section. */ add r0, r4, #PAGE_OFFSET >> (SECTION_SHIFT - PMD_ORDER) ldr r6, =(_end - 1) orr r3, r8, r7 add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER) 1: str r3, [r0], #1 << PMD_ORDER add r3, r3, #1 << SECTION_SHIFT cmp r0, r6 bls 1b #ifdef CONFIG_XIP_KERNEL /* * Map the kernel image separately as it is not located in RAM. */ #define XIP_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR) mov r3, pc mov r3, r3, lsr #SECTION_SHIFT orr r3, r7, r3, lsl #SECTION_SHIFT add r0, r4, #(XIP_START & 0xff000000) >> (SECTION_SHIFT - PMD_ORDER) str r3, [r0, #((XIP_START & 0x00f00000) >> SECTION_SHIFT) << PMD_ORDER]! ldr r6, =(_edata_loc - 1) add r0, r0, #1 << PMD_ORDER add r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ORDER) 1: cmp r0, r6 add r3, r3, #1 << SECTION_SHIFT strls r3, [r0], #1 << PMD_ORDER bls 1b #endif /* * Then map boot params address in r2 if specified. * We map 2 sections in case the ATAGs/DTB crosses a section boundary. */ mov r0, r2, lsr #SECTION_SHIFT movs r0, r0, lsl #SECTION_SHIFT subne r3, r0, r8 addne r3, r3, #PAGE_OFFSET addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER) orrne r6, r7, r0 strne r6, [r3], #1 << PMD_ORDER addne r6, r6, #1 << SECTION_SHIFT strne r6, [r3] #if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8) sub r4, r4, #4 @ Fixup page table pointer @ for 64-bit descriptors #endif #ifdef CONFIG_DEBUG_LL #if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING) /* * Map in IO space for serial debugging. * This allows debug messages to be output * via a serial console before paging_init. 
*/ addruart r7, r3, r0 mov r3, r3, lsr #SECTION_SHIFT mov r3, r3, lsl #PMD_ORDER add r0, r4, r3 mov r3, r7, lsr #SECTION_SHIFT ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags orr r3, r7, r3, lsl #SECTION_SHIFT #ifdef CONFIG_ARM_LPAE mov r7, #1 << (54 - 32) @ XN #ifdef CONFIG_CPU_ENDIAN_BE8 str r7, [r0], #4 str r3, [r0], #4 #else str r3, [r0], #4 str r7, [r0], #4 #endif #else orr r3, r3, #PMD_SECT_XN str r3, [r0], #4 #endif #else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */ /* we don't need any serial debugging mappings */ ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags #endif #if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS) /* * If we're using the NetWinder or CATS, we also need to map * in the 16550-type serial port for the debug messages */ add r0, r4, #0xff000000 >> (SECTION_SHIFT - PMD_ORDER) orr r3, r7, #0x7c000000 str r3, [r0] #endif #ifdef CONFIG_ARCH_RPC /* * Map in screen at 0x02000000 & SCREEN2_BASE * Similar reasons here - for debug. This is * only for Acorn RiscPC architectures. */ add r0, r4, #0x02000000 >> (SECTION_SHIFT - PMD_ORDER) orr r3, r7, #0x02000000 str r3, [r0] add r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER) str r3, [r0] #endif #endif #ifdef CONFIG_ARM_LPAE sub r4, r4, #0x1000 @ point to the PGD table #endif mov pc, lr ENDPROC(__create_page_tables) .ltorg .align __turn_mmu_on_loc: .long . .long __turn_mmu_on .long __turn_mmu_on_end #if defined(CONFIG_SMP) __CPUINIT ENTRY(secondary_startup) /* * Common entry point for secondary CPUs. * * Ensure that we're in SVC mode, and IRQs are disabled. Lookup * the processor type - there is no need to check the machine type * as it has already been validated by the primary processor. */ #ifdef CONFIG_ARM_VIRT_EXT bl __hyp_stub_install_secondary #endif safe_svcmode_maskall r9 mrc p15, 0, r9, c0, c0 @ get processor id bl __lookup_processor_type movs r10, r5 @ invalid processor? 
moveq r0, #'p' @ yes, error 'p' THUMB( it eq ) @ force fixup-able long branch encoding beq __error_p /* * Use the page tables supplied from __cpu_up. */ adr r4, __secondary_data ldmia r4, {r5, r7, r12} @ address to jump to after sub lr, r4, r5 @ mmu has been enabled ldr r4, [r7, lr] @ get secondary_data.pgdir add r7, r7, #4 ldr r8, [r7, lr] @ get secondary_data.swapper_pg_dir adr lr, BSYM(__enable_mmu) @ return address mov r13, r12 @ __secondary_switched address ARM( add pc, r10, #PROCINFO_INITFUNC ) @ initialise processor @ (return control reg) THUMB( add r12, r10, #PROCINFO_INITFUNC ) THUMB( mov pc, r12 ) ENDPROC(secondary_startup) /* * r6 = &secondary_data */ ENTRY(__secondary_switched) ldr sp, [r7, #4] @ get secondary_data.stack mov fp, #0 b secondary_start_kernel ENDPROC(__secondary_switched) .align .type __secondary_data, %object __secondary_data: .long . .long secondary_data .long __secondary_switched #endif /* defined(CONFIG_SMP) */ /* * Setup common bits before finally enabling the MMU. Essentially * this is just loading the page table pointer and domain access * registers. 
* * r0 = cp#15 control register * r1 = machine ID * r2 = atags or dtb pointer * r4 = page table pointer * r9 = processor ID * r13 = *virtual* address to jump to upon completion */ __enable_mmu: #if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6 orr r0, r0, #CR_A #else bic r0, r0, #CR_A #endif #ifdef CONFIG_CPU_DCACHE_DISABLE bic r0, r0, #CR_C #endif #ifdef CONFIG_CPU_BPREDICT_DISABLE bic r0, r0, #CR_Z #endif #ifdef CONFIG_CPU_ICACHE_DISABLE bic r0, r0, #CR_I #endif #ifdef CONFIG_ARM_LPAE mov r5, #0 mcrr p15, 0, r4, r5, c2 @ load TTBR0 #else mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ domain_val(DOMAIN_IO, DOMAIN_CLIENT)) mcr p15, 0, r5, c3, c0, 0 @ load domain access register mcr p15, 0, r4, c2, c0, 0 @ load page table pointer #endif b __turn_mmu_on ENDPROC(__enable_mmu) /* * Enable the MMU. This completely changes the structure of the visible * memory space. You will not be able to trace execution through this. * If you have an enquiry about this, *please* check the linux-arm-kernel * mailing list archives BEFORE sending another post to the list. * * r0 = cp#15 control register * r1 = machine ID * r2 = atags or dtb pointer * r9 = processor ID * r13 = *virtual* address to jump to upon completion * * other registers depend on the function called upon completion */ .align 5 .pushsection .idmap.text, "ax" ENTRY(__turn_mmu_on) mov r0, r0 instr_sync mcr p15, 0, r0, c1, c0, 0 @ write control reg mrc p15, 0, r3, c0, c0, 0 @ read id reg instr_sync mov r3, r3 mov r3, r13 mov pc, r3 __turn_mmu_on_end: ENDPROC(__turn_mmu_on) .popsection #ifdef CONFIG_SMP_ON_UP __INIT __fixup_smp: and r3, r9, #0x000f0000 @ architecture version teq r3, #0x000f0000 @ CPU ID supported? 
bne __fixup_smp_on_up @ no, assume UP bic r3, r9, #0x00ff0000 bic r3, r3, #0x0000000f @ mask 0xff00fff0 mov r4, #0x41000000 orr r4, r4, #0x0000b000 orr r4, r4, #0x00000020 @ val 0x4100b020 teq r3, r4 @ ARM 11MPCore? moveq pc, lr @ yes, assume SMP mrc p15, 0, r0, c0, c0, 5 @ read MPIDR and r0, r0, #0xc0000000 @ multiprocessing extensions and teq r0, #0x80000000 @ not part of a uniprocessor system? moveq pc, lr @ yes, assume SMP __fixup_smp_on_up: adr r0, 1f ldmia r0, {r3 - r5} sub r3, r0, r3 add r4, r4, r3 add r5, r5, r3 b __do_fixup_smp_on_up ENDPROC(__fixup_smp) .align 1: .word . .word __smpalt_begin .word __smpalt_end .pushsection .data .globl smp_on_up smp_on_up: ALT_SMP(.long 1) ALT_UP(.long 0) .popsection #endif .text __do_fixup_smp_on_up: cmp r4, r5 movhs pc, lr ldmia r4!, {r0, r6} ARM( str r6, [r0, r3] ) THUMB( add r0, r0, r3 ) #ifdef __ARMEB__ THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian. #endif THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3. THUMB( strh r6, [r0] ) b __do_fixup_smp_on_up ENDPROC(__do_fixup_smp_on_up) ENTRY(fixup_smp) stmfd sp!, {r4 - r6, lr} mov r4, r0 add r5, r0, r1 mov r3, #0 bl __do_fixup_smp_on_up ldmfd sp!, {r4 - r6, pc} ENDPROC(fixup_smp) #ifdef CONFIG_ARM_PATCH_PHYS_VIRT /* __fixup_pv_table - patch the stub instructions with the delta between * PHYS_OFFSET and PAGE_OFFSET, which is assumed to be 16MiB aligned and * can be expressed by an immediate shifter operand. The stub instruction * has a form of '(add|sub) rd, rn, #imm'. 
*/ __HEAD __fixup_pv_table: adr r0, 1f ldmia r0, {r3-r5, r7} sub r3, r0, r3 @ PHYS_OFFSET - PAGE_OFFSET add r4, r4, r3 @ adjust table start address add r5, r5, r3 @ adjust table end address add r7, r7, r3 @ adjust __pv_phys_offset address str r8, [r7] @ save computed PHYS_OFFSET to __pv_phys_offset mov r6, r3, lsr #24 @ constant for add/sub instructions teq r3, r6, lsl #24 @ must be 16MiB aligned THUMB( it ne @ cross section branch ) bne __error str r6, [r7, #4] @ save to __pv_offset b __fixup_a_pv_table ENDPROC(__fixup_pv_table) .align 1: .long . .long __pv_table_begin .long __pv_table_end 2: .long __pv_phys_offset .text __fixup_a_pv_table: #ifdef CONFIG_THUMB2_KERNEL lsls r6, #24 beq 2f clz r7, r6 lsr r6, #24 lsl r6, r7 bic r6, #0x0080 lsrs r7, #1 orrcs r6, #0x0080 orr r6, r6, r7, lsl #12 orr r6, #0x4000 b 2f 1: add r7, r3 ldrh ip, [r7, #2] and ip, 0x8f00 orr ip, r6 @ mask in offset bits 31-24 strh ip, [r7, #2] 2: cmp r4, r5 ldrcc r7, [r4], #4 @ use branch for delay slot bcc 1b bx lr #else b 2f 1: ldr ip, [r7, r3] bic ip, ip, #0x000000ff orr ip, ip, r6 @ mask in offset bits 31-24 str ip, [r7, r3] 2: cmp r4, r5 ldrcc r7, [r4], #4 @ use branch for delay slot bcc 1b mov pc, lr #endif ENDPROC(__fixup_a_pv_table) ENTRY(fixup_pv_table) stmfd sp!, {r4 - r7, lr} ldr r2, 2f @ get address of __pv_phys_offset mov r3, #0 @ no offset mov r4, r0 @ r0 = table start add r5, r0, r1 @ r1 = table size ldr r6, [r2, #4] @ get __pv_offset bl __fixup_a_pv_table ldmfd sp!, {r4 - r7, pc} ENDPROC(fixup_pv_table) .align 2: .long __pv_phys_offset .data .globl __pv_phys_offset .type __pv_phys_offset, %object __pv_phys_offset: .long 0 .size __pv_phys_offset, . - __pv_phys_offset __pv_offset: .long 0 #endif #include "head-common.S" 解释以上代码
最新发布
09-24
评论
成就一亿技术人!
拼手气红包6.0元
还能输入1000个字符
 
红包 添加红包
表情包 插入表情
 条评论被折叠 查看
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值