// ...
.macro exception_entry label
    /* Each entry of the exception table should be 0x80 aligned */
    .align 7
    b \label
.endm
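For orientation: an AArch64 vector table has 16 entries of 0x80 bytes each, in four groups of four (current EL with SP_EL0, current EL with SP_ELx, lower EL using AArch64, lower EL using AArch32), and .align 7 pads each entry to that 128-byte (2^7) boundary. A minimal C sketch of the address arithmetic, with illustrative names not taken from ChCore:

#include <stdint.h>

/* Byte offset of one handler inside the vector table.
 * group: 0 = EL1t, 1 = EL1h, 2 = EL0/AArch64, 3 = EL0/AArch32
 * kind:  0 = synchronous, 1 = IRQ, 2 = FIQ, 3 = SError
 */
static inline uint64_t vector_offset(int group, int kind)
{
    return ((uint64_t)group * 4 + kind) * 0x80;
}
/* e.g., irq_el0_64 is entered via VBAR_EL1 + vector_offset(2, 1) == +0x480 */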
/* See more details about the bias in registers.h */
.macro exception_enter
    sub sp, sp, #ARCH_EXEC_CONT_SIZE
    stp x0, x1, [sp, #16 * 0]
    stp x2, x3, [sp, #16 * 1]
    stp x4, x5, [sp, #16 * 2]
    stp x6, x7, [sp, #16 * 3]
    stp x8, x9, [sp, #16 * 4]
    stp x10, x11, [sp, #16 * 5]
    stp x12, x13, [sp, #16 * 6]
    stp x14, x15, [sp, #16 * 7]
    // ...
.endm
.macro exception_exit
    ldp x22, x23, [sp, #16 * 16]
    ldp x30, x21, [sp, #16 * 15]
    msr sp_el0, x21
    msr elr_el1, x22
    msr spsr_el1, x23
    ldp x0, x1, [sp, #16 * 0]
    ldp x2, x3, [sp, #16 * 1]
    ldp x4, x5, [sp, #16 * 2]
    ldp x6, x7, [sp, #16 * 3]
    ldp x8, x9, [sp, #16 * 4]
    // ...
    add sp, sp, #ARCH_EXEC_CONT_SIZE
    eret
.endm
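Read together, exception_enter and exception_exit define the saved-frame layout: fifteen pairs of general registers, then x30 paired with SP_EL0, then ELR_EL1 paired with SPSR_EL1. A C view of that frame, pieced together from the offsets above (field names are assumptions; the authoritative layout lives in registers.h):

#include <stdint.h>

/* The execution context as exception_enter lays it out on the stack. */
struct exec_context_sketch {
    uint64_t x[31];    /* x0..x30 at [sp, #16 * 0] .. [sp, #16 * 15] */
    uint64_t sp_el0;   /* user stack pointer, second slot of 16 * 15 */
    uint64_t elr_el1;  /* [sp, #16 * 16]: PC to resume at on eret */
    uint64_t spsr_el1; /* [sp, #16 * 16 + 8]: PSTATE to restore */
};
/* sizeof == 17 * 16 == 272, presumably what ARCH_EXEC_CONT_SIZE expands to */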
.macro switch_to_cpu_stack
    mrs x24, TPIDR_EL1
    add x24, x24, #OFFSET_LOCAL_CPU_STACK
    ldr x24, [x24]
    mov sp, x24
.endm
.macro switch_to_thread_ctx
    mrs x24, TPIDR_EL1
    add x24, x24, #OFFSET_CURRENT_EXEC_CTX
    ldr x24, [x24]
    mov sp, x24
.endm
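Both macros treat TPIDR_EL1 as a pointer to the local CPU's bookkeeping structure and load a single field out of it. In rough C terms (the register read is real inline assembly, but the struct and field names are only guesses tied to the OFFSET_* constants):

#include <stdint.h>

struct per_cpu_sketch {
    /* ... */
    void *cpu_stack;        /* at OFFSET_LOCAL_CPU_STACK */
    void *current_exec_ctx; /* at OFFSET_CURRENT_EXEC_CTX */
    /* ... */
};

static inline struct per_cpu_sketch *local_cpu(void)
{
    uint64_t tpidr;
    __asm__ volatile("mrs %0, tpidr_el1" : "=r"(tpidr));
    return (struct per_cpu_sketch *)tpidr;
}

/* switch_to_cpu_stack:  sp = local_cpu()->cpu_stack;
 * switch_to_thread_ctx: sp = local_cpu()->current_exec_ctx; */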
/*
 * el1_vector should be set in VBAR_EL1. The low 11 bits of VBAR_EL1
 * are reserved, so the vector table must be 2 KiB (2^11) aligned.
 */
.align 11
EXPORT(el1_vector)
    exception_entry sync_el1t    // Synchronous EL1t
    exception_entry irq_el1t     // IRQ EL1t
    exception_entry fiq_el1t     // FIQ EL1t
    exception_entry error_el1t   // Error EL1t
    exception_entry sync_el1h    // Synchronous EL1h
    exception_entry irq_el1h     // IRQ EL1h
    exception_entry fiq_el1h     // FIQ EL1h
    exception_entry error_el1h   // Error EL1h
    exception_entry sync_el0_64  // Synchronous 64-bit EL0
    exception_entry irq_el0_64   // IRQ 64-bit EL0
    exception_entry fiq_el0_64   // FIQ 64-bit EL0
    exception_entry error_el0_64 // Error 64-bit EL0
    exception_entry sync_el0_32  // Synchronous 32-bit EL0
    exception_entry irq_el0_32   // IRQ 32-bit EL0
    exception_entry fiq_el0_32   // FIQ 32-bit EL0
    exception_entry error_el0_32 // Error 32-bit EL0
/*
 * The selected stack pointer is indicated by a suffix on the Exception Level:
 *  - t: SP_EL0 is used
 *  - h: SP_ELx is used
 *
 * ChCore does not enable or handle irq_el1t, fiq_xxx, and error_xxx.
 * The SPSR_EL1 of idle threads is set to 0b0101, which means interrupts
 * are enabled during their execution and SP_EL1 is selected (h).
 * Thus, irq_el1h is enabled and handled.
 *
 * Similarly, sync_el1t is not enabled either, and we simply reuse the
 * sync_el0 handler to handle sync_el1h (e.g., a page fault during
 * copy_to_user or an FPU trap).
 */
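The 0b0101 above is the M[3:0] mode field of SPSR_EL1; together with the DAIF mask bits it decides where and how an eret resumes. The relevant Armv8-A encodings, written out as a sketch (the macro names are illustrative):

/* SPSR_EL1.M[3:0] mode encodings (Armv8-A) */
#define SPSR_MODE_EL0T 0x0 /* EL0, SP_EL0 */
#define SPSR_MODE_EL1T 0x4 /* EL1, SP_EL0 (the "t" suffix) */
#define SPSR_MODE_EL1H 0x5 /* EL1, SP_EL1 (the "h" suffix) */

/* The D, A, I, F mask bits sit at SPSR[9:6]; leaving them zero, as
 * 0b0101 does, keeps IRQs unmasked after the eret. */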
irq_el1h:
    /* Simply reusing exception_enter/exit is OK. */
    exception_enter
#ifndef CHCORE_KERNEL_RT
    switch_to_cpu_stack
#endif
    bl handle_irq_el1
    /* should never reach here */
    b .
irq_el1t:
fiq_el1t:
fiq_el1h:
error_el1t:
error_el1h:
sync_el1t:
    bl unexpected_handler
sync_el1h:
    exception_enter
    mov x0, #SYNC_EL1h
    mrs x1, esr_el1
    mrs x2, elr_el1
    bl handle_entry_c
    str x0, [sp, #16 * 16] /* store the return value as the ELR_EL1 */
    exception_exit
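From this call site one can read off a plausible C signature for handle_entry_c: x0 carries the exception type, x1 the syndrome, x2 the faulting PC, and the return value overwrites the saved ELR_EL1 so the handler can change where execution resumes. A sketch inferred from the assembly, not quoted from ChCore's headers:

#include <stdint.h>

/* Inferred: type = SYNC_EL1h here, esr = ESR_EL1, elr = ELR_EL1.
 * The returned address is stored back into the frame's ELR_EL1 slot. */
uint64_t handle_entry_c(int type, uint64_t esr, uint64_t elr);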
sync_el0_64:
    exception_enter
#ifndef CHCORE_KERNEL_RT
    switch_to_cpu_stack
#endif
    mrs x25, esr_el1
    lsr x24, x25, #ESR_EL1_EC_SHIFT
    cmp x24, #ESR_EL1_EC_SVC_64
    b.eq el0_syscall
    mov x0, SYNC_EL0_64
    mrs x1, esr_el1
    mrs x2, elr_el1
    bl handle_entry_c
#ifdef CHCORE_KERNEL_RT
    bl do_pending_resched
#else
    switch_to_thread_ctx
#endif
    exception_exit
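The classification after exception_enter relies on the layout of ESR_EL1: the exception class (EC) field occupies bits [31:26], and class 0x15 means an SVC executed in AArch64 state. The same test in C, mirroring the unmasked lsr/cmp in the assembly (the bits above EC read as zero here, so the shift alone isolates EC):

#include <stdbool.h>
#include <stdint.h>

#define ESR_EL1_EC_SHIFT  26   /* EC field is ESR_EL1[31:26] */
#define ESR_EL1_EC_SVC_64 0x15 /* SVC from AArch64 state */

static inline bool is_svc64(uint64_t esr)
{
    return (esr >> ESR_EL1_EC_SHIFT) == ESR_EL1_EC_SVC_64;
}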
el0_syscall:
    /* Hook syscalls to ease tracing and debugging */
#if ENABLE_HOOKING_SYSCALL == ON
    sub sp, sp, #16 * 8
    stp x0, x1, [sp, #16 * 0]
    stp x2, x3, [sp, #16 * 1]
    stp x4, x5, [sp, #16 * 2]
    stp x6, x7, [sp, #16 * 3]
    stp x8, x9, [sp, #16 * 4]
    stp x10, x11, [sp, #16 * 5]
    stp x12, x13, [sp, #16 * 6]
    stp x14, x15, [sp, #16 * 7]
    /* pass the syscall number (kept in x8) to the hook */
    mov x0, x8
    bl hook_syscall
    ldp x0, x1, [sp, #16 * 0]
    ldp x2, x3, [sp, #16 * 1]
    ldp x4, x5, [sp, #16 * 2]
    ldp x6, x7, [sp, #16 * 3]
    ldp x8, x9, [sp, #16 * 4]
    ldp x10, x11, [sp, #16 * 5]
    ldp x12, x13, [sp, #16 * 6]
    ldp x14, x15, [sp, #16 * 7]
    add sp, sp, #16 * 8
#endif
    adr x27, syscall_table      // syscall table in x27
    uxtw x16, w8                // syscall number in x16
    ldr x16, [x27, x16, lsl #3] // find the syscall entry
    blr x16
    /* Ret from syscall */
    // bl disable_irq
#ifdef CHCORE_KERNEL_RT
    str x0, [sp]
    bl do_pending_resched
#else
    switch_to_thread_ctx
    str x0, [sp]
#endif
    exception_exit
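In C terms, the whole el0_syscall path is a table dispatch: the uxtw/ldr pair indexes an array of function pointers by the 32-bit syscall number in w8 (lsl #3 is the * 8 for 8-byte entries), blr calls the entry, and str x0, [sp] parks the return value in the frame's saved-x0 slot so exception_exit hands it back to user space. A sketch with an assumed element type:

#include <stdint.h>

typedef long (*syscall_fn)(long, long, long, long, long, long);
extern syscall_fn syscall_table[]; /* the table adr-ed into x27 */

static long dispatch_syscall_sketch(uint32_t num, long a0, long a1, long a2,
                                    long a3, long a4, long a5)
{
    /* equivalent of: ldr x16, [x27, x16, lsl #3]; blr x16 */
    return syscall_table[num](a0, a1, a2, a3, a4, a5);
}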
irq_el0_64:
    exception_enter
#ifndef CHCORE_KERNEL_RT
    switch_to_cpu_stack
#endif
    bl handle_irq
    /* should never reach here */
    b .
error_el0_64:
sync_el0_32:
irq_el0_32:
fiq_el0_32:
error_el0_32:
    bl unexpected_handler
fiq_el0_64:
    exception_enter
#ifndef CHCORE_KERNEL_RT
    switch_to_cpu_stack
#endif
    bl handle_fiq
    /* should never reach here */
    b .
/* Thread switch: resume the target thread via the exception-return path. */
/* void eret_to_thread(u64 sp) */
BEGIN_FUNC(__eret_to_thread)
    mov sp, x0
    dmb ish /* smp_mb() */
#ifdef CHCORE_KERNEL_RT
    bl finish_switch
#endif
    exception_exit
END_FUNC(__eret_to_thread)
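A hypothetical caller, to show how the pieces fit: the scheduler points __eret_to_thread at the chosen thread's saved execution context and never gets control back, because exception_exit ends in eret. The struct thread below is a stand-in, not ChCore's definition:

#include <stdint.h>

struct thread {
    void *thread_ctx; /* the thread's saved exec context (see above) */
    /* ... */
};

extern void __eret_to_thread(uint64_t sp);

static void resume_thread(struct thread *next)
{
    __eret_to_thread((uint64_t)next->thread_ctx); /* does not return */
}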