void do_page_fault(u64 esr, u64 fault_ins_addr, int type, u64 *fix_addr)
{
    vaddr_t fault_addr;
    int fsc; // fault status code
    int wnr;
    int ret;

    // Read the faulting address from the FAR_EL1 register (done in assembly)
    fault_addr = get_fault_addr();
    // #define GET_ESR_EL1_FSC(esr_el1) (((esr_el1) >> ESR_EL1_FSC_SHIFT) & FSC_MASK)
    fsc = GET_ESR_EL1_FSC(esr);
    switch (fsc) {
    case DFSC_TRANS_FAULT_L0:
    case DFSC_TRANS_FAULT_L1:
    case DFSC_TRANS_FAULT_L2:
    case DFSC_TRANS_FAULT_L3: {
        // Translation fault: handle it further based on the faulting VMA, i.e., a page fault
        ret = handle_trans_fault(current_thread->vmspace, fault_addr);
        if (ret != 0) {
            // The fault was not handled successfully
            /* The trap happens in the kernel */
            if (type < SYNC_EL0_64) {
                // An EL1 type means the exception came from the kernel; jump to the no_context label
                goto no_context;
            }
            // A user-mode fault that could not be handled: print diagnostics, then exit
            kinfo("do_page_fault: faulting ip is 0x%lx (real IP),"
                  "faulting address is 0x%lx,"
                  "fsc is trans_fault (0b%b),"
                  "type is %d\n",
                  fault_ins_addr, fault_addr, fsc, type);
            kprint_vmr(current_thread->vmspace);
            kinfo("current_cap_group is %s\n",
                  current_cap_group->cap_group_name);
            sys_exit_group(-1);
        }
        break;
    }
    case DFSC_PERM_FAULT_L1:
    case DFSC_PERM_FAULT_L2:
    case DFSC_PERM_FAULT_L3:
        // Permission fault
        wnr = GET_ESR_EL1_WnR(esr); // WnR, ESR bit[6]. Write not Read. The cause of the data abort.
        if (wnr) {
            // Write permission fault
            ret = handle_perm_fault(
                current_thread->vmspace, fault_addr, VMR_WRITE);
        } else {
            // Read permission fault
            ret = handle_perm_fault(
                current_thread->vmspace, fault_addr, VMR_READ);
        }

        if (ret != 0) {
            /* The trap happens in the kernel */
            if (type < SYNC_EL0_64) {
                goto no_context;
            }
            sys_exit_group(-1);
        }
        break;
    case DFSC_ACCESS_FAULT_L1:
    case DFSC_ACCESS_FAULT_L2:
    case DFSC_ACCESS_FAULT_L3:
        // Access fault: the PTE's access bit is not set. Not supported yet, so just print a message.
        kinfo("do_page_fault: fsc is access_fault (0b%b)\n", fsc);
        BUG_ON(1);
        break;
    default:
        // Default path: a fault status code the kernel does not yet support handling.
        // Print the error-related information and then terminate with a kernel panic.
        kinfo("do_page_fault: faulting ip is 0x%lx (real IP),"
              "faulting address is 0x%lx,"
              "fsc is unsupported now (0b%b)\n",
              fault_ins_addr, fault_addr, fsc);
        kprint_vmr(current_thread->vmspace);
        kinfo("current_cap_group is %s\n",
              current_cap_group->cap_group_name);
/* This struct represents some physical memory resource */
struct pmobject {
    paddr_t start;
    size_t size;
    pmo_type_t type;
    /* record physical pages for on-demand-paging pmo */
    struct radix *radix;
    /*
     * The field of 'private' depends on 'type'.
     * PMO_FILE: it points to fmap_fault_pool
     * others: NULL
     */
    void *private;
    struct list_head mapping_list;
};
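The radix field is what supports on-demand paging: it maps a page index inside the pmo to the physical page committed at that index, if any. The get_page_from_pmo / commit_page_to_pmo helpers used later plausibly wrap that radix tree; a rough sketch, where radix_get / radix_add and their signatures are assumptions rather than the actual ChCore API:

/* Sketch: assumed thin wrappers over pmo->radix; names and signatures are
 * illustrative and not verified against the ChCore source tree. */
static paddr_t get_page_from_pmo(struct pmobject *pmo, unsigned long index)
{
    /* Returns 0 when no physical page has been committed at this index yet. */
    return (paddr_t)radix_get(pmo->radix, index);
}

static void commit_page_to_pmo(struct pmobject *pmo, unsigned long index, paddr_t pa)
{
    /* Record the newly allocated physical page so later faults on the
     * same offset reuse it instead of allocating again. */
    radix_add(pmo->radix, index, (void *)pa);
}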
int handle_trans_fault(struct vmspace *vmspace, vaddr_t fault_addr)
{
    struct vmregion *vmr;
    struct pmobject *pmo;
    paddr_t pa;
    unsigned long offset;
    unsigned long index;
    int ret = 0;

    /*
     * Grab lock here.
     * Because two threads (in same process) on different cores
     * may fault on the same page, so we need to prevent them
     * from adding the same mapping twice.
     */
    lock(&vmspace->vmspace_lock);
    vmr = find_vmr_for_va(vmspace, fault_addr);

    if (vmr == NULL) {
        kinfo("handle_trans_fault: no vmr found for va 0x%lx!\n",
              fault_addr);
        dump_pgfault_error();
        unlock(&vmspace->vmspace_lock);

#if defined(CHCORE_ARCH_AARCH64) || defined(CHCORE_ARCH_SPARC)
        /* kernel fault fixup is only supported on AArch64 and Sparc */
        return -EFAULT;
#endif
        sys_exit_group(-1);
    }

    pmo = vmr->pmo;
    /* Get the offset in the pmo for faulting addr */
    offset = ROUND_DOWN(fault_addr, PAGE_SIZE) - vmr->start + vmr->offset;
    vmr_prop_t perm = vmr->perm;
    switch (pmo->type) {
    case PMO_ANONYM:
    case PMO_SHM: {
        /* Boundary check */
        BUG_ON(offset >= pmo->size);

        /* Get the index in the pmo radix for faulting addr */
        index = offset / PAGE_SIZE;

        fault_addr = ROUND_DOWN(fault_addr, PAGE_SIZE);

        pa = get_page_from_pmo(pmo, index);
        if (pa == 0) {
            /*
             * Not committed before. Then, allocate the physical
             * page.
             */
            void *new_va = get_pages(0);
            long rss = 0;
            if (new_va == NULL) {
                unlock(&vmspace->vmspace_lock);
                return -ENOMEM;
            }
            pa = virt_to_phys(new_va);
            BUG_ON(pa == 0);
            /* Clear to 0 for the newly allocated page */
            memset((void *)phys_to_virt(pa), 0, PAGE_SIZE);
            /*
             * Record the physical page in the radix tree:
             * the offset is used as index in the radix tree
             */
            kdebug("commit: index: %ld, 0x%lx\n", index, pa);
            commit_page_to_pmo(pmo, index, pa);

            /* Add mapping in the page table */
            lock(&vmspace->pgtbl_lock);
            map_range_in_pgtbl(vmspace->pgtbl, fault_addr, pa,
                               PAGE_SIZE, perm, &rss);
            vmspace->rss += rss;
            unlock(&vmspace->pgtbl_lock);
        } else {
            /*
             * pa != 0: the faulting address has already been committed
             * to a physical page.
             *
             * For concurrent page faults:
             *
             * When type is PMO_ANONYM, the later faulting threads
             * of the process do not need to modify the page
             * table because a previous faulting thread will do
             * that. (This is always true for the same process)
             * However, if one process maps an anonymous pmo for
             * another process (e.g., main stack pmo), the faulting
             * thread (e.g., in the new process) needs to update its
             * page table.
             * So, for simplicity, we just update the page table.
             * Note that adding the same mapping is harmless.
             *
             * When type is PMO_SHM, the later faulting threads
             * need to add the mapping in the page table.
             * Repeated mapping operations are harmless.
             */
            if (pmo->type == PMO_SHM || pmo->type == PMO_ANONYM) {
                /* Add mapping in the page table */
                long rss = 0;
                lock(&vmspace->pgtbl_lock);
                map_range_in_pgtbl(vmspace->pgtbl, fault_addr, pa,
                                   PAGE_SIZE, perm, &rss);
                vmspace->rss += rss;
                unlock(&vmspace->pgtbl_lock);
            }
        }

        if (perm & VMR_EXEC) {
            arch_flush_cache(fault_addr, PAGE_SIZE, SYNC_IDCACHE);
        }

        break;
    }
    case PMO_FILE: {
        unlock(&vmspace->vmspace_lock);
        fault_addr = ROUND_DOWN(fault_addr, PAGE_SIZE);
        handle_user_fault(pmo, ROUND_DOWN(fault_addr, PAGE_SIZE));
        BUG("Should never be here!\n");
        break;
    }
    case PMO_FORBID: {
        kinfo("Forbidden memory access (pmo->type is PMO_FORBID).\n");
        dump_pgfault_error();

        unlock(&vmspace->vmspace_lock);
        sys_exit_group(-1);
        break;
    }
    default: {
        kinfo("handle_trans_fault: faulting vmr->pmo->type"
              "(pmo type %d at 0x%lx)\n",
              vmr->pmo->type, fault_addr);
        dump_pgfault_error();