-#include <arch/abi.h>
#include <klibc/string.h>
#include <lunaix/clock.h>
-#include <lunaix/common.h>
#include <lunaix/mm/mmap.h>
#include <lunaix/mm/pmm.h>
#include <lunaix/mm/region.h>
#include <lunaix/syscall.h>
#include <lunaix/syslog.h>
+#include <sys/abi.h>
+#include <sys/mm/mempart.h>
+
LOG_MODULE("PROC")
ptr_t
x86_page_table* ptd = (x86_page_table*)PG_MOUNT_1;
x86_page_table* pptd = (x86_page_table*)(mount_point | (0x3FF << 12));
- size_t kspace_l1inx = L1_INDEX(KERNEL_MM_BASE);
+ size_t kspace_l1inx = L1_INDEX(KERNEL_EXEC);
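// kspace_l1inx marks the first page-directory slot of the kernel mappings (KERNEL_EXEC
// and above); slots below it belong to the user half of the address space. The last
// slot (0x3FF) is the recursive self-mapping, hence the loop below stops at
// PG_MAX_ENTRIES - 1.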
for (size_t i = 0; i < PG_MAX_ENTRIES - 1; i++) {
x86_page_table* pptd = (x86_page_table*)(mount_point | (0x3FF << 12));
// only remove user address space
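// (slots at or above L1_INDEX(KERNEL_EXEC) map the kernel half and must stay intact)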
- for (size_t i = 0; i < L1_INDEX(KERNEL_MM_BASE); i++) {
+ for (size_t i = 0; i < L1_INDEX(KERNEL_EXEC); i++) {
x86_pte_t ptde = pptd->entry[i];
if (!ptde || !(ptde & PG_PRESENT)) {
continue;
struct mm_region* mapped;
struct mmap_param param = { .vms_mnt = VMS_MOUNT_1,
.pvms = &pcb->mm,
- .mlen = USTACK_SIZE,
+ .mlen = USR_STACK_SIZE,
.proct = PROT_READ | PROT_WRITE,
.flags = MAP_ANON | MAP_PRIVATE | MAP_FIXED,
.type = REGION_TYPE_STACK };
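// the user stack is an anonymous, private mapping of USR_STACK_SIZE bytes, pinned by
// MAP_FIXED at the well-known USR_STACK address.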
int status = 0;
- if ((status = mem_map(NULL, &mapped, USTACK_END, NULL, &param))) {
+ if ((status = mem_map(NULL, &mapped, USR_STACK, NULL, &param))) {
kprint_panic("fail to alloc user stack: %d", status);
}
x86_pte_t* curproc = &PTE_MOUNTED(VMS_SELF, i);
x86_pte_t* newproc = &PTE_MOUNTED(VMS_MOUNT_1, i);
- cpu_invplg((ptr_t)newproc);
+ cpu_flush_page((ptr_t)newproc);
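// the new process's PTE is accessed through the mount point, so any stale TLB entry
// for that address must be dropped before the entry is read or rewritten below.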
if ((attr & REGION_MODE_MASK) == REGION_RSHARED) {
// if the region is read-shared, mark both entries read-only so that any write triggers the COW policy.
- cpu_invplg((ptr_t)curproc);
- cpu_invplg((ptr_t)(i << 12));
+ cpu_flush_page((ptr_t)curproc);
+ cpu_flush_page((ptr_t)(i << 12));
*curproc = *curproc & ~PG_WRITE;
*newproc = *newproc & ~PG_WRITE;
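// no page is copied here: the first write by either side will fault, and the page
// fault handler is then expected to hand the writer its own private copy (COW).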
pcb->intr_ctx = __current->intr_ctx;
pcb->parent = __current;
- memcpy(pcb->fxstate, __current->fxstate, 512);
-
if (__current->cwd) {
pcb->cwd = __current->cwd;
vfs_ref_dnode(pcb->cwd);
__copy_fdtable(pcb);
region_copy(&__current->mm, &pcb->mm);
- setup_proc_mem(pcb, VMS_SELF);
+ /*
+ * store the return value for the forked process.
+ * it will be implicitly carried over once the kernel stack is copied.
+ */
+ store_retval(0);
+
+ copy_kernel_stack(pcb, VMS_SELF);
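/*
 * from this point the child owns an identical copy of the current kernel stack; when
 * first scheduled it resumes with the 0 stored above, while the parent continues below
 * and returns pcb->pid, the classic fork behaviour of returning twice.
 */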
// further configure the page table according to each mm_region
vmm_unmount_pd(VMS_MOUNT_1);
- // just like fork, this returns twice.
- store_retval_to(pcb, 0);
-
commit_process(pcb);
return pcb->pid;
}
-extern void __kernel_end;
+extern void __kexec_end;
void
-setup_proc_mem(struct proc_info* proc, ptr_t usedMnt)
+copy_kernel_stack(struct proc_info* proc, ptr_t usedMnt)
{
// copy the entire kernel page table
pid_t pid = proc->pid;
vmm_mount_pd(VMS_MOUNT_1, pt_copy); // mount the new process's page directory at VMS_MOUNT_1
// copy the kernel stack
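// walk every page table entry backing KERNEL_STACK..KERNEL_STACK_END and give the new
// process its own physical copy of each kernel stack page.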
- for (size_t i = KSTACK_START >> 12; i <= KSTACK_TOP >> 12; i++) {
+ for (size_t i = KERNEL_STACK >> 12; i <= KERNEL_STACK_END >> 12; i++) {
volatile x86_pte_t* ppte = &PTE_MOUNTED(VMS_MOUNT_1, i);
/*
In the name of Celestia our glorious goddess, I will fucking HATE
the TLB for the rest of my LIFE!
*/
- cpu_invplg((ptr_t)ppte);
+ cpu_flush_page((ptr_t)ppte);
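// without the flush above, the CPU may still hold a translation for this mounted
// address that refers to a previously mounted page table, and the read below would
// see (or the write would corrupt) the wrong table.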
x86_pte_t p = *ppte;
ptr_t ppa = vmm_dup_page(pid, PG_ENTRY_ADDR(p));
*ppte = (p & 0xfff) | ppa;
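// keep the low 12 attribute bits of the entry but point it at the duplicated frame.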
}
- // We do not need to set up regions for the kernel: kernel code and data are reachable
- // only through system calls, and any illegal access lands eip outside every region,
- // which results in a segmentation fault.
-
- // As for the remaining regions, we cannot know them yet, since that requires
- // information about the user program; they are left to be handled later.
proc->page_table = pt_copy;
}
\ No newline at end of file