chore: optimize memcpy and memset with x86 string instructions (rep movsb/stosb).
#include <stdint.h>
// Virtual memory manager
+#define VMAP_NULL 0
+#define VMAP_IGNORE 1
+
/**
 * @brief Initializes the virtual memory manager
 *
 * @return int
 */
int
-vmm_set_mapping(uintptr_t mnt, uintptr_t va, uintptr_t pa, pt_attr attr);
+vmm_set_mapping(uintptr_t mnt,
+ uintptr_t va,
+ uintptr_t pa,
+ pt_attr attr,
+ int options);
/**
 * @brief Removes a mapping
 * @param va virtual address to unmap (must be page-aligned)
 * @return the physical address previously mapped at va, or 0 if no mapping existed
 */
-int
+uintptr_t
vmm_del_mapping(uintptr_t mnt, uintptr_t va);
/**
// 为内核创建一个专属栈空间。
for (size_t i = 0; i < (KSTACK_SIZE >> PG_SIZE_BITS); i++) {
uintptr_t pa = pmm_alloc_page(KERNEL_PID, 0);
- vmm_set_mapping(
- PD_REFERENCED, KSTACK_START + (i << PG_SIZE_BITS), pa, PG_PREM_RW);
+ vmm_set_mapping(PD_REFERENCED,
+ KSTACK_START + (i << PG_SIZE_BITS),
+ pa,
+ PG_PREM_RW,
+ VMAP_NULL);
}
kprintf(KINFO "[MM] Allocated %d pages for stack start at %p\n",
KSTACK_SIZE >> PG_SIZE_BITS,
vmm_set_mapping(PD_REFERENCED,
VGA_BUFFER_VADDR + (i << PG_SIZE_BITS),
VGA_BUFFER_PADDR + (i << PG_SIZE_BITS),
- PG_PREM_URW);
+ PG_PREM_URW,
+ VMAP_NULL);
}
// 更新VGA缓冲区位置至虚拟地址
vmm_dup_page(pid_t pid, void* pa)
{
void* new_ppg = pmm_alloc_page(pid, 0);
- vmm_set_mapping(PD_REFERENCED, PG_MOUNT_3, new_ppg, PG_PREM_RW);
- vmm_set_mapping(PD_REFERENCED, PG_MOUNT_4, pa, PG_PREM_RW);
+ vmm_set_mapping(PD_REFERENCED, PG_MOUNT_3, new_ppg, PG_PREM_RW, VMAP_NULL);
+ vmm_set_mapping(PD_REFERENCED, PG_MOUNT_4, pa, PG_PREM_RW, VMAP_NULL);
asm volatile("movl %1, %%edi\n"
"movl %2, %%esi\n"
perm = 0;
}
- return vmm_set_mapping(PD_REFERENCED, heap->brk, 0, PG_WRITE | perm) !=
- NULL;
+ return vmm_set_mapping(
+ PD_REFERENCED, heap->brk, 0, PG_WRITE | perm, VMAP_NULL) != NULL;
}
int
vmm_set_mapping(PD_REFERENCED,
PG_ALIGN(current_brk) + PG_SIZE + i,
0,
- PG_WRITE | user);
+ PG_WRITE | user,
+ VMAP_NULL);
}
}
#include <lunaix/mm/vmm.h>
#include <lunaix/spike.h>
-#include <stdbool.h>
-
/**
 * @brief Initializes the virtual memory manager.
 *
 * Currently a no-op: nothing is set up here. NOTE(review): presumably
 * paging is already configured by earlier boot code — confirm against
 * the boot sequence.
 */
void
vmm_init(void)
{
}
int
-vmm_set_mapping(uintptr_t mnt, uintptr_t va, uintptr_t pa, pt_attr attr)
+vmm_set_mapping(uintptr_t mnt,
+ uintptr_t va,
+ uintptr_t pa,
+ pt_attr attr,
+ int options)
{
assert((uintptr_t)va % PG_SIZE == 0);
// This must be writable
l1pt->entry[l1_inx] = NEW_L1_ENTRY(attr | PG_WRITE, new_l1pt_pa);
memset((void*)l2pt, 0, PG_SIZE);
+ } else {
+ x86_pte_t pte = l2pt->entry[l2_inx];
+ if (pte && (options & VMAP_IGNORE)) {
+ return 1;
+ }
}
if (mnt == PD_REFERENCED) {
return 1;
}
-int
+uintptr_t
vmm_del_mapping(uintptr_t mnt, uintptr_t va)
{
assert(((uintptr_t)va & 0xFFFU) == 0);
cpu_invplg(va);
l2pt->entry[l2_index] = PTE_NULL;
+
+ return PG_ENTRY_ADDR(l2pte);
}
+
+ return 0;
}
int
vmm_lookup(uintptr_t va, v_mapping* mapping)
{
- // va = va & ~0xfff;
-
uint32_t l1_index = L1_INDEX(va);
uint32_t l2_index = L2_INDEX(va);
pmm_mark_page_occupied(KERNEL_PID, FLOOR(ioapic_addr, PG_SIZE_BITS), 0);
vmm_set_mapping(
- PD_REFERENCED, APIC_BASE_VADDR, __APIC_BASE_PADDR, PG_PREM_RW);
- vmm_set_mapping(PD_REFERENCED, IOAPIC_BASE_VADDR, ioapic_addr, PG_PREM_RW);
+ PD_REFERENCED, APIC_BASE_VADDR, __APIC_BASE_PADDR, PG_PREM_RW, VMAP_NULL);
+ vmm_set_mapping(
+ PD_REFERENCED, IOAPIC_BASE_VADDR, ioapic_addr, PG_PREM_RW, VMAP_NULL);
apic_init();
ioapic_init();
multiboot_memory_map_t* mmaps = _k_init_mb_info->mmap_addr;
size_t map_size =
_k_init_mb_info->mmap_length / sizeof(multiboot_memory_map_t);
- v_mapping mapping;
+ // v_mapping mapping;
for (unsigned int i = 0; i < map_size; i++) {
multiboot_memory_map_t mmap = mmaps[i];
if (mmap.type == MULTIBOOT_MEMORY_AVAILABLE) {
size_t pg_num = CEIL(mmap.len_low, PG_SIZE_BITS);
for (size_t j = 0; j < pg_num; j++) {
uintptr_t _pa = pa + (j << PG_SIZE_BITS);
- if (vmm_lookup(_pa, &mapping) && *mapping.pte) {
- continue;
- }
- vmm_set_mapping(PD_REFERENCED, _pa, _pa, PG_PREM_R);
+ // if (vmm_lookup(_pa, &mapping) && *mapping.pte) {
+ // continue;
+ // }
+ vmm_set_mapping(PD_REFERENCED, _pa, _pa, PG_PREM_R, VMAP_IGNORE);
pmm_mark_page_occupied(KERNEL_PID, _pa >> 12, 0);
}
}
__dup_pagetable(pid_t pid, uintptr_t mount_point)
{
void* ptd_pp = pmm_alloc_page(pid, PP_FGPERSIST);
- vmm_set_mapping(PD_REFERENCED, PG_MOUNT_1, ptd_pp, PG_PREM_RW);
+ vmm_set_mapping(PD_REFERENCED, PG_MOUNT_1, ptd_pp, PG_PREM_RW, VMAP_NULL);
x86_page_table* ptd = PG_MOUNT_1;
x86_page_table* pptd = (x86_page_table*)(mount_point | (0x3FF << 12));
}
void* pt_pp = pmm_alloc_page(pid, PP_FGPERSIST);
- vmm_set_mapping(PD_REFERENCED, PG_MOUNT_2, pt_pp, PG_PREM_RW);
+ vmm_set_mapping(
+ PD_REFERENCED, PG_MOUNT_2, pt_pp, PG_PREM_RW, VMAP_NULL);
x86_page_table* ppt = (x86_page_table*)(mount_point | (i << 12));
x86_page_table* pt = PG_MOUNT_2;
for (size_t i = 0; i <= pg_size; i += 4096) {
uintptr_t pa = pmm_alloc_page(KERNEL_PID, PP_FGPERSIST);
- vmm_set_mapping(PD_REFERENCED, &__proc_table + i, pa, PG_PREM_RW);
+ vmm_set_mapping(
+ PD_REFERENCED, &__proc_table + i, pa, PG_PREM_RW, VMAP_NULL);
}
sched_ctx = (struct scheduler){ ._procs = (struct proc_info*)&__proc_table,
-#include <stdint.h>
#include <klibc/string.h>
+#include <stdint.h>
void*
memcpy(void* dest, const void* src, size_t num)
{
- uint8_t* dest_ptr = (uint8_t*)dest;
- const uint8_t* src_ptr = (const uint8_t*)src;
- for (size_t i = 0; i < num; i++) {
- *(dest_ptr + i) = *(src_ptr + i);
- }
+ asm volatile("movl %1, %%edi\n"
+ "rep movsb\n" ::"S"(src),
+ "r"(dest),
+ "c"(num)
+ : "edi", "memory");
return dest;
}
void*
memset(void* ptr, int value, size_t num)
{
- uint8_t* c_ptr = (uint8_t*)ptr;
- for (size_t i = 0; i < num; i++) {
- *(c_ptr + i) = (uint8_t)value;
- }
+ asm volatile("movl %1, %%edi\n"
+ "rep stosb\n" ::"c"(num),
+ "r"(ptr),
+ "a"(value)
+ : "edi", "memory");
return ptr;
}