#define BS_SIZE (KERNEL_MM_BASE - UMMAP_START)
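
/* returns nonzero when the half-open range [start, end) overlaps an
   existing region in `regions`; end is treated as exclusive here,
   matching the updated call sites below */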
int
-mem_has_overlap(vm_regions_t* regions, ptr_t start, size_t len)
+mem_has_overlap(vm_regions_t* regions, ptr_t start, ptr_t end)
{
-    ptr_t end = start + end - 1;
    struct mm_region *pos, *n;
    llist_for_each(pos, n, regions, head)
    {
        if (pos->start < end && start < pos->end) {
            return 1;
        }
    }

    return 0;
}
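+
+/* resize `region` in place so it ends at `newend`. Returns 0 on
+   success, EINVAL if newend precedes the region start, ENOMEM if
+   growing would collide with a neighbouring region. */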
+int
+mem_adjust_inplace(vm_regions_t* regions,
+                   struct mm_region* region,
+                   ptr_t newend)
+{
+    ssize_t len = newend - region->start;
+    if (len == 0) {
+        return 0;
+    }
+
+    if (len < 0) {
+        return EINVAL;
+    }
+
+    /* test only the grown span [region->end, newend): testing from
+       region->start would always report the region itself as an
+       overlap */
+    if (newend > region->end
+        && mem_has_overlap(regions, region->end, newend)) {
+        return ENOMEM;
+    }
+
+    region->end = newend;
+
+    return 0;
+}
+
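/* place a new mapping: honour MAP_FIXED_NOREPLACE for caller-fixed
   addresses, otherwise scan the region list for the first gap large
   enough to hold param->mlen bytes */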
int
mem_map(void** addr_out,
        struct mm_region** created,
    vm_regions_t* vm_regions = &param->pvms->regions;
    if ((param->flags & MAP_FIXED_NOREPLACE)) {
-        if (mem_has_overlap(vm_regions, found_loc, param->mlen)) {
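+        /* the overlap helper now takes an exclusive end address, not a length */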
+        if (mem_has_overlap(vm_regions, found_loc, found_loc + param->mlen)) {
            return EEXIST;
        }
        goto found;
        {
            if (last_end < found_loc) {
                size_t avail_space = pos->start - found_loc;
-                if ((int)avail_space > 0 && avail_space > param->mlen) {
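+                /* compare the bounds directly: casting the unsigned gap to
+                   int misreads it once it underflows or exceeds INT_MAX */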
+                if (pos->start > found_loc && avail_space > param->mlen) {
                    goto found;
                }
                found_loc = pos->end + PG_SIZE;
            }
-            last_end = pos->end + PG_SIZE;
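+            /* record the true region end; the PG_SIZE guard gap is only
+               applied when advancing found_loc */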
+            last_end = pos->end;
        }
        return ENOMEM;
    }
}
-    while (&pos->head != regions && cur_addr > pos->start) {
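+    /* >= : also handle a region that starts exactly at cur_addr */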
+    while (&pos->head != regions && cur_addr >= pos->start) {
        u32_t l = pos->end - cur_addr;
        pos->end = cur_addr;