diff --git a/headers/alloc.h b/headers/alloc.h
index bf25346..2a852e4 100644
--- a/headers/alloc.h
+++ b/headers/alloc.h
@@ -61,7 +61,8 @@ typedef struct Zone {
  * For TINY and SMALL, the zone will be divided in blocks.
  * For LARGE, it will be entire page(s).
  */
-extern Zone *zones[3];
+extern Zone *vzones[3];
+extern Zone *kzones[3];
 
 /*----------- UTILS ----------*/
 block_type_t get_type(size_t size);
@@ -71,6 +72,7 @@ size_t align_mem(size_t addr);
 
 /*-------- ALLOCATOR ---------*/
 int new_vzone(block_type_t type, size_t size);
+int new_kzone(block_type_t type, size_t size);
 /*----------------------------*/
 
 void *vmalloc(size_t size);
@@ -78,3 +80,8 @@ void vfree(void *ptr);
 void *vrealloc(void *ptr, size_t size);
 void show_valloc_mem(void);
 size_t vsize(void *virt_addr);
+void *kmalloc(size_t size);
+void kfree(void *ptr);
+void *krealloc(void *ptr, size_t size);
+void show_kalloc_mem(void);
+size_t ksize(void *phys_addr);
diff --git a/src/kernel.c b/src/kernel.c
index 7cd597b..cc7dd63 100644
--- a/src/kernel.c
+++ b/src/kernel.c
@@ -46,7 +46,7 @@ static void awa(void)
 	uint32_t pid = fork();
 	PRINT_INT(pid);
 	if (pid < 0)
-		kprintf("camille il a une grosse bite (18cm)\n");
+		kprintf("awais");
 	kprintf("awaille\n");
 	if (pid)
 		wait();
@@ -66,11 +66,15 @@ void kernel_main(multiboot_info_t *mbd, uint32_t magic)
 	create_kernel_task();
 	vmalloc(1231231);
 	// uint32_t nb_alloc = 0;
-	// while (vmalloc(10))
+	// while (kmalloc(10))
 	// 	nb_alloc++;
 	// kprintf("%d\n", nb_alloc);
 	for (uint8_t i = 0; i < 10; i++)
 		vmalloc(32);
+	char *str = kmalloc(20);
+	show_kalloc_mem();
+	strcpy(str, "test\n");
+	kprintf("%s", str);
 	// exec_fn(owo);
 	// exec_fn(owo);
 	// exec_fn(owo);
diff --git a/src/memory/phys/allocator.c b/src/memory/phys/allocator.c
new file mode 100644
index 0000000..ec04044
--- /dev/null
+++ b/src/memory/phys/allocator.c
@@ -0,0 +1,70 @@
+#include "alloc.h"
+#include "debug.h"
+#include "kpanic.h"
+#include "kprintf.h"
+#include "memory.h"
+#include "task.h"
+
+Zone *kzones[3];
+
+static void add_zone(Zone *zone, block_type_t type)
+{
+	// We put the zone at the beginning of the list
+	if (kzones[type]) {
+		assert(kzones[type] != zone);
+		zone->next = kzones[type];
+		kzones[type]->prev = zone;
+	}
+	kzones[type] = zone;
+}
+
+static void new_block(Zone *zone, uint32_t zone_size)
+{
+	Block *new_block = (Block *)align_mem((uint32_t)zone + sizeof(Zone));
+
+	// Metadata
+	new_block->in_use = false;
+	new_block->size = zone_size - sizeof(Zone) - sizeof(Block);
+	new_block->sub_size = new_block->size;
+	new_block->ptr = (Block *)((uint32_t)new_block + sizeof(Block));
+	new_block->zone = zone;
+
+	// Init future linked lists
+	new_block->prev = NULL;
+	new_block->prev_free = NULL;
+	new_block->prev_used = NULL;
+	new_block->next = NULL;
+	new_block->next_free = NULL;
+	new_block->next_used = NULL;
+
+	if (zone->free) {
+		zone->free->prev = new_block;
+		zone->free->prev_free = new_block;
+		new_block->next = zone->free;
+		new_block->next_free = zone->free;
+	}
+	zone->free = new_block;
+	assert(zone->free == new_block);
+}
+
+int new_kzone(block_type_t type, uint32_t size)
+{
+	// assert(current_task->pid);
+	void *heap = alloc_frame();
+	if (heap == NULL) {
+		kprintf(KERN_ERR "error: alloc_frame failed\n");
+		return (-1);
+	}
+
+	Zone *zone = (Zone *)heap;
+	zone->type = type;
+	zone->size = size;
+	zone->used = NULL;
+	zone->next = NULL;
+	zone->prev = NULL;
+
+	new_block(zone, size);
+	add_zone(heap, type);
+
+	return (0);
+}
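Aside (not part of the patch): new_kzone() above turns one physical frame into a Zone header followed by a single aligned free Block whose usable size is zone_size - sizeof(Zone) - sizeof(Block). A minimal host-side sketch of that layout arithmetic, assuming a 4096-byte frame, 16-byte alignment and invented header sizes (the real sizeof(Zone), sizeof(Block) and align_mem() come from alloc.h and will differ):

#include <stdint.h>
#include <stdio.h>

#define FRAME_SIZE 4096u /* assumed frame size returned by alloc_frame() */
#define ZONE_HDR 32u     /* invented, stands in for sizeof(Zone) */
#define BLOCK_HDR 48u    /* invented, stands in for sizeof(Block) */

/* toy stand-in for align_mem(): round the address up to 16 bytes */
static uint32_t toy_align(uint32_t addr)
{
	return (addr + 15u) & ~15u;
}

int main(void)
{
	uint32_t frame = 0x100000u;                   /* pretend frame address */
	uint32_t block = toy_align(frame + ZONE_HDR); /* first Block header */
	uint32_t data = block + BLOCK_HDR;            /* what block->ptr points to */
	uint32_t usable = FRAME_SIZE - ZONE_HDR - BLOCK_HDR; /* block->size */

	printf("block header at %#x, data at %#x, %u usable bytes\n",
	       block, data, usable);
	return 0;
}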
diff --git a/src/memory/phys/info.c b/src/memory/phys/info.c
new file mode 100644
index 0000000..ca8ee40
--- /dev/null
+++ b/src/memory/phys/info.c
@@ -0,0 +1,52 @@
+#include "alloc.h"
+#include "kprintf.h"
+#include <stdint.h>
+
+// FULL_INFO is to display (or not) both used and unused blocks
+#define FULL_INFO 1
+
+void show_kalloc_mem(void)
+{
+	char *const zones_name[3] = {"TINY", "SMALL", "LARGE"};
+	uint32_t total_size = 0;
+
+	for (block_type_t type = 0; type < 3; ++type) {
+		int count = 0;
+		for (Zone *zone_it = kzones[type]; zone_it != NULL;
+		     zone_it = zone_it->next) {
+			if (zone_it->used)
+				kprintf("---------- IN_USE %s [n°%d - %p] "
+					"----------\n",
+					zones_name[type], count, zone_it);
+			for (Block *block_it = zone_it->used; block_it != NULL;
+			     block_it = block_it->next_used) {
+				/* i++; */
+				/* if (i < 10) */
+				kprintf("%p - %p : %u bytes\n", block_it->ptr,
+					(uint32_t)block_it->ptr +
+					    block_it->sub_size + sizeof(Block),
+					block_it->sub_size);
+				total_size += block_it->sub_size;
+			}
+			if (zone_it->used)
+				kprintf("\n");
+			count++;
+#if FULL_INFO
+			if (zone_it->free)
+				kprintf("---------- AVAILABLE %s [n°%d - %p] "
+					"----------\n",
+					zones_name[type], count, zone_it);
+			for (Block *block_it = zone_it->free; block_it != NULL;
+			     block_it = block_it->next_free) {
+				kprintf("%p - %p : %u bytes\n", block_it->ptr,
+					(uint32_t)block_it->ptr +
+					    block_it->sub_size + sizeof(Block),
+					block_it->sub_size);
+			}
+			if (zone_it->free)
+				kprintf("\n");
+#endif
+		}
+	}
+	kprintf("Total: %u\n", total_size);
+}
diff --git a/src/memory/phys/kfree.c b/src/memory/phys/kfree.c
new file mode 100644
index 0000000..6002bc3
--- /dev/null
+++ b/src/memory/phys/kfree.c
@@ -0,0 +1,116 @@
+#include "alloc.h"
+#include "kprintf.h"
+#include "memory.h"
+#include <stdint.h>
+
+static void remove_used(Block *to_free)
+{
+	Block *left = to_free->prev_used;
+	Block *right = to_free->next_used;
+
+	to_free->next_used = NULL;
+	to_free->prev_used = NULL;
+
+	if (!left && !right) {
+		to_free->zone->used = NULL;
+		return;
+	}
+	if (!left)
+		to_free->zone->used = right;
+	else
+		left->next_used = right;
+	if (right)
+		right->prev_used = left;
+}
+
+/*
+ * If all the blocks of the zone have been kfreed,
+ * we can unmap the zone and delete it from the list of zones
+ */
+static int unmap_zone(Zone *zone)
+{
+	int err = 0;
+	block_type_t type = zone->type;
+	Zone *left = zone->prev;
+	Zone *right = zone->next;
+	zone->prev = NULL;
+	zone->next = NULL;
+
+	if (!left && !right) {
+		kzones[type] = NULL;
+		goto unmap;
+	}
+	if (!left)
+		kzones[type] = right;
+	else
+		left->next = right;
+	if (right)
+		right->prev = left;
+unmap:
+	err = free_pages((void *)zone, zone->size);
+	if (err)
+		kprintf(KERN_ERR "error: munmap failed\n");
+	return (err);
+}
+
+/*
+ * If the newly kfreed block is next to another previously
+ * kfreed block, merge both of these and update the size
+ */
+static Block *merge_blocks(Block *left, Block *right)
+{
+	if (right->next)
+		right->next->prev = left;
+	if (right->next_free) {
+		right->next_free->prev_free = left;
+		left->next_free = right->next_free;
+	}
+	left->next = right->next;
+	left->size += right->size + sizeof(Block);
+	return (left);
+}
+
+// Simply add the new block to the list of available blocks
+static int add_available(Block *available, Block *merged)
+{
+	Zone *zone = available->zone;
+	if (merged != zone->free && available != zone->free)
+		available->next_free = zone->free;
+	if (zone->free)
+		zone->free->prev_free = available;
+	zone->free = available;
+	if (zone->type == LARGE)
+		return (unmap_zone(zone));
+	return (0);
+}
+
+/*
+ * ptr: pointer to kfree, if the pointer is invalid the kfree()
+ * function will have an undefined behavior (most likely segfault)
+ *
+ * First, we remove the block from the list of in_use blocks
+ * Then, we check if the block needs to be merged with another
+ * neighboring block, if so we replace the previous block by the
+ * newly merged block
+ * Finally, we add the block to the list of available blocks
+ */
+void kfree(void *ptr)
+{
+	if (ptr == NULL)
+		return;
+	Block *to_free = (Block *)((uint32_t)ptr - sizeof(Block));
+	Block *to_merge = NULL;
+	to_free->in_use = false;
+	remove_used(to_free);
+	if (to_free->prev && !to_free->prev->in_use) {
+		to_merge = to_free;
+		to_free = merge_blocks(to_free->prev, to_free);
+	}
+	if (to_free->next && !to_free->next->in_use) {
+		to_merge = to_free->next;
+		to_free = merge_blocks(to_free, to_free->next);
+	}
+	int err = add_available(to_free, to_merge);
+	if (err)
+		kprintf(KERN_ERR "kfree: fatal error\n");
+}
diff --git a/src/memory/phys/kmalloc.c b/src/memory/phys/kmalloc.c
new file mode 100644
index 0000000..60328c5
--- /dev/null
+++ b/src/memory/phys/kmalloc.c
@@ -0,0 +1,162 @@
+#include "alloc.h"
+#include "debug.h"
+#include "kprintf.h"
+#include "terminal.h"
+#include <stdint.h>
+
+/*
+ * Find first available (not in_use) block
+ * in a zone matching the size we need
+ */
+static Block *find_block(Zone *head, uint32_t size)
+{
+	for (Zone *zone_it = head; zone_it != NULL; zone_it = zone_it->next) {
+		for (Block *block_it = zone_it->free; block_it != NULL;
+		     block_it = block_it->next_free) {
+			assert(block_it);
+			assert(!block_it->in_use);
+			if (size <= block_it->size) {
+				assert(block_it->zone == zone_it);
+				return (block_it);
+			}
+		}
+	}
+	return (NULL);
+}
+
+// PARTIALLY DEPRECATED
+/*
+ * This will split the newly allocated block to use
+ * the remaining bytes for a new block
+ * This is our linked list of blocks
+ * ... -> [5] -> [6] -> ...
+ * After the allocation, this will become
+ * ... -> [5] -> [new] -> [6] -> ...
+ *
+ * For an example of [5].size = 32 and requiring a kmalloc of 10
+ * Let's say the metadata takes a size of 2:
+ * ... -> [metadata][data][remaining size] -> [6]
+ *            ^        ^          ^
+ *            2        10         20
+ *
+ * So now our block [new] will become:
+ * [5] -> [metadata][available data] -> [6]
+ *            ^              ^
+ *            2              18
+ * We can see that it now has its own metadata and available
+ * data and it points towards [6]
+ */
+static void frag_block(Zone *zone, Block *old_block, uint32_t size)
+{
+	Block *new_block = (Block *)align_mem((uint32_t)old_block + size);
+	assert(new_block <
+	       (Block *)((uint32_t)zone + get_zone_size(zone->type)));
+
+	if (old_block->size - align_mem(size) < sizeof(Block)) {
+		zone->free = NULL;
+		goto last_block;
+	}
+
+	// Newly created block metadata
+	new_block->size = old_block->size - align_mem(size);
+	new_block->sub_size = new_block->size;
+	new_block->in_use = false;
+	new_block->ptr = (void *)((uint32_t)new_block + sizeof(Block));
+	new_block->zone = zone;
+
+	new_block->prev = old_block;
+	new_block->next = old_block->next;
+	old_block->next = new_block;
+
+	new_block->prev_used = NULL;
+	new_block->next_used = NULL;
+
+	new_block->prev_free = old_block->prev_free;
+	new_block->next_free = NULL;
+	if (new_block != old_block->next_free)
+		new_block->next_free = old_block->next_free;
+
+	if (zone->free == old_block)
+		zone->free = new_block;
+
+last_block:
+	old_block->next_free = NULL;
+	old_block->prev_free = NULL;
+
+	// Newly in_use block metadata
+	old_block->in_use = true;
+	old_block->size = size - sizeof(Block);
+	old_block->sub_size = old_block->size;
+
+	if (zone->used == NULL) {
+		zone->used = old_block;
+		return;
+	}
+	old_block->prev_used = NULL;
+	old_block->next_used = zone->used;
+	zone->used->prev_used = old_block;
+	zone->used = old_block;
+}
+
+// Set the block to used and unset free
+static void save_block(Zone *head, Block *block, Zone *zone)
+{
+	zone->free = NULL;
+	block->in_use = true;
+	if (head->used) {
+		head->used->prev_used = block;
+		head->used->prev = block;
+		block->next = head->used;
+		block->next_used = head->used;
+	}
+	head->used = block;
+}
+
+/*
+ * size: size needed by the user to get allocated
+ *
+ * First, we init the allocator if it's the first time
+ * Then we search if there is an available block in all
+ * the zones currently mapped
+ * If no block has been found (NULL), we create 1 new zone of
+ * the corresponding type
+ * We then search again for an available block (should not be NULL)
+ * Finally, if type == LARGE, we just have to change the block to used
+ * else, we frag the block to use just what's needed
+ *
+ * ptr: returns the aligned pointer of the block (after the metadata)
+ */
+void *kmalloc(uint32_t size)
+{
+	void *ptr = NULL;
+
+	if (size == 0) {
+		kprintf(KERN_WARNING "kmalloc: can't kmalloc(0)\n");
+		return NULL;
+	}
+
+	// Find the zone we need to search
+	block_type_t type = get_type(size);
+	Zone *head = kzones[type];
+
+	// Find an available block in a zone of type "type"
+	Block *available = find_block(head, size);
+	if (available == NULL) {
+		uint32_t full_size;
+		if (type == LARGE)
+			full_size = size + sizeof(Block) + sizeof(Zone);
+		else
+			full_size = get_zone_size(type);
+		if (new_kzone(type, full_size) == -1)
+			return NULL;
+		head = kzones[type];
+		available = find_block(head, size);
+	}
+	assert(available != NULL);
+	if (type == LARGE)
+		save_block(head, available, available->zone);
+	else
+		frag_block(available->zone, available, size + sizeof(Block));
+	ptr = available->ptr;
+	return ptr;
+}
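Aside (not part of the patch): a short usage sketch of the kmalloc()/ksize()/kfree() path described in the comment above. The helper name and the sizes are invented for illustration; whether 24 bytes lands in a TINY/SMALL zone and two pages in a LARGE one depends on get_type() and get_zone_size(), which are not shown in this diff.

#include "alloc.h"
#include "kprintf.h"

/* Illustrative only: exercise a small allocation (fragged block) and a
 * large one (dedicated zone), then release both and dump the state. */
static void kmalloc_smoke_test(void)
{
	char *small = kmalloc(24);
	char *big = kmalloc(2 * 4096);

	if (small == NULL || big == NULL) {
		kprintf(KERN_ERR "kmalloc_smoke_test: allocation failed\n");
		return;
	}
	kprintf("small=%p (%u bytes), big=%p (%u bytes)\n",
		small, ksize(small), big, ksize(big));
	kfree(big);   /* a LARGE zone is unmapped as soon as it is kfreed */
	kfree(small);
	show_kalloc_mem();
}

It could be called from kernel_main(), next to the kmalloc(20) test this patch already adds.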
diff --git a/src/memory/phys/krealloc.c b/src/memory/phys/krealloc.c
new file mode 100644
index 0000000..647de68
--- /dev/null
+++ b/src/memory/phys/krealloc.c
@@ -0,0 +1,37 @@
+#include "alloc.h"
+#include "string.h"
+#include <stdint.h>
+
+// Prototype for kfree and kmalloc
+void kfree(void *ptr);
+void *kmalloc(uint32_t size);
+
+/*
+ * ptr: block to resize (undefined behavior if invalid)
+ * size: size needed by the user to get kreallocated
+ *
+ * If we have a size <= to the previous size, we don't have
+ * to do anything, we just change sub_size for info purposes
+ * and return the same pointer
+ * Else, we allocate a new block and copy the content of
+ * the previous block in the new one and kfree the old block
+ *
+ * ptr: returns the aligned pointer of the kreallocated block
+ */
+void *krealloc(void *ptr, uint32_t size)
+{
+	void *new_ptr = NULL;
+	if (ptr == NULL)
+		return NULL;
+	Block *block = (Block *)((uint32_t)ptr - sizeof(Block));
+	if (block->size >= size) {
+		block->sub_size = size;
+		return (ptr);
+	}
+	new_ptr = kmalloc(size);
+	if (new_ptr == NULL)
+		return NULL;
+	memmove(new_ptr, ptr, block->size);
+	kfree(ptr);
+	return (new_ptr);
+}
diff --git a/src/memory/phys/ksize.c b/src/memory/phys/ksize.c
new file mode 100644
index 0000000..952d993
--- /dev/null
+++ b/src/memory/phys/ksize.c
@@ -0,0 +1,9 @@
+#include "alloc.h"
+#include <stdint.h>
+
+uint32_t ksize(void *ptr)
+{
+	Block *meta_data = (Block *)((uint32_t)ptr - sizeof(Block));
+
+	return meta_data->sub_size;
+}
diff --git a/src/memory/virt/allocator.c b/src/memory/virt/allocator.c
index 3285498..ec465e3 100644
--- a/src/memory/virt/allocator.c
+++ b/src/memory/virt/allocator.c
@@ -5,17 +5,17 @@
 #include "memory.h"
 #include "task.h"
 
-Zone *zones[3];
+Zone *vzones[3];
 
 static void add_zone(Zone *zone, block_type_t type)
 {
 	// We put the zone at the beginning of the list
-	if (zones[type]) {
-		assert(zones[type] != zone);
-		zone->next = zones[type];
-		zones[type]->prev = zone;
+	if (vzones[type]) {
+		assert(vzones[type] != zone);
+		zone->next = vzones[type];
+		vzones[type]->prev = zone;
 	}
-	zones[type] = zone;
+	vzones[type] = zone;
 }
 
 static void new_block(Zone *zone, uint32_t zone_size)
diff --git a/src/memory/virt/info.c b/src/memory/virt/info.c
index 175ab8c..885e87c 100644
--- a/src/memory/virt/info.c
+++ b/src/memory/virt/info.c
@@ -12,7 +12,7 @@ void show_valloc_mem(void)
 
 	for (block_type_t type = 0; type < 3; ++type) {
 		int count = 0;
-		for (Zone *zone_it = zones[type]; zone_it != NULL;
+		for (Zone *zone_it = vzones[type]; zone_it != NULL;
 		     zone_it = zone_it->next) {
 			if (zone_it->used)
 				kprintf("---------- IN_USE %s [n°%d - %p] "
diff --git a/src/memory/virt/vfree.c b/src/memory/virt/vfree.c
index 40bb27d..70a3dcf 100644
--- a/src/memory/virt/vfree.c
+++ b/src/memory/virt/vfree.c
@@ -37,11 +37,11 @@ static int unmap_zone(Zone *zone)
 	zone->next = NULL;
 
 	if (!left && !right) {
-		zones[type] = NULL;
+		vzones[type] = NULL;
 		goto unmap;
 	}
 	if (!left)
-		zones[type] = right;
+		vzones[type] = right;
 	else
 		left->next = right;
 	if (right)
diff --git a/src/memory/virt/vmalloc.c b/src/memory/virt/vmalloc.c
index ade2cf2..31a23b8 100644
--- a/src/memory/virt/vmalloc.c
+++ b/src/memory/virt/vmalloc.c
@@ -137,7 +137,7 @@ void *vmalloc(uint32_t size)
 
 	// Find the zone we need to search
 	block_type_t type = get_type(size);
-	Zone *head = zones[type];
+	Zone *head = vzones[type];
 
 	// Find an available block in a zone of type "type"
 	Block *available = find_block(head, size);
@@ -149,7 +149,7 @@
 			full_size = get_zone_size(type);
 		if (new_vzone(type, full_size) == -1)
 			return NULL;
-		head = zones[type];
+		head = vzones[type];
 		available = find_block(head, size);
 	}
 	assert(available != NULL);
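Aside (not part of the patch): a sketch of the krealloc() behaviour documented in src/memory/phys/krealloc.c — shrinking keeps the same block and only updates sub_size, while growing kmallocs a new block, copies block->size bytes and kfrees the old one. The function name and sizes below are invented; which zone a 4096-byte request ends up in depends on get_type().

#include "alloc.h"
#include "kprintf.h"

static void krealloc_demo(void)
{
	char *p = kmalloc(64);
	if (p == NULL)
		return;

	/* Shrink: the existing block still fits, so the same pointer
	 * comes back and ksize() now reports the smaller sub_size. */
	char *q = krealloc(p, 16);
	kprintf("shrink: %s, ksize=%u\n",
		q == p ? "same block" : "moved", ksize(q));

	/* Grow: a new block is allocated, the old contents are copied
	 * over, and the old block is kfreed. */
	char *r = krealloc(q, 4096);
	if (r == NULL)
		return;
	kprintf("grow: %s, ksize=%u\n",
		r == q ? "same block" : "moved", ksize(r));
	kfree(r);
}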