From 2520dd0254628cc07743cc98138c8a006b63f6bf Mon Sep 17 00:00:00 2001 From: "Enrico Fraccaroli (Galfurian)" Date: Mon, 30 Sep 2024 09:37:16 -0400 Subject: [PATCH 1/9] Comment main structures and macros of slab.c and __alloc_slab_page. --- mentos/src/mem/slab.c | 164 ++++++++++++++++++++++++++++++------------ 1 file changed, 120 insertions(+), 44 deletions(-) diff --git a/mentos/src/mem/slab.c b/mentos/src/mem/slab.c index 234f2ffb..ce9e9177 100644 --- a/mentos/src/mem/slab.c +++ b/mentos/src/mem/slab.c @@ -1,5 +1,8 @@ -/// @file mouse.h -/// @brief Driver for *PS2* Mouses. +/// @file slab.c +/// @brief Memory slab allocator implementation in kernel. This file provides +/// functions for managing memory allocation using the slab allocator technique. +/// Slab allocators are efficient in managing frequent small memory allocations +/// with minimal fragmentation. /// @copyright (c) 2014-2024 This file is distributed under the MIT License. /// See LICENSE.md for details. @@ -14,63 +17,122 @@ #include "mem/slab.h" #include "mem/zone_allocator.h" -/// @brief Use it to manage cached pages. +/// @brief Structure to represent an individual memory object within a slab. +/// @details This structure is used to manage individual objects allocated from +/// the slab. It contains a linked list to connect objects in the cache. typedef struct kmem_obj { - /// The list_head for this object. + /// @brief Linked list node for tracking objects in the slab. list_head objlist; -} kmem_obj; +} kmem_obj_t; -/// Max order of kmalloc cache allocations, if greater raw page allocation is done. +/// @brief Maximum order of kmalloc cache allocations. +/// @details If a requested memory allocation exceeds this order, a raw page +/// allocation is done instead of using the slab cache. #define MAX_KMALLOC_CACHE_ORDER 12 -#define KMEM_OBJ_OVERHEAD sizeof(kmem_obj) -#define KMEM_START_OBJ_COUNT 8 -#define KMEM_MAX_REFILL_OBJ_COUNT 64 -#define KMEM_OBJ(cachep, addr) ((kmem_obj *)(addr)) -#define ADDR_FROM_KMEM_OBJ(cachep, kmem_obj) ((void *)(kmem_obj)) +/// @brief Overhead size for each memory object in the slab cache. +/// @details This defines the extra space required for managing the object, +/// including the `kmem_obj` structure itself. +#define KMEM_OBJ_OVERHEAD sizeof(kmem_obj_t) -// The list of caches. +/// @brief Initial object count for each slab. +/// @details The starting number of objects in a newly allocated slab cache. +#define KMEM_START_OBJ_COUNT 8 + +/// @brief Maximum number of objects to refill in a slab cache at once. +/// @details This defines the upper limit on how many objects to replenish in +/// the slab when it runs out of free objects. +#define KMEM_MAX_REFILL_OBJ_COUNT 64 + +/// @brief Macro to convert an address into a kmem_obj pointer. +/// @param addr Address of the object. +/// @return Pointer to a kmem_obj structure. +#define KMEM_OBJ_FROM_ADDR(addr) ((kmem_obj_t *)(addr)) + +/// @brief Macro to get the address from a kmem_obj structure. +/// @param object Pointer to the kmem_obj structure. +/// @return Address of the object as a `void *`. +#define ADDR_FROM_KMEM_OBJ(object) ((void *)(object)) + +/// @brief List of all active memory caches in the system. static list_head kmem_caches_list; -// Cache where we will store the data about caches. + +/// @brief Cache used for managing metadata about the memory caches themselves. static kmem_cache_t kmem_cache; -// Caches for each order of the malloc. + +/// @brief Array of slab caches for different orders of kmalloc. 
 static kmem_cache_t *malloc_blocks[MAX_KMALLOC_CACHE_ORDER];
 
+/// @brief Allocates and initializes a new slab page for a memory cache.
+/// @param cachep Pointer to the memory cache (`kmem_cache_t`) for which a new
+/// slab page is being allocated.
+/// @param flags Allocation flags (e.g., GFP_KERNEL) passed to control memory
+/// allocation behavior.
+/// @return 0 on success, -1 on failure.
 static int __alloc_slab_page(kmem_cache_t *cachep, gfp_t flags)
 {
-    // ALlocate the required number of pages.
+    // Allocate the required number of pages for the slab based on the cache's
+    // gfp_order. The higher the gfp_order, the more pages are allocated.
     page_t *page = _alloc_pages(flags, cachep->gfp_order);
+
+    // Check if page allocation failed.
     if (!page) {
         pr_crit("Failed to allocate a new page from slab.\n");
         return -1;
     }
-    // Initialize the lists.
-    list_head_init(&page->slabs);
-    list_head_init(&page->slab_freelist);
-    // Save in the root page the kmem_cache_t pointer, to allow freeing
-    // arbitrary pointers.
+
+    // Initialize the linked lists for the slab page: `slabs` links this page
+    // into the cache's slab lists, while `slab_freelist` tracks the free objects.
+    list_head_init(&page->slabs);         // Node linking this page into the cache's slab lists.
+    list_head_init(&page->slab_freelist); // Head of the free list (unused objects).
+
+    // Save a reference to the `kmem_cache_t` structure in the root page.
+    // This is necessary for freeing arbitrary pointers and tracking cache ownership.
     page[0].container.slab_cache = cachep;
-    // Update slab main pages of all child pages, to allow reconstructing which
-    // page handles a specified address
+
+    // Update the slab main page pointer for all child pages (in case the allocation
+    // consists of multiple pages) to point back to the root page.
+    // This helps in reconstructing the main slab page when dealing with subpages.
     for (unsigned int i = 1; i < (1U << cachep->gfp_order); i++) {
-        page[i].container.slab_main_page = page;
+        page[i].container.slab_main_page = page; // Link child pages to the main page.
    }
-    // Compute the slab size.
+
+    // Calculate the total size of the slab (in bytes) by multiplying the page size
+    // by the number of pages allocated (determined by the cache's gfp_order).
     unsigned int slab_size = PAGE_SIZE * (1U << cachep->gfp_order);
-    // Update the page objects counters.
-    page->slab_objcnt = slab_size / cachep->size;
-    page->slab_objfree = page->slab_objcnt;
-    // Get the page address.
+
+    // Update object counters for the page.
+    // The total number of objects in the slab is determined by the slab size
+    // divided by the size of each object in the cache.
+    page->slab_objcnt = slab_size / cachep->size; // Total number of objects.
+    page->slab_objfree = page->slab_objcnt;       // Initially, all objects are free.
+
+    // Get the low-memory virtual address of the allocated slab page.
     unsigned int pg_addr = get_lowmem_address_from_page(page);
-    // Build the objects structures
+
+    // Check if `get_lowmem_address_from_page` failed.
+    if (!pg_addr) {
+        pr_crit("Failed to get low memory address for slab page.\n");
+        return -1;
+    }
+
+    // Loop through each object in the slab and initialize its kmem_obj structure.
+    // Each object is inserted into the free list, indicating that it is available.
     for (unsigned int i = 0; i < page->slab_objcnt; i++) {
-        kmem_obj *obj = KMEM_OBJ(cachep, pg_addr + cachep->size * i);
+        // Calculate the object's address by adding the offset (i * object size) to the page address.
+ kmem_obj_t *obj = KMEM_OBJ_FROM_ADDR(pg_addr + cachep->size * i); + + // Insert the object into the slab's free list, making it available for allocation. list_head_insert_after(&obj->objlist, &page->slab_freelist); } - // Add the page to the slab list and update the counters + + // Insert the page into the cache's list of free slab pages. list_head_insert_after(&page->slabs, &cachep->slabs_free); - cachep->total_num += page->slab_objcnt; - cachep->free_num += page->slab_objcnt; + + // Update the cache's total object counters to reflect the new slab. + cachep->total_num += page->slab_objcnt; // Increase the total number of objects in the cache. + cachep->free_num += page->slab_objcnt; // Increase the number of free objects. + return 0; } @@ -138,16 +200,16 @@ static inline void *__kmem_cache_alloc_slab(kmem_cache_t *cachep, page_t *slab_p } slab_page->slab_objfree--; cachep->free_num--; - - kmem_obj *obj = list_entry(elem_listp, kmem_obj, objlist); - + kmem_obj_t *obj = list_entry(elem_listp, kmem_obj_t, objlist); + if (!obj) { + pr_warning("The kmem object is invalid\n"); + return NULL; + } // Get the element from the kmem_obj object - void *elem = ADDR_FROM_KMEM_OBJ(cachep, obj); - + void *elem = ADDR_FROM_KMEM_OBJ(obj); if (cachep->ctor) { cachep->ctor(elem); } - return elem; } @@ -241,7 +303,6 @@ void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags) if (flags == 0) { flags = cachep->flags; } - // Refill the cache in an exponential fashion, capping at KMEM_MAX_REFILL_OBJ_COUNT to avoid // too big allocations __kmem_cache_refill(cachep, min(cachep->total_num, KMEM_MAX_REFILL_OBJ_COUNT), flags); @@ -254,15 +315,30 @@ void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags) // Add a free slab to partial list because in any case an element will // be removed before the function returns list_head *free_slab = list_head_pop(&cachep->slabs_free); + if (!free_slab) { + pr_crit("We retrieved an invalid slab from the free list."); + return NULL; + } list_head_insert_after(free_slab, &cachep->slabs_partial); } - page_t *slab_page = list_entry(cachep->slabs_partial.next, page_t, slabs); - void *ptr = __kmem_cache_alloc_slab(cachep, slab_page); + if (!slab_page) { + pr_crit("We retrieved an invalid slab from the partial list."); + return NULL; + } + void *ptr = __kmem_cache_alloc_slab(cachep, slab_page); + if (!ptr) { + pr_crit("We failed to allocate a slab."); + return NULL; + } - // If the slab is now full, add it to the full slabs list + // If the slab is now full, add it to the full slabs list. if (slab_page->slab_objfree == 0) { list_head *slab_full_elem = list_head_pop(&cachep->slabs_partial); + if (!slab_full_elem) { + pr_crit("We retrieved an invalid slab from the partial list."); + return NULL; + } list_head_insert_after(slab_full_elem, &cachep->slabs_full); } #ifdef ENABLE_CACHE_TRACE @@ -293,7 +369,7 @@ void kmem_cache_free(void *ptr) cachep->dtor(ptr); } - kmem_obj *obj = KMEM_OBJ(cachep, ptr); + kmem_obj_t *obj = KMEM_OBJ_FROM_ADDR(ptr); // Add object to the free list list_head_insert_after(&obj->objlist, &slab_page->slab_freelist); From eb9eea3b38953a9ce2f9a3768cc3a2f84b857e2b Mon Sep 17 00:00:00 2001 From: "Enrico Fraccaroli (Galfurian)" Date: Mon, 30 Sep 2024 10:19:12 -0400 Subject: [PATCH 2/9] Add error checking to list_head and slab. 
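
Most list_head helpers now assert that the pointers they receive are valid, and
the slab/kmalloc entry points report failures through return values instead of
assuming success. Callers are therefore expected to check those results. A
minimal sketch of the intended call pattern (illustrative only, not part of the
diff; `foo_t`, `foo_cache`, and `foo_subsystem_init` are hypothetical names):

    #include "mem/slab.h"

    /// Hypothetical object type, used only for this example.
    typedef struct foo {
        int value;
    } foo_t;

    static kmem_cache_t *foo_cache;

    int foo_subsystem_init(void)
    {
        // kmem_cache_create() returns NULL when the cache cannot be set up.
        foo_cache = kmem_cache_create("foo_t", sizeof(foo_t), alignof(foo_t),
                                      GFP_KERNEL, NULL, NULL);
        if (!foo_cache) {
            return -1;
        }
        // kmem_cache_alloc() returns NULL when no slab can be refilled.
        foo_t *foo = kmem_cache_alloc(foo_cache, GFP_KERNEL);
        if (!foo) {
            kmem_cache_destroy(foo_cache);
            return -1;
        }
        foo->value = 42;
        kmem_cache_free(foo);
        return 0;
    }
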
--- libc/inc/sys/list_head.h | 52 ++++-- mentos/inc/mem/slab.h | 64 +++++--- mentos/src/kernel.c | 9 +- mentos/src/mem/slab.c | 339 ++++++++++++++++++++++++++++++--------- 4 files changed, 347 insertions(+), 117 deletions(-) diff --git a/libc/inc/sys/list_head.h b/libc/inc/sys/list_head.h index f13244a3..7c956041 100644 --- a/libc/inc/sys/list_head.h +++ b/libc/inc/sys/list_head.h @@ -10,10 +10,8 @@ /// @brief Structure used to implement the list_head data structure. typedef struct list_head { - /// @brief The previous element. - struct list_head *prev; - /// @brief The subsequent element. - struct list_head *next; + struct list_head *prev; ///< The previous element. + struct list_head *next; ///< The subsequent element. } list_head; /// @brief Get the struct for this entry. @@ -66,6 +64,7 @@ typedef struct list_head { /// @param head The head of your list. static inline void list_head_init(list_head *head) { + assert(head && "Variable head is NULL."); // Ensure head is not NULL head->next = head->prev = head; } @@ -74,7 +73,7 @@ static inline void list_head_init(list_head *head) /// @return 1 if empty, 0 otherwise. static inline int list_head_empty(const list_head *head) { - assert(head && "Variable head is NULL."); + assert(head && "Variable head is NULL."); // Ensure head is not NULL return head->next == head; } @@ -83,6 +82,8 @@ static inline int list_head_empty(const list_head *head) /// @return the size of the list. static inline unsigned list_head_size(const list_head *head) { + assert(head && "Variable head is NULL."); // Ensure head is not NULL + unsigned size = 0; if (!list_head_empty(head)) { list_for_each_decl(it, head) size += 1; @@ -91,12 +92,15 @@ static inline unsigned list_head_size(const list_head *head) } /// @brief Insert the new entry after the given location. -/// @param new_entry the new element we want to insert. -/// @param location the element after which we insert. +/// @param new_entry The new element we want to insert. +/// @param location The element after which we insert. static inline void list_head_insert_after(list_head *new_entry, list_head *location) { - assert(new_entry && "Variable new_entry is NULL."); - assert(location && "Variable location is NULL."); + assert(new_entry && "Variable new_entry is NULL."); // Check for NULL new_entry + assert(location && "Variable location is NULL."); // Check for NULL location + assert(location->prev && "Variable location->prev is NULL."); // Check location is valid + assert(location->next && "Variable location->next is NULL."); // Check location is valid + // We store the old `next` element. list_head *old_next = location->next; // We insert our element. @@ -132,10 +136,12 @@ static inline void list_head_insert_before(list_head *new_entry, list_head *loca /// @param entry the entry we want to remove. static inline void list_head_remove(list_head *entry) { + assert(entry && "Variable entry is NULL."); // Check for NULL entry + assert(entry->prev && "Attribute entry->prev is NULL."); // Check previous pointer + assert(entry->next && "Attribute entry->next is NULL."); // Check next pointer + // Check if the element is actually in a list. if (!list_head_empty(entry)) { - assert(entry->prev && "Attribute entry->prev is NULL."); - assert(entry->next && "Attribute entry->next is NULL."); // We link the `previous` element to the `next` one. entry->prev->next = entry->next; // We link the `next` element to the `previous` one. 
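/*
 * Illustrative aside (not part of this patch): the assertions above protect
 * the usual intrusive-list pattern, in which a `list_head` is embedded in a
 * containing structure and recovered with `list_entry`. A minimal sketch,
 * assuming a hypothetical `item_t` type:
 */
typedef struct item {
    int value;
    list_head node; ///< Node embedded in the item, linked into some list.
} item_t;

static inline void item_list_example(void)
{
    list_head queue;
    item_t a;

    // Heads and nodes must be initialized before use.
    list_head_init(&queue);
    list_head_init(&a.node);

    // Link the item into the list through its embedded node.
    list_head_insert_after(&a.node, &queue);

    // Walk the list and recover the containing structures.
    list_for_each_decl(it, &queue)
    {
        item_t *item = list_entry(it, item_t, node);
        (void)item->value;
    }

    // Unlink the item; its neighbours are re-linked around it.
    list_head_remove(&a.node);
}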
@@ -151,6 +157,8 @@ static inline void list_head_remove(list_head *entry) /// @return a list_head pointing to the element we removed, NULL on failure. static inline list_head *list_head_pop(list_head *head) { + assert(head && "Variable head is NULL."); // Check for NULL head + // Check if the list is not empty. if (!list_head_empty(head)) { // Store the pointer. @@ -168,11 +176,14 @@ static inline list_head *list_head_pop(list_head *head) /// @param secondary the secondary list, which gets appended, and re-initialized as empty. static inline void list_head_append(list_head *main, list_head *secondary) { + assert(main && "Variable main is NULL."); // Check for NULL main + assert(secondary && "Variable secondary is NULL."); // Check for NULL secondary + // Check that both lists are actually filled with entries. if (!list_head_empty(main) && !list_head_empty(secondary)) { - assert(main->prev && "Attribute main->prev is NULL."); - assert(secondary->next && "Attribute secondary->next is NULL."); - assert(secondary->prev && "Attribute secondary->prev is NULL."); + assert(main->prev && "Attribute main->prev is NULL."); // Check main's previous pointer + assert(secondary->next && "Attribute secondary->next is NULL."); // Check secondary's next pointer + assert(secondary->prev && "Attribute secondary->prev is NULL."); // Check secondary's previous pointer // Connect the last element of the main list to the first one of the secondary list. main->prev->next = secondary->next; // Connect the first element of the secondary list to the last one of the main list. @@ -191,11 +202,15 @@ static inline void list_head_append(list_head *main, list_head *secondary) /// @param entry2 the second entry which will take the place of the first entry. static inline void list_head_replace(list_head *entry1, list_head *entry2) { + assert(entry1 && "Variable entry1 is NULL."); // Check for NULL entry1 + assert(entry2 && "Variable entry2 is NULL."); // Check for NULL entry2 + // First we need to remove the second entry. list_head_remove(entry2); - assert(entry2->next && "Attribute entry2->next is NULL."); - assert(entry2->prev && "Attribute entry2->prev is NULL."); - // Then, we can place second entry where the first entry is. + assert(entry2->next && "Attribute entry2->next is NULL."); // Check entry2's next pointer + assert(entry2->prev && "Attribute entry2->prev is NULL."); // Check entry2's previous pointer + + // Then, we can place the second entry where the first entry is. entry2->next = entry1->next; entry2->next->prev = entry2; entry2->prev = entry1->prev; @@ -209,6 +224,9 @@ static inline void list_head_replace(list_head *entry1, list_head *entry2) /// @param entry2 the second entry. static inline void list_head_swap(list_head *entry1, list_head *entry2) { + assert(entry1 && "Variable entry1 is NULL."); // Check for NULL entry1 + assert(entry2 && "Variable entry2 is NULL."); // Check for NULL entry2 + list_head *pos = entry2->prev; list_head_replace(entry1, entry2); if (pos == entry1) { diff --git a/mentos/inc/mem/slab.h b/mentos/inc/mem/slab.h index 4157df0d..99d1237e 100644 --- a/mentos/inc/mem/slab.h +++ b/mentos/inc/mem/slab.h @@ -55,17 +55,26 @@ typedef struct kmem_cache_t { list_head slabs_free; } kmem_cache_t; -/// Initialize the slab system -void kmem_cache_init(void); - -/// @brief Creates a new slab cache. -/// @param name Name of the cache. -/// @param size Size of the objects contained inside the cache. -/// @param align Memory alignment for the objects inside the cache. 
-/// @param flags Flags used to define the properties of the cache. -/// @param ctor Constructor for initializing the cache elements. -/// @param dtor Destructor for finalizing the cache elements. -/// @return Pointer to the object used to manage the cache. +/// @brief Initializes the kernel memory cache system. +/// @details This function initializes the global cache list and creates the +/// main cache for managing kmem_cache_t structures. It also creates caches for +/// different order sizes for kmalloc allocations. +/// @note This function should be called during system initialization. +/// @return Returns 0 on success, or -1 if an error occurs. +int kmem_cache_init(void); + +/// @brief Creates a new kmem_cache structure. +/// @details This function allocates memory for a new cache and initializes it +/// with the provided parameters. The cache is ready for use after this function +/// returns. +/// @param name Name of the cache. +/// @param size Size of each object in the cache. +/// @param align Alignment requirement for objects in the cache. +/// @param flags Flags for slab allocation. +/// @param ctor Constructor function for initializing objects (can be NULL). +/// @param dtor Destructor function for cleaning up objects (can be NULL). +/// @return Pointer to the newly created kmem_cache_t, or NULL if allocation +/// fails. kmem_cache_t *kmem_cache_create( const char *name, unsigned int size, @@ -74,9 +83,13 @@ kmem_cache_t *kmem_cache_create( kmem_fun_t ctor, kmem_fun_t dtor); -/// @brief Deletes the given cache. -/// @param cachep Pointer to the cache. -void kmem_cache_destroy(kmem_cache_t *cachep); +/// @brief Destroys a specified kmem_cache structure. +/// @details This function cleans up and frees all memory associated with the +/// specified cache, including all associated slab pages. After calling this +/// function, the cache should no longer be used. +/// @param cachep Pointer to the kmem_cache_t structure to destroy. +/// @return Returns 0 on success, or -1 if an error occurs. +int kmem_cache_destroy(kmem_cache_t *cachep); #ifdef ENABLE_CACHE_TRACE @@ -103,14 +116,15 @@ void pr_kmem_cache_free(const char *file, const char *fun, int line, void *addr) #define kmem_cache_free(...) pr_kmem_cache_free(__FILE__, __func__, __LINE__, __VA_ARGS__) #else -/// @brief Allocs a new object using the provided cache. -/// @param cachep The cache used to allocate the object. -/// @param flags Flags used to define where we are going to Get Free Pages (GFP). -/// @return Pointer to the allocated space. + +/// @brief Allocates an object from the specified kmem_cache_t. +/// @param cachep Pointer to the cache from which to allocate the object. +/// @param flags Flags for the allocation (e.g., GFP_KERNEL). +/// @return Pointer to the allocated object, or NULL if allocation fails. void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags); -/// @brief Frees an cache allocated object. -/// @param addr Address of the object. +/// @brief Frees an object previously allocated from a kmem_cache_t. +/// @param ptr Pointer to the object to free. void kmem_cache_free(void *addr); #endif @@ -140,13 +154,13 @@ void pr_kfree(const char *file, const char *fun, int line, void *addr); #else -/// @brief Provides dynamically allocated memory in kernel space. -/// @param size The amount of memory to allocate. -/// @return A pointer to the allocated memory. +/// @brief Allocates memory of the specified size using kmalloc. +/// @param size Size of the memory to allocate. 
+/// @return Pointer to the allocated memory, or NULL if allocation fails. void *kmalloc(unsigned int size); -/// @brief Frees dynamically allocated memory in kernel space. -/// @param ptr The pointer to the allocated memory. +/// @brief Frees memory allocated by kmalloc or kmem_cache_alloc. +/// @param ptr Pointer to the memory to free. void kfree(void *ptr); #endif diff --git a/mentos/src/kernel.c b/mentos/src/kernel.c index 8336c840..8f8c60e1 100644 --- a/mentos/src/kernel.c +++ b/mentos/src/kernel.c @@ -152,7 +152,10 @@ int kmain(boot_info_t *boot_informations) //========================================================================== pr_notice("Initialize slab allocator.\n"); printf("Initialize slab..."); - kmem_cache_init(); + if (kmem_cache_init() < 0) { + print_fail(); + return 1; + } print_ok(); //========================================================================== @@ -409,8 +412,8 @@ int kmain(boot_info_t *boot_informations) //========================================================================== // TODO: fix the hardcoded check for the flags set by GRUB runtests = boot_info.multiboot_header->flags == 0x1a67 && - bitmask_check(boot_info.multiboot_header->flags, MULTIBOOT_FLAG_CMDLINE) && - strcmp((char *)boot_info.multiboot_header->cmdline, "runtests") == 0; + bitmask_check(boot_info.multiboot_header->flags, MULTIBOOT_FLAG_CMDLINE) && + strcmp((char *)boot_info.multiboot_header->cmdline, "runtests") == 0; task_struct *init_p; if (runtests) { diff --git a/mentos/src/mem/slab.c b/mentos/src/mem/slab.c index ce9e9177..0240453b 100644 --- a/mentos/src/mem/slab.c +++ b/mentos/src/mem/slab.c @@ -136,41 +136,129 @@ static int __alloc_slab_page(kmem_cache_t *cachep, gfp_t flags) return 0; } -static void __kmem_cache_refill(kmem_cache_t *cachep, unsigned int free_num, gfp_t flags) +/// @brief Refills a memory cache with new slab pages to reach a specified number of free objects. +/// @details This function allocates new slab pages as needed until the cache has at least `free_num` free objects. +/// If a page allocation fails, the refill process is aborted. +/// @param cachep Pointer to the memory cache (`kmem_cache_t`) that needs to be refilled. +/// @param free_num The desired number of free objects in the cache. +/// @param flags Allocation flags used for controlling memory allocation behavior (e.g., GFP_KERNEL). +/// @return 0 on success, -1 on failure. +static int __kmem_cache_refill(kmem_cache_t *cachep, unsigned int free_num, gfp_t flags) { + // Continue allocating slab pages until the cache has at least `free_num` + // free objects. while (cachep->free_num < free_num) { + // Attempt to allocate a new slab page. If allocation fails, print a + // warning and abort the refill process. if (__alloc_slab_page(cachep, flags) < 0) { - pr_warning("Cannot allocate a page, abort refill\n"); - break; + pr_crit("Failed to allocate a new slab page, aborting refill\n"); + return -1; // Return -1 if page allocation fails. } } + return 0; } -static void __compute_size_and_order(kmem_cache_t *cachep) +/// @brief Computes and sets the size and gfp order for a memory cache. +/// @details This function adjusts the size of objects in the cache based on +/// padding and alignment requirements, and calculates the `gfp_order` (number +/// of contiguous pages) needed for slab allocations. +/// @param cachep Pointer to the memory cache (`kmem_cache_t`) whose size and +/// order are being computed. +/// @return 0 on success, -1 on failure. 
+static int __compute_size_and_order(kmem_cache_t *cachep) { - // Align the whole object to the required padding. + // Check for invalid or uninitialized object sizes or alignment. + // If `object_size` or `align` is zero, the cache cannot be correctly + // configured. + if (cachep->object_size == 0) { + pr_crit("Object size is invalid (0), cannot compute cache size and order.\n"); + return -1; + } + if (cachep->align == 0) { + pr_crit("Alignment is invalid (0), cannot compute cache size and order.\n"); + return -1; + } + + // Align the object size to the required padding. + // The object size is padded based on either the `KMEM_OBJ_OVERHEAD` or the + // provided alignment requirement. Ensure that the object size is at least + // as large as the overhead and is aligned to the cache's alignment. cachep->size = round_up( - max(cachep->object_size, KMEM_OBJ_OVERHEAD), - max(8, cachep->align)); - // Compute the gfp order + max(cachep->object_size, KMEM_OBJ_OVERHEAD), // Ensure object size is larger than the overhead. + max(8, cachep->align)); // Ensure alignment is at least 8 bytes for proper memory alignment. + + // Check if the computed size is valid. + if (cachep->size == 0) { + pr_crit("Computed object size is invalid (0), cannot proceed with cache allocation.\n"); + return -1; + } + + // Compute the `gfp_order` based on the total object size and page size. + // The `gfp_order` determines how many contiguous pages will be allocated + // for the slab. unsigned int size = round_up(cachep->size, PAGE_SIZE) / PAGE_SIZE; + + // Reset `gfp_order` to 0 before calculating. + cachep->gfp_order = 0; + + // Calculate the order by determining how many divisions by 2 the size + // undergoes until it becomes smaller than or equal to 1. while ((size /= 2) > 0) { cachep->gfp_order++; } + + // Check for a valid `gfp_order`. Ensure that it's within reasonable limits. + if (cachep->gfp_order > MAX_BUDDYSYSTEM_GFP_ORDER) { + pr_crit("Calculated gfp_order exceeds system limits (%d).\n", MAX_BUDDYSYSTEM_GFP_ORDER); + cachep->gfp_order = MAX_BUDDYSYSTEM_GFP_ORDER; + } + + // Additional consistency check (optional): + // Verify that the calculated gfp_order leads to a valid page allocation size. + if ((cachep->gfp_order == 0) && (cachep->size > PAGE_SIZE)) { + pr_crit("Calculated gfp_order is 0, but object size exceeds one page. Potential issue in size computation.\n"); + return -1; + } + return 0; } -static void __kmem_cache_create( - kmem_cache_t *cachep, - const char *name, - unsigned int size, - unsigned int align, - slab_flags_t flags, - kmem_fun_t ctor, - kmem_fun_t dtor, - unsigned int start_count) +/// @brief Initializes and creates a new memory cache. +/// @details This function sets up a new memory cache (`kmem_cache_t`) with the provided parameters such as +/// object size, alignment, constructor/destructor functions, and flags. It also initializes slab lists, +/// computes the appropriate size and order, refills the cache with objects, and adds it to the global cache list. +/// @param cachep Pointer to the memory cache structure to initialize. +/// @param name Name of the cache. +/// @param size Size of the objects to be allocated from the cache. +/// @param align Alignment requirement for the objects. +/// @param flags Slab allocation flags. +/// @param ctor Constructor function to initialize objects (optional, can be NULL). +/// @param dtor Destructor function to clean up objects (optional, can be NULL). +/// @param start_count Initial number of objects to populate in the cache. 
+/// @return 0 on success, -1 on failure. +static int __kmem_cache_create( + kmem_cache_t *cachep, // Pointer to the cache structure to be created. + const char *name, // Name of the cache. + unsigned int size, // Size of the objects in the cache. + unsigned int align, // Object alignment. + slab_flags_t flags, // Allocation flags. + kmem_fun_t ctor, // Constructor function for cache objects. + kmem_fun_t dtor, // Destructor function for cache objects. + unsigned int start_count) // Initial number of objects to populate in the cache. { + // Log the creation of a new cache. pr_info("Creating new cache `%s` with objects of size `%d`.\n", name, size); + // Input validation checks. + if (!cachep) { + pr_crit("Invalid cache pointer (NULL), cannot create cache.\n"); + return -1; + } + if (!name || size == 0) { + pr_crit("Invalid cache name or object size (size = %d).\n", size); + return -1; + } + + // Set up the basic properties of the cache. *cachep = (kmem_cache_t){ .name = name, .object_size = size, @@ -180,77 +268,142 @@ static void __kmem_cache_create( .dtor = dtor }; + // Initialize the list heads for free, partial, and full slabs. list_head_init(&cachep->slabs_free); list_head_init(&cachep->slabs_partial); list_head_init(&cachep->slabs_full); + // Compute the object size and gfp_order for slab allocations. + // If there's an issue with size or order computation, this function should handle it internally. __compute_size_and_order(cachep); + // Refill the cache with `start_count` objects. + // If the refill fails (due to slab page allocation failure), a warning is logged. __kmem_cache_refill(cachep, start_count, flags); + // Insert the cache into the global list of caches. + // No error check needed here as list operations usually don't fail. list_head_insert_after(&cachep->cache_list, &kmem_caches_list); + + return 0; } +/// @brief Allocates an object from a specified slab page. +/// @details This function retrieves a free object from the given slab page's free list. +/// It decrements the count of free objects in both the slab page and the cache. +/// If the constructor function is defined, it will be called to initialize the object. +/// @param cachep Pointer to the cache from which the object is being allocated. +/// @param slab_page Pointer to the slab page from which to allocate the object. +/// @return Pointer to the allocated object, or NULL if allocation fails. static inline void *__kmem_cache_alloc_slab(kmem_cache_t *cachep, page_t *slab_page) { + // Retrieve and remove the first element from the slab's free list. list_head *elem_listp = list_head_pop(&slab_page->slab_freelist); + + // Check if the free list is empty. if (!elem_listp) { - pr_warning("There are no FREE element inside the slab_freelist\n"); - return NULL; + pr_crit("There are no FREE elements inside the slab_freelist\n"); + return NULL; // Return NULL if no free elements are available. } + + // Decrement the count of free objects in the slab page and the cache. slab_page->slab_objfree--; cachep->free_num--; - kmem_obj_t *obj = list_entry(elem_listp, kmem_obj_t, objlist); - if (!obj) { - pr_warning("The kmem object is invalid\n"); + + // Get the kmem object from the list entry. + kmem_obj_t *object = list_entry(elem_listp, kmem_obj_t, objlist); + + // Check if the kmem object pointer is valid. 
+ if (!object) { + pr_crit("The kmem object is invalid\n"); return NULL; } - // Get the element from the kmem_obj object - void *elem = ADDR_FROM_KMEM_OBJ(obj); + + // Get the address of the allocated element from the kmem object. + void *elem = ADDR_FROM_KMEM_OBJ(object); + + // Call the constructor function if it is defined to initialize the object. if (cachep->ctor) { cachep->ctor(elem); } - return elem; + + return elem; // Return the pointer to the allocated object. } -static inline void __kmem_cache_free_slab(kmem_cache_t *cachep, page_t *slab_page) +/// @brief Frees a slab page and updates the associated cache statistics. +/// @details This function updates the total and free object counts in the cache +/// and resets the slab page's metadata to indicate that it is no longer in use. +/// It also frees the memory associated with the slab page. +/// @param cachep Pointer to the cache from which the slab page is being freed. +/// @param slab_page Pointer to the slab page to be freed. +/// @return Returns 0 on success, or -1 if an error occurs. +static inline int __kmem_cache_free_slab(kmem_cache_t *cachep, page_t *slab_page) { + // Validate input parameters. + if (!cachep || !slab_page) { + pr_crit("Invalid cache or slab_page pointer (NULL).\n"); + return -1; // Return error if either pointer is NULL. + } + + // Update the free and total object counts in the cache. cachep->free_num -= slab_page->slab_objfree; cachep->total_num -= slab_page->slab_objcnt; - // Clear objcnt, used as a flag to check if the page belongs to the slab + + // Clear the object count and reset the main page pointer as a flag to + // indicate the page is no longer active. slab_page->slab_objcnt = 0; slab_page->container.slab_main_page = NULL; - // Reset all non-root slab pages + // Reset the main page pointers for all non-root slab pages. This loop + // assumes the first page is the root and resets pointers for child pages. for (unsigned int i = 1; i < (1U << cachep->gfp_order); i++) { + // Clear main page pointer for each child page. (slab_page + i)->container.slab_main_page = NULL; } + // Free the memory associated with the slab page. __free_pages(slab_page); + + return 0; // Return success. } -void kmem_cache_init(void) +int kmem_cache_init(void) { - // Initialize the list of caches. + // Initialize the list of caches to keep track of all memory caches. list_head_init(&kmem_caches_list); - // Create a cache to store the data about caches. - __kmem_cache_create( - &kmem_cache, - "kmem_cache_t", - sizeof(kmem_cache_t), - alignof(kmem_cache_t), - GFP_KERNEL, - NULL, - NULL, 32); + + // Create a cache to store metadata about kmem_cache_t structures. + if (__kmem_cache_create( + &kmem_cache, + "kmem_cache_t", + sizeof(kmem_cache_t), + alignof(kmem_cache_t), + GFP_KERNEL, + NULL, + NULL, + 32) < 0) { + pr_crit("Failed to create kmem_cache for kmem_cache_t.\n"); + return -1; // Early exit if kmem_cache creation fails. + } + + // Create caches for different order sizes for kmalloc allocations. for (unsigned int i = 0; i < MAX_KMALLOC_CACHE_ORDER; i++) { malloc_blocks[i] = kmem_cache_create( "kmalloc", - 1u << i, - 1u << i, + 1u << i, // Size of the allocation (2^i). + 1u << i, // Alignment of the allocation. GFP_KERNEL, - NULL, - NULL); + NULL, // Constructor (none). + NULL); // Destructor (none). + + // Check if the cache was created successfully. + if (!malloc_blocks[i]) { + pr_crit("Failed to create kmalloc cache for order %u.\n", i); + return -1; // Early exit if kmem_cache creation fails. 
+ } } + + return 0; // Return success. } kmem_cache_t *kmem_cache_create( @@ -261,35 +414,58 @@ kmem_cache_t *kmem_cache_create( kmem_fun_t ctor, kmem_fun_t dtor) { + // Allocate memory for a new kmem_cache_t structure. kmem_cache_t *cachep = (kmem_cache_t *)kmem_cache_alloc(&kmem_cache, GFP_KERNEL); + + // Check if memory allocation for the cache failed. if (!cachep) { - return cachep; + pr_crit("Failed to allocate memory for kmem_cache_t.\n"); + return NULL; // Return NULL to indicate failure. } - __kmem_cache_create(cachep, name, size, align, flags, ctor, dtor, KMEM_START_OBJ_COUNT); + // Initialize the kmem_cache_t structure. + if (__kmem_cache_create(cachep, name, size, align, flags, ctor, dtor, KMEM_START_OBJ_COUNT) < 0) { + pr_crit("Failed to initialize kmem_cache for '%s'.\n", name); + // Free allocated memory if initialization fails. + kmem_cache_free(cachep); + return NULL; // Return NULL to indicate failure. + } - return cachep; + return cachep; // Return the pointer to the newly created cache. } -void kmem_cache_destroy(kmem_cache_t *cachep) +int kmem_cache_destroy(kmem_cache_t *cachep) { + // Validate input parameter. + if (!cachep) { + pr_crit("Cannot destroy a NULL cache pointer.\n"); + return -1; // Early exit if cache pointer is NULL. + } + + // Free all slabs in the free list. while (!list_head_empty(&cachep->slabs_free)) { list_head *slab_list = list_head_pop(&cachep->slabs_free); __kmem_cache_free_slab(cachep, list_entry(slab_list, page_t, slabs)); } + // Free all slabs in the partial list. while (!list_head_empty(&cachep->slabs_partial)) { list_head *slab_list = list_head_pop(&cachep->slabs_partial); __kmem_cache_free_slab(cachep, list_entry(slab_list, page_t, slabs)); } + // Free all slabs in the full list. while (!list_head_empty(&cachep->slabs_full)) { list_head *slab_list = list_head_pop(&cachep->slabs_full); __kmem_cache_free_slab(cachep, list_entry(slab_list, page_t, slabs)); } + // Free the cache structure itself. kmem_cache_free(cachep); + // Remove the cache from the global cache list. list_head_remove(&cachep->cache_list); + + return 0; // Return success. } #ifdef ENABLE_CACHE_TRACE @@ -298,53 +474,63 @@ void *pr_kmem_cache_alloc(const char *file, const char *fun, int line, kmem_cach void *kmem_cache_alloc(kmem_cache_t *cachep, gfp_t flags) #endif { + // Check if there are any partially filled slabs. if (list_head_empty(&cachep->slabs_partial)) { + // If no partial slabs, check for free slabs. if (list_head_empty(&cachep->slabs_free)) { + // If no flags are specified, use the cache's flags. if (flags == 0) { flags = cachep->flags; } - // Refill the cache in an exponential fashion, capping at KMEM_MAX_REFILL_OBJ_COUNT to avoid - // too big allocations - __kmem_cache_refill(cachep, min(cachep->total_num, KMEM_MAX_REFILL_OBJ_COUNT), flags); + // Attempt to refill the cache, limiting the number of objects. + if (__kmem_cache_refill(cachep, min(cachep->total_num, KMEM_MAX_REFILL_OBJ_COUNT), flags) < 0) { + pr_crit("Failed to refill cache in `%s`\n", cachep->name); + return NULL; // Return NULL to indicate failure. + } + // If still no free slabs, log an error and return NULL. if (list_head_empty(&cachep->slabs_free)) { pr_crit("Cannot allocate more slabs in `%s`\n", cachep->name); - return NULL; + return NULL; // Return NULL to indicate failure. } } - // Add a free slab to partial list because in any case an element will - // be removed before the function returns + // Move a free slab to the partial list since we're about to allocate from it. 
list_head *free_slab = list_head_pop(&cachep->slabs_free); if (!free_slab) { pr_crit("We retrieved an invalid slab from the free list."); - return NULL; + return NULL; // Return NULL to indicate failure. } list_head_insert_after(free_slab, &cachep->slabs_partial); } + + // Retrieve the slab page from the partial list. page_t *slab_page = list_entry(cachep->slabs_partial.next, page_t, slabs); if (!slab_page) { pr_crit("We retrieved an invalid slab from the partial list."); - return NULL; + return NULL; // Return NULL to indicate failure. } + + // Allocate an object from the slab page. void *ptr = __kmem_cache_alloc_slab(cachep, slab_page); if (!ptr) { pr_crit("We failed to allocate a slab."); - return NULL; + return NULL; // Return NULL to indicate failure. } - // If the slab is now full, add it to the full slabs list. + // If the slab is now full, move it to the full slabs list. if (slab_page->slab_objfree == 0) { list_head *slab_full_elem = list_head_pop(&cachep->slabs_partial); if (!slab_full_elem) { pr_crit("We retrieved an invalid slab from the partial list."); - return NULL; + return NULL; // Return NULL to indicate failure. } list_head_insert_after(slab_full_elem, &cachep->slabs_full); } + #ifdef ENABLE_CACHE_TRACE - pr_notice("CHACE-ALLOC 0x%p in %-20s at %s:%d\n", ptr, cachep->name, file, line); + pr_notice("CACHE-ALLOC 0x%p in %-20s at %s:%d\n", ptr, cachep->name, file, line); #endif - return ptr; + return ptr; // Return pointer to the allocated object. } #ifdef ENABLE_CACHE_TRACE @@ -353,41 +539,45 @@ void pr_kmem_cache_free(const char *file, const char *fun, int line, void *ptr) void kmem_cache_free(void *ptr) #endif { + // Get the slab page corresponding to the given pointer. page_t *slab_page = get_lowmem_page_from_address((uint32_t)ptr); - // If the slab main page is a lowmem page, change to it as it's the root page + // If the slab main page is a low memory page, update to the root page. if (is_lowmem_page_struct(slab_page->container.slab_main_page)) { slab_page = slab_page->container.slab_main_page; } + // Retrieve the cache pointer from the slab page. kmem_cache_t *cachep = slab_page->container.slab_cache; #ifdef ENABLE_CACHE_TRACE - pr_notice("CHACE-FREE 0x%p in %-20s at %s:%d\n", ptr, cachep->name, file, line); + pr_notice("CACHE-FREE 0x%p in %-20s at %s:%d\n", ptr, cachep->name, file, line); #endif + // Call the destructor if defined. if (cachep->dtor) { cachep->dtor(ptr); } + // Get the kmem_obj from the pointer. kmem_obj_t *obj = KMEM_OBJ_FROM_ADDR(ptr); - // Add object to the free list + // Add object to the free list of the slab. list_head_insert_after(&obj->objlist, &slab_page->slab_freelist); slab_page->slab_objfree++; cachep->free_num++; - // Now page is completely free + // If the slab is completely free, move it to the free list. if (slab_page->slab_objfree == slab_page->slab_objcnt) { - // Remove page from partial list + // Remove the page from the partial list. list_head_remove(&slab_page->slabs); - // Add page to free list + // Add the page to the free list. list_head_insert_after(&slab_page->slabs, &cachep->slabs_free); } - // Now page is not full, so change its list + // If the page is not full, update its list status. else if (slab_page->slab_objfree == 1) { - // Remove page from full list + // Remove the page from the full list. list_head_remove(&slab_page->slabs); - // Add page to partial list + // Add the page to the partial list. 
list_head_insert_after(&slab_page->slabs, &cachep->slabs_partial); } } @@ -399,22 +589,25 @@ void *kmalloc(unsigned int size) #endif { unsigned int order = 0; + + // Determine the order based on the size requested. while (size != 0) { order++; size /= 2; } - // If size does not fit in the maximum cache order, allocate raw pages + // Allocate memory. If size exceeds the maximum cache order, allocate raw pages. void *ptr; if (order >= MAX_KMALLOC_CACHE_ORDER) { ptr = (void *)__alloc_pages_lowmem(GFP_KERNEL, order - 12); } else { ptr = kmem_cache_alloc(malloc_blocks[order], GFP_KERNEL); } + #ifdef ENABLE_ALLOC_TRACE pr_notice("KMALLOC 0x%p at %s:%d\n", ptr, file, line); #endif - return ptr; + return ptr; // Return pointer to the allocated memory. } #ifdef ENABLE_ALLOC_TRACE @@ -426,12 +619,14 @@ void kfree(void *ptr) #ifdef ENABLE_ALLOC_TRACE pr_notice("KFREE 0x%p at %s:%d\n", ptr, file, line); #endif + // Get the slab page from the pointer's address. page_t *page = get_lowmem_page_from_address((uint32_t)ptr); - // If the address is part of the cache + // If the address belongs to a cache, free it using kmem_cache_free. if (page->container.slab_main_page) { kmem_cache_free(ptr); } else { + // Otherwise, free the raw pages. free_pages_lowmem((uint32_t)ptr); } } From d214ab398033c696fa166db8a36d5d02078eb7d5 Mon Sep 17 00:00:00 2001 From: "Enrico Fraccaroli (Galfurian)" Date: Mon, 30 Sep 2024 11:56:19 -0400 Subject: [PATCH 3/9] Improve comments and error checking for the zone allocator. --- mentos/inc/mem/zone_allocator.h | 61 +++-- mentos/src/mem/zone_allocator.c | 428 ++++++++++++++++++++++++++------ 2 files changed, 382 insertions(+), 107 deletions(-) diff --git a/mentos/inc/mem/zone_allocator.h b/mentos/inc/mem/zone_allocator.h index fdbfb1fb..3092fe4b 100644 --- a/mentos/inc/mem/zone_allocator.h +++ b/mentos/inc/mem/zone_allocator.h @@ -113,12 +113,13 @@ typedef struct pg_data_t { extern page_t *mem_map; extern pg_data_t *contig_page_data; -/// @brief Find the nearest block's order of size greater than the amount of -/// byte. -/// @param base_addr The start address, used to handle extra page calculation in -/// case of not page aligned addresses. -/// @param amount The amount of byte which we want to calculate order. -/// @return The block's order greater and nearest than amount. +/// @brief Finds the nearest order of memory allocation that can accommodate a +/// given amount of memory. +/// @param base_addr the base address from which to calculate the number of +/// pages. +/// @param amount the amount of memory (in bytes) to allocate. +/// @return The nearest order (power of two) that is greater than or equal to +/// the number of pages required. uint32_t find_nearest_order_greater(uint32_t base_addr, uint32_t amount); /// @brief Physical memory manager initialization. @@ -126,24 +127,26 @@ uint32_t find_nearest_order_greater(uint32_t base_addr, uint32_t amount); /// @return Outcome of the operation. int pmmngr_init(boot_info_t *boot_info); -/// @brief Alloc a single cached page. -/// @param gfp_mask The GetFreePage mask. -/// @return Pointer to the page. +/// @brief Allocates a cached page based on the given GFP mask. +/// @param gfp_mask The GFP mask specifying the allocation constraints. +/// @return A pointer to the allocated page, or NULL if allocation fails. page_t *alloc_page_cached(gfp_t gfp_mask); /// @brief Free a page allocated with alloc_page_cached. /// @param page Pointer to the page to free. 
-void free_page_cached(page_t *page); +/// @return Returns 0 on success, or -1 if an error occurs. +int free_page_cached(page_t *page); /// @brief Find the first free page frame, set it allocated and return the /// memory address of the page frame. /// @param gfp_mask GFP_FLAGS to decide the zone allocation. -/// @return Memory address of the first free block. +/// @return The low memory address of the allocated page, or 0 if allocation fails. uint32_t __alloc_page_lowmem(gfp_t gfp_mask); /// @brief Frees the given page frame address. /// @param addr The block address. -void free_page_lowmem(uint32_t addr); +/// @return Returns 0 on success, or -1 if an error occurs. +int free_page_lowmem(uint32_t addr); /// @brief Find the first free 2^order amount of page frames, set it allocated /// and return the memory address of the first page frame allocated. @@ -156,7 +159,7 @@ uint32_t __alloc_pages_lowmem(gfp_t gfp_mask, uint32_t order); /// and return the memory address of the first page frame allocated. /// @param gfp_mask GFP_FLAGS to decide the zone allocation. /// @param order The logarithm of the size of the page frame. -/// @return Memory address of the first free page frame allocated. +/// @return Memory address of the first free page frame allocated, or NULL if allocation fails. page_t *_alloc_pages(gfp_t gfp_mask, uint32_t order); /// @brief Get the start address of the corresponding page. @@ -174,34 +177,38 @@ uint32_t get_physical_address_from_page(page_t *page); /// @return The page that corresponds to the physical address. page_t *get_page_from_physical_address(uint32_t phy_addr); -/// @brief Get the page that contains the specified address. -/// @param addr A phisical address. -/// @return The page that corresponds to the address. +/// @brief Retrieves the low memory page corresponding to the given virtual +/// address. +/// @param addr the virtual address to convert. +/// @return A pointer to the corresponding page, or NULL if the address is out +/// of range. page_t *get_lowmem_page_from_address(uint32_t addr); /// @brief Frees from the given page frame address up to 2^order amount of page /// frames. /// @param addr The page frame address. -void free_pages_lowmem(uint32_t addr); +/// @return Returns 0 on success, or -1 if an error occurs. +int free_pages_lowmem(uint32_t addr); /// @brief Frees from the given page frame address up to 2^order amount of page /// frames. /// @param page The page. -void __free_pages(page_t *page); +/// @return Returns 0 on success, or -1 if an error occurs. +int __free_pages(page_t *page); -/// @brief Returns the total space for the given zone. -/// @param gfp_mask GFP_FLAGS to decide the zone. -/// @return Total space of the given zone. +/// @brief Retrieves the total space of the zone corresponding to the given GFP mask. +/// @param gfp_mask The GFP mask specifying the allocation constraints. +/// @return The total space of the zone, or 0 if the zone cannot be retrieved. unsigned long get_zone_total_space(gfp_t gfp_mask); -/// @brief Returns the total free space for the given zone. -/// @param gfp_mask GFP_FLAGS to decide the zone. -/// @return Total free space of the given zone. +/// @brief Retrieves the free space of the zone corresponding to the given GFP mask. +/// @param gfp_mask The GFP mask specifying the allocation constraints. +/// @return The free space of the zone, or 0 if the zone cannot be retrieved. unsigned long get_zone_free_space(gfp_t gfp_mask); -/// @brief Returns the total cached space for the given zone. 
-/// @param gfp_mask GFP_FLAGS to decide the zone. -/// @return Total cached space of the given zone. +/// @brief Retrieves the cached space of the zone corresponding to the given GFP mask. +/// @param gfp_mask The GFP mask specifying the allocation constraints. +/// @return The cached space of the zone, or 0 if the zone cannot be retrieved. unsigned long get_zone_cached_space(gfp_t gfp_mask); /// @brief Checks if the specified address points to a page_t (or field) that diff --git a/mentos/src/mem/zone_allocator.c b/mentos/src/mem/zone_allocator.c index 61a3ddbe..455a9305 100644 --- a/mentos/src/mem/zone_allocator.c +++ b/mentos/src/mem/zone_allocator.c @@ -17,18 +17,32 @@ #include "string.h" #include "sys/list_head.h" -/// TODO: Comment. +/// @brief Aligns the given address down to the nearest page boundary. +/// @param addr The address to align. +/// @return The aligned address. #define MIN_PAGE_ALIGN(addr) ((addr) & (~(PAGE_SIZE - 1))) -/// TODO: Comment. + +/// @brief Aligns the given address up to the nearest page boundary. +/// @param addr The address to align. +/// @return The aligned address. #define MAX_PAGE_ALIGN(addr) (((addr) & (~(PAGE_SIZE - 1))) + PAGE_SIZE) -/// TODO: Comment. + +/// @brief Aligns the given address down to the nearest order boundary. +/// @param addr The address to align. +/// @return The aligned address. #define MIN_ORDER_ALIGN(addr) ((addr) & (~((PAGE_SIZE << (MAX_BUDDYSYSTEM_GFP_ORDER - 1)) - 1))) -/// TODO: Comment. + +/// @brief Aligns the given address up to the nearest order boundary. +/// @param addr The address to align. +/// @return The aligned address. #define MAX_ORDER_ALIGN(addr) \ (((addr) & (~((PAGE_SIZE << (MAX_BUDDYSYSTEM_GFP_ORDER - 1)) - 1))) + \ (PAGE_SIZE << (MAX_BUDDYSYSTEM_GFP_ORDER - 1))) -/// Array of all physical blocks +// #define MAX_MEM_MAP_SIZE (PAGE_SIZE * MAX_ORDER * MAX_NUMNODES) +#define MAX_MEM_MAP_SIZE (PAGE_SIZE * MAX_BUDDYSYSTEM_GFP_ORDER * 256) + +/// Array of all physical blocks. page_t *mem_map = NULL; /// Memory node. pg_data_t *contig_page_data = NULL; @@ -39,88 +53,172 @@ uint32_t lowmem_page_base = 0; page_t *get_lowmem_page_from_address(uint32_t addr) { + // Ensure the address is within the valid range. + if (addr < lowmem_virt_base) { + pr_crit("Address is below low memory virtual base.\n"); + return NULL; // Return NULL to indicate failure. + } + + // Calculate the offset from the low memory virtual base address. unsigned int offset = addr - lowmem_virt_base; - return mem_map + lowmem_page_base + (offset / PAGE_SIZE); + + // Determine the index of the corresponding page structure in the memory map. + unsigned int page_index = lowmem_page_base + (offset / PAGE_SIZE); + + // Check for overflow. + if (page_index >= MAX_MEM_MAP_SIZE) { + pr_crit("Address is out of bounds.\n"); + return NULL; // Return NULL to indicate failure. + } + + // Return the pointer to the page structure. + return mem_map + page_index; } uint32_t get_lowmem_address_from_page(page_t *page) { - unsigned int offset = (page - mem_map) - lowmem_page_base; - return lowmem_virt_base + offset * PAGE_SIZE; + // Check for NULL page pointer. + assert(page && "Invalid page pointer."); + + // Calculate the index of the page in the memory map. + unsigned int page_index = page - mem_map; + + // Calculate the offset from the low memory base address. + unsigned int offset = page_index - lowmem_page_base; + + // Return the corresponding low memory virtual address. 
+ return lowmem_virt_base + (offset * PAGE_SIZE); } uint32_t get_physical_address_from_page(page_t *page) { - return (page - mem_map) * PAGE_SIZE; + // Ensure the page pointer is not NULL. + assert(page && "Invalid page pointer."); + + // Calculate the index of the page in the memory map. + unsigned int page_index = page - mem_map; + + // Return the corresponding physical address by multiplying the index by the + // page size. + return page_index * PAGE_SIZE; } page_t *get_page_from_physical_address(uint32_t phy_addr) { - return mem_map + (phy_addr / PAGE_SIZE); -} + // Ensure the physical address is valid. + assert(phy_addr % PAGE_SIZE == 0 && "Address must be page-aligned."); + + // Calculate the index of the page in the memory map. + unsigned int page_index = phy_addr / PAGE_SIZE; + // Check for overflow: ensure the index does not exceed the maximum memory + // map size. + if (page_index >= MAX_MEM_MAP_SIZE) { + pr_crit("Physical address is out of bounds.\n"); + return NULL; // Return NULL to indicate failure. + } + + // Return the pointer to the corresponding page structure in the memory map. + return mem_map + page_index; +} /// @brief Get the zone that contains a page frame. /// @param page A page descriptor. -/// @return The zone requested. +/// @return The zone requested or NULL if the page is not within any zone. static zone_t *get_zone_from_page(page_t *page) { + // Validate the input parameter. + assert(page && "Invalid input: page is NULL."); + zone_t *zone; page_t *last_page; + // Iterate over all the zones. for (int zone_index = 0; zone_index < contig_page_data->nr_zones; zone_index++) { // Get the zone at the given index. zone = contig_page_data->node_zones + zone_index; + + // Check if the zone was retrieved successfully. assert(zone && "Failed to retrieve the zone."); - // Get the last page of the zone. + + // Get the last page of the zone by adding the size to the memory map. last_page = zone->zone_mem_map + zone->size; + + // Check if the last page of the zone was retrieved successfully. assert(last_page && "Failed to retrieve the last page of the zone."); - // Check if the page is before the last page of the zone. + + // Check if the given page is within the current zone. if (page < last_page) { - return zone; + return zone; // Return the zone if the page is within its range. } } - // Error: page is over memory size. + + // If no zone contains the page, return NULL. + // This could represent an error where the page doesn't belong to any zone. + assert(0 && "Error: page is over memory size or not part of any zone."); return (zone_t *)NULL; } -/// @brief Get a zone from gfp_mask +/// @brief Get a zone from gfp_mask. /// @param gfp_mask GFP_FLAG see gfp.h. -/// @return The zone requested. +/// @return The zone requested or NULL if the gfp_mask is not recognized. static zone_t *get_zone_from_flags(gfp_t gfp_mask) { + // Ensure that contig_page_data and node_zones are valid. + assert(contig_page_data && "contig_page_data is NULL."); + assert(contig_page_data->node_zones && "node_zones is NULL."); + switch (gfp_mask) { case GFP_KERNEL: case GFP_ATOMIC: case GFP_NOFS: case GFP_NOIO: case GFP_NOWAIT: + // Return the normal zone for these GFP flags. return &contig_page_data->node_zones[ZONE_NORMAL]; + case GFP_HIGHUSER: + // Return the high memory zone for GFP_HIGHUSER. return &contig_page_data->node_zones[ZONE_HIGHMEM]; + default: + // If the gfp_mask does not match any known flags, return NULL. 
+ assert(0 && "Error: Unrecognized gfp_mask."); return (zone_t *)NULL; } } /// @brief Checks if the memory is clean. -/// @param gfp_mask the mask which specifies the zone we are interested in. +/// @param gfp_mask The mask which specifies the zone we are interested in. /// @return 1 if clean, 0 on error. static int is_memory_clean(gfp_t gfp_mask) { - // Get the corresponding zone. + // Get the corresponding zone based on the gfp_mask. zone_t *zone = get_zone_from_flags(gfp_mask); + + // Assert that the zone is valid. assert(zone && "Failed to retrieve the zone given the gfp_mask!"); + // Get the last free area list of the buddy system. bb_free_area_t *area = zone->buddy_system.free_area + (MAX_BUDDYSYSTEM_GFP_ORDER - 1); + + // Assert that the area is valid. assert(area && "Failed to retrieve the last free_area for the given zone!"); + // Compute the total size of the zone. unsigned int total_size = (zone->size / (1UL << (MAX_BUDDYSYSTEM_GFP_ORDER - 1))); - // Check if the size of the zone is equal to the remaining pages inside the free area. + + // Check if the size of the zone matches the number of free pages in the area. if (area->nr_free != total_size) { pr_crit("Number of blocks of free pages is different than expected (%d vs %d).\n", area->nr_free, total_size); + + // Dump the current state of the buddy system for debugging purposes. buddy_system_dump(&zone->buddy_system); + + // Return 0 to indicate an error. return 0; } + + // Return 1 if the memory is clean (i.e., the sizes match). return 1; } @@ -206,39 +304,53 @@ static int pmm_check(void) return 1; } -/// @brief Initializes the memory attributes. -/// @param name Zone's name. -/// @param zone_index Zone's index. -/// @param adr_from the lowest address of the zone -/// @param adr_to the highest address of the zone (not included!) +/// @brief Initializes the memory attributes for a specified zone. +/// @param name the zone's name. +/// @param zone_index the zone's index. +/// @param adr_from the lowest address of the zone. +/// @param adr_to the highest address of the zone (not included!). static void zone_init(char *name, int zone_index, uint32_t adr_from, uint32_t adr_to) { - assert((adr_from < adr_to) && "Inserted bad block addresses!"); - assert(((adr_from & 0xfffff000) == adr_from) && "Inserted bad block addresses!"); - assert(((adr_to & 0xfffff000) == adr_to) && "Inserted bad block addresses!"); + // Ensure that the provided addresses are valid. + assert((adr_from < adr_to) && "Inserted bad block addresses: adr_from must be less than adr_to."); + assert(((adr_from & 0xfffff000) == adr_from) && "Inserted bad block addresses: adr_from must be aligned."); + assert(((adr_to & 0xfffff000) == adr_to) && "Inserted bad block addresses: adr_to must be aligned."); + + // Ensure that the zone_index is within the valid range. assert((zone_index < contig_page_data->nr_zones) && "The index is above the number of zones."); - // Take the zone_t structure that correspondes to the zone_index. + + // Take the zone_t structure that corresponds to the zone_index. zone_t *zone = contig_page_data->node_zones + zone_index; + + // Assert that the zone was retrieved successfully. assert(zone && "Failed to retrieve the zone."); - // Number of page frames in the zone. + + // Calculate the number of page frames in the zone. size_t num_page_frames = (adr_to - adr_from) / PAGE_SIZE; - // Index of the first page frame of the zone. + + // Calculate the index of the first page frame of the zone. 
uint32_t first_page_frame = adr_from / PAGE_SIZE; - // Update zone info. - zone->name = name; - zone->size = num_page_frames; - zone->free_pages = num_page_frames; - zone->zone_mem_map = mem_map + first_page_frame; - zone->zone_start_pfn = first_page_frame; - // Set to zero all page structures. + + // Update zone information. + zone->name = name; // Set the zone's name. + zone->size = num_page_frames; // Set the total number of page frames. + zone->free_pages = num_page_frames; // Initialize free pages to the total number. + zone->zone_mem_map = mem_map + first_page_frame; // Map the memory for the zone. + zone->zone_start_pfn = first_page_frame; // Set the starting page frame number. + + // Clear the page structures in the memory map. memset(zone->zone_mem_map, 0, zone->size * sizeof(page_t)); + // Initialize the buddy system for the new zone. - buddy_system_init(&zone->buddy_system, - name, - zone->zone_mem_map, - BBSTRUCT_OFFSET(page_t, bbpage), - sizeof(page_t), - num_page_frames); + buddy_system_init( + &zone->buddy_system, + name, + zone->zone_mem_map, + BBSTRUCT_OFFSET(page_t, bbpage), + sizeof(page_t), + num_page_frames); + + // Dump the current state of the buddy system for debugging purposes. buddy_system_dump(&zone->buddy_system); } @@ -250,16 +362,26 @@ static void zone_init(char *name, int zone_index, uint32_t adr_from, uint32_t ad unsigned int find_nearest_order_greater(uint32_t base_addr, uint32_t amount) { + // Calculate the starting page frame number (PFN) based on the base address. uint32_t start_pfn = base_addr / PAGE_SIZE; - uint32_t end_pfn = (base_addr + amount + PAGE_SIZE - 1) / PAGE_SIZE; - // Get the number of pages. + + // Calculate the ending page frame number (PFN) based on the base address and amount. + uint32_t end_pfn = (base_addr + amount + PAGE_SIZE - 1) / PAGE_SIZE; + + // Ensure that the number of pages is positive. + assert(end_pfn > start_pfn && "Calculated number of pages must be greater than zero."); + + // Calculate the number of pages required. uint32_t npages = end_pfn - start_pfn; - // Find the fitting order. + + // Find the fitting order (power of two) that can accommodate the required + // number of pages. unsigned int order = 0; while ((1UL << order) < npages) { ++order; } - return order; + + return order; // Return the calculated order. } int pmmngr_init(boot_info_t *boot_info) @@ -361,121 +483,267 @@ int pmmngr_init(boot_info_t *boot_info) page_t *alloc_page_cached(gfp_t gfp_mask) { + // Get the zone corresponding to the given GFP mask. zone_t *zone = get_zone_from_flags(gfp_mask); - return PG_FROM_BBSTRUCT(bb_alloc_page_cached(&zone->buddy_system), page_t, bbpage); + + // Ensure the zone is valid. + if (!zone) { + pr_crit("Failed to get zone from GFP mask.\n"); + return NULL; // Return NULL to indicate failure. + } + + // Allocate a page from the buddy system of the zone. + bb_page_t *bbpage = bb_alloc_page_cached(&zone->buddy_system); + + // Ensure the allocation was successful. + if (!bbpage) { + pr_crit("Failed to allocate page from buddy system.\n"); + return NULL; // Return NULL to indicate failure. + } + + // Convert the buddy system page structure to the page_t structure. + return PG_FROM_BBSTRUCT(bbpage, page_t, bbpage); } -void free_page_cached(page_t *page) +int free_page_cached(page_t *page) { + // Ensure the page pointer is not NULL. + if (!page) { + pr_crit("Invalid page pointer: NULL.\n"); + return -1; // Return -1 to indicate failure. + } + + // Get the zone that contains the given page. 
zone_t *zone = get_zone_from_page(page); + + // Ensure the zone is valid. + if (!zone) { + pr_crit("Failed to get zone from page.\n"); + return -1; // Return -1 to indicate failure. + } + + // Free the page from the buddy system of the zone. bb_free_page_cached(&zone->buddy_system, &page->bbpage); + + return 0; // Return success. } uint32_t __alloc_page_lowmem(gfp_t gfp_mask) { - return get_lowmem_address_from_page(alloc_page_cached(gfp_mask)); + // Allocate a cached page based on the given GFP mask. + page_t *page = alloc_page_cached(gfp_mask); + + // Ensure the page allocation was successful. + if (!page) { + pr_crit("Failed to allocate low memory page.\n"); + return 0; // Return 0 to indicate failure. + } + + // Get the low memory address from the allocated page. + return get_lowmem_address_from_page(page); } -void free_page_lowmem(uint32_t addr) +int free_page_lowmem(uint32_t addr) { + // Get the page corresponding to the given low memory address. page_t *page = get_lowmem_page_from_address(addr); + + // Ensure the page retrieval was successful. + if (!page) { + pr_crit("Failed to retrieve page from address: 0x%x\n", addr); + return -1; // Return -1 to indicate failure. + } + + // Free the cached page. free_page_cached(page); + + return 0; // Return success. } uint32_t __alloc_pages_lowmem(gfp_t gfp_mask, uint32_t order) { - assert((order <= (MAX_BUDDYSYSTEM_GFP_ORDER - 1)) && gfp_mask == GFP_KERNEL && "Order is exceeding limit."); + // Ensure the order is within the valid range. + if (order >= MAX_BUDDYSYSTEM_GFP_ORDER) { + pr_emerg("Order exceeds the maximum limit.\n"); + return 0; // Return 0 to indicate failure. + } + // Ensure the GFP mask is correct. + if (gfp_mask != GFP_KERNEL) { + pr_emerg("Invalid GFP mask. Expected GFP_KERNEL.\n"); + return 0; // Return 0 to indicate failure. + } + + // Allocate the pages based on the given GFP mask and order. page_t *page = _alloc_pages(gfp_mask, order); - // Get the index of the first page frame of the block. + // Ensure the page allocation was successful. + if (!page) { + pr_emerg("Page allocation failed.\n"); + return 0; // Return 0 to indicate failure. + } + + // Get the low memory address of the first page in the allocated block. uint32_t block_frame_adr = get_lowmem_address_from_page(page); - if (block_frame_adr == -1) { - pr_emerg("MEM. REQUEST FAILED"); + + // Ensure the address retrieval was successful. + if (block_frame_adr == (uint32_t)-1) { + pr_emerg("Failed to get low memory address from page.\n"); + return 0; // Return 0 to indicate failure. } #if 0 - else { - pr_debug("BS-G: addr: %p (page: %p order: %d)\n", block_frame_adr, page, order); - } + pr_debug("BS-G: addr: %p (page: %p order: %d)\n", block_frame_adr, page, order); #endif + + // Return the low memory address of the first page in the allocated block. return block_frame_adr; } page_t *_alloc_pages(gfp_t gfp_mask, uint32_t order) { + // Calculate the block size based on the order. uint32_t block_size = 1UL << order; + // Get the zone corresponding to the given GFP mask. zone_t *zone = get_zone_from_flags(gfp_mask); - page_t *page = NULL; - // Search for a block of page frames by using the BuddySystem. - page = PG_FROM_BBSTRUCT(bb_alloc_pages(&zone->buddy_system, order), page_t, bbpage); + // Ensure the zone is valid. + if (!zone) { + pr_emerg("Failed to get zone from GFP mask.\n"); + return NULL; // Return NULL to indicate failure. 
+ } - // Set page counters - for (int i = 0; i < block_size; i++) { - set_page_count(&page[i], 1); + // Allocate a page from the buddy system of the zone. + bb_page_t *bbpage = bb_alloc_pages(&zone->buddy_system, order); + + // Ensure the allocation was successful. + if (!bbpage) { + pr_crit("Failed to allocate page from buddy system.\n"); + return NULL; // Return NULL to indicate failure. } - assert(page && "Cannot allocate pages."); + // Convert the buddy system page structure to the page_t structure. + page_t *page = PG_FROM_BBSTRUCT(bbpage, page_t, bbpage); - // Decrement the number of pages in the zone. - if (page) { - zone->free_pages -= block_size; + // Ensure the page allocation was successful. + if (!page) { + pr_emerg("Page allocation failed.\n"); + return NULL; // Return NULL to indicate failure. } + // Set page counters for each page in the block. + for (uint32_t i = 0; i < block_size; i++) { + set_page_count(&page[i], 1); + } + + // Decrement the number of free pages in the zone. + zone->free_pages -= block_size; + #if 0 pr_warning("BS-A: (page: %p order: %d)\n", page, order); #endif + + // Return the pointer to the first page in the allocated block. return page; } -void free_pages_lowmem(uint32_t addr) +int free_pages_lowmem(uint32_t addr) { + // Get the page corresponding to the given low memory address. page_t *page = get_lowmem_page_from_address(addr); - assert(page && "Page is over memory size."); + + // Ensure the page retrieval was successful. + if (!page) { + pr_emerg("Failed to retrieve page from address: 0x%x. Page is over memory size.\n", addr); + return -1; // Return -1 to indicate failure. + } + + // Free the pages starting from the given page. __free_pages(page); + + return 0; // Return success. } -void __free_pages(page_t *page) +int __free_pages(page_t *page) { + // Get the zone that contains the given page. zone_t *zone = get_zone_from_page(page); - assert(zone && "Page is over memory size."); - assert(zone->zone_mem_map <= page && "Page is below the selected zone!"); + // Ensure the zone retrieval was successful. + if (!zone) { + pr_emerg("Failed to get zone from page. Page is over memory size.\n"); + return -1; // Return -1 to indicate failure. + } + + // Ensure the page is within the selected zone. + if (zone->zone_mem_map > page) { + pr_emerg("Page is below the selected zone!\n"); + return -1; // Return -1 to indicate failure. + } + // Get the order and block size of the page. uint32_t order = page->bbpage.order; uint32_t block_size = 1UL << order; - for (int i = 0; i < block_size; i++) { + // Set page counters to 0 for each page in the block. + for (uint32_t i = 0; i < block_size; i++) { set_page_count(&page[i], 0); } + // Free the pages in the buddy system. bb_free_pages(&zone->buddy_system, &page->bbpage); + // Increment the number of free pages in the zone. zone->free_pages += block_size; + #if 0 pr_warning("BS-F: (page: %p order: %d)\n", page, order); #endif - //buddy_system_dump(&zone->buddy_system); + + return 0; // Return success. } unsigned long get_zone_total_space(gfp_t gfp_mask) { + // Get the zone corresponding to the given GFP mask. zone_t *zone = get_zone_from_flags(gfp_mask); - assert(zone && "Cannot retrieve the correct zone."); + + // Ensure the zone retrieval was successful. + if (!zone) { + pr_emerg("Cannot retrieve the correct zone for GFP mask: 0x%x.\n", gfp_mask); + return 0; // Return 0 to indicate failure. + } + + // Return the total space of the zone. 
return buddy_system_get_total_space(&zone->buddy_system); } unsigned long get_zone_free_space(gfp_t gfp_mask) { + // Get the zone corresponding to the given GFP mask. zone_t *zone = get_zone_from_flags(gfp_mask); - assert(zone && "Cannot retrieve the correct zone."); + + // Ensure the zone retrieval was successful. + if (!zone) { + pr_emerg("Cannot retrieve the correct zone for GFP mask: 0x%x.\n", gfp_mask); + return 0; // Return 0 to indicate failure. + } + + // Return the free space of the zone. return buddy_system_get_free_space(&zone->buddy_system); } unsigned long get_zone_cached_space(gfp_t gfp_mask) { + // Get the zone corresponding to the given GFP mask. zone_t *zone = get_zone_from_flags(gfp_mask); - assert(zone && "Cannot retrieve the correct zone."); + + // Ensure the zone retrieval was successful. + if (!zone) { + pr_emerg("Cannot retrieve the correct zone for GFP mask: 0x%x.\n", gfp_mask); + return 0; // Return 0 to indicate failure. + } + + // Return the cached space of the zone. return buddy_system_get_cached_space(&zone->buddy_system); } From d7e348d7db04e829f5318480ddaebb936287ad28 Mon Sep 17 00:00:00 2001 From: "Enrico Fraccaroli (Galfurian)" Date: Mon, 30 Sep 2024 13:08:34 -0400 Subject: [PATCH 4/9] Improve comments and error checking for the virtual memory mapping routines. --- mentos/inc/mem/vmem_map.h | 46 ++++---- mentos/src/kernel.c | 5 +- mentos/src/mem/vmem_map.c | 236 ++++++++++++++++++++++++++++++-------- 3 files changed, 212 insertions(+), 75 deletions(-) diff --git a/mentos/inc/mem/vmem_map.h b/mentos/inc/mem/vmem_map.h index 45ff225c..26b90f58 100644 --- a/mentos/inc/mem/vmem_map.h +++ b/mentos/inc/mem/vmem_map.h @@ -24,43 +24,43 @@ typedef struct virt_map_page_t { bb_page_t bbpage; } virt_map_page_t; -/// @brief Initialize the virtual memory mapper -void virt_init(void); +/// @brief Initialize the virtual memory mapper. +/// @return Returns 0 on success, or -1 if an error occurs. +int virt_init(void); -/// @brief Map a page range to virtual memory -/// @param page The start page of the mapping -/// @param pfn_count The number of pages to map -/// @return The virtual address of the mapping +/// @brief Maps physical pages to virtual memory. +/// @param page Pointer to the physical page. +/// @param pfn_count The number of page frames to map. +/// @return The virtual address of the mapped pages, or 0 on failure. uint32_t virt_map_physical_pages(page_t *page, int pfn_count); -/// @brief Allocate a virtual page range of the specified size. -/// @param size The required amount. -/// @return Pointer to the allocated memory. +/// @brief Allocates virtual pages for a given size. +/// @param size The size in bytes to allocate. +/// @return Pointer to the allocated virtual pages, or NULL on failure. virt_map_page_t *virt_map_alloc(uint32_t size); -/// @brief Map a page to a memory area portion. -/// @param mm The memory descriptor. -/// @param vpage Pointer to the virtual page. -/// @param vaddr The starting address of the are. -/// @param size The size of the area. -/// @return Address of the mapped area. -uint32_t virt_map_vaddress(mm_struct_t *mm, - virt_map_page_t *vpage, - uint32_t vaddr, - uint32_t size); +/// @brief Maps a virtual address to a virtual memory area. +/// @param mm Pointer to the memory management structure. +/// @param vpage Pointer to the virtual map page. +/// @param vaddr The virtual address to map. +/// @param size The size of the memory area to map. +/// @return The starting virtual address of the mapped area, or 0 on failure. 
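+/// @note Illustrative usage sketch (mirrors the pattern used by virt_memcpy()
+/// in vmem_map.c; `mm`, `vaddr` and `size` are caller-provided and error
+/// checks are omitted):
+/// @code
+/// virt_map_page_t *vpage = virt_map_alloc(size);
+/// uint32_t mapped = virt_map_vaddress(mm, vpage, vaddr, size);
+/// // ... access the remote range through `mapped` ...
+/// virt_unmap_pg(vpage);
+/// @endcode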
+uint32_t virt_map_vaddress(mm_struct_t *mm, virt_map_page_t *vpage, uint32_t addr, uint32_t size); /// @brief Checks if an address belongs to the virtual memory mapping. /// @param addr The address to check. /// @return 1 if it belongs to the virtual memory mapping, 0 otherwise. int virtual_check_address(uint32_t addr); -/// @brief Unmap an address. -/// @param addr The address to unmap. -void virt_unmap(uint32_t addr); +/// @brief Unmaps a virtual address from the virtual memory. +/// @param addr The virtual address to unmap. +/// @return Returns 0 on success, or -1 if an error occurs. +int virt_unmap(uint32_t addr); /// @brief Unmap a page. /// @param page Pointer to the page to unmap. -void virt_unmap_pg(virt_map_page_t *page); +/// @return Returns 0 on success, or -1 if an error occurs. +int virt_unmap_pg(virt_map_page_t *page); /// @brief Memcpy from different processes virtual addresses /// @param dst_mm The destination memory struct diff --git a/mentos/src/kernel.c b/mentos/src/kernel.c index 8f8c60e1..8a4652de 100644 --- a/mentos/src/kernel.c +++ b/mentos/src/kernel.c @@ -197,7 +197,10 @@ int kmain(boot_info_t *boot_informations) //========================================================================== pr_notice("Initialize virtual memory mapping.\n"); printf("Initialize virtual memory mapping..."); - virt_init(); + if (virt_init() < 0) { + print_fail(); + return 1; + } print_ok(); //========================================================================== diff --git a/mentos/src/mem/vmem_map.c b/mentos/src/mem/vmem_map.c index ed427614..44074bc1 100644 --- a/mentos/src/mem/vmem_map.c +++ b/mentos/src/mem/vmem_map.c @@ -16,20 +16,24 @@ /// Virtual addresses manager. static virt_map_page_manager_t virt_default_mapping; -/// TODO: check. +/// Number of virtual memory pages. #define VIRTUAL_MEMORY_PAGES_COUNT (VIRTUAL_MEMORY_SIZE_MB * 256) -/// TODO: check. + +/// Base address for virtual memory mapping. #define VIRTUAL_MAPPING_BASE (PROCAREA_END_ADDR + 0x38000000) -/// TODO: check. -#define VIRT_PAGE_TO_ADDRESS(page) ((((page)-virt_pages) * PAGE_SIZE) + VIRTUAL_MAPPING_BASE) -/// TODO: check. -#define VIRT_ADDRESS_TO_PAGE(addr) ((((addr)-VIRTUAL_MAPPING_BASE) / PAGE_SIZE) + virt_pages) + +/// Converts a virtual page to its address. +#define VIRT_PAGE_TO_ADDRESS(page) ((((page) - virt_pages) * PAGE_SIZE) + VIRTUAL_MAPPING_BASE) + +/// Converts an address to its corresponding virtual page. +#define VIRT_ADDRESS_TO_PAGE(addr) ((((addr) - VIRTUAL_MAPPING_BASE) / PAGE_SIZE) + virt_pages) /// Array of virtual pages. virt_map_page_t virt_pages[VIRTUAL_MEMORY_PAGES_COUNT]; -void virt_init(void) +int virt_init(void) { + // Initialize the buddy system for virtual memory management. buddy_system_init( &virt_default_mapping.bb_instance, "virt_manager", @@ -38,44 +42,73 @@ void virt_init(void) sizeof(virt_map_page_t), VIRTUAL_MEMORY_PAGES_COUNT); + // Get the main page directory. page_directory_t *mainpgd = paging_get_main_directory(); + // Error handling: Failed to get the main page directory. + if (!mainpgd) { + pr_crit("Failed to get the main page directory\n"); + return -1; // Return -1 to indicate failure. + } + // Calculate the starting page frame number, page table, and table index. 
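+    // For example, with PROCAREA_END_ADDR = 0xC0000000 (paging.h), the mapping
+    // base is 0xF8000000, so start_virt_pfn = 0xF8000, start_virt_pgt = 992,
+    // and start_virt_tbl_idx = 0 (illustrative arithmetic only).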
uint32_t start_virt_pfn = VIRTUAL_MAPPING_BASE / PAGE_SIZE; uint32_t start_virt_pgt = start_virt_pfn / 1024; uint32_t start_virt_tbl_idx = start_virt_pfn % 1024; uint32_t pfn_num = VIRTUAL_MEMORY_PAGES_COUNT; - // Alloc all page tables inside the main directory, so they will be shared across - // all page directories of processes + // Allocate all page tables inside the main directory, so they will be + // shared across all page directories of processes. + page_dir_entry_t *entry; + page_table_t *table; for (uint32_t i = start_virt_pgt; i < 1024 && (pfn_num > 0); i++) { - page_dir_entry_t *entry = mainpgd->entries + i; - - page_table_t *table; - - // Alloc virtual page table - entry->present = 1; - entry->rw = 0; - entry->global = 1; - entry->user = 0; - entry->accessed = 0; - entry->available = 1; - table = kmem_cache_alloc(pgtbl_cache, GFP_KERNEL); + // Get the page directory entry. + entry = mainpgd->entries + i; + + // Alloc virtual page table. + entry->present = 1; // Mark the entry as present + entry->rw = 0; // Read-only + entry->global = 1; // Global page + entry->user = 0; // Kernel mode + entry->accessed = 0; // Not accessed + entry->available = 1; // Available for system use + + // Allocate a new page table. + table = kmem_cache_alloc(pgtbl_cache, GFP_KERNEL); + // Error handling: failed to allocate page table. + if (!table) { + pr_crit("Failed to allocate page table\n"); + return -1; + } + // Determine the starting page index. uint32_t start_page = (i == start_virt_pgt) ? start_virt_tbl_idx : 0; + // Initialize the pages in the table. for (uint32_t j = start_page; j < 1024 && (pfn_num > 0); j++, pfn_num--) { - table->pages[j].frame = 0; - table->pages[j].rw = 0; - table->pages[j].present = 0; - table->pages[j].global = 1; - table->pages[j].user = 0; + table->pages[j].frame = 0; // No frame allocated + table->pages[j].rw = 0; // Read-only + table->pages[j].present = 0; // Not present + table->pages[j].global = 1; // Global page + table->pages[j].user = 0; // Kernel mode } + // Get the physical address of the allocated page table. page_t *table_page = get_lowmem_page_from_address((uint32_t)table); - uint32_t phy_addr = get_physical_address_from_page(table_page); - entry->frame = phy_addr >> 12u; + // Error handling: failed to get low memory page from address. + if (!table_page) { + pr_crit("Failed to get low memory page from address\n"); + return -1; + } + + // Get the physical address. + uint32_t phy_addr = get_physical_address_from_page(table_page); + + // Set the frame address in the page directory entry. + entry->frame = phy_addr >> 12u; } + + return 0; // Return success. } /// @brief Allocates a virtual page, given the page frame count. @@ -83,44 +116,107 @@ void virt_init(void) /// @return pointer to the virtual page. static virt_map_page_t *_alloc_virt_pages(uint32_t pfn_count) { - unsigned order = find_nearest_order_greater(0, pfn_count << 12); - virt_map_page_t *vpage = PG_FROM_BBSTRUCT(bb_alloc_pages(&virt_default_mapping.bb_instance, order), virt_map_page_t, bbpage); - return vpage; + // Find the nearest order greater than or equal to the page frame count. + unsigned order = find_nearest_order_greater(0, pfn_count << 12); + + // Allocate pages from the buddy system. + bb_page_t *bbpage = bb_alloc_pages(&virt_default_mapping.bb_instance, order); + // Error handling: failed to allocate pages from the buddy system. + if (!bbpage) { + pr_crit("Failed to allocate pages from the buddy system\n"); + return NULL; // Return NULL to indicate failure. 
+    }
+
+    // Convert the buddy system page to a virtual map page.
+    virt_map_page_t *vpage = PG_FROM_BBSTRUCT(bbpage, virt_map_page_t, bbpage);
+    // Error handling: failed to convert from buddy system page to virtual map page.
+    if (!vpage) {
+        pr_emerg("Failed to convert from buddy system page to virtual map page.\n");
+        return NULL; // Return NULL to indicate failure.
+    }
+
+    return vpage; // Return the allocated virtual page.
 }
 
 uint32_t virt_map_physical_pages(page_t *page, int pfn_count)
 {
+    // Allocate virtual pages for the given page frame count.
     virt_map_page_t *vpage = _alloc_virt_pages(pfn_count);
+    // Error handling: failed to allocate virtual pages.
     if (!vpage) {
-        return 0;
+        pr_crit("Failed to allocate virtual pages\n");
+        return 0; // Return 0 to indicate failure.
     }
+
+    // Convert the virtual page to its corresponding virtual address.
     uint32_t virt_address = VIRT_PAGE_TO_ADDRESS(vpage);
-    uint32_t phy_address = get_physical_address_from_page(page);
-    mem_upd_vm_area(paging_get_main_directory(), virt_address, phy_address,
-                    pfn_count * PAGE_SIZE, MM_PRESENT | MM_RW | MM_GLOBAL | MM_UPDADDR);
-    return virt_address;
+    // Get the physical address of the given page.
+    uint32_t phy_address = get_physical_address_from_page(page);
+
+    // Get the main page directory.
+    page_directory_t *mainpgd = paging_get_main_directory();
+    // Error handling: Failed to get the main page directory.
+    if (!mainpgd) {
+        pr_crit("Failed to get the main page directory\n");
+        return 0; // Return 0 to indicate failure.
+    }
+
+    // Update the virtual memory area with the new mapping.
+    mem_upd_vm_area(
+        mainpgd, virt_address,
+        phy_address,
+        pfn_count * PAGE_SIZE,
+        MM_PRESENT | MM_RW | MM_GLOBAL | MM_UPDADDR);
+
+    return virt_address; // Return the virtual address of the mapped pages.
 }
 
 virt_map_page_t *virt_map_alloc(uint32_t size)
 {
+    // Calculate the number of pages required to cover the given size.
     uint32_t pages_count = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-    return _alloc_virt_pages(pages_count);
+
+    // Allocate the required number of virtual pages.
+    virt_map_page_t *vpages = _alloc_virt_pages(pages_count);
+    // Error handling: failed to allocate virtual pages.
+    if (!vpages) {
+        pr_crit("Failed to allocate virtual pages for size %u\n", size);
+        return NULL; // Return NULL to indicate failure.
+    }
+
+    return vpages; // Return the pointer to the allocated virtual pages.
 }
 
 uint32_t virt_map_vaddress(mm_struct_t *mm, virt_map_page_t *vpage, uint32_t vaddr, uint32_t size)
 {
+    // Error handling: ensure the memory management structure and page directory are valid.
+    if (!mm || !mm->pgd) {
+        pr_crit("Invalid memory management structure or page directory\n");
+        return 0; // Return 0 to indicate failure.
+    }
+
+    // Convert the virtual map page to its corresponding virtual address.
     uint32_t start_map_virt_address = VIRT_PAGE_TO_ADDRESS(vpage);
-    // Clone the source vaddr the the requested virtual memory portion
-    mem_clone_vm_area(mm->pgd,
-                      paging_get_main_directory(),
-                      vaddr,
-                      start_map_virt_address,
-                      size,
-                      MM_PRESENT | MM_RW | MM_GLOBAL | MM_UPDADDR);
-    return start_map_virt_address;
+    // Get the main page directory.
+    page_directory_t *mainpgd = paging_get_main_directory();
+    // Error handling: Failed to get the main page directory.
+    if (!mainpgd) {
+        pr_crit("Failed to get the main page directory\n");
+        return 0; // Return 0 to indicate failure.
+    }
+
+    // Clone the source vaddr to the requested virtual memory portion.
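+    // That is, make the range [vaddr, vaddr + size) of `mm` visible to the
+    // kernel through the reserved window starting at start_map_virt_address.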
+ mem_clone_vm_area( + mm->pgd, + mainpgd, + vaddr, + start_map_virt_address, + size, + MM_PRESENT | MM_RW | MM_GLOBAL | MM_UPDADDR); + + return start_map_virt_address; // Return the starting virtual address of the mapped area. } int virtual_check_address(uint32_t addr) @@ -128,55 +224,93 @@ int virtual_check_address(uint32_t addr) return addr >= VIRTUAL_MAPPING_BASE; // && addr < VIRTUAL_MAPPING_BASE + VIRTUAL_MEMORY_PAGES_COUNT * PAGE_SIZE; } -void virt_unmap(uint32_t addr) +int virt_unmap(uint32_t addr) { + // Convert the virtual address to its corresponding virtual map page. virt_map_page_t *page = VIRT_ADDRESS_TO_PAGE(addr); + // Error handling: ensure the page is valid. + if (!page) { + pr_crit("Failed to convert address %u to virtual map page\n", addr); + return -1; // Return -1 to indicate failure. + } + + // Unmap the virtual map page. virt_unmap_pg(page); + + return 0; // Return success. } -void virt_unmap_pg(virt_map_page_t *page) +int virt_unmap_pg(virt_map_page_t *page) { + // Error handling: ensure the page is valid. + if (!page) { + pr_crit("Invalid virtual map page\n"); + return -1; // Return -1 to indicate failure. + } + + // Convert the virtual map page to its corresponding virtual address. uint32_t addr = VIRT_PAGE_TO_ADDRESS(page); - // Set all virtual pages as not present - mem_upd_vm_area(paging_get_main_directory(), addr, 0, - (1 << page->bbpage.order) * PAGE_SIZE, MM_GLOBAL); + // Get the main page directory. + page_directory_t *mainpgd = paging_get_main_directory(); + // Error handling: Failed to get the main page directory. + if (!mainpgd) { + pr_crit("Failed to get the main page directory\n"); + return -1; // Return -1 to indicate failure. + } - // and avoiding unwanted memory accesses by the kernel + // Set all virtual pages as not present to avoid unwanted memory accesses by the kernel. + mem_upd_vm_area(mainpgd, addr, 0, (1 << page->bbpage.order) * PAGE_SIZE, MM_GLOBAL); + + // Free the pages in the buddy system. bb_free_pages(&virt_default_mapping.bb_instance, &page->bbpage); + + return 0; // Return success. } // FIXME: Check if this function should support unaligned page-boundaries copy void virt_memcpy(mm_struct_t *dst_mm, uint32_t dst_vaddr, mm_struct_t *src_mm, uint32_t src_vaddr, uint32_t size) { + // Buffer size for copying. const uint32_t VMEM_BUFFER_SIZE = 65536; + // Determine the buffer size to use for copying. uint32_t buffer_size = min(VMEM_BUFFER_SIZE, size); + // Allocate virtual pages for the source and destination. virt_map_page_t *src_vpage = virt_map_alloc(size); virt_map_page_t *dst_vpage = virt_map_alloc(size); + // Error handling: ensure both source and destination virtual pages are allocated. if (!src_vpage || !dst_vpage) { kernel_panic("Cannot copy virtual memory address, unable to reserve vmem!"); } + // Loop to copy memory in chunks. for (;;) { + // Map the source and destination virtual addresses to the allocated + // virtual pages. uint32_t src_map = virt_map_vaddress(src_mm, src_vpage, src_vaddr, buffer_size); uint32_t dst_map = virt_map_vaddress(dst_mm, dst_vpage, dst_vaddr, buffer_size); + // Determine the size to copy in this iteration. uint32_t cpy_size = min(buffer_size, size); + // Perform the memory copy. memcpy((void *)dst_map, (void *)src_map, cpy_size); + // Check if the entire size has been copied. if (size <= buffer_size) { break; } + // Update the remaining size and addresses for the next iteration. size -= cpy_size; src_vaddr += cpy_size; dst_vaddr += cpy_size; } + // Unmap the allocated virtual pages. 
virt_unmap_pg(src_vpage); virt_unmap_pg(dst_vpage); } From 6a5dfc116273f61aebe6ae5d875758820bd5b98c Mon Sep 17 00:00:00 2001 From: "Enrico Fraccaroli (Galfurian)" Date: Mon, 30 Sep 2024 15:18:01 -0400 Subject: [PATCH 5/9] Improve comments and error checking for the remaining memory routines. --- mentos/inc/mem/paging.h | 202 ++++--- mentos/inc/proc_access.h | 20 + mentos/src/mem/kheap.c | 604 ++++++++++++++------ mentos/src/mem/paging.c | 1129 +++++++++++++++++++++++++++++-------- mentos/src/mem/vmem_map.c | 24 +- 5 files changed, 1476 insertions(+), 503 deletions(-) diff --git a/mentos/inc/mem/paging.h b/mentos/inc/mem/paging.h index 58c03b4b..d230c786 100644 --- a/mentos/inc/mem/paging.h +++ b/mentos/inc/mem/paging.h @@ -12,125 +12,134 @@ #include "boot.h" #include "stdint.h" -/// Size of a page. -#define PAGE_SIZE 4096U +/// 4KB pages (2^12 = 4096 bytes) +#define PAGE_SHIFT 12 +/// Size of a page (4096 bytes). +#define PAGE_SIZE (1 << PAGE_SHIFT) +// Maximum number of physical page frame numbers (PFNs). +#define MAX_PHY_PFN (1UL << (32 - PAGE_SHIFT)) + /// The start of the process area. #define PROCAREA_START_ADDR 0x00000000 /// The end of the process area (and start of the kernel area). #define PROCAREA_END_ADDR 0xC0000000 +/// For a single page table in a 32-bit system. +#define MAX_PAGE_TABLE_ENTRIES 1024 +/// For a page directory with 1024 entries. +#define MAX_PAGE_DIR_ENTRIES 1024 + /// @brief An entry of a page directory. typedef struct page_dir_entry_t { - unsigned int present : 1; ///< TODO: Comment. - unsigned int rw : 1; ///< TODO: Comment. - unsigned int user : 1; ///< TODO: Comment. - unsigned int w_through : 1; ///< TODO: Comment. - unsigned int cache : 1; ///< TODO: Comment. - unsigned int accessed : 1; ///< TODO: Comment. - unsigned int reserved : 1; ///< TODO: Comment. - unsigned int page_size : 1; ///< TODO: Comment. - unsigned int global : 1; ///< TODO: Comment. - unsigned int available : 3; ///< TODO: Comment. - unsigned int frame : 20; ///< TODO: Comment. + unsigned int present : 1; ///< Page is present in memory. + unsigned int rw : 1; ///< Read/write permission (0 = read-only, 1 = read/write). + unsigned int user : 1; ///< User/supervisor (0 = supervisor, 1 = user). + unsigned int w_through : 1; ///< Write-through caching enabled. + unsigned int cache : 1; ///< Cache disabled. + unsigned int accessed : 1; ///< Page has been accessed. + unsigned int reserved : 1; ///< Reserved. + unsigned int page_size : 1; ///< Page size (0 = 4 KB, 1 = 4 MB). + unsigned int global : 1; ///< Global page (not flushed by TLB). + unsigned int available : 3; ///< Available for system use. + unsigned int frame : 20; ///< Frame address (shifted right 12 bits). } page_dir_entry_t; /// @brief An entry of a page table. typedef struct page_table_entry_t { - unsigned int present : 1; ///< TODO: Comment. - unsigned int rw : 1; ///< TODO: Comment. - unsigned int user : 1; ///< TODO: Comment. - unsigned int w_through : 1; ///< TODO: Comment. - unsigned int cache : 1; ///< TODO: Comment. - unsigned int accessed : 1; ///< TODO: Comment. - unsigned int dirty : 1; ///< TODO: Comment. - unsigned int zero : 1; ///< TODO: Comment. - unsigned int global : 1; ///< TODO: Comment. - unsigned int kernel_cow : 1; ///< TODO: Comment. - unsigned int available : 2; ///< TODO: Comment. - unsigned int frame : 20; ///< TODO: Comment. + unsigned int present : 1; ///< Page is present in memory. + unsigned int rw : 1; ///< Read/write permission (0 = read-only, 1 = read/write). 
+ unsigned int user : 1; ///< User/supervisor (0 = supervisor, 1 = user). + unsigned int w_through : 1; ///< Write-through caching enabled. + unsigned int cache : 1; ///< Cache disabled. + unsigned int accessed : 1; ///< Page has been accessed. + unsigned int dirty : 1; ///< Page has been written to. + unsigned int zero : 1; ///< Reserved (set to 0). + unsigned int global : 1; ///< Global page (not flushed by TLB). + unsigned int kernel_cow : 1; ///< Kernel copy-on-write. + unsigned int available : 2; ///< Available for system use. + unsigned int frame : 20; ///< Frame address (shifted right 12 bits). } page_table_entry_t; /// @brief Flags associated with virtual memory areas. enum MEMMAP_FLAGS { - MM_USER = 0x1, ///< Area belongs to user. - MM_GLOBAL = 0x2, ///< Area is global. - MM_RW = 0x4, ///< Area has user read/write perm. - MM_PRESENT = 0x8, ///< Area is valid. + MM_USER = 0x1, ///< Area belongs to user mode (accessible by user-level processes). + MM_GLOBAL = 0x2, ///< Area is global (not flushed from TLB on context switch). + MM_RW = 0x4, ///< Area has read/write permissions. + MM_PRESENT = 0x8, ///< Area is present in memory. // Kernel flags - MM_COW = 0x10, ///< Area is copy on write. - MM_UPDADDR = 0x20, ///< Check? + MM_COW = 0x10, ///< Area is copy-on-write (used for forked processes). + MM_UPDADDR = 0x20, ///< Update address (used for special memory mappings). }; /// @brief A page table. /// @details /// It contains 1024 entries which can be addressed by 10 bits (log_2(1024)). typedef struct page_table_t { - page_table_entry_t pages[1024]; ///< Array of pages. + /// @brief Array of page table entries. + page_table_entry_t pages[MAX_PAGE_TABLE_ENTRIES]; } __attribute__((aligned(PAGE_SIZE))) page_table_t; /// @brief A page directory. /// @details In the two-level paging, this is the first level. typedef struct page_directory_t { - /// We need a table that contains virtual address, so that we can - /// actually get to the tables (size: 1024 * 4 = 4096 byte). - page_dir_entry_t entries[1024]; + /// @brief Array of page directory entries. + /// @details + /// We need a table that contains virtual addresses, so that we can actually + /// get to the tables (size: 1024 * 4 = 4096 bytes). + page_dir_entry_t entries[MAX_PAGE_DIR_ENTRIES]; } __attribute__((aligned(PAGE_SIZE))) page_directory_t; /// @brief Virtual Memory Area, used to store details of a process segment. typedef struct vm_area_struct_t { - /// Memory descriptor associated. + /// Pointer to the memory descriptor associated with this area. struct mm_struct_t *vm_mm; /// Start address of the segment, inclusive. uint32_t vm_start; /// End address of the segment, exclusive. uint32_t vm_end; - /// List of memory areas. + /// Linked list of memory areas. list_head vm_list; - /// Permissions. + /// Page protection flags (permissions). pgprot_t vm_page_prot; - /// Flags. + /// Flags indicating attributes of the memory area. unsigned short vm_flags; - /// rbtree node. - // struct rb_node vm_rb; } vm_area_struct_t; /// @brief Memory Descriptor, used to store details about the memory of a user process. typedef struct mm_struct_t { - /// List of memory area (vm_area_struct reference). + /// List of memory areas (vm_area_struct references). list_head mmap_list; - // /// rbtree of memory area. - // struct rb_root mm_rb; - /// Last memory area used. + /// Pointer to the last used memory area. vm_area_struct_t *mmap_cache; - /// Process page directory. + /// Pointer to the process's page directory. 
page_directory_t *pgd; - /// Number of memory area. + /// Number of memory areas. int map_count; - /// List of mm_struct. + /// List of mm_structs. list_head mm_list; - /// CODE start. + /// Start address of the code segment. uint32_t start_code; - /// CODE end. + /// End address of the code segment. uint32_t end_code; - /// DATA start. + /// Start address of the data segment. uint32_t start_data; - /// DATA end. + /// End address of the data segment. uint32_t end_data; - /// HEAP start. + /// Start address of the heap. uint32_t start_brk; - /// HEAP end. + /// End address of the heap. uint32_t brk; - /// STACK start. + /// Start address of the stack. uint32_t start_stack; - /// ARGS start. + /// Start address of the arguments. uint32_t arg_start; - /// ARGS end. + /// End address of the arguments. uint32_t arg_end; - /// ENVIRONMENT start. + /// Start address of the environment variables. uint32_t env_start; - /// ENVIRONMENT end. + /// End address of the environment variables. uint32_t env_end; - /// Number of mapped pages. + /// Total number of mapped pages. unsigned int total_vm; } mm_struct_t; @@ -138,22 +147,28 @@ typedef struct mm_struct_t { extern kmem_cache_t *pgtbl_cache; /// @brief Comparison function between virtual memory areas. -/// @param vma0 the first vm_area. -/// @param vma1 the second vm_area. -/// @return true if vma0 is after vma1. +/// @param vma0 Pointer to the first vm_area_struct's list_head. +/// @param vma1 Pointer to the second vm_area_struct's list_head. +/// @return 1 if vma0 starts after vma1 ends, 0 otherwise. static inline int vm_area_compare(const list_head *vma0, const list_head *vma1) { + // Retrieve the vm_area_struct from the list_head for vma0. vm_area_struct_t *_vma0 = list_entry(vma0, vm_area_struct_t, vm_list); + // Retrieve the vm_area_struct from the list_head for vma1. vm_area_struct_t *_vma1 = list_entry(vma1, vm_area_struct_t, vm_list); + // Compare the start address of vma0 with the end address of vma1. return _vma0->vm_start > _vma1->vm_end; } -/// @brief Initializes paging -/// @param info Information coming from bootloader. -void paging_init(boot_info_t *info); +/// @brief Initializes the paging system, sets up memory caches, page +/// directories, and maps important memory regions. +/// @param info Pointer to the boot information structure, containing kernel +/// addresses and other details. +/// @return 0 on success, -1 on error. +int paging_init(boot_info_t *info); /// @brief Provide access to the main page directory. -/// @return A pointer to the main page directory. +/// @return A pointer to the main page directory, or NULL if main_mm is not initialized. page_directory_t *paging_get_main_directory(void); /// @brief Provide access to the current paging directory. @@ -170,9 +185,15 @@ static inline void paging_switch_directory(page_directory_t *dir) set_cr3((uintptr_t)dir); } +/// @brief Checks if the given page directory is the current one. +/// @param pgd A pointer to the page directory to check. +/// @return 1 if the given page directory is the current one, 0 otherwise. +int is_current_pgd(page_directory_t *pgd); + /// @brief Switches paging directory, the pointer can be a lowmem address. /// @param dir A pointer to the new page directory. -void paging_switch_directory_va(page_directory_t *dir); +/// @return Returns 0 on success, or -1 if an error occurs. 
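+/// @note Illustrative usage sketch (`mm` is assumed to be a valid
+/// mm_struct_t whose page directory lives in low memory):
+/// @code
+/// if (paging_switch_directory_va(mm->pgd) < 0) {
+///     pr_crit("Failed to switch page directory.\n");
+/// }
+/// @endcode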
+int paging_switch_directory_va(page_directory_t *dir); /// @brief Invalidate a single tlb page (the one that maps the specified virtual address) /// @param addr The address of the page table. @@ -198,20 +219,21 @@ static inline int paging_is_enabled(void) /// @param f The interrupt stack frame. void page_fault_handler(pt_regs *f); -/// @brief Gets a page from a virtual address -/// @param pgdir The target page directory. -/// @param virt_start The virtual address to query -/// @param size A pointer to the requested size of the data, size is updated if physical memory is not contiguous -/// @return Pointer to the page. +/// @brief Maps a virtual address to a corresponding physical page. +/// @param pgdir The page directory. +/// @param virt_start The starting virtual address to map. +/// @param size Pointer to a size_t variable to store the size of the mapped memory. +/// @return A pointer to the physical page corresponding to the virtual address, or NULL on error. page_t *mem_virtual_to_page(page_directory_t *pgdir, uint32_t virt_start, size_t *size); -/// @brief Creates a virtual to physical mapping, incrementing pages usage counters. -/// @param pgd The target page directory. -/// @param virt_start The virtual address to map to. -/// @param phy_start The physical address to map. -/// @param size The size of the segment. -/// @param flags The flags for the memory range. -void mem_upd_vm_area(page_directory_t *pgd, uint32_t virt_start, uint32_t phy_start, size_t size, uint32_t flags); +/// @brief Updates the virtual memory area in a page directory. +/// @param pgd The page directory to update. +/// @param virt_start The starting virtual address to update. +/// @param phy_start The starting physical address to map to the virtual addresses. +/// @param size The size of the memory area to update. +/// @param flags Flags to set for the page table entries. +/// @return 0 on success, or -1 on failure. +int mem_upd_vm_area(page_directory_t *pgd, uint32_t virt_start, uint32_t phy_start, size_t size, uint32_t flags); /// @brief Clones a range of pages between two distinct page tables /// @param src_pgd The source page directory. @@ -220,12 +242,13 @@ void mem_upd_vm_area(page_directory_t *pgd, uint32_t virt_start, uint32_t phy_st /// @param dst_start The destination virtual address for the clone. /// @param size The size of the segment. /// @param flags The flags for the new dst memory range. -void mem_clone_vm_area(page_directory_t *src_pgd, - page_directory_t *dst_pgd, - uint32_t src_start, - uint32_t dst_start, - size_t size, - uint32_t flags); +/// @return 0 on success, -1 on failure. +int mem_clone_vm_area(page_directory_t *src_pgd, + page_directory_t *dst_pgd, + uint32_t src_start, + uint32_t dst_start, + size_t size, + uint32_t flags); /// @brief Create a virtual memory area. /// @param mm The memory descriptor which will contain the new segment. @@ -254,7 +277,7 @@ uint32_t clone_vm_area(mm_struct_t *mm, /// @brief Destroys a virtual memory area. /// @param mm the memory descriptor from which we will destroy the area. /// @param area the are we want to destroy. -/// @return 0 if the area was destroyed, or 1 if the operation failed. +/// @return 0 if the area was destroyed, or -1 if the operation failed. int destroy_vm_area(mm_struct_t *mm, vm_area_struct_t *area); /// @brief Searches for the virtual memory area at the given address. 
@@ -274,7 +297,7 @@ int is_valid_vm_area(mm_struct_t *mm, uintptr_t vm_start, uintptr_t vm_end); /// @param mm the memory descriptor which should contain the new area. /// @param length the size of the empty spot. /// @param vm_start where we save the starting address for the new area. -/// @return 0 on success, 1 on failure. +/// @return 0 on success, -1 on error, or 1 if no free area is found. int find_free_vm_area(mm_struct_t *mm, size_t length, uintptr_t *vm_start); /// @brief Creates the main memory descriptor. @@ -289,4 +312,5 @@ mm_struct_t *clone_process_image(mm_struct_t *mmp); /// @brief Free Memory Descriptor with all the memory segment contained. /// @param mm The Memory Descriptor to free. -void destroy_process_image(mm_struct_t *mm); +/// @return Returns -1 on error, otherwise 0. +int destroy_process_image(mm_struct_t *mm); diff --git a/mentos/inc/proc_access.h b/mentos/inc/proc_access.h index 92dce200..cd002f59 100644 --- a/mentos/inc/proc_access.h +++ b/mentos/inc/proc_access.h @@ -148,6 +148,26 @@ static inline void set_cr0(uintptr_t cr0) : "r"(cr0)); } + +/// @brief Reads the current cr2 value. +/// @return the value we read. +static inline uintptr_t get_cr2(void) +{ + uintptr_t cr2; + __asm__ __volatile__("mov %%cr2, %0" + : "=r"(cr2)); + return (cr2); +} + +/// @brief Sets the cr2 value. +/// @param cr2 the value we want to set. +static inline void set_cr2(uintptr_t cr2) +{ + __asm__ __volatile__("mov %0, %%cr2" + : + : "r"(cr2)); +} + /// @brief Reads the current cr3 value. /// @return the value we read. static inline uintptr_t get_cr3(void) diff --git a/mentos/src/mem/kheap.c b/mentos/src/mem/kheap.c index 65e05b9e..72c628df 100644 --- a/mentos/src/mem/kheap.c +++ b/mentos/src/mem/kheap.c @@ -23,73 +23,108 @@ #include "sys/bitops.h" #include "sys/list_head.h" -/// The heap size. +/// @brief The size of the heap in bytes, defined as 4 megabytes. #define HEAP_SIZE (4 * M) -/// The lower bound address, when randomly placing the virtual memory area. + +/// @brief The lower bound address for virtual memory area placement. +/// This address marks the starting point of the heap. #define HEAP_VM_LB 0x40000000 -/// The upper bound address, when randomly placing the virtual memory area. + +/// @brief The upper bound address for virtual memory area placement. +/// This address marks the endpoint of the heap, ensuring no overlap with other memory regions. #define HEAP_VM_UB 0x50000000 -/// Overhead given by the block_t itself. +/// @brief The overhead introduced by the block_t structure itself. +/// This is used for memory management and bookkeeping. #define OVERHEAD sizeof(block_t) -/// Align the given address. + +/// @brief Aligns the given address to the nearest upper page boundary. +/// The address will be aligned to 4096 bytes (0x1000). +/// @param addr The address to align. +/// @return The aligned address. #define ADDR_ALIGN(addr) ((((uint32_t)(addr)) & 0xFFFFF000) + 0x1000) -/// Checks if the given address is aligned. + +/// @brief Checks if the given address is aligned to a page boundary. +/// @param addr The address to check. +/// @return Non-zero value if the address is aligned, zero otherwise. #define IS_ALIGN(addr) ((((uint32_t)(addr)) & 0x00000FFF) == 0) -/// @brief Identifies a block of memory. +/// @brief Represents a block of memory within the heap. +/// This structure includes metadata for managing memory allocation and free status. typedef struct block_t { - /// @brief Single bit that identifies if the block is free. 
+ /// @brief A single bit indicating if the block is free (1) or allocated (0). unsigned int is_free : 1; - /// @brief Size of the block. + + /// @brief The size of the block in bytes. + /// This includes the space for the block's overhead. unsigned int size : 31; - /// @brief Entry in the list of blocks. + + /// @brief Entry in the list of all blocks in the heap. list_head list; + /// @brief Entry in the list of free blocks. list_head free; } block_t; -/// @brief Maps the heap memory to this three easily accessible values. +/// @brief Maps the heap memory to easily accessible values. +/// This structure contains pointers to the list of all blocks and the list of free blocks. typedef struct { - /// @brief List of blocks. + /// @brief List of all memory blocks, both free and allocated. list_head list; - /// @brief List of free blocks. + + /// @brief List of free blocks available for allocation. list_head free; } heap_header_t; -/// @brief Returns the given size, rounded in multiples of 16. -/// @param size the given size. -/// @return the size rounded to the nearest multiple of 16. +/// @brief Returns the given size, rounded to the nearest multiple of 16. This +/// is useful for ensuring memory alignment requirements are met. +/// @param size The given size to be rounded. +/// @return The size rounded to the nearest multiple of 16. static inline uint32_t __blkmngr_get_rounded_size(uint32_t size) { return round_up(size, 16); } -/// @brief Checks if the given size fits inside the block. -/// @param block The given block. -/// @param size The size to check -/// @return 1 if it fits, 0 otherwise. +/// @brief Checks if the given size fits inside the block. This function +/// verifies whether the specified size can be accommodated within the block's +/// available size. +/// @param block The given block to check. Must not be NULL. +/// @param size The size to check against the block's size. +/// @return 1 if the size fits within the block, 0 otherwise. +/// @error Returns -1 if the block is NULL. static inline int __blkmngr_does_it_fit(block_t *block, uint32_t size) { - assert(block && "Received null block."); + // Check for a null block pointer to avoid dereferencing a null pointer + if (!block) { + pr_crit("Received null block.\n"); + return -1; // Error: Block pointer is NULL. + } + + // Check if the block can accommodate the requested size. return block->size >= size; } -/// @brief Prepares a string that represents the block. -/// @param block the block to represent. -/// @return a string with the block info. +/// @brief Prepares a string that represents the block. This function formats +/// the information of the specified block into a human-readable string. +/// @param block The block to represent. Can be NULL. +/// @return A string containing the block's address, size, and free status. static inline const char *__block_to_string(block_t *block) { + // Static buffer to hold the string representation of the block. static char buffer[256]; + if (block) { + // Format the block's information into the buffer sprintf(buffer, "0x%p [%9s](%d)", - block, - to_human_size(block->size), - block->is_free); + (void *)block, // Pointer to the block + to_human_size(block->size), // Human-readable size + block->is_free); // Free status (1 if free, 0 if allocated) } else { + // If the block is NULL, indicate this in the buffer. sprintf(buffer, "NULL"); } - return buffer; + + return buffer; // Return the formatted string. } /// @brief Dumpts debug information about the heap. 
@@ -145,263 +180,486 @@ static inline void __blkmngr_dump(heap_header_t *header) /// @return a block that should fit our needs. static inline block_t *__blkmngr_find_best_fitting(heap_header_t *header, uint32_t size) { - assert(header && "Received a NULL heap header."); - block_t *best_fitting = NULL, *block; + // Check if the header is NULL, log an error and return NULL + if (!header) { + pr_crit("Received a NULL heap header.\n"); + return NULL; // Return NULL to indicate failure. + } + + block_t *best_fitting = NULL; // Initialize the best fitting block to NULL. + block_t *block; // Declare a pointer for the current block. + + // Iterate over the list of free blocks. list_for_each_decl(it, &header->free) { + // Get the current block from the list. block = list_entry(it, block_t, free); + + // Skip if the block is not free. if (!block->is_free) { continue; } + + // Check if the block can accommodate the requested size. if (!__blkmngr_does_it_fit(block, size)) { continue; } + + // Update the best fitting block if it's the first found or smaller than + // the current best. if (!best_fitting || (block->size < best_fitting->size)) { best_fitting = block; } } + + // Return the best fitting block found, or NULL if none were suitable. return best_fitting; } /// @brief Given a block, finds its previous block. /// @param header the heap header. /// @param block the block. -/// @return a pointer to the previous block. +/// @return a pointer to the previous block or NULL if an error occurs. static inline block_t *__blkmngr_get_previous_block(heap_header_t *header, block_t *block) { - assert(header && "Received a NULL heap header."); - assert(block && "Received null block."); - // If the block is actually the head of the list, return NULL. + // Check if the heap header is valid. + if (!header) { + pr_crit("Received a NULL heap header.\n"); + return NULL; // Return NULL to indicate failure. + } + + // Check if the block is valid. + if (!block) { + pr_crit("Received a NULL block.\n"); + return NULL; // Return NULL to indicate failure. + } + + // If the block is the head of the list, return NULL. if (block->list.prev == &header->list) { - return NULL; + return NULL; // No previous block exists. } + + // Return the previous block by accessing the list entry. return list_entry(block->list.prev, block_t, list); } -/// @brief Given a block, finds its next block. -/// @param header the heap header. -/// @param block the block. -/// @return a pointer to the next block. +/// @brief Given a block, finds its next block in the memory pool. +/// @param header The heap header containing information about the heap. +/// @param block The current block for which the next block is to be found. +/// @return A pointer to the next block if it exists, or NULL if an error occurs or if the block is the last one. static inline block_t *__blkmngr_get_next_block(heap_header_t *header, block_t *block) { - assert(header && "Received a NULL heap header."); - assert(block && "Received null block."); - // If the block is actually the tail of the list, return NULL. + // Check if the heap header is valid. + if (!header) { + pr_crit("Received a NULL heap header.\n"); + return NULL; // Return NULL to indicate failure. + } + + // Check if the block is valid. + if (!block) { + pr_crit("Received a NULL block.\n"); + return NULL; // Return NULL to indicate failure. + } + + // If the block is the tail of the list, return NULL as there is no next block. 
if (block->list.next == &header->list) { - return NULL; + return NULL; // No next block exists. } + + // Return the next block by accessing the list entry. return list_entry(block->list.next, block_t, list); } -/// @brief Checks if the given `previous` block, is before `block`. -/// @param block the block from which we check the other block. -/// @param previous the supposedly previous block. -/// @return 1 if it is the previous block, 0 otherwise. +/// @brief Checks if the given `previous` block is actually the block that comes +/// before `block` in the memory pool. +/// @param block The current block from which we are checking the previous block. +/// @param previous The block that is supposedly the previous block. +/// @return 1 if `previous` is the actual previous block of `block`, 0 +/// otherwise. Returns -1 on error. static inline int __blkmngr_is_previous_block(block_t *block, block_t *previous) { - assert(block && "Received null block."); - assert(previous && "Received null previous block."); + // Check if the current block is valid. + if (!block) { + pr_crit("Received a NULL block.\n"); + return -1; // Error: Invalid block. + } + + // Check if the previous block is valid. + if (!previous) { + pr_crit("Received a NULL previous block.\n"); + return -1; // Error: Invalid previous block. + } + + // Check if the previous block's list entry matches the previous entry of + // the current block. return block->list.prev == &previous->list; } -/// @brief Checks if the given `next` block, is after `block`. -/// @param block the block from which we check the other block. -/// @param next the supposedly next block. -/// @return 1 if it is the next block, 0 otherwise. +/// @brief Checks if the given `next` block is actually the block that comes +/// after `block` in the memory pool. +/// @param block The current block from which we are checking the next block. +/// @param next The block that is supposedly the next block. +/// @return 1 if `next` is the actual next block of `block`, 0 otherwise. +/// Returns -1 on error. static inline int __blkmngr_is_next_block(block_t *block, block_t *next) { - assert(block && "Received null block."); - assert(next && "Received null next block."); + // Check if the current block is valid. + if (!block) { + pr_crit("Received a NULL block.\n"); + return -1; // Error: Invalid block. + } + + // Check if the next block is valid. + if (!next) { + pr_crit("Received a NULL next block.\n"); + return -1; // Error: Invalid next block. + } + + // Check if the next block's list entry matches the next entry of the + // current block. return block->list.next == &next->list; } -/// @brief Splits a block in two blocks, provided the size of the first one. -/// @param header the heap header. -/// @param block the block to split. -/// @param size the size of the first of the two new blocks. -static inline void __blkmngr_split_block(heap_header_t *header, block_t *block, uint32_t size) +/// @brief Splits a block into two blocks based on the specified size for the first block. +/// @param header The heap header containing metadata about the memory pool. +/// @param block The block to be split, which must be free. +/// @param size The size of the first of the two new blocks. Must be less than the original block's size minus overhead. +/// @return 0 on success, -1 on error. 
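+/// @note For illustration: splitting a free block of size 128 with size = 48
+/// leaves the original block with 48 bytes and creates a new free block of
+/// 128 - 48 - OVERHEAD bytes right after it (numbers are hypothetical; sizes
+/// are always multiples of 16 here).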
+static inline int __blkmngr_split_block(heap_header_t *header, block_t *block, uint32_t size) { - assert(block && "Received NULL block."); - assert(block->is_free && "The block is not free."); + // Check if the block is valid. + if (!block) { + pr_crit("Received NULL block.\n"); + return -1; // Cannot proceed without a valid block. + } + + // Check if the block is free; splitting a used block is not allowed. + if (!block->is_free) { + pr_crit("The block is not free and cannot be split.\n"); + return -1; // Cannot split a non-free block. + } + + // Check if the requested size is valid (greater than 0 and less than the + // current block size minus overhead). + if ((size == 0) || (size + OVERHEAD >= block->size)) { + pr_crit("Invalid size for splitting: size must be > 0 and < %u.\n", block->size - OVERHEAD); + return -1; // Size is invalid for splitting. + } pr_debug("Splitting %s\n", __block_to_string(block)); - // Create the new block. + // Create the new block by calculating its address based on the original block and the specified size. block_t *split = (block_t *)((char *)block + OVERHEAD + size); - // Insert the block in the list. + + // Insert the new block into the main list after the current block. list_head_insert_after(&split->list, &block->list); - // Insert the block in the free list. + + // Insert the new block into the free list. list_head_insert_after(&split->free, &block->free); - // Update the size of the new block. + + // Update the size of the new block to reflect the remaining size after splitting. split->size = block->size - OVERHEAD - size; - // Update the size of the base block. + + // Update the size of the original block to the new size. block->size = size; - // Set the blocks as free. + + // Mark both blocks as free. block->is_free = 1; split->is_free = 1; pr_debug("Into %s\n", __block_to_string(block)); pr_debug("And %s\n", __block_to_string(split)); + + return 0; // Success } -/// @brief Merges two blocks, into the first block. -/// @param header the heap header. -/// @param block the first block. -/// @param other the second block, which is lost in the process. -static inline void __blkmngr_merge_blocks(heap_header_t *header, block_t *block, block_t *other) +/// @brief Merges two adjacent blocks into the first block, effectively expanding its size. +/// @param header The heap header containing metadata about the memory pool. +/// @param block The first block that will be expanded. +/// @param other The second block that will be merged into the first block, becoming invalid in the process. +/// @return 0 on success, -1 on error. +static inline int __blkmngr_merge_blocks(heap_header_t *header, block_t *block, block_t *other) { - assert(block && "Received NULL first block."); - assert(other && "Received NULL second block."); - assert(block->is_free && "The first block is not free."); - assert(other->is_free && "The second block is not free."); - assert(__blkmngr_is_next_block(block, other) && "The blocks are not adjacent."); + // Check if the first block is valid. + if (!block) { + pr_crit("Received NULL first block.\n"); + return -1; // Cannot proceed without a valid first block. + } + + // Check if the second block is valid. + if (!other) { + pr_crit("Received NULL second block.\n"); + return -1; // Cannot proceed without a valid second block. + } + + // Check if the first block is free. + if (!block->is_free) { + pr_crit("The first block is not free.\n"); + return -1; // Cannot merge a non-free block. + } + + // Check if the second block is free. 
+ if (!other->is_free) { + pr_crit("The second block is not free.\n"); + return -1; // Cannot merge with a non-free block. + } + + // Check if the blocks are adjacent. + if (!__blkmngr_is_next_block(block, other)) { + pr_crit("The blocks are not adjacent and cannot be merged.\n"); + return -1; // Blocks must be adjacent to merge. + } pr_debug("Merging %s\n", __block_to_string(block)); pr_debug("And %s\n", __block_to_string(other)); - // Remove the other block from the free list. + // Remove the other block from the free list, effectively marking it as not available. list_head_remove(&other->free); - // Remove the other block from the list. + + // Remove the other block from the main list. list_head_remove(&other->list); - // Update the size. - block->size = block->size + other->size + OVERHEAD; - // Set the splitted block as free. + + // Update the size of the first block to include the size of the second block and overhead. + block->size += other->size + OVERHEAD; + + // The first block remains free after merging, so set its free status. block->is_free = 1; pr_debug("Into %s\n", __block_to_string(block)); + + return 0; // Success } -/// @brief Extends the provided heap of the given increment. -/// @param heap Pointer to the heap. -/// @param increment Increment to the heap. -/// @return Pointer to the old top of the heap, ready to be used. +/// @brief Extends the provided heap by a specified increment. +/// @param heap Pointer to the heap structure that tracks the heap's memory area. +/// @param increment The amount by which to increase the heap size. +/// @return Pointer to the old top of the heap, or NULL if an error occurred. static void *__do_brk(vm_area_struct_t *heap, uint32_t increment) { - assert(heap && "Pointer to the heap is NULL."); + // Check if the heap pointer is valid. + if (!heap) { + pr_crit("Pointer to the heap is NULL.\n"); + return NULL; // Cannot proceed without a valid heap pointer. + } + // Get the current process. task_struct *task = scheduler_get_current_process(); - assert(task && "There is no current task!\n"); - // Get the memory descriptor. + // Check if there is a current task. + if (!task) { + pr_crit("There is no current task!\n"); + return NULL; // Cannot proceed without a current task. + } + + // Get the memory descriptor for the current task. mm_struct_t *mm = task->mm; - assert(mm && "The mm_struct of the current task is not initialized!\n"); - // Compute the new heap top. + // Check if the memory descriptor is initialized. + if (!mm) { + pr_crit("The mm_struct of the current task is not initialized!\n"); + return NULL; // Cannot proceed without a valid mm_struct. + } + + // Compute the new top of the heap by adding the increment to the current break. uint32_t new_heap_top = mm->brk + increment; - // Debugging message. + + // Debugging message to indicate the expansion of the heap. pr_notice("Expanding heap from 0x%p to 0x%p.\n", mm->brk, new_heap_top); - // If new boundary is larger than the end, we fail. + + // Check if the new heap top exceeds the boundaries of the heap. if (new_heap_top > heap->vm_end) { - pr_err("The new boundary is larger than the end!"); - return NULL; + pr_err("The new boundary is larger than the end of the heap!\n"); + return NULL; // New boundary exceeds the allowed heap area. } - // Overwrite the top of the heap. + + // Update the break (top of the heap) to the new value. mm->brk = new_heap_top; - // Return the old top of the heap. + + // Return the pointer to the old top of the heap before the increment. 
return (void *)(mm->brk - increment); } -/// @brief Allocates size bytes of uninitialized storage. -/// @param heap Heap from which we get the unallocated memory. -/// @param size Size of the desired memory area. -/// @return Pointer to the allocated memory area. +/// @brief Allocates a specified number of bytes of uninitialized storage from the heap. +/// @param heap Pointer to the heap from which we will allocate memory. +/// @param size The size of the desired memory area to allocate. +/// @return Pointer to the allocated memory area, or NULL if allocation fails. static void *__do_malloc(vm_area_struct_t *heap, size_t size) { + // Check if the heap pointer is valid. + if (!heap) { + pr_crit("Pointer to the heap is NULL.\n"); + return NULL; // Cannot proceed without a valid heap pointer. + } + + // Check if the requested size is zero. if (size == 0) { - return NULL; + pr_err("Requested allocation size is zero.\n"); + return NULL; // No allocation for zero size. } - // Get the heap header. + + // Get the heap header to access the memory management structures. heap_header_t *header = (heap_header_t *)heap->vm_start; - // Calculate size that's used, round it to multiple of 16. + if (!header) { + pr_err("Heap header is NULL.\n"); + return NULL; // Cannot proceed without a valid heap header. + } + + // Calculate the size that is used, rounding it to the nearest multiple of 16. uint32_t rounded_size = __blkmngr_get_rounded_size(size); - // Calculate actual size we need, which is the rounded size + // Calculate the actual size required, which includes overhead for the block header. uint32_t actual_size = rounded_size + OVERHEAD; - pr_debug("Searching block of size: %s\n", to_human_size(rounded_size)); - // Find the best fitting block. + + pr_debug("Searching for a block of size: %s\n", to_human_size(rounded_size)); + + // Find the best fitting block in the heap. block_t *block = __blkmngr_find_best_fitting(header, rounded_size); - // If we were able to find a suitable block, either split it, or return it. + + // If a suitable block is found, either split it or use it directly. if (block) { + // Check if the block size is larger than the actual size needed. if (block->size > actual_size) { - // Split the block, provide the rounded size to the function. - __blkmngr_split_block(header, block, rounded_size); + // Split the block, providing the rounded size to the splitting function. + if (__blkmngr_split_block(header, block, rounded_size) < 0) { + pr_err("Failed to split the block.\n"); + return NULL; // Return NULL if splitting fails. + } } else { pr_debug("Found perfect block: %s\n", __block_to_string(block)); } - // Remove the block from the free list. + + // Remove the block from the free list to mark it as allocated. list_head_remove(&block->free); } else { - pr_debug("Failed to find suitable block, we need to create a new one.\n"); - // We need more space, specifically the size of the block plus the size - // of the block_t structure. + pr_debug("Failed to find a suitable block; creating a new one.\n"); + + // We need more space, specifically the size of the block plus the size of the block_t structure. block = __do_brk(heap, actual_size); - // Set the size. + // Check if the block allocation was successful. + if (!block) { + pr_err("Failed to create a new block.\n"); + return NULL; // Return NULL if block creation fails. + } + + // Set the size for the newly created block. block->size = rounded_size; - // Check the block. 
- assert(block && "Failed to create a new block!"); - // Setup the new block. + + // Initialize the new block's list pointers. list_head_init(&block->list); list_head_init(&block->free); + // Insert the block into the header's list of blocks. list_head_insert_before(&block->list, &header->list); } - // Set the new block as used. + + // Set the block as used (allocated). block->is_free = 0; + + // Optionally dump the current state of the heap for debugging. __blkmngr_dump(header); + + // Return a pointer to the memory area, skipping the block header. return (void *)((char *)block + OVERHEAD); } -/// @brief Deallocates previously allocated space. -/// @param heap Heap to which we return the allocated memory. -/// @param ptr Pointer to the allocated memory. -static void __do_free(vm_area_struct_t *heap, void *ptr) +/// @brief Deallocates previously allocated space, returning it to the heap. +/// @param heap Pointer to the heap to which the allocated memory is returned. +/// @param ptr Pointer to the previously allocated memory to be deallocated. +/// @return 0 on success, -1 on error. +static int __do_free(vm_area_struct_t *heap, void *ptr) { - // We will use these in writing. + // Check if the heap pointer is valid. + if (!heap) { + pr_crit("Pointer to the heap is NULL.\n"); + return -1; // Return error if the heap pointer is NULL. + } + + // Check if the pointer to be freed is NULL. + if (!ptr) { + pr_err("Attempted to free a NULL pointer.\n"); + return -1; // Return error if the pointer is NULL. + } + + // Get the heap header to access the memory management structures. heap_header_t *header = (heap_header_t *)heap->vm_start; - // Get the current block. + if (!header) { + pr_err("Heap header is NULL.\n"); + return -1; // Return error if the heap header is not valid. + } + + // Calculate the block pointer from the provided pointer. block_t *block = (block_t *)((char *)ptr - OVERHEAD); - // Get the previous block. + if (!block) { + pr_err("Calculated block pointer is NULL.\n"); + return -1; // Safety check; should not happen. + } + + // Get the previous and next blocks for merging purposes. block_t *prev = __blkmngr_get_previous_block(header, block); - // Get the next block. block_t *next = __blkmngr_get_next_block(header, block); + pr_debug("Freeing block %s\n", __block_to_string(block)); - // Set the block free. + + // Mark the block as free. block->is_free = 1; - // Merge adjacent blocks. + + // Attempt to merge with adjacent blocks if they are free. if (prev && prev->is_free && next && next->is_free) { - pr_debug("Merging with previous and next.\n"); - __blkmngr_merge_blocks(header, prev, block); - __blkmngr_merge_blocks(header, prev, next); + pr_debug("Merging with previous and next blocks.\n"); + __blkmngr_merge_blocks(header, prev, block); // Merge with previous. + __blkmngr_merge_blocks(header, prev, next); // Merge with next. } else if (prev && prev->is_free) { - pr_debug("Merging with previous.\n"); - __blkmngr_merge_blocks(header, prev, block); + pr_debug("Merging with previous block.\n"); + __blkmngr_merge_blocks(header, prev, block); // Merge with previous. } else if (next && next->is_free) { - pr_debug("Merging with next.\n"); - // Merge the blocks. + pr_debug("Merging with next block.\n"); + // Merge the blocks with the next one. __blkmngr_merge_blocks(header, block, next); - // Add the block to the free list. + // Add the current block to the free list. 
list_head_insert_before(&block->free, &header->free); } else { - pr_debug("No merging required.\n"); - // Add the block to the free list. + pr_debug("No merging required; adding block to free list.\n"); + // Add the block to the free list since no merging is needed. list_head_insert_before(&block->free, &header->free); } + + // Dump the current state of the heap for debugging purposes. __blkmngr_dump(header); + + return 0; // Return success. } void *sys_brk(void *addr) { + // Check if the address is NULL. + if (!addr) { + pr_err("Received a NULL addr.\n"); + return NULL; // Return error if the addr is NULL. + } + // Get the current process. task_struct *task = scheduler_get_current_process(); - assert(task && "There is no current task!\n"); - // Get the memory descriptor. + if (!task) { + pr_err("There is no current task!\n"); + return NULL; // Return error if there is no current task. + } + + // Get the memory descriptor for the current task. mm_struct_t *mm = task->mm; - assert(mm && "The mm_struct of the current task is not initialized!\n"); - // Get the heap. + if (!mm) { + pr_err("The mm_struct of the current task is not initialized!\n"); + return NULL; // Return error if memory descriptor is not initialized. + } + + // Get the heap associated with the current task. vm_area_struct_t *heap = find_vm_area(task->mm, task->mm->start_brk); - // Allocate the segment if don't exist. + + // If the heap does not exist, allocate it. if (heap == NULL) { pr_debug("Allocating heap!\n"); - // Set the heap to 4 Mb. + + // Set the heap size to 4 MB. size_t heap_size = HEAP_SIZE; - // Add to that the space required to store the header, and the first block. + + // Calculate the total segment size needed (header + initial block). size_t segment_size = heap_size + sizeof(heap_header_t) + sizeof(block_t); + // Create the virtual memory area, we are goin to place the area between // 0x40000000 and 0x50000000, which surely is below the stack. The VM // code will check if it is a valid area anyway. @@ -411,39 +669,63 @@ void *sys_brk(void *addr) segment_size, MM_RW | MM_PRESENT | MM_USER | MM_UPDADDR, GFP_HIGHUSER); + if (!heap) { + pr_err("Failed to allocate heap memory area.\n"); + return NULL; // Return error if heap allocation fails. + } + pr_debug("Heap size : %s.\n", to_human_size(heap_size)); pr_debug("Heap start : 0x%p.\n", heap->vm_start); pr_debug("Heap end : 0x%p.\n", heap->vm_end); - // Initialize the memory. + + // Initialize the memory for the heap. memset((char *)heap->vm_start, 0, segment_size); - // Save where the original heap starts. + + // Save the starting address of the heap. task->mm->start_brk = heap->vm_start; - // Initialize the header. + + // Initialize the heap header. heap_header_t *header = (heap_header_t *)heap->vm_start; list_head_init(&header->list); list_head_init(&header->free); - // Preare the first block. - block_t *block = (block_t *)(header + sizeof(heap_header_t)); - // Let us start with a block of 1 Kb. - block->size = K; + + // Prepare the first block of memory. + block_t *block = (block_t *)((char *)header + sizeof(heap_header_t)); + block->size = K; // Start with a block of 1 KB. list_head_init(&block->list); list_head_init(&block->free); - // Insert the block inside the heap. + + // Insert the initial block into the heap. list_head_insert_before(&block->list, &header->list); list_head_insert_before(&block->free, &header->free); - // Save where the heap actually start. + + // Set the actual starting address of the heap. 
task->mm->brk = (uint32_t)((char *)block + OVERHEAD + block->size); - // Set the block as free. + // Mark the initial block as free. block->is_free = 1; + + // Dump the state of the memory manager for debugging. __blkmngr_dump(header); } + + // Variable to hold the return pointer. void *_ret = NULL; - // If the address falls inside the memory region, call the free function, - // otherwise execute a malloc of the specified amount. + + // Check if the specified address is within the allocated heap range. if (((uintptr_t)addr > heap->vm_start) && ((uintptr_t)addr < heap->vm_end)) { - __do_free(heap, addr); + // If it is, free the specified address. + if (__do_free(heap, addr) < 0) { + pr_err("Failed to free memory at address: 0x%p.\n", addr); + return NULL; // Return error if freeing fails. + } } else { + // If not, allocate new memory of the specified size. _ret = __do_malloc(heap, (size_t)addr); + if (!_ret) { + pr_err("Memory allocation failed for size: %zu.\n", (size_t)addr); + return NULL; // Return error if allocation fails. + } } - return _ret; + + return _ret; // Return the pointer to the allocated memory or NULL on failure. } diff --git a/mentos/src/mem/paging.c b/mentos/src/mem/paging.c index 2d375d54..055505bd 100644 --- a/mentos/src/mem/paging.c +++ b/mentos/src/mem/paging.c @@ -4,10 +4,10 @@ /// See LICENSE.md for details. // Setup the logging for this file (do this before any other include). -#include "sys/kernel_levels.h" // Include kernel log levels. -#define __DEBUG_HEADER__ "[PAGING]" ///< Change header. -#define __DEBUG_LEVEL__ LOGLEVEL_DEBUG ///< Set log level. -#include "io/debug.h" // Include debugging functions. +#include "sys/kernel_levels.h" // Include kernel log levels. +#define __DEBUG_HEADER__ "[PAGING]" ///< Change header. +#define __DEBUG_LEVEL__ LOGLEVEL_NOTICE ///< Set log level. +#include "io/debug.h" // Include debugging functions. #include "assert.h" #include "descriptor_tables/isr.h" @@ -59,14 +59,53 @@ typedef struct pg_iter_entry_s { page_directory_t *paging_get_main_directory(void) { + // Ensure the main_mm structure is initialized. + if (!main_mm) { + pr_crit("main_mm is not initialized\n"); + return NULL; // Return NULL to indicate failure. + } + + // Return the pointer to the main page directory. return main_mm->pgd; } -/// @brief Switches paging directory, the pointer can be a lowmem address -void paging_switch_directory_va(page_directory_t *dir) +int is_current_pgd(page_directory_t *pgd) +{ + // Check if the pgd pointer is NULL + if (pgd == NULL) { + // Return 0 (false) if the pgd pointer is NULL + return 0; + } + // Compare the given pgd with the current page directory + return pgd == paging_get_current_directory(); +} + +int paging_switch_directory_va(page_directory_t *dir) { + // Ensure the directory pointer is valid. + if (!dir) { + pr_crit("Invalid page directory pointer\n"); + return -1; // Return -1 to indicate failure. + } + + // Get the low memory page corresponding to the given directory address. page_t *page = get_lowmem_page_from_address((uintptr_t)dir); - paging_switch_directory((page_directory_t *)get_physical_address_from_page(page)); + if (!page) { + pr_crit("Failed to get low memory page from address\n"); + return -1; // Return -1 to indicate failure. + } + + // Get the physical address of the low memory page. + uintptr_t phys_addr = get_physical_address_from_page(page); + if (!phys_addr) { + pr_crit("Failed to get physical address from page\n"); + return -1; // Return -1 to indicate failure. 
+ } + + // Switch to the new paging directory using the physical address. + paging_switch_directory((page_directory_t *)phys_addr); + + return 0; // Return success. } void paging_flush_tlb_single(unsigned long addr) @@ -86,25 +125,44 @@ vm_area_struct_t *create_vm_area(mm_struct_t *mm, // Compute the end of the virtual memory area. vm_end = vm_start + size; + // Check if the range is already occupied. if (is_valid_vm_area(mm, vm_start, vm_end) <= 0) { pr_crit("The virtual memory area range [%p, %p] is already in use.\n", vm_start, vm_end); - kernel_panic("Wrong virtual memory area range."); + return NULL; // Return NULL to indicate failure. } + // Allocate on kernel space the structure for the segment. segment = kmem_cache_alloc(vm_area_cache, GFP_KERNEL); + if (!segment) { + pr_crit("Failed to allocate vm_area_struct\n"); + return NULL; // Return NULL to indicate failure. + } + // Find the nearest order for the given memory size. order = find_nearest_order_greater(vm_start, size); if (pgflags & MM_COW) { + // If the area is copy-on-write, clear the present and update address + // flags. pgflags = pgflags & ~(MM_PRESENT | MM_UPDADDR); phy_start = 0; } else { - pgflags = pgflags | MM_UPDADDR; + // Otherwise, set the update address flag and allocate physical pages. + pgflags = pgflags | MM_UPDADDR; + page_t *page = _alloc_pages(gfpflags, order); - phy_start = get_physical_address_from_page(page); + if (!page) { + pr_crit("Failed to allocate pages\n"); + kmem_cache_free(segment); + return NULL; // Return NULL to indicate failure. + } + + // Retrieve the physical address from the allocated page. + phy_start = get_physical_address_from_page(page); } + // Update the virtual memory area in the page directory. mem_upd_vm_area(mm->pgd, vm_start, phy_start, size, pgflags); // Update vm_area_struct info. @@ -112,55 +170,86 @@ vm_area_struct_t *create_vm_area(mm_struct_t *mm, segment->vm_end = vm_end; segment->vm_mm = mm; - // Update memory descriptor list of vm_area_struct. + // Insert the new segment into the memory descriptor's list of vm_area_structs. list_head_insert_after(&segment->vm_list, &mm->mmap_list); mm->mmap_cache = segment; - // Sort the mmap_list. + // Sort the mmap_list to maintain order. list_head_sort(&mm->mmap_list, vm_area_compare); // Update memory descriptor info. mm->map_count++; - mm->total_vm += (1U << order); - return segment; + return segment; // Return the created vm_area_struct. } uint32_t clone_vm_area(mm_struct_t *mm, vm_area_struct_t *area, int cow, uint32_t gfpflags) { + // Allocate a new vm_area_struct for the cloned area. vm_area_struct_t *new_segment = kmem_cache_alloc(vm_area_cache, GFP_KERNEL); + if (!new_segment) { + pr_crit("Failed to allocate memory for new vm_area_struct\n"); + return -1; // Return -1 to indicate failure. + } + + // Copy the content of the existing vm_area_struct to the new segment. memcpy(new_segment, area, sizeof(vm_area_struct_t)); + // Update the memory descriptor for the new segment. new_segment->vm_mm = mm; + // Calculate the size and the nearest order for the new segment's memory allocation. uint32_t size = new_segment->vm_end - new_segment->vm_start; uint32_t order = find_nearest_order_greater(area->vm_start, size); if (!cow) { // If not copy-on-write, allocate directly the physical pages - page_t *dst_page = _alloc_pages(gfpflags, order); + page_t *dst_page = _alloc_pages(gfpflags, order); + if (!dst_page) { + pr_crit("Failed to allocate physical pages for the new vm_area\n"); + // Free the newly allocated segment on failure. 
+ kmem_cache_free(new_segment); + return -1; // Return -1 to indicate failure. + } + uint32_t phy_vm_start = get_physical_address_from_page(dst_page); - // Then update the virtual memory map - mem_upd_vm_area(mm->pgd, new_segment->vm_start, phy_vm_start, size, - MM_RW | MM_PRESENT | MM_UPDADDR | MM_USER); + // Update the virtual memory map in the page directory. + if (mem_upd_vm_area(mm->pgd, new_segment->vm_start, phy_vm_start, size, + MM_RW | MM_PRESENT | MM_UPDADDR | MM_USER) < 0) { + pr_crit("Failed to update virtual memory area in page directory\n"); + // Free the allocated pages on failure. + __free_pages(dst_page); + // Free the newly allocated segment. + kmem_cache_free(new_segment); + return -1; // Return -1 to indicate failure. + } - // Copy virtual memory of source area into dest area by using a virtual mapping + // Copy virtual memory from source area into destination area using a virtual mapping. virt_memcpy(mm, area->vm_start, area->vm_mm, area->vm_start, size); } else { - // If copy-on-write, set the original pages as read-only - mem_upd_vm_area(area->vm_mm->pgd, area->vm_start, 0, size, - MM_COW | MM_PRESENT | MM_USER); + // If copy-on-write, set the original pages as read-only. + if (mem_upd_vm_area(area->vm_mm->pgd, area->vm_start, 0, size, + MM_COW | MM_PRESENT | MM_USER) < 0) { + pr_crit("Failed to mark original pages as copy-on-write\n"); + // Free the newly allocated segment. + kmem_cache_free(new_segment); + return -1; // Return -1 to indicate failure. + } - // Do a cow of the whole virtual memory area, handling fragmented physical memory - // and set it as read-only - mem_clone_vm_area(area->vm_mm->pgd, - mm->pgd, - area->vm_start, - new_segment->vm_start, - size, - MM_COW | MM_PRESENT | MM_UPDADDR | MM_USER); + // Perform a COW of the whole virtual memory area, handling fragmented physical memory. + if (mem_clone_vm_area(area->vm_mm->pgd, + mm->pgd, + area->vm_start, + new_segment->vm_start, + size, + MM_COW | MM_PRESENT | MM_UPDADDR | MM_USER) < 0) { + pr_crit("Failed to clone virtual memory area\n"); + // Free the newly allocated segment. + kmem_cache_free(new_segment); + return -1; // Return -1 to indicate failure. + } } // Update memory descriptor list of vm_area_struct. @@ -169,7 +258,6 @@ uint32_t clone_vm_area(mm_struct_t *mm, vm_area_struct_t *area, int cow, uint32_ // Update memory descriptor info. mm->map_count++; - mm->total_vm += (1U << order); return 0; @@ -180,99 +268,177 @@ int destroy_vm_area(mm_struct_t *mm, vm_area_struct_t *area) size_t area_total_size, area_size, area_start; uint32_t order, block_size; page_t *phy_page; - // Get the total area size. + + // Get the total size of the virtual memory area. area_total_size = area->vm_end - area->vm_start; - // Get the starting location. + + // Get the starting address of the area. area_start = area->vm_start; - // Free all the memory. + + // Free all the memory associated with the virtual memory area. while (area_total_size > 0) { area_size = area_total_size; - phy_page = mem_virtual_to_page(mm->pgd, area_start, &area_size); - // If the pages are marked as copy-on-write, do not deallocate them! + + // Translate the virtual address to the physical page. + phy_page = mem_virtual_to_page(mm->pgd, area_start, &area_size); + + // Check if the page was successfully retrieved. + if (!phy_page) { + pr_crit("Failed to retrieve physical page for virtual address %p\n", (void *)area_start); + return -1; // Return -1 to indicate error. + } + + // If the pages are marked as copy-on-write, do not deallocate them. 
if (page_count(phy_page) > 1) { order = phy_page->bbpage.order; block_size = 1UL << order; + + // Decrement the reference count for each page in the block. for (int i = 0; i < block_size; i++) { page_dec(phy_page + i); } } else { + // If not copy-on-write, free the allocated pages. __free_pages(phy_page); } + + // Update the remaining size and starting address for the next iteration. area_total_size -= area_size; area_start += area_size; } - // Delete segment from the mmap. + + // Remove the segment from the memory map list. list_head_remove(&area->vm_list); - // Free the memory. + + // Free the memory allocated for the vm_area_struct. kmem_cache_free(area); - // Reduce the counter for memory mapped areas. + + // Decrement the counter for the number of memory-mapped areas. --mm->map_count; - return 0; + + return 0; // Return 0 to indicate success. } -inline vm_area_struct_t *find_vm_area(mm_struct_t *mm, uint32_t vm_start) +vm_area_struct_t *find_vm_area(mm_struct_t *mm, uint32_t vm_start) { vm_area_struct_t *segment; - // Find the area. + + // Iterate through the memory map list in reverse order to find the area. list_for_each_prev_decl(it, &mm->mmap_list) { + // Get the current segment from the list entry. segment = list_entry(it, vm_area_struct_t, vm_list); + + // Assert that the segment is not NULL. assert(segment && "There is a NULL area in the list."); + + // Check if the starting address matches the requested vm_start. if (segment->vm_start == vm_start) { - return segment; + return segment; // Return the found segment. } } + + // If the area is not found, return NULL. return NULL; } -inline int is_valid_vm_area(mm_struct_t *mm, uintptr_t vm_start, uintptr_t vm_end) +int is_valid_vm_area(mm_struct_t *mm, uintptr_t vm_start, uintptr_t vm_end) { + // Check for a valid memory descriptor. + if (!mm || !vm_start || !vm_end) { + pr_crit("Invalid arguments: mm or vm_start or vm_end is NULL."); + return -1; // Return -1 to indicate error due to invalid input. + } + + // Check if the ending address is less than or equal to the starting address. if (vm_end <= vm_start) { - return -1; + pr_crit("Invalid virtual memory area: vm_end (%p) must be greater than vm_start (%p)", + (void *)vm_end, (void *)vm_start); + return -1; // Return -1 to indicate an error due to invalid input. } - // Get the stack. + + // Iterate through the list of memory areas to check for overlaps. vm_area_struct_t *area; list_for_each_prev_decl(it, &mm->mmap_list) { + // Get the current area from the list entry. area = list_entry(it, vm_area_struct_t, vm_list); - assert(area && "There is a NULL area in the list."); + + // Check if the area is NULL. + if (!area) { + pr_crit("Encountered a NULL area in the list."); + return -1; // Return -1 to indicate an error due to a NULL area. + } + + // Check if the new area overlaps with the current area. if ((vm_start > area->vm_start) && (vm_start < area->vm_end)) { - pr_crit("INSIDE(START): %p <= %p <= %p", area->vm_start, vm_start, area->vm_end); - return 0; + pr_crit("Overlap detected at start: %p <= %p <= %p", + (void *)area->vm_start, (void *)vm_start, (void *)area->vm_end); + return 0; // Return 0 to indicate an overlap with an existing area. } + if ((vm_end > area->vm_start) && (vm_end < area->vm_end)) { - pr_crit("INSIDE(END): %p <= %p <= %p", area->vm_start, vm_end, area->vm_end); - return 0; + pr_crit("Overlap detected at end: %p <= %p <= %p", + (void *)area->vm_start, (void *)vm_end, (void *)area->vm_end); + return 0; // Return 0 to indicate an overlap with an existing area. 
} + if ((vm_start < area->vm_start) && (vm_end > area->vm_end)) { - pr_crit("WRAPS: %p <= (%p, %p) <= %p", vm_start, area->vm_start, area->vm_end, vm_end); - return 0; + pr_crit("Wrap-around detected: %p <= (%p, %p) <= %p", + (void *)vm_start, (void *)area->vm_start, (void *)area->vm_end, (void *)vm_end); + return 0; // Return 0 to indicate the new area wraps around an existing area. } } + + // If no overlaps were found, return 1 to indicate the area is valid. return 1; } -inline int find_free_vm_area(mm_struct_t *mm, size_t length, uintptr_t *vm_start) +int find_free_vm_area(mm_struct_t *mm, size_t length, uintptr_t *vm_start) { - // Get the stack. + // Check for a valid memory descriptor. + if (!mm || !length || !vm_start) { + pr_crit("Invalid arguments: mm or length or vm_start is NULL."); + return -1; // Return -1 to indicate error due to invalid input. + } + vm_area_struct_t *area, *prev_area; + + // Iterate through the list of memory areas in reverse order. list_for_each_prev_decl(it, &mm->mmap_list) { + // Get the current area from the list entry. area = list_entry(it, vm_area_struct_t, vm_list); - assert(area && "There is a NULL area in the list."); - // Check the previous segment. + + // Check if the current area is NULL. + if (!area) { + pr_crit("Encountered a NULL area in the list."); + return -1; // Return -1 to indicate an error due to a NULL area. + } + + // Check the previous segment if it exists. if (area->vm_list.prev != &mm->mmap_list) { prev_area = list_entry(area->vm_list.prev, vm_area_struct_t, vm_list); - assert(prev_area && "There is a NULL area in the list."); - // Compute the available space. + + // Check if the previous area is NULL. + if (!prev_area) { + pr_crit("Encountered a NULL previous area in the list."); + return -1; // Return -1 to indicate an error due to a NULL area. + } + + // Compute the available space between the current area and the previous area. unsigned available_space = area->vm_start - prev_area->vm_end; - // If the space is enough, return the address. + + // If the available space is sufficient for the requested length, + // return the starting address. if (available_space >= length) { *vm_start = area->vm_start - length; - return 0; + return 0; // Return 0 to indicate success. } } } + + // If no suitable area was found, return 1 to indicate failure. return 1; } @@ -290,30 +456,86 @@ static void __init_pagetable(page_table_t *ptable) *ptable = (page_table_t){ { 0 } }; } -void paging_init(boot_info_t *info) +int paging_init(boot_info_t *info) { - mm_cache = KMEM_CREATE(mm_struct_t); + // Check if the info pointer is valid. + if (!info) { + pr_crit("Invalid boot info provided.\n"); + return -1; // Return -1 if memory cache creation fails. + } + + // Create memory cache for managing mm_struct. + mm_cache = KMEM_CREATE(mm_struct_t); + if (!mm_cache) { + pr_crit("Failed to create mm_cache.\n"); + return -1; // Return -1 if memory cache creation fails. + } + + // Create memory cache for managing vm_area_struct. vm_area_cache = KMEM_CREATE(vm_area_struct_t); + if (!vm_area_cache) { + pr_crit("Failed to create vm_area_cache.\n"); + return -1; // Return -1 if memory cache creation fails. + } + // Create cache for page directory with custom constructor function. pgdir_cache = KMEM_CREATE_CTOR(page_directory_t, __init_pagedir); + if (!pgdir_cache) { + pr_crit("Failed to create pgdir_cache.\n"); + return -1; // Return -1 if page directory cache creation fails. + } + + // Create cache for page table with custom constructor function. 
pgtbl_cache = KMEM_CREATE_CTOR(page_table_t, __init_pagetable); + if (!pgtbl_cache) { + pr_crit("Failed to create pgtbl_cache.\n"); + return -1; // Return -1 if page table cache creation fails. + } + // Allocate the main memory management structure. main_mm = kmem_cache_alloc(mm_cache, GFP_KERNEL); + if (!main_mm) { + pr_crit("Failed to allocate main_mm.\n"); + return -1; // Return -1 if allocation for mm_struct fails. + } + // Allocate the page directory for the main memory management structure. main_mm->pgd = kmem_cache_alloc(pgdir_cache, GFP_KERNEL); + if (!main_mm->pgd) { + pr_crit("Failed to allocate main_mm page directory.\n"); + return -1; // Return -1 if allocation for page directory fails. + } + // Calculate the size of low kernel memory. uint32_t lowkmem_size = info->stack_end - info->kernel_start; - // Map the first 1MB of memory with physical mapping to access video memory and other bios stuff - mem_upd_vm_area(main_mm->pgd, 0, 0, 1024 * 1024, MM_RW | MM_PRESENT | MM_GLOBAL | MM_UPDADDR); + // Map the first 1MB of memory with physical mapping to access video memory and other BIOS functions. + if (mem_upd_vm_area(main_mm->pgd, 0, 0, 1024 * 1024, + MM_RW | MM_PRESENT | MM_GLOBAL | MM_UPDADDR) < 0) { + pr_crit("Failed to map the first 1MB of memory.\n"); + return -1; // Return -1 if memory mapping fails. + } - mem_upd_vm_area(main_mm->pgd, info->kernel_start, info->kernel_phy_start, lowkmem_size, - MM_RW | MM_PRESENT | MM_GLOBAL | MM_UPDADDR); + // Map the kernel memory region into the virtual memory space. + if (mem_upd_vm_area(main_mm->pgd, info->kernel_start, info->kernel_phy_start, lowkmem_size, + MM_RW | MM_PRESENT | MM_GLOBAL | MM_UPDADDR) < 0) { + pr_crit("Failed to map kernel memory region.\n"); + return -1; // Return -1 if memory mapping fails. + } - isr_install_handler(PAGE_FAULT, page_fault_handler, "page_fault_handler"); + // Install the page fault interrupt service routine (ISR) handler. + if (isr_install_handler(PAGE_FAULT, page_fault_handler, "page_fault_handler") < 0) { + pr_crit("Failed to install page fault handler.\n"); + return -1; // Return -1 if ISR installation fails. + } + // Switch to the newly created page directory. paging_switch_directory_va(main_mm->pgd); + + // Enable paging. paging_enable(); + + return 0; // Return 0 on success. } // Error code interpretation. @@ -328,12 +550,24 @@ void paging_init(boot_info_t *info) /// @param flags the flags to set. static inline void __set_pg_table_flags(page_table_entry_t *table, uint32_t flags) { - table->rw = (flags & MM_RW) != 0; - table->present = (flags & MM_PRESENT) != 0; - table->kernel_cow = (flags & MM_COW) != 0; // Store the cow/not cow status - table->available = 1; // Future kernel data 2 bits - table->global = (flags & MM_GLOBAL) != 0; - table->user = (flags & MM_USER) != 0; + // Check if the table pointer is valid. + if (!table) { + pr_crit("Invalid page table entry provided.\n"); + return; // Exit the function early if the table is null. + } + // Set the Read/Write flag: 1 if the MM_RW flag is set, 0 otherwise. + table->rw = (flags & MM_RW) != 0; + // Set the Present flag: 1 if the MM_PRESENT flag is set, 0 otherwise. + table->present = (flags & MM_PRESENT) != 0; + // Set the Copy-On-Write flag: 1 if the MM_COW flag is set, 0 otherwise. + // This flag is used to track if the page is a copy-on-write page. + table->kernel_cow = (flags & MM_COW) != 0; + // Set the Available bits: these are reserved for future use, so set them to 1. 
+ table->available = 1; // Currently just sets this to 1 as a placeholder. + // Set the Global flag: 1 if the MM_GLOBAL flag is set, 0 otherwise. + table->global = (flags & MM_GLOBAL) != 0; + // Set the User flag: 1 if the MM_USER flag is set, 0 otherwise. + table->user = (flags & MM_USER) != 0; } /// @brief Prints stack frame data and calls kernel_panic. @@ -378,74 +612,165 @@ static void __page_fault_panic(pt_regs *f, uint32_t addr) __asm__ __volatile__("cli"); } -/// @brief Handles the copy-on-write. -/// @param entry the entry to manage. +/// @brief Handles the Copy-On-Write (COW) mechanism for a page table entry. +/// If the page is marked as COW, it allocates a new page and updates the entry. +/// @param entry The page table entry to manage. /// @return 0 on success, 1 on error. static int __page_handle_cow(page_table_entry_t *entry) { + // Check if the entry pointer is valid. + if (!entry) { + pr_crit("Invalid page table entry provided.\n"); + return 1; // Return error if the entry is null. + } + // Check if the page is Copy On Write (COW). if (entry->kernel_cow) { - // Set the entry is no longer COW. + // Mark the page as no longer Copy-On-Write. entry->kernel_cow = 0; - // Check if the entry is not present (allocated). + + // If the page is not currently present (not allocated in physical memory). if (!entry->present) { - // Allocate a new page. + // Allocate a new physical page using high user memory flag. page_t *page = _alloc_pages(GFP_HIGHUSER, 0); - // Clear the new page. + if (!page) { + pr_crit("Failed to allocate a new page.\n"); + return 1; // Return error if the page allocation fails. + } + + // Map the allocated physical page to a virtual address. uint32_t vaddr = virt_map_physical_pages(page, 1); + if (!vaddr) { + pr_crit("Failed to map the physical page to virtual address.\n"); + return 1; // Return error if virtual mapping fails. + } + + // Clear the new page by setting all its bytes to 0. memset((void *)vaddr, 0, PAGE_SIZE); - // Unmap the virtual address. + + // Unmap the virtual address after clearing the page. virt_unmap(vaddr); - // Set it as current table entry frame. - entry->frame = get_physical_address_from_page(page) >> 12U; - // Set it as allocated. + + // Set the physical frame address of the allocated page into the entry. + entry->frame = get_physical_address_from_page(page) >> 12U; // Shift to get page frame number. + + // Mark the page as present in memory. entry->present = 1; - return 0; + + return 0; // Success, COW handled and page allocated. } } - pr_err("Page not cow!\n"); - return 1; + + // If the page is not marked as COW, print an error. + pr_err("Page not marked as copy-on-write (COW)!\n"); + return 1; // Return error as the page is not COW. } /// @brief Allocates memory for a page table entry. -/// @param entry the entry for which we allocate memory. -/// @param flags the flags to control the allocation. -/// @return a pointer to the page table entry. +/// @details If the page table is not present, allocates a new one and sets +/// flags accordingly. +/// @param entry The page directory entry for which memory is being allocated. +/// @param flags The flags that control the allocation, such as permissions and +/// attributes. +/// @return A pointer to the allocated page table, or NULL if allocation fails. static page_table_t *__mem_pg_entry_alloc(page_dir_entry_t *entry, uint32_t flags) { + // Check if the page directory entry is valid. 
+ if (!entry) { + pr_crit("Invalid page directory entry provided.\n"); + return NULL; // Return NULL to indicate error. + } + + // If the page table is not present, allocate a new one. if (!entry->present) { - // Alloc page table if not present - // Present should be always 1, to indicate that the page tables - // have been allocated and allow lazy physical pages allocation - entry->present = 1; - entry->rw = 1; - entry->global = (flags & MM_GLOBAL) != 0; - entry->user = (flags & MM_USER) != 0; - entry->accessed = 0; - entry->available = 1; - return kmem_cache_alloc(pgtbl_cache, GFP_KERNEL); - } - entry->present |= (flags & MM_PRESENT) != 0; - entry->rw |= (flags & MM_RW) != 0; - - // We should not remove a global flag from a page directory, - // if this happens there is probably a bug in the kernel - assert(!entry->global || (flags & MM_GLOBAL)); - - entry->global &= (flags & MM_GLOBAL) != 0; - entry->user |= (flags & MM_USER) != 0; - return (page_table_t *)get_lowmem_address_from_page( - get_page_from_physical_address(((uint32_t)entry->frame) << 12U)); + // Mark the page table as present and set read/write and global/user flags. + entry->present = 1; // Indicate that the page table has been allocated. + entry->rw = 1; // Allow read/write access by default. + entry->global = (flags & MM_GLOBAL) != 0; // Set global flag if specified. + entry->user = (flags & MM_USER) != 0; // Set user-mode flag if specified. + entry->accessed = 0; // Mark as not accessed. + entry->available = 1; // Available for kernel use. + + // Allocate the page table using a memory cache. + page_table_t *new_table = kmem_cache_alloc(pgtbl_cache, GFP_KERNEL); + if (!new_table) { + pr_crit("Failed to allocate memory for page table.\n"); + return NULL; // Return NULL if allocation fails. + } + + return new_table; // Return the newly allocated page table. + } + + // If the page table is already present, update the flags accordingly. + entry->present |= (flags & MM_PRESENT) != 0; // Update the present flag if MM_PRESENT is set. + entry->rw |= (flags & MM_RW) != 0; // Update the read/write flag if MM_RW is set. + + // Ensure that the global flag is not removed if it was previously set. + // Removing a global flag from a page directory might indicate a bug in the kernel. + if (entry->global && !(flags & MM_GLOBAL)) { + kernel_panic("Attempted to remove the global flag from a page directory entry.\n"); + } + + // Update the global and user flags. + entry->global &= (flags & MM_GLOBAL) != 0; // Keep the global flag if specified. + entry->user |= (flags & MM_USER) != 0; // Set the user-mode flag if specified. + + // Retrieve the physical address of the page. + page_t *page = get_page_from_physical_address(((uint32_t)entry->frame) << 12U); + if (!page) { + pr_crit("Failed to retrieve page from physical address.\n"); + return NULL; // Return NULL if the page retrieval fails. + } + + // Convert the physical address into a low memory address. + page_table_t *lowmem_addr = (page_table_t *)get_lowmem_address_from_page(page); + if (!lowmem_addr) { + pr_crit("Failed to map page to low memory address.\n"); + return NULL; // Return NULL if the low memory mapping fails. + } + + return lowmem_addr; // Return the mapped page table. } -/// @brief Sets the frame attribute of a page table entry. -/// @param entry the entry. -/// @param table the page table. 
-static inline void __set_pg_entry_frame(page_dir_entry_t *entry, page_table_t *table) +/// @brief Sets the frame attribute of a page directory entry based on the page table's physical address. +/// @param entry The page directory entry to modify. +/// @param table The page table whose frame is being set in the directory entry. +/// @return 0 on success, -1 on failure. +static inline int __set_pg_entry_frame(page_dir_entry_t *entry, page_table_t *table) { + // Ensure the entry is not NULL. + if (!entry) { + pr_crit("Invalid page directory entry provided.\n"); + return -1; // Return -1 if the entry is NULL (error). + } + + // Ensure the table is not NULL. + if (!table) { + pr_crit("Invalid page table provided.\n"); + return -1; // Return -1 if the table is NULL (error). + } + + // Retrieve the low memory page structure from the virtual address of the table. page_t *table_page = get_lowmem_page_from_address((uint32_t)table); - uint32_t phy_addr = get_physical_address_from_page(table_page); - entry->frame = phy_addr >> 12u; + if (!table_page) { + pr_crit("Failed to retrieve low memory page from table address: %p\n", table); + return -1; // Return -1 if the low memory page retrieval fails (error). + } + + // Retrieve the physical address from the page structure. + uint32_t phy_addr = get_physical_address_from_page(table_page); + if (!phy_addr) { + pr_crit("Failed to retrieve physical address from page: %p\n", table_page); + return -1; // Return -1 if the physical address retrieval fails (error). + } + + // Set the frame attribute in the page directory entry (shifted by 12 bits + // to represent the frame number). + entry->frame = phy_addr >> 12u; + + pr_debug("Set page directory entry frame to 0x%x for table: %p\n", entry->frame, table); + + return 0; // Return 0 on success. } void page_fault_handler(pt_regs *f) @@ -468,26 +793,44 @@ void page_fault_handler(pt_regs *f) // | 1 1 0 | User process tried to write to a non-present page entry // | 1 1 1 | User process tried to write a page and caused a protection fault - // First, read the linear address that caused the Page Fault. When the exception occurs, the CPU control unit stores - // that value in the cr2 control register. - uint32_t faulting_addr; - __asm__ __volatile__("mov %%cr2, %0" - : "=r"(faulting_addr)); - // Get the physical address of the current page directory. + // Extract the error + int err_user = bit_check(f->err_code, 2) != 0; + int err_rw = bit_check(f->err_code, 1) != 0; + int err_present = bit_check(f->err_code, 0) != 0; + + // Extract the address that caused the page fault from the CR2 register. + uint32_t faulting_addr = get_cr2(); + + // Retrieve the current page directory's physical address. uint32_t phy_dir = (uint32_t)paging_get_current_directory(); - // Get the page directory. - page_directory_t *lowmem_dir = (page_directory_t *)get_lowmem_address_from_page(get_page_from_physical_address(phy_dir)); - // Get the directory entry. + if (!phy_dir) { + pr_crit("Failed to retrieve current page directory.\n"); + __page_fault_panic(f, faulting_addr); + } + + // Get the page from the physical address of the directory. + page_t *dir_page = get_page_from_physical_address(phy_dir); + if (!dir_page) { + pr_crit("Failed to get page from physical address: %p\n", (void *)phy_dir); + __page_fault_panic(f, faulting_addr); + } + + // Get the low memory address from the page and cast it to a page directory structure. 
+ page_directory_t *lowmem_dir = (page_directory_t *)get_lowmem_address_from_page(dir_page); + if (!lowmem_dir) { + pr_crit("Failed to get low memory address from page: %p\n", (void *)dir_page); + __page_fault_panic(f, faulting_addr); + } + + // Get the directory entry that corresponds to the faulting address. page_dir_entry_t *direntry = &lowmem_dir->entries[faulting_addr / (1024U * PAGE_SIZE)]; - // Extract the error - bool_t err_user = bit_check(f->err_code, 2) != 0; - bool_t err_rw = bit_check(f->err_code, 1) != 0; - bool_t err_present = bit_check(f->err_code, 0) != 0; + // Panic only if page is in kernel memory, else abort process with SIGSEGV. if (!direntry->present) { - pr_crit("ERR(0): %d%d%d\n", err_user, err_rw, err_present); + pr_crit("ERR(0): Page directory entry not present (%d%d%d)\n", err_user, err_rw, err_present); + + // If the fault was caused by a user process, send a SIGSEGV signal. if (err_user) { - // Get the current process. task_struct *task = scheduler_get_current_process(); if (task) { // Notifies current process. @@ -503,32 +846,61 @@ void page_fault_handler(pt_regs *f) pr_crit("ERR(0): So, it is not present, and it was not the user.\n"); __page_fault_panic(f, faulting_addr); } - // Get the physical address of the page table. + + // Retrieve the physical address of the page table. uint32_t phy_table = direntry->frame << 12U; - // Get the page table. - page_table_t *lowmem_table = (page_table_t *)get_lowmem_address_from_page(get_page_from_physical_address(phy_table)); + + // Get the page from the physical address of the page table. + page_t *table_page = get_page_from_physical_address(phy_table); + if (!table_page) { + pr_crit("Failed to get page from physical address: %p\n", (void *)phy_table); + __page_fault_panic(f, faulting_addr); + } + + // Get the low memory address from the page and cast it to a page table structure. + page_table_t *lowmem_table = (page_table_t *)get_lowmem_address_from_page(table_page); + if (!lowmem_table) { + pr_crit("Failed to get low memory address from page: %p\n", (void *)table_page); + __page_fault_panic(f, faulting_addr); + } + // Get the entry inside the table that caused the fault. uint32_t table_index = (faulting_addr / PAGE_SIZE) % 1024U; + // Get the corresponding page table entry. page_table_entry_t *entry = &lowmem_table->pages[table_index]; - // There was a page fault on a virtual mapped address, - // so we must first update the original mapped page + if (!entry) { + pr_crit("Failed to retrieve page table entry.\n"); + __page_fault_panic(f, faulting_addr); + } + + // There was a page fault on a virtual mapped address, so we must first + // update the original mapped page if (virtual_check_address(faulting_addr)) { // Get the original page table entry from the virtually mapped one. page_table_entry_t *orig_entry = (page_table_entry_t *)(*(uint32_t *)entry); + if (!orig_entry) { + pr_crit("Original page table entry is NULL.\n"); + __page_fault_panic(f, faulting_addr); + } + // Check if the page is Copy on Write (CoW). if (__page_handle_cow(orig_entry)) { pr_crit("ERR(1): %d%d%d\n", err_user, err_rw, err_present); __page_fault_panic(f, faulting_addr); } + // Update the page table entry frame. entry->frame = orig_entry->frame; + // Update the entry flags. __set_pg_table_flags(entry, MM_PRESENT | MM_RW | MM_GLOBAL | MM_COW | MM_UPDADDR); } else { // Check if the page is Copy on Write (CoW). 
if (__page_handle_cow(entry)) { pr_crit("ERR(2): %d%d%d\n", err_user, err_rw, err_present); + + // If the fault was caused by a user process, send a SIGSEGV signal. if (err_user && err_rw && err_present) { // Get the current process. task_struct *task = scheduler_get_current_process(); @@ -548,7 +920,8 @@ void page_fault_handler(pt_regs *f) __page_fault_panic(f, faulting_addr); } } - // Invalidate the page table entry. + + // Invalidate the TLB entry for the faulting address. paging_flush_tlb_single(faulting_addr); } @@ -558,262 +931,517 @@ void page_fault_handler(pt_regs *f) /// @param addr_start The starting address. /// @param size The total amount we want to iterate. /// @param flags Allocation flags. -static void __pg_iter_init(page_iterator_t *iter, - page_directory_t *pgd, - uint32_t addr_start, - uint32_t size, - uint32_t flags) +/// @return 0 on success, -1 on error. +static int __pg_iter_init(page_iterator_t *iter, + page_directory_t *pgd, + uint32_t addr_start, + uint32_t size, + uint32_t flags) { + // Calculate the starting page frame number (PFN) based on the starting address. uint32_t start_pfn = addr_start / PAGE_SIZE; + // Calculate the ending page frame number (PFN) based on the starting address and size. uint32_t end_pfn = (addr_start + size + PAGE_SIZE - 1) / PAGE_SIZE; + // Determine the base page table index from the starting PFN. uint32_t base_pgt = start_pfn / 1024; - iter->entry = pgd->entries + base_pgt; - iter->pfn = start_pfn; - iter->last_pfn = end_pfn; - iter->flags = flags; + // Ensure that the base page table index is within valid range. + if (base_pgt >= MAX_PAGE_TABLE_ENTRIES) { + pr_crit("Base page table index %u is out of bounds.\n", base_pgt); + return -1; // Return -1 to indicate error. + } + + // Initialize the iterator's entry pointer to point to the corresponding page directory entry. + iter->entry = pgd->entries + base_pgt; + + // Set the page frame numbers for the iterator. + iter->pfn = start_pfn; + iter->last_pfn = end_pfn; + iter->flags = flags; + + // Allocate memory for the page table entry associated with the iterator. iter->table = __mem_pg_entry_alloc(iter->entry, flags); + // Check if the allocation was successful. + if (!iter->table) { + pr_crit("Failed to allocate memory for page table entry.\n"); + return -1; // Return -1 to indicate error. + } + + // Set the frame for the page entry. __set_pg_entry_frame(iter->entry, iter->table); + + return 0; // Return 0 to indicate success. } /// @brief Checks if the iterator has a next entry. -/// @param iter The iterator. -/// @return If we can continue the iteration. +/// @param iter The iterator to check. +/// @return Returns 1 if the iterator can continue the iteration; otherwise, returns 0. static int __pg_iter_has_next(page_iterator_t *iter) { + // Check for a null iterator pointer to avoid dereferencing a null pointer. + if (!iter) { + pr_crit("The page iterator is null.\n"); + return 0; // Return 0, indicating there are no entries to iterate. + } + + // Check if the current page frame number (pfn) is less than the last page frame number (last_pfn). + // This condition determines if there are more entries to iterate over. return iter->pfn < iter->last_pfn; } /// @brief Moves the iterator to the next entry. -/// @param iter The itetator. -/// @return The iterator after moving to the next entry. +/// @param iter The iterator to advance. +/// @return The current entry after moving to the next entry. 
static pg_iter_entry_t __pg_iter_next(page_iterator_t *iter) { + // Check for a null iterator pointer to avoid dereferencing a null pointer. + if (!iter) { + pr_crit("The page iterator is null.\n"); + return (pg_iter_entry_t){ 0 }; // Return a default entry indicating an error. + } + + // Initialize the result entry with the current page frame number (pfn). pg_iter_entry_t result = { .entry = &iter->table->pages[iter->pfn % 1024], .pfn = iter->pfn }; - if (++iter->pfn % 1024 == 0) { - // Create a new page only if we haven't reached the end - // The page directory is always aligned to page boundaries, - // so we can easily know when we've skipped the last page by checking - // if the address % PAGE_SIZE is equal to zero. - if (iter->pfn != iter->last_pfn && ((uint32_t)++iter->entry) % 4096 != 0) { - iter->table = __mem_pg_entry_alloc(iter->entry, iter->flags); - __set_pg_entry_frame(iter->entry, iter->table); + // Move to the next page frame number. + iter->pfn++; + + // Check if we have wrapped around to a new page. + if (iter->pfn % 1024 == 0) { + // Check if we haven't reached the end of the last page. + if (iter->pfn != iter->last_pfn) { + // Ensure that the new entry address is valid and page-aligned. + if (((uint32_t)++iter->entry) % 4096 != 0) { + // Attempt to allocate memory for a new page entry. + iter->table = __mem_pg_entry_alloc(iter->entry, iter->flags); + if (!iter->table) { + pr_crit("Failed to allocate memory for new page entry.\n"); + return (pg_iter_entry_t){ 0 }; // Return a default entry indicating an error. + } + + // Set the frame for the newly allocated entry. + __set_pg_entry_frame(iter->entry, iter->table); + } } } - return result; + return result; // Return the current entry after moving to the next. } -page_t *mem_virtual_to_page(page_directory_t *pgdir, uint32_t virt_start, size_t *size) +page_t *mem_virtual_to_page(page_directory_t *pgd, uint32_t virt_start, size_t *size) { + // Check for null pointer to the page directory to avoid dereferencing. + if (!pgd) { + pr_crit("The page directory is null.\n"); + return NULL; // Return NULL to indicate an error. + } + + // Calculate the page frame number and page table index from the virtual address. uint32_t virt_pfn = virt_start / PAGE_SIZE; - uint32_t virt_pgt = virt_pfn / 1024; - uint32_t virt_pgt_offset = virt_pfn % 1024; + uint32_t virt_pgt = virt_pfn / 1024; // Page table index. + uint32_t virt_pgt_offset = virt_pfn % 1024; // Offset within the page table. - page_t *pgd_page = mem_map + pgdir->entries[virt_pgt].frame; + // Get the physical page for the page directory entry. + page_t *pgd_page = mem_map + pgd->entries[virt_pgt].frame; + // Get the low memory address of the page table. page_table_t *pgt_address = (page_table_t *)get_lowmem_address_from_page(pgd_page); + if (!pgt_address) { + pr_crit("Failed to get low memory address from page directory entry.\n"); + return NULL; // Return NULL if unable to retrieve page table address. + } + // Get the physical frame number for the corresponding entry in the page table. uint32_t pfn = pgt_address->pages[virt_pgt_offset].frame; + // Map the physical frame number to a physical page. page_t *page = mem_map + pfn; - // FIXME: handle unaligned page mapping - // to return the correct to-block-end size - // instead of 0 (1 page at a time) + // FIXME: handle unaligned page mapping to return the correct to-block-end + // size instead of returning 0 (1 page at a time). 
     if (size) {
-        uint32_t pfn_count = 1U << page->bbpage.order;
-        uint32_t bytes_count = pfn_count * PAGE_SIZE;
-        *size = min(*size, bytes_count);
+        uint32_t pfn_count = 1U << page->bbpage.order; // Calculate the number of pages.
+        uint32_t bytes_count = pfn_count * PAGE_SIZE; // Calculate the total byte count.
+        *size = min(*size, bytes_count); // Store the size, ensuring it doesn't exceed the maximum.
     }
 
-    return page;
+    return page; // Return the pointer to the mapped physical page.
 }
 
-void mem_upd_vm_area(page_directory_t *pgd,
-                     uint32_t virt_start,
-                     uint32_t phy_start,
-                     size_t size,
-                     uint32_t flags)
+int mem_upd_vm_area(page_directory_t *pgd,
+                    uint32_t virt_start,
+                    uint32_t phy_start,
+                    size_t size,
+                    uint32_t flags)
 {
+    // Check for null pointer to the page directory to avoid dereferencing.
+    if (!pgd) {
+        pr_crit("The page directory is null.\n");
+        return -1; // Return -1 to indicate error.
+    }
+
+    // Initialize the page iterator for the virtual memory area.
     page_iterator_t virt_iter;
-    __pg_iter_init(&virt_iter, pgd, virt_start, size, flags);
+    if (__pg_iter_init(&virt_iter, pgd, virt_start, size, flags) < 0) {
+        pr_crit("Failed to initialize the page iterator\n");
+        return -1; // Return -1 to indicate error.
+    }
 
+    // Calculate the starting page frame number for the physical address.
     uint32_t phy_pfn = phy_start / PAGE_SIZE;
 
+    // Iterate through the virtual memory area.
     while (__pg_iter_has_next(&virt_iter)) {
         pg_iter_entry_t it = __pg_iter_next(&virt_iter);
+
+        // If the MM_UPDADDR flag is set, update the frame address.
        if (flags & MM_UPDADDR) {
+            // Ensure the physical frame number is valid before assignment.
+            if (phy_pfn >= MAX_PHY_PFN) {
+                pr_crit("Physical frame number exceeds maximum limit.\n");
+                return -1; // Return -1 to indicate error.
+            }
             it.entry->frame = phy_pfn++;
-            // Flush the tlb to allow address update
-            // TODO(enrico): Check if it's always needed (ex. when the pgdir is not the current one)
+            // Flush the TLB entry so the address update takes effect.
+            // TODO(enrico): Check if it's always needed (ex. when the pgdir is not the current one).
             paging_flush_tlb_single(it.pfn * PAGE_SIZE);
         }
+
+        // Set the page table flags.
         __set_pg_table_flags(it.entry, flags);
     }
+
+    return 0; // Return 0 to indicate success.
 }
 
-void mem_clone_vm_area(page_directory_t *src_pgd,
-                       page_directory_t *dst_pgd,
-                       uint32_t src_start,
-                       uint32_t dst_start,
-                       size_t size,
-                       uint32_t flags)
+int mem_clone_vm_area(page_directory_t *src_pgd,
+                      page_directory_t *dst_pgd,
+                      uint32_t src_start,
+                      uint32_t dst_start,
+                      size_t size,
+                      uint32_t flags)
 {
-    page_iterator_t src_iter;
-    page_iterator_t dst_iter;
+    // Check for a null source page directory pointer.
+    if (!src_pgd) {
+        pr_crit("The source page directory is null.\n");
+        return -1; // Return -1 to indicate error.
+    }
+
+    // Check for a null destination page directory pointer.
+    if (!dst_pgd) {
+        pr_crit("The destination page directory is null.\n");
+        return -1; // Return -1 to indicate error.
+    }
+
+    // Initialize iterators for both source and destination page directories.
+    page_iterator_t src_iter, dst_iter;
 
-    __pg_iter_init(&src_iter, src_pgd, src_start, size, flags);
-    __pg_iter_init(&dst_iter, dst_pgd, dst_start, size, flags);
+    // Initialize the source iterator to iterate through the source page directory.
+    if (__pg_iter_init(&src_iter, src_pgd, src_start, size, flags) < 0) {
+        pr_crit("Failed to initialize source page iterator\n");
+        return -1; // Return -1 to indicate error.
+    }
+
+    // Initialize the destination iterator to iterate through the destination page directory.
+ if (__pg_iter_init(&dst_iter, dst_pgd, dst_start, size, flags) < 0) { + pr_crit("Failed to initialize destination page iterator\n"); + return -1; // Return -1 to indicate error. + } + // Iterate over the pages in the source and destination page directories. while (__pg_iter_has_next(&src_iter) && __pg_iter_has_next(&dst_iter)) { pg_iter_entry_t src_it = __pg_iter_next(&src_iter); pg_iter_entry_t dst_it = __pg_iter_next(&dst_iter); + // Check if the source page is marked as copy-on-write (COW). if (src_it.entry->kernel_cow) { + // Clone the page by assigning the address of the source entry to the destination. *(uint32_t *)dst_it.entry = (uint32_t)src_it.entry; - // This is to make it clear that the page is not present, - // can be omitted because the .entry address is aligned to 4 bytes boundary - // so it's first two bytes are always zero + // Mark the destination page as not present. dst_it.entry->present = 0; } else { + // Copy the frame information from the source entry to the destination entry. dst_it.entry->frame = src_it.entry->frame; + // Set the page table flags for the destination entry. __set_pg_table_flags(dst_it.entry, flags); } - // Flush the tlb to allow address update - // TODO(enrico): Check if it's always needed (ex. when the pgdir is not the current one) + // Flush the TLB entry for the destination page to ensure the address is + // updated. It's essential to verify whether this is required in every + // case. paging_flush_tlb_single(dst_it.pfn * PAGE_SIZE); } + + return 0; // Return 0 to indicate success. } mm_struct_t *create_blank_process_image(size_t stack_size) { - // Allocate the mm_struct. + // Allocate the mm_struct for the new process image. mm_struct_t *mm = kmem_cache_alloc(mm_cache, GFP_KERNEL); + if (!mm) { + pr_crit("Failed to allocate memory for mm_struct\n"); + return NULL; // Return NULL to indicate error in allocation. + } + + // Initialize the allocated mm_struct to zero. memset(mm, 0, sizeof(mm_struct_t)); - // TODO(enrico): Use this field + // Initialize the list for memory management (mm) structures. + // TODO(enrico): Use this field for process memory management. list_head_init(&mm->mm_list); + // Get the main page directory. + page_directory_t *main_pgd = paging_get_main_directory(); + // Error handling: Failed to get the main page directory. + if (!main_pgd) { + pr_crit("Failed to get the main page directory\n"); + return NULL; // Return NULL to indicate error. + } + + // Allocate a new page directory structure and copy the main page directory. page_directory_t *pdir_cpy = kmem_cache_alloc(pgdir_cache, GFP_KERNEL); - memcpy(pdir_cpy, paging_get_main_directory(), sizeof(page_directory_t)); + if (!pdir_cpy) { + pr_crit("Failed to allocate memory for page directory\n"); + // Free previously allocated mm_struct. + kmem_cache_free(mm); + return NULL; // Return NULL to indicate error in allocation. + } + // Initialize the allocated page_directory to zero. + memcpy(pdir_cpy, main_pgd, sizeof(page_directory_t)); + + // Assign the copied page directory to the mm_struct. mm->pgd = pdir_cpy; - // Initialize vm areas list + // Initialize the virtual memory areas list for the new process. list_head_init(&mm->mmap_list); // Allocate the stack segment. vm_area_struct_t *segment = create_vm_area(mm, PROCAREA_END_ADDR - stack_size, stack_size, MM_PRESENT | MM_RW | MM_USER | MM_COW, GFP_HIGHUSER); - // Update the start of the stack. + if (!segment) { + pr_crit("Failed to create stack segment for new process\n"); + // Free page directory if allocation fails. 
+ kmem_cache_free(pdir_cpy); + // Free mm_struct as well. + kmem_cache_free(mm); + return NULL; // Return NULL to indicate error in stack allocation. + } + + // Update the start of the stack in the mm_struct. mm->start_stack = segment->vm_start; - return mm; + + return mm; // Return the initialized mm_struct for the new process. } mm_struct_t *clone_process_image(mm_struct_t *mmp) { - // Allocate the mm_struct. + // Check if the input mm_struct pointer is valid. + if (!mmp) { + pr_crit("Invalid source mm_struct pointer.\n"); + return NULL; // Return NULL to indicate error. + } + + // Allocate the mm_struct for the new process image. mm_struct_t *mm = kmem_cache_alloc(mm_cache, GFP_KERNEL); + if (!mm) { + pr_crit("Failed to allocate memory for mm_struct\n"); + return NULL; // Return NULL to indicate error in allocation. + } + + // Copy the contents of the source mm_struct to the new one. memcpy(mm, mmp, sizeof(mm_struct_t)); - // Initialize the process with the main directory, to avoid page tables data races. - // Pages from the old process are copied/cow when segments are cloned + // Get the main page directory. + page_directory_t *main_pgd = paging_get_main_directory(); + // Error handling: Failed to get the main page directory. + if (!main_pgd) { + pr_crit("Failed to get the main page directory\n"); + return NULL; // Return NULL to indicate error. + } + + // Allocate a new page directory to avoid data races on page tables. page_directory_t *pdir_cpy = kmem_cache_alloc(pgdir_cache, GFP_KERNEL); - memcpy(pdir_cpy, paging_get_main_directory(), sizeof(page_directory_t)); + if (!pdir_cpy) { + pr_crit("Failed to allocate page directory for new process.\n"); + // Free the previously allocated mm_struct. + kmem_cache_free(mm); + return NULL; // Return NULL to indicate error. + } + + // Initialize the new page directory by copying from the main directory. + memcpy(pdir_cpy, main_pgd, sizeof(page_directory_t)); + // Assign the copied page directory to the mm_struct. mm->pgd = pdir_cpy; vm_area_struct_t *vm_area = NULL; - // Reset vm areas to allow easy clone + // Reset the memory area list to prepare for cloning. list_head_init(&mm->mmap_list); mm->map_count = 0; mm->total_vm = 0; - // Clone each memory area to the new process! + // Clone each memory area from the source process to the new process. list_head *it; list_for_each (it, &mmp->mmap_list) { vm_area = list_entry(it, vm_area_struct_t, vm_list); - clone_vm_area(mm, vm_area, 0, GFP_HIGHUSER); - } - // - // // Allocate the stack segment. - // mm->start_stack = create_segment(mm, stack_size); + if (clone_vm_area(mm, vm_area, 0, GFP_HIGHUSER) < 0) { + pr_crit("Failed to clone vm_area from source process.\n"); + // Free the previously allocated mm_struct. + kmem_cache_free(mm); + // Free the previously allocated page_directory. + kmem_cache_free(pdir_cpy); + return NULL; // Return NULL to indicate error. + } + } - return mm; + return mm; // Return the newly cloned mm_struct. } -void destroy_process_image(mm_struct_t *mm) +int destroy_process_image(mm_struct_t *mm) { - assert(mm != NULL); + // Check if the input mm_struct pointer is valid. + if (!mm) { + pr_crit("Invalid source mm_struct pointer.\n"); + return -1; // Return -1 to indicate error. + } + + // Get the main page directory. + page_directory_t *main_pgd = paging_get_main_directory(); + // Error handling: Failed to get the main page directory. + if (!main_pgd) { + pr_crit("Failed to get the main page directory\n"); + return -1; // Return -1 to indicate error. 
+ } + + // Retrieve the current page directory. + uint32_t current_paging_dir = (uint32_t)paging_get_current_directory(); + if (current_paging_dir == 0) { + pr_crit("Failed to retrieve the current paging directory.\n"); + return -1; // Return -1 to indicate error. + } + + // Get the low memory page associated with the given mm_struct. + page_t *lowmem_page = get_lowmem_page_from_address((uint32_t)mm->pgd); + if (!lowmem_page) { + pr_crit("Failed to get low memory page from mm->pgd address: %p\n", (void *)mm->pgd); + return -1; // Return -1 to indicate error. + } + + // Step 2: Get the physical address from the low memory page. + uint32_t mm_pgd_phys_addr = get_physical_address_from_page(lowmem_page); + if (mm_pgd_phys_addr == 0) { + pr_crit("Failed to get physical address from low memory page: %p.\n", lowmem_page); + return -1; // Return -1 to indicate error. + } - if ((uint32_t)paging_get_current_directory() == get_physical_address_from_page(get_lowmem_page_from_address((uint32_t)mm->pgd))) { - paging_switch_directory_va(paging_get_main_directory()); + // Compare the current page directory with the one associated with the process. + if (current_paging_dir == mm_pgd_phys_addr) { + // Switch to the main directory if they are the same. + if (paging_switch_directory_va(main_pgd) < 0) { + pr_crit("Failed to switch to the main directory.\n"); + return -1; // Return -1 to indicate error. + } } // Free each segment inside mm. vm_area_struct_t *segment = NULL; - // Iterate the list. + // Iterate through the list of memory areas. list_head *it = mm->mmap_list.next, *next; + while (!list_head_empty(it)) { segment = list_entry(it, vm_area_struct_t, vm_list); + // Save the pointer to the next element in the list. next = segment->vm_list.next; - if (destroy_vm_area(mm, segment)) { - // Destroy the area. - kernel_panic("We failed to destroy the virtual memory area."); + + // Destroy the current virtual memory area. Return -1 on failure. + if (destroy_vm_area(mm, segment) < 0) { + pr_err("We failed to destroy the virtual memory area."); + return -1; // Failed to destroy the virtual memory area. } + // Move to the next element. it = next; } - // Free all the page tables + // Free all the page tables. for (int i = 0; i < 1024; i++) { page_dir_entry_t *entry = &mm->pgd->entries[i]; + // Check if the page table entry is present and not global. if (entry->present && !entry->global) { - page_t *pgt_page = get_page_from_physical_address(entry->frame * PAGE_SIZE); + // Get the physical page for the page table. + page_t *pgt_page = get_page_from_physical_address(entry->frame * PAGE_SIZE); + if (!pgt_page) { + pr_crit("Failed to get physical page for entry %d.\n", i); + continue; // Skip to the next entry on error. + } + + // Get the low memory address for the page table. uint32_t pgt_addr = get_lowmem_address_from_page(pgt_page); + if (pgt_addr == 0) { + pr_crit("Failed to get low memory address for physical page %p.\n", (void *)pgt_page); + continue; // Skip to the next entry on error. + } + + // Free the page table. kmem_cache_free((void *)pgt_addr); + + pr_debug("Successfully freed page table for entry %d at address %p.\n", i, (void *)pgt_addr); } } + + // Free the page directory structure. kmem_cache_free((void *)mm->pgd); - // Free the mm_struct. + // Free the memory structure representing the process image. kmem_cache_free(mm); + + return 0; // Success. } void *sys_mmap(void *addr, size_t length, int prot, int flags, int fd, off_t offset) { uintptr_t vm_start; + // Get the current task. 
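    // Illustrative aside (not part of this patch): a user-space request such as
    //
    //     void *buf = mmap(NULL, 2 * PAGE_SIZE, PROT_READ | PROT_WRITE,
    //                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    //
    // would reach this handler with addr == NULL, so the lookup below falls
    // back to find_free_vm_area() to pick vm_start. The mmap() wrapper shown is
    // the usual POSIX form, used here purely for illustration.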
task_struct *task = scheduler_get_current_process(); - // Check if we were asked for a specific spot. + + // Check if a specific address was requested for the memory mapping. if (addr && is_valid_vm_area(task->mm, (uintptr_t)addr, (uintptr_t)addr + length)) { + // If the requested address is valid, use it as the starting address. vm_start = (uintptr_t)addr; } else { - // Find an empty spot. + // Find an empty spot if no specific address was provided or the provided one is invalid. if (find_free_vm_area(task->mm, length, &vm_start)) { - pr_err("We failed to find a suitable spot for a new virtual memory area.\n"); - return NULL; + pr_err("Failed to find a suitable spot for a new virtual memory area.\n"); + return NULL; // Return NULL to indicate failure in finding a suitable memory area. } } - // Allocate the segment. + + // Allocate the virtual memory area segment. vm_area_struct_t *segment = create_vm_area( task->mm, vm_start, length, MM_PRESENT | MM_RW | MM_COW | MM_USER, GFP_HIGHUSER); + if (!segment) { + pr_err("Failed to allocate virtual memory area segment.\n"); + return NULL; // Return NULL to indicate allocation failure. + } + + // Set the memory flags for the mapping. task->mm->mmap_cache->vm_flags = flags; + + // Return the starting address of the newly created memory segment. return (void *)segment->vm_start; } @@ -821,23 +1449,42 @@ int sys_munmap(void *addr, size_t length) { // Get the current task. task_struct *task = scheduler_get_current_process(); - // Get the stack. - vm_area_struct_t *segment; - // - unsigned vm_start = (uintptr_t)addr, size; - // Find the area. + + // Initialize variables. + vm_area_struct_t *segment; // The virtual memory area segment. + unsigned vm_start = (uintptr_t)addr; // Starting address of the memory area to unmap. + unsigned size; // Size of the segment. + + // Iterate through the list of memory mapped areas in reverse order. list_for_each_prev_decl(it, &task->mm->mmap_list) { segment = list_entry(it, vm_area_struct_t, vm_list); - assert(segment && "There is a NULL area in the list."); - // Compute the size of the segment. + + // Check if the segment is valid. + if (!segment) { + pr_crit("Found a NULL area in the mmap list.\n"); + return -1; // Return -1 to indicate an error due to NULL segment. + } + + // Compute the size of the current segment. size = segment->vm_end - segment->vm_start; - // Check the segment. + + // Check if the requested address and length match the current segment. if ((vm_start == segment->vm_start) && (length == size)) { - pr_warning("[0x%p:0x%p] Found it, destroying it.\n", segment->vm_start, segment->vm_end); - destroy_vm_area(task->mm, segment); - return 0; + pr_debug("[0x%p:0x%p] Found it, destroying it.\n", + (void *)segment->vm_start, (void *)segment->vm_end); + + // Step 6: Destroy the found virtual memory area. + if (destroy_vm_area(task->mm, segment) < 0) { + pr_err("Failed to destroy the virtual memory area at [0x%p:0x%p].\n", + (void *)segment->vm_start, (void *)segment->vm_end); + return -1; // Return -1 to indicate an error during destruction. + } + + return 0; // Return 0 to indicate success. } } - return 1; + + pr_err("No matching memory area found for unmapping at address 0x%p with length %zu.\n", addr, length); + return 1; // Return 1 to indicate no matching area found. 
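    // Illustrative aside (not part of this patch): the loop above only matches
    // exact [vm_start, vm_start + length) ranges, so partial unmaps are not
    // supported here. Assuming a mapping created earlier with
    //
    //     void *p = sys_mmap(NULL, 2 * PAGE_SIZE, 0, 0, -1, 0);
    //
    // a call to sys_munmap(p, 2 * PAGE_SIZE) destroys the area and returns 0,
    // while sys_munmap(p, PAGE_SIZE) falls through to this point and returns 1.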
} diff --git a/mentos/src/mem/vmem_map.c b/mentos/src/mem/vmem_map.c index 44074bc1..66935674 100644 --- a/mentos/src/mem/vmem_map.c +++ b/mentos/src/mem/vmem_map.c @@ -43,9 +43,9 @@ int virt_init(void) VIRTUAL_MEMORY_PAGES_COUNT); // Get the main page directory. - page_directory_t *mainpgd = paging_get_main_directory(); + page_directory_t *main_pgd = paging_get_main_directory(); // Error handling: Failed to get the main page directory. - if (!mainpgd) { + if (!main_pgd) { pr_crit("Failed to get the main page directory\n"); return -1; // Return -1 to indicate failure. } @@ -63,7 +63,7 @@ int virt_init(void) page_table_t *table; for (uint32_t i = start_virt_pgt; i < 1024 && (pfn_num > 0); i++) { // Get the page directory entry. - entry = mainpgd->entries + i; + entry = main_pgd->entries + i; // Alloc virtual page table. entry->present = 1; // Mark the entry as present @@ -155,16 +155,16 @@ uint32_t virt_map_physical_pages(page_t *page, int pfn_count) uint32_t phy_address = get_physical_address_from_page(page); // Get the main page directory. - page_directory_t *mainpgd = paging_get_main_directory(); + page_directory_t *main_pgd = paging_get_main_directory(); // Error handling: Failed to get the main page directory. - if (!mainpgd) { + if (!main_pgd) { pr_crit("Failed to get the main page directory\n"); return -1; // Return -1 to indicate failure. } // Update the virtual memory area with the new mapping. mem_upd_vm_area( - mainpgd, virt_address, + main_pgd, virt_address, phy_address, pfn_count * PAGE_SIZE, MM_PRESENT | MM_RW | MM_GLOBAL | MM_UPDADDR); @@ -200,9 +200,9 @@ uint32_t virt_map_vaddress(mm_struct_t *mm, virt_map_page_t *vpage, uint32_t vad uint32_t start_map_virt_address = VIRT_PAGE_TO_ADDRESS(vpage); // Get the main page directory. - page_directory_t *mainpgd = paging_get_main_directory(); + page_directory_t *main_pgd = paging_get_main_directory(); // Error handling: Failed to get the main page directory. - if (!mainpgd) { + if (!main_pgd) { pr_crit("Failed to get the main page directory\n"); return -1; // Return -1 to indicate failure. } @@ -210,7 +210,7 @@ uint32_t virt_map_vaddress(mm_struct_t *mm, virt_map_page_t *vpage, uint32_t vad // Clone the source vaddr the the requested virtual memory portion. mem_clone_vm_area( mm->pgd, - mainpgd, + main_pgd, vaddr, start_map_virt_address, size, @@ -252,15 +252,15 @@ int virt_unmap_pg(virt_map_page_t *page) uint32_t addr = VIRT_PAGE_TO_ADDRESS(page); // Get the main page directory. - page_directory_t *mainpgd = paging_get_main_directory(); + page_directory_t *main_pgd = paging_get_main_directory(); // Error handling: Failed to get the main page directory. - if (!mainpgd) { + if (!main_pgd) { pr_crit("Failed to get the main page directory\n"); return -1; // Return -1 to indicate failure. } // Set all virtual pages as not present to avoid unwanted memory accesses by the kernel. - mem_upd_vm_area(mainpgd, addr, 0, (1 << page->bbpage.order) * PAGE_SIZE, MM_GLOBAL); + mem_upd_vm_area(main_pgd, addr, 0, (1 << page->bbpage.order) * PAGE_SIZE, MM_GLOBAL); // Free the pages in the buddy system. bb_free_pages(&virt_default_mapping.bb_instance, &page->bbpage); From 28667f05f0578af65d8ece9ca48f687b48fffce8 Mon Sep 17 00:00:00 2001 From: "Enrico Fraccaroli (Galfurian)" Date: Mon, 30 Sep 2024 15:39:59 -0400 Subject: [PATCH 6/9] Remove useless check. 
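
The assertion being removed cannot fire as long as node_zones is an embedded
array inside the node descriptor rather than a separately allocated pointer.
A minimal sketch of that assumption (the array length and the fields other
than node_zones and nr_zones are illustrative, not the actual layout):

    typedef struct zone { unsigned long size; } zone_t;
    typedef struct pg_data {
        zone_t node_zones[3]; /* embedded array, decays to a non-NULL address */
        int nr_zones;
    } pg_data_t;

    /* For any valid pg_data_t *pgdat, the expression pgdat->node_zones is the
     * address of pgdat->node_zones[0] and can never be NULL, so asserting on
     * it adds no safety; the meaningful failure mode, contig_page_data itself
     * being NULL, is still checked separately. */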
--- mentos/src/mem/zone_allocator.c | 1 - 1 file changed, 1 deletion(-) diff --git a/mentos/src/mem/zone_allocator.c b/mentos/src/mem/zone_allocator.c index 455a9305..4d6c7742 100644 --- a/mentos/src/mem/zone_allocator.c +++ b/mentos/src/mem/zone_allocator.c @@ -165,7 +165,6 @@ static zone_t *get_zone_from_flags(gfp_t gfp_mask) { // Ensure that contig_page_data and node_zones are valid. assert(contig_page_data && "contig_page_data is NULL."); - assert(contig_page_data->node_zones && "node_zones is NULL."); switch (gfp_mask) { case GFP_KERNEL: From ed4b9ca5e69391be20cd227b2c9ff9e35408ca49 Mon Sep 17 00:00:00 2001 From: "Enrico Fraccaroli (Galfurian)" Date: Mon, 30 Sep 2024 15:42:47 -0400 Subject: [PATCH 7/9] Change the way we check errors in get_zone_from_page. --- mentos/src/mem/zone_allocator.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/mentos/src/mem/zone_allocator.c b/mentos/src/mem/zone_allocator.c index 4d6c7742..d5f96ad7 100644 --- a/mentos/src/mem/zone_allocator.c +++ b/mentos/src/mem/zone_allocator.c @@ -138,13 +138,19 @@ static zone_t *get_zone_from_page(page_t *page) zone = contig_page_data->node_zones + zone_index; // Check if the zone was retrieved successfully. - assert(zone && "Failed to retrieve the zone."); + if (!zone) { + pr_crit("Failed to get zone from GFP mask.\n"); + return NULL; // Return NULL to indicate failure. + } // Get the last page of the zone by adding the size to the memory map. last_page = zone->zone_mem_map + zone->size; // Check if the last page of the zone was retrieved successfully. - assert(last_page && "Failed to retrieve the last page of the zone."); + if (!last_page) { + pr_crit("Failed to retrieve the last page of the zone.\n"); + return NULL; // Return NULL to indicate failure. + } // Check if the given page is within the current zone. if (page < last_page) { @@ -152,10 +158,11 @@ static zone_t *get_zone_from_page(page_t *page) } } - // If no zone contains the page, return NULL. - // This could represent an error where the page doesn't belong to any zone. - assert(0 && "Error: page is over memory size or not part of any zone."); - return (zone_t *)NULL; + pr_crit("page is over memory size or not part of any zone."); + + // If no zone contains the page, return NULL. This could represent an error + // where the page doesn't belong to any zone. + return NULL; } /// @brief Get a zone from gfp_mask. From 426b9607d2680236085e81da05843e7f6fd9f32b Mon Sep 17 00:00:00 2001 From: "Enrico Fraccaroli (Galfurian)" Date: Mon, 30 Sep 2024 15:59:49 -0400 Subject: [PATCH 8/9] Improve comments and error checking for zone allocator. --- mentos/inc/mem/zone_allocator.h | 27 ++++-- mentos/src/mem/zone_allocator.c | 162 +++++++++++++++++++++----------- 2 files changed, 126 insertions(+), 63 deletions(-) diff --git a/mentos/inc/mem/zone_allocator.h b/mentos/inc/mem/zone_allocator.h index 3092fe4b..c1bf504d 100644 --- a/mentos/inc/mem/zone_allocator.h +++ b/mentos/inc/mem/zone_allocator.h @@ -159,22 +159,29 @@ uint32_t __alloc_pages_lowmem(gfp_t gfp_mask, uint32_t order); /// and return the memory address of the first page frame allocated. /// @param gfp_mask GFP_FLAGS to decide the zone allocation. /// @param order The logarithm of the size of the page frame. -/// @return Memory address of the first free page frame allocated, or NULL if allocation fails. +/// @return Memory address of the first free page frame allocated, or NULL if +/// allocation fails. 
page_t *_alloc_pages(gfp_t gfp_mask, uint32_t order); -/// @brief Get the start address of the corresponding page. -/// @param page A page structure. -/// @return The address that corresponds to the page. +/// @brief Converts a page structure to its corresponding low memory virtual +/// address. +/// @param page Pointer to the page structure. +/// @return The low memory virtual address corresponding to the specified page, +/// or 0 if the input page pointer is invalid. uint32_t get_lowmem_address_from_page(page_t *page); -/// @brief Get the start physical address of the corresponding page. -/// @param page A page structure -/// @return The physical address that corresponds to the page. +/// @brief Converts a page structure to its corresponding physical address. +/// @param page Pointer to the page structure. +/// @return The physical address corresponding to the specified page, or 0 if +/// the input page pointer is invalid. uint32_t get_physical_address_from_page(page_t *page); -/// @brief Get the page from it's physical address. -/// @param phy_addr The physical address -/// @return The page that corresponds to the physical address. +/// @brief Retrieves the page structure corresponding to a given physical +/// address. +/// @param phy_addr The physical address for which the page structure is +/// requested. +/// @return A pointer to the corresponding page structure, or NULL if the +/// address is invalid. page_t *get_page_from_physical_address(uint32_t phy_addr); /// @brief Retrieves the low memory page corresponding to the given virtual diff --git a/mentos/src/mem/zone_allocator.c b/mentos/src/mem/zone_allocator.c index d5f96ad7..74fc9511 100644 --- a/mentos/src/mem/zone_allocator.c +++ b/mentos/src/mem/zone_allocator.c @@ -77,12 +77,22 @@ page_t *get_lowmem_page_from_address(uint32_t addr) uint32_t get_lowmem_address_from_page(page_t *page) { - // Check for NULL page pointer. - assert(page && "Invalid page pointer."); + // Check for NULL page pointer. If it is NULL, print an error and return 0. + if (!page) { + pr_err("Invalid page pointer: NULL value provided.\n"); + return 0; // Return 0 to indicate an error in retrieving the address. + } // Calculate the index of the page in the memory map. unsigned int page_index = page - mem_map; + // Ensure the calculated page index is within valid bounds. + if (page_index < lowmem_page_base) { + pr_err("Invalid page index: %u is less than low memory base: %u.\n", + page_index, lowmem_page_base); + return 0; // Return 0 to indicate an error in retrieving the address. + } + // Calculate the offset from the low memory base address. unsigned int offset = page_index - lowmem_page_base; @@ -92,8 +102,11 @@ uint32_t get_lowmem_address_from_page(page_t *page) uint32_t get_physical_address_from_page(page_t *page) { - // Ensure the page pointer is not NULL. - assert(page && "Invalid page pointer."); + // Ensure the page pointer is not NULL. If it is NULL, print an error and return 0. + if (!page) { + pr_err("Invalid page pointer: NULL value provided.\n"); + return 0; // Return 0 to indicate an error in retrieving the address. + } // Calculate the index of the page in the memory map. unsigned int page_index = page - mem_map; @@ -105,8 +118,11 @@ uint32_t get_physical_address_from_page(page_t *page) page_t *get_page_from_physical_address(uint32_t phy_addr) { - // Ensure the physical address is valid. - assert(phy_addr % PAGE_SIZE == 0 && "Address must be page-aligned."); + // Ensure the physical address is valid and aligned to page boundaries. 
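    // Illustrative aside (not part of this patch): with the asserts replaced by
    // sentinel returns (0 for addresses, NULL for pages), callers are expected
    // to check the result themselves, for example:
    //
    //     uint32_t addr = get_lowmem_address_from_page(page);
    //     if (!addr) {
    //         return -1; // Propagate the failure instead of panicking.
    //     }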
+ if (phy_addr % PAGE_SIZE != 0) { + pr_crit("Address must be page-aligned. Received address: 0x%08x\n", phy_addr); + return NULL; // Return NULL to indicate failure due to misalignment. + } // Calculate the index of the page in the memory map. unsigned int page_index = phy_addr / PAGE_SIZE; @@ -114,25 +130,31 @@ page_t *get_page_from_physical_address(uint32_t phy_addr) // Check for overflow: ensure the index does not exceed the maximum memory // map size. if (page_index >= MAX_MEM_MAP_SIZE) { - pr_crit("Physical address is out of bounds.\n"); - return NULL; // Return NULL to indicate failure. + pr_crit("Physical address is out of bounds. Page index: %u, MAX: %u\n", + page_index, MAX_MEM_MAP_SIZE); + return NULL; // Return NULL to indicate failure due to out-of-bounds access. } // Return the pointer to the corresponding page structure in the memory map. return mem_map + page_index; } + /// @brief Get the zone that contains a page frame. -/// @param page A page descriptor. -/// @return The zone requested or NULL if the page is not within any zone. +/// @param page A pointer to the page descriptor. +/// @return A pointer to the zone containing the page, or NULL if the page is +/// not within any zone. static zone_t *get_zone_from_page(page_t *page) { // Validate the input parameter. - assert(page && "Invalid input: page is NULL."); + if (!page) { + pr_crit("Invalid input: page is NULL.\n"); + return NULL; // Return NULL to indicate failure due to NULL input. + } zone_t *zone; - page_t *last_page; + page_t *first_page, *last_page; - // Iterate over all the zones. + // Iterate over all the zones in the contiguous page data structure. for (int zone_index = 0; zone_index < contig_page_data->nr_zones; zone_index++) { // Get the zone at the given index. zone = contig_page_data->node_zones + zone_index; @@ -140,20 +162,25 @@ static zone_t *get_zone_from_page(page_t *page) // Check if the zone was retrieved successfully. if (!zone) { pr_crit("Failed to get zone from GFP mask.\n"); - return NULL; // Return NULL to indicate failure. + return NULL; // Return NULL to indicate failure if a zone is not found. } - // Get the last page of the zone by adding the size to the memory map. - last_page = zone->zone_mem_map + zone->size; + // Get the first and last page of the zone by adding the zone size to + // the base of the memory map. + first_page = zone->zone_mem_map, last_page = zone->zone_mem_map + zone->size; - // Check if the last page of the zone was retrieved successfully. + // Check if the first and last page of the zone was retrieved successfully. + if (!first_page) { + pr_crit("Failed to retrieve the first page of the zone.\n"); + return NULL; // Return NULL to indicate failure. + } if (!last_page) { pr_crit("Failed to retrieve the last page of the zone.\n"); return NULL; // Return NULL to indicate failure. } // Check if the given page is within the current zone. - if (page < last_page) { + if ((page >= first_page) && (page < last_page)) { return zone; // Return the zone if the page is within its range. } } @@ -165,14 +192,19 @@ static zone_t *get_zone_from_page(page_t *page) return NULL; } -/// @brief Get a zone from gfp_mask. -/// @param gfp_mask GFP_FLAG see gfp.h. -/// @return The zone requested or NULL if the gfp_mask is not recognized. +/// @brief Get a zone from the specified GFP mask. +/// @param gfp_mask GFP flags indicating the type of memory allocation request. +/// @return A pointer to the requested zone, or NULL if the gfp_mask is not +/// recognized. 
static zone_t *get_zone_from_flags(gfp_t gfp_mask) { - // Ensure that contig_page_data and node_zones are valid. - assert(contig_page_data && "contig_page_data is NULL."); + // Ensure that contig_page_data is initialized and valid. + if (!contig_page_data) { + pr_crit("contig_page_data is NULL.\n"); + return NULL; // Return NULL to indicate failure due to uninitialized data. + } + // Determine the appropriate zone based on the given GFP mask. switch (gfp_mask) { case GFP_KERNEL: case GFP_ATOMIC: @@ -187,28 +219,30 @@ static zone_t *get_zone_from_flags(gfp_t gfp_mask) return &contig_page_data->node_zones[ZONE_HIGHMEM]; default: - // If the gfp_mask does not match any known flags, return NULL. - assert(0 && "Error: Unrecognized gfp_mask."); - return (zone_t *)NULL; + // If the gfp_mask does not match any recognized flags, log an error and return NULL. + pr_crit("Unrecognized gfp_mask: %u.\n", gfp_mask); + return NULL; // Return NULL to indicate that the input was not valid. } } -/// @brief Checks if the memory is clean. -/// @param gfp_mask The mask which specifies the zone we are interested in. -/// @return 1 if clean, 0 on error. +/// @brief Checks if the specified memory zone is clean (i.e., all pages are free). +/// @param gfp_mask The mask that specifies the zone of interest for memory allocation. +/// @return 1 if the memory is clean, 0 if there is an error or if the memory is not clean. static int is_memory_clean(gfp_t gfp_mask) { // Get the corresponding zone based on the gfp_mask. zone_t *zone = get_zone_from_flags(gfp_mask); - - // Assert that the zone is valid. - assert(zone && "Failed to retrieve the zone given the gfp_mask!"); + if (!zone) { + pr_crit("Failed to retrieve the zone for gfp_mask: %u.\n", gfp_mask); + return 0; // Return 0 to indicate an error due to invalid zone. + } // Get the last free area list of the buddy system. bb_free_area_t *area = zone->buddy_system.free_area + (MAX_BUDDYSYSTEM_GFP_ORDER - 1); - - // Assert that the area is valid. - assert(area && "Failed to retrieve the last free_area for the given zone!"); + if (!area) { + pr_crit("Failed to retrieve the last free_area for the zone.\n"); + return 0; // Return 0 to indicate an error due to invalid area. + } // Compute the total size of the zone. unsigned int total_size = (zone->size / (1UL << (MAX_BUDDYSYSTEM_GFP_ORDER - 1))); @@ -311,25 +345,46 @@ static int pmm_check(void) } /// @brief Initializes the memory attributes for a specified zone. -/// @param name the zone's name. -/// @param zone_index the zone's index. -/// @param adr_from the lowest address of the zone. -/// @param adr_to the highest address of the zone (not included!). -static void zone_init(char *name, int zone_index, uint32_t adr_from, uint32_t adr_to) +/// @param name The zone's name. +/// @param zone_index The zone's index, which must be valid within the number of zones. +/// @param adr_from The lowest address of the zone (inclusive). +/// @param adr_to The highest address of the zone (exclusive). +/// @return 0 on success, -1 on error. +static int zone_init(char *name, int zone_index, uint32_t adr_from, uint32_t adr_to) { - // Ensure that the provided addresses are valid. 
- assert((adr_from < adr_to) && "Inserted bad block addresses: adr_from must be less than adr_to."); - assert(((adr_from & 0xfffff000) == adr_from) && "Inserted bad block addresses: adr_from must be aligned."); - assert(((adr_to & 0xfffff000) == adr_to) && "Inserted bad block addresses: adr_to must be aligned."); + // Ensure that the provided addresses are valid: adr_from must be less than adr_to. + if (adr_from >= adr_to) { + pr_crit("Invalid block addresses: adr_from (%u) must be less than adr_to (%u).\n", adr_from, adr_to); + return -1; // Return -1 to indicate an error. + } + + // Ensure that adr_from is page-aligned. + if ((adr_from & 0xfffff000) != adr_from) { + pr_crit("adr_from (%u) must be page-aligned.\n", adr_from); + return -1; // Return -1 to indicate an error. + } + + // Ensure that adr_to is page-aligned. + if ((adr_to & 0xfffff000) != adr_to) { + pr_crit("adr_to (%u) must be page-aligned.\n", adr_to); + return -1; // Return -1 to indicate an error. + } // Ensure that the zone_index is within the valid range. - assert((zone_index < contig_page_data->nr_zones) && "The index is above the number of zones."); + if ((zone_index < 0) || (zone_index >= contig_page_data->nr_zones)) { + pr_crit("The zone_index (%d) is out of bounds (max: %d).\n", + zone_index, contig_page_data->nr_zones - 1); + return -1; // Return -1 to indicate an error. + } // Take the zone_t structure that corresponds to the zone_index. zone_t *zone = contig_page_data->node_zones + zone_index; - // Assert that the zone was retrieved successfully. - assert(zone && "Failed to retrieve the zone."); + // Ensure that the zone was retrieved successfully. + if (!zone) { + pr_crit("Failed to retrieve the zone for zone_index: %d.\n", zone_index); + return -1; // Return -1 to indicate an error. + } // Calculate the number of page frames in the zone. size_t num_page_frames = (adr_to - adr_from) / PAGE_SIZE; @@ -349,12 +404,13 @@ static void zone_init(char *name, int zone_index, uint32_t adr_from, uint32_t ad // Initialize the buddy system for the new zone. buddy_system_init( - &zone->buddy_system, - name, - zone->zone_mem_map, - BBSTRUCT_OFFSET(page_t, bbpage), - sizeof(page_t), - num_page_frames); + &zone->buddy_system, // Buddy system structure for the zone. + name, // Name of the zone. + zone->zone_mem_map, // Pointer to the memory map of the zone. + BBSTRUCT_OFFSET(page_t, bbpage), // Offset for the buddy system structure. + sizeof(page_t), // Size of each page. + num_page_frames // Total number of page frames in the zone. + ); // Dump the current state of the buddy system for debugging purposes. buddy_system_dump(&zone->buddy_system); From b409d6ab65c45817c802efb217ad5899a78d0019 Mon Sep 17 00:00:00 2001 From: "Enrico Fraccaroli (Galfurian)" Date: Mon, 30 Sep 2024 16:00:28 -0400 Subject: [PATCH 9/9] Improve comments and error checking for zone allocator. --- mentos/src/mem/zone_allocator.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mentos/src/mem/zone_allocator.c b/mentos/src/mem/zone_allocator.c index 74fc9511..b96ee36e 100644 --- a/mentos/src/mem/zone_allocator.c +++ b/mentos/src/mem/zone_allocator.c @@ -414,6 +414,8 @@ static int zone_init(char *name, int zone_index, uint32_t adr_from, uint32_t adr // Dump the current state of the buddy system for debugging purposes. buddy_system_dump(&zone->buddy_system); + + return 0; } /*