12 changes: 5 additions & 7 deletions kernel/src/mem/alloc/slab.c
@@ -223,17 +223,15 @@ static int __compute_size_and_order(kmem_cache_t *cachep)
return -1;
}

- // Compute the `gfp_order` based on the total object size and page size.
- // The `gfp_order` determines how many contiguous pages will be allocated
- // for the slab.
- unsigned int size = round_up(cachep->aligned_object_size, PAGE_SIZE) / PAGE_SIZE;
+ // Compute how many pages are needed for at least one object.
+ unsigned int pages_needed = round_up(cachep->aligned_object_size, PAGE_SIZE) / PAGE_SIZE;

// Reset `gfp_order` to 0 before calculating.
cachep->gfp_order = 0;

- // Calculate the order by determining how many divisions by 2 the size
- // undergoes until it becomes smaller than or equal to 1.
- while ((size /= 2) > 0) {
+ // Use ceil(log2(pages_needed)) so the slab is always large enough.
+ // Example: pages_needed = 3 requires order 2 (4 pages), not order 1 (2 pages).
+ while ((1U << cachep->gfp_order) < pages_needed) {
cachep->gfp_order++;
}

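For reference, a minimal standalone sketch of the new calculation, assuming a 4096-byte PAGE_SIZE; round_up_pages and compute_gfp_order are illustrative names, not kernel APIs, and the real __compute_size_and_order also folds alignment and slab bookkeeping into aligned_object_size:

#include <stdio.h>

#define PAGE_SIZE 4096U // assumed page size for this sketch

// Pages needed to hold `size` bytes, rounded up
// (mirrors round_up(size, PAGE_SIZE) / PAGE_SIZE in the kernel).
static unsigned int round_up_pages(unsigned int size)
{
    return (size + PAGE_SIZE - 1) / PAGE_SIZE;
}

// Smallest order such that (1 << order) contiguous pages hold at least one object.
static unsigned int compute_gfp_order(unsigned int aligned_object_size)
{
    unsigned int pages_needed = round_up_pages(aligned_object_size);
    unsigned int order        = 0;
    while ((1U << order) < pages_needed) {
        order++;
    }
    return order;
}

int main(void)
{
    // 8464 bytes -> 3 pages -> order 2 (4 pages). The old floor-style loop
    // (size /= 2 until it reaches 1) would have returned order 1, i.e. only 8192 bytes.
    printf("order for 8464 bytes = %u\n", compute_gfp_order(8464));
    return 0;
}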
62 changes: 62 additions & 0 deletions kernel/src/tests/unit/test_slab.c
@@ -433,6 +433,67 @@ TEST(memory_slab_cache_destruction_safety)
TEST_SECTION_END();
}

/// @brief Test cache lifecycle for heterogeneous structure sizes.
TEST(memory_slab_multi_size_cache_lifecycle)
{
TEST_SECTION_START("Slab multi-size cache lifecycle");

typedef struct {
uint8_t value;
} tiny_obj_t;

typedef struct {
uint32_t values[6];
} small_obj_t;

typedef struct {
uint32_t values[64];
} medium_obj_t;

typedef struct {
uint8_t values[2048];
} large_obj_t;

typedef struct {
uint8_t values[8464];
} huge_obj_t;

typedef struct {
const char *name;
unsigned int size;
unsigned int align;
} cache_case_t;

cache_case_t cases[] = {
{.name = "test_slab_tiny", .size = sizeof(tiny_obj_t), .align = alignof(tiny_obj_t) },
{.name = "test_slab_small", .size = sizeof(small_obj_t), .align = alignof(small_obj_t) },
{.name = "test_slab_medium", .size = sizeof(medium_obj_t), .align = alignof(medium_obj_t)},
{.name = "test_slab_large", .size = sizeof(large_obj_t), .align = alignof(large_obj_t) },
{.name = "test_slab_huge", .size = sizeof(huge_obj_t), .align = alignof(huge_obj_t) },
};

unsigned long free_before_all = get_zone_free_space(GFP_KERNEL);

for (unsigned int i = 0; i < count_of(cases); ++i) {
kmem_cache_t *cache = kmem_cache_create(cases[i].name, cases[i].size, cases[i].align, GFP_KERNEL, NULL, NULL);
ASSERT_MSG(cache != NULL, "kmem_cache_create must succeed for each test size");

void *obj = kmem_cache_alloc(cache, GFP_KERNEL);
ASSERT_MSG(obj != NULL, "kmem_cache_alloc must succeed for each test size");

memset(obj, 0xA5, cases[i].size);
ASSERT_MSG(*(uint8_t *)obj == 0xA5, "allocated object must be writable");

ASSERT_MSG(kmem_cache_free(obj) == 0, "kmem_cache_free must succeed");
ASSERT_MSG(kmem_cache_destroy(cache) == 0, "kmem_cache_destroy must succeed");
}

unsigned long free_after_all = get_zone_free_space(GFP_KERNEL);
ASSERT_MSG(free_after_all == free_before_all, "Zone free pages must return to baseline after all cache destroys");

TEST_SECTION_END();
}

/// @brief Main test function for slab subsystem.
void test_slab(void)
{
@@ -451,4 +451,5 @@ void test_slab(void)
test_memory_slab_object_reuse();
test_memory_slab_parallel_caches();
test_memory_slab_cache_destruction_safety();
test_memory_slab_multi_size_cache_lifecycle();
}
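As a rough sanity check of the sizes exercised above (again assuming a 4096-byte page and ignoring any alignment or slab bookkeeping added on top of the raw struct sizes), the sketch under the slab.c hunk would yield:

compute_gfp_order(sizeof(tiny_obj_t))   // 1 byte     -> 1 page  -> order 0
compute_gfp_order(sizeof(small_obj_t))  // 24 bytes   -> 1 page  -> order 0
compute_gfp_order(sizeof(medium_obj_t)) // 256 bytes  -> 1 page  -> order 0
compute_gfp_order(sizeof(large_obj_t))  // 2048 bytes -> 1 page  -> order 0
compute_gfp_order(sizeof(huge_obj_t))   // 8464 bytes -> 3 pages -> order 2

Only huge_obj_t spans multiple pages, so it is the case the old floor-style loop got wrong: order 1 gives 8192 bytes, which cannot hold a single 8464-byte object, while the new ceil(log2) loop requests order 2. The final get_zone_free_space comparison then verifies that every cache, whatever its order, returns its pages to the zone on destroy.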