Commit a42bc50

Add a batch allocator
Add a simple "batch allocator". It allocates memory for equally sized objects; that memory never has to be freed and can be reused instead. The first call to this allocator allocates 2 instances of the object, the next one allocates 4, then 8, 16, and after that every new allocation request adds 32 instances. When those objects are freed, they are only marked as free and kept for reuse. Also add a unit test for it.

Signed-off-by: Guennadi Liakhovetski <guennadi.liakhovetski@linux.intel.com>
1 parent 9bfd564 commit a42bc50
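
For illustration only (not part of this commit), here is a minimal usage sketch of the new API. struct my_obj and example() are made-up names; the list head uses SOF's LIST_INIT(), the same primitive the unit test below relies on:

#include <sof/batch.h>
#include <sof/list.h>

/* hypothetical caller object */
struct my_obj {
	int id;
	int state;
};

static void example(void)
{
	struct list_item obj_pool = LIST_INIT(obj_pool);

	/* the first call creates a batch of 2 blocks and hands out block 0 */
	struct my_obj *a = batch_alloc(&obj_pool, sizeof(struct my_obj));
	struct my_obj *b = batch_alloc(&obj_pool, sizeof(struct my_obj));
	/* both blocks are now taken, so this call adds a batch of 4 */
	struct my_obj *c = batch_alloc(&obj_pool, sizeof(struct my_obj));

	/* returning a block only clears its bit; the memory stays in the pool */
	batch_free(&obj_pool, b);

	/* a same-size allocation reuses the slot that was just freed */
	struct my_obj *d = batch_alloc(&obj_pool, sizeof(struct my_obj));

	(void)a; (void)c; (void)d;
}

All calls on the same list head must pass the same size; newly created batches come zeroed from rzalloc(), while reused blocks keep their previous contents.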

7 files changed: +301 -1
src/include/sof/batch.h

Lines changed: 41 additions & 0 deletions
@@ -0,0 +1,41 @@
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2025 Intel Corporation.
 */

#ifndef __ZEPHYR_BATCH_H__
#define __ZEPHYR_BATCH_H__

#include <stddef.h>

struct list_item;

/**
 * Allocate memory tracked as part of a batch.
 *
 * @param head Pointer to the batch list head.
 * @param size Size in bytes of memory blocks to allocate.
 *
 * @return a pointer to the allocated memory on success, NULL on failure.
 *
 * Allocate a memory block of @a size bytes. @a size is used upon the first
 * invocation to allocate memory on the heap; all subsequent allocations with
 * the same @a head must use the same @a size value. The first allocation with
 * an empty @a head allocates 2 blocks. After both blocks are taken and a third
 * one is requested, the next call allocates 4 blocks, then 8, 16 and 32. After
 * that 32 blocks are allocated every time. Note that by design allocated
 * blocks are never freed. See batch_free() below.
 */
void *batch_alloc(struct list_item *head, size_t size);

/**
 * Return a block to the batch.
 *
 * @param head Pointer to the batch list head.
 * @param data Pointer to the block being returned.
 *
 * @return 0 on success or a negative error code.
 *
 * Return a block to the batch. Memory is never freed by design; unused blocks
 * are kept in the batch for future re-use.
 */
int batch_free(struct list_item *head, void *data);

#endif

src/lib/CMakeLists.txt

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: BSD-3-Clause
 
-set(common_files notifier.c dma.c dai.c)
+set(common_files notifier.c dma.c dai.c batch.c)
 
 if(CONFIG_LIBRARY)
 	add_local_sources(sof

src/lib/batch.c

Lines changed: 130 additions & 0 deletions
@@ -0,0 +1,130 @@
// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2025 Intel Corporation.

#include <rtos/alloc.h>
#include <rtos/bit.h>
#include <sof/batch.h>
#include <sof/common.h>
#include <sof/list.h>

#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct batch {
	struct list_item list;
	unsigned int n;
	uint32_t mask;
	size_t size;
	uint8_t data[];
};

static int batch_add(struct list_item *head, unsigned int n, size_t size)
{
	if (n > 32)
		return -ENOMEM;

	if (!is_power_of_2(n))
		return -EINVAL;

	size_t aligned_size = ALIGN_UP(size, sizeof(int));

	/* Initialize with 0 to give caller a chance to identify new allocations */
	struct batch *abatch = rzalloc(0, n * aligned_size + sizeof(*abatch));

	if (!abatch)
		return -ENOMEM;

	abatch->n = n;
	/* clear bit means free */
	abatch->mask = 0;
	abatch->size = size;

	list_item_append(&abatch->list, head);

	return 0;
}

void *batch_alloc(struct list_item *head, size_t size)
{
	size_t aligned_size = ALIGN_UP(size, sizeof(int));
	struct list_item *list;
	struct batch *abatch;

	/* Make sure size * 32 still fits in 32 bits */
	if (!size || aligned_size > (UINT_MAX >> 5) - sizeof(*abatch))
		return NULL;

	list_for_item(list, head) {
		abatch = container_of(list, struct batch, list);

		uint32_t free_mask = MASK(abatch->n - 1, 0) & ~abatch->mask;

		/* sanity check */
		if (size != abatch->size)
			return NULL;

		if (!free_mask)
			continue;

		/* Find first free - guaranteed valid now */
		unsigned int bit = ffs(free_mask) - 1;

		abatch->mask |= BIT(bit);

		return abatch->data + aligned_size * bit;
	}

	/* no free elements found */
	unsigned int new_n;

	if (list_is_empty(head)) {
		new_n = 2;
	} else {
		/* Check the last one */
		abatch = container_of(head->prev, struct batch, list);

		if (abatch->n == 32)
			new_n = 32;
		else
			new_n = abatch->n << 1;
	}

	if (batch_add(head, new_n, size) < 0)
		return NULL;

	/* Return the first element of the new batch, which is now the last one in the list */
	abatch = container_of(head->prev, struct batch, list);
	abatch->mask = 1;

	return abatch->data;
}

int batch_free(struct list_item *head, void *data)
{
	struct list_item *list;
	struct batch *abatch;

	list_for_item(list, head) {
		abatch = container_of(list, struct batch, list);

		size_t aligned_size = ALIGN_UP(abatch->size, sizeof(int));

		if ((uint8_t *)data >= abatch->data &&
		    (uint8_t *)data < abatch->data + aligned_size * abatch->n) {
			unsigned int n = ((uint8_t *)data - abatch->data) / aligned_size;

			if ((uint8_t *)data != abatch->data + n * aligned_size)
				return -EINVAL;

			abatch->mask &= ~BIT(n);

			return 0;
		}
	}

	return -EINVAL;
}
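
As an aside, the per-batch bookkeeping in batch_alloc() and batch_free() boils down to a 32-bit occupancy mask with one bit per block. The following is a minimal standalone sketch of that technique, using plain shifts instead of SOF's MASK()/BIT() helpers and ffs() from <strings.h>:

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	uint32_t mask = 0;	/* clear bit means free, as in struct batch */
	unsigned int n = 4;	/* pretend the batch holds 4 blocks */

	for (int i = 0; i < 5; i++) {
		/* bits 0..n-1 that are still clear */
		uint32_t free_mask = ((1u << n) - 1) & ~mask;

		if (!free_mask) {
			printf("batch full, batch_alloc() would add a new one\n");
			break;
		}

		/* index of the lowest free slot */
		unsigned int bit = ffs(free_mask) - 1;

		mask |= 1u << bit;
		printf("took slot %u, mask now 0x%x\n", bit, mask);
	}

	/* freeing slot 2 just clears its bit, the memory is kept */
	mask &= ~(1u << 2);
	printf("freed slot 2, mask now 0x%x\n", mask);

	return 0;
}
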
test/ztest/unit/batch/CMakeLists.txt

Lines changed: 30 additions & 0 deletions
@@ -0,0 +1,30 @@
cmake_minimum_required(VERSION 3.20.0)

find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(test_batch)

set(SOF_ROOT "${PROJECT_SOURCE_DIR}/../../../..")

# Include SOF CMake functions
include(${SOF_ROOT}/scripts/cmake/misc.cmake)

target_include_directories(app PRIVATE
	${SOF_ROOT}/zephyr/include
	${SOF_ROOT}/src/include
	${SOF_ROOT}/src/platform/posix/include
)

# Define SOF-specific configurations for unit testing
target_compile_definitions(app PRIVATE
	-DCONFIG_ZEPHYR_POSIX=1
)

target_sources(app PRIVATE
	test_batch_ztest.c
	${SOF_ROOT}/src/lib/batch.c
)

target_link_libraries(app PRIVATE "-Wl,--wrap=rzalloc")

# Add RELATIVE_FILE definitions for SOF trace functionality
sof_append_relative_path_definitions(app)

test/ztest/unit/batch/prj.conf

Lines changed: 2 additions & 0 deletions
@@ -0,0 +1,2 @@
CONFIG_ZTEST=y
CONFIG_SOF_FULL_ZEPHYR_APPLICATION=n
test/ztest/unit/batch/test_batch_ztest.c

Lines changed: 84 additions & 0 deletions
@@ -0,0 +1,84 @@
// SPDX-License-Identifier: BSD-3-Clause
//
// Copyright(c) 2025 Intel Corporation.

#define DATA_SIZE 5
#define ALIGNED_SIZE ALIGN_UP(DATA_SIZE, sizeof(int))

#include <zephyr/ztest.h>
#include <sof/batch.h>
#include <sof/common.h>
#include <sof/list.h>
#include <stdlib.h>
#include <string.h>

void *__wrap_rzalloc(uint32_t flags, size_t bytes)
{
	(void)flags;

	void *ret = malloc(bytes);

	if (ret)
		memset(ret, 0, bytes);

	return ret;
}

ZTEST(batch_suite, test_batch_wrong_size)
{
	struct list_item head = LIST_INIT(head);
	/* new batch of 2 blocks */
	uint8_t *block1 = batch_alloc(&head, DATA_SIZE);
	/* should fail because of a different size */
	uint8_t *block2 = batch_alloc(&head, DATA_SIZE + 1);
	/* second block in the first batch */
	uint8_t *block3 = batch_alloc(&head, DATA_SIZE);
	/* new batch of 4 blocks */
	uint8_t *block4 = batch_alloc(&head, DATA_SIZE);
	/* should fail because of a different size */
	uint8_t *block5 = batch_alloc(&head, DATA_SIZE * 2);

	zassert_not_null(block1);
	zassert_is_null(block2);
	zassert_not_null(block3);
	zassert_not_null(block4);
	zassert_is_null(block5);

	zassert_not_ok(batch_free(&head, block1 + 1));
	zassert_ok(batch_free(&head, block1));
	zassert_not_ok(batch_free(&head, block3 + 1));
	zassert_ok(batch_free(&head, block3));
	zassert_not_ok(batch_free(&head, block4 + 1));
	zassert_ok(batch_free(&head, block4));
}

ZTEST(batch_suite, test_batch)
{
	struct list_item head = LIST_INIT(head);
	void *blocks[62]; /* 2 + 4 + 8 + 16 + 32 */
	unsigned int k = 0;

	/* Loop over all powers: 2^1..2^5 */
	for (unsigned int i = 1; i <= 5; i++) {
		unsigned int n = 1 << i;
		uint8_t *start;

		for (unsigned int j = 0; j < n; j++) {
			uint8_t *block = batch_alloc(&head, DATA_SIZE);

			zassert_not_null(block, "allocation failed loop %u iter %u", i, j);

			if (!j)
				start = block;
			else
				zassert_equal(block, start + ALIGNED_SIZE * j, "wrong pointer");

			blocks[k++] = block;
		}
	}

	while (k--)
		zassert_ok(batch_free(&head, blocks[k]), "free failed");
}

ZTEST_SUITE(batch_suite, NULL, NULL, NULL, NULL, NULL);
test/ztest/unit/batch/testcase.yaml

Lines changed: 13 additions & 0 deletions
@@ -0,0 +1,13 @@
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright(c) 2025 Intel Corporation.
#
# Batch allocator unit tests for Ztest framework

tests:
  sof.batch:
    tags: unit
    platform_allow: native_sim
    integration_platforms:
      - native_sim
    build_only: false
