Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
201 changes: 201 additions & 0 deletions src/experimental/irregular_buffer.zig
Original file line number Diff line number Diff line change
@@ -0,0 +1,201 @@

const std = @import("std");

const Allocator = std.mem.Allocator;

pub fn IrregularBuffer(comptime T: type) type {

    return struct {

        const Self = @This();
        const DataSlice = []T;
        const ConstData = []const T;
        const IdxsSlice = []IndexPair;
        const ConstIdxs = []const IndexPair;

        /// Half-open range [lhs, rhs) into `data` describing one segment.
        const IndexPair = struct {
            lhs: usize,
            rhs: usize,
        };

        /// Iterates segments front-to-back, yielding a const view of each.
        const ForwardIterator = struct {
            ptr: *const Self,
            idx: usize = 0,
            pub fn next(self: *@This()) ?ConstData {
                const last = self.idx;
                return if (last < self.ptr.idxs.len) blk: {
                    self.idx += 1;
                    break :blk self.ptr.get(last);
                } else null;
            }
        };

        // Live portion of the element buffer. `data.len` counts all stored
        // elements including any gaps left behind by shrinking `set` calls.
        data: DataSlice,
        // One [lhs, rhs) pair per logical segment.
        idxs: IdxsSlice,
        data_capacity: usize,
        idxs_capacity: usize,
        allocator: Allocator,

        /// Returns an empty buffer; no allocation happens until first use.
        pub fn init(allocator: Allocator) Self {
            return Self{
                .data = &[_]T{},
                .idxs = &[_]IndexPair{},
                .data_capacity = 0,
                .idxs_capacity = 0,
                .allocator = allocator,
            };
        }

        /// Number of logical segments stored.
        pub fn len(self: *const Self) usize {
            return self.idxs.len;
        }

        /// Like `init`, but pre-reserves room for `data_capacity` elements
        /// and `index_capacity` segments.
        pub fn initCapacity(allocator: Allocator, data_capacity: usize, index_capacity: usize) Allocator.Error!Self {
            var self = Self.init(allocator);
            try self.ensureTotalCapacity(data_capacity, index_capacity);
            return self;
        }

        /// Releases all owned memory. The buffer is reset to the empty
        /// state and may be reused afterwards.
        pub fn deinit(self: *Self) void {
            self.allocator.free(self.allocatedData());
            self.allocator.free(self.allocatedIdxs());
            self.data_capacity = 0;
            self.idxs_capacity = 0;
            self.data = &[_]T{};
            self.idxs = &[_]IndexPair{};
        }

        /// Returns a const view of segment `index`.
        /// Asserts that `index` is in bounds.
        pub fn get(self: *const Self, index: usize) ConstData {
            // Fixed off-by-one: `index == idxs.len` is out of bounds, so the
            // assert must be strict less-than (was `<=`).
            std.debug.assert(index < self.idxs.len);
            const pair = self.idxs[index];
            return self.data[pair.lhs..pair.rhs];
        }

        /// Overwrites segment `index` with `slice`.
        ///
        /// If `slice` is larger than the existing segment, everything to the
        /// right is shifted to make room (invalidating element pointers past
        /// the segment and possibly reallocating). If it is smaller, the
        /// segment shrinks in place and leaves a gap in `data`.
        pub fn set(self: *Self, index: usize, slice: ConstData) Allocator.Error!void {
            std.debug.assert(index < self.idxs.len);

            const pair = self.idxs[index];

            const old_len = (pair.rhs - pair.lhs);

            // easiest case - it already fits in the slot
            if (old_len >= slice.len) {
                // reduce the rhs if it's longer than the new length
                if (old_len > slice.len) {
                    self.idxs[index].rhs = pair.lhs + slice.len;
                }
                // copy into place (potentially leaves a gap)
                return @memcpy(self.data[pair.lhs .. pair.lhs + slice.len], slice);
            }

            // the slice no longer fits, so grow the buffer by the difference
            // and shift everything to the right of this segment.
            const dif_len = (slice.len - old_len);
            const new_len = self.data.len + dif_len;
            // Fixed: only `dif_len` *extra* elements are needed. The old code
            // passed `new_len` here, and since ensureUnusedCapacity adds
            // `data.len` again, it over-allocated roughly 2x.
            try self.ensureUnusedCapacity(dif_len, 0);

            if (index == (self.idxs.len - 1)) {
                // Last segment: nothing to shift. Copy exactly `slice.len`
                // elements - a prior shrink may have left a trailing gap, in
                // which case `new_len - pair.lhs != slice.len` and the old
                // `@memcpy(data.ptr[pair.lhs..new_len], slice)` would have
                // had mismatched lengths (safety-checked illegal behavior).
                @memcpy(self.data.ptr[pair.lhs .. pair.lhs + slice.len], slice);
            } else { // shift the tail right by dif_len to open a hole
                const old_pos = self.data.ptr[pair.rhs..self.data.len];
                const new_pos = self.data.ptr[pair.rhs + dif_len .. new_len];
                // Fixed: copy elements of type T, not u8 - the old code moved
                // the wrong number of bytes for any T wider than one byte.
                // copyBackwards is required because the regions overlap and
                // the destination is at a higher address.
                std.mem.copyBackwards(T, new_pos, old_pos);

                // fill in the expanded slot with the new slice. Use `.ptr`
                // slicing: the destination may extend past the current
                // `data.len` (capacity is already ensured above).
                @memcpy(self.data.ptr[pair.lhs .. pair.lhs + slice.len], slice);

                // shift the boundaries of every following segment
                for (self.idxs[index + 1 ..]) |*idx| {
                    idx.lhs += dif_len;
                    idx.rhs += dif_len;
                }
            }
            // make final adjustment to boundaries
            self.idxs[index].rhs += dif_len;
            self.data.len += dif_len;
        }

        /// Returns a deep copy using the same allocator.
        /// Caller owns the result and must call `deinit` on it.
        pub fn clone(self: Self) Allocator.Error!Self {
            var cloned = try Self.initCapacity(self.allocator, self.data_capacity, self.idxs_capacity);
            // Fixed: the fresh buffer's slices have len 0, but @memcpy
            // requires source and destination lengths to match, so size them
            // first. Also copy `idxs` unconditionally - segments can exist
            // even when `data` is empty (appends of empty slices).
            cloned.data.len = self.data.len;
            cloned.idxs.len = self.idxs.len;
            @memcpy(cloned.data, self.data);
            @memcpy(cloned.idxs, self.idxs);
            return cloned;
        }

        /// Appends `slice` as a new segment at the end of the buffer.
        pub fn append(self: *Self, slice: ConstData) Allocator.Error!void {
            try self.ensureUnusedCapacity(slice.len, 1);

            // calculate new segment offsets
            const old_end = self.data.len;
            const new_end = old_end + slice.len;

            // copy memory to the end of the data buffer
            @memcpy(self.data.ptr[old_end..new_end], slice);

            // append new index pair for data segment
            self.idxs.ptr[self.idxs.len] = .{ .lhs = old_end, .rhs = new_end };

            // reset slice boundaries
            self.data.len = new_end;
            self.idxs.len += 1;
        }

        /// The full allocated element region (len == data_capacity).
        pub fn allocatedData(self: Self) DataSlice {
            return self.data.ptr[0..self.data_capacity];
        }

        /// The full allocated index region (len == idxs_capacity).
        pub fn allocatedIdxs(self: Self) IdxsSlice {
            return self.idxs.ptr[0..self.idxs_capacity];
        }

        /// Sets both lengths directly, growing capacity as needed.
        /// NOTE: growing the lengths exposes undefined elements.
        pub fn resize(self: *Self, data_size: usize, idxs_size: usize) Allocator.Error!void {
            try self.ensureTotalCapacity(data_size, idxs_size);
            self.data.len = data_size;
            self.idxs.len = idxs_size;
        }

        /// Grows each backing allocation (if needed) to at least the
        /// requested capacity, preferring in-place resize over realloc.
        fn ensureTotalCapacity(self: *Self, data_capacity: usize, idxs_capacity: usize) Allocator.Error!void {

            // check data capacity...
            if (self.data_capacity < data_capacity) {
                const old_memory = self.allocatedData();
                if (self.allocator.resize(old_memory, data_capacity)) {
                    self.data_capacity = data_capacity;
                } else {
                    const new_memory = try self.allocator.alloc(T, data_capacity);
                    @memcpy(new_memory[0..self.data.len], self.data);
                    self.allocator.free(old_memory);
                    self.data.ptr = new_memory.ptr;
                    self.data_capacity = new_memory.len;
                }
            }

            // check idxs capacity...
            if (self.idxs_capacity < idxs_capacity) {
                const old_memory = self.allocatedIdxs();
                if (self.allocator.resize(old_memory, idxs_capacity)) {
                    self.idxs_capacity = idxs_capacity;
                } else {
                    const new_memory = try self.allocator.alloc(IndexPair, idxs_capacity);
                    @memcpy(new_memory[0..self.idxs.len], self.idxs);
                    self.allocator.free(old_memory);
                    self.idxs.ptr = new_memory.ptr;
                    self.idxs_capacity = new_memory.len;
                }
            }
        }

        /// Ensures room for `data_count` more elements and `idxs_count`
        /// more segments beyond what is currently in use.
        pub fn ensureUnusedCapacity(self: *Self, data_count: usize, idxs_count: usize) Allocator.Error!void {
            return self.ensureTotalCapacity(self.data.len + data_count, self.idxs.len + idxs_count);
        }

        /// Returns a front-to-back segment iterator.
        pub fn iterator(self: *const Self) ForwardIterator {
            return .{ .ptr = self, .idx = 0 };
        }
    };
}
19 changes: 13 additions & 6 deletions src/experimental/linear_caching_allocator.zig
Original file line number Diff line number Diff line change
Expand Up @@ -249,11 +249,18 @@ pub const LinearCachingAllocator = struct {

buffer: OrderedCache = OrderedCache.init(std.heap.page_allocator),

backing_allocator: std.mem.Allocator = std.heap.page_allocator,
backing_allocator: std.mem.Allocator,

// TODO: Create a dummy mutex that can be swapped via policy
mutex: std.Thread.Mutex = std.Thread.Mutex{ },

pub fn init(fallback: std.mem.Allocator) Self {
return Self {
.backing_allocator = fallback,
.buffer = OrderedCache.init(std.heap.page_allocator)
};
}

Comment on lines +252 to +263
Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Awesome

pub fn clear(self: *Self) void {
self.buffer.clear(&self.backing_allocator);
}
Expand Down Expand Up @@ -492,7 +499,7 @@ test "LinearCachingAllocator: initialization" {
x: usize = 0
};

var caching_allocator = LinearCachingAllocator{ };
var caching_allocator = LinearCachingAllocator.init(std.heap.page_allocator);

defer caching_allocator.deinit();

Expand All @@ -512,7 +519,7 @@ test "LinearCachingAllocator: basic cache utilization" {
x: usize = 0
};

var caching_allocator = LinearCachingAllocator{ };
var caching_allocator = LinearCachingAllocator.init(std.heap.page_allocator);

defer caching_allocator.deinit();

Expand Down Expand Up @@ -551,7 +558,7 @@ test "LinearCachingAllocator: alignment" {
try std.testing.expect(log2_a < log2_b);
}

var caching_allocator = LinearCachingAllocator{ };
var caching_allocator = LinearCachingAllocator.init(std.heap.page_allocator);

defer caching_allocator.deinit();

Expand Down Expand Up @@ -612,7 +619,7 @@ test "LinearCachingAllocator: resize" {
x: usize = 0
};

var caching_allocator = LinearCachingAllocator{ };
var caching_allocator = LinearCachingAllocator.init(std.heap.page_allocator);

defer caching_allocator.deinit();

Expand Down Expand Up @@ -677,7 +684,7 @@ test "LinearCachingAllocator: cache-warming" {
x: usize = 0
};

var caching_allocator = LinearCachingAllocator{ };
var caching_allocator = LinearCachingAllocator.init(std.heap.page_allocator);

defer caching_allocator.deinit();

Expand Down
65 changes: 65 additions & 0 deletions src/experimental/null_allocator.zig
Original file line number Diff line number Diff line change
@@ -0,0 +1,65 @@
// The NullAllocator is meant to be used as a fallback
// for allocator composition.

// For example, a StackAllocator, if it runs out of memory,
// can dispatch to another allocator to fulfill the request.
// If we want that to signal an error, we can give it the
// NullAllocator as a fallback and that will signal an
// "OutOfMemory" error, enforcing that we don't ask for
// more memory than is on the stack.
Comment on lines +1 to +9
Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is actually pretty genius!

Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It's also possible to use this for something like an allocator that can be "turned off" by mutating itself and replacing its composed allocator with this allocator.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Huh, now that's an interesting idea. Since we can access the member variables directly in Zig, I think this is something that we can already express?



const std = @import("std");

/// Returns an `Allocator` whose every allocation attempt fails:
/// `alloc` returns null (surfacing as `error.OutOfMemory` through the
/// high-level Allocator API), `resize` always refuses, and `free` is a
/// no-op. Intended as a terminal fallback in allocator composition.
pub fn allocator() std.mem.Allocator {
    return .{
        // No state is ever read, so the context pointer is irrelevant.
        .ptr = undefined,
        .vtable = &.{
            .alloc = alloc,
            .resize = resize,
            .free = free,
        },
    };
}

/// Always fails, regardless of the requested size or alignment.
fn alloc(
    ctx: *anyopaque,
    len: usize,
    log2_ptr_align: u8,
    ret_addr: usize,
) ?[*]u8 {
    _ = ret_addr;
    _ = log2_ptr_align;
    _ = len;
    _ = ctx;
    return null;
}

/// Always refuses to resize.
fn resize(
    ctx: *anyopaque,
    old_mem: []u8,
    log2_align: u8,
    new_len: usize,
    ret_addr: usize,
) bool {
    _ = ret_addr;
    _ = new_len;
    _ = log2_align;
    _ = old_mem;
    _ = ctx;
    return false;
}

/// No-op: this allocator never hands out memory, so there is nothing to free.
fn free(
    ctx: *anyopaque,
    old_mem: []u8,
    log2_align: u8,
    ret_addr: usize,
) void {
    _ = ret_addr;
    _ = log2_align;
    _ = old_mem;
    _ = ctx;
}
Loading