diff --git a/src/experimental/irregular_buffer.zig b/src/experimental/irregular_buffer.zig
new file mode 100644
index 0000000..a2090fc
--- /dev/null
+++ b/src/experimental/irregular_buffer.zig
@@ -0,0 +1,201 @@
+
+const std = @import("std");
+
+const Allocator = std.mem.Allocator;
+
+// A buffer of variable-length segments of T, stored contiguously in `data`
+// and addressed by (lhs, rhs) half-open ranges in `idxs`.
+pub fn IrregularBuffer(comptime T: type) type {
+
+    return struct {
+
+        const Self = @This();
+        const DataSlice = []T;
+        const ConstData = [] const T;
+        const IdxsSlice = []IndexPair;
+        const ConstIdxs = [] const IndexPair;
+
+        const IndexPair = struct {
+            lhs: usize,
+            rhs: usize,
+        };
+
+        const ForwardIterator = struct {
+            ptr: *const Self,
+            idx: usize = 0,
+            pub fn next(self: *@This()) ?ConstData {
+                const last = self.idx;
+                return if (last < self.ptr.idxs.len) blk: {
+                    self.idx += 1;
+                    break :blk self.ptr.get(last);
+                } else null;
+            }
+        };
+
+        data: DataSlice,
+        idxs: IdxsSlice,
+        data_capacity: usize,
+        idxs_capacity: usize,
+        allocator: Allocator,
+
+        pub fn init(allocator: Allocator) Self {
+            return Self{
+                .data = &[_]T{},
+                .idxs = &[_]IndexPair{},
+                .data_capacity = 0,
+                .idxs_capacity = 0,
+                .allocator = allocator,
+            };
+        }
+
+        // number of segments (not elements)
+        pub fn len(self: *const Self) usize {
+            return self.idxs.len;
+        }
+
+        pub fn initCapacity(allocator: Allocator, data_capacity: usize, index_capacity: usize) Allocator.Error!Self {
+            var self = Self.init(allocator);
+            try self.ensureTotalCapacity(data_capacity, index_capacity);
+            return self;
+        }
+
+        pub fn deinit(self: *Self) void {
+            self.allocator.free(self.allocatedData());
+            self.allocator.free(self.allocatedIdxs());
+            self.data_capacity = 0;
+            self.idxs_capacity = 0;
+            self.data = &[_]T{ };
+            self.idxs = &[_]IndexPair{ };
+        }
+
+        pub fn get(self: *const Self, index: usize) ConstData {
+            std.debug.assert(index < self.idxs.len); // FIX: was `<=`, which allowed an out-of-bounds read at index == len
+            const pair = self.idxs[index];
+            return self.data[pair.lhs..pair.rhs];
+        }
+
+        // if you try to append a value that is larger than the
+        // selected segment, we have to resize to make it work.
+        pub fn set(self: *Self, index: usize, slice: ConstData) Allocator.Error!void {
+            std.debug.assert(index < self.idxs.len);
+
+            const pair = self.idxs[index];
+
+            const old_len = (pair.rhs - pair.lhs);
+
+            // easiest case - it already fits in the slot
+            if (old_len >= slice.len) {
+                // reduce the rhs if it's longer than the new length
+                if (old_len > slice.len) {
+                    self.idxs[index].rhs = pair.lhs + slice.len;
+                }
+                // copy into place (potentially leaves a gap)
+                return @memcpy(self.data[pair.lhs..pair.lhs + slice.len], slice);
+            }
+
+            // we're now out of bounds, so we calculate new
+            // capacities and adjust the idxs boundaries
+            const dif_len = (slice.len - old_len);
+            const new_len = self.data.len + dif_len;
+            try self.ensureUnusedCapacity(dif_len, 0); // FIX: was `new_len`, which is a total and over-allocated to len + new_len
+
+            if (index == (self.idxs.len - 1)) {
+                @memcpy(self.data.ptr[pair.lhs..new_len], slice);
+            }
+            else { // copy entire buffer up to the right-side
+                const old_pos = self.data.ptr[pair.rhs..self.data.len]; // const: never reassigned
+                const new_pos = self.data.ptr[pair.rhs + dif_len..new_len];
+                std.mem.copyBackwards(T, new_pos, old_pos); // FIX: was hardcoded `u8`; wrong for any T != u8
+
+                // fill in the expanded slot with the new slice
+                @memcpy(self.data.ptr[pair.lhs..pair.lhs + slice.len], slice); // FIX: use .ptr; slot end may exceed current data.len
+
+                // increment new positions in the idxs buffer
+                for (self.idxs[index + 1..]) |*idx| {
+                    idx.lhs += dif_len;
+                    idx.rhs += dif_len;
+                }
+            }
+            // make final adjustment to boundaries
+            self.idxs[index].rhs += dif_len;
+            self.data.len += dif_len;
+        }
+
+        pub fn clone(self: Self) Allocator.Error!Self {
+            var cloned = try Self.initCapacity(self.allocator, self.data_capacity, self.idxs_capacity);
+            // FIX: set lengths to match the source before copying - @memcpy requires equal dest/src lengths
+            try cloned.resize(self.data.len, self.idxs.len);
+            @memcpy(cloned.data, self.data);
+            @memcpy(cloned.idxs, self.idxs);
+            return cloned;
+        }
+
+        pub fn append(self: *Self, slice: ConstData) Allocator.Error!void {
+            try self.ensureUnusedCapacity(slice.len, 1);
+
+            // calculate new segment offsets
+            const old_end = self.data.len;
+            const new_end = old_end + slice.len;
+
+            // copy memory to the end of the data buffer
+            @memcpy(self.data.ptr[old_end..new_end], slice);
+
+            // append new index pair for data segment
+            self.idxs.ptr[self.idxs.len] = .{ .lhs = old_end, .rhs = new_end };
+
+            // reset slice boundaries
+            self.data.len = new_end;
+            self.idxs.len += 1;
+        }
+
+        pub fn allocatedData(self: Self) DataSlice {
+            return self.data.ptr[0..self.data_capacity];
+        }
+
+        pub fn allocatedIdxs(self: Self) IdxsSlice {
+            return self.idxs.ptr[0..self.idxs_capacity];
+        }
+
+        pub fn resize(self: *Self, data_size: usize, idxs_size: usize) Allocator.Error!void {
+            try self.ensureTotalCapacity(data_size, idxs_size);
+            self.data.len = data_size;
+            self.idxs.len = idxs_size;
+        }
+
+        fn ensureTotalCapacity(self: *Self, data_capacity: usize, idxs_capacity: usize) Allocator.Error!void {
+
+            // check data capacity...
+            if (self.data_capacity < data_capacity) {
+                const old_memory = self.allocatedData();
+                if (self.allocator.resize(old_memory, data_capacity)) {
+                    self.data_capacity = data_capacity;
+                } else {
+                    const new_memory = try self.allocator.alloc(T, data_capacity);
+                    @memcpy(new_memory[0..self.data.len], self.data);
+                    self.allocator.free(old_memory);
+                    self.data.ptr = new_memory.ptr;
+                    self.data_capacity = new_memory.len;
+                }
+            }
+
+            // check idxs capacity...
+            if (self.idxs_capacity < idxs_capacity) {
+                const old_memory = self.allocatedIdxs();
+                if (self.allocator.resize(old_memory, idxs_capacity)) {
+                    self.idxs_capacity = idxs_capacity;
+                } else {
+                    const new_memory = try self.allocator.alloc(IndexPair, idxs_capacity);
+                    @memcpy(new_memory[0..self.idxs.len], self.idxs);
+                    self.allocator.free(old_memory);
+                    self.idxs.ptr = new_memory.ptr;
+                    self.idxs_capacity = new_memory.len;
+                }
+            }
+        }
+
+        pub fn ensureUnusedCapacity(self: *Self, data_count: usize, idxs_count: usize) Allocator.Error!void {
+            return self.ensureTotalCapacity(self.data.len + data_count, self.idxs.len + idxs_count);
+        }
+
+        pub fn iterator(self: *const Self) ForwardIterator {
+            return .{ .ptr = self, .idx = 0 };
+        }
+    };
+}
\ No newline at end of file
diff --git a/src/experimental/linear_caching_allocator.zig b/src/experimental/linear_caching_allocator.zig
index 59fada6..d17a31e 100644
--- a/src/experimental/linear_caching_allocator.zig
+++ b/src/experimental/linear_caching_allocator.zig
@@ -249,11 +249,18 @@ pub const LinearCachingAllocator = struct {
 
     buffer: OrderedCache = OrderedCache.init(std.heap.page_allocator),
 
-    backing_allocator: std.mem.Allocator = std.heap.page_allocator,
+    backing_allocator: std.mem.Allocator,
 
     // TODO: Create a dummy mutex that can be swapped via policy
     mutex: std.Thread.Mutex = std.Thread.Mutex{ },
 
+    pub fn init(fallback: std.mem.Allocator) Self {
+        return Self {
+            .backing_allocator = fallback,
+            .buffer = OrderedCache.init(std.heap.page_allocator)
+        };
+    }
+
     pub fn clear(self: *Self) void {
         self.buffer.clear(&self.backing_allocator);
     }
@@ -492,7 +499,7 @@ test "LinearCachingAllocator: initialization" {
         x: usize = 0
     };
 
-    var caching_allocator = LinearCachingAllocator{ };
+    var caching_allocator = LinearCachingAllocator.init(std.heap.page_allocator);
 
     defer caching_allocator.deinit();
 
@@ -512,7 +519,7 @@ test "LinearCachingAllocator: basic cache utilization" {
         x: usize = 0
     };
 
-    var caching_allocator = LinearCachingAllocator{ };
+    var caching_allocator = LinearCachingAllocator.init(std.heap.page_allocator);
 
     defer caching_allocator.deinit();
 
@@ -551,7 +558,7 @@ test "LinearCachingAllocator: alignment" {
         try std.testing.expect(log2_a < log2_b);
     }
 
-    var caching_allocator = LinearCachingAllocator{ };
+    var caching_allocator = LinearCachingAllocator.init(std.heap.page_allocator);
 
     defer caching_allocator.deinit();
 
@@ -612,7 +619,7 @@ test "LinearCachingAllocator: resize" {
         x: usize = 0
    };
 
-    var caching_allocator = LinearCachingAllocator{ };
+    var caching_allocator = LinearCachingAllocator.init(std.heap.page_allocator);
 
     defer caching_allocator.deinit();
 
@@ -677,7 +684,7 @@ test "LinearCachingAllocator: cache-warming" {
         x: usize = 0
     };
 
-    var caching_allocator = LinearCachingAllocator{ };
+    var caching_allocator = LinearCachingAllocator.init(std.heap.page_allocator);
 
     defer caching_allocator.deinit();
 
diff --git a/src/experimental/null_allocator.zig b/src/experimental/null_allocator.zig
new file mode 100644
index 0000000..329f9a8
--- /dev/null
+++ b/src/experimental/null_allocator.zig
@@ -0,0 +1,65 @@
+// The NullAllocator is meant to be used as a fallback
+// for allocator composition.
+
+// For example, a StackAllocator, if it runs out of memory,
+// can dispatch to another allocator to fulfill the request.
+// If we want that to signal an error, we can give it the
+// NullAllocator as a fallback and that will signal an
+// "OutOfMemory" error, enforcing that we don't ask for
+// more memory than is on the stack.
+
+
+const std = @import("std");
+
+const Self = @This();
+
+// Allocator vtable whose alloc always fails and resize always refuses -
+// useful as a composition fallback that enforces OutOfMemory.
+pub fn allocator() std.mem.Allocator {
+    return .{
+        .ptr = undefined,
+        .vtable = &.{
+            .alloc = alloc,
+            .resize = resize,
+            .free = free,
+        },
+    };
+}
+
+fn alloc(
+    ctx: *anyopaque,
+    len: usize,
+    log2_ptr_align: u8,
+    ret_addr: usize
+) ?[*]u8 {
+    _ = ret_addr;
+    _ = log2_ptr_align;
+    _ = len;
+    _ = ctx;
+    return null;
+}
+
+fn resize(
+    ctx: *anyopaque,
+    old_mem: []u8,
+    log2_align: u8,
+    new_len: usize,
+    ret_addr: usize,
+) bool {
+    _ = ret_addr;
+    _ = new_len;
+    _ = log2_align;
+    _ = old_mem;
+    _ = ctx;
+    return false;
+}
+
+fn free(
+    ctx: *anyopaque,
+    old_mem: []u8,
+    log2_align: u8,
+    ret_addr: usize,
+) void {
+    _ = ret_addr;
+    _ = log2_align;
+    _ = old_mem;
+    _ = ctx;
+}
diff --git a/src/experimental/regular_buffer.zig b/src/experimental/regular_buffer.zig
new file mode 100644
index 0000000..3db220c
--- /dev/null
+++ b/src/experimental/regular_buffer.zig
@@ -0,0 +1,179 @@
+
+const std = @import("std");
+
+const Allocator = std.mem.Allocator;
+
+// A buffer of fixed-size segments of T; `data` always holds a multiple
+// of slice_size elements.
+pub fn RegularBuffer(comptime T: type, comptime slice_size: usize) type {
+
+    if (slice_size == 0) {
+        @compileError("Slice size must be greater than zero.");
+    }
+
+    return struct {
+
+        const Self = @This();
+        const ValueType = T;
+        const SliceSize = slice_size;
+        const DataSlice = []T;
+        const ConstData = [] const T;
+
+        const IteratorType = enum {
+            slice, vector
+        };
+
+        data: DataSlice,
+        capacity: usize,
+        allocator: Allocator,
+
+        pub fn init(allocator: Allocator) Self {
+            return Self{
+                .data = &[_]T{},
+                .capacity = 0,
+                .allocator = allocator,
+            };
+        }
+
+        // number of fixed-size segments currently stored
+        pub fn count(self: *const Self) usize {
+            return self.data.len / SliceSize;
+        }
+
+        pub fn initCapacity(allocator: Allocator, size: usize) Allocator.Error!Self {
+            var self = Self.init(allocator);
+            try self.ensureTotalCapacity(size * SliceSize);
+            return self;
+        }
+
+        pub fn deinit(self: *Self) void {
+            self.allocator.free(self.allocatedSlice());
+            self.capacity = 0;
+            self.data = &[_]T{ };
+        }
+
+        pub fn fromOwnedSlice(allocator: Allocator, slice: DataSlice) Self {
+            return Self{
+                .data = slice,
+                .capacity = slice.len,
+                .allocator = allocator,
+            };
+        }
+
+        // Caller owns the returned slice; self is reset to empty.
+        pub fn toOwnedSlice(self: *Self) Allocator.Error!DataSlice {
+            const allocator = self.allocator;
+
+            const old_memory = self.allocatedSlice();
+            if (allocator.resize(old_memory, self.data.len)) {
+                const result = self.data;
+                self.* = init(allocator);
+                return result;
+            }
+            const new_memory = try allocator.alloc(T, self.data.len);
+            @memcpy(new_memory, self.data);
+            allocator.free(old_memory); // FIX: was `@memset(self.data, undefined)` followed by a call to undefined `clearAndFree()`
+            self.* = init(allocator);
+            return new_memory;
+        }
+
+        pub fn get(self: *const Self, index: usize) ConstData {
+            const pos = index * SliceSize;
+            const end = pos + SliceSize;
+            std.debug.assert(end <= self.data.len);
+            return self.data[pos..end];
+        }
+
+        pub fn set(self: *Self, index: usize, slice: ConstData) void {
+            const pos = index * SliceSize;
+            const end = pos + SliceSize;
+            std.debug.assert(end <= self.data.len);
+            @memcpy(self.data[pos..end], slice);
+        }
+
+        pub fn clone(self: Self) Allocator.Error!Self {
+            var cloned = try Self.initCapacity(self.allocator, self.count());
+            // FIX: was initCapacity(self.capacity) - a SliceSize-fold over-allocation - and copied into a length-0 dest
+            try cloned.resize(self.count());
+            @memcpy(cloned.data, self.data);
+            return cloned;
+        }
+
+        pub fn append(self: *Self, slice: ConstData) Allocator.Error!void {
+            std.debug.assert(slice.len == SliceSize);
+            try self.ensureUnusedCapacity(1);
+            const end = self.data.len + SliceSize;
+            @memcpy(self.data.ptr[self.data.len..end], slice);
+            self.data.len = end;
+        }
+
+        pub fn allocatedSlice(self: Self) DataSlice {
+            return self.data.ptr[0..self.capacity];
+        }
+
+        pub fn resize(self: *Self, new_count: usize) Allocator.Error!void {
+            try self.ensureTotalCapacity(new_count * SliceSize);
+            self.data.len = new_count * SliceSize; // FIX: was `new_count` - len counts elements, not segments (see count())
+        }
+
+        // this method is private because the new_capacity is calculated situationally.
+        fn ensureTotalCapacity(self: *Self, new_capacity: usize) Allocator.Error!void {
+
+            if (self.capacity >= new_capacity)
+                return;
+
+            const old_memory = self.allocatedSlice();
+            if (self.allocator.resize(old_memory, new_capacity)) {
+                self.capacity = new_capacity;
+            } else {
+                const new_memory = try self.allocator.alloc(T, new_capacity);
+                @memcpy(new_memory[0..self.data.len], self.data);
+                self.allocator.free(old_memory);
+                self.data.ptr = new_memory.ptr;
+                self.capacity = new_memory.len;
+            }
+        }
+
+        pub fn ensureUnusedCapacity(self: *Self, additional_count: usize) Allocator.Error!void {
+            return self.ensureTotalCapacity(self.data.len + (additional_count * SliceSize));
+        }
+
+        const ForwardIterator = struct {
+            ptr: *const Self,
+            idx: usize = 0,
+            pub inline fn next(self: *@This()) ?ConstData {
+                const last = self.idx;
+                return if (last < self.ptr.data.len) blk: {
+                    self.idx += SliceSize;
+                    break :blk self.ptr.data[last..self.idx];
+                } else null;
+            }
+        };
+
+        pub fn iterator(self: *const Self) ForwardIterator {
+            return .{ .ptr = self, .idx = 0 };
+        }
+    };
+}
+
+// Some types may be vectorizable. Because the slice size is known
+// at compile time, we can create a convenience iterator to return
+// vectors instead of slices.
+
+inline fn VectorIteratorType(comptime T: type) type {
+    return struct {
+        const SliceSize = T.SliceSize;
+        const ValueType = T.ValueType;
+        ptr: *const T,
+        idx: usize = 0,
+        pub inline fn next(self: *@This()) ?@Vector(SliceSize, ValueType) {
+            const last = self.idx;
+            return if (last < self.ptr.data.len) blk: {
+                self.idx += SliceSize;
+                const slice = self.ptr.data[last..self.idx];
+                break :blk slice[0..SliceSize].*;
+            } else null;
+        }
+    };
+}
+
+// Helper function for creating VectorIteratorType
+pub inline fn VectorIterator(buffer: anytype) VectorIteratorType(@TypeOf(buffer.*)) {
+    return .{ .ptr = buffer, .idx = 0 };
+}
\ No newline at end of file
diff --git a/src/experimental/stack_allocator.zig b/src/experimental/stack_allocator.zig
index 9f0795b..3eef3b7 100644
--- a/src/experimental/stack_allocator.zig
+++ b/src/experimental/stack_allocator.zig
@@ -75,6 +75,10 @@ pub fn StackBuffer(comptime size: usize) type {
             }
             return true;
         }
+
+        pub inline fn usedSlice(self: *Self) []u8 {
+            return self.items[0..self.used];
+        }
     };
 }
 
@@ -101,6 +105,10 @@ pub fn StackAllocator(comptime size: usize) type {
         };
     }
 
+    pub inline fn usedSlice(self: *Self) []u8 {
+        return self.stack_buffer.usedSlice();
+    }
+
     pub fn allocator(self: *Self) std.mem.Allocator {
         return .{
             .ptr = self,