-
Notifications
You must be signed in to change notification settings - Fork 1
Buffers and allocators #15
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: core
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,201 @@ | ||
|
|
||
const std = @import("std");

const Allocator = std.mem.Allocator;

/// A contiguous buffer of variable-length ("irregular") segments of `T`.
/// All segment payloads live back-to-back in a single `data` buffer and the
/// `idxs` buffer records each segment's half-open `[lhs, rhs)` bounds.
/// The structure owns both buffers; call `deinit` to release them.
pub fn IrregularBuffer(comptime T: type) type {
    return struct {
        const Self = @This();
        const DataSlice = []T;
        const ConstData = []const T;
        const IdxsSlice = []IndexPair;
        const ConstIdxs = []const IndexPair;

        /// Half-open interval `[lhs, rhs)` into `data` describing one segment.
        const IndexPair = struct {
            lhs: usize,
            rhs: usize,
        };

        /// Iterates segments front-to-back, yielding a read-only slice each.
        const ForwardIterator = struct {
            ptr: *const Self,
            idx: usize = 0,
            pub fn next(self: *@This()) ?ConstData {
                const last = self.idx;
                return if (last < self.ptr.idxs.len) blk: {
                    self.idx += 1;
                    break :blk self.ptr.get(last);
                } else null;
            }
        };

        data: DataSlice,
        idxs: IdxsSlice,
        data_capacity: usize,
        idxs_capacity: usize,
        allocator: Allocator,

        /// Returns an empty buffer; no allocation occurs until first growth.
        pub fn init(allocator: Allocator) Self {
            return Self{
                .data = &[_]T{},
                .idxs = &[_]IndexPair{},
                .data_capacity = 0,
                .idxs_capacity = 0,
                .allocator = allocator,
            };
        }

        /// Number of segments currently stored.
        pub fn len(self: *const Self) usize {
            return self.idxs.len;
        }

        /// Like `init`, but pre-reserves room for `data_capacity` elements
        /// and `index_capacity` segments.
        pub fn initCapacity(allocator: Allocator, data_capacity: usize, index_capacity: usize) Allocator.Error!Self {
            var self = Self.init(allocator);
            try self.ensureTotalCapacity(data_capacity, index_capacity);
            return self;
        }

        /// Releases both backing buffers and resets to the empty state.
        pub fn deinit(self: *Self) void {
            self.allocator.free(self.allocatedData());
            self.allocator.free(self.allocatedIdxs());
            self.data_capacity = 0;
            self.idxs_capacity = 0;
            self.data = &[_]T{};
            self.idxs = &[_]IndexPair{};
        }

        /// Returns a read-only view of segment `index`.
        /// The view is invalidated by any mutating call (`append`, `set`, ...).
        pub fn get(self: *const Self, index: usize) ConstData {
            // BUGFIX: was `index <= self.idxs.len`, which let `index == len`
            // past the assert and into an out-of-bounds read below.
            std.debug.assert(index < self.idxs.len);
            const pair = self.idxs[index];
            return self.data[pair.lhs..pair.rhs];
        }

        // if you try to append a value that is larger than the
        // selected segment, we have to resize to make it work.
        /// Replaces segment `index` with `slice`. Growing a segment shifts
        /// every later segment right (and may reallocate); shrinking leaves
        /// an unused gap in `data`.
        pub fn set(self: *Self, index: usize, slice: ConstData) Allocator.Error!void {
            std.debug.assert(index < self.idxs.len);

            const pair = self.idxs[index];

            const old_len = (pair.rhs - pair.lhs);

            // easiest case - it already fits in the slot
            if (old_len >= slice.len) {
                // reduce the rhs if it's longer than the new length
                if (old_len > slice.len) {
                    self.idxs[index].rhs = pair.lhs + slice.len;
                }
                // copy into place (potentially leaves a gap)
                return @memcpy(self.data[pair.lhs .. pair.lhs + slice.len], slice);
            }

            // we're now out of bounds, so we calculate new
            // capacities and adjust the idxs boundaries
            const dif_len = (slice.len - old_len);
            const new_len = self.data.len + dif_len;
            // BUGFIX: was `ensureUnusedCapacity(new_len, 0)`, which reserved
            // `data.len + new_len` elements (double-counting the current
            // length); we only need `dif_len` extra elements.
            try self.ensureUnusedCapacity(dif_len, 0);

            if (index == (self.idxs.len - 1)) {
                // last segment: write straight past the old end
                @memcpy(self.data.ptr[pair.lhs..new_len], slice);
            } else { // copy entire buffer up to the right-side
                const old_pos = self.data.ptr[pair.rhs..self.data.len];
                const new_pos = self.data.ptr[pair.rhs + dif_len .. new_len];
                // BUGFIX: was `copyBackwards(u8, ...)`, which only compiles
                // when `T == u8`; the slices here are `[]T`. copyBackwards is
                // required because dest overlaps src from the right.
                std.mem.copyBackwards(T, new_pos, old_pos);

                // fill in the expanded slot with the new slice
                @memcpy(self.data[pair.lhs .. pair.lhs + slice.len], slice);

                // increment new positions in the idxs buffer
                for (self.idxs[index + 1 ..]) |*idx| {
                    idx.lhs += dif_len;
                    idx.rhs += dif_len;
                }
            }
            // make final adjustment to boundaries
            self.idxs[index].rhs += dif_len;
            self.data.len += dif_len;
        }

        /// Returns a deep copy using the same allocator. Caller owns it.
        pub fn clone(self: Self) Allocator.Error!Self {
            var cloned = try Self.initCapacity(self.allocator, self.data_capacity, self.idxs_capacity);
            // BUGFIX: the original copied into `cloned.data`/`cloned.idxs`,
            // which are still zero-length right after initCapacity, so the
            // @memcpy length check failed and the clone's lengths were never
            // set. Copy through the capacity pointers and mirror the lengths.
            if (self.data.len > 0) {
                @memcpy(cloned.data.ptr[0..self.data.len], self.data);
                @memcpy(cloned.idxs.ptr[0..self.idxs.len], self.idxs);
            }
            cloned.data.len = self.data.len;
            cloned.idxs.len = self.idxs.len;
            return cloned;
        }

        /// Appends `slice` as a new segment at the end of the buffer.
        pub fn append(self: *Self, slice: ConstData) Allocator.Error!void {
            try self.ensureUnusedCapacity(slice.len, 1);

            // calculate new segment offsets
            const old_end = self.data.len;
            const new_end = old_end + slice.len;

            // copy memory to the end of the data buffer
            @memcpy(self.data.ptr[old_end..new_end], slice);

            // append new index pair for data segment
            self.idxs.ptr[self.idxs.len] = .{ .lhs = old_end, .rhs = new_end };

            // reset slice boundaries
            self.data.len = new_end;
            self.idxs.len += 1;
        }

        /// Full allocated data region (capacity, not just the live length).
        pub fn allocatedData(self: Self) DataSlice {
            return self.data.ptr[0..self.data_capacity];
        }

        /// Full allocated index region (capacity, not just the live length).
        pub fn allocatedIdxs(self: Self) IdxsSlice {
            return self.idxs.ptr[0..self.idxs_capacity];
        }

        /// Grows capacities as needed and sets the live lengths directly.
        /// NOTE: elements in any newly-exposed region are uninitialized.
        pub fn resize(self: *Self, data_size: usize, idxs_size: usize) Allocator.Error!void {
            try self.ensureTotalCapacity(data_size, idxs_size);
            self.data.len = data_size;
            self.idxs.len = idxs_size;
        }

        /// Grows each backing buffer to at least the requested capacity,
        /// trying an in-place resize before falling back to alloc+copy+free.
        fn ensureTotalCapacity(self: *Self, data_capacity: usize, idxs_capacity: usize) Allocator.Error!void {

            // check data capacity...
            if (self.data_capacity < data_capacity) {
                const old_memory = self.allocatedData();
                if (self.allocator.resize(old_memory, data_capacity)) {
                    self.data_capacity = data_capacity;
                } else {
                    const new_memory = try self.allocator.alloc(T, data_capacity);
                    @memcpy(new_memory[0..self.data.len], self.data);
                    self.allocator.free(old_memory);
                    self.data.ptr = new_memory.ptr;
                    self.data_capacity = new_memory.len;
                }
            }

            // check idxs capacity...
            if (self.idxs_capacity < idxs_capacity) {
                const old_memory = self.allocatedIdxs();
                if (self.allocator.resize(old_memory, idxs_capacity)) {
                    self.idxs_capacity = idxs_capacity;
                } else {
                    const new_memory = try self.allocator.alloc(IndexPair, idxs_capacity);
                    @memcpy(new_memory[0..self.idxs.len], self.idxs);
                    self.allocator.free(old_memory);
                    self.idxs.ptr = new_memory.ptr;
                    self.idxs_capacity = new_memory.len;
                }
            }
        }

        /// Ensures room for `data_count` more elements and `idxs_count`
        /// more segments beyond the current live lengths.
        pub fn ensureUnusedCapacity(self: *Self, data_count: usize, idxs_count: usize) Allocator.Error!void {
            return self.ensureTotalCapacity(self.data.len + data_count, self.idxs.len + idxs_count);
        }

        /// Returns a front-to-back segment iterator over this buffer.
        pub fn iterator(self: *const Self) ForwardIterator {
            return .{ .ptr = self, .idx = 0 };
        }
    };
}
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,65 @@ | ||
// The NullAllocator is meant to be used as a fallback
// for allocator composition.

// For example, a StackAllocator, if it runs out of memory,
// can dispatch to another allocator to fulfill the request.
// If we want that to signal an error, we can give it the
// NullAllocator as a fallback and that will signal an
// "OutOfMemory" error, enforcing that we don't ask for
// more memory than is on the stack.

const std = @import("std");

const Self = @This();

/// Returns an `Allocator` whose every request fails:
/// `alloc` yields null (surfaced as `error.OutOfMemory`),
/// `resize` refuses, and `free` is a no-op.
pub fn allocator() std.mem.Allocator {
    return .{
        // stateless: the context pointer is never dereferenced
        .ptr = undefined,
        .vtable = &.{
            .alloc = alloc,
            .resize = resize,
            .free = free,
        },
    };
}

// Always fails; a null return is how the vtable signals OutOfMemory.
fn alloc(context: *anyopaque, length: usize, log2_ptr_align: u8, return_address: usize) ?[*]u8 {
    _ = context;
    _ = length;
    _ = log2_ptr_align;
    _ = return_address;
    return null;
}

// Never grows or shrinks an allocation in place.
fn resize(context: *anyopaque, old_memory: []u8, log2_align: u8, new_length: usize, return_address: usize) bool {
    _ = context;
    _ = old_memory;
    _ = log2_align;
    _ = new_length;
    _ = return_address;
    return false;
}

// Nothing is ever handed out, so there is never anything to release.
fn free(context: *anyopaque, old_memory: []u8, log2_align: u8, return_address: usize) void {
    _ = context;
    _ = old_memory;
    _ = log2_align;
    _ = return_address;
}
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Awesome