diff --git a/build.zig b/build.zig
index db0955a..6337402 100644
--- a/build.zig
+++ b/build.zig
@@ -15,6 +15,7 @@ const Dependencies = struct {
     rshc: *Module,
     vulkan: *Module,
     glfw: *Module,
+    common: *Module,
 };
 
 fn resolveGLFWSystemDeps(m: *Module) void {
@@ -48,10 +49,17 @@ fn buildDeps(b: *Build, opts: BuildOpts) Dependencies {
 
     resolveGLFWSystemDeps(glfw_mod);
 
+    const common_mod = b.createModule(.{
+        .optimize = opts.optimize,
+        .target = opts.target,
+        .root_source_file = b.path("src/common/common.zig"),
+    });
+
     return .{
         .rshc = rshc_mod,
         .vulkan = vulkan_mod,
         .glfw = glfw_mod,
+        .common = common_mod,
     };
 }
 
@@ -69,6 +77,9 @@ fn buildLibrary(
     });
 
     lib_mod.addImport("vulkan", deps.vulkan);
+    lib_mod.addImport("common", deps.common);
+
+    // Temporary for development
     lib_mod.addImport("rshc", deps.rshc);
     lib_mod.addImport("glfw", deps.glfw);
 
@@ -85,16 +96,34 @@ fn buildLibrary(
 
 const SampleEntry = struct {
     name: []const u8,
     path: []const u8,
+    desc: []const u8 = "(no description)",
 
     // these get populated later
     mod: *Module = undefined,
     exe: *Compile = undefined,
 };
 
 var sample_files = [_]SampleEntry{
-    .{ .name = "basic_planes", .path = "basic_planes.zig" },
-    .{ .name = "compute_drawing", .path = "compute_drawing/main.zig" },
-    .{ .name = "test_sample", .path = "test_sample.zig" },
+    .{
+        .name = "basic-planes",
+        .path = "basic_planes.zig",
+        .desc = "basic showcase of bootstrapping vulkan up to 3d rendering",
+    },
+    .{
+        .name = "compute-drawing",
+        .path = "compute_drawing/main.zig",
+        .desc = "drawing using compute shaders",
+    },
+    .{
+        .name = "test-sample",
+        .path = "test_sample.zig",
+        .desc = "test to see if the sample build steps work correctly",
+    },
+    .{
+        .name = "raymarch-fractals",
+        .path = "raymarch_fractals/main.zig",
+        .desc = "pick from a selection of raymarched fractals, all programmed within fragment shaders",
+    },
 };
 
 fn populateSampleModules(
@@ -149,39 +178,48 @@ fn buildSamples(
 ) void {
     populateSampleModules(b, lib_mod, deps, opts);
 
-    const build_sample = b.option(
-        []const u8,
-        "sample",
-        "specify sample to build and run",
-    );
-
-    if (build_sample) |sample_name| {
-        var entry: ?*SampleEntry = null;
-
-        for (&sample_files) |*f| {
-            if (std.mem.order(u8, sample_name, f.name) == .eq) {
-                entry = f;
-                break;
-            }
-        }
-
-        if (entry == null) return;
+    for (sample_files) |entry| {
+        b.installArtifact(entry.exe);
 
-        b.installArtifact(entry.?.exe);
+        const run_step = b.addRunArtifact(entry.exe);
+        run_step.addArgs(b.args orelse &.{});
 
-        const run_step = b.addRunArtifact(entry.?.exe);
         run_step.step.dependOn(b.getInstallStep());
 
-        const run_cmd = b.step("run", "run a sample executable");
+        const step_name = std.fmt.allocPrint(
+            b.allocator,
+            "run-{s}",
+            .{entry.name},
+        ) catch @panic("Achievement Get: How did we get here?");
+
+        const run_cmd = b.step(step_name, entry.desc);
         run_cmd.dependOn(&run_step.step);
     }
 }
 
 /// adds a single module and compile step containing
 /// all included tests (based on specified options)
-fn buildTests(b: *Build, lib_mod: *Module, deps: Dependencies) void {
-    _ = b;
-    _ = lib_mod;
-    _ = deps;
+fn buildTests(b: *Build, lib_mod: *Module, deps: Dependencies, opts: BuildOpts) void {
+    const test_comp = b.addTest(.{
+        .name = "unit_tests",
+        .root_module = lib_mod,
+        .target = opts.target,
+    });
+
+    const common_tests = b.addTest(.{
+        .name = "common_unit_tests",
+        .root_module = deps.common,
+        .target = opts.target,
+    });
+    b.installArtifact(test_comp);
+    b.installArtifact(common_tests);
+
+    const test_step = b.addRunArtifact(test_comp);
+    const common_test_step = b.addRunArtifact(common_tests);
+    const test_cmd = b.step("test", "run all unit tests");
+
+    test_cmd.dependOn(&test_step.step);
+    test_cmd.dependOn(&common_test_step.step);
 }
 
 // Although this function looks imperative, note that its job is to
 // declaratively construct a build graph that will be executed by an external
@@ -192,12 +230,24 @@ pub fn build(b: *Build) void {
         .optimize = b.standardOptimizeOption(.{}),
     };
 
+    const opt_build_samples = b.option(
+        bool,
+        "build_samples",
+        "whether or not to build sample executables (defaults: true)",
+    ) orelse true;
+
+    const opt_build_tests = b.option(
+        bool,
+        "build_tests",
+        "whether or not to build unit tests (defaults: true)",
+    ) orelse true;
+
     const deps = buildDeps(b, opts);
     const lib_mod, const lib_exe = buildLibrary(b, deps, opts);
 
     // handle samples and testing if specified
-    buildSamples(b, lib_mod, deps, opts);
-    buildTests(b, lib_mod, deps);
+    if (opt_build_samples) buildSamples(b, lib_mod, deps, opts);
+    if (opt_build_tests) buildTests(b, lib_mod, deps, opts);
 
     // zls-friendly check step
     // (which made all the rest of the code way grosser)
diff --git a/samples/basic_planes.zig b/samples/basic_planes.zig
index 485da4b..37888d8 100644
--- a/samples/basic_planes.zig
+++ b/samples/basic_planes.zig
@@ -381,7 +381,7 @@ pub fn main() !void {
     defer window.destroy();
 
     glfw.vulkanSupported() catch |err| {
-        std.debug.print("Could not load Vulkan\n", .{});
+        std.log.err("Could not load Vulkan", .{});
         return err;
     };
 
@@ -400,5 +400,5 @@ pub fn main() !void {
         try mainLoop();
     }
 
-    std.debug.print("You win!\n", .{});
+    std.log.info("You win!", .{});
 }
diff --git a/samples/common/render_quad.zig b/samples/common/render_quad.zig
index 787753b..9391afd 100644
--- a/samples/common/render_quad.zig
+++ b/samples/common/render_quad.zig
@@ -1,6 +1,7 @@
 const std = @import("std");
 const ray = @import("ray");
 const api = ray.api;
+const math = ray.math;
 
 const Allocator = std.mem.Allocator;
 
@@ -81,7 +82,7 @@ pub fn initSelf(self: *Self, ctx: *const Context, allocator: Allocator, config:
         },
         .viewport = .{ .Swapchain = config.swapchain },
         .descriptors = if (config.frag_descriptors) |fd| &.{
-            fd.h_desc_layout,
+            fd.vkLayout(),
         } else &.{},
     });
     defer fixed_functions_config.deinit();
 
@@ -119,7 +120,7 @@ pub fn drawOneShot(self: *const Self, cmd_buf: *const CommandBuffer, framebuffer
     self.renderpass.begin(cmd_buf, framebuffer, image_index);
 
     if (self.desc) |d| {
-        d.bind(cmd_buf, self.pipeline.h_pipeline_layout, .{});
+        d.use(cmd_buf, self.pipeline.h_pipeline_layout, .{});
     }
 
     self.dev.draw(cmd_buf, 6, 1, 0, 0);
diff --git a/samples/compute_drawing/main.zig b/samples/compute_drawing/main.zig
index 0c9cab1..c75022e 100644
--- a/samples/compute_drawing/main.zig
+++ b/samples/compute_drawing/main.zig
@@ -25,7 +25,7 @@ const GraphicsPipeline = api.GraphicsPipeline;
 const RenderPass = api.RenderPass;
 const FixedFunctionState = api.FixedFunctionState;
 const Descriptor = api.Descriptor;
-const DescriptorBinding = api.ResolvedDescriptorBinding;
+const DescriptorLayout = api.DescriptorLayout;
 
 const CommandBuffer = api.CommandBuffer;
 const Semaphore = api.Semaphore;
@@ -141,6 +141,10 @@ const SampleState = struct {
             .inst_extensions = helpers.glfwInstanceExtensions(),
             .loader = glfw.glfwGetInstanceProcAddress,
             .window = &self.window,
+            .management = .{
+                .allocator = self.allocator,
+                .pool_sizes = 1024,
+            },
         });
     }
 
@@ -177,19 +181,28 @@ const SampleState = struct {
        try
self.gpu_state.uniforms.setData(&self.gpu_state.host_uniforms); // create fragment-specific descriptors - self.graphics.descriptor = try Descriptor.init(self.ctx, self.allocator, .{ - .bindings = &.{ .{ - .data = .{ .Uniform = self.gpu_state.uniforms.buffer() }, - .stages = .{ .fragment_bit = true }, - }, DescriptorBinding{ - .data = .{ .Sampler = .{ - .sampler = self.gpu_state.render_sampler, - .view = self.gpu_state.render_view.h_view, - } }, - .stages = .{ .fragment_bit = true }, - } }, + var frag_desc_layout = try DescriptorLayout.init(self.ctx, self.allocator, 2); + frag_desc_layout.addDescriptors(&.{ + .{ 0, "FragUniforms", .Uniform, .{ .fragment_bit = true } }, + .{ 1, "MainTex", .Sampler, .{ .fragment_bit = true } }, + }); + try frag_desc_layout.resolve(); + + self.graphics.descriptor = try Descriptor.init(self.ctx, .{ + .layout = frag_desc_layout, + .usage = .{ + .lifetime_bits = .Static, + }, }); + self.graphics.descriptor.bindUniformsNamed( + "FragUniforms", + self.gpu_state.uniforms.buffer(), + ); + self.graphics.descriptor.bindSamplerNamed("MainTex", self.gpu_state.render_sampler, self.gpu_state.render_view); + + self.graphics.descriptor.update(); + try self.graphics.render_quad.initSelf(self.ctx, self.allocator, .{ .frag_shader = &frag_shader, .frag_descriptors = &self.graphics.descriptor, @@ -243,26 +256,27 @@ const SampleState = struct { self.gpu_state.compute_uniforms = try UniformBuffer(ComputeUniforms).create(self.ctx); self.gpu_state.particles = try StorageBuffer(Particle).create(self.ctx, PARTICLE_COUNT); - const descriptors: []const DescriptorBinding = &.{ - .{ - .data = .{ .Uniform = self.gpu_state.compute_uniforms.buffer() }, - .stages = .{ .compute_bit = true }, - }, - .{ - .data = .{ .StorageBuffer = self.gpu_state.particles.buffer() }, - .stages = .{ .compute_bit = true }, - }, - .{ - .data = .{ .Image = .{ - .img = &self.gpu_state.render_target, - .view = self.gpu_state.render_view.h_view, - } }, - .stages = .{ .compute_bit = true }, - }, - }; - self.compute.pipeline = try Compute.init(self.ctx, self.allocator, .{ + var compute_desc_layout = try DescriptorLayout.init(self.ctx, self.allocator, 3); + compute_desc_layout.addDescriptors(&.{ + .{ 0, "Uniforms", .Uniform, .{ .compute_bit = true } }, + .{ 1, "Particles", .StorageBuffer, .{ .compute_bit = true } }, + .{ 2, "RenderTarget", .Image, .{ .compute_bit = true } }, + }); + try compute_desc_layout.resolve(); + + var compute_descriptor = try Descriptor.init(self.ctx, .{ + .layout = compute_desc_layout, + .usage = .{ .lifetime_bits = .Static }, + }); + + compute_descriptor.bindUniformsNamed("Uniforms", self.gpu_state.compute_uniforms.buffer()); + compute_descriptor.bindBufferNamed("Particles", self.gpu_state.particles.buffer()); + compute_descriptor.bindImageNamed("RenderTarget", &self.gpu_state.render_target, self.gpu_state.render_view); + compute_descriptor.update(); + + self.compute.pipeline = try Compute.init(self.ctx, .{ .shader = &shader, - .desc_bindings = descriptors, + .desc = compute_descriptor, }); } diff --git a/samples/raymarch_fractals/main.zig b/samples/raymarch_fractals/main.zig new file mode 100644 index 0000000..4ec4fe9 --- /dev/null +++ b/samples/raymarch_fractals/main.zig @@ -0,0 +1,291 @@ +//! Basic example of compute shader initialization using the Low-Level +//! unmanaged Vulkan API. +//! NOTE: This example will likely be deprecated in favor of using the upcoming +//! Managed low level API (see milestone 0.1). 
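+//!
+//! The sample expects the name of a shader file as its first argument,
+//! resolved relative to raymarch_fractals/shaders/. Assuming the per-sample
+//! run steps added in build.zig ("run-" ++ sample name, with b.args forwarded),
+//! an invocation would look something like:
+//!
+//!     zig build run-raymarch-fractals -- juliabulb.glsl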
+const std = @import("std"); +const ray = @import("ray"); +const glfw = @import("glfw"); +const helpers = @import("helpers"); +const vk = @import("vulkan"); + +const api = ray.api; +const math = ray.math; + +const Vec3 = math.Vec3; + +const RenderQuad = helpers.RenderQuad; + +const FixedBufferAllocator = std.heap.FixedBufferAllocator; +const Allocator = std.mem.Allocator; + +const Context = ray.Context; +const Swapchain = api.Swapchain; +const FrameBuffer = api.FrameBuffer; +const GraphicsPipeline = api.GraphicsPipeline; +const RenderPass = api.RenderPass; +const FixedFunctionState = api.FixedFunctionState; +const Descriptor = api.Descriptor; +const DescriptorBinding = api.ResolvedDescriptorBinding; +const CommandBuffer = api.CommandBuffer; + +const Semaphore = api.Semaphore; +const Fence = api.Fence; + +const GraphicsQueue = api.GraphicsQueue; +const PresentQueue = api.PresentQueue; + +const UniformBuffer = api.ComptimeUniformBuffer; +const StorageBuffer = api.ComptimeStorageBuffer; +const Image = api.Image; +const Texture = api.TexImage; +const Compute = api.Compute; + +const log = std.log.scoped(.application); + +const GraphicsState = struct { + descriptor: Descriptor, + framebuffers: FrameBuffer, + render_quad: RenderQuad, + + cmd_buf: CommandBuffer, + + pub fn deinit(self: *GraphicsState) void { + self.render_quad.deinit(); + self.framebuffers.deinit(); + self.cmd_buf.deinit(); + self.descriptor.deinit(); + } +}; + +const ApplicationUniforms = extern struct { + transform: math.Mat4, + resolution: math.Vec2, + time: f32, + aspect: f32, +}; + +const GPUState = struct { + host_uniforms: ApplicationUniforms, + + // compute and graphics visible + uniforms: UniformBuffer(ApplicationUniforms), + texture: Texture, + + // compute and graphics visible (needs viewport quad) + // written to in the compute shader and simply mapped to the viewport + + pub fn deinit(self: *GPUState) void { + self.uniforms.deinit(); + self.texture.deinit(); + } +}; + +const SyncState = struct { + sem_render: Semaphore, + sem_acquire_frame: Semaphore, + frame_fence: Fence, + + pub fn deinit(self: *const SyncState) void { + self.sem_render.deinit(); + self.sem_acquire_frame.deinit(); + self.frame_fence.deinit(); + } +}; + +const SampleState = struct { + ctx: *Context = undefined, + swapchain: Swapchain = undefined, + window: glfw.Window = undefined, + allocator: Allocator, + + graphics: GraphicsState = undefined, + gpu_state: GPUState = undefined, + + sync: SyncState = undefined, + + pub fn createContext(self: *SampleState) !void { + self.window = try helpers.makeBasicWindow(900, 600, "Sacred Geometry"); + self.ctx = try Context.init(self.allocator, .{ + .inst_extensions = helpers.glfwInstanceExtensions(), + .loader = glfw.glfwGetInstanceProcAddress, + .window = &self.window, + }); + } + + pub fn createSwapchain(self: *SampleState) !void { + self.swapchain = try Swapchain.init(self.ctx, self.allocator, .{ + .requested_extent = helpers.windowExtent(&self.window), + .requested_present_mode = .mailbox_khr, + .requested_format = .{ + .color_space = .srgb_nonlinear_khr, + .format = .r8g8b8a8_srgb, + }, + }); + } + + pub fn createGraphicsPipeline(self: *SampleState, file: []const u8) !void { + const frag_shader = try helpers.initSampleShader( + self.ctx, + self.allocator, + file, + .Fragment, + ); + + defer frag_shader.deinit(); + + const size = self.window.dimensions(); + + // do the texture + self.gpu_state.texture = try Texture.fromFile( + self.ctx, + self.allocator, + "textures/clear.png", + ); + + // initialize uniforms 
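+        // NOTE: ApplicationUniforms is an extern struct, so the field order and
+        // layout here are exactly what the shader sees; keep it in sync with the
+        // FragUniforms block (mat4 transform, vec2 resolution, float time,
+        // float aspect) declared in the GLSL sources under shaders/.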
+ self.gpu_state.host_uniforms = .{ + .time = 0, + .transform = math.Mat4.scale(math.vec(.{ 20.0, 1.0, 1.0 })), + .resolution = math.vec(.{ 1.0, 1.0 }), + .aspect = @as(f32, @floatFromInt(size.height)) / @as(f32, @floatFromInt(size.width)), + }; + + self.gpu_state.uniforms = try UniformBuffer(ApplicationUniforms) + .create(self.ctx); + + try self.gpu_state.uniforms.setData(&self.gpu_state.host_uniforms); + + // create fragment-specific descriptors + self.graphics.descriptor = try Descriptor.init(self.ctx, self.allocator, .{ .bindings = &.{ .{ + .data = .{ .Uniform = self.gpu_state.uniforms.buffer() }, + .stages = .{ .fragment_bit = true }, + }, .{ + .data = .{ .Sampler = .{ + .sampler = self.gpu_state.texture.h_sampler, + .view = self.gpu_state.texture.view.h_view, + } }, + .stages = .{ .fragment_bit = true }, + } } }); + + try self.graphics.render_quad.initSelf(self.ctx, self.allocator, .{ + .frag_shader = &frag_shader, + .frag_descriptors = &self.graphics.descriptor, + .swapchain = &self.swapchain, + }); + + self.graphics.framebuffers = try FrameBuffer.initAlloc(self.ctx, self.allocator, .{ + .depth_view = null, + .swapchain = &self.swapchain, + .renderpass = &self.graphics.render_quad.renderpass, + }); + + self.graphics.cmd_buf = try CommandBuffer.init(self.ctx, .{}); + } + + pub fn createSyncObjects(self: *SampleState) !void { + self.sync.frame_fence = try Fence.init(self.ctx, true); + self.sync.sem_acquire_frame = try Semaphore.init(self.ctx); + self.sync.sem_render = try Semaphore.init(self.ctx); + + //compute sync objects + } + + pub fn active(self: *const SampleState) bool { + return !self.window.shouldClose(); + } + + fn updateUniforms(self: *SampleState) !void { + self.gpu_state.host_uniforms.time = @floatCast(glfw.getTime()); + try self.gpu_state.uniforms.setData(&self.gpu_state.host_uniforms); + } + + // intercepts errors and logs them + pub fn update(self: *SampleState) !void { + glfw.pollEvents(); + + // wait for v + try self.sync.frame_fence.wait(); + try self.sync.frame_fence.reset(); + + try self.updateUniforms(); + + _ = try self.swapchain.getNextImage(self.sync.sem_acquire_frame.h_sem, null); + + try self.graphics.cmd_buf.reset(); + try self.graphics.cmd_buf.begin(); + self.graphics.render_quad.drawOneShot( + &self.graphics.cmd_buf, + &self.graphics.framebuffers, + ); + + try self.graphics.cmd_buf.end(); + // submit the command buffer to a synchronized queue + try self.ctx.submitCommands(&self.graphics.cmd_buf, .Graphics, .{ + .fence_wait = self.sync.frame_fence.h_fence, + .sem_sig = self.sync.sem_render.h_sem, + .sem_wait = self.sync.sem_acquire_frame.h_sem, + }); + + try self.ctx.presentFrame(&self.swapchain, .{ + .sem_wait = self.sync.sem_render.h_sem, + }); + } + + pub fn deinit(self: *SampleState) void { + self.ctx.dev.waitIdle() catch {}; + + self.graphics.deinit(); + self.gpu_state.deinit(); + + self.swapchain.deinit(); + + self.sync.deinit(); + self.ctx.deinit(); + self.window.destroy(); + } +}; + +pub fn main() !void { + var buf_alloc = std.heap.GeneralPurposeAllocator(.{ + }).init; + defer _ = buf_alloc.deinit(); + + var args = try std.process.argsWithAllocator(buf_alloc.allocator()); + defer args.deinit(); + _ = args.next(); + + const shader_file = args.next() orelse { + log.err("Usage: raymarch_fractal ", .{}); + return error.RequiresArgument; + }; + + var state: SampleState = .{ + .allocator = buf_alloc.allocator(), + }; + + // figure out the full path + var path_builder = std.ArrayList(u8).init(buf_alloc.allocator()); + defer path_builder.deinit(); + + try 
path_builder.appendSlice("raymarch_fractals/shaders/"); + try path_builder.appendSlice(shader_file); + + try state.createContext(); + try state.createSyncObjects(); + try state.createSwapchain(); + try state.createGraphicsPipeline(path_builder.items); + + state.window.show(); + + while (state.active()) { + state.update() catch |err| { + log.err("An error occured while running: {!}\n ....Terminating", .{err}); + state.deinit(); + + return err; + }; + } + + state.deinit(); + log.info("You Win!", .{}); +} diff --git a/samples/raymarch_fractals/shaders/hollow_boi.glsl b/samples/raymarch_fractals/shaders/hollow_boi.glsl new file mode 100644 index 0000000..cd8aa11 --- /dev/null +++ b/samples/raymarch_fractals/shaders/hollow_boi.glsl @@ -0,0 +1,170 @@ +#version 450 + +#define EPSILON 0.0001 +#define ITERATIONS 256 +#define MAX_DIST 1000.0 + +#define PI 3.14159265 + +//#define NORMALS_ONLY + +layout(location = 0) out vec4 fragColor; +layout(location = 0) in vec2 texCoord; + +const vec3 spos = vec3(0.0); +const vec3 light = normalize(vec3(1.0, 0.4, 2.3)); + +const float sp_radius = 5.0; + +layout(binding = 0) uniform FragUniforms { + mat4 transform; + vec2 resolution; + float time; + float aspect; +} u; + +layout(binding = 1) uniform sampler2D main_tex; + +const vec2 mouse = vec2(0, 0); + +float sdfSubtract(float d1, float d2) { + return max(-d2, d1); +} + +float sdfSmoothUnion(float d1, float d2, float r) { + float h = clamp( 0.5 + 0.5*(d2-d1)/r, 0.0, 1.0 ); + return mix( d2, d1, h ) - r*h*(1.0-h); +} + +float sdfSmooth(float d, float r) { + return d - r; +} + +vec3 sdfMirrorZ(vec3 pos, float offset) { + return vec3(pos.xy, abs(pos.z - offset)); +} + +float cylinder(vec3 p, float h, float r) { + vec2 d = abs(vec2(length(p.xz),p.y)) - vec2(r,h); + return min(max(d.x,d.y),0.0) + length(max(d,0.0)); +} + +float sphere(vec3 p, vec3 sp, float r) { + return length(p - sp) - r; +} + + +float horns(vec3 pos) { + vec3 p = pos - vec3(0.0, 0.96, 0.0); + const float rad = 0.2; + + vec2 wut = vec2(length(p.yz) - 1.0, p.x); + float donut = length(wut) - rad; + + float mid = sphere(pos, vec3(0.0, 1.9, 0.0), 0.6); + + return sdfSubtract(donut, mid); +} + + + +float body(vec3 pos) { + return 0.0; +} + + + +float scene(in vec3 pos) { + float head_base = sdfSmooth(cylinder(pos, 0.4,0.5), 0.5); + float eye = sphere(sdfMirrorZ(pos, 0.0), vec3(0.75, 0.0, 0.4), 0.3); + + return sdfSmoothUnion( horns(pos), sdfSubtract(head_base, eye), 0.2); + //return horns(pos); +} + + + +float march(vec3 o, vec3 dir) { + float t = 0.0; + for (int i = 0; i < ITERATIONS; i++) { + vec3 ray = o + dir * t; + + float cd = scene(ray); + + t += cd; + + if (t <= EPSILON || t > MAX_DIST) { + break; + } + } + + return t; +} + +vec3 calcNormals(in vec3 pos) { + const float e = 0.0005; + const vec2 k = vec2(1, -1) * 0.5773 * e; + return normalize( + k.xyy*scene(pos+k.xyy) + + k.yyx*scene(pos+k.yyx) + + k.yxy*scene(pos+k.yxy) + + k.xxx*scene(pos+k.xxx) ); +} + + +vec3 render(float dist, vec3 pos) { + if (dist > MAX_DIST) return vec3(0.0); + + vec3 n = calcNormals(pos); + + #ifdef NORMALS_ONLY + return abs(n); + #endif + + float dif = 0.9 * clamp(dot(light, n), 0.0, 1.0); + + float dotx = abs(dot(n, vec3(1.0, 0.0, 0.0))); + float doty = abs(dot(n, vec3(0.0, 1.0, 0.0))); + float dotz = abs(dot(n, vec3(0.0, 0.0, 1.0))); + + vec3 albedo = + texture(main_tex, normalize(pos.yz) + vec2(0.5)).rgb * dotx + + texture(main_tex, normalize(pos.xz) + vec2(0.5)).rgb * doty + + texture(main_tex, normalize(pos.xy) + vec2(0.5)).rgb * dotz; + vec3 ambient = vec3(0.1); 
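+    // (albedo above is a simple triplanar-style blend: each planar texture
+    // lookup is weighted by the matching component of the surface normal)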
+ return albedo * dif + ambient; +} + +// check to make sure y is represented in the correct coordinate space +vec3 lookat(vec3 orig, vec3 eye, vec3 pos) { + const vec3 world_up = vec3(0.0, 1.0, 0.0); + + vec3 z = normalize(pos - eye); + vec3 x = normalize(cross(z, world_up)); + vec3 y = normalize(cross(x, z)); + + mat3 look = mat3(x, y, z); + + return orig * look; +} + +void main() { + // Normalized screen coordinates (from -1 to 1) + // quite helpful for figuring out ray direction without doing an explicit + // perspective projection + vec2 uv = (texCoord - 0.5 * u.resolution.xy)/u.resolution.y; + + // adjust for vulkan + uv.y = -uv.y * u.aspect; + + float ylook = 0.0; //4.0 * (iMouse.y - 0.5 * iResolution.y)/iResolution.y; + + float a = mouse.x / u.resolution.x * 2.0 * PI; + + vec3 pos = vec3(4.0 * cos(a), ylook, 4.0 * sin(a)); + vec3 dir = lookat(normalize(vec3(uv, 1.0)), pos, spos); + + float dist = march(pos, dir); + + fragColor = vec4(render(dist, pos + dir * dist), 1.0); +} diff --git a/samples/raymarch_fractals/shaders/juliabulb.glsl b/samples/raymarch_fractals/shaders/juliabulb.glsl new file mode 100644 index 0000000..b55da56 --- /dev/null +++ b/samples/raymarch_fractals/shaders/juliabulb.glsl @@ -0,0 +1,189 @@ +#version 450 + +#define EPSILON 0.001 +#define ITERATIONS 48 +#define MAX_DIST 80.0 + +#define PI 3.14159265 + +//#define NORMALS_ONLY + +layout(location = 0) out vec4 fragColor; +layout(location = 0) in vec2 texCoord; + +const float sp_radius = 5.0; + +layout(binding = 0) uniform FragUniforms { + mat4 transform; + vec2 resolution; + float time; + float aspect; +} u; + +#define TIME (u.time * 0.1) + +const vec2 mouse = vec2(0, 0); + +const int fractal_iterations = 4; +const float yeet = 4.0; +const float power = 8.0; + +vec4 freakyFractal(in vec3 pos) { + float dr = 1.0; + float r; + vec3 o = vec3(1.0); + vec3 z = pos; + vec3 c = vec3(sin(TIME), 1.0 - cos(TIME), cos(TIME)); + + for (int i = 0; i < fractal_iterations; i++) { + r = length(z); + o = min(o, abs(z)); + + if (r > yeet) break; + + float theta = 8.0 *acos(clamp(z.z / r, -1.0, 1.0)); + float phi = 8.0 * atan(z.y, z.x); + + dr = power * pow(r, 3.5) * dr; + + float scaled_r = pow(r, power); + + z = scaled_r * vec3( + sin(theta) * cos(phi), + sin(phi) * sin(theta), + cos(theta) + ) + c; + } + + return vec4(0.5 * log(r) * sqrt(r) / max(dr, 1e-6), abs(normalize(o))); +} + +const vec3 spos = vec3(0.0); + + +vec4 vecScene(in vec3 pos) { + return freakyFractal(pos); +} + +float scene(in vec3 pos) { + return freakyFractal(pos).x; +} + +float march(vec3 o, vec3 dir) { + float t = 0.0; + for (int i = 0; i < ITERATIONS; i++) { + + vec3 ray = o + dir * t; + + float cd = scene(ray); + + t += cd; + + if (cd < EPSILON * t || t > MAX_DIST) { + break; + } + } + + return t; +} + +vec3 calcNormals(in vec3 pos) { + const float e = EPSILON; + const vec2 k = vec2(1, -1) * 0.5773 * e; + return normalize( + k.xyy*scene(pos+k.xyy) + + k.yyx*scene(pos+k.yyx) + + k.yxy*scene(pos+k.yxy) + + k.xxx*scene(pos+k.xxx) ); +} + +vec3 calcNormals1(in vec3 pos) { + vec3 eps = vec3(0.001,0.0,0.0); + return normalize( vec3( + scene(pos+eps.xyy).x - scene(pos-eps.xyy).x, + scene(pos+eps.yxy).x - scene(pos-eps.yxy).x, + scene(pos+eps.yyx).x - scene(pos-eps.yyx).x ) ); +} + +const vec3 col_c = vec3(1.0, 0.03, 0.82); +const vec3 col_a = vec3(0.1, 1.0, 0.9); +const vec3 col_b = vec3(0.4, 0.05, 0.5); + +const vec3 sky_a = vec3(1.0, 0.95, 0.95); +const vec3 sky_b = vec3(0.3, 0.9, 1.0); +const vec3 sky_c = vec3(0.2, 0.2, 0.4); + +// Basic ass phong lighting, 
replace with something fancier +// if you wish +vec3 render(float dist, vec3 pos, vec3 look, vec3 dir, vec3 light) { + vec3 light1 = vec3( 0.577, 0.577, -0.577 ); + + if (dist > MAX_DIST) + { + vec3 col = 1.3*vec3(0.8,.95,1.0)*(0.7+0.3*dir.y); + col += vec3(0.8,0.7,0.5)*pow( clamp(dot(dir,light1),0.0,1.0), 32.0 ); + return col; + } + + vec3 n = calcNormals(pos); + vec3 o = vecScene(pos).yzw; + + #ifdef NORMALS_ONLY + return abs(n); + #endif + + float trapMix = length(o); + vec3 base = vec3(0.2, 0.9, 1.0); + vec3 band = 0.25 + 0.5 * cos(6.2831 * (o + 0.2 * o)); + vec3 albedo = mix(col_a, col_b, band); + + float dif = max(dot(n, light), 0.0); + + + float shine = 64.0; + vec3 halfa = normalize(light + look); + float spa = max(dot(halfa, n), 0.0); + float spec = pow(spa, shine); + + return albedo * dif / dist + albedo * 0.1 / dist + spec * vec3(1) / dist; +} + +// check to make sure y is represented in the correct coordinate space +vec3 lookat(vec3 orig, vec3 eye, vec3 pos) { + const vec3 world_up = vec3(0.0, 1.0, 0.0); + + vec3 z = normalize(pos - eye); + vec3 x = normalize(cross(z, world_up)); + vec3 y = normalize(cross(x, z)); + + mat3 look = mat3(x, y, z); + + return orig * look; +} + +void main() { + // Normalized screen coordinates (from -1 to 1) + // quite helpful for figuring out ray direction without doing an explicit + // perspective projection + vec2 uv = (texCoord - 0.5 * u.resolution.xy)/u.resolution.y; + + // adjust for vulkan + uv.y = -uv.y * u.aspect; + + float ylook = 0.0;//sin(TIME * 0.5) * 4.0; + + float a = mouse.x / u.resolution.x * 2.0 * PI; + + float dist = (sin(TIME) + 1.0) * 0.5 * 4.0 + 1.0; + + vec3 pos = vec3(dist * cos(TIME), ylook, dist * sin(TIME)); + vec3 lpos = vec3(dist * cos(TIME * 3.0), ylook, dist * sin(TIME * 3.0)); + + vec3 dir = lookat(normalize(vec3(uv, 1.0)), pos, spos); + vec3 look = lookat(normalize(vec3(uv, 1.0)), pos, spos); + vec3 light = lookat(normalize(vec3(0.0, 0.0, 1.0)), lpos, spos); + + float di = march(pos, dir); + + fragColor = vec4(render(di, pos + dir * di, look, dir, light), 1.0); +} diff --git a/samples/raymarch_fractals/shaders/mandelbox.glsl b/samples/raymarch_fractals/shaders/mandelbox.glsl new file mode 100644 index 0000000..86a843c --- /dev/null +++ b/samples/raymarch_fractals/shaders/mandelbox.glsl @@ -0,0 +1,112 @@ +#version 450 + +#define EPSILON 0.0001 +#define ITERATIONS 256 +#define MAX_DIST 1000.0 + +#define PI 3.14159265 + +//#define NORMALS_ONLY + +layout(location = 0) out vec4 fragColor; +layout(location = 0) in vec2 texCoord; + +const vec3 spos = vec3(0.0); +const vec3 light = normalize(vec3(1.0, 0.4, 2.3)); + +const float sp_radius = 5.0; + +layout(binding = 0) uniform FragUniforms { + mat4 transform; + vec2 resolution; + float time; + float aspect; +} u; + +const vec2 mouse = vec2(0, 0); + + +// boll circle +float scene(in vec3 pos) { + return length(pos) - 0.5; +} + + + +float march(vec3 o, vec3 dir) { + float t = 0.0; + for (int i = 0; i < ITERATIONS; i++) { + vec3 ray = o + dir * t; + + float cd = scene(ray); + + t += cd; + + if (t <= EPSILON || t > MAX_DIST) { + break; + } + } + + return t; +} + +vec3 calcNormals(in vec3 pos) { + const float e = 0.0005; + const vec2 k = vec2(1, -1) * 0.5773 * e; + return normalize( + k.xyy*scene(pos+k.xyy) + + k.yyx*scene(pos+k.yyx) + + k.yxy*scene(pos+k.yxy) + + k.xxx*scene(pos+k.xxx) ); +} + + +vec3 render(float dist, vec3 pos) { + if (dist > MAX_DIST) return vec3(0.0); + + vec3 n = calcNormals(pos); + + #ifdef NORMALS_ONLY + return abs(n); + #endif + + float dif = 0.9 * 
clamp(dot(light, n), 0.0, 1.0); + + vec3 albedo = vec3(1.0); + vec3 ambient = vec3(0.1); + return albedo * dif + ambient; +} + +// check to make sure y is represented in the correct coordinate space +vec3 lookat(vec3 orig, vec3 eye, vec3 pos) { + const vec3 world_up = vec3(0.0, 1.0, 0.0); + + vec3 z = normalize(pos - eye); + vec3 x = normalize(cross(z, world_up)); + vec3 y = normalize(cross(x, z)); + + mat3 look = mat3(x, y, z); + + return orig * look; +} + +void main() { + // Normalized screen coordinates (from -1 to 1) + // quite helpful for figuring out ray direction without doing an explicit + // perspective projection + vec2 uv = (texCoord - 0.5 * u.resolution.xy)/u.resolution.y; + + // adjust for vulkan + uv.y = -uv.y * u.aspect; + + float ylook = 0.0; //4.0 * (iMouse.y - 0.5 * iResolution.y)/iResolution.y; + + float a = mouse.x / u.resolution.x * 2.0 * PI; + + vec3 pos = vec3(4.0 * cos(a), ylook, 4.0 * sin(a)); + vec3 dir = lookat(normalize(vec3(uv, 1.0)), pos, spos); + + float dist = march(pos, dir); + + fragColor = vec4(render(dist, pos + dir * dist), 1.0); +} diff --git a/samples/raymarch_fractals/shaders/mandelbulb.glsl b/samples/raymarch_fractals/shaders/mandelbulb.glsl new file mode 100644 index 0000000..5cc683b --- /dev/null +++ b/samples/raymarch_fractals/shaders/mandelbulb.glsl @@ -0,0 +1,194 @@ +#version 450 + +#define EPSILON 0.001 +#define ITERATIONS 48 +#define MAX_DIST 80.0 + +#define PI 3.14159265 + +//#define NORMALS_ONLY + +layout(location = 0) out vec4 fragColor; +layout(location = 0) in vec2 texCoord; + +const float sp_radius = 5.0; + +layout(binding = 0) uniform FragUniforms { + mat4 transform; + vec2 resolution; + float time; + float aspect; +} u; + +#define TIME (u.time * 0.1) + +const vec2 mouse = vec2(0, 0); + +const int fractal_iterations = 12; +const float yeet = 4.0; +const float power = 8.0; + +vec4 freakyFractal(in vec3 pos) { + float dr = 1.0; + float r; + vec3 o = vec3(1.0); + vec3 z = pos; + + for (int i = 0; i < fractal_iterations; i++) { + r = length(z); + o = min(o, abs(z)); + + if (r > yeet) break; + + float theta = acos(z.z / r); + float phi = atan(z.y, z.x); + + dr = pow(r, power - 1) * power * dr + 1.0; + + float scaled_r = pow(r, power); + theta = theta * power; + phi = phi * power; + + z = scaled_r * vec3( + sin(theta) * cos(phi), + sin(phi) * sin(theta), + cos(theta) + ) + pos; + + z.y = clamp(z.y, -1.0, 1.0); + } + + return vec4(0.5 * log(r) * r / max(dr, 1e-6), abs(normalize(o))); +} + +const vec3 spos = vec3(0.0); + + +vec4 vecScene(in vec3 pos) { + return freakyFractal(pos); +} + +float scene(in vec3 pos) { + return freakyFractal(pos).x; +} + +float march(vec3 o, vec3 dir) { + float t = 0.0; + for (int i = 0; i < ITERATIONS; i++) { + + vec3 ray = o + dir * t; + + float cd = scene(ray); + + t += cd; + + if (cd < EPSILON * t || t > MAX_DIST) { + break; + } + } + + return t; +} + +vec3 calcNormals(in vec3 pos) { + const float e = EPSILON; + const vec2 k = vec2(1, -1) * 0.5773 * e; + return normalize( + k.xyy*scene(pos+k.xyy) + + k.yyx*scene(pos+k.yyx) + + k.yxy*scene(pos+k.yxy) + + k.xxx*scene(pos+k.xxx) ); +} + +vec3 calcNormals1(in vec3 pos) { + vec3 eps = vec3(0.001,0.0,0.0); + return normalize( vec3( + scene(pos+eps.xyy).x - scene(pos-eps.xyy).x, + scene(pos+eps.yxy).x - scene(pos-eps.yxy).x, + scene(pos+eps.yyx).x - scene(pos-eps.yyx).x ) ); +} + +const vec3 col_c = vec3(1.0, 0.03, 0.82); +const vec3 col_a = vec3(0.1, 1.0, 0.9); +const vec3 col_b = vec3(0.4, 0.05, 0.5); + +const vec3 sky_a = vec3(1.0, 0.95, 0.95); +const vec3 sky_b = 
vec3(0.3, 0.9, 1.0); +const vec3 sky_c = vec3(0.2, 0.2, 0.4); + +// Basic ass phong lighting, replace with something fancier +// if you wish +vec3 render(float dist, vec3 pos, vec3 look, vec3 dir, vec3 light) { + if (dist > MAX_DIST) + { + float sky = pow(dot(dir, vec3(0.0, 1.0, 0.0)), 2.0); + float sky2 = pow(dot(dir, vec3(0.0, -1.0, 0.0)), 2.0); + + vec3 sky_a = max(mix(sky_a, sky_b, sky), vec3(0.0)); + vec3 sky_b = max(mix(sky_a, sky_c, sky2), vec3(0.0)); + + return sky_a + sky_b; + } + + vec3 n = calcNormals(pos); + vec3 o = vecScene(pos).yzw; + + #ifdef NORMALS_ONLY + return abs(n); + #endif + + float trapMix = length(o); + vec3 base = vec3(0.2, 0.9, 1.0); + vec3 band = 0.25 + 0.5 * cos(6.2831 * (o + 0.2 * o)); + vec3 albedo = mix(col_a, col_b, band); + + float dif = max(dot(n, light), 0.0); + + + float shine = 32.0; + vec3 halfa = normalize(light + look); + float spa = max(dot(halfa, n), 0.0); + float spec = pow(spa, shine); + + return albedo * dif / dist + albedo * 0.1 / dist + spec * vec3(1) / dist; +} + +// check to make sure y is represented in the correct coordinate space +vec3 lookat(vec3 orig, vec3 eye, vec3 pos) { + const vec3 world_up = vec3(0.0, 1.0, 0.0); + + vec3 z = normalize(pos - eye); + vec3 x = normalize(cross(z, world_up)); + vec3 y = normalize(cross(x, z)); + + mat3 look = mat3(x, y, z); + + return orig * look; +} + +void main() { + // Normalized screen coordinates (from -1 to 1) + // quite helpful for figuring out ray direction without doing an explicit + // perspective projection + vec2 uv = (texCoord - 0.5 * u.resolution.xy)/u.resolution.y; + + // adjust for vulkan + uv.y = -uv.y * u.aspect; + + float ylook = 0.0;//sin(TIME * 0.5) * 4.0; + + float a = mouse.x / u.resolution.x * 2.0 * PI; + + float dist = (sin(TIME) + 1.0) * 0.5 * 4.0 + 1.0; + + vec3 pos = vec3(dist * cos(TIME), ylook, dist * sin(TIME)); + vec3 lpos = vec3(dist * cos(TIME * 3.0), ylook, dist * sin(TIME * 3.0)); + + vec3 dir = lookat(normalize(vec3(uv, 1.0)), pos, spos); + vec3 look = lookat(normalize(vec3(uv, 1.0)), pos, spos); + vec3 light = lookat(normalize(vec3(0.0, 0.0, 1.0)), lpos, spos); + + float di = march(pos, dir); + + fragColor = vec4(render(di, pos + dir * di, look, dir, light), 1.0); +} diff --git a/samples/raymarch_fractals/shaders/raymarch_template.glsl b/samples/raymarch_fractals/shaders/raymarch_template.glsl new file mode 100644 index 0000000..86a843c --- /dev/null +++ b/samples/raymarch_fractals/shaders/raymarch_template.glsl @@ -0,0 +1,112 @@ +#version 450 + +#define EPSILON 0.0001 +#define ITERATIONS 256 +#define MAX_DIST 1000.0 + +#define PI 3.14159265 + +//#define NORMALS_ONLY + +layout(location = 0) out vec4 fragColor; +layout(location = 0) in vec2 texCoord; + +const vec3 spos = vec3(0.0); +const vec3 light = normalize(vec3(1.0, 0.4, 2.3)); + +const float sp_radius = 5.0; + +layout(binding = 0) uniform FragUniforms { + mat4 transform; + vec2 resolution; + float time; + float aspect; +} u; + +const vec2 mouse = vec2(0, 0); + + +// boll circle +float scene(in vec3 pos) { + return length(pos) - 0.5; +} + + + +float march(vec3 o, vec3 dir) { + float t = 0.0; + for (int i = 0; i < ITERATIONS; i++) { + vec3 ray = o + dir * t; + + float cd = scene(ray); + + t += cd; + + if (t <= EPSILON || t > MAX_DIST) { + break; + } + } + + return t; +} + +vec3 calcNormals(in vec3 pos) { + const float e = 0.0005; + const vec2 k = vec2(1, -1) * 0.5773 * e; + return normalize( + k.xyy*scene(pos+k.xyy) + + k.yyx*scene(pos+k.yyx) + + k.yxy*scene(pos+k.yxy) + + k.xxx*scene(pos+k.xxx) ); +} + + +vec3 
render(float dist, vec3 pos) { + if (dist > MAX_DIST) return vec3(0.0); + + vec3 n = calcNormals(pos); + + #ifdef NORMALS_ONLY + return abs(n); + #endif + + float dif = 0.9 * clamp(dot(light, n), 0.0, 1.0); + + vec3 albedo = vec3(1.0); + vec3 ambient = vec3(0.1); + return albedo * dif + ambient; +} + +// check to make sure y is represented in the correct coordinate space +vec3 lookat(vec3 orig, vec3 eye, vec3 pos) { + const vec3 world_up = vec3(0.0, 1.0, 0.0); + + vec3 z = normalize(pos - eye); + vec3 x = normalize(cross(z, world_up)); + vec3 y = normalize(cross(x, z)); + + mat3 look = mat3(x, y, z); + + return orig * look; +} + +void main() { + // Normalized screen coordinates (from -1 to 1) + // quite helpful for figuring out ray direction without doing an explicit + // perspective projection + vec2 uv = (texCoord - 0.5 * u.resolution.xy)/u.resolution.y; + + // adjust for vulkan + uv.y = -uv.y * u.aspect; + + float ylook = 0.0; //4.0 * (iMouse.y - 0.5 * iResolution.y)/iResolution.y; + + float a = mouse.x / u.resolution.x * 2.0 * PI; + + vec3 pos = vec3(4.0 * cos(a), ylook, 4.0 * sin(a)); + vec3 dir = lookat(normalize(vec3(uv, 1.0)), pos, spos); + + float dist = march(pos, dir); + + fragColor = vec4(render(dist, pos + dir * dist), 1.0); +} diff --git a/src/api/api.zig b/src/api/api.zig index 4d71e99..b7565de 100644 --- a/src/api/api.zig +++ b/src/api/api.zig @@ -8,11 +8,12 @@ const ind_buf = @import("index_buffer.zig"); const sh = @import("shader.zig"); const uni_buf = @import("uniform.zig"); const vert_buf = @import("vertex_buffer.zig"); +const common = @import("common_types.zig"); // direct vulkan-zig imports -pub const GlobalInterface = vk.BaseWrapper; -pub const InstanceInterface = vk.InstanceProxy; -pub const DeviceInterface = vk.DeviceProxy; +pub const GlobalInterface = *const vk.BaseWrapper; +pub const InstanceInterface = *const vk.InstanceProxy; +pub const DeviceInterface = *const vk.DeviceProxy; pub const DynamicState = vk.DynamicState; /// All registered extensions for devices and instances @@ -21,6 +22,7 @@ pub const extensions = vk.extensions; pub const InstanceHandler = base.InstanceHandler; pub const DeviceHandler = base.DeviceHandler; pub const SurfaceHandler = base.SurfaceHandler; +pub const DeviceMemoryLayout = @import("memory/DeviceMemoryLayout.zig"); pub const GraphicsQueue = queue.GraphicsQueue; pub const ComputeQueue = queue.ComputeQueue; @@ -48,13 +50,17 @@ pub const ComptimeVertexBuffer = vert_buf.VertexBuffer; pub const ComptimeIndexBuffer = ind_buf.IndexBuffer; pub const ComptimeUniformBuffer = uni_buf.UniformBuffer; pub const ComptimeStorageBuffer = @import("storage_buffer.zig").ComptimeStorageBuffer; -const BufInterface = buf.AnyBuffer; +pub const BufInterface = buf.AnyBuffer; pub const Descriptor = @import("descriptor.zig"); -pub const ResolvedDescriptorBinding = Descriptor.ResolvedBinding; +pub const DescriptorPool = @import("descriptor_pool.zig"); +pub const DescriptorLayout = Descriptor.DescriptorLayout; +pub const DescriptorType = common.DescriptorType; +pub const DescriptorUsageInfo = common.DescriptorUsageInfo; + // additional utility structs and stuff -const types = @import("types.zig"); +const types = @import("common_types.zig"); pub const SyncInfo = types.SyncInfo; // sync stuff @@ -64,4 +70,16 @@ pub const Fence = sync.Fence; // shaders pub const ShaderModule = sh.Module; +// Obtaining RTTI for vulkan API +const Registry = @import("../resource_management/res.zig").Registry; + +pub fn populateRegistry(reg: *Registry) !void { + // temporary, until I 
restructure this file to include + // data declarations rather than wrappers. + reg.registerType(CommandBuffer.CommandBuffer); +} +// comptime { +// _ = @import("memory/allocator.zig"); +// _ = @import("memory/Heap.zig"); +// } diff --git a/src/api/base.zig b/src/api/base.zig index c9088b1..de5526d 100644 --- a/src/api/base.zig +++ b/src/api/base.zig @@ -3,7 +3,7 @@ const vk = @import("vulkan"); const std = @import("std"); const glfw = @import("glfw"); -const util = @import("../util.zig"); +const util = @import("common").util; const api = @import("api.zig"); const Allocator = std.mem.Allocator; @@ -26,8 +26,8 @@ fn debugCallback( p_user_data: ?*anyopaque, ) callconv(.c) vk.Bool32 { const callback_data = p_callback_data orelse { - validation_log.err( - "Something probably bad happened but vulkan won't fucking give me the info", + std.debug.print( + "Something probably bad happened but vulkan won't fucking give me the info\n", .{}, ); return vk.FALSE; @@ -37,8 +37,8 @@ fn debugCallback( // That way, logging levels will work for vulkan validation as well.. const msg_level = message_severity.toInt(); - validation_log.debug( - "{d} -- {s}", + std.debug.print( + "{d} -- {s}\n", .{ msg_level, callback_data.p_message orelse "Fuck" }, ); @@ -56,10 +56,6 @@ pub const InstanceHandler = struct { validation_layers: []const [*:0]const u8, }, - device: struct { - required_extensions: []const [*:0]const u8, - }, - loader: GetProcAddrHandler, allocator: Allocator, enable_debug_log: bool, @@ -98,9 +94,7 @@ pub const InstanceHandler = struct { }; } - fn createInstance(self: *InstanceHandler, config: *const Config) !void { - - // log the available extensions + fn createInstance(self: *InstanceHandler, config: Config) !void { const available = self.w_db.enumerateInstanceExtensionPropertiesAlloc( null, config.allocator, @@ -108,6 +102,7 @@ pub const InstanceHandler = struct { log.debug("Failed to enumerate extension properties... 
this is probably bad.\n", .{}); return error.ExtensionEnumerationFailed; }; + defer config.allocator.free(available); log.debug("Available Extensions:", .{}); @@ -149,14 +144,14 @@ pub const InstanceHandler = struct { .application_version = @bitCast(vk.makeApiVersion(0, 0, 0, 0)), .p_engine_name = "No Engine", .engine_version = @bitCast(vk.makeApiVersion(0, 0, 0, 0)), - .api_version = @bitCast(vk.API_VERSION_1_4), + .api_version = @bitCast(vk.API_VERSION_1_3), }, .p_next = &defaultDebugConfig(), .enabled_extension_count = @intCast(config.instance.required_extensions.len), .pp_enabled_extension_names = config.instance.required_extensions.ptr, .enabled_layer_count = @intCast(config.instance.validation_layers.len), .pp_enabled_layer_names = config.instance.validation_layers.ptr, - .flags = .{ .enumerate_portability_bit_khr = true }, + .flags = .{}, }, null); self.w_di = try self.allocator.create(vk.InstanceWrapper); @@ -182,7 +177,7 @@ pub const InstanceHandler = struct { ); } - fn loadBase(self: *InstanceHandler, config: *const Config) !void { + fn loadBase(self: *InstanceHandler, config: Config) !void { self.w_db = vk.BaseWrapper.load(config.loader); // check to see if the dispatch table loading fucked up @@ -192,7 +187,7 @@ pub const InstanceHandler = struct { } } - pub fn init(config: *const Config) !InstanceHandler { + pub fn init(config: Config) !InstanceHandler { var ctx: InstanceHandler = .{ .allocator = config.allocator, }; @@ -226,7 +221,7 @@ pub const InstanceHandler = struct { //TODO: Yeet into a non-owning view of context pub const DeviceHandler = struct { pub const Config = struct { - surface: *const SurfaceHandler, + surface: ?*const SurfaceHandler, required_extensions: []const [*:0]const u8 = &[0][*:0]const u8{}, }; @@ -329,7 +324,7 @@ pub const DeviceHandler = struct { // TODO: Support overlap in queue families // (i.e) some queues might support both compute and graphics operations families: FamilyIndices, - swapchain_details: SwapchainSupportDetails, + swapchain_details: ?SwapchainSupportDetails, h_dev: vk.Device = .null_handle, h_pdev: vk.PhysicalDevice = .null_handle, @@ -354,8 +349,15 @@ pub const DeviceHandler = struct { var found = false; var chosen_mem: u32 = 0; + for (0..dev_mem_props.memory_heap_count) |i| { + const heap = dev_mem_props.memory_heaps[i]; + log.debug("{d}. 
memory heap: {s} (size: {d})", .{i, heap.flags, heap.size}); + } + for (0..dev_mem_props.memory_type_count) |i| { const mem_flags = dev_mem_props.memory_types[i].property_flags; + const heap_index = dev_mem_props.memory_types[i].heap_index; + log.debug("Memory Type {d} (heap {d}): {s}", .{i, heap_index, mem_flags}); if (mem_reqs.memory_type_bits & (@as(u32, 1) << @intCast(i)) != 0 and mem_flags.contains(requested_flags)) { found = true; @@ -373,7 +375,7 @@ pub const DeviceHandler = struct { fn getQueueFamilies( dev: vk.PhysicalDevice, pr_inst: *const vk.InstanceProxy, - surface: *const SurfaceHandler, + surface: ?*const SurfaceHandler, allocator: Allocator, ) FamilyIndices { var found_indices: FamilyIndices = .{ @@ -387,6 +389,7 @@ pub const DeviceHandler = struct { dev, allocator, ) catch { + log.err("Could not get queue family proprties while enumerating device\n", .{}); return found_indices; }; defer allocator.free(dev_queue_family_props); @@ -395,6 +398,7 @@ pub const DeviceHandler = struct { const i: u32 = @intCast(index); log.debug("Family {d}, Supports:\n {s}", .{i, props.queue_flags}); + if (props.queue_flags.contains(.{ .graphics_bit = true, })) { @@ -405,13 +409,16 @@ pub const DeviceHandler = struct { })) { found_indices.compute_family = i; } - - if ((pr_inst.getPhysicalDeviceSurfaceSupportKHR( - dev, - i, - surface.h_surface, - ) catch vk.FALSE) == vk.TRUE) { - found_indices.present_family = i; + + // HACK: Guhh + if (surface) |surf| { + if ((pr_inst.getPhysicalDeviceSurfaceSupportKHR( + dev, + i, + surf.h_surface, + ) catch vk.FALSE) == vk.TRUE) { + found_indices.present_family = i; + } } } @@ -445,7 +452,7 @@ pub const DeviceHandler = struct { // This will likely require a bit of upgrade when I consider making this project more portable fn pickSuitablePhysicalDevice( pr_inst: *const vk.InstanceProxy, - config: *const Config, + config: Config, allocator: Allocator, ) ?vk.PhysicalDevice { const physical_devices = @@ -473,7 +480,7 @@ pub const DeviceHandler = struct { // Later on, I plan to accept a device properties struct // which shall serve as the criteria for choosing a graphics unit - pub fn init(parent: *const InstanceHandler, config: *const Config) !DeviceHandler { + pub fn init(parent: *const InstanceHandler, config: Config) !DeviceHandler { // attempt to find a suitable device -- hardcoded for now const chosen_dev = pickSuitablePhysicalDevice( &parent.pr_inst, @@ -493,15 +500,23 @@ pub const DeviceHandler = struct { parent.allocator, ); - // just enable all the available features lmao + //HACK: Extremely lazy fix for device being hardcoded + //for a surface. 
Please replace this once proper featureset + //support exists const dev_features = parent.pr_inst.getPhysicalDeviceFeatures(chosen_dev); - const swapchain_details = getDeviceSupport( - &parent.pr_inst, - config.surface, - chosen_dev, - parent.allocator, - ) catch unreachable; - errdefer swapchain_details.deinit(); + const swapchain_details = if (config.surface) |surf| + getDeviceSupport( + &parent.pr_inst, + surf, + chosen_dev, + parent.allocator, + ) catch unreachable + else + null; + + if (swapchain_details) |sd| { + errdefer sd.deinit(); + } const priority = [_]f32{1.0}; @@ -513,7 +528,8 @@ pub const DeviceHandler = struct { .p_queue_priorities = &priority, }, .{ - .queue_family_index = dev_queue_indices.present_family.?, + .queue_family_index = dev_queue_indices.present_family orelse + @panic("This is bad"), .queue_count = 1, .p_queue_priorities = &priority, }, @@ -521,7 +537,7 @@ pub const DeviceHandler = struct { const logical_dev = parent.pr_inst.createDevice(chosen_dev, &.{ .p_queue_create_infos = &queue_create_infos, - .queue_create_info_count = queue_create_infos.len, + .queue_create_info_count = if (config.surface != null) queue_create_infos.len else 1, .p_enabled_features = @ptrCast(&dev_features), .pp_enabled_extension_names = config.required_extensions.ptr, .enabled_extension_count = @intCast(config.required_extensions.len), @@ -584,7 +600,11 @@ pub const DeviceHandler = struct { self.pr_dev.destroyDevice(null); self.ctx.allocator.destroy(self.dev_wrapper); - self.swapchain_details.deinit(); + // HACK: Quick fix to get device working without a window + // surface, please replace once proper featuresets are introduced + if (self.swapchain_details) |*sd| { + sd.deinit(); + } } pub fn getMemProperties(self: *const DeviceHandler) vk.PhysicalDeviceMemoryProperties { @@ -652,17 +672,23 @@ pub const SurfaceHandler = struct { h_window: *const glfw.Window = undefined, h_surface: vk.SurfaceKHR = .null_handle, ctx: *const InstanceHandler = undefined, - - pub fn init(window: *const glfw.Window, ctx: *const InstanceHandler) !SurfaceHandler { - var surface: vk.SurfaceKHR = undefined; - - if (glfw.glfwCreateWindowSurface(ctx.pr_inst.handle, window.handle, null, &surface) != .success) { - log.debug("failed to create window surface!", .{}); - return error.SurfaceCreationFailed; + + /// Only reason window is optional is if we don't care + /// about presenting to a screen. 
+ /// It is on the user to not create types that involve a window + /// -- may redo later for easier use and more predictable errors + pub fn init(window: ?*const glfw.Window, ctx: *const InstanceHandler) !SurfaceHandler { + var surface: vk.SurfaceKHR = .null_handle; + + if (window) |w| { + if (glfw.glfwCreateWindowSurface(ctx.pr_inst.handle, w.handle, null, &surface) != .success) { + log.debug("failed to create window surface!", .{}); + return error.SurfaceCreationFailed; + } } return SurfaceHandler{ - .h_window = window, + .h_window = window orelse undefined, .h_surface = surface, .ctx = ctx, }; diff --git a/src/api/buffer.zig b/src/api/buffer.zig index d690d61..544289e 100644 --- a/src/api/buffer.zig +++ b/src/api/buffer.zig @@ -4,7 +4,7 @@ const std = @import("std"); const log = std.log.scoped(.buffer); const meth = @import("../math.zig"); -const util = @import("../util.zig"); +const util = @import("common").util; const CommandBuffer = @import("command_buffer.zig"); const DeviceHandler = @import("base.zig").DeviceHandler; diff --git a/src/api/command_buffer.zig b/src/api/command_buffer.zig index b83c4b4..7ebf280 100644 --- a/src/api/command_buffer.zig +++ b/src/api/command_buffer.zig @@ -2,8 +2,9 @@ const std = @import("std"); const api = @import("api.zig"); const vk = @import("vulkan"); const base = @import("base.zig"); -const util = @import("../util.zig"); +const util = @import("common").util; const queue = @import("queue.zig"); +const env = @import("../env.zig"); const Context = @import("../context.zig"); const DeviceHandler = base.DeviceHandler; @@ -18,16 +19,13 @@ h_cmd_pool: vk.CommandPool, dev: *const DeviceHandler, one_shot: bool = false, -pub const Config = struct { - src_queue_family: queue.QueueFamily = .Graphics, -}; -pub fn init(ctx: *const Context, config: Config) !Self { +pub fn init(ctx: *const Context, config: CommandBuffer.Config) !Self { const dev = ctx.env(.dev); - return initDev(dev, config); + return try initDev(dev, config); } -fn initDev(dev: *const DeviceHandler, config: Config) !Self { +fn initDev(dev: *const DeviceHandler, config: CommandBuffer.Config) !Self { var cmd_buffer: vk.CommandBuffer = undefined; dev.pr_dev.allocateCommandBuffers( &.{ @@ -41,14 +39,21 @@ fn initDev(dev: *const DeviceHandler, config: Config) !Self { return err; }; - return .{ + var api_cmd_buf = Self{ .h_cmd_buffer = cmd_buffer, .h_cmd_pool = dev.getCommandPool(config.src_queue_family), .dev = dev, }; + + if (config.one_shot) { + api_cmd_buf.one_shot = true; + try api_cmd_buf.beginConfig(.{ .one_time_submit_bit = true }); + } + + return api_cmd_buf; } -pub fn oneShot(dev: *const DeviceHandler, config: Config) !Self { +pub fn oneShot(dev: *const DeviceHandler, config: CommandBuffer.Config) !Self { var buf = try initDev(dev, config); buf.one_shot = true; @@ -103,3 +108,68 @@ pub fn deinit(self: *const Self) void { util.asManyPtr(vk.CommandBuffer, &self.h_cmd_buffer), ); } + +const res = @import("../resource_management/res.zig"); +const common = @import("common"); +const Registry = res.Registry; + +pub const CommandBuffer = struct { + pub const Config = struct { + src_queue_family: queue.QueueFamily = .Graphics, + one_shot: bool = false, + }; + + pub const entry_config = + Registry.EntryConfig{ + .State = CommandBuffer, + .Proxy = CommandBufferProxy, + .InitErrors = CommandBufferInitErrors, + .ConfigType = Config, + .management = .Pooled, + .initFn = dummyInit, + .deinitFn = dummyDeinit, + }; + + h_cmd_buffer: vk.CommandBuffer, + h_cmd_pool: vk.CommandPool, + + dev: *const DeviceHandler, 
+ one_shot: bool = false, +}; + +pub fn addEntries(reg: *Registry) !void { + reg.addEntry(CommandBuffer); +} + +const CommandBufferInitErrors = error{ + Something, +}; + +fn dummyInit(self: *CommandBuffer, e: env.Empty(), config: CommandBuffer.Config) CommandBufferInitErrors!void { + _ = e; + _ = self; + _ = config; +} + +fn dummyDeinit(self: *const CommandBuffer) void { + _ = self; +} + +const crapi = @import("../resource_management/res.zig").Registry.ComptimeAPI; + + +pub const CommandBufferProxy = struct { + const CommandBufferHandle = crapi.HandleFor(CommandBuffer); + + handle: CommandBufferHandle, + + //pub const bind = Entry.bindFn; + //// easier than "Factory.destroyHandle(thing)" + //pub const deinit = Entry.deinitFn; + + //pub const submit = res.APIFunction(submit); + //pub const reset = res.APIFunction(reset); + //pub const begin = res.APIFunction(begin); + //pub const beginConfig = res.APIFunction(beginConfig); + //pub const end = res.APIFunction(end); +}; diff --git a/src/api/common_types.zig b/src/api/common_types.zig new file mode 100644 index 0000000..052f68d --- /dev/null +++ b/src/api/common_types.zig @@ -0,0 +1,51 @@ +const vk = @import("vulkan"); + +pub const SyncInfo = struct { + fence_sig: ?vk.Fence = null, + fence_wait: ?vk.Fence = null, + + sem_sig: ?vk.Semaphore = null, + sem_wait: ?vk.Semaphore = null, +}; + +pub const DescriptorUsageInfo = packed struct { + /// THis just directly controls which pool the descriptor is + /// allocated into. For the moment, all descriptors will be + /// either transient or application scoped + pub const LifetimeScope = enum(u3) { + /// descriptor exists for just a single frame + /// which places it into the "transient" pool + Transient, + + /// Descriptor exists for a longer time, but + /// can still be freed and replaced. This pool still follows vulkan + /// rules, but "freed" descriptors are placed into a free store + /// which allows them to be rewritten instead of being completely reallocated. + Scene, + + /// These descriptors are completely static. 
Never to be freed until the application dies + Static, + }; + + lifetime_bits: LifetimeScope, +}; + +pub const DescriptorType = enum(u8) { + Uniform, + Sampler, + StorageBuffer, + Image, + + pub fn toVkDescriptor(self: DescriptorType) vk.DescriptorType { + return switch(self) { + .Uniform => .uniform_buffer, + .Sampler => .combined_image_sampler, + .StorageBuffer => .storage_buffer, + .Image => .storage_image, + }; + } + + pub fn toIndex(self: DescriptorType) usize { + return @intFromEnum(self); + } +}; diff --git a/src/api/compute.zig b/src/api/compute.zig index ecf53cf..d4254a3 100644 --- a/src/api/compute.zig +++ b/src/api/compute.zig @@ -1,7 +1,7 @@ const std = @import("std"); const vk = @import("vulkan"); const api = @import("api.zig"); -const util = @import("../util.zig"); +const util = @import("common").util; const DeviceInterface = api.DeviceInterface; const ShaderModule = api.ShaderModule; @@ -17,7 +17,7 @@ const Self = @This(); pub const Config = struct { shader: *const ShaderModule, - desc_bindings: []const ResolvedBinding, + desc: api.Descriptor, }; pr_dev: *const DeviceInterface, @@ -25,24 +25,19 @@ h_pipeline: vk.Pipeline, h_pipeline_layout: vk.PipelineLayout, desc: Descriptor, -pub fn init(ctx: *const Context, allocator: Allocator, cfg: Config) !Self { +pub fn init(ctx: *const Context, cfg: Config) !Self { const pr_dev: *const DeviceInterface = ctx.env(.di); - var desc = try Descriptor.init(ctx, allocator, .{ - .bindings = cfg.desc_bindings, - }); - errdefer desc.deinit(); - // create the actual pipeline const layout = try pr_dev.createPipelineLayout(&.{ .flags = .{}, .set_layout_count = 1, - .p_set_layouts = &.{desc.h_desc_layout}, + .p_set_layouts = &.{cfg.desc.vkLayout()}, }, null); errdefer pr_dev.destroyPipelineLayout(layout, null); var new = Self{ .pr_dev = pr_dev, - .desc = desc, + .desc = cfg.desc, .h_pipeline_layout = layout, .h_pipeline = .null_handle, }; @@ -65,12 +60,12 @@ pub fn init(ctx: *const Context, allocator: Allocator, cfg: Config) !Self { } pub fn updateData(self: *Self, binding: u32, data: anytype) !void { - self.desc.update(binding, data); + self.desc.setValue(binding, data); } pub fn bind(self: *const Self, cmd_buf: *const CommandBuffer) void { self.pr_dev.cmdBindPipeline(cmd_buf.h_cmd_buffer, .compute, self.h_pipeline); - self.desc.bind(cmd_buf, self.h_pipeline_layout, .{ .bind_point = .compute }); + self.desc.use(cmd_buf, self.h_pipeline_layout, .{ .bind_point = .compute }); } pub fn dispatch( diff --git a/src/api/descriptor.zig b/src/api/descriptor.zig index a449b53..c02b0bc 100644 --- a/src/api/descriptor.zig +++ b/src/api/descriptor.zig @@ -1,25 +1,22 @@ //! A pretty temporary implementation for //! 
basic descriptor management in vulkan, manages it's own pool for now const std = @import("std"); +const debug = std.debug; const vk = @import("vulkan"); -const util = @import("../util.zig"); -const uniform = @import("uniform.zig"); -const buffer = @import("buffer.zig"); +const util = @import("common").util; +const common = @import("common_types.zig"); -const many = util.asManyPtr; +const api = @import("api.zig"); -const Allocator = std.mem.Allocator; - -const DeviceHandler = @import("base.zig").DeviceHandler; -const UniformBuffer = uniform.UniformBuffer; -const CommandBuffer = @import("command_buffer.zig"); const Context = @import("../context.zig"); -const AnyBuffer = buffer.AnyBuffer; - -const TexImage = @import("texture.zig"); -const Image = @import("image.zig"); +const Allocator = std.mem.Allocator; +const DeviceHandler = api.DeviceHandler; +const UniformBuffer = api.ComptimeUniformBuffer; +const CommandBuffer = api.CommandBuffer; +const AnyBuffer = api.BufInterface; +const Image = api.Image; const log = std.log.scoped(.descriptor); @@ -35,29 +32,12 @@ fn createDescriptorPool(pr_dev: *const vk.DeviceProxy) !vk.DescriptorPool { }, null); } -pub const DescriptorType = enum(u8) { - Uniform, - Sampler, - StorageBuffer, - Image, +pub const Config = struct { + layout: DescriptorLayout, + usage: common.DescriptorUsageInfo, }; -pub const ResolvedBinding = struct { - stages: vk.ShaderStageFlags, - data: union(DescriptorType) { - Uniform: AnyBuffer, - Sampler: struct { - sampler: vk.Sampler, - view: vk.ImageView, - }, - StorageBuffer: AnyBuffer, - Image: struct { - img: *const Image, - // caller is responsible for this for now - view: vk.ImageView, - }, - }, -}; +pub const DescriptorType = common.DescriptorType; // flattened since buffers are the same regardless // of whether they be uniform or storage @@ -71,179 +51,29 @@ const BindingWriteInfo = union(enum) { /// is the (0 indexed) order they be actually laid out const Self = @This(); -pub const Config = struct { - // this gets copied to the actual array, so it can be specified locally no problemo - bindings: []const ResolvedBinding, -}; - -h_desc_layout: vk.DescriptorSetLayout, -h_desc_pool: vk.DescriptorPool, h_desc_set: vk.DescriptorSet, pr_dev: *const vk.DeviceProxy, -allocator: Allocator, -resolved_bindings: []ResolvedBinding = undefined, +layout: DescriptorLayout, -fn resolveDescriptorLayout( - layouts: []vk.DescriptorSetLayoutBinding, - bindings: []const ResolvedBinding, -) void { - // I guess just ignore any extra bindings specified, not my problem lol - for (0..layouts.len) |index| { - layouts[index] = vk.DescriptorSetLayoutBinding{ - .binding = @intCast(index), - .descriptor_count = 1, - .stage_flags = bindings[index].stages, - - .descriptor_type = switch (bindings[index].data) { - .Sampler => .combined_image_sampler, - .Uniform => .uniform_buffer, - .StorageBuffer => .storage_buffer, - .Image => .storage_image, - }, - }; +pub fn init(ctx: *const Context, config: Config) !Self { + if (config.layout.layout == null) { + log.err("Invalid descriptor layout (likely forgot to call resolve())", .{}); + return error.InvalidLayout; } -} - -fn updateDescriptorSets( - self: *Self, - dev: *const DeviceHandler, - desc_set: vk.DescriptorSet, - allocator: Allocator, -) !void { - const num_bindings = self.resolved_bindings.len; - var writes: []vk.WriteDescriptorSet = try allocator.alloc( - vk.WriteDescriptorSet, - num_bindings, - ); - defer allocator.free(writes); - - var write_infos: []BindingWriteInfo = try allocator.alloc( - BindingWriteInfo, - 
num_bindings, - ); - defer allocator.free(write_infos); - - for (self.resolved_bindings, 0..num_bindings) |binding, index| { - writes[index] = vk.WriteDescriptorSet{ - .descriptor_type = undefined, - - .dst_binding = @intCast(index), - .dst_array_element = 0, - .descriptor_count = 1, - .dst_set = desc_set, - - .p_buffer_info = undefined, - .p_image_info = undefined, - .p_texel_buffer_view = undefined, - }; - - self.resolved_bindings[index] = binding; - - switch (binding.data) { - .Sampler => |sampler| { - write_infos[index] = .{ .Image = vk.DescriptorImageInfo{ - .image_layout = .read_only_optimal, - .image_view = sampler.view, - .sampler = sampler.sampler, - } }; - - writes[index].descriptor_type = .combined_image_sampler; - writes[index].p_image_info = &.{write_infos[index].Image}; - }, - .Image => |img| { - write_infos[index] = .{ .Image = vk.DescriptorImageInfo{ - .image_layout = .general, - .image_view = img.view, - .sampler = .null_handle, - } }; - - writes[index].p_image_info = &.{write_infos[index].Image}; - writes[index].descriptor_type = .storage_image; - }, - else => { - const buf: AnyBuffer, const dt: vk.DescriptorType = switch (binding.data) { - .Uniform => |buf| .{ buf, .uniform_buffer }, - .StorageBuffer => |buf| .{ buf, .storage_buffer }, - else => unreachable, - }; - - write_infos[index] = .{ .Buffer = vk.DescriptorBufferInfo{ - .buffer = buf.handle, - .offset = 0, - .range = buf.size, - } }; - - writes[index].descriptor_type = dt; - - writes[index].p_buffer_info = many( - vk.DescriptorBufferInfo, - &write_infos[index].Buffer, - ); - }, - } - } - - dev.pr_dev.updateDescriptorSets( - @intCast(writes.len), - writes[0..].ptr, - 0, - null, - ); -} -pub fn init(ctx: *const Context, allocator: Allocator, config: Config) !Self { const dev: *const DeviceHandler = ctx.env(.dev); + var desc_pools = ctx.env(.desc); - var desc_bindings = try allocator.alloc( - vk.DescriptorSetLayoutBinding, - config.bindings.len, - ); - defer allocator.free(desc_bindings); - - resolveDescriptorLayout(desc_bindings, config.bindings); - - const layout_info = vk.DescriptorSetLayoutCreateInfo{ - .binding_count = @intCast(desc_bindings.len), - .p_bindings = desc_bindings[0..].ptr, - }; - - const desc_layout = try dev.pr_dev.createDescriptorSetLayout( - &layout_info, - null, - ); - - const desc_pool = try createDescriptorPool(&dev.pr_dev); - errdefer dev.pr_dev.destroyDescriptorPool(desc_pool, null); - - var desc_set = [_]vk.DescriptorSet{.null_handle}; - - try dev.pr_dev.allocateDescriptorSets(&.{ - .descriptor_pool = desc_pool, - .descriptor_set_count = 1, - .p_set_layouts = many(vk.DescriptorSetLayout, &desc_layout), - }, desc_set[0..]); - - // setup the bloody descriptor sets and associate them wit the buffer - const owned_bindings = try allocator.alloc( - ResolvedBinding, - config.bindings.len, - ); - errdefer allocator.free(owned_bindings); - std.mem.copyForwards(ResolvedBinding, owned_bindings, config.bindings); + const desc_set = try desc_pools.reserve(config.usage, config.layout.layout.?); var descriptor = Self{ - .h_desc_layout = desc_layout, - .h_desc_pool = desc_pool, + .layout = config.layout, .pr_dev = &dev.pr_dev, - .h_desc_set = desc_set[0], - .resolved_bindings = owned_bindings, - .allocator = allocator, + .h_desc_set = desc_set, }; errdefer descriptor.deinit(); - try descriptor.updateDescriptorSets(dev, desc_set[0], allocator); - return descriptor; } @@ -251,7 +81,9 @@ pub const BindInfo = struct { bind_point: vk.PipelineBindPoint = .graphics, }; -pub fn bind( +/// formerly "bind" (renamed 
to avoid confusion with +/// binding concrete resources to descriptor slots) +pub fn use( self: *const Self, cmd_buf: *const CommandBuffer, layout: vk.PipelineLayout, @@ -263,27 +95,305 @@ pub fn bind( layout, 0, 1, - many(vk.DescriptorSet, &self.h_desc_set), + &.{self.h_desc_set}, 0, null, ); } -pub fn deinit(self: *Self) void { - self.pr_dev.destroyDescriptorPool(self.h_desc_pool, null); - self.pr_dev.destroyDescriptorSetLayout(self.h_desc_layout, null); +//FIXME: Can't pass context until lifecyle routines are integrated +pub fn deinit(self: *Self) void { //, ctx: *const Context) void { - self.allocator.free(self.resolved_bindings); + //var desc_pool = ctx.env(.desc); + // this may do nothing depending on usage + //desc_pool.free(self.usage, self.h_desc_set); + self.layout.deinit(); } -pub fn update(self: *Self, index: usize, data: anytype) !void { +pub fn bindUniformsNamed(self: *Self, name: []const u8, ubo: AnyBuffer) void { + const loc = self.layout.named_bindings.get(name) orelse return; + self.bindUniforms(loc, ubo); +} + +pub fn bindSamplerNamed(self: *Self, name: []const u8, sampler: vk.Sampler, view: Image.View) void { + const loc = self.layout.named_bindings.get(name) orelse return; + self.bindSampler(loc, sampler, view); +} + +pub fn bindImageNamed(self: *Self, name: []const u8, img: *const Image, view: Image.View) void { + const loc = self.layout.named_bindings.get(name) orelse return; + self.bindImage(loc, img, view); +} + +pub fn bindBufferNamed(self: *Self, name: []const u8, buf: AnyBuffer) void { + const loc = self.layout.named_bindings.get(name) orelse return; + self.bindBuffer(loc, buf); +} + +pub fn bindUniforms(self: *Self, index: usize, ubo: AnyBuffer) void { + self.bindBase(index, .{ .Uniform = ubo }); +} + +pub fn bindSampler(self: *Self, index: usize, sampler: vk.Sampler, view: Image.View) void { + self.bindBase(index, .{ .Sampler = .{ + .sampler = sampler, + .view = view, + } }); +} + +//TODO: Get rid of *img if i don't need it +pub fn bindImage(self: *Self, index: usize, img: *const Image, view: Image.View) void { + self.bindBase(index, .{ .Image = .{ + .img = img, + .view = view, + } }); +} + +pub fn bindBuffer(self: *Self, index: usize, buf: AnyBuffer) void { + self.bindBase(index, .{ .StorageBuffer = buf }); +} + +fn bindBase(self: *Self, index: usize, b: DescriptorBinding) void { + debug.assert(index < self.layout.size); + const write = &self.layout.writes[index]; + write.* = vk.WriteDescriptorSet{ + .descriptor_type = undefined, + + .dst_binding = @intCast(index), + .dst_array_element = 0, + .descriptor_count = 1, + .dst_set = self.h_desc_set, + + .p_buffer_info = undefined, + .p_image_info = undefined, + .p_texel_buffer_view = undefined, + }; + + const binding = &self.layout.write_infos[index]; + + switch (b) { + .Sampler => |sampler| { + binding.* = .{ .Image = vk.DescriptorImageInfo{ + .image_layout = .read_only_optimal, + .image_view = sampler.view.h_view, + .sampler = sampler.sampler, + } }; + + write.p_image_info = @as([*]vk.DescriptorImageInfo, @ptrCast(&binding.Image)); + write.descriptor_type = .combined_image_sampler; + }, + .Image => |img| { + binding.* = .{ .Image = vk.DescriptorImageInfo{ + .image_layout = .general, + .image_view = img.view.h_view, + .sampler = .null_handle, + } }; + + write.p_image_info = @as([*]vk.DescriptorImageInfo, @ptrCast(&binding.Image)); + write.descriptor_type = .storage_image; + }, + else => { + const buf: AnyBuffer, const dt: vk.DescriptorType = switch (b) { + .Uniform => |buf| .{ buf, .uniform_buffer }, + 
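// (editor's note) uniform and storage buffers feed the exact same vk.DescriptorBufferInfo
// write below; only the resulting vk.DescriptorType differs between the two prongs.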
.StorageBuffer => |buf| .{ buf, .storage_buffer }, + else => unreachable, + }; + + binding.* = .{ .Buffer = vk.DescriptorBufferInfo{ + .buffer = buf.handle, + .offset = 0, + .range = buf.size, + } }; + + write.p_buffer_info = @as([*]vk.DescriptorBufferInfo, @ptrCast(&binding.Buffer)); + write.descriptor_type = dt; + }, + } +} + +/// Write descriptor values to the descriptor set. +/// -- This should happen anytime the descriptor's makeup changes +/// (i.e a new texture or buffer is needed) +/// Do note that just updating data of existing uniform or storage buffers +/// should just be done directly using the "setValue" function +pub fn update(self: *Self) void { + self.pr_dev.updateDescriptorSets( + @intCast(self.layout.writes.len), + self.layout.writes.ptr, + 0, + null, + ); +} + +pub fn vkLayout(self: *const Self) vk.DescriptorSetLayout { + // this has already been checked during initialization. + return self.layout.layout orelse unreachable; +} + +/// Formerly "update" +pub fn setValue(self: *Self, index: usize, data: anytype) !void { const binding = self.resolved_bindings[index]; switch (binding.data) { .Uniform => |buf| try buf.setData(data), .StorageBuffer => |buf| try buf.setData(data), else => { - log.err("Fuck (AKA no texture updating pretty please)", .{}); + log.err("Images may not be written from via descriptors", .{}); return error.Unsupported; }, } } + +const DescriptorBinding = union(DescriptorType) { + Uniform: AnyBuffer, + Sampler: struct { + sampler: vk.Sampler, + view: Image.View, + }, + StorageBuffer: AnyBuffer, + Image: struct { + img: *const Image, + view: Image.View, + }, +}; + + +// Descriptor layouts done better +pub const DescriptorLayout = struct { + const Specifier = struct { + u32, + []const u8, + DescriptorType, + vk.ShaderStageFlags, + }; + + fn id(spec: Specifier) SpecifierID { + return SpecifierID{ + .type = @intCast(spec.@"2".toIndex()), + .stage_flags = spec.@"3", + }; + } + const SpecifierID = packed struct { + type: u32, + stage_flags: vk.ShaderStageFlags, + }; + + di: *const api.DeviceInterface, + allocator: Allocator, + + specifiers: std.ArrayListUnmanaged(Specifier), + named_bindings: std.StringArrayHashMapUnmanaged(usize), + + writes: []vk.WriteDescriptorSet, + write_infos: []BindingWriteInfo, + layout: ?vk.DescriptorSetLayout = null, + size: usize, + + /// Screw you, just figure out how many descriptors you need before + /// allocating shit all over the place. 
+ pub fn init(ctx: *const Context, allocator: Allocator, size: usize) !DescriptorLayout { + var bindings_map = try std.StringArrayHashMapUnmanaged(usize).init(allocator, &.{}, &.{}); + errdefer bindings_map.deinit(allocator); + + const writes_buf = try allocator.alloc(vk.WriteDescriptorSet, size); + errdefer allocator.free(writes_buf); + + const write_infos_buf = try allocator.alloc(BindingWriteInfo, size); + errdefer allocator.free(write_infos_buf); + + var specifiers = try std.ArrayListUnmanaged(Specifier).initCapacity(allocator, size); + errdefer specifiers.deinit(allocator); + + try bindings_map.ensureTotalCapacity(allocator, size); + return DescriptorLayout{ + .di = ctx.env(.di), + .allocator = allocator, + + .specifiers = specifiers, + .named_bindings = bindings_map, + + .writes = writes_buf, + .write_infos = write_infos_buf, + .size = size, + }; + } + + pub fn addDescriptor(self: *DescriptorLayout, spec: Specifier) void { + debug.assert(self.specifiers.items.len + 1 < self.size); + self.specifiers.appendAssumeCapacity(spec); + self.named_bindings.putAssumeCapacity(spec.@"1", self.specifiers.items.len - 1); + } + + pub fn addDescriptors(self: *DescriptorLayout, specs: []const Specifier) void { + debug.assert(self.specifiers.items.len + specs.len <= self.size); + + for (specs) |spec| { + self.specifiers.insertAssumeCapacity(@intCast(spec.@"0"), spec); + self.named_bindings.putAssumeCapacity(spec.@"1", self.specifiers.items.len - 1); + } + } + + pub fn resolve(self: *DescriptorLayout) !void { + const layout_infos = try self.allocator.alloc(vk.DescriptorSetLayoutBinding, self.size); + defer self.allocator.free(layout_infos); + + for (self.specifiers.items, layout_infos) |spec, *layout_info| { + layout_info.descriptor_count = 1; + + layout_info.binding = spec.@"0"; + layout_info.descriptor_type = spec.@"2".toVkDescriptor(); + layout_info.stage_flags = spec.@"3"; + layout_info.p_immutable_samplers = null; + } + + self.layout = try self.di.createDescriptorSetLayout(&.{ + .binding_count = @intCast(layout_infos.len), + .p_bindings = layout_infos.ptr, + }, null); + } + + pub fn deinit(self: *DescriptorLayout) void { + self.allocator.free(self.writes); + self.allocator.free(self.write_infos); + self.specifiers.deinit(self.allocator); + + if (self.layout) |l| { + self.di.destroyDescriptorSetLayout(l, null); + } + } +}; + +//TODO: Unfortunately any unit tests involving context-inditialized objects +//are impossible because the context explodes when you don't intialize it correctly +//(i.e without a window, since you shouldn't have unit tests spawn a gajillion windows if +//you can help it) + +//NOTE: Example usage +// +//pub fn thing() void { +// const ctx: Context = undefined; +// const allocator: Allocator = undefined; +// +// var layout = try DescriptorLayout.init(); +// +// try layout.addDescriptors(&.{ +// .{ 0, "Uniforms", .Uniform }, +// .{ 1, "MainTex", .Sampler }, +// .{ 2, "ComputeOutput", .Image }, +// }); +// try layout.resolve(); +// +// //NOTE: You give ownership to the descriptor when you +// var desc = try Self.init(ctx, allocator, layout); +// +// // After consideration, I have elected to prefix this family of functions +// // with "bind", as they are what associates a concrete resource +// // with the descriptor "slot" persay. In the case of image-type descriptors, +// // these functions are literally all that you need to associate a concrete +// // image with a descriptor slot, making them almost analogous with "glBindTexture" and such. 
+// desc.bindUniformsNamed("Uniforms", some_ubo); +// desc.bindSamplerNamed("MainTex", some_texture); +// desc.bindBufferNamed("BuffyTheBuffer", some_buffer); +// desc.bindImageNamed("ComputeOutput", some_storage_image); +// +// desc.update(); +//} diff --git a/src/api/descriptor_pool.zig b/src/api/descriptor_pool.zig new file mode 100644 index 0000000..059938d --- /dev/null +++ b/src/api/descriptor_pool.zig @@ -0,0 +1,306 @@ +//! This is more of a manager for descriptor pools then +//! an singular descriptor pool. +//! +//! The idea here is to have descriptors specify so called "usage hints" +//! which are meant to describe how the descriptor will be used. +//! These hints will encompass either the lifetime of the descriptor set and/or +//! the actual makeup of descriptors. + +const common = @import("common_types.zig"); +const vk = @import("vulkan"); +const std = @import("std"); +const api = @import("api.zig"); + +const Allocator = std.mem.Allocator; + +const UsageInfo = common.DescriptorUsageInfo; +const PoolUsageMap = std.EnumMap(UsageInfo.LifetimeScope, AnyPool); +const Context = @import("../context.zig"); + +const log = std.log.scoped(.desc_pool); + +const PoolErrors = error{ + /// Pool has run out of memory + OutOfMemory, + /// an internal vulkan error has occured + /// (best check validation logs on a debug build) + Internal, +}; + +const AnyPool = struct { + pub const VTable = struct { + rawReserve: *const fn ( + *anyopaque, + /// This mostly exists for the purpose of potentially having more sophisticated pool managment + /// within a single usage pool + []const UsageInfo, + []const vk.DescriptorSetLayout, + /// Descriptor sets are written into this user provided slice rather than allocation + []vk.DescriptorSet, + ) PoolErrors!void, + rawFree: *const fn ( + *anyopaque, + []const UsageInfo, + []const vk.DescriptorSet, + ) void, + + reset: *const fn ( + *anyopaque, + ) void, + }; + + vtable: *const VTable, + ctx: *anyopaque, + + pub fn rawReserve( + self: AnyPool, + usages: []const UsageInfo, + layouts: []const vk.DescriptorSetLayout, + sets: []vk.DescriptorSet, + ) PoolErrors!void { + try self.vtable.rawReserve(self.ctx, usages, layouts, sets); + } + + pub fn rawFree( + self: AnyPool, + usages: []const UsageInfo, + layouts: []const vk.DescriptorSet, + ) void { + return self.vtable.rawFree(self.ctx, usages, layouts); + } + + pub fn reset( + self: AnyPool, + ) void { + return self.vtable.reset(self.ctx); + } +}; + +const DescriptorSizeRatios = std.EnumMap(common.DescriptorType, f32); + +const DescriptorType = common.DescriptorType; +const PoolArena = struct { + const ratios = DescriptorSizeRatios.init(.{ + .Uniform = 1.0, + .Sampler = 2.0, + .StorageBuffer = 0.5, + .Image = 1.0, + }); + + inline fn calcDescriptorCount(t: DescriptorType, size: usize) u32 { + const ratio = ratios.get(t) orelse unreachable; + return @intFromFloat(ratio * @as(f32, @floatFromInt(size))); + } + fn poolSizesForCount(set_count: usize) [4]vk.DescriptorPoolSize { + return [_]vk.DescriptorPoolSize{ + .{ + .type = DescriptorType.Uniform.toVkDescriptor(), + .descriptor_count = calcDescriptorCount(.Uniform, set_count), + }, + .{ + .type = DescriptorType.Sampler.toVkDescriptor(), + .descriptor_count = calcDescriptorCount(.Sampler, set_count), + }, + .{ + .type = DescriptorType.Image.toVkDescriptor(), + .descriptor_count = calcDescriptorCount(.Image, set_count), + }, + .{ + .type = DescriptorType.StorageBuffer.toVkDescriptor(), + .descriptor_count = calcDescriptorCount(.StorageBuffer, set_count), + }, + }; + } + + 
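// Worked example (editor's sketch): with the ratios above, a pool sized for 1024 sets via
// poolSizesForCount(1024) requests 1024 uniform, 2048 combined-image-sampler,
// 1024 storage-image, and 512 storage-buffer descriptors from the vk.DescriptorPool.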
h_desc_pool: vk.DescriptorPool, + di: api.DeviceInterface, + + pub fn init(di: api.DeviceInterface, size: usize) !PoolArena { + const pool_sizes = poolSizesForCount(size); + + const pool_create_info = vk.DescriptorPoolCreateInfo{ + .max_sets = @intCast(size), + .p_pool_sizes = &pool_sizes, + .pool_size_count = pool_sizes.len, + }; + + const desc_pool = try di.createDescriptorPool( + &pool_create_info, + null, + ); + + return PoolArena{ + .h_desc_pool = desc_pool, + .di = di, + }; + } + + fn rawReserve( + ctx: *anyopaque, + usages: []const UsageInfo, + layouts: []const vk.DescriptorSetLayout, + /// Caller provides the memory for the sets (tries to allocate a set for all elements of slice) + sets: []vk.DescriptorSet, + ) PoolErrors!void { + const self = @as(*PoolArena, @ptrCast(@alignCast(ctx))); + + const alloc_info = vk.DescriptorSetAllocateInfo{ + .descriptor_pool = self.h_desc_pool, + .descriptor_set_count = @intCast(sets.len), + .p_set_layouts = layouts.ptr, + }; + + self.di.allocateDescriptorSets(&alloc_info, sets.ptr) catch |e| { + return switch (e) { + error.OutOfHostMemory, error.OutOfDeviceMemory, error.OutOfPoolMemory => PoolErrors.OutOfMemory, + error.FragmentedPool => blk: { + // TODO: possibly trigger a defragging routine here + log.debug( + "pool is too fragmented to allocate.", + .{}, + ); + break :blk PoolErrors.OutOfMemory; + }, + else => PoolErrors.Internal, + }; + }; + + _ = usages; + } + + fn rawFree( + ctx: *anyopaque, + usages: []const UsageInfo, + sets: []const vk.DescriptorSet, + ) void { + // Freeing single sets is not allowed without a flag + // and is thus a no-op for this implementation + _ = ctx; + _ = usages; + _ = sets; + } + + fn reset( + ctx: *anyopaque, + ) void { + const self = @as(*PoolArena, @ptrCast(@alignCast(ctx))); + self.di.resetDescriptorPool(self.h_desc_pool, .{}) catch + @panic("internal descriptor pool error occured"); + } + + pub fn interface(self: *PoolArena) AnyPool { + return AnyPool{ + .ctx = self, + .vtable = &AnyPool.VTable{ + .rawFree = rawFree, + .rawReserve = rawReserve, + .reset = reset, + }, + }; + } + + pub fn deinit(self: *PoolArena) void { + self.di.destroyDescriptorPool(self.h_desc_pool, null); + } +}; + +/// for now, I think it is permissible to +/// have the static pool be managed the +/// same as the transient pool via a PoolArena +/// (it just gets freed at the end) +// const StaticPool = struct { +// h_desc_pool: vk.DescriptorPool, +// }; + +pools_by_usage: PoolUsageMap, + +transient_pool: PoolArena, +//lol +scene_pool: PoolArena, +static_pool: PoolArena, + +const Self = @This(); +pub const Env = Context.Environment.EnvSubset(.{.di}); + +pub const PoolSizes = struct { + transient: usize, + scene: usize, + static: usize, +}; + +//keeps the door open for a more complicated descriptor managment +fn poolByUsage(self: *Self, usage: UsageInfo) AnyPool { + return self.pools_by_usage.get(usage.lifetime_bits) orelse unreachable; +} + +pub fn initSelf(self: *Self, env: Env, sizes: PoolSizes) !void { + const di: api.DeviceInterface = env.di; + + self.transient_pool = try PoolArena.init(di, sizes.transient); + errdefer self.transient_pool.deinit(); + + self.scene_pool = try PoolArena.init(di, sizes.scene); + errdefer self.scene_pool.deinit(); + + self.static_pool = try PoolArena.init(di, sizes.static); + errdefer self.static_pool.deinit(); + + self.pools_by_usage = PoolUsageMap.init(.{ + .Transient = self.transient_pool.interface(), + .Scene = self.scene_pool.interface(), + .Static = self.static_pool.interface(), + }); +} + +pub fn 
resetTransient(self: *Self) void { + self.transient_pool.interface().reset(); +} + +pub fn reserve( + self: *Self, + usage: UsageInfo, + layout: vk.DescriptorSetLayout, +) !vk.DescriptorSet { + const pool = self.poolByUsage(usage); + var set = [1]vk.DescriptorSet{undefined}; + try pool.rawReserve(&.{usage}, &.{layout}, set[0..]); + + return set[0]; +} + +pub fn reserveRange( + self: *Self, + usage: UsageInfo, + layouts: []const vk.DescriptorSetLayout, + sets: []vk.DescriptorSet, +) !void { + const pool = self.poolByUsage(usage); + try pool.rawReserve( &.{usage}, layouts, sets); +} + +pub fn free( + self: *Self, + // free does something different depending on the usage + usage: UsageInfo, + set: vk.DescriptorSet, +) void { + self.freeRange(&.{usage}, &.{set}); +} + +pub fn freeRange( + self: *Self, + usages: []const UsageInfo, + sets: []const vk.DescriptorSet, +) void { + for (usages, sets) |u, s| { + const pool = self.poolByUsage(u); + pool.rawFree(&.{u}, &.{s}); + } +} + +pub fn deinit(self: *Self) void { + self.scene_pool.deinit(); + self.static_pool.deinit(); + self.transient_pool.deinit(); +} + diff --git a/src/api/graphics_pipeline.zig b/src/api/graphics_pipeline.zig index 8ee7a1c..53957bc 100644 --- a/src/api/graphics_pipeline.zig +++ b/src/api/graphics_pipeline.zig @@ -1,7 +1,7 @@ const std = @import("std"); const vk = @import("vulkan"); const base = @import("base.zig"); -const util = @import("../util.zig"); +const util = @import("common").util; const shader = @import("shader.zig"); const Allocator = std.mem.Allocator; diff --git a/src/api/image.zig b/src/api/image.zig index a485795..79732b6 100644 --- a/src/api/image.zig +++ b/src/api/image.zig @@ -3,7 +3,7 @@ const vk = @import("vulkan"); const rsh = @import("rshc"); const buf = @import("buffer.zig"); -const util = @import("../util.zig"); +const util = @import("common").util; const Allocator = std.mem.Allocator; const Context = @import("../context.zig"); @@ -349,7 +349,11 @@ pub fn cmdClear( ); } -fn initSelf(self: *Self, dev: *const DeviceHandler, config: Config) !void { +fn initSelf( + self: *Self, + dev: *const DeviceHandler, + config: Config, +) !void { const image_info = vk.ImageCreateInfo{ .image_type = .@"2d", .extent = .{ diff --git a/src/api/memory/DeviceMemoryBlock.zig b/src/api/memory/DeviceMemoryBlock.zig new file mode 100644 index 0000000..e69de29 diff --git a/src/api/memory/DeviceMemoryLayout.zig b/src/api/memory/DeviceMemoryLayout.zig new file mode 100644 index 0000000..42e11d4 --- /dev/null +++ b/src/api/memory/DeviceMemoryLayout.zig @@ -0,0 +1,136 @@ +const std = @import("std"); +const vk = @import("vulkan"); +const api = @import("../api.zig"); + +const debug = std.debug; + +const Heap = @import("Heap.zig"); + +const Context = @import("../../context.zig"); + +const Layout = @This(); + +//pub const Env = Context.EnvSubset(.{.dev, .ii}); +pub const Env = struct { + dev: *const api.DeviceHandler, + ii: api.InstanceInterface, +}; + +const DeviceLayoutError = error{ + OutOfMemory, + OutOfBounds, +}; + +/// Basic properties of a memory heap... 
+/// TODO: Extend std's MultiArray to support sparse indexing +const HeapProperties = struct { + max_size: usize, + supported_type_props: vk.MemoryPropertyFlags, +}; + +raw_mem_props: vk.PhysicalDeviceMemoryProperties, +// heaps: HeapMap, + +// index tables for memory properties +prop_type_index: [vk.MAX_MEMORY_TYPES]vk.MemoryPropertyFlags, +heap_prop_index: [vk.MAX_MEMORY_HEAPS]HeapProperties, +type_heap_index: [vk.MAX_MEMORY_TYPES]u32, + +mem_type_count: usize, +mem_heap_count: usize, + +env: Env, + +pub fn init( + env: Env, +) Layout { + const dev: *const api.DeviceHandler = env.dev; + const ii: api.InstanceInterface = env.ii; + + const mem_props = ii.getPhysicalDeviceMemoryProperties(dev.h_pdev); + + var new_layout = Layout{ + .raw_mem_props = mem_props, + + .prop_type_index = undefined, + .heap_prop_index = undefined, + .type_heap_index = undefined, + + .mem_type_count = mem_props.memory_type_count, + .mem_heap_count = mem_props.memory_heap_count, + + .env = env, + }; + + for (0.., + &new_layout.prop_type_index, + &new_layout.type_heap_index, + ) |i, *prop, *h| { + prop.* = mem_props.memory_types[i].property_flags; + h.* = mem_props.memory_types[i].heap_index; + + const heap_props = &new_layout.heap_prop_index[h.*].supported_type_props; + heap_props.* = heap_props.merge(prop.*); + } + + for (0.., &new_layout.heap_prop_index) |i, *prop| { + prop.max_size = mem_props.memory_heaps[i].size; + } + + return new_layout; +} + +pub fn getBasicHeapProps(layout: *const Layout, heap: u32) ?HeapProperties { + if (heap > vk.MAX_MEMORY_HEAPS) return null; + return layout.heap_prop_index[heap]; +} + + +pub fn compatibleTypeInHeap( + layout: *const Layout, + heap: u32, + props: vk.MemoryPropertyFlags +) ?u32 { + for (0..layout.mem_type_count) |t| { + if (layout.type_heap_index[t] != heap) continue; + const type_props = layout.prop_type_index[t]; + + if (type_props.contains(props)) return @intCast(t); + } + + return null; +} + +pub fn findSupportingHeap( + layout: *const Layout, + props: vk.MemoryPropertyFlags +) ?u32 { + for (0..layout.mem_heap_count) |h| { + const heap_props = layout.heap_prop_index[h].supported_type_props; + if (heap_props.contains(props)) return @intCast(h); + } + + return null; +} + +/// You now own the heap as it is an active view of the +/// device's memory as opposed to a passive overview such as this +pub fn acquireHeap(layout: *Layout, index: u32) Heap { + debug.assert(index < layout.mem_heap_count); + + return Heap.init(.{ + .mem_layout = layout, + .di = &layout.env.dev.pr_dev, //TODO: Rename pr_dev as di + }, index); +} + + +//pub fn heapSupportsType( +// self: *const Layout, +// heap: Heap, +// type_index: usize, +//) bool { +// return null; +//} + +//pub fn typeSupportsProperty(self: *const Layout, type_index: usize, properties: vk.MemoryPropertyFlags) bool {} diff --git a/src/api/memory/VulkanPool.zig b/src/api/memory/VulkanPool.zig new file mode 100644 index 0000000..cb31cba --- /dev/null +++ b/src/api/memory/VulkanPool.zig @@ -0,0 +1,136 @@ +//! Contrary to popular belief, this is not a pool allocator, it is in fact +//! a TLSF allocator in disguise!!!!!!!! >:) \_O_O_/ +//! Additionally, this acts as the main virtualizer for raw GPU +//! memory obtained by the heap. Unlike raw heap allocations, this does +//! a better job fitting allocation requests to sanely proportioned memory blocks +//! in the respective memory type and is provided by the VirtualAllocator for user +//! allocation. +//! +//! Additionally, allocation algorithms store their bookkeeping data here, +//! 
to facilitate division of responsibility between algorithm and nasty +//! vulkan memory type handling. In this way an algorithm can just get their +//! respective control block and do stuff as normal. +//! +//! Do note, while a TLSF allocator gives great (near linear) performance +//! for small to large-ish sized allocations, users will likely still want to +//! use different allocation algorithms for a whole host of reasons, with this +//! (or a dedicated allocation) as the source for all that because of the lower +//! overall fragmentation. Examples include linear arena or stack allocators, +//! other free lists, or some form of buddy allocator, each of which +//! has it's own place where it would be best utilized. +//! +//! Honestly, the filthy casual can just get away with using the virtual allocator +//! for most cases tho... + +const common = @import("common"); +const std = @import("std"); + +const allocation = @import("allocation.zig"); + +const DeviceMemoryBlock = @import("DeviceMemoryBlock.zig"); + +const AllocationData = allocation.AllocationData; +const Allocation = allocation.Allocation; +const ReifiedAllocation = allocation.ReifiedAllocation; + +const ControlBlockTable = std.AutoHashMap(common.TypeId, ControlBlockHeader); +const AllocationTable = common.ObjectPool(AllocationData, .{}); + + +// somewhat arbitrarily chosen for 64-bit systems +// with respect to what one might expect for GPU heap sizes +// (which are generally not more than 64 gigs) +const exp_index_size: usize = @typeInfo(usize).int.bits; +const linear_index_size: usize = 8; + + +// Do note that the control block for the VulkanPool allocator +// can in fact be determined entirely at compile time using just system +// parameters for word sizes and a bit of heuristics... +// NOTE: FUTURE ME + +const Pool = @This(); + + +const Error = error { + OutOfMemory, +}; + +const ControlBlockHeader = struct { + algorithm_type: common.TypeId, + p_control: *anyopaque, +}; + +// arraylists don't play nice with arenas +control_blocks: ControlBlockTable, +control_arena: std.heap.ArenaAllocator, + +// TODO: should be able to specify handles in the args +// since just guessing here is a pain +// (luckily the partition bit works out in this case) +allocation_table: AllocationTable, +pool_blocks: SrcAllocationList, + +/// This is just to maintain an invariant for additional allocations +/// Specifically that contiguous memory ranges ought to be part of the same +/// dedicated allocation. Since this is not true in rare cases on its own, +/// I provide a table +/// in order to maintain bounded coalescence of contiguous memory ranges. +/// +/// NOTE: Thinking of dropping this in release builds once I'm sure consuming code +/// maintains the invariant properly. 
+base_memory_handles: std.SegmentedList(vk.DeviceMemory, exp_index_size); + +type_index: u32, + +pub fn init(allocation: ReifiedAllocation, host_allocator: std.mem.Allocator) Error!Pool { + var arena = std.heap.ArenaAllocator.init(host_allocator), + errdefer arena.deinit(); + + var src_allocations = SrcAllocationList{}; + errdefer src_allocations.deinit(); + + var control_blocks = try ControlBlockTable.init(host_allocator); + errdefer control_blocks.deinit(); + + const type_index = allocation.get().meta.type_index; + + (src_allocations.addOne() catch unreachable).* = allocation; + return Pool{ + .control_blocks = control_blocks, + .control_arena = arena, + .type_index = type_index, + + .src_allocations = src_allocations, + .allocation_table = try AllocationTable.initPreheated( + arena.allocator(), + 512, + ), + }; +} + +/// Technically, we could just get away with updating bookeeping information ad hoc +/// since allocations are just offsets into a memory blob. +pub fn extend(pool: *Pool, new_allocation: ReifiedAllocation) Error!Pool { + const new_allocation = src_allocations.addOne(); +} + +/// Most allocation algorithms will just call this when binding a memory pool +/// This can also allocate new control blocks if a new algorithm is used +/// Expects the type to have a control block nested as a declaration +pub fn getControlBlock( + pool: *Pool, + comptime AlgorithmType: type, + inst: *const AlgorithmType +) Error!*AlgorithmType { + if (!std.meta.hasDecl(AlgorithmType, "ControlType")) + @compileError("AlgorithmType must have ControlType decl to manage bookkeeping"); + const ControlBlock = AlgorithmType.ControlBlock; + + +} + +pub fn deinit(pool: *Pool) void { +} + +// This is the class you want to access private form shit diff --git a/src/api/memory/allocation.zig b/src/api/memory/allocation.zig new file mode 100644 index 0000000..abff602 --- /dev/null +++ b/src/api/memory/allocation.zig @@ -0,0 +1,27 @@ +const vk = @import("vulkan"); +/// Represents an individual memory allocation.. +/// Depending on the allocated memory's properties, +/// Accessing the memory might require creating a staging buffer +/// or flushing and invalidating cached host-mapped memory +/// +/// Probably should handleify this, to make allocations more resistant +/// to changes in location and such + + +pub const DeviceMemoryRange = struct { + offset: usize, + size: usize, +}; + +pub const AllocationData = struct { + src_heap: vk.DeviceMemory, + range: DeviceMemoryRange, +}; + +/// Handle for allocation data +pub const Allocation = common.Handle(AllocationData, .{ + .partition_bit = 12, +}); + +/// Reified (complete) handle for allocation data +pub const ReifiedAllocation = Allocation.Reified; diff --git a/src/api/memory/allocator.zig b/src/api/memory/allocator.zig new file mode 100644 index 0000000..dc2ce6c --- /dev/null +++ b/src/api/memory/allocator.zig @@ -0,0 +1,294 @@ +//! Vulkan-specific allocator for giving user control over host and device-side allocations. +//! Of course, the standard zig allocation interface is great but it's usage doesn't +//! fit well into device-side memory allocations, so we have this. +//! +//! I will do my best to keep the implementation of this allocator be as in-line as possible +//! to the standard memory allocation interface (if I can I will support creating +//! std.mem.Allocators). 
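// Rough usage sketch (editor's note; `ctx.mem` and `my_staging_buffer` are assumptions
// borrowed from the commented example at the bottom of this file, not a finished API):
//
//   const mem = try ctx.mem.allocate(.{
//       .req_mem_props = .{ .host_mappable = true, .lifetime_hint = .Transient },
//       .resource_owner = .{ .Buffer = my_staging_buffer },
//       .resource_type = .Linear,
//   });
//   defer ctx.mem.free(mem);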
+ +const std = @import("std"); +const vk = @import("vulkan"); +const common = @import("common"); +const api = @import("../api.zig"); + +const Context = @import("../../context.zig"); + +const allocation = @import("allocation.zig"); + +pub const Error = error{ + HostOutOfMemory, + DeviceOutOfMemory, + NoMatchingHeap, + // InvalidConfiguration, This'll be handled with asserts in debug +}; + + +const Allocation = allocation.Allocation; +const ReifiedAllocation = allocation.ReifiedAllocation; + + +const AllocationScope = enum(u2) { + /// General Purpose, Picks a free-list styled, static heap + /// (in fact, It'll likely just choose the scene heap outright) + /// This is the "idontwanttothinkaboutit" option + General, + /// Short lived, Maximum of a single frame. This creates the allocation + /// in a high-performance linear allocator (probably an arena with some bells and whistles) + /// and invalidates it (clears the buffer) every frame. Use for fire-and-forget operations + Transient, + /// Medium Lived, Persists across frames but may be freed/reused if new resources need to be loaded + /// during runtime. May use a free list or a ring-buffer with an LRU mechanism to clear potentially unused + /// resources. + Scene, + /// Never freed. Exists across all scenes, Note that freeing may be a no-op in some cases + /// So be careful to only reserve what is necessary for static memory + Static, +}; + +const ResourceLayoutType = enum(u1) { + /// The resource is expected to have a linear, + /// (i.e a contiguous array) layout, which concerns buffers. + /// This does not imply that the actual user-defined layout of a buffer needs to be a specific way, + /// moreso that the resource won't use any vulkan driver supported layout options. + Linear, + /// The resource is expected to have a varying/vulkan + /// supported layout. This applies mostly to images + Optimal, +}; + +const AllocationPropertyFlags = packed struct { + /// Whether or not memory should be directly mappable on the host + /// Otherwise, transfers and staging buffers are necessary + host_mappable: bool = false, + + /// Whether or not memory should be addressable in a sequential manner, + /// (i.e copied forwards, or maybe backwards). + seq_access: bool = false, + + /// Whether or not memory should be addressable in a random manner, + /// (indexing, partial copies, etc). This also implies sequantial access is allowed + /// so no need to set both .seq_access and this. + /// Random access will likely prefer cached memory whereas sequential access will prefer coherent. + rand_access: bool = false, + + /// How long will this allocation live? + /// This will control which heap and which allocation algorithm + /// will be used for this allocation + lifetime_hint: AllocationScope = .General, +}; + +const StructureHints = packed struct { + /// Size in bytes of the structure + /// use for untyped but otherwise fixed-sized resources. Especially if you have multiple of the same + /// so that the same pool can be used and potentially CPU/GPU time saved with initialization + /// + /// This should be an either/or with type_id, as specifying values for both prefers type_id + size: ?usize = null, + + /// Specific type ID of the structure. Use if you have fixed-sized resource of a known type + /// This in particlar will prefer a slab allocator over a generic pool. + /// For best type safe results, Most functions that allocate will accept a comptime type + /// to interpret the resulting memory in the case of direct host interaction. 
+ type_id: ?common.TypeId = null, +}; + +const DedicatedAllocationPolicyBits = enum(u2) { + Force, + Disallow, + Unspecified, +}; + +pub const AllocationConfig = packed struct { + pub const GeneralFlags = packed struct { + ///General stuff + /// Whether or not the best fit should be selected in case a perfect + /// allocator match isn't found + best_fit: bool = true, + + /// Relevant allocation is promised but may not actually be allocated until + /// the allocation is first accessed + /// NOTE: Uuuhhh, I gotta make sure this actually guaruntees a valid memory allocation + /// when its needed + lazy: bool = false, + + /// Force the allocator to perform a dedicated + /// VkAllocateMemory specifically for this allocation (renders most configuration moot) + dedicated_alloc_policy: DedicatedAllocationPolicyBits = .Unspecified, + }; + + /// This is the final layout of configuration + /// required for the allocation algorithm to actually specify allocation + pub const Resolved = packed struct { + mem_props: vk.MemoryPropertyFlags, + mem_reqs: vk.MemoryRequirements, + general_flags: GeneralFlags, + resource_type: ResourceLayoutType, + }; + + /// Directly force an allocation to be for memory with + /// the specified properties + /// This will be prioritized over any other settings + /// So use carefully. + mem_props_override: ?vk.MemoryPropertyFlags = null, + + /// Requested memory properties, includes various hints for needed + /// memory operations along with lifetimes. + /// This is used in picking a heap and memory type in the heap (rather then just mapping + /// directly to type indices) + req_mem_props: AllocationPropertyFlags, + + /// The physical requirements for a given resource, such as size, + /// alignments, and compatible memory types, you can manually specify these, + /// or use the more convenient "resource_owner" field to infer them from the given resource + resource_requirements: ?vk.MemoryRequirements = null, + + /// Directly pass in the resource owning the allocation to infer base memory requirements.. + /// Must be a valid handle + resource_owner: ?union(enum) { + Buffer: vk.Buffer, + Image: vk.Image, + } = null, + + /// Whether or not the allocation is expected to be used + /// Defaults to null in which case the resource should be explicitly specified + /// Unfortunately, vulkan has very specific requirements on how memory between buffers + /// and images ought to be laid out so the allocator will need at least some knowledge of + /// what the allocation will actually be used for. + resource_type: ?ResourceLayoutType = null, + + /// Structure hints + /// This controls whether or not to use a structured memory pool + /// based on given parameters + structure_hints: StructureHints = .{}, + + general: GeneralFlags = .{}, + + /// Flattens all nullable values and optional configuration parameters + /// into a common resolved layout. 
+ /// This tends to ignore certain information + /// in order to generate concrete allocation parameters so further config processing is + /// usually done by the configurable allocator beforehand to resolve which allocator to use + /// This is moreso "stage 2" resolution as defined in the spec + pub fn flatten(self: *const AllocationConfig) Resolved { + _ = self; + return undefined; + } +}; + + +/// Manages top-level handles to instantiated +/// heap allocations for all included memory types +/// This is one of the few allocators that directly allows for memory configuration +/// rather than just having a predefined configuration specified at initialization time. +/// Also, I don't really think this'll fit into a standard interface +/// +/// ## For Users: +/// * DO NOT use this for general purpose allocations. Think of this as the OS syscall heap allocator, +/// except worse. +/// * Every single allocation will likely consume a huge chunk of memory and likely result in +/// a separate GPU allocation, which vulkan no likey. +/// * Rather, it is better to use this as a parent allocator for a more efficient child allocator. +/// This ensures the giant memory blob is better utilized then being wasted on a single 24 byte +/// uniform buffer. +/// * Recommended not to instance this yourself, as every context's memory manager will come with +/// an instanced version +/// * This allocator uses an extremely basic algorithm which allocates and frees large blocks of memory +/// with little regard for fragmentation, making it a poor choice anyways for a general-purpose allocator +pub const VirtualAllocator = struct { + /// Env's can be manually initialized if you don't want to + /// go through the entire context init schtick + pub const Env = Context.EnvSubset(.{.mem_layout, .di}); + + allocator: std.mem.Allocator, + env: Env, + + // Free list tracking which unfortunately can't be in-place sigh + + + /// Doesn't actually do much rn but could anticipatorially do some ahead-of-time heap allocation + pub fn init( + env: Env, + allocator: std.mem.Allocator, + ) Error!VirtualAllocator { + return VirtualAllocator{ + .allocator = allocator, + .env = env, + }; + } + + /// Retrieves or creates a bookkeeping entry + /// to a given vulkan pool with specific properties (caller must provide pool) + pub fn bind(self: *VirtualAllocator, pool: *VulkanPool) Error!void { + } + + /// NOTE: This allocator doesn't support best fit re-indexing, + /// that happens at a higher level. + pub fn allocate(self: *VirtualAllocator, config: AllocationConfig) Error!Allocation { + _ = self; + _ = config; + + return undefined; + } + + /// This version returns a reified handle to the allocation table + /// which is essentially combined data and API along with pointer to the backing pool + /// for that classic OOP feel (even if it's all a lie) + pub fn allocateReified(self: *VirtualAllocator, config: AllocationConfig) Error!ReifiedAllocation { + _ = self; + _ = config; + + return undefined; + } + + pub fn free(self: *VirtualAllocator, mem: Allocation) void { + _ = self; + _ = mem; + } +}; + +// Vulkan Memory Types and Heaps: +// * There are memory heaps, which generally map to GPU/CPU physical memory +// * Each heap has a fixed budget of total bytes, which may differ from the total memory available in said heap +// * Memory types, are bound to a heap, and offer a view into said memory +// with specific properties, backed by the implementation most likely. 
+// * These properties will be backed by the implementation, supporting things +// like GPU-Host coherency, or requireing cache flush/invalidtion operations. +// Rather then just having user specify the memory types they'd like for an allocation, +// Users will give hints that better inform their actual usage, such as whether to use linear allocation, +// or to ensure the memory is host-mappable. + +// pub fn fucc() !void { +// const ctx: *const Context = Context.MagicallyInit(yadda yadda); +// +// // ....Other initialization shit... +// +// // Way 1: Just specifying memory directly +// // Assume Yadda Yadda means correct memory/init configuration +// // ctx.mem is a per-context "configurable" allocator as stated +// const buf_memory = try ctx.mem.allocate(yadda yadda); +// const my_buffer = VertexBuffer.init(ctx, buf_memory, .{yadda yadda}); +// +// // This allows users to do, for example +// // pick a specific existing allocator or create their own +// +// // Uses the same "AllocationConfig" to find a compatible allocator +// // ALSO: var my_way_better_allocator = GPUStackAllocator.init(some_other_memory_block).allocator(); +// var my_way_better_allocator = ctx.mem.findAllocator(AllocationConfig{}) orelse @panic("Reinvented the wheel too hard"); +// const mem2 = try my_way_better_allocator.allocate(.{yadda yadda}); +// const my_buffer2 = VertexBuffer.init(ctx, mem2, .{yadda yadda}); +// +// // Way 2: Asking the buffer and finding a graceful compromise +// var required: AllocationConfig = VertexBuffer.getRequiredPropertiesFor(.{some_use_case}); +// // Can fail if the buffer has a hard requirement for something +// // (OPTIONAL) +// try required.unify(AllocationConfig{my_extra_properties}); +// +// const mem3 = try ctx.mem.allocate(required); +// const my_buffer3 = VertexBuffer.init(ctx, mem3, .{yadda yadda}); +// +// +// // Way 3: Automated: +// const my_buffer4 = try VertexBuffer.initAlloc(ctx, .{yadda yadda}); +// } + +const debug = std.debug; diff --git a/src/api/memory/heap.zig b/src/api/memory/heap.zig new file mode 100644 index 0000000..8fba658 --- /dev/null +++ b/src/api/memory/heap.zig @@ -0,0 +1,156 @@ +//! A heap is a blob of memory which can +//! be partitioned into blocks of arbitrary sizes +//! It gives 0 shits about anything other then +//! allocating and deallocating memory ranges. +//! Think of it more as a view into a section of the device's memory +//! layout rather than the actual memory itself. +//! The actual memory properties are just stored in DeviceMemoryLayout +//! to prevent unnecesary duplication. + +const common = @import("common"); +const util = common.util; +const vk = @import("vulkan"); +const std = @import("std"); + +const debug = std.debug; + +const Heap = @This(); + +const Context = @import("../../context.zig"); + +pub const Env = Context.Environment.EnvSubset(.{.mem_layout, .di}); + +pub const Error = error { + IncompatibleProperties, + OutOfHostMemory, + OutOfDeviceMemory, + Unknown, + InvalidExternalHandle, + InvalidOpaqueCaptureAddressKHR, +}; + +/// Since memory type bits are a pain to set manually, +/// I'll try to keep that automated as much as I can, +/// hence the need for this equivalent struct with defualt values +pub const MemoryRequirements = struct { + size: vk.DeviceSize, + alignment: vk.DeviceSize, + memory_type_bits: ?u32 = null, +}; + +env: Env, +index: u32, +available_budget: usize, + +/// Calculates based on the budget returned by +/// the vulkan extension, or in absence, using a heuristic +/// based on the total memory of said heap. 
+fn calcAvailableBudget(env: Env, heap_index: u32) usize { + const props = env.mem_layout.getBasicHeapProps(heap_index) orelse + return 0; + return util.pct(props.max_size, 75); +} + +pub fn init(env: Env, heap_index: u32) Heap { + // figure out how much memory we can allocate + return Heap { + .env = env, + .index = heap_index, + .available_budget = calcAvailableBudget(env, heap_index), + }; +} + +inline fn matchesMask(bit: u32, mask: u32) bool { + return mask & (@as(u32, 1) << @intCast(bit)) != 0; +} + +pub fn alloc( + heap: *Heap, + mem_type: u32, + reqs: MemoryRequirements, +) Error!vk.DeviceMemory { + var resolved_reqs = reqs; + if (resolved_reqs.memory_type_bits == null) { + resolved_reqs.memory_type_bits = @as(u32, 1) << @as(u5, @intCast(mem_type)); + } + + debug.assert(mem_type < vk.MAX_MEMORY_TYPES); + debug.assert(matchesMask(mem_type, resolved_reqs.memory_type_bits.?)); + + util.assertMsg(resolved_reqs.size >= util.megabytes(@as(vk.DeviceSize, 16)), + "Due to GPU memory restrictions, small dedicated allocations must use \"allocSmall\"", + ); + + return heap.env.di.allocateMemory(&.{ + .allocation_size = resolved_reqs.size, + .memory_type_index = mem_type, + }, null); +} + +pub fn allocWithProps( + heap: *Heap, + props: vk.MemoryPropertyFlags, + reqs: MemoryRequirements, +) Error!vk.DeviceMemory { + const mem_type = heap.env.mem_layout.compatibleTypeInHeap(heap.index, props) orelse + return error.IncompatibleProperties; + + return heap.alloc(mem_type, reqs); +} + +pub fn free(heap: *Heap, mem: vk.DeviceMemory) void { + heap.env.di.freeMemory(mem, null); +} + + +// Comptime-invoked so that +// optional features can be enabled +// given required extensions are present. +// TODO: Feature sets n stuff +//pub fn requestExtensions(fs: FeatureSet) []const ?[:0]const u8 { +// return &.{ +// fs.ifEnabled("VK_EXT_memory_budget", .AccurateHeapReadouts), +// }; +//} + +const testing = std.testing; + +const ray = @import("../../root.zig"); +const TestingContext = ray.testing.MinimalVulkanContext; + +test "raw heap allocations" { + const testing_props = vk.MemoryPropertyFlags{ + .host_visible_bit = true, + .host_coherent_bit = true, + }; + var minimal_vulkan = try TestingContext.initMinimalVulkan(testing.allocator, .{ + .noscreen = true, + }); + defer minimal_vulkan.deinit(testing.allocator); + + const test_heap_index = minimal_vulkan.mem_layout.findSupportingHeap( + testing_props, + ) orelse return error.NoMatchingHeap; + + var heap = minimal_vulkan.mem_layout.acquireHeap(test_heap_index); + + + // Raw map memory for a write/read cycle + const mem_handle = try heap.allocWithProps(testing_props, .{ + .size = 128, + .alignment = 1, + }); + defer heap.free(mem_handle); + + + const mem = @as( + [*]u8, + @ptrCast(@alignCast( + try minimal_vulkan.di.mapMemory(mem_handle, 0, 128, .{}) + )), + )[0..128]; + + std.mem.copyForwards(u8, mem, "100 bottles of beer on the wall"); + + minimal_vulkan.di.unmapMemory(mem_handle); +} diff --git a/src/api/queue.zig b/src/api/queue.zig index 5bfadfc..6b0f17d 100644 --- a/src/api/queue.zig +++ b/src/api/queue.zig @@ -1,6 +1,6 @@ const std = @import("std"); const vk = @import("vulkan"); -const util = @import("../util.zig"); +const util = @import("common").util; const Context = @import("../context.zig"); diff --git a/src/api/renderpass.zig b/src/api/renderpass.zig index 91f21ec..ceea771 100644 --- a/src/api/renderpass.zig +++ b/src/api/renderpass.zig @@ -1,7 +1,7 @@ const std = @import("std"); const vk = @import("vulkan"); const base = @import("base.zig"); -const 
util = @import("../util.zig"); +const util = @import("common").util; const Context = @import("../context.zig"); const DeviceHandler = base.DeviceHandler; @@ -101,10 +101,10 @@ pub fn initAlloc( .p_attachments = attachments_list.ptr, .subpass_count = 1, - .p_subpasses = util.asManyPtr(vk.SubpassDescription, &subpass_desc), + .p_subpasses = &.{subpass_desc}, .dependency_count = 1, - .p_dependencies = util.asManyPtr(vk.SubpassDependency, &subpass_dep), + .p_dependencies =&.{subpass_dep}, }, null) catch |err| { log.err("Failed to create render pass: {!}", .{err}); return err; diff --git a/src/api/swapchain.zig b/src/api/swapchain.zig index 6ee0fdc..7fbffed 100644 --- a/src/api/swapchain.zig +++ b/src/api/swapchain.zig @@ -1,6 +1,6 @@ const std = @import("std"); const vk = @import("vulkan"); -const util = @import("../util.zig"); +const util = @import("common").util; const base = @import("base.zig"); const Context = @import("../context.zig"); @@ -142,14 +142,17 @@ pub fn init( allocator: Allocator, config: Config, ) !Self { - // TODO: Swapchain support details should probably lie within the Surface type - // instead of the device since these details mostly concern properties of the chosen - // draw surface. + // The swapchain should NEVER be initialized + // without surface support, though this implementation + // is a bit spaghetti const device: *const DeviceHandler = ctx.env(.dev); const pr_dev: *const vk.DeviceProxy = ctx.env(.di); const surface: *const SurfaceHandler = ctx.env(.surf); - const details: *const DeviceHandler.SwapchainSupportDetails = &device.swapchain_details; + if (device.swapchain_details == null) + @panic("Cannot initialize swapchain without surface support"); + + const details: *const DeviceHandler.SwapchainSupportDetails = &device.swapchain_details.?; // request an appropriate number of swapchain images var image_count: u32 = details.capabilities.min_image_count + 1; diff --git a/src/api/types.zig b/src/api/types.zig deleted file mode 100644 index 4f40141..0000000 --- a/src/api/types.zig +++ /dev/null @@ -1,9 +0,0 @@ -const vk = @import("vulkan"); - -pub const SyncInfo = struct { - fence_sig: ?vk.Fence = null, - fence_wait: ?vk.Fence = null, - - sem_sig: ?vk.Semaphore = null, - sem_wait: ?vk.Semaphore = null, -}; diff --git a/src/api/uniform.zig b/src/api/uniform.zig index 3ed0e94..89018d8 100644 --- a/src/api/uniform.zig +++ b/src/api/uniform.zig @@ -9,6 +9,7 @@ const Context = @import("../context.zig"); const AnyBuffer = buf_api.AnyBuffer; const GenericBuffer = buf_api.GenericBuffer; +/// More like undefined behaviour object LMAO pub fn UniformBuffer(T: type) type { return struct { const Self = @This(); diff --git a/src/api/vertex_buffer.zig b/src/api/vertex_buffer.zig index bf73073..63535f7 100644 --- a/src/api/vertex_buffer.zig +++ b/src/api/vertex_buffer.zig @@ -5,7 +5,7 @@ const StructInfo = TypeInfo.Struct; const vk = @import("vulkan"); const meth = @import("../math.zig"); -const util = @import("../util.zig"); +const util = @import("common").util; const buf_api = @import("buffer.zig"); const Layout = union(enum) { diff --git a/src/common/common.zig b/src/common/common.zig new file mode 100644 index 0000000..a3bb766 --- /dev/null +++ b/src/common/common.zig @@ -0,0 +1,80 @@ +const std = @import("std"); +const assert = std.debug.assert; + +const h = @import("handle.zig"); +pub const Handle = h.Handle; +pub const OpaqueHandle = h.OpaqueHandle; + +// these resemble std's pools, but they have intrinsic support for handle +// indexing +pub const ObjectPool = 
@import("object_pool.zig").ObjectPool; + +pub const config = @import("config.zig"); +pub const util = @import("util.zig"); + +/// This is cursed as hell.. +/// Returns a unique identifier for any type at comptime or runtime +/// https://github.com/ziglang/zig/issues/19858#issuecomment-2094339450 +pub const TypeId = usize; +pub fn typeId(comptime T: type) TypeId { + // Uhhh.... + return @intFromError(@field(anyerror, @typeName(T))); +} + + + +fn APIFunctionType(func: anytype) type { + const T = @TypeOf(func); + _ = T; +} + +// Type Ids shoudl only be conditionally compiled +// in a runtime safety build, otherwise all type ID checking should +// be a no-op +pub const AnyPtr = struct { + + ptr: *anyopaque, + id: TypeId, + + pub fn from(comptime T: type, ptr: *T) AnyPtr { + return .{ + .ptr = ptr, + .id = typeId(T), + }; + } + + /// Used if the underlying type is already a pointer + pub fn fromDirect(comptime T: type, ptr: T) AnyPtr { + return .{ + // Only reason const cast is here is because I'm probably gonna toss this entire type + .ptr = @constCast(ptr), + .id = typeId(T), + }; + } + + pub fn get(self: AnyPtr, comptime T: type) *T { + assert(self.id == typeId(T)); + return @as(*T, @ptrCast(@alignCast(self.ptr))); + } +}; + +/// # Overview: +/// This just remaps the first argument from a pointer to the DATA +/// type to a pointer to the API proxy type, with some minimal surrounding logic +/// in the wrapping function's body +/// +/// ## Notes: +/// Automagically handles multi-application of the internal function based +/// on the number of objects referenced by the handle. +/// FIX: This is seeming to be disgusting to implement per-spec here +/// Need an alternative method, either constraining type signatures +/// or just not automating API function binding since there isn't that much extra code +/// I need right now +pub fn APIFunction(func: anytype) APIFunctionType(func) { +} + + +comptime { + _ = @import("config.zig"); + _ = @import("object_pool.zig"); +} diff --git a/src/common/config.zig b/src/common/config.zig new file mode 100644 index 0000000..c7fd9f8 --- /dev/null +++ b/src/common/config.zig @@ -0,0 +1,533 @@ +//! 
Configurations and stuff but cooler + +const std = @import("std"); +const util = @import("common.zig").util; + +fn validateProfiles( + comptime T: type, + comptime Hint: type, +) std.builtin.Type.Enum { + const pinfo = @typeInfo(T); + if (pinfo != .@"enum") + @compileError("Invalid profile listing enum: " ++ + @typeName(T) ++ " for config type: " ++ @typeName(Hint)); + + return pinfo.@"enum"; +} + +fn validateDef( + comptime T: type, + comptime def: anytype, + comptime Hint: type, +) []const std.builtin.Type.StructField { + const info = @typeInfo(@TypeOf(def)); + + switch (info) { + .@"struct" => |*s| { + const pinfo = @typeInfo(T).@"enum"; + for (pinfo.fields) |fld| { + if (util.tryGetField(s, fld.name) == null) { + @compileError("Profile def missing field: " ++ + fld.name ++ " for config type: " ++ @typeName(Hint)); + } + } + + if (util.tryGetField(s, "Default") == null) { + @compileError("Profile def missing default field " ++ + " for config type: " ++ @typeName(Hint)); + } + }, + else => @compileError("Invalid profile def struct: " ++ + @typeName(T) ++ " for config type: " ++ @typeName(Hint)), + } + return @typeInfo(@TypeOf(def)).@"struct".fields; +} + +pub const ParameterDef = struct { + OutputType: type, + out_fld_name: [:0]const u8, + in_fld_name: [:0]const u8, + + InputType: type, + resolver: *const anyopaque, +}; + +fn ProfileDef(comptime ConfigType: type) type { + return union(enum) { + Valued: struct { + PType: ?type, + default_val: ConfigType, + // map from field names to all resolver functions (built from param data) + resolvers: ?std.StaticStringMap(ParameterDef), + }, + Function, + }; +} + +pub fn ConfigurationRegistry( + /// ConfigType can be any struct + comptime ConfigType: type, + /// Profiles must be a packed, non-sparse enum, (preferably human readable) + comptime Profiles: type, + /// profile_defs must be an anonymous struct literal + /// whose fields are named after the "Profile" enum fields + /// with an additional field called "Default" for the default configuration + /// Values can either be valid instances of the configuration struct type, + /// or one of the provided parameterization helper types + comptime profile_defs: anytype, +) type { + const profile_info = validateProfiles(Profiles, ConfigType); + const def_fields = validateDef(Profiles, profile_defs, ConfigType); + + // Extra field for the default profile + const profiles_len = profile_info.fields.len + 1; + const ProfileDefType = ProfileDef(ConfigType); + comptime var profiles: [profiles_len]ProfileDefType = .{undefined} ** profiles_len; + + for (def_fields) |fld| { + const field_index: usize = if (!std.mem.eql(u8, fld.name, "Default")) + @intFromEnum(std.meta.stringToEnum( + Profiles, + fld.name, + ) orelse unreachable) + else + (profiles_len - 1); + + profiles[field_index] = switch (fld.type) { + ParameterizedProfileDef(ConfigType) => blk: { + const parameters = @field(profile_defs, fld.name).params; + + comptime var ptype_fields: []const std.builtin.Type.StructField = &.{}; + comptime var resolvers: []const struct { []const u8, ParameterDef } = &.{}; + + for (parameters) |p| { + ptype_fields = ptype_fields ++ [_]std.builtin.Type.StructField{.{ + .name = p.in_fld_name, + .type = p.InputType, + .default_value_ptr = null, + .is_comptime = false, + .alignment = @alignOf(p.InputType), + }}; + + resolvers = resolvers ++ .{.{ p.in_fld_name, p }}; + } + + const PType = @Type(.{ + .@"struct" = std.builtin.Type.Struct{ + .fields = ptype_fields, + .decls = &.{}, + .is_tuple = false, + .layout = .auto, + }, + }); 
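// (editor's note) PType is the comptime-generated parameter struct: one field per
// ParameterDef, named after its in_fld_name, so callers can pass profile inputs as an
// anonymous struct literal that setFromProfileIndex later resolves field-by-field.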
+ + const default_val = @field(profile_defs, fld.name).default_val; + + break :blk ProfileDefType{ .Valued = .{ + .PType = PType, + .default_val = default_val, + .resolvers = std.StaticStringMap(ParameterDef) + .initComptime(resolvers), + } }; + }, + ConfigType => blk: { + const default_val: ConfigType = @field(profile_defs, fld.name); + + break :blk ProfileDefType{ .Valued = .{ + .PType = null, + .default_val = default_val, + .resolvers = null, + } }; + }, + else => { + //TODO: Check for a full mapping function signature later + @compileError("Invalid profile def entry for type: " ++ + @typeName(ConfigType)); + } + }; + } + + return struct { + const Self = @This(); + const profile_data: [profiles_len]ProfileDefType = profiles; + const default_index = profiles_len - 1; + // TODO: Check presence of fields + // and make sure the params are a struct literal. + fn comptimeValidate(params: anytype, index: usize) void { + comptime { + const params_info = @typeInfo(@TypeOf(params)); + switch (params_info) { + .@"struct" => {}, + else => @compileError("Invalid parameter literal: must be struct!"), + } + + _ = index; + } + } + pub const ConfigBuilder = struct { + underlying: ConfigType, + pub fn extend(self: *ConfigBuilder, vals: ConfigType) *ConfigBuilder { + inline for (@typeInfo(ConfigType).@"struct".fields) |*fld| { + const default_val = fld.defaultValue(); + const in_val = @field(vals, fld.name); + + // This ensures we do not overwrite fields that are left as default values. + if (!std.meta.eql(default_val, in_val)) { + @field( + self.underlying, + fld.name, + ) = @field(vals, fld.name); + } + } + + return self; + } + + pub fn combine(self: *ConfigBuilder, comptime profile: Profiles, params: anytype) *ConfigBuilder { + const other = Self.ProfileMixin(profile, params); + return self.extend(other); + } + + pub fn finalize(self: *const ConfigBuilder) ConfigType { + return self.underlying; + } + }; + + pub fn BuilderMixin(comptime profile: Profiles, params: anytype) ConfigBuilder { + return .{ + .underlying = setFromProfileIndex(@intFromEnum(profile), params), + }; + } + + fn setFromProfileIndex(comptime index: usize, params: anytype) ConfigType { + const profile_def = profile_data[index]; + return switch (profile_def) { + .Valued => |*v| blk: { + var val: ConfigType = v.default_val; + if (v.resolvers) |rmap| { + // PType is only null if there are no resolver methods... 
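+                    // e.g. for the `.Larry` profile in the tests below, PType is roughly
+                    // `struct { larry_bank_id: usize, larry_name_id: usize, larry_number: *const usize }`.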
+ const ptype_info = @typeInfo(v.PType orelse unreachable).@"struct"; + + inline for (ptype_info.fields) |*fld| { + const res = rmap.get(fld.name) orelse unreachable; + const in_val = @field(params, res.in_fld_name); + + const func = @as( + *const fn (res.InputType) res.OutputType, + @ptrCast(@alignCast(res.resolver)), + ); + + @field(val, res.out_fld_name) = func(in_val); + } + } + + break :blk val; + }, + .Function => @compileError("Function-based profiles currently unsupported!"), + }; + } + + pub fn DefaultMixin(params: anytype) ConfigType { + return setFromProfileIndex(default_index, params); + } + + pub fn ProfileMixin(comptime profile: Profiles, params: anytype) ConfigType { + return setFromProfileIndex(@intFromEnum(profile), params); + } + }; +} + +fn ParameterizedProfileDef(comptime ConfigType: type) type { + return struct { + params: []const ParameterDef, + default_val: ConfigType, + }; +} + +pub fn Parameterized( + comptime instance: anytype, + comptime params: anytype, +) ParameterizedProfileDef(@TypeOf(instance)) { + if (@typeInfo(@TypeOf(params)) != .@"struct") + @compileError("Invalid parameterset type (must be struct) " ++ + @typeName(@TypeOf(instance))); + + const pinfo = @typeInfo(@TypeOf(params)).@"struct"; + comptime var params_list: []const ParameterDef = &.{}; + + for (pinfo.fields) |fld| { + const val = @field(params, fld.name); + + params_list = params_list ++ [_]ParameterDef{.{ + .out_fld_name = fld.name, + + .OutputType = val.OutputType, + .InputType = val.InputType, + .in_fld_name = val.in_fld_name, + .resolver = val.resolver, + }}; + } + + return .{ + .params = params_list, + .default_val = instance, + }; +} + +// fn ResolverFn(comptime T: type) type {} + +// since the output field names are mapped to the left-hand side +// of an assignment, we need a temporary storage record for the rest of the data. +// This is mostly for usage ergonomics, there's no actual implementation reason +// that it has to be this way. +const IntermediateParam = struct { + OutputType: type, + InputType: type, + in_fld_name: [:0]const u8, + resolver: *const anyopaque, +}; + +//Parameters can be either: +// - Resolver functions which map from an input type to a config field +// - Direct mappings from input parameters to config fields +// (in this case, "resolver" would be null. 
+pub fn Parameter( + comptime T: type, + comptime field_name: [:0]const u8, + comptime resolver: anytype, +) IntermediateParam { + const ResolverType = @TypeOf(resolver); + const rinfo = @typeInfo(ResolverType); + + const InputType, const res = switch (rinfo) { + .@"fn" => |f| blk: { + if (f.params.len != 1) + @compileError("Resolver functions must take a single argument"); + const input_arg = f.params[0]; + + break :blk .{ input_arg.type.?, resolver }; + }, + .null => blk: { + const Container = struct { + pub fn noOp(in: T) T { + return in; + } + }; + + break :blk .{ T, Container.noOp }; + }, + else => @compileError("resolver must be a function or null"), + }; + + return IntermediateParam{ + .OutputType = T, + .InputType = InputType, + .in_fld_name = field_name, + .resolver = res, + }; +} + +const testing = std.testing; + +const TestBitField = packed struct { + bit_1: bool = false, + bit_2: bool = false, + bit_3: bool = false, + rest: u5 = 0, +}; + +const TestConfigStruct = struct { + flags: TestBitField = .{}, + + comptime_field_a: u32 = 0, + comptime_field_b: usize = 0, + name: []const u8 = "Unknown", + + pointer: *const usize = &larrys_num, + writer: ?std.io.AnyWriter = null, + + pub const Profiles = enum { + Larry, + Harry, + WriterBoy, + PartialA, + PartialB, + }; + + const larry_last_name_table = [_][]const u8{ + "Larry Larryson", + "Larry Jerryson", + "Larry The Lobster", + "Larry the Platypus", + }; + + fn resolveLarryName(index: usize) []const u8 { + return larry_last_name_table[@mod(index, larry_last_name_table.len)]; + } + + fn sideEffectyResolver(w: std.io.AnyWriter) std.io.AnyWriter { + _ = w.write("Evil function") catch @panic("Test write failed"); + return w; + } + + pub const larrys_num: usize = 1000; + + const Registry = ConfigurationRegistry(TestConfigStruct, Profiles, .{ + .Harry = TestConfigStruct{ + .flags = .{ + .bit_1 = true, + .bit_2 = false, + .bit_3 = true, + .rest = 10, + }, + .comptime_field_a = 100, + .comptime_field_b = 0, + .name = "Harry Harryson", + + .pointer = &larrys_num, + .writer = null, + }, + .Larry = Parameterized(TestConfigStruct{ + .flags = .{}, + .comptime_field_a = 6291, + .writer = null, + }, .{ + .comptime_field_b = Parameter(usize, "larry_bank_id", null), + .name = Parameter([]const u8, "larry_name_id", resolveLarryName), + .pointer = Parameter(*const usize, "larry_number", null), + }), + .WriterBoy = Parameterized(TestConfigStruct{ + .name = "Writer Boy", + }, .{ + .writer = Parameter(std.io.AnyWriter, "some_writer", sideEffectyResolver), + }), + .Default = TestConfigStruct{ + .flags = .{ + .bit_1 = false, + .bit_2 = true, + .bit_3 = false, + .rest = 2, + }, + .comptime_field_a = 123, + .comptime_field_b = 320432, + .name = "Default", + + .pointer = &larrys_num, + .writer = null, + }, + .PartialA = TestConfigStruct{ + .flags = .{ .bit_1 = true }, + .comptime_field_a = 999, + }, + .PartialB = Parameterized(TestConfigStruct{ + .comptime_field_b = 998, + .pointer = &larrys_num, + .writer = null, + }, .{ + .name = Parameter([]const u8, "partial_name", null), + }), + }); + + const from = Registry.BuilderMixin; + const profile = Registry.ProfileMixin; + const default = Registry.DefaultMixin; +}; + +test "config (default value)" { + const default = TestConfigStruct.default(.{}); + + try testing.expectEqual(default, TestConfigStruct{ + .flags = .{ + .bit_1 = false, + .bit_2 = true, + .bit_3 = false, + .rest = 2, + }, + .comptime_field_a = 123, + .comptime_field_b = 320432, + .name = "Default", + + .pointer = &TestConfigStruct.larrys_num, + .writer 
= null, + }); +} + +test "config (profile)" { + const profile = TestConfigStruct.profile(.Harry, .{}); + + try testing.expectEqual(profile, TestConfigStruct{ + .flags = .{ + .bit_1 = true, + .bit_2 = false, + .bit_3 = true, + .rest = 10, + }, + .comptime_field_a = 100, + .comptime_field_b = 0, + .name = "Harry Harryson", + + .pointer = &TestConfigStruct.larrys_num, + .writer = null, + }); +} + +test "config (parameterized profile)" { + var string: [64]u8 = .{0} ** 64; + var stream = std.io.fixedBufferStream(string[0..]); + var w = stream.writer(); + + const larry_num: usize = 12321; + + const val = TestConfigStruct.profile(.Larry, .{ + .larry_bank_id = 943, + .larry_name_id = 1, + .larry_number = &larry_num, + }); + + try testing.expectEqual(val, TestConfigStruct{ + .flags = .{}, + .comptime_field_a = 6291, + .writer = null, + .comptime_field_b = 943, + .name = "Larry Jerryson", + .pointer = &larry_num, + }); + + const writer_val = TestConfigStruct.profile(.WriterBoy, .{ + .some_writer = w.any(), + }); + + try testing.expectEqual(writer_val, TestConfigStruct{ + .name = "Writer Boy", + .writer = w.any(), + }); + + try testing.expectEqualSlices(u8, string[0..("Evil function").len], "Evil function"); +} + +test "config (value composition)" { + var val_a_b = TestConfigStruct.from(.PartialA, .{}); + const res = val_a_b.extend(.{ + .comptime_field_b = 998, + .pointer = &TestConfigStruct.larrys_num, + .writer = null, + .name = "Name Here", + }).finalize(); + + const res2 = val_a_b.combine(.PartialB, .{ + .partial_name = "Name Here", + }).finalize(); + + try testing.expectEqual(res, res2); +} + +test "overwritting values" { + var partial_val_builder = TestConfigStruct.from(.PartialA, .{}); + + // makes sure composition doesn't overwrite the original values with default values... 
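+    // (PartialA only sets `flags` and `comptime_field_a`; extending with just
+    // `comptime_field_b` must keep comptime_field_a at 999 instead of resetting it to the default 0.)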
+ const config = partial_val_builder.extend(.{ + .comptime_field_b = 10230, + }).finalize(); + + try testing.expect(config.comptime_field_a == 999); +} diff --git a/src/common/handle.zig b/src/common/handle.zig new file mode 100644 index 0000000..32d5220 --- /dev/null +++ b/src/common/handle.zig @@ -0,0 +1,114 @@ +const std = @import("std"); +const common = @import("common.zig"); + +// const ct = common.ct; + +const AnyPtr = common.AnyPtr; + +pub const Config = struct { + index_bits: usize = 32, + gen_bits: usize = 32, + + partition_bit: ?usize = null, +}; + +fn PartitionIndexType(index_bits: usize, partition: usize) type { + return if (partition == 0) + std.meta.Int(.unsigned, index_bits) + else + packed struct { + lhs: std.meta.Int(.unsigned, partition), + rhs: std.meta.Int(.unsigned, index_bits - partition), + }; +} + + +pub fn Handle( + comptime T: type, + comptime config: Config, +) type { + const partition_bit = config.partition_bit orelse 0; + + if (config.index_bits > 64) + @compileError( + "index_bits (" ++ + config.index_bits ++ + ") must be within the range of <=64"); + + if (partition_bit > config.index_bits) + @compileError("index bit partition must lie within the range of index bits"); + + const IndexType = PartitionIndexType(config.index_bits, partition_bit); + + if (config.gen_bits == 0) { + return packed struct { + const Self = @This(); + const Type = T; + + pub fn Reified(err: type, getter: *const fn (*anyopaque, Self) err!*T) type { + return packed struct { + h: Self, + p: *anyopaque, + + pub fn get(self: @This()) err!*T { + return getter(self.p, self.h); + } + + pub fn getAssumeValid(self: @This()) *T { + return getter(self.p, self.h) catch unreachable; + } + + pub fn init(h: Self, mem: *anyopaque) @This() { + return .{ + .h = h, + .p = mem, + }; + } + }; + } + + index: IndexType, + + }; + } + else { + // gen is included only if we want a generation counter + const GenType = std.meta.Int(.unsigned, config.gen_bits); + + return packed struct { + const Type = T; + const Self = @This(); + + /// Uhh duplicate im lazy + pub fn Reified(err: type, getter: *const fn (*anyopaque, Self) err!*T) type { + return packed struct { + h: Self, + p: *anyopaque, + + pub fn get(self: @This()) err!*T { + return getter(self.p, self.h); + } + + pub fn getAssumeValid(self: @This()) *T { + return getter(self.p, self.h) catch unreachable; + } + + pub fn init(h: Self, mem: *anyopaque) @This() { + return .{ + .h = h, + .p = mem, + }; + } + }; + } + + + gen: GenType, + index: IndexType, + }; + } +} + + +pub const OpaqueHandle = packed struct { +}; diff --git a/src/common/iterator.zig b/src/common/iterator.zig new file mode 100644 index 0000000..e69de29 diff --git a/src/common/object_pool.zig b/src/common/object_pool.zig new file mode 100644 index 0000000..623beeb --- /dev/null +++ b/src/common/object_pool.zig @@ -0,0 +1,544 @@ +//! Its a pool that works like a pool with handle indexing. Good for DOD +//! centralized data storage in da memory. This is the cornerstone +//! of the memory management sytem for this application. +//! +//! TODO: A great feature would be adding scoped allocations for individual pools +//! 
which match vulkan object's lifetimes (generally they are the same per-type of object)
+
+const std = @import("std");
+const common = @import("common.zig");
+const builtin = @import("builtin");
+const debug = std.debug;
+
+const Allocator = std.mem.Allocator;
+const AnyPtr = common.AnyPtr;
+const Alignment = std.mem.Alignment;
+
+const Self = @This();
+
+const PoolError = error{
+    OutOfMemory,
+    InvalidHandle,
+};
+
+const Config = struct {
+    /// Leave null for default type alignment
+    alignment: ?std.mem.Alignment = null,
+    /// Defaults to 32-bit indexing with an equally sized generation counter.
+    IndexType: type = u32,
+    /// Can the pool expand once existing resources are exhausted?
+    /// NOTE: This is a little janky in implementation for the moment
+    growable: bool = true,
+};
+
+// I didn't plan this out at all lol
+// Plan:
+// Step 1: Handles and slots are stored in separate blocks, indexed as slices in the header
+// (this is to decrease alignment padding bytes)
+// Step 1.1:
+// Step 2: Memory broken into pages (done)
+// Step 3: Pages can be created if more space is needed and growing is allowed (done)
+
+/// Finds the address offset between two separate alignments,
+/// i.e. the padding bytes required.
+fn alignForwardDiff(comptime T: type, comptime b: Alignment) comptime_int {
+    // @compileLog(@typeName(T));
+    // @compileLog(@sizeOf(T));
+    // @compileLog(b.toByteUnits());
+    return @sizeOf(T) % b.toByteUnits();
+}
+
+///TODO: Handle bit widths smaller than a single page
+fn PartitionIndexType(comptime IndexType: type, page_size: usize) type {
+    _ = page_size;
+
+    const low_bits = 12;
+    const high_bits = @typeInfo(IndexType).int.bits - low_bits;
+
+    return packed struct {
+        page_index: std.meta.Int(.unsigned, high_bits),
+        addr: std.meta.Int(.unsigned, low_bits),
+    };
+}
+
+/// Type-safe generic pool.
+/// Note that signedness is unsupported in config since there is no point
+/// in supporting negative indexing
+pub fn ObjectPool(comptime T: type, comptime config: Config) type {
+    if (@typeInfo(config.IndexType) != .int)
+        @compileError("IndexType must be an integer");
+    if (@typeInfo(config.IndexType).int.bits < 12)
+        @compileError("IndexType must have a bit-width of at least 12");
+    if (@typeInfo(config.IndexType).int.signedness != .unsigned)
+        @compileError("IndexType must be unsigned");
+
+    const resolved_alignment = if (config.alignment) |al|
+        al
+    else
+        Alignment.fromByteUnits(@alignOf(T));
+
+    return struct {
+        pub const IndexType = config.IndexType;
+        /// needs to accommodate free list nodes at least...
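+        /// (a free slot doubles as an intrusive free-list node, so every slot must be
+        /// at least @sizeOf(FreeNode) bytes, even when T itself is smaller)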
+ const SlotType = if (@sizeOf(T) > @sizeOf(FreeNode)) T else FreeNode; + const Pool = @This(); + + const FreeNode = struct { + next: ?*FreeNode, + page: *Page, + }; + + pub const Handle = common.Handle(T, .{ + .index_bits = @typeInfo(config.IndexType).int.bits, + .gen_bits = @typeInfo(config.IndexType).int.bits, + .partition_bit = 12, + }); + + fn getReified(p: *anyopaque, h: Handle) PoolError!*T { + const pool: *Pool = @ptrCast(@alignCast(p)); + return pool.get(h); + } + + pub const ReifiedHandle = Handle.Reified(PoolError, getReified); + + // These 2 functions are meant to update state in the stored handle state + // of the pool, not the handles the user gets + fn bind(h: *Handle, idx: usize, page_index: usize) void { + debug.assert(idx < std.math.maxInt(IndexType)); + debug.assert(page_index < std.math.maxInt(IndexType)); + + h.index.lhs = @intCast(idx); + h.index.rhs = @intCast(page_index); + } + + fn unbind(h: *Handle) void { + h.index.lhs = 0; + h.index.rhs = 0; + h.gen += 1; + } + + /// Pages are broken into 2 separately alligned sections of memory + /// - Handles (contiains handle information mainly gen count) + /// - Slots (contains actual data for the object stuff) + /// + /// Both are forwards aligned to their natural aligned addresses + /// for minimal padding due to a disjoint in the alignment of handles + /// and the container type + const Page = struct { + cap: usize = page_size, + index: usize, + handles: [*]Handle, // to prevent gross pointer arithmetic + slots: [*]SlotType, + }; + + const PageArray = std.ArrayListUnmanaged(*Page); + + pub const page_size = 4096; + const handle_align_padding = alignForwardDiff( + Page, + Alignment.fromByteUnits(@alignOf(Handle)), + ); + const slot_align_padding = alignForwardDiff(Handle, resolved_alignment); + + free_list: ?*FreeNode = null, + allocator: Allocator, + + page_table: PageArray, + + // tracks the currently growing page in the allocator (for reuse purposes) + current_page: usize = 0, + + fn prependFreeNode(pool: *Pool, node: *FreeNode) void { + node.next = pool.free_list; + pool.free_list = node; + } + + fn popFreeNode(pool: *Pool) ?*FreeNode { + const free_node = pool.free_list; + + if (pool.free_list) |fl| { + pool.free_list = fl.next; + } + + return free_node; + } + + fn growPageIfAllowed(pool: *Pool) PoolError!*Page { + if (!config.growable) return error.OutOfMemory; + return pool.justGrowTheFuckingPage(page_size); + } + + fn indexForNode(node: *FreeNode) usize { + const relative_addr = @intFromPtr(node) - @intFromPtr(node.page.slots); + return @divExact(relative_addr, @sizeOf(SlotType)); + } + + /// Ensures there's enough capacity without altering + /// the free list + fn ensureCapacity(pool: *Pool, cap: usize) PoolError!void { + var current_cap: usize = 0; + for (pool.page_table.items) |page| { + current_cap += page.cap; + } + + if (current_cap >= cap) return; + + const needed_cap = cap - current_cap; + const needed_pages = @divFloor(needed_cap, page_size) + 1; + + var p: *Page = undefined; + for (0..needed_pages) |_| { + p = try pool.justGrowTheFuckingPage(page_size); + } + + // Lower the final page's capacity + // So OutOfMemory happens when expected + p.cap = needed_cap % page_size; + } + + inline fn calcAllocationSize(size: usize) usize { + return size * @sizeOf(Handle) + size * @sizeOf(SlotType) + @sizeOf(Page) + + handle_align_padding + slot_align_padding; + } + + fn justGrowTheFuckingPage(pool: *Pool, size: usize) PoolError!*Page { + // If we have an extra unused page, just return that instead. 
+ // This covers the cases of pool reuse via the .reset() method + if (pool.page_table.items.len != 0 and + pool.page_table.items.len - 1 > pool.current_page) { + return pool.page_table.items[pool.current_page + 1]; + } + const allocation_size = calcAllocationSize(size); + + const new_page = try pool.allocator.alignedAlloc( + u8, + @alignOf(Page), + allocation_size, + ); + errdefer pool.allocator.free(new_page); + + const page_ptr = @as(*Page, @ptrCast(new_page.ptr)); + + const handles_addr: [*]Handle = + @ptrFromInt(@intFromPtr(page_ptr) + + @sizeOf(Page) + handle_align_padding + ); + + + const slots_addr: [*]SlotType = @ptrFromInt( + @intFromPtr(handles_addr) + + page_size * @sizeOf(Handle) + slot_align_padding + ); + + page_ptr.* = .{ + .handles = handles_addr, + .slots = slots_addr, + .index = pool.page_table.items.len, + }; + + try pool.page_table.append(pool.allocator, page_ptr); + return page_ptr; + } + + /// yeet a new free node onto the free list + pub fn allocNew(pool: *Pool) PoolError!*FreeNode { + var current_page = pool.page_table.items[pool.current_page]; + if (current_page.cap == 0) { + current_page = try pool.growPageIfAllowed(); + pool.current_page += 1; + } + + const next_index = current_page.cap - 1; + const node: *FreeNode = @ptrCast(@alignCast(¤t_page.slots[next_index])); + node.page = current_page; + + current_page.cap -= 1; + return node; + } + + /// Initialize the thing + /// A non growable pool should use initPreheated since no pool memory + /// is allocated aside from the page table in this function + pub fn init(allocator: Allocator, initial_size: usize) PoolError!Pool { + var pool = Pool{ + // This is completely arbitrary + .page_table = try PageArray.initCapacity(allocator, 32), + .allocator = allocator, + }; + + try pool.ensureCapacity(initial_size); + + return pool; + } + + /// Initialize the thing with cap guaranteed free slots before next allocation + pub fn initPreheated(allocator: Allocator, cap: usize) PoolError!Pool { + var new_pool = try init(allocator, cap); + try new_pool.ensureCapacity(cap); + new_pool.preheat(cap) catch unreachable; + + return new_pool; + } + + pub fn preheat(pool: *Pool, cap: usize) PoolError!void { + for (0..cap) |_| { + const node = try pool.allocNew(); + pool.prependFreeNode(node); + } + } + + fn fetchPageIfValid(pool: *Pool, h: Handle) PoolError!*Page { + debug.assert(h.index.rhs <= pool.page_table.items.len); + const page = pool.page_table.items[h.index.rhs]; + + const handle = page.handles[h.index.lhs]; + if (h.gen != handle.gen) return error.InvalidHandle; + + return page; + } + + // get an element outta the pool + pub fn get(pool: *Pool, h: Handle) PoolError!*T { + const page = try pool.fetchPageIfValid(h); + return @as(*T, @ptrCast(@alignCast(&page.slots[h.index.lhs]))); + } + + /// reserve a single handle and allocate if needed + pub fn reserve(pool: *Pool) PoolError!Handle { + var node = pool.popFreeNode() orelse + try pool.allocNew(); + + const index = indexForNode(node); + + const handle = &node.page.handles[index]; + bind(handle, index, @intCast(node.page.index)); + + return handle.*; + } + + /// Reserve an item from the poool and initialize it with the givne value + pub fn reserveInit(pool: *Pool, val: T) PoolError!Handle { + const h = try pool.reserve(); + const v = pool.get(h) catch unreachable; + v.* = val; + + return h; + } + + /// Reserve an item and retain memory of which pool allocated it in the handle + /// (essentially "reifying the handle" by + /// giving all the information needed to resolve the data) + 
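+        ///
+        /// Rough usage sketch (assuming a pool of some `Thing` type):
+        ///
+        ///     const h = try pool.reserveReified();
+        ///     const thing = try h.get(); // no pool reference needed at the call site
+        ///     thing.* = .{};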
pub fn reserveReified(pool: *Pool) PoolError!ReifiedHandle { + const h = try pool.reserve(); + return ReifiedHandle.init(h, pool); + } + + pub fn reserveAssumeCapacityReified(pool: *Pool) ReifiedHandle { + const h = pool.reserveAssumeCapacity(); + return ReifiedHandle.init(h, pool); + } + + /// reserve a single handle whilst assuming there is space + /// Useful if you know that the pool will have space for a reservation + /// and you don't want to deal with an unreachable failure point + /// ... Of course, it should actually be unreachable... + pub fn reserveAssumeCapacity(pool: *Pool) Handle { + var node = pool.popFreeNode() orelse unreachable; + const index = indexForNode(node); + + const handle = &node.page.handles[index]; + bind(handle, index, node.page.index); + + return handle.*; + } + + pub fn reserveAssumeCapacityInit(pool: *Pool, val: *const T) Handle { + const h = pool.reserveAssumeCapacity(); + const v = pool.get(h) orelse unreachable; + + v.* = val.*; + return h; + } + + + /// return a handle to the pool, freeing it. + pub fn free(pool: *Pool, h: Handle) void { + const page = pool.fetchPageIfValid(h) catch return; + const backing_h = &page.handles[h.index.lhs]; + const former_slot: *FreeNode = @ptrCast(&page.slots[h.index.lhs]); + + unbind(backing_h); + pool.prependFreeNode(former_slot); + } + + /// reset the pool's allocations but don't bother freeing the backing memory + /// BUG: This function is completely BORKED, lemme fix it later + pub fn reset(pool: *Pool) void { + for (pool.page_table.items) |page| { + page.cap = page_size; + } + pool.current_page = 0; + + pool.free_list = pool.allocNew() catch unreachable; + } + + /// Destroy the entire pool and free all the memory + pub fn deinit(pool: *Pool) void { + const allocation_size = calcAllocationSize(page_size); + for (pool.page_table.items) |page| { + const original_slice: []u8 = @as([*]u8, @ptrCast(@alignCast(page)))[0..allocation_size]; + pool.allocator.free(original_slice); + } + + pool.page_table.deinit(pool.allocator); + } + }; +} + +const testing = std.testing; + +const TestingTypeA = [10]u32; + +test "out of memory" { + + var pool = try ObjectPool(TestingTypeA, .{.growable = false}).init(testing.allocator, 2); + defer pool.deinit(); + + for (0..2) |_| { + _ = try pool.reserve(); + } + + try testing.expectError(error.OutOfMemory, pool.reserve()); + + + try testing.expectError(error.OutOfMemory, ObjectPool(usize, .{}).init( + testing.failing_allocator, + 10, + )); +} + +test "preheat" { + var pool = try ObjectPool(TestingTypeA, .{.growable = false}).initPreheated(testing.allocator, 32); + defer pool.deinit(); + + for (0..32) |_| { + _ = pool.reserveAssumeCapacity(); + } + + try testing.expectError(error.OutOfMemory, pool.reserve()); + + var pool2 = try ObjectPool(TestingTypeA, .{}).initPreheated(testing.allocator, 10); + defer pool2.deinit(); + //try pool2.preheat(10); + + for (0..10) |_| { + _ = pool2.reserveAssumeCapacity(); + } +} + +test "growable pool" { + const TestingTypeB = struct { + name: []const u8, + age: usize, + }; + + var pool = try ObjectPool(TestingTypeB, .{}).init(testing.allocator, 1); + defer pool.deinit(); + + + for (0..5000) |_| { + _ = try pool.reserve(); + } + + const h = try pool.reserveInit(.{ + .name = "Jerry Smith", + .age = 420690, + }); + + try testing.expectEqual((try pool.get(h)).*, TestingTypeB{ + .name = "Jerry Smith", + .age = 420690, + }); +} + +test "integrity" { + var pool = try ObjectPool(TestingTypeA, .{}).init(testing.allocator, 1024); + defer pool.deinit(); + + const h = try 
pool.reserveInit(.{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + const v = try pool.get(h); + + try testing.expectEqual(v.*, TestingTypeA{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}); + + var h2 = h; + h2.gen += 102313; + + try testing.expectError(error.InvalidHandle, pool.get(h2)); +} + + +test "small object allocations" { + const Smol = u8; + const HandleType = ObjectPool(Smol, .{}).Handle; + var handles: [1024]HandleType = undefined; + + var pool = try ObjectPool(Smol, .{}).init(testing.allocator, handles.len); + defer pool.deinit(); + + + for (0..handles.len) |i| { + const val: u8 = @intCast(@mod(i, std.math.maxInt(u8))); + handles[i] = try pool.reserveInit(val); + + try testing.expectEqual((try pool.get(handles[i])).*, val); + } + + for (0..handles.len) |i| { + pool.free(handles[i]); + try testing.expectError(error.InvalidHandle, pool.get(handles[i])); + } + +} + +test "use after free" { + var pool = try ObjectPool(u32, .{.growable = false}).init(testing.allocator, 10); + defer pool.deinit(); + + const h1 = try pool.reserveInit(42069); + try testing.expectEqualDeep((try pool.get(h1)).*, @as(u32, 42069)); + + pool.free(h1); + try testing.expectError(error.InvalidHandle, pool.get(h1)); + + const h2 = try pool.reserveInit(80085); + try testing.expectEqual((try pool.get(h2)).*, @as(u32, 80085)); + + //pool.reset(); + //try pool.preheat(20); + + //try testing.expectError(error.InvalidHandle, pool.get(h2)); +} + +test "reification" { + const TestStruct = struct { + name: []const u8, + id: usize, + }; + var pool = try ObjectPool(TestStruct, .{.growable = false}).init(testing.allocator, 128); + defer pool.deinit(); + + const h = try pool.reserveInit(.{ + .name = "larry the libster", + .id = 932932, + }); + + const h2 = ObjectPool(TestStruct, .{.growable = false}).ReifiedHandle.init(h, &pool); + + try testing.expectEqual(try pool.get(h), try h2.get()); +} diff --git a/src/common/util.zig b/src/common/util.zig new file mode 100644 index 0000000..b886c0a --- /dev/null +++ b/src/common/util.zig @@ -0,0 +1,185 @@ +//! TODO: Merge this into the common directory +//! Most of this is completely unused and useless, so +//! only a bit of this iwll be included... +const std = @import("std"); +const builtin = @import("builtin"); +pub fn asCString(rep: anytype) [*:0]const u8 { + return @as([*:0]const u8, @ptrCast(rep)); +} + +pub fn emptySlice(comptime T: type) []T { + return &[0]T{}; +} + +//FIXME: This is due for a BIG REFACTOR COMING SOON +// (because it is much worse than just &.{} which I Didn't know was a thing oops. +pub fn asManyPtr(comptime T: type, ptr: *const T) [*]const T { + return @as([*]const T, @ptrCast(ptr)); +} + +//FIXME: This is due for a BIG REFACTOR COMING SOON +// (because it is much worse than just &.{} which I Didn't know was a thing oops. +// funilly enough, &.{1, 2, 3} is shorter than span(.{1, 2, 3}). I just don't +// like reading docs + idk how. +pub fn span(v: anytype) []@TypeOf(v[0]) { + const T = @TypeOf(v[0]); + comptime var sp: [v.len]T = undefined; + for (v, 0..) 
|val, index| { + sp[index] = val; + } + + return sp[0..]; +} + +const StructInfo = std.builtin.Type.Struct; +const StructField = std.builtin.Type.StructField; +const Function = std.builtin.Type.Fn; + +// Reflection Stuff +pub fn tryGetField(info: *const StructInfo, name: []const u8) ?*const StructField { + for (info.fields) |*fld| { + if (std.mem.eql(u8, fld.name, name)) { + return fld; + } + } + + return null; +} + +pub fn fnSignatureMatches( + comptime A: type, + comptime B: type, +) bool { + const info_a = if (@typeInfo(A).@"fn") @typeInfo(A).@"fn" else return false; + const info_b = if (@typeInfo(B).@"fn") @typeInfo(B).@"fn" else return false; + + if (info_a.params.len != info_b.params.len) return false; + + for (info_a.params, info_b.params) |a, b| { + if (!std.meta.eql(a, b)) return false; + } + + return true; +} + +/// returns the percentage of a number +/// should work for all numeric types +pub fn pct(num: anytype, percentage: @TypeOf(num)) @TypeOf(num) { + return @divTrunc(num * percentage, 100); +} + +const BasicMemUnits = enum(usize) { + Bytes, + Kilobytes, + Megabytes, + Gigabytes, +}; + +/// Obviously, val should be numerical, but should +/// otherwise work with integral and floating points, +/// illegal divisions notwithstanding. +pub inline fn transformMemUnits(comptime from: BasicMemUnits, comptime to: BasicMemUnits, val: anytype) @TypeOf(val) { + const ValueType = @TypeOf(val); + const info = @typeInfo(ValueType); + + var a: ValueType = 1; + var b: ValueType = 1; + + for (0..@intFromEnum(to)) |_| { + a *= 1024; + } + for (1..@intFromEnum(from)) |_| { + b *= 1024; + } + + if (info == .int) { + return @divFloor(a, b) * val; + } else { + const fnum: ValueType = @floatFromInt(a); + const fden: ValueType = @floatFromInt(b); + + return (fnum / fden) * val; + } +} + +pub inline fn megabytes(val: anytype) @TypeOf(val) { + return transformMemUnits(.Megabytes, .Bytes, val); +} + +pub inline fn assertMsg(ok: bool, comptime msg: []const u8) void { + if (builtin.mode == .Debug or builtin.mode == .ReleaseSafe) { + if (!ok) { + @panic("Assertion failed: " ++ msg); + } + } +} + +pub fn signatureMatches(a: *const Function, b: *const Function) bool { + if (a.params.len != b.params.len) return false; + for (a.params, b.params) |p1, p2| + if (p1.type != p2.type) return false; + + if (!a.calling_convention.eql(b)) return false; + return true; +} + +///TODO: Param for ordering fields in the bitset, +/// which I don't really need now, but could be useful later +pub fn EnumToBitfield(comptime E: type) type { + const e_info = @typeInfo(E); + const default_value: bool = false; + + if (e_info != .@"enum") + @compileError("Invalid type: " ++ @typeName(E) ++ " must be an enum to convert to bitfield"); + + comptime var bit_fields: []const StructField = &.{}; + for (e_info.@"enum".fields) |fld| { + bit_fields = bit_fields ++ &[_]StructField{.{ + .type = bool, + .alignment = 0, + .is_comptime = false, + .default_value_ptr = &default_value, + .name = fld.name, + }}; + } + + const InnerBitfield = @Type(.{.@"struct" = std.builtin.Type.Struct{ + .decls = &.{}, + .fields = bit_fields, + .layout = .@"packed", + .is_tuple = false, + }}); + + const BackingInt = @typeInfo(InnerBitfield).@"struct".backing_integer orelse + @compileError("Too many fields in enum: " ++ @typeName(E)); + + return struct { + pub const EnumType = E; + pub const Bitfield = InnerBitfield; + const EnumBitfield = @This(); + + val: Bitfield = .{}, + pub fn initPopulated(vals: []const EnumType) EnumBitfield { + var new: EnumBitfield = .{}; + 
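+            // flip the bit corresponding to each supplied enum value; duplicates are harmless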
+ for (vals) |v| { + new.set(v); + } + + return new; + } + + pub fn has(bits: EnumBitfield, val: EnumType) bool { + var dumbass_copy = bits.val; + const bit: usize = @intFromEnum(val); + const int_val: BackingInt = @as(*BackingInt, @ptrCast(@alignCast(&dumbass_copy))).*; + return (int_val >> @as(u4, @intCast(bit))) & 0x1 != 0; + } + + pub fn set(bits: *EnumBitfield, val: EnumType) void { + const bits_as_int = @as(*BackingInt, @ptrCast(@alignCast(&bits.val))); + const bit = @intFromEnum(val); + bits_as_int.* |= @as(BackingInt, 1) << @as(u4, @intCast(bit)); + } + }; +} diff --git a/src/context.zig b/src/context.zig index 5815999..b3b69a7 100644 --- a/src/context.zig +++ b/src/context.zig @@ -4,9 +4,11 @@ const vk = @import("vulkan"); const glfw = @import("glfw"); const e = @import("env.zig"); +const res = @import("resource_management/res.zig"); const Allocator = std.mem.Allocator; const ExtensionNameList = std.ArrayList([*:0]const u8); +const Registry = res.Registry; const Device = api.DeviceHandler; const Instance = api.InstanceHandler; @@ -25,12 +27,24 @@ const EnvBacking = struct { inst: Ref(Instance, .{}), dev: Ref(Device, .{}), surf: Ref(Surface, .{}), + desc: Ref(api.DescriptorPool, .{ + .field = "descriptor_pool", + .mutable = true, + }), + res: Ref(res.ResourceManager, .{ + .field = "resources", + .mutable = true, + }), gi: Ref(GlobalInterface, .{ .field = "global_interface" }), ii: Ref(InstanceInterface, .{ .field = "inst_interface" }), di: Ref(DeviceInterface, .{ .field = "dev_interface" }), + + mem_layout: Ref(api.DeviceMemoryLayout, .{}), + registry: Ref(res.Registry, .{}), + }; -const Environment = e.For(EnvBacking); +pub const Environment = e.For(EnvBacking); const Self = @This(); @@ -40,9 +54,9 @@ inst: Instance, dev: Device, surf: Surface, -global_interface: *const GlobalInterface, -inst_interface: *const InstanceInterface, -dev_interface: *const DeviceInterface, +global_interface: GlobalInterface, +inst_interface: InstanceInterface, +dev_interface: DeviceInterface, graphics_queue: api.GraphicsQueue, present_queue: api.PresentQueue, @@ -54,6 +68,10 @@ compute_queue: api.ComputeQueue, allocator: Allocator, +resources: res.ResourceManager, +descriptor_pool: api.DescriptorPool, +registry: Registry, + fn ResolveEnvType(comptime field: anytype) type { return switch (@TypeOf(field)) { void => *const Environment, @@ -67,6 +85,7 @@ fn ResolveEnvType(comptime field: anytype) type { /// becomes more of a concern /// * .inst -> VkInstance (Retrieves application's VkInstance handle, generally not useful for application logic) /// * .dev -> VkDevice (Retrieves application's VkDevice handle, generally not useful for application logic) +/// * .desc -> Descriptor Pool (Application-wide descriptor pools (not much reason to access this on the user side) /// /// ### Interfaces /// * .gi -> global interface (does not require an instanced handle of any kind, generally setup such as extension querying @@ -92,8 +111,9 @@ pub fn env(self: *const Self, comptime field: anytype) ResolveEnvType(field) { pub const Config = struct { inst_extensions: []const [*:0]const u8 = &.{}, dev_extensions: []const [*:0]const u8 = &.{}, - window: *const glfw.Window, + window: ?*const glfw.Window = null, loader: glfw.GetProcAddrHandler, + management: res.ResourceManager.Config, }; /// TODO: maybe have an unmanaged variant for more fine-grained user control @@ -134,12 +154,9 @@ pub fn init(allocator: Allocator, config: Config) !*Self { // Initialize Instance and device from parameters // Later on, I'll have some better 
ways to handle ownership and lifetimes - // then just raw heap allocations lol + // then just raw heap allocations lol (Working on it) - // Would be great if errdefers worked in initializers... because I like keeping initialization - // in initializers when I can - - new.inst = try Instance.init(&.{ + new.inst = try Instance.init(.{ .instance = .{ .required_extensions = all_inst_ext.items, .validation_layers = &.{ @@ -147,9 +164,7 @@ pub fn init(allocator: Allocator, config: Config) !*Self { }, }, .allocator = allocator, - .device = undefined, .enable_debug_log = true, - .loader = config.loader, }); errdefer new.inst.deinit(); @@ -177,13 +192,26 @@ pub fn init(allocator: Allocator, config: Config) !*Self { new.ctx_env = Environment.init(new); + new.registry = try Registry.init(allocator); + try api.initRegistry(&new.registry); + + new.resources = try res.ResourceManager.init(config.management, &new.registry); + try api.DescriptorPool.initSelf(&new.descriptor_pool, new, .{ + .scene = 1024, + .static = 1024, + .transient = 1024, + }); + return new; } pub fn deinit(self: *Self) void { + self.descriptor_pool.deinit(); + self.dev.deinit(); self.surf.deinit(); self.inst.deinit(); + self.registry.deinit(); const alloc = self.allocator; alloc.destroy(self); @@ -217,3 +245,6 @@ pub fn presentFrame( const image = swapchain.image_index; try self.present_queue.present(swapchain, image, sync.sem_wait); } + + + diff --git a/src/env.zig b/src/env.zig index 0631832..e6fd647 100644 --- a/src/env.zig +++ b/src/env.zig @@ -5,13 +5,20 @@ const std = @import("std"); const RefConfig = struct { field: ?[]const u8 = null, + mutable: bool = false, }; + +fn ResolveInnerType(comptime T: type, comptime config: RefConfig) type { + const AsPtr = if (!config.mutable) *const T else *T; + + return if (@typeInfo(T) == .pointer) T else AsPtr; +} /// returns a const non-owning pointer to the object /// (might enforce a bit more memory safety here later) pub fn Ref(comptime T: type, comptime config: RefConfig) type { return struct { pub const field = config.field; - pub const InnerType = *const T; + pub const InnerType = ResolveInnerType(T, config); const Self = @This(); inner: InnerType, @@ -109,14 +116,39 @@ fn MakeRefBindings(comptime T: type) type { }; } -fn findParentFieldType(pt: type, fname: []const u8) type { +fn FindParentFieldType(pt: type, fname: []const u8) type { const fields = @typeInfo(@typeInfo(pt).pointer.child); for (fields.@"struct".fields) |fld| { if (std.mem.order(u8, fname, fld.name) == .eq) return fld.type; } - unreachable; + @compileError("Could not find matching field for name: " ++ fname ++ " (FindParentFieldInType)"); +} + +pub fn Empty() type { + return struct {}; +} + +/// Ensure LHS is a pointer +pub fn populate(lhs: anytype, rhs: anytype) void { + const lhs_info = @typeInfo(@TypeOf(lhs)); + const rhs_info = @typeInfo(@TypeOf(rhs.inner)); + + switch (lhs_info) { + .pointer => {}, + else => @compileError("lhs (" ++ @typeName(@TypeOf(lhs)) ++ ") must be a pointer"), + } + switch (rhs_info) { + .@"struct" => {}, + else => @compileError("rhs (" ++ @typeName(@TypeOf(rhs)) ++ ") must be a struct type"), + } + + inline for (rhs_info.@"struct".fields) |fld| { + if (@hasField(lhs_info.pointer.child, fld.name)) { + @field(lhs, fld.name) = @field(rhs.inner, fld.name).inner; + } + } } pub fn For(comptime T: type) type { @@ -127,6 +159,36 @@ pub fn For(comptime T: type) type { const Bindings = MakeRefBindings(T); const Self = @This(); + /// Defines an env subset type which can be automatically populated by a 
factory + pub fn EnvSubset(comptime fields: anytype) type { + comptime var field_infos: []const StructField = &.{}; + for (fields) |enum_lit| { + const matching_field = std.meta.fieldInfo(T, enum_lit); + const MatchingFieldType = matching_field.type; + + // This is janky due to how the env system mapps fields oops + const mapped_field_info = StructField{ + .default_value_ptr = null, + .type = MatchingFieldType.InnerType, + .is_comptime = false, + .alignment = @alignOf(MatchingFieldType.InnerType), + .name = matching_field.name, + }; + field_infos = field_infos ++ [1]StructField{mapped_field_info}; + } + + const SubsetType = @Type(.{ + .@"struct" = .{ + .fields = field_infos, + .decls = &.{}, + .layout = .auto, + .is_tuple = false, + }, + }); + + return SubsetType; + } + inner: T, pub fn ResolveInner(comptime field: ContextEnum) type { @@ -177,7 +239,7 @@ pub fn For(comptime T: type) type { const backing_name = bind.backing_name; const parent_name = bind.parent_name; // if the field is a pointer, simply copy it over to the inner struct - const pft = findParentFieldType(ParentType, parent_name); + const pft = FindParentFieldType(ParentType, parent_name); @field(backing, backing_name).inner = switch (@typeInfo(pft)) { .pointer => @field(val, parent_name), else => &@field(val, parent_name), @@ -189,5 +251,23 @@ pub fn For(comptime T: type) type { .inner = backing, }; } + + /// initialize the env fields directly from a structure + /// This is meant to be used mainly for testing purposes where creating + /// an entire context is unwieldy. + pub fn initRaw(val: anytype) Self { + var backing: T = undefined; + + inline for (Bindings.map) |bind| { + if (@hasField(@TypeOf(val), bind.backing_name)) { + @field(backing, bind.backing_name).inner = + @field(val, bind.backing_name); + } + } + + return Self{ + .inner = backing, + }; + } }; } diff --git a/src/glfw.zig b/src/glfw.zig index 2e8003f..160de98 100644 --- a/src/glfw.zig +++ b/src/glfw.zig @@ -18,6 +18,7 @@ pub const GetProcAddrHandler = *const (fn (vk.Instance, [*:0]const u8) callconv( pub const CLIENT_API = c.GLFW_CLIENT_API; pub const NO_API = c.GLFW_NO_API; pub const RESIZABLE = c.GLFW_RESIZABLE; +pub const VISIBLE = c.GLFW_VISIBLE; pub const GLFWwindow = c.GLFWwindow; @@ -35,11 +36,13 @@ fn ErrorOnFalse(comptime func: fn () callconv(.c) c_int, comptime err: anytype) return wrapperType.wrapper; } -pub extern fn glfwGetInstanceProcAddress(instance: vk.Instance, name: [*:0]const u8) vk.PfnVoidFunction; +extern fn glfwGetInstanceProcAddress(instance: vk.Instance, name: [*:0]const u8) vk.PfnVoidFunction; pub extern fn glfwCreateWindowSurface(instance: vk.Instance, window: *GLFWwindow, allocation_callbacks: ?*const vk.AllocationCallbacks, surface: *vk.SurfaceKHR) vk.Result; +pub const getInstanceProcAddress = glfwGetInstanceProcAddress; + pub const init = ErrorOnFalse(c.glfwInit, error.GLFWInitFailed); -pub const terminate = c.glfwTerminate; +pub const deinit = c.glfwTerminate; pub const vulkanSupported = ErrorOnFalse(c.glfwVulkanSupported, error.VulkanUnsupported); pub const getRequiredInstanceExtensions = c.glfwGetRequiredInstanceExtensions; pub const getFramebufferSize = c.glfwGetFramebufferSize; @@ -48,13 +51,16 @@ pub const setErrorCallback = c.glfwSetErrorCallback; pub const getTime = c.glfwGetTime; -pub const createWindowSurface = c.glfwCreateWindowSurface; - const glfwDestroyWindow = c.glfwDestroyWindow; const glfwWindowShouldClose = c.glfwWindowShouldClose; const glfwCreateWindow = c.glfwCreateWindow; const glfwWindowHint = 
c.glfwWindowHint; +pub fn instanceExtensions() [][*:0]const u8 { + var count: u32 = 0; + return @ptrCast(getRequiredInstanceExtensions(&count)[0..count]); +} + pub const Window = struct { handle: *GLFWwindow, diff --git a/src/math.zig b/src/math.zig index f05ce0b..2340c0c 100644 --- a/src/math.zig +++ b/src/math.zig @@ -369,4 +369,13 @@ pub const Mat4 = extern struct { return true; } + + pub fn scale(by: Vec3) Mat4 { + return create(.{ + .{by.x, 0, 0, 0}, + .{0, by.y, 0, 0}, + .{0, 0, by.z, 0}, + .{0, 0, 0, 1.0}, + }); + } }; diff --git a/src/resource_management/factory.zig b/src/resource_management/factory.zig new file mode 100644 index 0000000..8c72939 --- /dev/null +++ b/src/resource_management/factory.zig @@ -0,0 +1,186 @@ +//! Creating and destroying API types with a unified context, +//! configuration parameters, and common configurable interfaces. +const std = @import("std"); + +const Context = @import("../context.zig"); +const Registry = @import("registry.zig"); + +const testing = std.testing; +const common = @import("common"); +const cfg = common.config; +const api = @import("../api/api.zig"); +const env_api = @import("../env.zig"); + +const Factory = @This(); + +const crapi = Registry.ComptimeAPI; + +/// Since anything can be created, any sort of static resource +/// could be required from an application instance. +const Env = Context.Environment; + +env: Env, + +pub fn init(env: Env) Factory { + return Factory { + .env = env, + }; +} + +fn createInPlace( + self: *Factory, + comptime APIType: type, + inst: *APIType, + config: crapi.ResolveConfigType(APIType), +) !void { + const entry_config = crapi.GetRegistry(APIType) orelse + @compileError("Invalid API type: " ++ @typeName(APIType) ++ " (could not find registry config)"); + const EnvFields = crapi.EnvFor(APIType); + const ConfigType = entry_config.ConfigType; + const ErrorType = entry_config.InitErrors; + const InitFunc = *const fn ( + *APIType, + EnvFields, + ConfigType, + ) ErrorType!void; + const registry = self.env.get(.registry); + + const reg_entry = registry.getEntry(common.typeId(APIType)) orelse + return error.InvalidAPIType; + + var populated_env: EnvFields = undefined; + + env_api.populate(&populated_env, self.env); + // nice and safe. + const initFn: InitFunc = @ptrCast(@alignCast(reg_entry.initFn)); + + try initFn(inst, populated_env, config); +} + +fn allocManaged( + self: *Factory, + comptime APIType: type, +) !std.meta.Tuple(&.{ + *APIType, + crapi.ManagedReturnType(APIType)} +) { + const ProxyType = crapi.ManagedReturnType(APIType); + const entry_config = crapi.GetRegistry(APIType) orelse + @compileError("Invalid registry"); + + // unmanaged resource shouldn't be created using managed functions + std.debug.assert(entry_config.management != .Unmanaged); + var res = self.env.get(.res); + std.debug.print("Resource manager address in factory: {*}\n", .{res}); + + var ptr: *APIType = undefined; + var proxy: ProxyType = undefined; + + + switch (entry_config.management) { + .Transient => { + ptr = try res.createTransient(APIType); + proxy = .{.handle = ptr}; + }, + //TODO: Streamed allocations need an actual system + .Pooled, .Streamed => { + const handle = + try res.reservePooledByType(APIType); + + ptr = handle.getAssumeValid(); + proxy = .{.handle = handle}; + }, + + .Unmanaged => unreachable, + } + + return .{ptr, proxy}; +} + +/// some management modes return different handle variants or just pointers. 
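+/// (e.g. the test at the bottom of this file creates a pooled `CommandBuffer` with
+/// `.{ .one_shot = true }`; the return type is `crapi.ManagedReturnType(CommandBuffer)`,
+/// i.e. whatever Proxy type that registry entry declares.)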
+/// This depends on the handle variant of the particular type +pub fn create( + self: *Factory, + comptime APIType: type, + config: crapi.ResolveConfigType(APIType), +) !crapi.ManagedReturnType(APIType) { + const ptr, const h = try self.allocManaged(APIType); + try self.createInPlace(APIType, ptr, config); + + return h; +} + +/// Creates a new API type with a pre-populated configuration value. +/// Since, some vulkan objects can be heavy on parameterization. +pub fn createPreconfig( + self: *Factory, + comptime APIType: type, + comptime profile: crapi.ResolveConfigRegistry(APIType).Profiles, + params: anytype, +) !crapi.ManagedReturnType(APIType) { + const ConfigType = crapi.ResolveConfigType(APIType); + const config = ConfigType.profile(profile, params); + + return self.create(APIType, config); +} + +/// Creates a new object but with a user-provided allocator +pub fn createAllocated( + self: *Factory, + comptime APIType: type, + allocator: std.mem.Allocator, + config: crapi.ResolveConfigType(APIType), +) !*APIType { + const new = try allocator.create(APIType); + errdefer allocator.destroy(new); + + try self.createInPlace(APIType, new, config); + return new; +} + +pub fn createPreconfigAllocated( + self: *Factory, + comptime APIType: type, + comptime profile: crapi.ResolveConfigRegistry(APIType).Profiles, + allocator: std.mem.Allocator, + params: anytype, +) !*APIType { + const ConfigType = crapi.ResolveConfigType(APIType); + const config = ConfigType.profile(profile, params); + + return self.createAllocated(APIType, allocator, config); +} + +pub fn createInPlacePreconfig( + self: *Factory, + comptime APIType: type, + comptime profile: crapi.ResolveConfigRegistry(APIType).Profiles, + inst: *APIType, + params: anytype, +) !void { + const ConfigType = crapi.ResolveConfigType(APIType); + const config = ConfigType.profile(profile, params); + + try self.createInPlace(APIType, inst, config); +} + +const CommandBuffer = api.CommandBuffer.CommandBuffer; + +const ray_testing = @import("../root.zig").testing; + +// Ideally, we'd get this working without memory leaks +test "factory basic functionality" { + + var test_ctx: ray_testing.TestingContext = undefined; + try test_ctx.initFor(testing.allocator, .{}); + defer test_ctx.deinit(testing.allocator); + + var factory_shit = Factory.init(test_ctx.env); + + const piss = try factory_shit.create(CommandBuffer, .{.one_shot = true}); + std.debug.print("The type name is: {s}\n", .{@typeName(@TypeOf(piss))}); +} + + +test "factory unmanaged functionality" { +} diff --git a/src/resource_management/registry.zig b/src/resource_management/registry.zig new file mode 100644 index 0000000..1b136a0 --- /dev/null +++ b/src/resource_management/registry.zig @@ -0,0 +1,369 @@ +//! Registry for all existing API types containg information about +//! how they should be handled by the resource manager/API handlers +//! +//!NOTE: +//! This is globally defined and not scoped to an application context +const std = @import("std"); + +// this is an example of how api functions should be signatured +// There are 2 possible signatures, one without an allocator, +// and the other with it. The expected signature is configured by +// How the entry is specified in the type registry at compile time. 
+// Due to the function pointery nature of this crap, error unions need to be explicitly specified +// For types that require ad-hoc initializatoin allocations, +// (This is usually I sign that I messed something up) +// pub fn exampleInit( +// self: *Self, +// ctx: *Context, +// env: SomeEnvSubset, +// config: Config, +// ) error{bitch}!Self {} + +const common = @import("common"); +const Context = @import("../context.zig"); +const cfg = common.config; +const env = @import("../env.zig"); +const util = common.util; + +const Allocator = std.mem.Allocator; +const AnyPtr = common.AnyPtr; +const TypeId = common.TypeId; + +const Self = @This(); + +pub const PfnPopulateRegistry = *const fn(*Self) error{OutOfMemory}!void; + +pub const ManagementMode = enum { + Unmanaged, + Pooled, + Transient, + Streamed, +}; + +pub const EntryConfig = struct { + // type for storing data + State: type, + + // type for associating and operating on data + Proxy: type, + + // error set for init functions (omit for anyerror) + InitErrors: type = error{}, + + // the type of the configuration struct (if any) + ConfigType: type = struct {}, + // whether or not initialization depends on an allocator + management: ManagementMode = .Pooled, + + initFn: *const anyopaque, + deinitFn: *const anyopaque, +}; + +const ObjectPool = common.ObjectPool; + + +/// Comptime registry API +/// In comptime, the types themselves are the registries. +/// Hopefully, this doesn't slow compilation to a crawl... +/// The working of this registry API depends on a coherent +/// naming scheme for faster lookups +pub const ComptimeAPI = struct { + pub fn ManagedResourceType(comptime mode: ManagementMode, comptime T: type) type { + const PoolType = ObjectPool(T, .{}); + return switch (mode) { + .Pooled, .Streamed => PoolType.ReifiedHandle, + else => *T, + }; + } + + // gets the config registry, cuz thats a generic + fn ConfigRegistryFor(comptime T: type) type { + return if (@hasDecl(T, "Registry")) + @TypeOf(T.Registry) + else + @compileError("Invalid type " ++ @typeName(T) ++ " (missing config registry)"); + } + + /// Resolves the handle type from the API type as well as the management mode + pub fn ManagedReturnType(comptime T: type) type { + const registry = GetRegistry(T) orelse + @compileError("Invalid type: " ++ @typeName(T) ++ " (missing type registry)"); + + return registry.Proxy; + } + + pub fn ProxyAPIMixin(comptime T: type) type { + _ = T; + return u32; + } + + pub fn ResolveConfigRegistry(comptime ConfigType: type) ConfigRegistryFor(ConfigType) { + return ConfigType.Registry; + } + + pub fn ResolveConfigType(comptime APIType: type) type { + return if (@hasDecl(APIType, "Config")) APIType.Config else struct {}; + } + + pub fn GetRegistry(comptime APIType: type) ?EntryConfig { + if (!@hasDecl(APIType, "entry_config")) return null; + return APIType.entry_config; + } + + /// Given the type's API registry entry, returns the corresponding handle type + /// which differs depending on the management mode. 
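+    /// (Pooled/Streamed entries map to `ObjectPool(T, .{}).ReifiedHandle`,
+    /// everything else to a plain `*T`; see ManagedResourceType above.)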
+ pub fn HandleFor(comptime T: type) type { + const entry_config = GetRegistry(T) orelse + @compileError("cannot create a proxy for: " ++ @typeName(T) ++ " (no entry config)"); + return ManagedResourceType(entry_config.management, T); + } + + pub fn EnvFor(comptime T: type) type { + return if (@hasDecl(T, "Env")) T.Env else env.Empty(); + } +}; + +// this returns the meta-information of the registry entry +// (underlying function types, unique type identifier) +pub fn RegistryEntryType(comptime config: EntryConfig) type { + return struct { + // remember, these functions should be remapped first arguments to the proxy + pub const InitFnType = InitFnTemplate(config); + pub const DeinitFnType = DeinitFnTemplate(config); + pub const entry_id = common.typeId(config.State); + }; +} + +/// NOTE: this could be a lot less branchy and stupid, but +/// that would come at the cost of @Type, which is not supported +/// by LSPs. I might redo this if it turns out this template doesn't +/// get directly touched by user code. +fn InitFnTemplate(comptime config: EntryConfig) type { + const error_type = config.InitErrors orelse anyerror; + if (config.config_type) |ct| { + if (config.requires_alloc) { + return *const fn (*config.State, *const Context, Allocator, ct) error_type!void; + } else { + return *const fn (*config.State, *const Context, ct) error_type!void; + } + } else { + if (config.requires_alloc) { + return *const fn (*config.State, *const Context, Allocator) error_type!void; + } else { + return *const fn (*config.State, *const Context) error_type!void; + } + } +} + +fn DeinitFnTemplate(comptime config: EntryConfig) type { + return if (config.requires_alloc) + (*const fn (*config.State, Allocator) void) + else + (*const fn (*config.State) void); +} + +pub const RegistryEntry = struct { + type_id: TypeId, + type_name: []const u8, + size_bytes: usize, + + initFn: *const anyopaque, + deinitFn: *const anyopaque, + + management: ManagementMode, +}; + +entries: std.ArrayList(RegistryEntry), +typeid_index: std.AutoHashMap(TypeId, *const RegistryEntry), + +pub fn init(allocator: Allocator) !Self { + return .{ + .entries = std.ArrayList(RegistryEntry).init(allocator), + .typeid_index = std.AutoHashMap(TypeId, *const RegistryEntry).init(allocator), + }; +} + + +/// the type referenced by "T" must match the shape of +/// a configurable type as defined in the "CRAPI" +/// type specification +pub fn registerType( + self: *Self, + comptime T: type, +) void { + const entry_config = ComptimeAPI.GetRegistry(T) orelse + @compileError("cannot create a registry entry for type: " ++ @typeName(T) ++ " (no entry config)"); + + const entry = RegistryEntry{ + .initFn = entry_config.initFn, + .deinitFn = entry_config.deinitFn, + + .type_id = common.typeId(entry_config.State), + .type_name = @typeName(entry_config.State), + .management = entry_config.management, + .size_bytes = @sizeOf(entry_config.State), + }; + + // If the type registry fails to build, there is literally nothing to be done about it. 
+ // Probably shouldn't just panic tho :( + self.entries.append(entry) catch + @panic("Failed to build type registry due to an allocation error!"); + + self.typeid_index.put( + common.typeId(entry_config.State), + &self.entries.items[self.entries.items.len - 1], + ) catch + @panic("Failed to build type registry due to being out of memory"); +} + +pub fn getEntry( + self: *const Self, + id: TypeId, +) ?*const RegistryEntry { + return self.typeid_index.get(id); +} + +pub const PredicateFn = *const fn (*const RegistryEntry) bool; +pub const Predicate = struct { + pub fn ManagementModeIs(comptime mode: ManagementMode) PredicateFn { + const Container = struct { + pub fn predicate(entry: *const RegistryEntry) bool { + return entry.management == mode; + } + }; + + return Container.predicate; + } + + pub fn TypeIdMatches(comptime id: TypeId) PredicateFn { + const Container = struct { + pub fn predicate(entry: *const RegistryEntry) bool { + return entry.type_id == id; + } + }; + + return Container.predicate; + } +}; + +const debug = std.debug; + +pub const Query = struct { + const MAX_PREDICATES = 6; + + predicates: [MAX_PREDICATES]PredicateFn, + num_predicates: u8, + entries: []RegistryEntry, + + pub const Iterator = struct { + entries: []const RegistryEntry, + predicates: []PredicateFn, + index: usize, + + fn matches(self: *Iterator, entry: *const RegistryEntry) bool { + for (self.predicates) |p| { + if (!p(entry)) return false; + } + + return true; + } + + pub fn next(self: *Iterator) ?*const RegistryEntry { + while (self.index < self.entries.len and + !self.matches(&self.entries[self.index])) : (self.index += 1) + {} + + if (self.index < self.entries.len) { + const tmp = &self.entries[self.index]; + self.index += 1; + + return tmp; + } else { + return null; + } + } + + pub fn collect(self: *Iterator, allocator: Allocator) ?[]*const RegistryEntry { + _ = self; + _ = allocator; + @panic("Query.Iterator.collect: needs implementation"); + } + + pub fn getCount(self: *const Iterator) usize { + var dup = self.*; + var count: usize = 0; + + while (dup.next()) |_| { + count += 1; + } + + return count; + } + }; + + pub fn where(self: *Query, predicate: PredicateFn) *Query { + debug.assert(self.num_predicates < MAX_PREDICATES); + + self.predicates[@intCast(self.num_predicates)] = predicate; + self.num_predicates += 1; + + return self; + } + + pub fn iterator(self: *Query) Iterator { + return Iterator{ + .index = 0, + .entries = self.entries, + .predicates = self.predicates[0..self.num_predicates], + }; + } + + pub fn first(self: *Query) ?*const RegistryEntry { + var iter = self.iterator(); + return iter.next(); + } +}; + +/// begins an entry selection query +pub fn select(self: *Self) Query { + return Query{ + .predicates = [_]PredicateFn{undefined} ** Query.MAX_PREDICATES, + .num_predicates = 0, + .entries = self.entries.items, + }; +} + +pub fn deinit(self: *Self) void { + self.entries.deinit(); +} + +const api = @import("../api/api.zig"); +const testing = std.testing; + +const NonexistentStruct = struct {}; + +test "api entries" { + // try querying an existing entry + var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); + defer arena.deinit(); + + var reg = try Self.init(arena.allocator()); + try api.populateRegistry(®); + + var q1 = reg.select(); + const item = q1 + .where(Predicate.ManagementModeIs(.Pooled)) + .where(Predicate.TypeIdMatches(common.typeId(api.CommandBuffer.CommandBuffer))) + .first(); + + try testing.expect(item != null); + try testing.expect(item.?.type_id == 
        common.typeId(api.CommandBuffer.CommandBuffer));
+
+    // try querying for something that doesn't exist
+    var q2 = reg.select();
+    const nonexistent = q2
+        .where(Predicate.TypeIdMatches(common.typeId(NonexistentStruct)))
+        .first();
+
+    try testing.expect(nonexistent == null);
+}
diff --git a/src/resource_management/res.zig b/src/resource_management/res.zig
new file mode 100644
index 0000000..e1556c3
--- /dev/null
+++ b/src/resource_management/res.zig
@@ -0,0 +1,16 @@
+const common = @import("common");
+
+pub const Registry = @import("registry.zig");
+pub const ResourceManager = @import("resource_manager.zig");
+pub const APIFactory = @import("factory.zig").APIFactory;
+
+/// Provides a single source of truth for normal object pools,
+/// rather than just spamming the same (potentially complex) definition
+/// everywhere
+pub fn StandardObjectPool(comptime T: type) type {
+    return common.ObjectPool(T, .{});
+}
+
+comptime {
+    _ = @import("factory.zig");
+}
diff --git a/src/resource_management/resource_manager.zig b/src/resource_management/resource_manager.zig
new file mode 100644
index 0000000..734f45f
--- /dev/null
+++ b/src/resource_management/resource_manager.zig
@@ -0,0 +1,131 @@
+const std = @import("std");
+const Self = @This();
+
+const common = @import("common");
+const res_api = @import("res.zig");
+const Registry = @import("registry.zig");
+
+const Allocator = std.mem.Allocator;
+
+const TypeId = common.TypeId;
+
+const Handle = common.Handle;
+const OpaqueHandle = common.OpaqueHandle;
+const Predicate = Registry.Predicate;
+const crapi = Registry.ComptimeAPI;
+
+const MemoryPoolsTable = struct {
+    const PoolTypeMap = std.AutoArrayHashMapUnmanaged(common.TypeId, *anyopaque);
+
+    cfg: Config,
+    ///NOTE: If it turns out the type id hack I used produces consecutive values,
+    ///this can be replaced with a sparse index
+    pool_ptrs: PoolTypeMap,
+    pool_arena: std.heap.ArenaAllocator,
+
+    // the actual pools themselves are lazily initialized
+    pub fn init(cfg: Config, iter: Registry.Query.Iterator) !MemoryPoolsTable {
+        const count = iter.getCount();
+
+        var pool_map = try PoolTypeMap.init(cfg.allocator, &.{}, &.{});
+        errdefer pool_map.deinit(cfg.allocator);
+        try pool_map.ensureUnusedCapacity(cfg.allocator, count);
+
+        return MemoryPoolsTable{
+            .cfg = cfg,
+            .pool_ptrs = pool_map,
+            .pool_arena = std.heap.ArenaAllocator.init(cfg.allocator),
+        };
+    }
+
+    /// null if the API type is invalid, and panics on allocation failure since there isn't
+    /// much to be done recovery-wise
+    pub fn getPool(table: *MemoryPoolsTable, comptime T: type) ?*res_api.StandardObjectPool(T) {
+        if (crapi.GetRegistry(T) == null) return null;
+
+        const type_id = common.typeId(T);
+        std.debug.print("Did we survive?\n", .{});
+        _ = table.pool_ptrs.count();
+        std.debug.print("... Yup\n", .{});
+        const pool = table.pool_ptrs.getOrPutAssumeCapacity(
+            type_id,
+        );
+
+        if (pool.found_existing) {
+            return @as(*res_api.StandardObjectPool(T), @ptrCast(@alignCast(pool.value_ptr.*)));
+        } else {
+            // the pool struct itself also lives in pool_arena, so deinit can release
+            // everything by tearing the arena down
+            const new_pool = table.pool_arena.allocator().create(
+                res_api.StandardObjectPool(T),
+            ) catch @panic("Allocation failed (NOTE: check fixed buffer sizes/ratios)");
+            new_pool.* = res_api.StandardObjectPool(T).initPreheated(
+                table.pool_arena.allocator(),
+                table.cfg.pool_sizes,
+            ) catch @panic("Pool initialization failed due to memory exhaustion (NOTE: check fixed buffer sizes/ratios)");
+
+            pool.value_ptr.* = new_pool;
+
+            return new_pool;
+        }
+    }
+
+    pub fn deinit(table: *MemoryPoolsTable) void {
+        // NOTE: this only works because all pool memory (including the pool structs
+        // themselves) is owned by the linear arena allocator.
+        // But we might want a better strategy
+        table.pool_ptrs.deinit(table.cfg.allocator);
+        table.pool_arena.deinit();
+    }
+};
+
+const HandleArena = struct {};
+
+pub const Config = struct {
+    allocator: Allocator,
+    pool_sizes: usize,
+};
+
+fn PoolHandle(comptime T: type) type {
+    return res_api.StandardObjectPool(T).ReifiedHandle;
+}
+
+pools: MemoryPoolsTable,
+transients: HandleArena,
+
+pub fn init(config: Config, registry: *Registry) !Self {
+
+    // loop through each API registry entry
+    // and initialize the table with a pool for each entry
+    // whose (default) management strategy is tagged as "Pooled"
+    var q_entries = registry.select();
+    const entries_iter = q_entries
+        .where(Predicate.ManagementModeIs(.Pooled))
+        .iterator();
+
+    const table = try MemoryPoolsTable.init(config, entries_iter);
+
+    return Self{
+        .pools = table,
+        .transients = undefined,
+    };
+}
+
+pub fn deinit(res: *Self) void {
+    res.pools.deinit();
+}
+
+pub fn reserveTransient(res: *Self, comptime T: type) !*T {
+    _ = res;
+    @panic("reserveTransient: needs implementation");
+}
+
+pub fn reservePooledByType(res: *Self, comptime T: type) !PoolHandle(T) {
+    if (crapi.GetRegistry(T) == null) @compileError("Invalid type: " ++ @typeName(T) ++ " (missing registry)");
+    const pool = res.pools.getPool(T) orelse unreachable;
+
+    return pool.reserveReified();
+}
diff --git a/src/resource_management/unused.zig b/src/resource_management/unused.zig
new file mode 100644
index 0000000..30c39c0
--- /dev/null
+++ b/src/resource_management/unused.zig
@@ -0,0 +1,165 @@
+//! Unused code
+//! Mostly exists because I mistook a pool allocator
+//! for a free list allocator and created part of an RBtree
+//!
+//!
oops + +const RBTree = struct { + const NodeColor = enum(u8) { + Red, + Black, + }; + + ///NOTE: Probably best if this is 8-aligned + pub const Node = struct { + parent: *Node, + left: *Node = null_node, + right: *Node = null_node, + + color: NodeColor, + buf: ?[]u8, + + pub inline fn isLeaf(self: Node) bool { + return self.buf != null; + } + + pub inline fn isNull(self: Node) bool { + return self.left == null_node and self.right == null_node; + } + }; + + const null_node = &Node{ + .parent = null_node, + .left = null_node, + .right = null_node, + + .color = .Black, + .buf = null, + }; + + root: *Node, + black_height: u32 = 1, + + fn partitionBuffer(buf: []u8) struct { *Node, []u8 } { + assert(buf.len > @sizeOf(*Node)); + + const node_space = @as( + *Node, + @ptrCast(@alignCast(buf[0..@sizeOf(*Node)].ptr)), + ); + + const buf_space = buf[@sizeOf(*Node)..]; + + return .{ node_space, buf_space }; + } + + fn leftRotate() void {} + + fn rightRotate() void {} + + pub fn init(buf: []u8) RBTree { + const root, const rest = partitionBuffer(buf); + + root.* = .{ + .parent = null_node, + .color = .Black, + .buf = rest, + }; + + return .{ + .root = root, + }; + } + + pub fn insert(len: usize) void {} + + pub fn delete() void {} + + pub fn find() void {} +}; + + + +/// Red-Black Tree Tests +/// Makes a horribly unoptimized testing RB tree +/// by individually allocating and connecting nodes. +/// This function does not respect RBTree rules, and therefore must be +/// constructed correctly by hand for test cases. +const TreeTestBuilder = struct { + const Direction = enum { + Left, + Right, + }; + + root: *RBTree.Node, + allocator: Allocator, + + current_node: *RBTree.Node, + + pub fn initAlloc(allocator: Allocator, root_size: usize) TreeTestBuilder { + const root: *RBTree.Node = allocator.create(RBTree.Node) orelse + @panic("Allocation failure during test"); + + root.parent = RBTree.null_node; + root.left = RBTree.null_node; + root.right = RBTree.null_node; + root.color = .Black; + + return TreeTestBuilder{ + .root = root, + .allocator = allocator, + .current_node = root, + }; + } + + /// deep-copy every node of an existing tree while preserving structure + pub fn deepCopy(orig: *const RBTree, allocator: Allocator) TreeTestBuilder {} + + // creates a new child + pub fn child( + self: *TreeTestBuilder, + dir: Direction, + col: RBTree.NodeColor, + size: usize, + ) *TreeTestBuilder {} + + /// sets current node pointer to parent + pub fn end(self: *TreeTestBuilder) *TreeTestBuilder {} + + pub fn build(self: *TreeTestBuilder) RBTree {} +}; + + +fn expectTreeEqual(a: RBTree, b: RBTree, label: []const u8) !void { +} + +test "insertion (all cases)" { + var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); + defer arena.deinit(); + + const scratch = arena.allocator(); + + // Insertion case 1: + const initial_tree_1 = TreeTestBuilder.initAlloc(scratch, 128).build(); + var final = TreeTestBuilder.deepCopy(&initial_tree_1).build(); + final.insert(64); + + const final_tree_1 = TreeTestBuilder.initAlloc(scratch, 128) + .child(.Left, .Red, 64) + .build(); + + try expectTreeEqual(initial_tree_1, final_tree_1, "insertion case 1"); + + // Insertion case 2 (single iteration): + const initial_tree_2 = TreeTestBuilder.initAlloc(scratch, 128) + .child(.Left, .Red, 64) + .end() + .child(.Right, .Red, + // Insertion case 2 & 3: +} + +test "deletion (all cases)" {} + +test "find (included)" {} + +test "find (not included)" {} diff --git a/src/root.zig b/src/root.zig index ba97013..d391e23 100644 --- a/src/root.zig 
+++ b/src/root.zig @@ -1,5 +1,23 @@ pub const Context = @import("context.zig"); +// Low-Level vulkan wrappers pub const api = @import("api/api.zig"); + +// Linear algebra math pub const math = @import("math.zig"); -pub const util = @import("util.zig"); + +//TODO: replace the shitty utils with common +pub const common = @import("common"); + +// Resource manager NOTE: (not sure if this should be exported tbh) +pub const res = @import("resource_management/res.zig"); + +pub const testing = @import("tests/testing.zig"); + +// imports for testing +comptime { + _ = @import("resource_management/registry.zig"); + _ = @import("resource_management/res.zig"); + _ = @import("context.zig"); + _ = @import("api/api.zig"); +} diff --git a/src/test_all.zig b/src/test_all.zig deleted file mode 100644 index 38b6580..0000000 --- a/src/test_all.zig +++ /dev/null @@ -1,5 +0,0 @@ - - -comptime { - _ = @import("../src/main.zig"); -} diff --git a/src/tests/testing.zig b/src/tests/testing.zig new file mode 100644 index 0000000..034c342 --- /dev/null +++ b/src/tests/testing.zig @@ -0,0 +1,294 @@ +//! These testing utilities are intended to be used soley for testing blocks +//! and will cause a compilation error if imported outside of a testing binary. + +comptime { + const builtin = @import("builtin"); + if (!builtin.is_test) + @compileError("DO NOT USE THIS MODULE OUTSIDE TESTING BRUH"); +} + +const std = @import("std"); +const api = @import("../api/api.zig"); +const glfw = @import("glfw"); +const env = @import("../env.zig"); + +/// This sucks ass, replacing with an incremental context loader for now, +/// and a more configurable app context later. +pub const MinimalVulkanContext = struct { + const Config = struct { + noscreen: bool = false, + }; + + dev: api.DeviceHandler, + inst: api.InstanceHandler, + + di: api.DeviceInterface, + ii: api.InstanceInterface, + gi: api.GlobalInterface, + + mem_layout: api.DeviceMemoryLayout, + /// Creates an extremely stripped-down minimal + /// vulkan context without most of the core rendering utilities. + /// Do not use for normal rendering. 
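+    /// A minimal usage sketch (test-only; `allocator` is whatever the test
+    /// already has on hand):
+    ///
+    ///     const ctx = try MinimalVulkanContext.initMinimalVulkan(allocator, .{});
+    ///     defer ctx.deinit(allocator);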
+ pub fn initMinimalVulkan( + allocator: std.mem.Allocator, + config: Config, + ) !*MinimalVulkanContext { + const new_ctx = try allocator.create(MinimalVulkanContext); + + try glfw.init(); + + const min_inst_ext = [_][*:0]const u8{ + api.extensions.khr_get_physical_device_properties_2.name, + api.extensions.ext_debug_utils.name, + api.extensions.khr_surface.name, + }; + + const min_dev_ext = [_][*:0]const u8{ + api.extensions.khr_swapchain.name, + }; + + new_ctx.inst = try api.InstanceHandler.init(.{ + .instance = .{ + .required_extensions = min_inst_ext[0..], + .validation_layers = &.{ + "VK_LAYER_KHRONOS_validation", + }, + }, + .allocator = allocator, + .enable_debug_log = true, + .loader = glfw.getInstanceProcAddress, + }); + errdefer new_ctx.inst.deinit(); + + new_ctx.dev = try api.DeviceHandler.init(&new_ctx.inst, .{ + .required_extensions = min_dev_ext[0..], + .surface = null, + }); + + new_ctx.gi = &new_ctx.inst.w_db; + new_ctx.ii = &new_ctx.inst.pr_inst; + new_ctx.di = &new_ctx.dev.pr_dev; + + new_ctx.mem_layout = api.DeviceMemoryLayout.init(.{ + .dev = &new_ctx.dev, + .ii = new_ctx.ii, + }); + + _ = config; + + return new_ctx; + } + + /// Allocator should be the same allocator used to initialize + /// the structure + pub fn deinit(ctx: *MinimalVulkanContext, allocator: std.mem.Allocator) void { + ctx.dev.deinit(); + ctx.inst.deinit(); + + allocator.destroy(ctx); + + glfw.deinit(); + } +}; + +const Context = @import("../context.zig"); +const res = @import("../resource_management/res.zig"); +const util = @import("common").util; + +pub const TestingContext = struct { + const Environment = Context.Environment; + const ContextBitfield = util.EnumToBitfield(Environment.ContextEnum); + + const PfnCtxInit = *const fn (*TestingContext, *Config) anyerror!void; + + pub const Config = struct { + _selected: ContextBitfield = undefined, + populators: []const res.Registry.PfnPopulateRegistry = &.{ + api.populateRegistry, + }, + fields: []const Environment.ContextEnum = &.{ + .inst, + .surf, + .dev, + .desc, + .registry, + .res, + .mem_layout, + }, + + validation_layers: []const [*:0]const u8 = &.{ + "VK_LAYER_KHRONOS_validation", + }, + + window: ?glfw.Window = null, + desc_pool_sizes: api.DescriptorPool.PoolSizes = .{ + .transient = 128, + .scene = 128, + .static = 256, + }, + + res_pool_sizes: usize = 256, + }; + + const init_comp_dispatch = std.EnumMap(Environment.ContextEnum, PfnCtxInit).init(.{ + .inst = initInst, + .surf = initSurf, + .dev = initDev, + .desc = initDesc, + .registry = initRegistry, + .res = initRes, + .mem_layout = initMemLayout, + }); + + allocator: std.mem.Allocator, + + env: Environment, + + // backing fields + global_interface: api.GlobalInterface, + inst_interface: api.InstanceInterface, + dev_interface: api.DeviceInterface, + + dev: api.DeviceHandler, + inst: api.InstanceHandler, + surf: api.SurfaceHandler, + descriptor_pool: api.DescriptorPool, + mem_layout: api.DeviceMemoryLayout, + registry: res.Registry, + resources: res.ResourceManager, + + window: ?glfw.Window, + + fn initInst(ctx: *TestingContext, cfg: *Config) !void { + ctx.inst = try api.InstanceHandler.init(.{ .allocator = ctx.allocator, .enable_debug_log = true, .loader = glfw.getInstanceProcAddress, .instance = .{ + .validation_layers = cfg.validation_layers, + .required_extensions = &.{ + api.extensions.ext_debug_utils.name, + api.extensions.khr_surface.name, + api.extensions.khr_win_32_surface.name, + }, + } }); + + ctx.global_interface = &ctx.inst.w_db; + ctx.inst_interface = &ctx.inst.pr_inst; + } + 
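+    // NOTE: the instance extension list above hardcodes khr_win_32_surface, so
+    // this testing context currently assumes a Windows host.
+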
+ fn initDev(ctx: *TestingContext, cfg: *Config) !void { + ctx.dev = try api.DeviceHandler.init(&ctx.inst, .{ + .required_extensions = &.{ + api.extensions.khr_swapchain.name, + }, + .surface = if (cfg._selected.has(.surf)) + &ctx.surf + else + null, + }); + + ctx.dev_interface = &ctx.dev.pr_dev; + + //HACK: Env initialization should NOT be order dependent on field initialization, + //because that suddenly couples parts of the initialization process together horribly. + ctx.env = Environment.init(ctx); + + std.debug.print("Address of res: {*} vs {*}\n", .{ctx.env.get(.res), &ctx.resources}); + } + + fn initSurf(ctx: *TestingContext, cfg: *Config) !void { + glfw.Window.hints(.{ + .{ glfw.CLIENT_API, glfw.NO_API }, + .{ glfw.VISIBLE, glfw.FALSE }, + }); + + ctx.window = try glfw.Window.create(100, 200, "Dummy", null, null); + errdefer ctx.window.?.destroy(); + + ctx.surf = try api.SurfaceHandler.init(if (ctx.window) |*w| + w + else + null, &ctx.inst); + + _ = cfg; + } + + fn initDesc(ctx: *TestingContext, cfg: *Config) !void { + var e: api.DescriptorPool.Env = undefined; + env.populate(&e, ctx.env); + + _ = try ctx.dev_interface.createDescriptorPool(&.{ + // NOTE: In the future, pools and sets will be managed separately :) + .pool_size_count = 1, + .max_sets = 1, + .p_pool_sizes = &.{.{ + .type = .uniform_buffer, + .descriptor_count = 1, + }}, + }, null); + + try ctx.descriptor_pool.initSelf(e, cfg.desc_pool_sizes); + } + + fn initRes(ctx: *TestingContext, cfg: *Config) !void { + // needs the resource manager to actually exist first + // THe type registry will need to be populated from config, + // as will the general configuration for resource manager + + ctx.resources = try res.ResourceManager.init(.{ + .allocator = ctx.allocator, + .pool_sizes = cfg.res_pool_sizes, + }, &ctx.registry); + + std.debug.print("Address of res: {*} vs {*}\n", .{ctx.env.get(.res), &ctx.resources}); + _ = try ctx.resources.reservePooledByType(api.CommandBuffer.CommandBuffer); + std.debug.print("Survived A\n", .{}); + _ = try ctx.env.get(.res).reservePooledByType(api.CommandBuffer.CommandBuffer); + std.debug.print("Survived B\n", .{}); + } + + fn initMemLayout(ctx: *TestingContext, cfg: *Config) !void { + var e: api.DeviceMemoryLayout.Env = undefined; + env.populate(&e, ctx.env); + + ctx.mem_layout = api.DeviceMemoryLayout.init(e); + _ = cfg; + } + + fn initRegistry(ctx: *TestingContext, cfg: *Config) !void { + ctx.registry = try res.Registry.init(ctx.allocator); + + // populate registry with predefined sources + for (cfg.populators) |populator| { + try populator(&ctx.registry); + } + } + + /// Initializes only the specified env fields for a testing context + pub fn initFor( + ctx: *TestingContext, + allocator: std.mem.Allocator, + cfg: Config, + ) !void { + try glfw.init(); + + ctx.allocator = allocator; + + var cfg2 = cfg; + cfg2._selected = ContextBitfield.initPopulated(cfg.fields); + + + for (cfg.fields) |fld| { + const initializer = init_comp_dispatch.getAssertContains(fld); + try initializer(ctx, &cfg2); + } + } + + pub fn deinit(ctx: *TestingContext, allocator: std.mem.Allocator) void { + if (ctx.window) |w| { + w.destroy(); + } + + glfw.deinit(); + + _ = allocator; + } +}; diff --git a/src/util.zig b/src/util.zig deleted file mode 100644 index 83eb030..0000000 --- a/src/util.zig +++ /dev/null @@ -1,52 +0,0 @@ -const std = @import("std"); -pub fn asCString(rep: anytype) [*:0]const u8 { - return @as([*:0]const u8, @ptrCast(rep)); -} - -pub fn emptySlice(comptime T: type) []T { - return &[0]T{}; -} - 
-//FIXME: This is due for a BIG REFACTOR COMING SOON -// (because it is much worse than just &.{} which I Didn't know was a thing oops. -pub fn asManyPtr(comptime T: type, ptr: *const T) [*]const T { - return @as([*]const T, @ptrCast(ptr)); -} - -//FIXME: This is due for a BIG REFACTOR COMING SOON -// (because it is much worse than just &.{} which I Didn't know was a thing oops. -// funilly enough, &.{1, 2, 3} is shorter than span(.{1, 2, 3}). I just don't -// like reading docs + idk how. -pub fn span(v: anytype) [] @TypeOf(v[0]) { - const T = @TypeOf(v[0]); - comptime var sp: [v.len] T = undefined; - for (v, 0..) |val, index| { - sp[index] = val; - } - - return sp[0..]; -} - -const StructInfo = std.builtin.Type.Struct; -const StructField = std.builtin.Type.StructField; -const Function = std.builtin.Type.Fn; - -// Reflection Stuff -pub fn tryGetField(info: *StructInfo, name: []const u8) ?*StructField { - for (info.fields) |*fld| { - if (std.mem.eql(u8, fld.name, name) == .eq) { - return fld; - } - } - - return null; -} - -pub fn signatureMatches(a: *const Function, b: *const Function) bool { - if (a.params.len != b.params.len) return false; - for (a.params, b.params) |p1, p2| - if (p1.type != p2.type) return false; - - if (!a.calling_convention.eql(b)) return false; - return true; -} diff --git a/test/test_env.zig b/test/test_env.zig deleted file mode 100644 index 92fce2b..0000000 --- a/test/test_env.zig +++ /dev/null @@ -1,54 +0,0 @@ -const std = @import("std"); - -const expect = std.testing.expect; - -var testing_allocator = std.heap.DebugAllocator(.{ .safety = true }).init; - -test "context initialization" { - const alloc = testing_allocator.allocator(); - const window = try makeBasicWindow(900, 600, "Actual Test Window"); - defer glfw.terminate(); - defer window.destroy(); - - const ctx = try Context.init(alloc, .{ - .window = &window, - .loader = glfw.glfwGetInstanceProcAddress, - .inst_extensions = glfwInstanceExtensions(), - }); - defer ctx.deinit(); -} - -test "environment querying" { - const alloc = testing_allocator.allocator(); - const window = try makeBasicWindow(900, 600, "Actual Actual Test Window"); - defer glfw.terminate(); - defer window.destroy(); - - const ctx = try Context.init(alloc, .{ - .window = &window, - .loader = glfw.glfwGetInstanceProcAddress, - .inst_extensions = glfwInstanceExtensions(), - }); - defer ctx.deinit(); - - const global_interface: *const ray.api.GlobalInterface = ctx.env(.gi); - const instance_interface = ctx.env(.ii); - const dev_interface = ctx.env(.di); - - std.debug.print("global_interface type: {s}\n", .{@typeName(@TypeOf(global_interface))}); - std.debug.print("instance_interface type: {s}\n", .{@typeName(@TypeOf(instance_interface))}); - std.debug.print("dev_interface type: {s}\n", .{@typeName(@TypeOf(dev_interface))}); - - //TODO: Do some rudimentary vulkan API test call. 
- const available = global_interface.enumerateInstanceExtensionPropertiesAlloc( - null, - alloc, - ) catch { - return error.ExtensionEnumerationFailed; - }; - defer alloc.free(available); -} - -test "basic vulkan type creation" { - try expect(true); -} diff --git a/test/test_math.zig b/test/test_math.zig deleted file mode 100644 index 04fb618..0000000 --- a/test/test_math.zig +++ /dev/null @@ -1,125 +0,0 @@ -const meth = @import("ray").meth; - -const std = @import("std"); - -const Mat4 = meth.Mat4; -const vec = meth.vec; - -const expect = std.testing.expect; -// NOTE: I'm adding unit tests for the math library straight up -// because just debugging these by running the application would fucking -// suck... -const test_log = std.log.scoped(.math_tests); - -fn assertEqual(matrices: anytype, op: []const u8) !void { - const first = matrices[0]; - inline for (matrices) |v| { - if (!Mat4.eql(first, v)) { - test_log.err( - "Matrix {s} failed: \nExpected: \n{s} \nGot: \n{s}", - .{ op, first, v }, - ); - - return error.MatricesNotEqualWTFBro; - } - } -} - -const glm = @cImport({ - @cInclude("cglm/struct.h"); -}); - -test "matrix multiplication" { - const mat1 = Mat4.create(.{ - .{ 1.0, 2.0, 3.0, 4.0 }, - .{ 3.0, 2.0, 1.0, 1.0 }, - .{ 1.0, 2.0, 3.0, 2.0 }, - .{ 2.0, 3.0, 7.0, 3.0 }, - }); - - const mat2 = Mat4.create(.{ - .{ 4.0, 5.0, 6.0, 7.0 }, - .{ 6.0, 5.0, 4.0, 3.0 }, - .{ 4.0, 6.0, 5.0, 9.0 }, - .{ 2.0, 8.0, 5.0, 3.0 }, - }); - - const res = Mat4.mul(mat1, mat2); - - const correct = Mat4.create(.{ - .{ 36.0, 65.0, 49.0, 52.0 }, - .{ 30.0, 39.0, 36.0, 39.0 }, - .{ 32.0, 49.0, 39.0, 46.0 }, - .{ 60.0, 91.0, 74.0, 95.0 }, - }); - - try assertEqual(.{ correct, res }, "multiplication"); -} - -test "matrix ordering" { - // make sure the bloody matrices are column-major ordered - const mat1 = Mat4.create(.{ - .{1.0, 2.0, 3.0, 4.0}, - .{1.0, 2.0, 3.0, 4.0}, - .{1.0, 2.0, 3.0, 4.0}, - .{1.0, 2.0, 3.0, 4.0}, - }); - - for (mat1.data, 0..) |col, v| { - for (col) |item| { - const fv: f32 = @floatFromInt(v + 1); - if (item != fv) { - test_log.err("Matrix ordering incorrect! 
{d} != {d}", .{item, fv}); - test_log.err("Matrix: \n{s}", .{mat1}); - return error.NotColumnMajorBro; - } - } - } -} - -test "matrix translation" { - const mat1 = Mat4.identity().translate(vec(.{ 10.0, 20.0, 30.0 })); - const res = Mat4.create(.{ - .{ 1.0, 0.0, 0.0, 10.0 }, - .{ 0.0, 1.0, 0.0, 20.0 }, - .{ 0.0, 0.0, 1.0, 30.0 }, - .{ 0.0, 0.0, 0.0, 1.0 }, - }); - - try assertEqual(.{ res, mat1 }, "translation"); -} - -test "matrix rotation" { - const rot = meth.radians(45.0); - const mat1 = Mat4.identity().rotateZ(rot); - var res = Mat4.identity(); - - const mat = glm.glms_rotate_z(glm.glms_mat4_identity(), rot); - res.data = mat.raw; - - try assertEqual(.{ res, mat1 }, "Z rotation"); -} - -test "matrix projection" { - const mat1 = Mat4.perspective(meth.radians(75.0), 600.0 / 900.0, 0.1, 30.0); - const res = Mat4.create(.{ - .{ 1.9548, 0.0000, 0.0000, 0.0000 }, - .{ 0.0000, 1.3032, 0.0000, 0.0000 }, - .{ 0.0000, 0.0000, -1.0033, -1.0000 }, - .{ 0.0000, 0.0000, -0.1003, 0.0000 }, - }).transpose(); - - try assertEqual(.{ res, mat1 }, "perspective projection"); -} - -test "matrix view" { - const mat1 = Mat4.lookAt(vec(.{ 2.0, 2.0, 2.0 }), vec(.{ 0, 0, 0 }), meth.Vec3.global_up); - const res = Mat4.create(.{ - .{ 0.7071, -0.4082, -0.5774, 0.0000 }, - .{ -0.7071, -0.4082, -0.5774, 0.0000 }, - .{ 0.0000, 0.8165, -0.5774, 0.0000 }, - .{ -0.0000, -0.0000, -3.4641, 1.0000 }, - }).transpose(); - - try assertEqual(.{ res, mat1 }, "look at"); -} diff --git a/textures/clear.png b/textures/clear.png new file mode 100644 index 0000000..6d649b6 Binary files /dev/null and b/textures/clear.png differ
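For orientation, here is a minimal sketch (not part of the patch) of how the new src/resource_management pieces are meant to be driven from consumer code, mirroring the registry unit test above. The `ray` import name comes from src/root.zig, the pool size is a placeholder, and error handling is left to `try`:

const std = @import("std");
const ray = @import("ray");
const res = ray.res;
const api = ray.api;

test "resource registry + manager round trip (sketch)" {
    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer arena.deinit();
    const allocator = arena.allocator();

    // Build the type registry from the API's predefined entries.
    var registry = try res.Registry.init(allocator);
    defer registry.deinit();
    try api.populateRegistry(&registry);

    // Walk every entry whose default management strategy is pooled.
    var query = registry.select();
    var iter = query.where(res.Registry.Predicate.ManagementModeIs(.Pooled)).iterator();
    while (iter.next()) |entry| {
        std.debug.print("pooled: {s} ({d} bytes)\n", .{ entry.type_name, entry.size_bytes });
    }

    // Hand the registry to the resource manager and reserve a pooled handle.
    var manager = try res.ResourceManager.init(.{
        .allocator = allocator,
        .pool_sizes = 64, // placeholder
    }, &registry);
    defer manager.deinit();

    _ = try manager.reservePooledByType(api.CommandBuffer.CommandBuffer);
}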