40 changes: 31 additions & 9 deletions wgpu-hal/src/gles/command.rs
@@ -3,7 +3,7 @@ use core::{mem, ops::Range};

use arrayvec::ArrayVec;

use super::{conv, Command as C};
use super::{conv, BufferBacking, Command as C};

#[derive(Clone, Copy, Debug, Default)]
struct TextureSlotDesc {
@@ -291,9 +291,13 @@ impl crate::CommandEncoder for super::CommandEncoder {
if !bar.usage.from.contains(wgt::BufferUses::STORAGE_READ_WRITE) {
continue;
}
let buffer = match &bar.buffer.backing {
&BufferBacking::Gl { raw } | &BufferBacking::GlCachedOnHost { raw, .. } => raw,
BufferBacking::Host { .. } => unreachable!(),
};
Comment on lines +294 to +297
Suggested change
let buffer = match &bar.buffer.backing {
&BufferBacking::Gl { raw } | &BufferBacking::GlCachedOnHost { raw, .. } => raw,
BufferBacking::Host { .. } => unreachable!(),
};
let buffer = match bar.buffer.backing {
BufferBacking::Gl { raw } | &BufferBacking::GlCachedOnHost { raw, .. } => raw,
BufferBacking::Host { .. } => unreachable!(),
};

This feels like a weird reference-dereference? If it needs a ref, could we use ref raw?

Also, in general I don't like taking references in match expressions, preferring to explicitly use ref/ref mut. This is a slightly less conservative version of the clippy lint against match ergonomics.
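
For context on the two styles being discussed, a minimal, self-contained sketch (hypothetical Backing/RawBuf types, not the wgpu-hal ones) contrasting a reference pattern with an explicit ref binding:

// Hypothetical types for illustration only; RawBuf stands in for glow::Buffer.
#[derive(Clone, Copy)]
struct RawBuf(u32);

enum Backing {
    Gl { raw: RawBuf },
    Host { data: Vec<u8> },
}

// Style used in this PR: match on the reference and peel it apart in the pattern.
fn via_reference_pattern(backing: &Backing) -> RawBuf {
    match backing {
        &Backing::Gl { raw } => raw, // `&` pattern; `raw` is copied out
        Backing::Host { .. } => unreachable!(),
    }
}

// Style suggested above: dereference the scrutinee and bind explicitly.
fn via_explicit_binding(backing: &Backing) -> RawBuf {
    match *backing {
        Backing::Gl { raw } => raw, // `raw` copied out (RawBuf is Copy)
        Backing::Host { ref data } => {
            // `ref` borrows the Vec instead of (illegally) moving it out of the place.
            let _len = data.len();
            unreachable!()
        }
    }
}

fn main() {
    let b = Backing::Gl { raw: RawBuf(1) };
    assert_eq!(via_reference_pattern(&b).0, via_explicit_binding(&b).0);
}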

self.cmd_buffer
.commands
.push(C::BufferBarrier(bar.buffer.raw.unwrap(), bar.usage.to));
.push(C::BufferBarrier(buffer, bar.usage.to));
}
}

@@ -1001,9 +1005,11 @@ impl crate::CommandEncoder for super::CommandEncoder {
) {
self.state.index_offset = binding.offset;
self.state.index_format = format;
self.cmd_buffer
.commands
.push(C::SetIndexBuffer(binding.buffer.raw.unwrap()));
let buffer = match &binding.buffer.backing {
&BufferBacking::Gl { raw } | &BufferBacking::GlCachedOnHost { raw, .. } => raw,
BufferBacking::Host { .. } => unreachable!(),
};
self.cmd_buffer.commands.push(C::SetIndexBuffer(buffer));
}
unsafe fn set_vertex_buffer<'a>(
&mut self,
@@ -1012,8 +1018,12 @@
) {
self.state.dirty_vbuf_mask |= 1 << index;
let (_, ref mut vb) = self.state.vertex_buffers[index as usize];
let raw = match &binding.buffer.backing {
&BufferBacking::Gl { raw } | &BufferBacking::GlCachedOnHost { raw, .. } => raw,
BufferBacking::Host { .. } => unreachable!(),
};
*vb = Some(super::BufferBinding {
raw: binding.buffer.raw.unwrap(),
raw,
offset: binding.offset,
});
}
@@ -1107,10 +1117,14 @@ impl crate::CommandEncoder for super::CommandEncoder {
for draw in 0..draw_count as wgt::BufferAddress {
let indirect_offset =
offset + draw * size_of::<wgt::DrawIndirectArgs>() as wgt::BufferAddress;
let indirect_buf = match &buffer.backing {
&BufferBacking::Gl { raw } | &BufferBacking::GlCachedOnHost { raw, .. } => raw,
BufferBacking::Host { .. } => unreachable!(),
};
#[allow(clippy::clone_on_copy)] // False positive when cloning glow::UniformLocation
self.cmd_buffer.commands.push(C::DrawIndirect {
topology: self.state.topology,
indirect_buf: buffer.raw.unwrap(),
indirect_buf,
indirect_offset,
first_instance_location: self.state.first_instance_location.clone(),
});
@@ -1130,11 +1144,15 @@
for draw in 0..draw_count as wgt::BufferAddress {
let indirect_offset =
offset + draw * size_of::<wgt::DrawIndexedIndirectArgs>() as wgt::BufferAddress;
let indirect_buf = match &buffer.backing {
&BufferBacking::Gl { raw } | &BufferBacking::GlCachedOnHost { raw, .. } => raw,
BufferBacking::Host { .. } => unreachable!(),
};
#[allow(clippy::clone_on_copy)] // False positive when cloning glow::UniformLocation
self.cmd_buffer.commands.push(C::DrawIndexedIndirect {
topology: self.state.topology,
index_type,
indirect_buf: buffer.raw.unwrap(),
indirect_buf,
indirect_offset,
first_instance_location: self.state.first_instance_location.clone(),
});
@@ -1221,8 +1239,12 @@ impl crate::CommandEncoder for super::CommandEncoder {
self.cmd_buffer.commands.push(C::Dispatch(count));
}
unsafe fn dispatch_indirect(&mut self, buffer: &super::Buffer, offset: wgt::BufferAddress) {
let indirect_buf = match &buffer.backing {
&BufferBacking::Gl { raw } | &BufferBacking::GlCachedOnHost { raw, .. } => raw,
BufferBacking::Host { .. } => unreachable!(),
};
self.cmd_buffer.commands.push(C::DispatchIndirect {
indirect_buf: buffer.raw.unwrap(),
indirect_buf,
indirect_offset: offset,
});
}
95 changes: 54 additions & 41 deletions wgpu-hal/src/gles/device.rs
@@ -7,7 +7,7 @@ use arrayvec::ArrayVec;
use glow::HasContext;
use naga::FastHashMap;

use super::{conv, lock, MaybeMutex, PrivateCapabilities};
use super::{conv, lock, BufferBacking, MaybeMutex, PrivateCapabilities};
use crate::auxil::map_naga_stage;
use crate::TlasInstance;

@@ -526,13 +526,16 @@ impl crate::Device for super::Device {
.private_caps
.contains(PrivateCapabilities::BUFFER_ALLOCATION);

let host_backed_bytes = || Arc::new(MaybeMutex::new(vec![0; desc.size as usize]));

if emulate_map && desc.usage.intersects(wgt::BufferUses::MAP_WRITE) {
return Ok(super::Buffer {
raw: None,
backing: BufferBacking::Host {
data: host_backed_bytes(),
},
target,
size: desc.size,
map_flags: 0,
data: Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize]))),
offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
});
}
@@ -560,8 +563,8 @@ impl crate::Device for super::Device {
map_flags |= glow::MAP_WRITE_BIT;
}

let raw = Some(unsafe { gl.create_buffer() }.map_err(|_| crate::DeviceError::OutOfMemory)?);
unsafe { gl.bind_buffer(target, raw) };
let raw = unsafe { gl.create_buffer() }.map_err(|_| crate::DeviceError::OutOfMemory)?;
unsafe { gl.bind_buffer(target, Some(raw)) };
let raw_size = desc
.size
.try_into()
@@ -614,33 +617,38 @@
.private_caps
.contains(PrivateCapabilities::DEBUG_FNS)
{
let name = raw.map_or(0, |buf| buf.0.get());
let name = raw.0.get();
unsafe { gl.object_label(glow::BUFFER, name, Some(label)) };
}
}

let data = if emulate_map && desc.usage.contains(wgt::BufferUses::MAP_READ) {
Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize])))
let backing = if emulate_map && desc.usage.contains(wgt::BufferUses::MAP_READ) {
BufferBacking::GlCachedOnHost {
cache: host_backed_bytes(),
raw,
}
} else {
None
BufferBacking::Gl { raw }
};

self.counters.buffers.add(1);

Ok(super::Buffer {
raw,
backing,
target,
size: desc.size,
map_flags,
data,
offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
})
}

unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
if let Some(raw) = buffer.raw {
let gl = &self.shared.context.lock();
unsafe { gl.delete_buffer(raw) };
match buffer.backing {
BufferBacking::Gl { raw } | BufferBacking::GlCachedOnHost { raw, cache: _ } => {
let gl = &self.shared.context.lock();
unsafe { gl.delete_buffer(raw) };
}
BufferBacking::Host { data: _ } => {}
}

self.counters.buffers.sub(1);
@@ -656,33 +664,32 @@
range: crate::MemoryRange,
) -> Result<crate::BufferMapping, crate::DeviceError> {
let is_coherent = buffer.map_flags & glow::MAP_COHERENT_BIT != 0;
let ptr = match buffer.raw {
None => {
let mut vec = lock(buffer.data.as_ref().unwrap());
let ptr = match &buffer.backing {
BufferBacking::Host { data } => {
let mut vec = lock(data);
let slice = &mut vec.as_mut_slice()[range.start as usize..range.end as usize];
slice.as_mut_ptr()
}
Some(raw) => {
&BufferBacking::Gl { raw } => {
let gl = &self.shared.context.lock();
unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
let ptr = if let Some(ref map_read_allocation) = buffer.data {
let mut guard = lock(map_read_allocation);
let slice = guard.as_mut_slice();
unsafe { self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice) };
slice.as_mut_ptr()
} else {
*lock(&buffer.offset_of_current_mapping) = range.start;
unsafe {
gl.map_buffer_range(
buffer.target,
range.start as i32,
(range.end - range.start) as i32,
buffer.map_flags,
)
}
};
unsafe { gl.bind_buffer(buffer.target, None) };
ptr
*lock(&buffer.offset_of_current_mapping) = range.start;
unsafe {
gl.map_buffer_range(
buffer.target,
range.start as i32,
(range.end - range.start) as i32,
buffer.map_flags,
)
}
}
&BufferBacking::GlCachedOnHost { raw, ref cache } => {
let gl = &self.shared.context.lock();
unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
let mut guard = lock(cache);
let slice = guard.as_mut_slice();
unsafe { self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice) };
slice.as_mut_ptr()
}
};
Ok(crate::BufferMapping {
Expand All @@ -691,22 +698,23 @@ impl crate::Device for super::Device {
})
}
unsafe fn unmap_buffer(&self, buffer: &super::Buffer) {
if let Some(raw) = buffer.raw {
if buffer.data.is_none() {
match &buffer.backing {
&BufferBacking::Gl { raw } => {
let gl = &self.shared.context.lock();
unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
unsafe { gl.unmap_buffer(buffer.target) };
unsafe { gl.bind_buffer(buffer.target, None) };
*lock(&buffer.offset_of_current_mapping) = 0;
}
&BufferBacking::Host { .. } | &BufferBacking::GlCachedOnHost { .. } => {}
}
}
unsafe fn flush_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I)
where
I: Iterator<Item = crate::MemoryRange>,
{
if let Some(raw) = buffer.raw {
if buffer.data.is_none() {
match &buffer.backing {
&BufferBacking::Gl { raw } => {
let gl = &self.shared.context.lock();
unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
for range in ranges {
@@ -720,6 +728,7 @@
};
}
}
&BufferBacking::Host { .. } | &BufferBacking::GlCachedOnHost { .. } => {}
}
}
unsafe fn invalidate_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {
@@ -1261,7 +1270,11 @@ impl crate::Device for super::Device {
wgt::BindingType::Buffer { .. } => {
let bb = &desc.buffers[entry.resource_index as usize];
super::RawBinding::Buffer {
raw: bb.buffer.raw.unwrap(),
raw: match &bb.buffer.backing {
&BufferBacking::Gl { raw }
| &BufferBacking::GlCachedOnHost { raw, .. } => raw,
&BufferBacking::Host { .. } => unreachable!(),
},
offset: bb.offset as i32,
size: match bb.size {
Some(s) => s.get() as i32,
34 changes: 32 additions & 2 deletions wgpu-hal/src/gles/mod.rs
@@ -342,14 +342,44 @@ impl Drop for Queue {

#[derive(Clone, Debug)]
pub struct Buffer {
raw: Option<glow::Buffer>,
backing: BufferBacking,
target: BindTarget,
size: wgt::BufferAddress,
map_flags: u32,
data: Option<Arc<MaybeMutex<Vec<u8>>>>,
offset_of_current_mapping: Arc<MaybeMutex<wgt::BufferAddress>>,
}

/// Storage backing a [`Buffer`]'s operations, possibly implemented with a host-side vector of
/// bytes.
///
/// The [`Self::OnlyRaw`] variant is preferred, when supported. However, various workarounds for
Out of date docs

Suggested change
/// The [`Self::OnlyRaw`] variant is preferred, when supported. However, various workarounds for
/// The [`Self::Gl`] variant is preferred, when supported. However, various workarounds for

/// lack of support are needed to implement some operations. See [`Device::create_buffer`] for more
/// details.
#[derive(Clone, Debug)]
enum BufferBacking {
/// A single [`glow::Buffer`] backing all operations.
Gl { raw: glow::Buffer },
/// A synchronized vector of bytes on the host. When needed, a newly created buffer with the
/// contents of `emulated_map_data` will be used for copy operations.
///
/// This variant is used for write-only buffers.
Host { data: Arc<MaybeMutex<Vec<u8>>> },
/// A [`glow::Buffer`] that does not support byte access, and so requires whole copies
/// between a synchronized vector of bytes (`cache`) and a `raw` GL buffer.
GlCachedOnHost {
raw: glow::Buffer,
cache: Arc<MaybeMutex<Vec<u8>>>,
},
}

// #[derive(Clone, Debug)]
// enum MapState {
// Mapped {
// offset: Arc<MaybeMutex<wgt::BufferAddress>>,
// },
// Unmapped,
// }
Comment on lines +375 to +381
Commented code.
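
To tie the new enum back to the create_buffer changes above, a simplified, self-contained sketch of the backing-selection logic (names such as Backing, GlBuf, and pick_backing are illustrative only, and std::sync::Mutex stands in for MaybeMutex):

use std::sync::{Arc, Mutex};

#[derive(Clone, Copy)]
struct GlBuf(u32); // stand-in for glow::Buffer

enum Backing {
    Gl { raw: GlBuf },
    Host { data: Arc<Mutex<Vec<u8>>> },
    GlCachedOnHost { raw: GlBuf, cache: Arc<Mutex<Vec<u8>>> },
}

fn pick_backing(
    emulate_map: bool, // true when mapping must be emulated on the host
    map_write: bool,
    map_read: bool,
    size: usize,
    make_gl_buffer: impl FnOnce() -> GlBuf,
) -> Backing {
    let host_bytes = || Arc::new(Mutex::new(vec![0u8; size]));
    if emulate_map && map_write {
        // Write-mapped buffers get a host allocation and no GL object up front.
        return Backing::Host { data: host_bytes() };
    }
    let raw = make_gl_buffer();
    if emulate_map && map_read {
        // Reads are served from a host cache that is refilled from the GL buffer on map.
        Backing::GlCachedOnHost { raw, cache: host_bytes() }
    } else {
        Backing::Gl { raw }
    }
}

fn main() {
    let backing = pick_backing(true, false, true, 64, || GlBuf(1));
    match backing {
        Backing::GlCachedOnHost { .. } => println!("map-read buffer gets a host cache"),
        Backing::Gl { .. } | Backing::Host { .. } => {}
    }
}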


#[cfg(send_sync)]
unsafe impl Sync for Buffer {}
#[cfg(send_sync)]
Expand Down