use bytemuck::Pod;
use lyon::tessellation::{
FillVertex, FillVertexConstructor, StrokeVertex, StrokeVertexConstructor, VertexBuffers,
};
use crate::render::ShaderVertex;
pub mod zero_tessellator;
/// Maximum geometric error allowed when flattening curves during tessellation,
/// in the same units as the input path coordinates.
const DEFAULT_TOLERANCE: f32 = 0.02;
/// Index type used for tessellated geometry buffers.
// `VertexConstructor` (same line below) is a stateless adapter that converts
// lyon fill/stroke vertices into `ShaderVertex` values; see the impls below.
pub type IndexDataType = u32; pub struct VertexConstructor {}
impl FillVertexConstructor<ShaderVertex> for VertexConstructor {
    /// Converts a lyon fill-tessellation vertex into a `ShaderVertex`.
    ///
    /// Fill geometry carries no stroke normal, so a zero normal is stored.
    fn new_vertex(&mut self, vertex: FillVertex) -> ShaderVertex {
        let position = vertex.position().to_array();
        ShaderVertex::new(position, [0.0, 0.0])
    }
}
impl StrokeVertexConstructor<ShaderVertex> for VertexConstructor {
    /// Converts a lyon stroke-tessellation vertex into a `ShaderVertex`.
    ///
    /// Stores the position on the original path (not the offset outline
    /// position) together with the stroke normal.
    fn new_vertex(&mut self, vertex: StrokeVertex) -> ShaderVertex {
        let on_path = vertex.position_on_path().to_array();
        let normal = vertex.normal().to_array();
        ShaderVertex::new(on_path, normal)
    }
}
/// A tessellated vertex/index buffer whose index list may be padded beyond
/// the number of indices that are actually meaningful.
///
/// Padding is appended so the raw byte size satisfies
/// `wgpu::COPY_BUFFER_ALIGNMENT` (see the `Align` impl in this file);
/// `usable_indices` records how many indices represent real geometry and
/// should be drawn.
#[derive(Clone)]
pub struct OverAlignedVertexBuffer<V, I> {
    /// The underlying (possibly over-padded) vertex and index data.
    pub buffer: VertexBuffers<V, I>,
    /// Number of leading indices in `buffer.indices` that are real geometry.
    pub usable_indices: u32,
}
impl<V, I> OverAlignedVertexBuffer<V, I> {
    /// Returns a buffer with no vertices, no indices, and zero usable indices.
    pub fn empty() -> Self {
        let buffer = VertexBuffers::with_capacity(0, 0);
        Self {
            buffer,
            usable_indices: 0,
        }
    }

    /// Builds a buffer from vertex and index iterators.
    ///
    /// The iterators must report exact lengths so the backing vectors can be
    /// allocated once, up front. `usable_indices` is stored as given; it is
    /// the caller's statement of how many indices are real geometry.
    pub fn from_iters<IV, II>(vertices: IV, indices: II, usable_indices: u32) -> Self
    where
        IV: IntoIterator<Item = V>,
        II: IntoIterator<Item = I>,
        IV::IntoIter: ExactSizeIterator,
        II::IntoIter: ExactSizeIterator,
    {
        let vertex_iter = vertices.into_iter();
        let index_iter = indices.into_iter();
        // Reserve exactly once from the iterators' exact sizes.
        let mut buffer = VertexBuffers::with_capacity(vertex_iter.len(), index_iter.len());
        buffer.vertices.extend(vertex_iter);
        buffer.indices.extend(index_iter);
        Self {
            buffer,
            usable_indices,
        }
    }
}
impl<V: Pod, I: Pod> From<VertexBuffers<V, I>> for OverAlignedVertexBuffer<V, I> {
    /// Wraps raw tessellation output, aligning it for GPU upload.
    fn from(mut buffer: VertexBuffers<V, I>) -> Self {
        // Capture the meaningful index count *before* alignment padding is
        // appended — padding indices are not real geometry.
        let usable_indices = buffer.indices.len() as u32;
        buffer.align_vertices();
        buffer.align_indices();
        Self {
            buffer,
            usable_indices,
        }
    }
}
/// Pads a vertex/index container so its raw byte size is a multiple of
/// `wgpu::COPY_BUFFER_ALIGNMENT`, as required for GPU buffer copies.
trait Align<V: Pod, I: Pod> {
    /// Verifies the vertex data is already aligned (panics otherwise — see
    /// the impl below for the invariant).
    fn align_vertices(&mut self);
    /// Appends zeroed indices until the index data's byte size is aligned.
    fn align_indices(&mut self);
}
impl<V: Pod, I: Pod> Align<V, I> for VertexBuffers<V, I> {
    /// Checks that the vertex data already satisfies
    /// `wgpu::COPY_BUFFER_ALIGNMENT`.
    ///
    /// # Panics
    /// Panics if the vertex byte size is not aligned. This is an invariant
    /// check: the size of the vertex type is expected to be a multiple of the
    /// copy alignment, so no padding should ever be required.
    fn align_vertices(&mut self) {
        let align = wgpu::COPY_BUFFER_ALIGNMENT;
        // BUGFIX: stride must come from the generic element type `V`, not a
        // hard-coded concrete vertex type — this impl is generic and the
        // check is meaningless for any other `V` otherwise. (Identical
        // behavior when `V` is the renderer's `ShaderVertex`.)
        let stride = std::mem::size_of::<V>() as wgpu::BufferAddress;
        let unpadded_bytes = self.vertices.len() as wgpu::BufferAddress * stride;
        let padding_bytes = (align - unpadded_bytes % align) % align;
        assert!(
            padding_bytes == 0,
            "vertex data must be aligned to wgpu::COPY_BUFFER_ALIGNMENT; \
             the vertex type's size should guarantee this"
        );
    }

    /// Appends zeroed index elements until the index byte size is a multiple
    /// of `wgpu::COPY_BUFFER_ALIGNMENT`.
    ///
    /// Callers must record the usable index count *before* calling this,
    /// since the appended padding indices are not meaningful geometry.
    fn align_indices(&mut self) {
        let align = wgpu::COPY_BUFFER_ALIGNMENT;
        let stride = std::mem::size_of::<I>() as wgpu::BufferAddress;
        let unpadded_bytes = self.indices.len() as wgpu::BufferAddress * stride;
        let padding_bytes = (align - unpadded_bytes % align) % align;
        // Whole elements needed to cover the padding: ceil(padding_bytes / stride).
        let padding_elements = (padding_bytes + stride - 1) / stride;
        for _ in 0..padding_elements {
            self.indices.push(I::zeroed());
        }
    }
}