maplibre/vector/resource/
buffer_pool.rs

1//! A ring-buffer like pool of [buffers](wgpu::Buffer).
2
3use std::{
4    collections::{btree_map, BTreeMap, HashSet, VecDeque},
5    fmt::Debug,
6    marker::PhantomData,
7    mem::size_of,
8    ops::Range,
9};
10
11use bytemuck::Pod;
12
13use crate::{
14    coords::{Quadkey, WorldTileCoords},
15    render::{
16        resource::{BackingBufferDescriptor, Queue},
17        tile_view_pattern::HasTile,
18    },
19    style::layer::StyleLayer,
20    tcs::world::World,
21    vector::tessellation::OverAlignedVertexBuffer,
22};
23
// TODO: Too low values can cause a back-and-forth between unloading and loading layers
/// Maximum number of vertex elements (`V`) the vertex backing buffer can hold.
/// The byte size of the buffer is `size_of::<V>() * VERTEX_SIZE`.
pub const VERTEX_SIZE: wgpu::BufferAddress = 10 * 1_000_000;
/// Maximum number of index elements (`I`) the index backing buffer can hold.
pub const INDICES_SIZE: wgpu::BufferAddress = 10 * 1_000_000;

/// Maximum number of feature metadata elements (`FM`) the corresponding backing buffer can hold.
pub const FEATURE_METADATA_SIZE: wgpu::BufferAddress = 10 * 1024 * 1000;
/// Maximum number of layer metadata elements (`TM`) the corresponding backing buffer can hold.
pub const LAYER_METADATA_SIZE: wgpu::BufferAddress = 10 * 1024;
30
/// This is inspired by the memory pool in Vulkan documented
/// [here](https://gpuopen-librariesandsdks.github.io/VulkanMemoryAllocator/html/custom_memory_pools.html).
///
/// Type parameters: `Q` is the upload queue, `B` the buffer type, `V`/`I` the
/// vertex/index element types, and `TM`/`FM` the per-layer/per-feature metadata
/// element types.
#[derive(Debug)]
pub struct BufferPool<Q, B, V, I, TM, FM> {
    /// Backing buffer for vertex data (`V` elements)
    vertices: BackingBuffer<B>,
    /// Backing buffer for index data (`I` elements)
    indices: BackingBuffer<B>,
    /// Backing buffer for per-layer metadata (`TM` elements)
    layer_metadata: BackingBuffer<B>,
    /// Backing buffer for per-feature metadata (`FM` elements)
    feature_metadata: BackingBuffer<B>,

    /// Ring-buffer index tracking which byte ranges of each backing buffer are allocated
    index: RingIndex,
    // PhantomData markers tie the otherwise-unused type parameters to this struct.
    phantom_v: PhantomData<V>,
    phantom_i: PhantomData<I>,
    phantom_q: PhantomData<Q>,
    phantom_m: PhantomData<TM>,
    phantom_fm: PhantomData<FM>,
}
47
/// Identifies which of the pool's four backing buffers a value refers to.
#[derive(Clone, Copy, Debug)]
pub enum BackingBufferType {
    /// The vertex buffer (`V` elements)
    Vertices,
    /// The index buffer (`I` elements)
    Indices,
    /// The per-layer metadata buffer (`TM` elements)
    Metadata,
    /// The per-feature metadata buffer (`FM` elements)
    FeatureMetadata,
}
55
/// A single backing buffer of the pool together with its size and role.
#[derive(Debug)]
struct BackingBuffer<B> {
    /// The internal structure which is used for storage
    inner: B,
    /// The size of the `inner` buffer
    inner_size: wgpu::BufferAddress,
    /// Which of the four pool buffers this is; used to select the matching
    /// byte ranges when querying the [`RingIndex`]
    typ: BackingBufferType,
}
64
65impl<B> BackingBuffer<B> {
66    fn new(inner: B, inner_size: wgpu::BufferAddress, typ: BackingBufferType) -> Self {
67        Self {
68            inner,
69            inner_size,
70            typ,
71        }
72    }
73}
74
75impl<V: Pod, I: Pod, TM: Pod, FM: Pod> BufferPool<wgpu::Queue, wgpu::Buffer, V, I, TM, FM> {
76    pub fn from_device(device: &wgpu::Device) -> Self {
77        let vertex_buffer_desc = wgpu::BufferDescriptor {
78            label: Some("vertex buffer"),
79            size: size_of::<V>() as wgpu::BufferAddress * VERTEX_SIZE,
80            usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
81            mapped_at_creation: false,
82        };
83
84        let indices_buffer_desc = wgpu::BufferDescriptor {
85            label: Some("indices buffer"),
86            size: size_of::<I>() as wgpu::BufferAddress * INDICES_SIZE,
87            usage: wgpu::BufferUsages::INDEX | wgpu::BufferUsages::COPY_DST,
88            mapped_at_creation: false,
89        };
90
91        let feature_metadata_desc = wgpu::BufferDescriptor {
92            label: Some("feature metadata buffer"),
93            size: size_of::<FM>() as wgpu::BufferAddress * FEATURE_METADATA_SIZE,
94            usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
95            mapped_at_creation: false,
96        };
97
98        let layer_metadata_desc = wgpu::BufferDescriptor {
99            label: Some("layer metadata buffer"),
100            size: size_of::<TM>() as wgpu::BufferAddress * LAYER_METADATA_SIZE,
101            usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
102            mapped_at_creation: false,
103        };
104
105        BufferPool::new(
106            BackingBufferDescriptor::new(
107                device.create_buffer(&vertex_buffer_desc),
108                vertex_buffer_desc.size,
109            ),
110            BackingBufferDescriptor::new(
111                device.create_buffer(&indices_buffer_desc),
112                indices_buffer_desc.size,
113            ),
114            BackingBufferDescriptor::new(
115                device.create_buffer(&layer_metadata_desc),
116                layer_metadata_desc.size,
117            ),
118            BackingBufferDescriptor::new(
119                device.create_buffer(&feature_metadata_desc),
120                feature_metadata_desc.size,
121            ),
122        )
123    }
124}
impl<Q: Queue<B>, B, V: Pod, I: Pod, TM: Pod, FM: Pod> BufferPool<Q, B, V, I, TM, FM> {
    /// Creates a pool from four pre-allocated backing buffers. This is also the
    /// entry point for tests using buffer/queue doubles.
    pub fn new(
        vertices: BackingBufferDescriptor<B>,
        indices: BackingBufferDescriptor<B>,
        layer_metadata: BackingBufferDescriptor<B>,
        feature_metadata: BackingBufferDescriptor<B>,
    ) -> Self {
        Self {
            vertices: BackingBuffer::new(
                vertices.buffer,
                vertices.inner_size,
                BackingBufferType::Vertices,
            ),
            indices: BackingBuffer::new(
                indices.buffer,
                indices.inner_size,
                BackingBufferType::Indices,
            ),
            layer_metadata: BackingBuffer::new(
                layer_metadata.buffer,
                layer_metadata.inner_size,
                BackingBufferType::Metadata,
            ),
            feature_metadata: BackingBuffer::new(
                feature_metadata.buffer,
                feature_metadata.inner_size,
                BackingBufferType::FeatureMetadata,
            ),
            index: RingIndex::new(),
            phantom_v: Default::default(),
            phantom_i: Default::default(),
            phantom_q: Default::default(),
            phantom_m: Default::default(),
            phantom_fm: Default::default(),
        }
    }

    /// Forgets all allocations. Buffer contents are left in place; future
    /// allocations simply overwrite them.
    pub fn clear(&mut self) {
        self.index.clear()
    }

    /// Size (in bytes) of the largest contiguous free gap in the backing buffer `typ`.
    #[cfg(test)]
    fn available_space(&self, typ: BackingBufferType) -> wgpu::BufferAddress {
        let gap = self.index.find_largest_gap(
            typ,
            match typ {
                BackingBufferType::Vertices => &self.vertices,
                BackingBufferType::Indices => &self.indices,
                BackingBufferType::Metadata => &self.layer_metadata,
                BackingBufferType::FeatureMetadata => &self.feature_metadata,
            }
            .inner_size,
        );

        gap.end - gap.start
    }

    /// Raw backing buffer holding vertex data.
    pub fn vertices(&self) -> &B {
        &self.vertices.inner
    }

    /// Raw backing buffer holding index data.
    pub fn indices(&self) -> &B {
        &self.indices.inner
    }

    /// Raw backing buffer holding per-layer metadata.
    pub fn metadata(&self) -> &B {
        &self.layer_metadata.inner
    }

    /// Raw backing buffer holding per-feature metadata.
    pub fn feature_metadata(&self) -> &B {
        &self.feature_metadata.inner
    }

    /// The VertexBuffers can contain padding elements. Not everything from a VertexBuffers is useable.
    /// The function returns the `bytes` and `aligned_bytes`. See [`OverAlignedVertexBuffer`].
    ///
    /// * `bytes`: total byte size of all `elements` (`elements * stride`).
    /// * `aligned_bytes`: byte size of the `usable_elements`, rounded up to
    ///   [`wgpu::COPY_BUFFER_ALIGNMENT`].
    fn align(
        stride: wgpu::BufferAddress,
        elements: wgpu::BufferAddress,
        usable_elements: wgpu::BufferAddress,
    ) -> (wgpu::BufferAddress, wgpu::BufferAddress) {
        let bytes = elements * stride;

        let usable_bytes = (usable_elements * stride) as wgpu::BufferAddress;

        let align = wgpu::COPY_BUFFER_ALIGNMENT;
        let padding = (align - usable_bytes % align) % align;

        let aligned_bytes = usable_bytes + padding;

        (bytes, aligned_bytes)
    }

    /// Returns the ids of the style layers currently allocated for the tile at
    /// `coords`, or `None` if the index has no entry for that tile.
    pub fn get_loaded_style_layers_at(&self, coords: WorldTileCoords) -> Option<HashSet<&str>> {
        self.index.get_layers(coords).map(|layers| {
            layers
                .iter()
                .map(|entry| entry.style_layer.id.as_str())
                .collect()
        })
    }

    /// Allocates
    /// * `geometry`
    /// * `layer_metadata` and
    /// * `feature_metadata` for a layer. This function is able to dynamically evict layers if there
    /// is not enough space available.
    #[tracing::instrument(skip_all)]
    pub fn allocate_layer_geometry(
        &mut self,
        queue: &Q,
        coords: WorldTileCoords,
        style_layer: StyleLayer,
        geometry: &OverAlignedVertexBuffer<V, I>,
        layer_metadata: TM,
        feature_metadata: &[FM],
    ) {
        // Byte stride of a single element in each backing buffer.
        let vertices_stride = size_of::<V>() as wgpu::BufferAddress;
        let indices_stride = size_of::<I>() as wgpu::BufferAddress;
        let layer_metadata_stride = size_of::<TM>() as wgpu::BufferAddress;
        let feature_metadata_stride = size_of::<FM>() as wgpu::BufferAddress;

        // Compute the raw and copy-aligned byte sizes for each upload.
        let (vertices_bytes, aligned_vertices_bytes) = Self::align(
            vertices_stride,
            geometry.buffer.vertices.len() as wgpu::BufferAddress,
            geometry.buffer.vertices.len() as wgpu::BufferAddress,
        );
        let (indices_bytes, aligned_indices_bytes) = Self::align(
            indices_stride,
            geometry.buffer.indices.len() as wgpu::BufferAddress,
            geometry.usable_indices as wgpu::BufferAddress,
        );
        // Exactly one TM element per layer.
        let (layer_metadata_bytes, aligned_layer_metadata_bytes) =
            Self::align(layer_metadata_stride, 1, 1);

        let (feature_metadata_bytes, aligned_feature_metadata_bytes) = Self::align(
            feature_metadata_stride,
            feature_metadata.len() as wgpu::BufferAddress,
            feature_metadata.len() as wgpu::BufferAddress,
        );

        if feature_metadata_bytes != aligned_feature_metadata_bytes {
            // TODO: align if not aligned?
            panic!(
                "feature_metadata is not aligned. This should not happen as long as size_of::<FM>() is a multiple of the alignment."
            )
        }

        // Reserve byte ranges in all four backing buffers. `make_room` may evict
        // the oldest entries from the ring index until the data fits.
        let maybe_entry = IndexEntry {
            coords,
            style_layer,
            buffer_vertices: self.index.make_room(
                vertices_bytes,
                self.vertices.typ,
                self.vertices.inner_size,
            ),
            buffer_indices: self.index.make_room(
                indices_bytes,
                self.indices.typ,
                self.indices.inner_size,
            ),
            usable_indices: geometry.usable_indices,
            buffer_layer_metadata: self.index.make_room(
                layer_metadata_bytes,
                self.layer_metadata.typ,
                self.layer_metadata.inner_size,
            ),
            buffer_feature_metadata: self.index.make_room(
                feature_metadata_bytes,
                self.feature_metadata.typ,
                self.feature_metadata.inner_size,
            ),
        };

        // write_buffer() is the preferred method for WASM: https://toji.github.io/webgpu-best-practices/buffer-uploads.html#when-in-doubt-writebuffer
        queue.write_buffer(
            &self.vertices.inner,
            maybe_entry.buffer_vertices.start,
            &bytemuck::cast_slice(&geometry.buffer.vertices)[0..aligned_vertices_bytes as usize],
        );

        queue.write_buffer(
            &self.indices.inner,
            maybe_entry.buffer_indices.start,
            &bytemuck::cast_slice(&geometry.buffer.indices)[0..aligned_indices_bytes as usize],
        );

        queue.write_buffer(
            &self.layer_metadata.inner,
            maybe_entry.buffer_layer_metadata.start,
            &bytemuck::cast_slice(&[layer_metadata])[0..aligned_layer_metadata_bytes as usize],
        );

        queue.write_buffer(
            &self.feature_metadata.inner,
            maybe_entry.buffer_feature_metadata.start,
            &bytemuck::cast_slice(feature_metadata)[0..aligned_feature_metadata_bytes as usize],
        );

        self.index.push_back(maybe_entry);
    }

    /// Overwrites the layer metadata of an existing `entry` in place.
    ///
    /// Panics if the byte size of `TM` does not match the range that was
    /// allocated for the entry.
    #[tracing::instrument(skip_all)]
    pub fn update_layer_metadata(&self, queue: &Q, entry: &IndexEntry, layer_metadata: TM) {
        let layer_metadata_stride = size_of::<TM>() as wgpu::BufferAddress; // TODO: deduplicate
        let (layer_metadata_bytes, aligned_layer_metadata_bytes) =
            Self::align(layer_metadata_stride, 1, 1);

        if entry.buffer_layer_metadata.end - entry.buffer_layer_metadata.start
            != layer_metadata_bytes
        {
            panic!("Updated layer metadata has wrong size!");
        }

        queue.write_buffer(
            &self.layer_metadata.inner,
            entry.buffer_layer_metadata.start,
            &bytemuck::cast_slice(&[layer_metadata])[0..aligned_layer_metadata_bytes as usize],
        );
    }

    /// Overwrites the feature metadata of an existing `entry` in place.
    ///
    /// Panics if the byte size of `feature_metadata` does not match the
    /// allocated range, or if it is not copy-aligned.
    #[tracing::instrument(skip_all)]
    pub fn update_feature_metadata(&self, queue: &Q, entry: &IndexEntry, feature_metadata: &[FM]) {
        let feature_metadata_stride = size_of::<FM>() as wgpu::BufferAddress; // TODO: deduplicate

        let (feature_metadata_bytes, aligned_feature_metadata_bytes) = Self::align(
            feature_metadata_stride,
            feature_metadata.len() as wgpu::BufferAddress,
            feature_metadata.len() as wgpu::BufferAddress,
        );

        if entry.buffer_feature_metadata.end - entry.buffer_feature_metadata.start
            != feature_metadata_bytes
        {
            panic!("Updated feature metadata has wrong size!");
        }

        if feature_metadata_bytes != aligned_feature_metadata_bytes {
            // FIXME: align if not aligned?
            panic!(
                "feature_metadata is not aligned. This should not happen as long as size_of::<FM>() is a multiple of the alignment."
            )
        }

        queue.write_buffer(
            &self.feature_metadata.inner,
            entry.buffer_feature_metadata.start,
            &bytemuck::cast_slice(feature_metadata)[0..aligned_feature_metadata_bytes as usize],
        );
    }

    /// Read access to the ring index, e.g. for iterating loaded entries.
    pub fn index(&self) -> &RingIndex {
        &self.index
    }
}
379
/// Records where a single tile layer's data lives within the pool's backing buffers.
#[derive(Debug, Clone)]
pub struct IndexEntry {
    pub coords: WorldTileCoords, // TODO: replace with generic key
    pub style_layer: StyleLayer, // TODO: remove
    // Range of bytes within the backing buffer for vertices
    buffer_vertices: Range<wgpu::BufferAddress>,
    // Range of bytes within the backing buffer for indices
    buffer_indices: Range<wgpu::BufferAddress>,
    // Range of bytes within the backing buffer for layer metadata
    buffer_layer_metadata: Range<wgpu::BufferAddress>,
    // Range of bytes within the backing buffer for feature metadata
    buffer_feature_metadata: Range<wgpu::BufferAddress>,
    // Amount of actually usable indices. Each index has the size/format `IndexDataType`.
    // Can be lower than size(buffer_indices) / indices_stride because of alignment.
    usable_indices: u32,
}
396
397impl IndexEntry {
398    pub fn indices_range(&self) -> Range<u32> {
399        0..self.usable_indices
400    }
401
402    pub fn indices_buffer_range(&self) -> Range<wgpu::BufferAddress> {
403        self.buffer_indices.clone()
404    }
405
406    pub fn vertices_buffer_range(&self) -> Range<wgpu::BufferAddress> {
407        self.buffer_vertices.clone()
408    }
409
410    pub fn layer_metadata_buffer_range(&self) -> Range<wgpu::BufferAddress> {
411        self.buffer_layer_metadata.clone()
412    }
413
414    pub fn feature_metadata_buffer_range(&self) -> Range<wgpu::BufferAddress> {
415        self.buffer_feature_metadata.clone()
416    }
417}
418
/// All layers currently allocated for a single tile, oldest first.
#[derive(Debug)]
pub struct RingIndexEntry {
    /// Per-layer index entries in allocation order
    layers: VecDeque<IndexEntry>,
}
423
/// Ring-buffer index over all tile layers currently allocated in the pool.
#[derive(Debug)]
pub struct RingIndex {
    /// Maps a tile's [`Quadkey`] to the layers allocated for it
    tree_index: BTreeMap<Quadkey, RingIndexEntry>,
    /// Allocation order: one key is pushed per allocated layer, so keys of
    /// multi-layer tiles appear multiple times. The front is the next
    /// eviction candidate.
    linear_index: VecDeque<Quadkey>,
}
429
430impl RingIndex {
431    pub fn new() -> Self {
432        Self {
433            tree_index: Default::default(),
434            linear_index: Default::default(),
435        }
436    }
437
438    pub fn clear(&mut self) {
439        self.linear_index.clear();
440        self.tree_index.clear();
441    }
442
443    pub fn front(&self) -> Option<&IndexEntry> {
444        self.linear_index.front().and_then(|key| {
445            self.tree_index
446                .get(key)
447                .and_then(|entry| entry.layers.front())
448        })
449    }
450
451    pub fn back(&self) -> Option<&IndexEntry> {
452        self.linear_index.back().and_then(|key| {
453            self.tree_index
454                .get(key)
455                .and_then(|entry| entry.layers.back())
456        })
457    }
458
459    pub fn get_layers(&self, coords: WorldTileCoords) -> Option<&VecDeque<IndexEntry>> {
460        coords
461            .build_quad_key()
462            .and_then(|key| self.tree_index.get(&key))
463            .map(|entry| &entry.layers)
464    }
465
466    pub fn iter(&self) -> impl Iterator<Item = impl Iterator<Item = &IndexEntry>> + '_ {
467        self.linear_index
468            .iter()
469            .flat_map(|key| self.tree_index.get(key).map(|entry| entry.layers.iter()))
470    }
471
472    fn pop_front(&mut self) -> Option<IndexEntry> {
473        if let Some(entry) = self
474            .linear_index
475            .pop_front()
476            .and_then(|key| self.tree_index.get_mut(&key))
477        {
478            entry.layers.pop_front()
479        } else {
480            None
481        }
482    }
483
484    fn push_back(&mut self, entry: IndexEntry) {
485        if let Some(key) = entry.coords.build_quad_key() {
486            match self.tree_index.entry(key) {
487                btree_map::Entry::Vacant(index_entry) => {
488                    index_entry.insert(RingIndexEntry {
489                        layers: VecDeque::from([entry]),
490                    });
491                }
492                btree_map::Entry::Occupied(mut index_entry) => {
493                    index_entry.get_mut().layers.push_back(entry);
494                }
495            }
496
497            self.linear_index.push_back(key)
498        } else {
499            unreachable!() // TODO handle
500        }
501    }
502
503    fn make_room(
504        &mut self,
505        new_data: wgpu::BufferAddress,
506        typ: BackingBufferType,
507        inner_size: wgpu::BufferAddress,
508    ) -> Range<wgpu::BufferAddress> {
509        if new_data > inner_size {
510            panic!("can not allocate because backing buffer {typ:?} are too small")
511        }
512
513        let mut available_gap = self.find_largest_gap(typ, inner_size);
514
515        while new_data > available_gap.end - available_gap.start {
516            // no more space, we need to evict items
517            if self.pop_front().is_some() {
518                available_gap = self.find_largest_gap(typ, inner_size);
519            } else {
520                panic!("evicted even though index is empty")
521            }
522        }
523
524        available_gap.start..available_gap.start + new_data
525    }
526
527    fn find_largest_gap(
528        &self,
529        typ: BackingBufferType,
530        inner_size: wgpu::BufferAddress,
531    ) -> Range<wgpu::BufferAddress> {
532        let start = self.front().map(|first| match typ {
533            BackingBufferType::Vertices => first.buffer_vertices.start,
534            BackingBufferType::Indices => first.buffer_indices.start,
535            BackingBufferType::Metadata => first.buffer_layer_metadata.start,
536            BackingBufferType::FeatureMetadata => first.buffer_feature_metadata.start,
537        });
538        let end = self.back().map(|first| match typ {
539            BackingBufferType::Vertices => first.buffer_vertices.end,
540            BackingBufferType::Indices => first.buffer_indices.end,
541            BackingBufferType::Metadata => first.buffer_layer_metadata.end,
542            BackingBufferType::FeatureMetadata => first.buffer_feature_metadata.end,
543        });
544
545        if let Some(start) = start {
546            if let Some(end) = end {
547                if end > start {
548                    // we haven't wrapped yet in the ring buffer
549
550                    let gap_from_start = 0..start; // gap from beginning to first entry
551                    let gap_to_end = end..inner_size;
552
553                    if gap_to_end.end - gap_to_end.start > gap_from_start.end - gap_from_start.start
554                    {
555                        gap_to_end
556                    } else {
557                        gap_from_start
558                    }
559                } else {
560                    // we already wrapped in the ring buffer
561                    // we choose the gab between the two
562                    end..start
563                }
564            } else {
565                unreachable!()
566            }
567        } else {
568            0..inner_size
569        }
570    }
571}
572
573impl<Q: Queue<B>, B, V: Pod, I: Pod, TM: Pod, FM: Pod> HasTile for BufferPool<Q, B, V, I, TM, FM> {
574    fn has_tile(&self, coords: WorldTileCoords, _world: &World) -> bool {
575        self.index().get_layers(coords).is_some()
576    }
577}
578
579impl Default for RingIndex {
580    fn default() -> Self {
581        Self::new()
582    }
583}
584
#[cfg(test)]
mod tests {
    use lyon::tessellation::VertexBuffers;

    use crate::{
        coords::ZoomLevel,
        render::resource::{BackingBufferDescriptor, Queue},
        style::layer::StyleLayer,
        vector::resource::{BackingBufferType, BufferPool},
    };

    /// CPU-side stand-in for a GPU buffer that only tracks its byte size.
    #[derive(Debug)]
    struct TestBuffer {
        size: wgpu::BufferAddress,
    }
    /// Queue double which asserts that every write stays within buffer bounds.
    struct TestQueue;

    impl Queue<TestBuffer> for TestQueue {
        fn write_buffer(&self, buffer: &TestBuffer, offset: wgpu::BufferAddress, data: &[u8]) {
            if offset + data.len() as wgpu::BufferAddress > buffer.size {
                panic!("write out of bounds");
            }
        }
    }

    /// 24-byte vertex so allocation sizes are easy to reason about below.
    #[repr(C)]
    #[derive(Default, Copy, Clone, bytemuck_derive::Pod, bytemuck_derive::Zeroable)]
    struct TestVertex {
        data: [u8; 24],
    }

    /// Two vertices -> 48 bytes of vertex data.
    fn create_48byte() -> Vec<TestVertex> {
        vec![TestVertex::default(), TestVertex::default()]
    }

    /// One vertex -> 24 bytes of vertex data.
    fn create_24byte() -> Vec<TestVertex> {
        vec![TestVertex::default()]
    }

    /// Exercises allocation, wrap-around and eviction in a 128-byte vertex buffer.
    #[test]
    fn test_allocate() {
        let mut pool: BufferPool<TestQueue, TestBuffer, TestVertex, u32, u32, u32> =
            BufferPool::new(
                BackingBufferDescriptor::new(TestBuffer { size: 128 }, 128),
                BackingBufferDescriptor::new(TestBuffer { size: 128 }, 128),
                BackingBufferDescriptor::new(TestBuffer { size: 128 }, 128),
                BackingBufferDescriptor::new(TestBuffer { size: 128 }, 128),
            );

        let queue = TestQueue {};
        let style_layer = StyleLayer::default();

        let mut data48bytes = VertexBuffers::new();
        data48bytes.vertices.append(&mut create_48byte());
        data48bytes.indices.append(&mut vec![1, 2, 3, 4]);
        let data48bytes_aligned = data48bytes.into();

        let mut data24bytes = VertexBuffers::new();
        data24bytes.vertices.append(&mut create_24byte());
        data24bytes.indices.append(&mut vec![1, 2, 3, 4]);
        let data24bytes_aligned = data24bytes.into();

        // Two 48-byte allocations fill 0..96; the free gap at the end is 32 bytes.
        for _ in 0..2 {
            pool.allocate_layer_geometry(
                &queue,
                (0, 0, ZoomLevel::default()).into(),
                style_layer.clone(),
                &data48bytes_aligned,
                2,
                &[],
            );
        }
        assert_eq!(
            128 - 2 * 48,
            pool.available_space(BackingBufferType::Vertices)
        );

        // A 24-byte allocation at 96..120 leaves only 8 free bytes at the end.
        pool.allocate_layer_geometry(
            &queue,
            (0, 0, ZoomLevel::default()).into(),
            style_layer.clone(),
            &data24bytes_aligned,
            2,
            &[],
        );
        assert_eq!(
            128 - 2 * 48 - 24,
            pool.available_space(BackingBufferType::Vertices)
        );
        println!("{:?}", pool.index);

        // 24 bytes no longer fit at the end: the oldest 48-byte entry is evicted
        // and the new allocation wraps to 0..24, leaving a 24-byte gap at 24..48.
        pool.allocate_layer_geometry(
            &queue,
            (0, 0, ZoomLevel::default()).into(),
            style_layer.clone(),
            &data24bytes_aligned,
            2,
            &[],
        );
        // appended now at the beginning
        println!("{:?}", pool.index);
        assert_eq!(24, pool.available_space(BackingBufferType::Vertices));

        // The 24..48 gap is consumed exactly; the ring is now completely full.
        pool.allocate_layer_geometry(
            &queue,
            (0, 0, ZoomLevel::default()).into(),
            style_layer.clone(),
            &data24bytes_aligned,
            2,
            &[],
        );
        println!("{:?}", pool.index);
        assert_eq!(0, pool.available_space(BackingBufferType::Vertices));

        // Evicting the second 48-byte entry (48..96) frees 48 bytes; 24 of them
        // are allocated at 48..72, leaving a 24-byte gap at 72..96.
        pool.allocate_layer_geometry(
            &queue,
            (0, 0, ZoomLevel::default()).into(),
            style_layer.clone(),
            &data24bytes_aligned,
            2,
            &[],
        );
        println!("{:?}", pool.index);
        assert_eq!(24, pool.available_space(BackingBufferType::Vertices));

        // The last 24-byte gap at 72..96 is filled; the ring is full again.
        pool.allocate_layer_geometry(
            &queue,
            (0, 0, ZoomLevel::default()).into(),
            style_layer,
            &data24bytes_aligned,
            2,
            &[],
        );
        println!("{:?}", pool.index);
        assert_eq!(0, pool.available_space(BackingBufferType::Vertices));
    }
}
721}