
#include <jni.h>

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <vector>


namespace ink::jni {

namespace {

using internal::PartitionedCoatIndices;
using internal::UpdatePartitionedCoatIndices;

}  // namespace

2229int InProgressStrokeWrapper::VertexCount (jint coat_index,
2330 jint mesh_partition_index)const {
24- // TODO: b/294561921 - Implement multiple mesh partitions.
25- return in_progress_stroke_.GetMesh (coat_index).VertexCount ();
31+ ABSL_CHECK_LT (coat_index, coat_buffer_partitions_.size ());
32+ ABSL_CHECK_LT (mesh_partition_index,
33+ coat_buffer_partitions_[coat_index].partitions .size ());
34+ return coat_buffer_partitions_[coat_index]
35+ .partitions [mesh_partition_index]
36+ .vertex_buffer_size ;
2637}
2738
2839void InProgressStrokeWrapper::Start (const Brush& brush,int noise_seed) {
@@ -43,95 +54,178 @@ absl::Status InProgressStrokeWrapper::UpdateShape(
4354
4455void InProgressStrokeWrapper::UpdateCaches () {
4556int coat_count = in_progress_stroke_.BrushCoatCount ();
46- coat_buffer_caches_ .resize (coat_count);
57+ coat_buffer_partitions_ .resize (coat_count);
4758for (int coat_index =0 ; coat_index < coat_count; ++coat_index) {
4859UpdateCache (coat_index);
4960 }
5061}
5162
52- void InProgressStrokeWrapper::UpdateCache ( int coat_index) {
53- const MutableMesh& mesh = in_progress_stroke_. GetMesh (coat_index);
54- Cache& cache = coat_buffer_caches_[coat_index];
55- size_t index_stride = mesh. IndexStride ();
56- ABSL_CHECK_EQ (index_stride, sizeof ( uint32_t ))
57- << " Unsupported index stride: " << index_stride ;
63+ namespace internal {
64+
65+ void UpdatePartitionedCoatIndices (absl::Span< const uint32_t > index_data,
66+ PartitionedCoatIndices& cache) {
67+ constexpr int kMaxVertexIndexInPartition =
68+ std::numeric_limits< uint16_t >:: max () ;
5869// Clear the contents, but don't give up any of the capacity because it will
5970// be filled again right away.
60- cache.triangle_index_data .clear ();
61- const absl::Span<const std::byte> raw_index_data = mesh.RawIndexData ();
62- uint32_t index_count =3 * mesh.TriangleCount ();
63- for (uint32_t i =0 ; i < index_count; ++i) {
64- uint32_t i_byte =sizeof (uint32_t ) * i;
65- uint32_t triangle_index_32;
66- // Interpret each set of 4 bytes as a 32-bit integer.
67- std::memcpy (&triangle_index_32, &raw_index_data[i_byte],sizeof (uint32_t ));
68- // If that 32-bit integer would not fit into a 16-bit integer, then stop
69- // copying content and return all the triangles that have been processed so
70- // far.
71- if (triangle_index_32 > std::numeric_limits<uint16_t >::max ()) {
72- // The offending index may have been in the middle of a triangle, so
73- // rewind back to the previous multiple of 3 to just return whole
74- // triangles.
75- uint32_t i_last_multiple_of_3 = (i /3 ) *3 ;
76- cache.triangle_index_data .erase (
77- cache.triangle_index_data .begin () + i_last_multiple_of_3,
78- cache.triangle_index_data .end ());
79- ABSL_LOG_EVERY_N_SEC (WARNING,1 )
80- <<" Triangle index data exceeds 16-bit limit, truncating." ;
81- break ;
71+ cache.converted_index_buffer .clear ();
72+ cache.partitions .clear ();
73+ // Start the first partition, vertex_offset and index_offset start at 0. This
74+ // avoids an extra linear pass in the common case where everything fits in
75+ // 16-bit indices.
76+ cache.partitions .emplace_back ();
77+ int index_count = index_data.size ();
78+ for (int i =0 ; i < index_count; ++i) {
79+ uint32_t overall_vertex_index = index_data[i];
80+ uint32_t current_vertex_offset =
81+ cache.partitions .back ().vertex_buffer_offset ;
82+ int vertex_index_in_partition =
83+ overall_vertex_index - current_vertex_offset;
84+
85+ // If this fits into the current partition, add it to the buffer and
86+ // update where the partition's portion of the vertex buffer ends.
87+ if (vertex_index_in_partition <=kMaxVertexIndexInPartition ) {
88+ cache.converted_index_buffer .push_back (vertex_index_in_partition);
89+ PartitionedCoatIndices::Partition& current_partition =
90+ cache.partitions .back ();
91+ current_partition.vertex_buffer_size =std::max (
92+ current_partition.vertex_buffer_size , vertex_index_in_partition +1 );
93+ continue ;
8294 }
83- uint16_t triangle_index_16 = triangle_index_32;
84- cache.triangle_index_data .push_back (triangle_index_16);
95+
96+ // Otherwise, we need to resize down to the last complete triangle and
97+ // start a new partition.
98+ cache.converted_index_buffer .resize (i /3 *3 );
99+ ABSL_LOG_EVERY_N_SEC (WARNING,1 )
100+ <<" Triangle index data exceeds 16-bit limit, attempting to"
101+ <<" partition into" << cache.partitions .size () +1 <<" partitions." ;
102+ // Rewind to just before the new partition.
103+ i = cache.converted_index_buffer .size () -1 ;
104+ // Find the next span of the index buffer that can be represented as
105+ // 16-bit indices into a subspan of the vertex buffer.
106+ uint32_t max_later_overall_vertex_index =0 ;
107+ uint32_t min_later_overall_vertex_index =
108+ std::numeric_limits<uint32_t >::max ();
109+ for (int later_i = i +1 ; later_i < index_count; ++later_i) {
110+ uint32_t later_overall_vertex_index = index_data[later_i];
111+ min_later_overall_vertex_index =
112+ std::min (min_later_overall_vertex_index, later_overall_vertex_index);
113+ max_later_overall_vertex_index =
114+ std::max (max_later_overall_vertex_index, later_overall_vertex_index);
115+ if (max_later_overall_vertex_index - min_later_overall_vertex_index >
116+ kMaxVertexIndexInPartition ) {
117+ // Speaking of hopefully unlikely edge-cases, we do need to be able to
118+ // fit at least one triangle into a partition to make progress and
119+ // avoid an infinite loop. Bail out if the next triangle's vertex
120+ // indices can't fit within a 16-bit-max span.
121+ if (later_i - i <=3 ) {
122+ ABSL_LOG_EVERY_N_SEC (ERROR,1 )
123+ <<" Partitioning failed because the span of the next"
124+ <<" triangle's vertices is more than the 16-bit limit, giving"
125+ <<" up and truncating." ;
126+ return ;
127+ }
128+ break ;
129+ }
130+ }
131+ // The _first_ index is very unlikely to exceed the 16-bit limit. But for
132+ // full generality, avoid creating an extra empty partition in that case.
133+ // If the partition is non-empty, then close it and start a new one.
134+ if (!cache.converted_index_buffer .empty ()) {
135+ cache.partitions .emplace_back ();
136+ }
137+ // Set the start bounds of the new partition.
138+ PartitionedCoatIndices::Partition& current_partition =
139+ cache.partitions .back ();
140+ current_partition.index_buffer_offset = i +1 ;
141+ current_partition.vertex_buffer_offset = min_later_overall_vertex_index;
85142 }
86- ABSL_CHECK_EQ (cache.triangle_index_data .size () %3 ,0u );
143+ }
144+
145+ }// namespace internal
146+
147+ void InProgressStrokeWrapper::UpdateCache (int coat_index) {
148+ const MutableMesh& mesh = in_progress_stroke_.GetMesh (coat_index);
149+ ABSL_CHECK_EQ (mesh.IndexStride (),sizeof (uint32_t ))
150+ <<" Unsupported index stride:" << mesh.IndexStride ();
151+ const absl::Span<const std::byte>& raw_index_data = mesh.RawIndexData ();
152+ int index_count = raw_index_data.size () /sizeof (uint32_t );
153+ ABSL_CHECK_EQ (index_count, mesh.TriangleCount () *3 );
154+ UpdatePartitionedCoatIndices (
155+ absl::MakeConstSpan (
156+ reinterpret_cast <const uint32_t *>(raw_index_data.data ()),
157+ index_count),
158+ coat_buffer_partitions_[coat_index]);
87159}
88160
89161int InProgressStrokeWrapper::MeshPartitionCount (jint coat_index)const {
90- // TODO: b/294561921 - Implement multiple mesh partitions.
91- return 1 ;
162+ ABSL_CHECK_LT (coat_index, coat_buffer_partitions_. size ());
163+ return coat_buffer_partitions_[coat_index]. partitions . size () ;
92164}
93165
94166absl_nullable jobjectInProgressStrokeWrapper::GetUnsafelyMutableRawVertexData (
95167 JNIEnv* env,int coat_index, jint mesh_partition_index)const {
96- // TODO: b/294561921 - Implement multiple mesh partitions.
97- ABSL_CHECK_EQ (mesh_partition_index,0 )
98- <<" Unsupported mesh partition index:" << mesh_partition_index;
99- ABSL_CHECK_LT (coat_index, coat_buffer_caches_.size ());
168+ ABSL_CHECK_LT (coat_index, coat_buffer_partitions_.size ());
169+ ABSL_CHECK_LT (mesh_partition_index,
170+ coat_buffer_partitions_[coat_index].partitions .size ());
100171const absl::Span<const std::byte> raw_vertex_data =
101172 in_progress_stroke_.GetMesh (coat_index).RawVertexData ();
102173// absl::Span::data() may be nullptr if empty, which NewDirectByteBuffer does
103174// not permit (even if the size is zero).
104175if (raw_vertex_data.data () ==nullptr ) {
105176return nullptr ;
106177 }
178+ const PartitionedCoatIndices::Partition& partition =
179+ coat_buffer_partitions_[coat_index].partitions [mesh_partition_index];
180+ uint16_t vertex_stride =
181+ in_progress_stroke_.GetMesh (coat_index).VertexStride ();
182+ ABSL_CHECK_LE (0 , partition.vertex_buffer_offset );
183+ ABSL_CHECK_LE (
184+ (partition.vertex_buffer_offset + partition.vertex_buffer_size ) *
185+ vertex_stride,
186+ raw_vertex_data.size ());
107187return env->NewDirectByteBuffer (
108188// NewDirectByteBuffer needs a non-const void*. The resulting buffer is
109189// writeable, but it will be wrapped at the Kotlin layer in a read-only
110190// buffer that delegates to this one.
111- const_cast <std::byte*>(raw_vertex_data.data ()), raw_vertex_data.size ());
191+ const_cast <std::byte*>(raw_vertex_data.data ()) +
192+ partition.vertex_buffer_offset * vertex_stride,
193+ partition.vertex_buffer_size * vertex_stride);
112194}
113195
114196absl_nullable jobject
115197InProgressStrokeWrapper::GetUnsafelyMutableRawTriangleIndexData (
116198 JNIEnv* env,int coat_index, jint mesh_partition_index)const {
117- // TODO: b/294561921 - Implement multiple mesh partitions.
118- ABSL_CHECK_EQ (mesh_partition_index,0 )
119- << " Unsupported mesh partition index: " << mesh_partition_index ;
120- ABSL_CHECK_LT (coat_index, coat_buffer_caches_. size ()) ;
199+ ABSL_CHECK_LT (coat_index, coat_buffer_partitions_. size ());
200+ ABSL_CHECK_LT (mesh_partition_index,
201+ coat_buffer_partitions_[coat_index]. partitions . size ()) ;
202+ const PartitionedCoatIndices& cache = coat_buffer_partitions_[coat_index] ;
121203const std::vector<uint16_t >& triangle_index_data =
122- coat_buffer_caches_[coat_index]. triangle_index_data ;
204+ cache. converted_index_buffer ;
123205// std::vector::data() may be nullptr if empty, which NewDirectByteBuffer
124206// does not permit (even if the size is zero).
125207if (triangle_index_data.data () ==nullptr ) {
126208return nullptr ;
127209 }
210+ const PartitionedCoatIndices::Partition& partition =
211+ cache.partitions [mesh_partition_index];
212+ ABSL_CHECK_LE (0 , partition.index_buffer_offset );
213+ int next_partition_index_buffer_offset =
214+ mesh_partition_index ==static_cast <int >(cache.partitions .size ()) -1
215+ ? triangle_index_data.size ()
216+ : cache.partitions [mesh_partition_index +1 ].index_buffer_offset ;
217+ int partition_index_buffer_size =
218+ next_partition_index_buffer_offset - partition.index_buffer_offset ;
219+ ABSL_CHECK_LE (partition.index_buffer_offset + partition_index_buffer_size,
220+ triangle_index_data.size ());
128221return env->NewDirectByteBuffer (
129222// NewDirectByteBuffer needs a non-const void*. The resulting buffer
130223// is writeable, but it will be wrapped at the Kotlin layer in a
131224// read-only buffer that delegates to this one. This one needs to be
132225// compatible with ShortBuffer, which expects 16-bit values.
133- const_cast <uint16_t *>(triangle_index_data.data ()),
134- triangle_index_data.size () *sizeof (uint16_t ));
226+ const_cast <uint16_t *>(triangle_index_data.data () +
227+ partition.index_buffer_offset ),
228+ partition_index_buffer_size *sizeof (uint16_t ));
135229}

}  // namespace ink::jni