Row to columnar conversion#

Fixed Schemas#

The following example converts an array of structs to an arrow::Table instance, and then converts it back to the original array of structs.

// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

#include <arrow/api.h>
#include <arrow/result.h>

#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iomanip>
#include <iostream>
#include <vector>

using arrow::DoubleBuilder;
using arrow::Int64Builder;
using arrow::ListBuilder;

// While we want to use columnar data structures to build efficient operations, we
// often receive data in a row-wise fashion from other systems. In the following,
// we give a brief introduction to the classes provided by Apache Arrow by
// showing how to transform row-wise data into a columnar table.
//
// The table contains an id for a product, the number of components in the product
// and the cost of each component.
//
// The data in this example is stored in the following struct:
struct data_row {
  int64_t id;
  int64_t components;
  std::vector<double> component_cost;
};

// Transforming a vector of structs into a columnar Table.
//
// The final representation should be an `arrow::Table` which in turn
// is made up of an `arrow::Schema` and a list of
// `arrow::ChunkedArray` instances. As the first step, we will iterate
// over the data and build up the arrays incrementally. For this
// task, we provide `arrow::ArrayBuilder` classes that help in the
// construction of the final `arrow::Array` instances.
//
// For each type, Arrow has a specially typed builder class. For the primitive
// values `id` and `components` we can use the `arrow::Int64Builder`. For the
// `component_cost` vector, we need two builders: a top-level
// `arrow::ListBuilder` that builds the array of offsets and a nested
// `arrow::DoubleBuilder` that constructs the underlying values array that
// is referenced by the offsets in the former array.
arrow::Result<std::shared_ptr<arrow::Table>> VectorToColumnarTable(
    const std::vector<struct data_row>& rows) {
  // The builders are more efficient using
  // arrow::jemalloc::MemoryPool::default_pool() as this can increase the size of
  // the underlying memory regions in-place. At the moment, arrow::jemalloc is only
  // supported on Unix systems, not Windows.
  arrow::MemoryPool* pool = arrow::default_memory_pool();

  Int64Builder id_builder(pool);
  Int64Builder components_builder(pool);
  ListBuilder component_cost_builder(pool, std::make_shared<DoubleBuilder>(pool));
  // The following builder is owned by component_cost_builder.
  DoubleBuilder* component_item_cost_builder =
      (static_cast<DoubleBuilder*>(component_cost_builder.value_builder()));

  // Now we can loop over our existing data and insert it into the builders. The
  // `Append` calls here may fail (e.g. we cannot allocate enough additional memory).
  // Thus we need to check their return values. For more information on these values,
  // check the documentation about `arrow::Status`.
  for (const data_row& row : rows) {
    ARROW_RETURN_NOT_OK(id_builder.Append(row.id));
    ARROW_RETURN_NOT_OK(components_builder.Append(row.components));

    // Indicate the start of a new list row. This will memorise the current
    // offset in the values builder.
    ARROW_RETURN_NOT_OK(component_cost_builder.Append());
    // Store the actual values. The component cost data (a vector of doubles) already
    // uses the same memory layout as the values buffer Arrow will create, so we can
    // append the whole vector in one call.
    ARROW_RETURN_NOT_OK(component_item_cost_builder->AppendValues(
        row.component_cost.data(), row.component_cost.size()));
  }

  // At the end, we finalise the arrays, declare the (type) schema and combine them
  // into a single `arrow::Table`:
  std::shared_ptr<arrow::Array> id_array;
  ARROW_RETURN_NOT_OK(id_builder.Finish(&id_array));
  std::shared_ptr<arrow::Array> components_array;
  ARROW_RETURN_NOT_OK(components_builder.Finish(&components_array));
  // No need to invoke component_item_cost_builder.Finish because it is implied by
  // the parent builder's Finish invocation.
  std::shared_ptr<arrow::Array> component_cost_array;
  ARROW_RETURN_NOT_OK(component_cost_builder.Finish(&component_cost_array));

  std::vector<std::shared_ptr<arrow::Field>> schema_vector = {
      arrow::field("id", arrow::int64()), arrow::field("components", arrow::int64()),
      arrow::field("component_cost", arrow::list(arrow::float64()))};

  auto schema = std::make_shared<arrow::Schema>(schema_vector);

  // The final `table` variable is the one we can then pass on to other functions
  // that can consume Apache Arrow memory structures. This object has ownership of
  // all referenced data, thus we don't have to care about undefined references once
  // we leave the scope of the function building the table and its underlying arrays.
  std::shared_ptr<arrow::Table> table =
      arrow::Table::Make(schema, {id_array, components_array, component_cost_array});

  return table;
}

arrow::Result<std::vector<data_row>> ColumnarTableToVector(
    const std::shared_ptr<arrow::Table>& table) {
  // To convert an Arrow table back into the same row-wise representation as in the
  // above section, we first check that the table conforms to our expected schema
  // and then build up the vector of rows incrementally.
  //
  // To check whether the table is as expected, we can rely solely on its schema.
  std::vector<std::shared_ptr<arrow::Field>> schema_vector = {
      arrow::field("id", arrow::int64()), arrow::field("components", arrow::int64()),
      arrow::field("component_cost", arrow::list(arrow::float64()))};
  auto expected_schema = std::make_shared<arrow::Schema>(schema_vector);

  if (!expected_schema->Equals(*table->schema())) {
    // The table doesn't have the expected schema, thus we cannot directly
    // convert it to our target representation.
    return arrow::Status::Invalid("Schemas are not matching!");
  }

  // As we have ensured that the table has the expected structure, we can unpack the
  // underlying arrays. For the primitive columns `id` and `components` we can use the
  // high level functions to get the values, whereas for the nested column
  // `component_costs` we need to access the C-pointer to the data to copy its
  // contents into the resulting `std::vector<double>`. Here we need to be careful to
  // also add the offset to the pointer. This offset is needed to enable zero-copy
  // slicing operations. While this could be adjusted automatically for double
  // arrays, this cannot be done for the accompanying bitmap as often the slicing
  // border would be inside a byte.

  auto ids = std::static_pointer_cast<arrow::Int64Array>(table->column(0)->chunk(0));
  auto components =
      std::static_pointer_cast<arrow::Int64Array>(table->column(1)->chunk(0));
  auto component_cost =
      std::static_pointer_cast<arrow::ListArray>(table->column(2)->chunk(0));
  auto component_cost_values =
      std::static_pointer_cast<arrow::DoubleArray>(component_cost->values());
  // To enable zero-copy slices, the native values pointer might need to account
  // for this slicing offset. This is not needed for the higher level functions
  // like Value(…) that already account for this offset internally.
  const double* ccv_ptr = component_cost_values->raw_values();

  std::vector<data_row> rows;
  for (int64_t i = 0; i < table->num_rows(); i++) {
    // Another simplification in this example is that we assume that there are
    // no null entries, i.e. each row is filled with valid values.
    int64_t id = ids->Value(i);
    int64_t component = components->Value(i);
    const double* first = ccv_ptr + component_cost->value_offset(i);
    const double* last = ccv_ptr + component_cost->value_offset(i + 1);
    std::vector<double> components_vec(first, last);
    rows.push_back({id, component, components_vec});
  }

  return rows;
}

arrow::Status RunRowConversion() {
  std::vector<data_row> original_rows = {
      {1, 1, {10.0}}, {2, 3, {11.0, 12.0, 13.0}}, {3, 2, {15.0, 25.0}}};
  std::shared_ptr<arrow::Table> table;
  std::vector<data_row> converted_rows;

  ARROW_ASSIGN_OR_RAISE(table, VectorToColumnarTable(original_rows));
  ARROW_ASSIGN_OR_RAISE(converted_rows, ColumnarTableToVector(table));

  assert(original_rows.size() == converted_rows.size());

  // Print out contents of table, should get
  // ID Components Component prices
  // 1  1          10
  // 2  3          11  12  13
  // 3  2          15  25
  std::cout << std::left << std::setw(3) << "ID " << std::left << std::setw(11)
            << "Components " << std::left << std::setw(15) << "Component prices "
            << std::endl;
  for (const auto& row : converted_rows) {
    std::cout << std::left << std::setw(3) << row.id << std::left << std::setw(11)
              << row.components;
    for (const auto& cost : row.component_cost) {
      std::cout << std::left << std::setw(4) << cost;
    }
    std::cout << std::endl;
  }
  return arrow::Status::OK();
}

int main(int argc, char** argv) {
  auto status = RunRowConversion();
  if (!status.ok()) {
    std::cerr << status.ToString() << std::endl;
    return EXIT_FAILURE;
  }
  return EXIT_SUCCESS;
}
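The read-back loop above assumes the table contains no nulls, as noted in the comment. If nulls were possible, a minimal null-aware read of the id column could look like the following sketch; the helper ReadId and the use of std::optional are illustrative, not part of the example.

#include <arrow/api.h>

#include <cstdint>
#include <optional>

// Returns the id at row i, or std::nullopt if the entry is null. IsNull() consults
// the array's validity bitmap, which Value() alone does not check.
std::optional<int64_t> ReadId(const arrow::Int64Array& ids, int64_t i) {
  if (ids.IsNull(i)) {
    return std::nullopt;
  }
  return ids.Value(i);
}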

Dynamic Schemas#

In many cases, we need to convert to and from row data whose schema is not known at compile time. To help implement these conversions, this library provides several utilities, such as arrow::RecordBatchBuilder, the inline type and array visitors, type traits, and arrow::TableBatchReader, all of which appear in the example below.

The following example shows how to implement conversion between rapidjson::Document and Arrow objects. You can read the full code example at apache/arrow.

Writing conversions to Arrow#

To convert rows to Arrow record batches, we'll set up array builders for all the columns and then, for each field, iterate through the row values and append them to the builders. We assume that we already know the target schema, which may have been provided by another system or inferred elsewhere. Inferring the schema during conversion is a challenging proposition; many systems instead inspect the first N rows to infer a schema if none is already available.
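To illustrate that first-N-rows strategy, here is a rough sketch of schema inference over flat rapidjson documents. It is not part of the cookbook example: the helper name InferSchemaFromFirstRows, the set of handled JSON types, and the first-non-null-value-wins rule are all assumptions made for this sketch.

#include <arrow/api.h>
#include <rapidjson/document.h>

#include <algorithm>
#include <string>
#include <unordered_set>
#include <vector>

arrow::Result<std::shared_ptr<arrow::Schema>> InferSchemaFromFirstRows(
    const std::vector<rapidjson::Document>& rows, size_t max_rows_to_check) {
  std::vector<std::shared_ptr<arrow::Field>> fields;
  std::unordered_set<std::string> seen;
  const size_t n = std::min(rows.size(), max_rows_to_check);
  for (size_t i = 0; i < n; ++i) {
    if (!rows[i].IsObject()) continue;
    for (const auto& member : rows[i].GetObject()) {
      // A null value tells us nothing about the type; look at later rows instead.
      if (member.value.IsNull()) continue;
      std::string name = member.name.GetString();
      // Keep the type of the first non-null value we encounter for each field.
      if (!seen.insert(name).second) continue;
      std::shared_ptr<arrow::DataType> type;
      if (member.value.IsBool()) {
        type = arrow::boolean();
      } else if (member.value.IsInt() || member.value.IsInt64() ||
                 member.value.IsUint() || member.value.IsUint64()) {
        type = arrow::int64();
      } else if (member.value.IsDouble()) {
        type = arrow::float64();
      } else if (member.value.IsString()) {
        type = arrow::utf8();
      } else {
        // Nested objects and arrays are left out of this sketch.
        return arrow::Status::NotImplemented("Cannot infer a type for field ", name);
      }
      fields.push_back(arrow::field(name, type));
    }
  }
  return arrow::schema(fields);
}

Fields that never appear with a non-null value in the first N rows are simply dropped here; a real system would need a policy for that case.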

At the top level, we define a function ConvertToRecordBatch:

arrow::Result<std::shared_ptr<arrow::RecordBatch>> ConvertToRecordBatch(
    const std::vector<rapidjson::Document>& rows, std::shared_ptr<arrow::Schema> schema) {
  // RecordBatchBuilder will create array builders for us for each field in our
  // schema. By passing the number of output rows (`rows.size()`) we can
  // pre-allocate the correct size of arrays, except of course in the case of
  // string, byte, and list arrays, which have dynamic lengths.
  std::unique_ptr<arrow::RecordBatchBuilder> batch_builder;
  ARROW_ASSIGN_OR_RAISE(
      batch_builder,
      arrow::RecordBatchBuilder::Make(schema, arrow::default_memory_pool(), rows.size()));

  // Inner converter will take rows and be responsible for appending values
  // to provided array builders.
  JsonValueConverter converter(rows);
  for (int i = 0; i < batch_builder->num_fields(); ++i) {
    std::shared_ptr<arrow::Field> field = schema->field(i);
    arrow::ArrayBuilder* builder = batch_builder->GetField(i);
    ARROW_RETURN_NOT_OK(converter.Convert(*field.get(), builder));
  }

  std::shared_ptr<arrow::RecordBatch> batch;
  ARROW_ASSIGN_OR_RAISE(batch, batch_builder->Flush());

  // Use RecordBatch::ValidateFull() to make sure arrays were correctly constructed.
  ARROW_RETURN_NOT_OK(batch->ValidateFull());
  return batch;
}  // ConvertToRecordBatch

First we use arrow::RecordBatchBuilder, which conveniently creates a builder for each field in the schema. Then we iterate over the fields of the schema, get the corresponding builder, and call Convert() on our JsonValueConverter (to be discussed next). At the end, we call batch->ValidateFull(), which checks the integrity of the arrays to make sure the conversion was performed correctly; this is especially useful when debugging new conversion implementations.
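As a usage sketch (not shown in the example), calling ConvertToRecordBatch might look like the following; the sample documents, field names, and the wrapper function ConvertSampleRows are invented for illustration.

#include <arrow/api.h>
#include <rapidjson/document.h>

#include <memory>
#include <vector>

arrow::Status ConvertSampleRows() {
  // Parse a couple of JSON documents into the row-wise representation.
  std::vector<rapidjson::Document> rows;
  for (const char* json : {R"({"id": 1, "cost": 1.5})", R"({"id": 2, "cost": null})"}) {
    rapidjson::Document doc;
    doc.Parse(json);
    rows.push_back(std::move(doc));
  }

  // The target schema is assumed to be known up front, as discussed above.
  auto schema = arrow::schema(
      {arrow::field("id", arrow::int64()), arrow::field("cost", arrow::float64())});

  ARROW_ASSIGN_OR_RAISE(std::shared_ptr<arrow::RecordBatch> batch,
                        ConvertToRecordBatch(rows, schema));
  // batch now holds two rows: an int64 "id" column and a float64 "cost" column
  // containing one null.
  return arrow::Status::OK();
}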

One level down, the JsonValueConverter is responsible for appending the row values of the provided field to a provided array builder. In order to specialize the logic for each data type, it implements Visit() methods and calls arrow::VisitTypeInline(). (See more about type visitors in Visitor Pattern.)

At the end of that class is the private method FieldValues(), which returns an iterator over the current field's values across all rows. In row-based structures that are flat (such as a vector of values) this is trivial to implement, as sketched below. But if the schema is nested, as in the case of JSON documents, a special iterator is needed to navigate the levels of nesting. See the full example for the implementation details of DocValuesIterator.
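For the flat case, a minimal sketch of such an iterator might look like this; the name FlatFieldValues and its error handling are assumptions for this sketch, and the rows vector must outlive the returned iterator.

#include <arrow/api.h>
#include <arrow/util/iterator.h>
#include <rapidjson/document.h>

#include <string>
#include <vector>

// Iterate over the value of `field_name` in each flat document. nullptr marks the
// end of iteration, matching the convention used by FieldValues() below.
arrow::Iterator<const rapidjson::Value*> FlatFieldValues(
    const std::vector<rapidjson::Document>& rows, std::string field_name) {
  size_t i = 0;
  auto fn = [&rows, field_name, i]() mutable -> arrow::Result<const rapidjson::Value*> {
    if (i >= rows.size()) return nullptr;  // end of iteration
    const rapidjson::Document& doc = rows[i++];
    auto member = doc.FindMember(field_name.c_str());
    if (member == doc.MemberEnd()) {
      return arrow::Status::Invalid("Row is missing field: ", field_name);
    }
    return &member->value;
  };
  return arrow::MakeFunctionIterator(std::move(fn));
}

The full JsonValueConverter, which instead delegates to DocValuesIterator so that it can walk nested objects and lists, is shown below: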

class JsonValueConverter {
 public:
  explicit JsonValueConverter(const std::vector<rapidjson::Document>& rows)
      : rows_(rows), array_levels_(0) {}

  JsonValueConverter(const std::vector<rapidjson::Document>& rows,
                     const std::vector<std::string>& root_path, int64_t array_levels)
      : rows_(rows), root_path_(root_path), array_levels_(array_levels) {}

  /// \brief For field passed in, append corresponding values to builder
  arrow::Status Convert(const arrow::Field& field, arrow::ArrayBuilder* builder) {
    return Convert(field, field.name(), builder);
  }

  /// \brief For field passed in, append corresponding values to builder
  arrow::Status Convert(const arrow::Field& field, const std::string& field_name,
                        arrow::ArrayBuilder* builder) {
    field_name_ = field_name;
    builder_ = builder;
    ARROW_RETURN_NOT_OK(arrow::VisitTypeInline(*field.type().get(), this));
    return arrow::Status::OK();
  }

  // Default implementation
  arrow::Status Visit(const arrow::DataType& type) {
    return arrow::Status::NotImplemented(
        "Cannot convert json value to Arrow array of type ", type.ToString());
  }

  arrow::Status Visit(const arrow::Int64Type& type) {
    arrow::Int64Builder* builder = static_cast<arrow::Int64Builder*>(builder_);
    for (const auto& maybe_value : FieldValues()) {
      ARROW_ASSIGN_OR_RAISE(auto value, maybe_value);
      if (value->IsNull()) {
        ARROW_RETURN_NOT_OK(builder->AppendNull());
      } else {
        if (value->IsUint()) {
          ARROW_RETURN_NOT_OK(builder->Append(value->GetUint()));
        } else if (value->IsInt()) {
          ARROW_RETURN_NOT_OK(builder->Append(value->GetInt()));
        } else if (value->IsUint64()) {
          ARROW_RETURN_NOT_OK(builder->Append(value->GetUint64()));
        } else if (value->IsInt64()) {
          ARROW_RETURN_NOT_OK(builder->Append(value->GetInt64()));
        } else {
          return arrow::Status::Invalid("Value is not an integer");
        }
      }
    }
    return arrow::Status::OK();
  }

  arrow::Status Visit(const arrow::DoubleType& type) {
    arrow::DoubleBuilder* builder = static_cast<arrow::DoubleBuilder*>(builder_);
    for (const auto& maybe_value : FieldValues()) {
      ARROW_ASSIGN_OR_RAISE(auto value, maybe_value);
      if (value->IsNull()) {
        ARROW_RETURN_NOT_OK(builder->AppendNull());
      } else {
        ARROW_RETURN_NOT_OK(builder->Append(value->GetDouble()));
      }
    }
    return arrow::Status::OK();
  }

  arrow::Status Visit(const arrow::StringType& type) {
    arrow::StringBuilder* builder = static_cast<arrow::StringBuilder*>(builder_);
    for (const auto& maybe_value : FieldValues()) {
      ARROW_ASSIGN_OR_RAISE(auto value, maybe_value);
      if (value->IsNull()) {
        ARROW_RETURN_NOT_OK(builder->AppendNull());
      } else {
        ARROW_RETURN_NOT_OK(builder->Append(value->GetString()));
      }
    }
    return arrow::Status::OK();
  }

  arrow::Status Visit(const arrow::BooleanType& type) {
    arrow::BooleanBuilder* builder = static_cast<arrow::BooleanBuilder*>(builder_);
    for (const auto& maybe_value : FieldValues()) {
      ARROW_ASSIGN_OR_RAISE(auto value, maybe_value);
      if (value->IsNull()) {
        ARROW_RETURN_NOT_OK(builder->AppendNull());
      } else {
        ARROW_RETURN_NOT_OK(builder->Append(value->GetBool()));
      }
    }
    return arrow::Status::OK();
  }

  arrow::Status Visit(const arrow::StructType& type) {
    arrow::StructBuilder* builder = static_cast<arrow::StructBuilder*>(builder_);

    std::vector<std::string> child_path(root_path_);
    if (field_name_.size() > 0) {
      child_path.push_back(field_name_);
    }
    auto child_converter = JsonValueConverter(rows_, child_path, array_levels_);

    for (int i = 0; i < type.num_fields(); ++i) {
      std::shared_ptr<arrow::Field> child_field = type.field(i);
      std::shared_ptr<arrow::ArrayBuilder> child_builder = builder->child_builder(i);

      ARROW_RETURN_NOT_OK(
          child_converter.Convert(*child_field.get(), child_builder.get()));
    }

    // Make null bitmap
    for (const auto& maybe_value : FieldValues()) {
      ARROW_ASSIGN_OR_RAISE(auto value, maybe_value);
      ARROW_RETURN_NOT_OK(builder->Append(!value->IsNull()));
    }

    return arrow::Status::OK();
  }

  arrow::Status Visit(const arrow::ListType& type) {
    arrow::ListBuilder* builder = static_cast<arrow::ListBuilder*>(builder_);

    // Values and offsets need to be interleaved in ListBuilder, so first collect the
    // values
    std::unique_ptr<arrow::ArrayBuilder> tmp_value_builder;
    ARROW_ASSIGN_OR_RAISE(tmp_value_builder,
                          arrow::MakeBuilder(builder->value_builder()->type()));
    std::vector<std::string> child_path(root_path_);
    child_path.push_back(field_name_);
    auto child_converter = JsonValueConverter(rows_, child_path, array_levels_ + 1);
    ARROW_RETURN_NOT_OK(
        child_converter.Convert(*type.value_field().get(), "", tmp_value_builder.get()));

    std::shared_ptr<arrow::Array> values_array;
    ARROW_RETURN_NOT_OK(tmp_value_builder->Finish(&values_array));
    std::shared_ptr<arrow::ArrayData> values_data = values_array->data();

    arrow::ArrayBuilder* value_builder = builder->value_builder();
    int64_t offset = 0;
    for (const auto& maybe_value : FieldValues()) {
      ARROW_ASSIGN_OR_RAISE(auto value, maybe_value);
      ARROW_RETURN_NOT_OK(builder->Append(!value->IsNull()));
      if (!value->IsNull() && value->Size() > 0) {
        ARROW_RETURN_NOT_OK(
            value_builder->AppendArraySlice(*values_data.get(), offset, value->Size()));
        offset += value->Size();
      }
    }

    return arrow::Status::OK();
  }

 private:
  std::string field_name_;
  arrow::ArrayBuilder* builder_;
  const std::vector<rapidjson::Document>& rows_;
  std::vector<std::string> root_path_;
  int64_t array_levels_;

  /// Return a flattened iterator over values at nested location
  arrow::Iterator<const rapidjson::Value*> FieldValues() {
    std::vector<std::string> path(root_path_);
    if (field_name_.size() > 0) {
      path.push_back(field_name_);
    }
    auto iter = DocValuesIterator(rows_, std::move(path), array_levels_);
    auto fn = [iter]() mutable -> arrow::Result<const rapidjson::Value*> {
      return iter.Next();
    };

    return arrow::MakeFunctionIterator(fn);
  }
};  // JsonValueConverter

Writing conversions from Arrow#

To convert from Arrow record batches into rows, we'll process the table in smaller batches, visiting each field of the batch and filling the output rows column-by-column.

At the top level, we define ArrowToDocumentConverter, which provides the API for converting Arrow batches and tables to rows. In many cases, it is more efficient to convert to rows in smaller batches rather than processing the entire table at once. So we define one ConvertToVector method to convert a single batch, and in the other conversion method we use arrow::TableBatchReader to iterate over slices of a table. The latter returns Arrow's iterator type (arrow::Iterator), so rows can then be processed one at a time or collected into a container.

class ArrowToDocumentConverter {
 public:
  /// Convert a single batch of Arrow data into Documents
  arrow::Result<std::vector<rapidjson::Document>> ConvertToVector(
      std::shared_ptr<arrow::RecordBatch> batch) {
    RowBatchBuilder builder{batch->num_rows()};

    for (int i = 0; i < batch->num_columns(); ++i) {
      builder.SetField(batch->schema()->field(i).get());
      ARROW_RETURN_NOT_OK(arrow::VisitArrayInline(*batch->column(i).get(), &builder));
    }

    return std::move(builder).Rows();
  }

  /// Convert an Arrow table into an iterator of Documents
  arrow::Iterator<rapidjson::Document> ConvertToIterator(
      std::shared_ptr<arrow::Table> table, size_t batch_size) {
    // Use TableBatchReader to divide table into smaller batches. The batches
    // created are zero-copy slices with *at most* `batch_size` rows.
    auto batch_reader = std::make_shared<arrow::TableBatchReader>(*table);
    batch_reader->set_chunksize(batch_size);

    auto read_batch = [this](const std::shared_ptr<arrow::RecordBatch>& batch)
        -> arrow::Result<arrow::Iterator<rapidjson::Document>> {
      ARROW_ASSIGN_OR_RAISE(auto rows, ConvertToVector(batch));
      return arrow::MakeVectorIterator(std::move(rows));
    };

    auto nested_iter = arrow::MakeMaybeMapIterator(
        read_batch, arrow::MakeIteratorFromReader(std::move(batch_reader)));

    return arrow::MakeFlattenIterator(std::move(nested_iter));
  }
};  // ArrowToDocumentConverter
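A hypothetical caller (not part of the example) could drain the returned iterator like this; the function ProcessTableAsRows and the batch size are invented for illustration. A default-constructed rapidjson::Document is the value that marks the end of iteration.

#include <arrow/api.h>
#include <arrow/util/iterator.h>
#include <rapidjson/document.h>

#include <memory>

arrow::Status ProcessTableAsRows(const std::shared_ptr<arrow::Table>& table) {
  ArrowToDocumentConverter converter;
  arrow::Iterator<rapidjson::Document> doc_iter =
      converter.ConvertToIterator(table, /*batch_size=*/64);
  while (true) {
    // Next() yields one converted row at a time, pulling new batches as needed.
    ARROW_ASSIGN_OR_RAISE(rapidjson::Document doc, doc_iter.Next());
    if (arrow::IsIterationEnd(doc)) break;
    // ... handle one row (doc) at a time ...
  }
  return arrow::Status::OK();
}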

One level down, the output rows are filled in by RowBatchBuilder. The RowBatchBuilder implements Visit() methods, but to save on code we write a single template method for array types that have primitive C equivalents (booleans, integers, and floats) using arrow::enable_if_primitive_ctype. See Type Traits for other type predicates.

class RowBatchBuilder {
 public:
  explicit RowBatchBuilder(int64_t num_rows) : field_(nullptr) {
    // Reserve all of the space required up-front to avoid unnecessary resizing
    rows_.reserve(num_rows);

    for (int64_t i = 0; i < num_rows; ++i) {
      rows_.push_back(rapidjson::Document());
      rows_[i].SetObject();
    }
  }

  /// \brief Set which field to convert.
  void SetField(const arrow::Field* field) { field_ = field; }

  /// \brief Retrieve converted rows from builder.
  std::vector<rapidjson::Document> Rows() && { return std::move(rows_); }

  // Default implementation
  arrow::Status Visit(const arrow::Array& array) {
    return arrow::Status::NotImplemented(
        "Cannot convert to json document for array of type ", array.type()->ToString());
  }

  // Handles booleans, integers, floats
  template <typename ArrayType, typename DataClass = typename ArrayType::TypeClass>
  arrow::enable_if_primitive_ctype<DataClass, arrow::Status> Visit(
      const ArrayType& array) {
    assert(static_cast<int64_t>(rows_.size()) == array.length());
    for (int64_t i = 0; i < array.length(); ++i) {
      if (!array.IsNull(i)) {
        rapidjson::Value str_key(field_->name(), rows_[i].GetAllocator());
        rows_[i].AddMember(str_key, array.Value(i), rows_[i].GetAllocator());
      }
    }
    return arrow::Status::OK();
  }

  arrow::Status Visit(const arrow::StringArray& array) {
    assert(static_cast<int64_t>(rows_.size()) == array.length());
    for (int64_t i = 0; i < array.length(); ++i) {
      if (!array.IsNull(i)) {
        rapidjson::Value str_key(field_->name(), rows_[i].GetAllocator());
        std::string_view value_view = array.Value(i);
        rapidjson::Value value;
        value.SetString(value_view.data(),
                        static_cast<rapidjson::SizeType>(value_view.size()),
                        rows_[i].GetAllocator());
        rows_[i].AddMember(str_key, value, rows_[i].GetAllocator());
      }
    }
    return arrow::Status::OK();
  }

  arrow::Status Visit(const arrow::StructArray& array) {
    const arrow::StructType* type = array.struct_type();

    assert(static_cast<int64_t>(rows_.size()) == array.length());

    RowBatchBuilder child_builder(rows_.size());
    for (int i = 0; i < type->num_fields(); ++i) {
      const arrow::Field* child_field = type->field(i).get();
      child_builder.SetField(child_field);
      ARROW_RETURN_NOT_OK(arrow::VisitArrayInline(*array.field(i).get(), &child_builder));
    }
    std::vector<rapidjson::Document> rows = std::move(child_builder).Rows();

    for (int64_t i = 0; i < array.length(); ++i) {
      if (!array.IsNull(i)) {
        rapidjson::Value str_key(field_->name(), rows_[i].GetAllocator());
        // Must copy value to new allocator
        rapidjson::Value row_val;
        row_val.CopyFrom(rows[i], rows_[i].GetAllocator());
        rows_[i].AddMember(str_key, row_val, rows_[i].GetAllocator());
      }
    }
    return arrow::Status::OK();
  }

  arrow::Status Visit(const arrow::ListArray& array) {
    assert(static_cast<int64_t>(rows_.size()) == array.length());
    // First create rows from values
    std::shared_ptr<arrow::Array> values = array.values();
    RowBatchBuilder child_builder(values->length());
    const arrow::Field* value_field = array.list_type()->value_field().get();
    std::string value_field_name = value_field->name();
    child_builder.SetField(value_field);
    ARROW_RETURN_NOT_OK(arrow::VisitArrayInline(*values.get(), &child_builder));

    std::vector<rapidjson::Document> rows = std::move(child_builder).Rows();

    int64_t values_i = 0;
    for (int64_t i = 0; i < array.length(); ++i) {
      if (array.IsNull(i)) continue;

      rapidjson::Document::AllocatorType& allocator = rows_[i].GetAllocator();
      auto array_len = array.value_length(i);

      rapidjson::Value value;
      value.SetArray();
      value.Reserve(array_len, allocator);

      for (int64_t j = 0; j < array_len; ++j) {
        rapidjson::Value row_val;
        // Must copy value to new allocator
        row_val.CopyFrom(rows[values_i][value_field_name], allocator);
        value.PushBack(row_val, allocator);
        ++values_i;
      }

      rapidjson::Value str_key(field_->name(), allocator);
      rows_[i].AddMember(str_key, value, allocator);
    }

    return arrow::Status::OK();
  }

 private:
  const arrow::Field* field_;
  std::vector<rapidjson::Document> rows_;
};  // RowBatchBuilder
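A side note on the SFINAE predicate used in the templated Visit() above: arrow::enable_if_primitive_ctype<DataClass, arrow::Status> only resolves to arrow::Status when DataClass is one of Arrow's primitive C-type types, so the template drops out of overload resolution for types such as StringType, StructType, and ListType, which get their own overloads. A minimal illustration, assuming the traits are available from arrow/type_traits.h:

#include <arrow/api.h>
#include <arrow/type_traits.h>

#include <type_traits>

// Int64Type has a primitive C type (int64_t), so the alias names arrow::Status and
// the templated Visit() participates in overload resolution for Int64Array.
static_assert(
    std::is_same<arrow::enable_if_primitive_ctype<arrow::Int64Type, arrow::Status>,
                 arrow::Status>::value,
    "Int64Type is a primitive C-type type");

// StringType is not, which is why RowBatchBuilder provides a dedicated
// Visit(const arrow::StringArray&) overload instead.
static_assert(!arrow::is_primitive_ctype<arrow::StringType>::value,
              "StringType is handled by its own overload");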