/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package parquet.hadoop;

import java.io.IOException;

import org.apache.drill.exec.ops.OperatorContext;
import org.apache.drill.exec.store.parquet.ParquetDirectByteBufferAllocator;
import org.apache.hadoop.conf.Configuration;

import parquet.column.page.PageWriteStore;
import parquet.hadoop.CodecFactory.BytesCompressor;
import parquet.hadoop.metadata.CompressionCodecName;
import parquet.schema.MessageType;
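
/**
 * Lives in the {@code parquet.hadoop} package so that Drill can reach the
 * package-private {@link ColumnChunkPageWriteStore} API from its Parquet
 * record writer.
 */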
public class ColumnChunkPageWriteStoreExposer {
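
  /**
   * Creates a {@link ColumnChunkPageWriteStore} backed by Drill's direct
   * ByteBuffer allocator. A fresh Hadoop {@link Configuration} is built on
   * each call, solely to obtain a compressor for {@code codec}.
   */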
  public static ColumnChunkPageWriteStore newColumnChunkPageWriteStore(
      OperatorContext oContext,
      CompressionCodecName codec,
      int pageSize,
      MessageType schema,
      int initialSize) {
    BytesCompressor compressor = new CodecFactory(new Configuration()).getCompressor(codec, pageSize);
    return new ColumnChunkPageWriteStore(compressor, schema, initialSize,
        new ParquetDirectByteBufferAllocator(oContext));
  }
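
  /**
   * Flushes the pages buffered in {@code pageStore} to the given
   * {@link ParquetFileWriter}.
   */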
  public static void flushPageStore(PageWriteStore pageStore, ParquetFileWriter w) throws IOException {
    ((ColumnChunkPageWriteStore) pageStore).flushToFileWriter(w);
  }
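
  /**
   * Closes {@code pageStore}, releasing its buffered pages.
   */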
  public static void close(PageWriteStore pageStore) throws IOException {
    ((ColumnChunkPageWriteStore) pageStore).close();
  }

}