Examples of ReadContext


Examples of org.apache.commons.betwixt.io.read.ReadContext

    /**
      * Factory method for new contexts.
      * Ensures that they are correctly configured.
      * @return the ReadContext created, not null
      */
    private ReadContext makeContext() {
        return new ReadContext( log, bindingConfiguration, readConfiguration );
    }
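
For orientation, here is a minimal, self-contained sketch of constructing and driving a ReadContext directly, based on the constructors and pushElement calls visible in the snippets on this page (popElement is assumed to be the counterpart of pushElement, and the element names are hypothetical):

    import org.apache.commons.betwixt.BindingConfiguration;
    import org.apache.commons.betwixt.io.read.ReadConfiguration;
    import org.apache.commons.betwixt.io.read.ReadContext;

    public class ReadContextSketch {
        public static void main(String[] args) {
            // Default binding and read configurations are enough to get started.
            ReadContext context = new ReadContext(
                    new BindingConfiguration(),
                    new ReadConfiguration());

            // The context tracks the parser's position in the document
            // as a stack of element names.
            context.pushElement("library");
            context.pushElement("books");
            context.popElement();
        }
    }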

Examples of org.apache.commons.betwixt.io.read.ReadContext

    public TestBaseMappingStrategy(String testName) {
        super(testName);
    }

    public void testArrayMapping() throws Exception {
        ReadContext context = new ReadContext(
                    new BindingConfiguration(),
                    new ReadConfiguration());

        // Simulate parsing <LibraryBeanWithArraySetter><books> by pushing
        // the element names and binding the bean class at the outer element.
        context.pushElement("LibraryBeanWithArraySetter");
        context.markClassMap(LibraryBeanWithArraySetter.class);
        context.pushElement("books");

        ActionMappingStrategy strategy = ActionMappingStrategy.DEFAULT;
        MappingAction action = strategy.getMappingAction("", "books", new AttributesImpl(), context);
        assertTrue("Should be mapped to an array bind action", action instanceof ArrayBindAction);
    }
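
The assertion holds because LibraryBeanWithArraySetter, as its name suggests, exposes its books property through an array-typed setter, so the default strategy resolves the books element to an ArrayBindAction.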

Examples of org.apache.commons.betwixt.io.read.ReadContext

        // Delegates to the main constructor, wrapping the existing Context
        // in a fresh ReadContext with a default ReadConfiguration.
        this(
            introspector,
            basePath,
            baseElementDescriptor,
            baseBeanClass,
            new ReadContext( context, new ReadConfiguration() ));
    }

Examples of org.apache.commons.betwixt.io.read.ReadContext

        ElementMapping elementMapping = new ElementMapping();
        elementMapping.setAttributes(new AttributesImpl());
        elementMapping.setName("Bogus");
        elementMapping.setDescriptor(descriptor);
        elementMapping.setType(Iterator.class);
        ReadContext readContext = new ReadContext(new BindingConfiguration(), new ReadConfiguration());

        // An unmapped element name should not resolve to any type.
        assertNull(introspector.getPolymorphicReferenceResolver().resolveType(elementMapping, readContext));

        // A mapped element name should resolve to its concrete class.
        elementMapping.setName("elementA");
        Class resolution = introspector.getPolymorphicReferenceResolver().resolveType(elementMapping, readContext);

Examples of parquet.hadoop.api.ReadSupport.ReadContext

      final ParquetMetadata parquetMetadata = ParquetFileReader.readFooter(cloneJob, finalPath);
      final List<BlockMetaData> blocks = parquetMetadata.getBlocks();
      final FileMetaData fileMetaData = parquetMetadata.getFileMetaData();

      // Let the read support derive the Hive schema and projection metadata
      // from the file footer.
      final ReadContext readContext = new DataWritableReadSupport()
          .init(cloneJob, fileMetaData.getKeyValueMetaData(), fileMetaData.getSchema());
      schemaSize = MessageTypeParser.parseMessageType(readContext.getReadSupportMetadata()
          .get(DataWritableReadSupport.HIVE_SCHEMA_KEY)).getFieldCount();

      // Keep only the row groups whose first data page offset falls inside
      // this split's byte range.
      final List<BlockMetaData> splitGroup = new ArrayList<BlockMetaData>();
      final long splitStart = ((FileSplit) oldSplit).getStart();
      final long splitLength = ((FileSplit) oldSplit).getLength();
      for (final BlockMetaData block : blocks) {
        final long firstDataPage = block.getColumns().get(0).getFirstDataPageOffset();
        if (firstDataPage >= splitStart && firstDataPage < splitStart + splitLength) {
          splitGroup.add(block);
        }
      }
      if (splitGroup.isEmpty()) {
        LOG.warn("Skipping split, could not find row group in: " + oldSplit);
        split = null;
      } else {
        split = new ParquetInputSplit(finalPath,
                splitStart,
                splitLength,
                ((FileSplit) oldSplit).getLocations(),
                splitGroup,
                readContext.getRequestedSchema().toString(),
                fileMetaData.getSchema().toString(),
                fileMetaData.getKeyValueMetaData(),
                readContext.getReadSupportMetadata());
      }
    } else {
      throw new IllegalArgumentException("Unknown split type: " + oldSplit);
    }
    return split;
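
To show where a ReadContext comes from in this API, here is a hedged sketch of a minimal ReadSupport whose init method requests the full file schema, i.e. no projection. The class is illustrative, not the DataWritableReadSupport used above:

    import parquet.hadoop.api.InitContext;
    import parquet.hadoop.api.ReadSupport;
    import parquet.schema.MessageType;

    // Minimal sketch: request every column in the file.
    // prepareForRead(...) is left abstract; a real implementation would
    // return the RecordMaterializer that assembles records of type T.
    public abstract class FullSchemaReadSupport<T> extends ReadSupport<T> {
        @Override
        public ReadContext init(InitContext context) {
            MessageType fileSchema = context.getFileSchema();
            // Requested schema == file schema: the reader materializes all columns.
            return new ReadContext(fileSchema);
        }
    }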

Examples of parquet.hadoop.api.ReadSupport.ReadContext

  /**
   * Generates the input splits for the given footers.
   * @throws IOException
   */
  public List<ParquetInputSplit> getSplits(Configuration configuration, List<Footer> footers) throws IOException {
    List<ParquetInputSplit> splits = new ArrayList<ParquetInputSplit>();
    GlobalMetaData globalMetaData = ParquetFileWriter.getGlobalMetaData(footers);
    // Initialize the read support once for the whole job, against the
    // metadata merged across all footers.
    ReadContext readContext = getReadSupport(configuration).init(new InitContext(
        configuration,
        globalMetaData.getKeyValueMetaData(),
        globalMetaData.getSchema()));
    for (Footer footer : footers) {
      final Path file = footer.getFile();
      LOG.debug(file);
      FileSystem fs = file.getFileSystem(configuration);
      FileStatus fileStatus = fs.getFileStatus(file);
      ParquetMetadata parquetMetaData = footer.getParquetMetadata();
      List<BlockMetaData> blocks = parquetMetaData.getBlocks();
      BlockLocation[] fileBlockLocations = fs.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());
      splits.addAll(
          generateSplits(
              blocks,
              fileBlockLocations,
              fileStatus,
              parquetMetaData.getFileMetaData(),
              readSupportClass,
              readContext.getRequestedSchema().toString(),
              readContext.getReadSupportMetadata())
          );
    }
    return splits;
  }
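
Unlike the per-file Hive path above, this entry point computes the ReadContext once per job from the merged GlobalMetaData, then stamps the resulting requested schema and read-support metadata onto every split it generates.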

Examples of parquet.hadoop.api.ReadSupport.ReadContext

                // Skip partition keys and columns that fall outside the file schema.
                if (!column.isPartitionKey() && column.getHiveColumnIndex() < messageType.getFieldCount()) {
                    fields.add(messageType.getType(column.getName()));
                }
            }
            // Ask the Parquet reader to materialize only the projected columns.
            MessageType requestedProjection = new MessageType(messageType.getName(), fields.build());
            return new ReadContext(requestedProjection);
        }
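
The same projection idea in a compact, hedged form, detached from the Hive column model (the one-field projection is arbitrary and purely illustrative):

    import parquet.hadoop.api.ReadSupport.ReadContext;
    import parquet.schema.MessageType;

    public class ProjectionSketch {
        public static ReadContext projectFirstColumn(MessageType fileSchema) {
            // Build a schema containing only the first field of the file schema.
            MessageType projection = new MessageType(
                    fileSchema.getName(),
                    fileSchema.getFields().subList(0, 1));
            // The ReadContext carries the pruned schema back to the reader.
            return new ReadContext(projection);
        }
    }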