Package org.apache.tajo.catalog

Examples of org.apache.tajo.catalog.TableMeta
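TableMeta pairs a table's store type (CSV, RCFile, SequenceFile, Avro, and so on) with the key/value storage options that travel with the table. A minimal sketch of building one, assuming the Tajo 0.8-era API used in the snippets below (Schema, CatalogUtil and TableMeta from org.apache.tajo.catalog, StoreType from CatalogProtos, Type from TajoDataTypes); the option key and codec are copied from the compression examples further down:

    Schema schema = new Schema();
    schema.addColumn("id", Type.INT4);
    schema.addColumn("name", Type.TEXT);

    // a CSV-backed table whose output should be BZip2-compressed
    TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);
    meta.putOption("compression.codec", BZip2Codec.class.getCanonicalName());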


      throws IOException {
    // Writing a compressed table: the codec class is recorded on the TableMeta
    Schema schema = new Schema();
    schema.addColumn("id", TajoDataTypes.Type.INT4);
    schema.addColumn("age", TajoDataTypes.Type.INT8);

    TableMeta meta = CatalogUtil.newTableMeta(storeType);
    meta.putOption("compression.codec", codec.getCanonicalName());

    // One output file per codec, written through the storage manager's Appender
    String fileName = "Compression_" + codec.getSimpleName();
    Path tablePath = new Path(testDir, fileName);
    Appender appender = StorageManagerFactory.getStorageManager(conf).getAppender(meta, schema, tablePath);
    appender.enableStats();
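This snippet stops just before any rows are written. Judging from the appender lifecycle visible in the other examples on this page (enableStats, init, addTuple, close, getStats), the write path typically continues roughly as follows; this is a sketch of that pattern, not the original test body:

    appender.init();

    Tuple t = new VTuple(2);
    t.put(0, DatumFactory.createInt4(1));
    t.put(1, DatumFactory.createInt8(25L));
    appender.addTuple(t);

    appender.close();

    // populated because enableStats() was called before init()
    TableStats stats = appender.getStats();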


    // A three-column CSV table and a small batch of tuples to append
    Schema schema = new Schema();
    schema.addColumn("id", Type.INT4);
    schema.addColumn("age", Type.INT4);
    schema.addColumn("name", Type.TEXT);

    TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

    Tuple[] tuples = new Tuple[4];
    for (int i = 0; i < tuples.length; i++) {
      tuples[i] = new VTuple(3);
      tuples[i]

    // Same CSV setup as above; each row is populated with a Datum[] in one call
    Schema schema = new Schema();
    schema.addColumn("id", Type.INT4);
    schema.addColumn("age", Type.INT4);
    schema.addColumn("name", Type.TEXT);

    TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

    Tuple[] tuples = new Tuple[4];
    for (int i = 0; i < tuples.length; i++) {
      tuples[i] = new VTuple(3);
      tuples[i].put(new Datum[] {
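The call above is cut off in the middle of the Datum array. A plausible completion, shown as the full call for readability and using the DatumFactory helpers seen in the larger example further down; the concrete field values are made up for illustration:

      tuples[i] = new VTuple(3);
      tuples[i].put(new Datum[] {
          DatumFactory.createInt4(i),               // id
          DatumFactory.createInt4(20 + i),          // age (value made up)
          DatumFactory.createText("name" + i) });   // name (value made up)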

      // Asking the storage manager for input splits over a set of partition directories
      Schema schema = new Schema();
      schema.addColumn("id", Type.INT4);
      schema.addColumn("age", Type.INT4);
      schema.addColumn("name", Type.TEXT);
      TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

      List<FileFragment> splits = Lists.newArrayList();
      // Get FileFragments in partition batch
      splits.addAll(sm.getSplits("data", meta, schema, partitions.toArray(new Path[partitions.size()])));
      assertEquals(testCount, splits.size());

      // The same split request made against a single table path
      Schema schema = new Schema();
      schema.addColumn("id", Type.INT4);
      schema.addColumn("age", Type.INT4);
      schema.addColumn("name", Type.TEXT);
      TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

      List<FileFragment> splits = Lists.newArrayList();
      splits.addAll(sm.getSplits("data", meta, schema, tablePath));

      assertEquals(testCount, splits.size());
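Each FileFragment returned by getSplits can then be handed to a scanner to read its rows back. A minimal sketch, assuming the storage manager exposes a getScanner(meta, schema, fragment) factory as in the Tajo 0.8-era StorageManagerFactory API (the exact signature may differ between versions):

      FileFragment split = splits.get(0);
      Scanner scanner = StorageManagerFactory.getStorageManager(conf).getScanner(meta, schema, split);
      scanner.init();
      Tuple tuple;
      while ((tuple = scanner.next()) != null) {
        // consume the tuple
      }
      scanner.close();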

    // A BZip2-compressed table used to test reading across splits
    Schema schema = new Schema();
    schema.addColumn("id", Type.INT4);
    schema.addColumn("age", Type.INT8);

    TableMeta meta = CatalogUtil.newTableMeta(storeType);
    meta.putOption("compression.codec", BZip2Codec.class.getCanonicalName());

    Path tablePath = new Path(testDir, "SplitCompression");
    Appender appender = StorageManagerFactory.getStorageManager(conf).getAppender(meta, schema, tablePath);
    appender.enableStats();
    appender.init();

    // Several storage options on one TableMeta: the codec, the SequenceFile
    // compression type, and the serializer/deserializer classes for RCFile and SequenceFile
    Schema schema = new Schema();
    schema.addColumn("id", Type.INT4);
    schema.addColumn("age", Type.FLOAT4);
    schema.addColumn("name", Type.TEXT);

    TableMeta meta = CatalogUtil.newTableMeta(storeType);
    meta.putOption("compression.codec", codec.getCanonicalName());
    meta.putOption("compression.type", SequenceFile.CompressionType.BLOCK.name());
    meta.putOption("rcfile.serde", TextSerializerDeserializer.class.getName());
    meta.putOption("sequencefile.serde", TextSerializerDeserializer.class.getName());

    String fileName = "Compression_" + codec.getSimpleName();
    Path tablePath = new Path(testDir, fileName);
    Appender appender = StorageManagerFactory.getStorageManager(conf).getAppender(meta, schema, tablePath);
    appender.enableStats();
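The options set above are carried by the TableMeta and read back by the storage layer when the file is written or scanned. A small sketch, assuming TableMeta exposes a getOption(String) accessor for its key/value pairs (the accessor name is an assumption, not confirmed by this page):

    String codecName = meta.getOption("compression.codec");   // e.g. the codec's canonical class name
    String serdeClass = meta.getOption("rcfile.serde");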

    schema.addColumn("file", Type.TEXT);
    schema.addColumn("name", Type.TEXT);
    schema.addColumn("age", Type.INT8);

    Options options = new Options();
    TableMeta meta = CatalogUtil.newTableMeta(storeType, options);
    meta.setOptions(StorageUtil.newPhysicalProperties(storeType));
    if (storeType == StoreType.AVRO) {
      meta.putOption(StorageConstants.AVRO_SCHEMA_LITERAL,
                     TEST_MULTIPLE_FILES_AVRO_SCHEMA);
    }

    Path table1Path = new Path(testDir, storeType + "_1.data");
    Appender appender1 = StorageManagerFactory.getStorageManager(conf).getAppender(meta, schema, table1Path);
    appender1.enableStats();
    appender1.init();
    int tupleNum = 10000;
    VTuple vTuple;

    for (int i = 0; i < tupleNum; i++) {
      vTuple = new VTuple(4);
      vTuple.put(0, DatumFactory.createInt4(i + 1));
      vTuple.put(1, DatumFactory.createText("hyunsik"));
      vTuple.put(2, DatumFactory.createText("jihoon"));
      vTuple.put(3, DatumFactory.createInt8(25L));
      appender1.addTuple(vTuple);
    }
    appender1.close();

    TableStats stat1 = appender1.getStats();
    if (stat1 != null) {
      assertEquals(tupleNum, stat1.getNumRows().longValue());
    }

    Path table2Path = new Path(testDir, storeType + "_2.data");
    Appender appender2 = StorageManagerFactory.getStorageManager(conf).getAppender(meta, schema, table2Path);
    appender2.enableStats();
    appender2.init();

    for (int i = 0; i < tupleNum; i++) {
      vTuple = new VTuple(4);
      vTuple.put(0, DatumFactory.createInt4(i + 1));
      vTuple.put(1, DatumFactory.createText("hyunsik"));
      vTuple.put(2, DatumFactory.createText("jihoon"));
      vTuple.put(3, DatumFactory.createInt8(25L));
      appender2.addTuple(vTuple);
    }
    appender2.close();

    TableStats stat2 = appender2.getStats();
    if (stat2 != null) {
      assertEquals(tupleNum, stat2.getNumRows().longValue());
    }

    // Build one FileFragment per file so both can be fed to the MergeScanner
    FileStatus status1 = fs.getFileStatus(table1Path);
    FileStatus status2 = fs.getFileStatus(table2Path);
    FileFragment[] fragment = new FileFragment[2];
    fragment[0] = new FileFragment("tablet1", table1Path, 0, status1.getLen());
    fragment[1] = new FileFragment("tablet1", table2Path, 0, status2.getLen());

    // Project only columns 0 and 2; formats that support projection should skip the rest
    Schema targetSchema = new Schema();
    targetSchema.addColumn(schema.getColumn(0));
    targetSchema.addColumn(schema.getColumn(2));

    Scanner scanner = new MergeScanner(conf, schema, meta, TUtil.<FileFragment>newList(fragment), targetSchema);
    assertEquals(isProjectableStorage(meta.getStoreType()), scanner.isProjectable());

    scanner.init();
    int totalCounts = 0;
    Tuple tuple;
    while ((tuple = scanner.next()) != null) {
      totalCounts++;
      if (isProjectableStorage(meta.getStoreType())) {
        assertNotNull(tuple.get(0));
        assertNull(tuple.get(1));
        assertNotNull(tuple.get(2));
        assertNull(tuple.get(3));
      }
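The loop is truncated here. A typical continuation (a sketch inferred from the two files of tupleNum rows written above, not the original test body) closes the scanner and checks the combined row count:

    }
    scanner.close();

    assertEquals(tupleNum * 2, totalCounts);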
