Package org.apache.hadoop.io

Examples of org.apache.hadoop.io.SequenceFile.Reader
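
All of the excerpts on this page follow the same basic pattern: open a SequenceFile.Reader, call next(key, value) in a loop until it returns false, and close the reader when finished. Below is a minimal, self-contained sketch of that pattern, assuming a file of Text keys and IntWritable values; the path and the key/value types are illustrative placeholders, not details taken from any particular excerpt.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.SequenceFile;
    import org.apache.hadoop.io.Text;

    public class SequenceFileReadSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/tmp/example.seq"); // hypothetical input file

        // The (fs, path, conf) constructor is the one used throughout the
        // excerpts below; newer Hadoop releases also provide
        // new SequenceFile.Reader(conf, SequenceFile.Reader.file(path)).
        SequenceFile.Reader reader = new SequenceFile.Reader(fs, path, conf);
        try {
          Text key = new Text();
          IntWritable value = new IntWritable();
          // next() refills the reusable key/value objects and returns false at end of file.
          while (reader.next(key, value)) {
            System.out.println(key + "\t" + value.get());
          }
        } finally {
          reader.close();
        }
      }
    }

When the key and value classes are not known in advance, they can be obtained from the reader via getKeyClass() and getValueClass() and instantiated with ReflectionUtils.newInstance, as the last excerpt on this page does.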


    sorter.merge(outfiles, output);

    // import the evaluations
    LongWritable key = new LongWritable();
    DoubleWritable value = new DoubleWritable();
    Reader reader = new Reader(fs, output, conf);
    try {
      while (reader.next(key, value)) {
        evaluations.add(value.get());
      }
    } finally {
      reader.close();
    }
  }
View Full Code Here


    PCollection<String> collection = MemPipeline.typedCollectionOf(Writables.strings(), EXPECTED_COLLECTION);
    final Target target = To.sequenceFile(outputFile.toString());
    MemPipeline.getInstance().write(collection, target);

    // read
    final SequenceFile.Reader reader = new Reader(FileSystem.getLocal(
      baseTmpDir.getDefaultConfiguration()), new Path(outputFile.toString()),
        baseTmpDir.getDefaultConfiguration());
    final List<String> actual = Lists.newArrayList();
    final NullWritable key = NullWritable.get();
    final Text value = new Text();
    while (reader.next(key, value)) {
      actual.add(value.toString());
    }
    reader.close();

    // assert read same as written
    assertEquals(EXPECTED_COLLECTION, actual);
  }
View Full Code Here

          Writables.strings()), "test input");
    final Target target = To.sequenceFile(outputFile.toString());
    MemPipeline.getInstance().write(collection, target);

    // read
    final SequenceFile.Reader reader = new Reader(FileSystem.getLocal(baseTmpDir
        .getDefaultConfiguration()), new Path(outputFile.toString()),
        baseTmpDir.getDefaultConfiguration());
    final List<Pair<Integer, String>> actual = Lists.newArrayList();
    final IntWritable key = new IntWritable();
    final Text value = new Text();
    while (reader.next(key, value)) {
      actual.add(Pair.of(key.get(), value.toString()));
    }
    reader.close();

    // assert read same as written
    assertEquals(EXPECTED_TABLE, actual);
  }
View Full Code Here

  public void testAppend() throws IOException {
    final int COL_COUNT = 10;
    final Text regionName = new Text("regionname");
    final Text tableName = new Text("tablename");
    final Text row = new Text("row");
    Reader reader = null;
    HLog log = new HLog(fs, dir, this.conf, null);
    try {
      // Write columns named 1, 2, 3, etc. and then values of single byte
      // 1, 2, 3...
      long timestamp = System.currentTimeMillis();
      TreeMap<HStoreKey, byte []> cols = new TreeMap<HStoreKey, byte []>();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.put(new HStoreKey(row, new Text(Integer.toString(i)), timestamp),
            new byte[] { (byte)(i + '0') });
      }
      log.append(regionName, tableName, cols);
      long logSeqId = log.startCacheFlush();
      log.completeCacheFlush(regionName, tableName, logSeqId);
      log.close();
      Path filename = log.computeFilename(log.filenum - 1);
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = new SequenceFile.Reader(fs, filename, conf);
      HLogKey key = new HLogKey();
      HLogEdit val = new HLogEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        reader.next(key, val);
        assertEquals(regionName, key.getRegionName());
        assertEquals(tableName, key.getTablename());
        assertEquals(row, key.getRow());
        assertEquals((byte)(i + '0'), val.getVal()[0]);
        System.out.println(key + " " + val);
      }
      while (reader.next(key, val)) {
        // Assert only one more row... the meta flushed row.
        assertEquals(regionName, key.getRegionName());
        assertEquals(tableName, key.getTablename());
        assertEquals(HLog.METAROW, key.getRow());
        assertEquals(HLog.METACOLUMN, val.getColumn());
        assertEquals(0, HLogEdit.completeCacheFlush.compareTo(val.getVal()));
        System.out.println(key + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }
View Full Code Here

      }
      if (dump) {
        if (!fs.isFile(logPath)) {
          throw new IOException(args[i] + " is not a file");
        }
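        // Dump mode: the HLog file is itself a SequenceFile, so open it
        // with SequenceFile.Reader and print every HLogKey/KeyValue entry.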
        Reader log = new SequenceFile.Reader(fs, logPath, conf);
        try {
          HLogKey key = new HLogKey();
          KeyValue val = new KeyValue();
          while (log.next(key, val)) {
            System.out.println(key.toString() + " " + val.toString());
          }
        } finally {
          log.close();
        }
      } else {
        if (!fs.getFileStatus(logPath).isDir()) {
          throw new IOException(args[i] + " is not a directory");
        }
View Full Code Here

  public void testAppend() throws IOException {
    final int COL_COUNT = 10;
    final byte [] regionName = Bytes.toBytes("regionname");
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    Reader reader = null;
    HLog log = new HLog(fs, dir, this.conf, null);
    try {
      // Write columns named 1, 2, 3, etc. and then values of single byte
      // 1, 2, 3...
      long timestamp = System.currentTimeMillis();
      List<KeyValue> cols = new ArrayList<KeyValue>();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column:" + Integer.toString(i)),
          timestamp, new byte[] { (byte)(i + '0') }));
      }
      log.append(regionName, tableName, cols, false, System.currentTimeMillis());
      long logSeqId = log.startCacheFlush();
      log.completeCacheFlush(regionName, tableName, logSeqId);
      log.close();
      Path filename = log.computeFilename(log.getFilenum());
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = new SequenceFile.Reader(fs, filename, conf);
      HLogKey key = new HLogKey();
      KeyValue val = new KeyValue();
      for (int i = 0; i < COL_COUNT; i++) {
        reader.next(key, val);
        assertTrue(Bytes.equals(regionName, key.getRegionName()));
        assertTrue(Bytes.equals(tableName, key.getTablename()));
        assertTrue(Bytes.equals(row, val.getRow()));
        assertEquals((byte)(i + '0'), val.getValue()[0]);
        System.out.println(key + " " + val);
      }
      while (reader.next(key, val)) {
        // Assert only one more row... the meta flushed row.
        assertTrue(Bytes.equals(regionName, key.getRegionName()));
        assertTrue(Bytes.equals(tableName, key.getTablename()));
        assertTrue(Bytes.equals(HLog.METAROW, val.getRow()));
        assertTrue(Bytes.equals(HLog.METAFAMILY, val.getFamily()));
        assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
          val.getValue()));
        System.out.println(key + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }
View Full Code Here

      }
      if (dump) {
        if (!fs.isFile(logPath)) {
          throw new IOException(args[i] + " is not a file");
        }
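        // A variant of the dump loop shown earlier, for log entries that
        // carry HLogEdit values rather than KeyValues.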
        Reader log = new SequenceFile.Reader(fs, logPath, conf);
        try {
          HLogKey key = new HLogKey();
          HLogEdit val = new HLogEdit();
          while (log.next(key, val)) {
            System.out.println(key.toString() + " " + val.toString());
          }
        } finally {
          log.close();
        }
      } else {
        if (!fs.getFileStatus(logPath).isDir()) {
          throw new IOException(args[i] + " is not a directory");
        }
View Full Code Here

    sorter.merge(outfiles, output);

    // import the evaluations
    LongWritable key = new LongWritable();
    CDFitness value = new CDFitness();
    Reader reader = new Reader(fs, output, conf);

    while (reader.next(key, value)) {
      evaluations.add(new CDFitness(value));
    }

    reader.close();
  }
View Full Code Here

    writer.append(1L, "one");
    writer.append(2L, "two");
   
    writer.close();
   
    // Reopen the file and read it back through the untyped
    // next(Object)/getCurrentValue(Object) calls, which return the
    // deserialized key and value; next() returns null at end of file.
    Reader reader = new Reader(fs, file, conf);
    assertEquals(1L, reader.next((Object) null));
    assertEquals("one", reader.getCurrentValue((Object) null));
    assertEquals(2L, reader.next((Object) null));
    assertEquals("two", reader.getCurrentValue((Object) null));
    assertNull(reader.next((Object) null));
    reader.close();
   
  }
View Full Code Here

    LOG.info("Opening SequenceFile " + filename);
    return new SequenceFile.Reader(fs, new Path(filename), conf);
  }

  public static Object getFirstValue(String filename) throws IOException {
    Reader r = null;
    try {
      // read from local filesystem
      Configuration conf = new Configuration();
      if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
        conf.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
      }
      FileSystem fs = FileSystem.get(conf);
      r = new SequenceFile.Reader(fs, new Path(filename), conf);
      Object key = ReflectionUtils.newInstance(r.getKeyClass(), conf);
      Object val = ReflectionUtils.newInstance(r.getValueClass(), conf);
      LOG.info("Reading value of type " + r.getValueClassName()
          + " from SequenceFile " + filename);
      r.next(key);
      r.getCurrentValue(val);
      LOG.info("Value as string: " + val.toString());
      return val;
    } finally {
      if (null != r) {
        try {
          r.close();
        } catch (IOException ioe) {
          LOG.warn("IOException during close: " + ioe.toString());
        }
      }
    }
View Full Code Here
