Examples of Reader


Examples of org.apache.accumulo.core.file.rfile.MultiLevelIndex.Reader
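This fragment, apparently from an Accumulo unit test, serializes a multi-level index, reopens it through a CachableBlockFile.Reader, and exercises the IndexIterator in both directions: forward with next()/peek(), backward with previous()/peekPrevious(), then a lookup past the last key and a series of random lookups.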

    // Reopen the serialized index bytes through a cachable block file reader
    byte[] data = baos.toByteArray();
    SeekableByteArrayInputStream bais = new SeekableByteArrayInputStream(data);
    FSDataInputStream in = new FSDataInputStream(bais);
    CachableBlockFile.Reader _cbr = new CachableBlockFile.Reader(in, data.length, CachedConfiguration.getInstance());
   
    // Deserialize the root index block
    Reader reader = new Reader(_cbr, RFile.RINDEX_VER_6);
    BlockRead rootIn = _cbr.getMetaBlock("root");
    reader.readFields(rootIn);
    rootIn.close();
    // Iterate forward from the first key, checking positions and entry counts
    IndexIterator liter = reader.lookup(new Key("000000"));
    int count = 0;
    while (liter.hasNext()) {
      assertEquals(count, liter.nextIndex());
      assertEquals(count, liter.peek().getNumEntries());
      assertEquals(count, liter.next().getNumEntries());
      count++;
    }
   
    assertEquals(num + 1, count);
   
    // Iterate backward to the beginning
    while (liter.hasPrevious()) {
      count--;
      assertEquals(count, liter.previousIndex());
      assertEquals(count, liter.peekPrevious().getNumEntries());
      assertEquals(count, liter.previous().getNumEntries());
    }
   
    assertEquals(0, count);
   
    // go past the end
    liter = reader.lookup(new Key(String.format("%05d000", num + 1)));
    assertFalse(liter.hasNext());
   
    // Random lookups: check which index entry covers each probed key
    Random rand = new Random();
    for (int i = 0; i < 100; i++) {
      int k = rand.nextInt(num * 1000);
      int expected;
      if (k % 1000 == 0)
        expected = k / 1000; // end key is inclusive
      else
        expected = k / 1000 + 1;
      liter = reader.lookup(new Key(String.format("%08d", k)));
      IndexEntry ie = liter.next();
      assertEquals(expected, ie.getNumEntries());
    }
   
  }

Examples of org.apache.accumulo.core.file.rfile.RFile.Reader
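From Accumulo's rfile PrintInfo utility: each file argument is opened as an RFile, summary information is printed, and, depending on the options, every key/value pair is dumped and/or a histogram of entry sizes (bucketed by order of magnitude) is accumulated and printed.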

    for (String arg : opts.files) {
     
      Path path = new Path(arg);
      FileSystem fs = hadoopFs.exists(path) ? hadoopFs : localFs; // fall back to local
      CachableBlockFile.Reader _rdr = new CachableBlockFile.Reader(fs, path, conf, null, null);
      Reader iter = new RFile.Reader(_rdr);
     
      iter.printInfo();
      System.out.println();
      org.apache.accumulo.core.file.rfile.bcfile.PrintInfo.main(new String[] {arg});
     
      if (opts.histogram || opts.dump) {
        iter.seek(new Range((Key) null, (Key) null), new ArrayList<ByteSequence>(), false);
        while (iter.hasTop()) {
          Key key = iter.getTopKey();
          Value value = iter.getTopValue();
          if (opts.dump)
            System.out.println(key + " -> " + value);
          if (opts.histogram) {
            // bucket entries by order of magnitude of combined key+value size
            long size = key.getSize() + value.getSize();
            int bucket = (int) Math.log10(size);
            countBuckets[bucket]++;
            sizeBuckets[bucket] += size;
            totalSize += size;
          }
          iter.next();
        }
      }
      iter.close();
      if (opts.histogram) {
        System.out.println("Up to size      count      %-age");
        for (int i = 1; i < countBuckets.length; i++) {
          System.out.println(String.format("%11.0f : %10d %6.2f%%", Math.pow(10, i), countBuckets[i], sizeBuckets[i] * 100. / totalSize));
        }

Examples of org.apache.cocoon.reading.Reader
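A Cocoon test helper: it looks up a Reader component of the given type through a ServiceSelector, sets it up with a SourceResolver and parameters, points its output at a ByteArrayOutputStream, and invokes generate() to produce the bytes.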

    public final byte[] read(String type, Parameters parameters, String source) throws SAXException, IOException, ProcessingException {
        ServiceSelector selector = null;
        Reader reader = null;
        SourceResolver resolver = null;
        Source inputsource = null;

        assertNotNull("Test for component manager", this.getManager());

        ByteArrayOutputStream document = null;

        try {
            selector = (ServiceSelector) this.lookup(Reader.ROLE+
                "Selector");
            assertNotNull("Test lookup of reader selector", selector);

            resolver = (SourceResolver) this.lookup(SourceResolver.ROLE);
            assertNotNull("Test lookup of source resolver", resolver);

            assertNotNull("Test if reader name is not null", type);
            reader = (Reader) selector.select(type);
            assertNotNull("Test lookup of reader", reader);
           
            reader.setup(new SourceResolverAdapter(resolver),
                    this.getObjectModel(), source, parameters);

            document = new ByteArrayOutputStream();
            reader.setOutputStream(document);

            reader.generate();
        } catch (ServiceException ce) {
            getLogger().error("Could not retrieve reader", ce);
            fail("Could not retrieve reader: " + ce.toString());
        } finally {
            if (reader!=null) {

Examples of org.apache.hadoop.hbase.io.SequenceFile.Reader
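From an HBase log-dumping tool: in dump mode it verifies the path is a file, opens it with a SequenceFile.Reader, and prints every HLogKey/HLogEdit pair before closing the reader.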

      if (dump) {
        if (!fs.isFile(logPath)) {
          throw new IOException(args[i] + " is not a file");
        }
        Reader log = new SequenceFile.Reader(fs, logPath, conf);
        try {
          HLogKey key = new HLogKey();
          HLogEdit val = new HLogEdit();
          while (log.next(key, val)) {
            System.out.println(key.toString() + " " + val.toString());
          }
        } finally {
          log.close();
        }
      } else {
        if (!fs.getFileStatus(logPath).isDir()) {
          throw new IOException(args[i] + " is not a directory");
        }

Examples of org.apache.hadoop.hbase.io.hfile.HFile.Reader
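An ad-hoc HFile read benchmark: it opens a reader backed by a simple block cache, loads the file info, prints the trailer and the index read time, then prepares a scanner and a list of probe keys (the timing loop is cut off below).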

   
    Path path = new Path("/Users/ryan/rfile.big.txt");
    long start = System.currentTimeMillis();
    SimpleBlockCache cache = new SimpleBlockCache();
    //LruBlockCache cache = new LruBlockCache();
    Reader reader = new HFile.Reader(lfs, path, cache, false);
    reader.loadFileInfo();
    System.out.println(reader.trailer);
    long end = System.currentTimeMillis();
   
    System.out.println("Index read time: " + (end - start));

    List<String> keys = slurp("/Users/ryan/xaa.50k");
   
    HFileScanner scanner = reader.getScanner();
    int count;
    long totalBytes = 0;
    int notFound = 0;

    start = System.nanoTime();

Examples of org.apache.hadoop.hbase.regionserver.StoreFile.Reader
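A helper from HBase's compaction code: it recalculates the total size of the files selected for compaction, counting a file whose reader is unavailable as zero bytes.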

  private void recalculateSize() {
    long sz = 0;
    for (StoreFile sf : this.filesToCompact) {
      Reader r = sf.getReader();
      sz += r == null ? 0 : r.length();
    }
    this.totalSize = sz;
  }

Examples of org.apache.hadoop.hbase.regionserver.wal.HLog.Reader
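An HBase write-ahead-log test: it appends an edit with ten columns, completes a cache flush, closes the log, then reopens the file through HLog.getReader and asserts that both the appended row and the cache-flush marker row read back correctly.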

  @Test
  public void testAppend() throws IOException {
    final int COL_COUNT = 10;
    final byte [] tableName = Bytes.toBytes("tablename");
    final byte [] row = Bytes.toBytes("row");
    Reader reader = null;
    HLog log = new HLog(fs, dir, oldLogDir, conf);
    try {
      // Write columns named 1, 2, 3, etc. and then values of single byte
      // 1, 2, 3...
      long timestamp = System.currentTimeMillis();
      WALEdit cols = new WALEdit();
      for (int i = 0; i < COL_COUNT; i++) {
        cols.add(new KeyValue(row, Bytes.toBytes("column"),
          Bytes.toBytes(Integer.toString(i)),
          timestamp, new byte[] { (byte)(i + '0') }));
      }
      HRegionInfo hri = new HRegionInfo(tableName,
          HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
      HTableDescriptor htd = new HTableDescriptor();
      htd.addFamily(new HColumnDescriptor("column"));
      log.append(hri, tableName, cols, System.currentTimeMillis(), htd);
      long logSeqId = log.startCacheFlush(hri.getEncodedNameAsBytes());
      log.completeCacheFlush(hri.getEncodedNameAsBytes(), tableName, logSeqId, false);
      log.close();
      Path filename = log.computeFilename();
      log = null;
      // Now open a reader on the log and assert append worked.
      reader = HLog.getReader(fs, filename, conf);
      HLog.Entry entry = reader.next();
      assertEquals(COL_COUNT, entry.getEdit().size());
      int idx = 0;
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
          entry.getKey().getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, entry.getKey().getTablename()));
        assertTrue(Bytes.equals(row, val.getRow()));
        assertEquals((byte)(idx + '0'), val.getValue()[0]);
        System.out.println(entry.getKey() + " " + val);
        idx++;
      }

      // Get next row... the meta flushed row.
      entry = reader.next();
      assertEquals(1, entry.getEdit().size());
      for (KeyValue val : entry.getEdit().getKeyValues()) {
        assertTrue(Bytes.equals(hri.getEncodedNameAsBytes(),
          entry.getKey().getEncodedRegionName()));
        assertTrue(Bytes.equals(tableName, entry.getKey().getTablename()));
        assertTrue(Bytes.equals(HLog.METAROW, val.getRow()));
        assertTrue(Bytes.equals(HLog.METAFAMILY, val.getFamily()));
        assertEquals(0, Bytes.compareTo(HLog.COMPLETE_CACHE_FLUSH,
          val.getValue()));
        System.out.println(entry.getKey() + " " + val);
      }
    } finally {
      if (log != null) {
        log.closeAndDelete();
      }
      if (reader != null) {
        reader.close();
      }
    }
  }

Examples of org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.Reader
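From a BookKeeper-backed edit-log stream in HDFS: the log version is read from the header (an EOF is reported as a corrupt header), an FSEditLogOp.Reader is constructed for that version, and the starting position is logged.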

    try {
      logVersion = readLogVersion(in);
    } catch (EOFException e) {
      throw new LedgerHeaderCorruptException("No header file in the ledger");
    }
    reader = new Reader(in, logVersion);
    LOG.info("Reading from ledger id " + ledgerId +
        ", starting with book keeper entry id " + firstBookKeeperEntry +
        ", log version " + logVersion +
        ", first txn id " +
        firstTxId +  (isInProgress() ?

Examples of org.apache.hadoop.hive.ql.io.orc.Reader
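From an ORC input driver: a Reader is created with OrcFile.createReader, its StructObjectInspector is used to build a data-model driver, the available columns are computed, and a row reader is opened over the requested offset and fragment size.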

            if (LOG.isInfoEnabled()) {
                LOG.info(MessageFormat.format(
                        "Loading ORCFile metadata ({0}): {1}",
                        descriptor.getDataModelClass().getSimpleName(),
                        path));
            }
            Reader orc = OrcFile.createReader(fileSystem, path);
            StructObjectInspector sourceInspector = (StructObjectInspector) orc.getObjectInspector();
            driver = new DataModelDriver(descriptor, sourceInspector, configuration);
            if (LOG.isInfoEnabled()) {
                LOG.info(MessageFormat.format(
                        "Loading ORCFile contents ({0}): path={1}, range={2}+{3}",
                        descriptor.getDataModelClass().getSimpleName(),
                        path,
                        offset,
                        fragmentSize));
            }
            boolean[] availableColumns = computeAvailableColumns(orc);
            reader = orc.rows(offset, fragmentSize, availableColumns);
            currentReader = reader;
        }
        return reader;
    }

Examples of org.apache.hadoop.io.MapFile.Reader
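From a sort/merge utility: it walks a sort output directory, skips the "finished" flag file, wraps every other entry in a MapFile.Reader and adds it to a merge heap, failing if the flag file is never found.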

      if (child.getPath().getName().equals("finished")) {
        foundFinish = true;
        continue;
      }
      heap.add(new Index(new Reader(fs, child.getPath().toString(), conf)));
    }
    if (!foundFinish)
      throw new IOException("Sort \"finished\" flag not found in " + directory);
  }