Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FSDataInputStream.available()
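
FSDataInputStream.available() delegates to the stream it wraps, so the value is implementation-dependent: for HDFS and the local file system it generally reports the number of bytes remaining in the file (capped at Integer.MAX_VALUE), while the general java.io.InputStream contract only promises an estimate of what can be read without blocking. The examples on this page rely on the former behaviour, using available() both to size buffers and as an end-of-data check. A minimal sketch of the call (the path and setup are illustrative, not taken from the examples below):

    // Minimal sketch (hypothetical path; uses org.apache.hadoop.conf.Configuration
    // and org.apache.hadoop.fs.{FileSystem, FSDataOutputStream, FSDataInputStream, Path}).
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path p = new Path("/tmp/available-demo");
    try (FSDataOutputStream out = fs.create(p)) {
      out.write(new byte[2048]);               // write 2 KB of zeros
    }
    try (FSDataInputStream in = fs.open(p)) {
      int before = in.available();             // 2048 on HDFS/local: bytes remaining
      int n = in.read(new byte[1024]);         // consume up to 1 KB
      int after = in.available();              // typically before - n
      System.out.println(before + " -> " + after + " (read " + n + ")");
    }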


    Path p = new Path(dir, getName() + ".fsdos");
    FSDataOutputStream out = fs.create(p);
    out.write(bytes);
    out.sync();                      // flush so a new reader can see the bytes (hflush() in later Hadoop versions)
    FSDataInputStream in = fs.open(p);
    assertTrue(in.available() > 0);  // the flushed bytes are reported as available
    byte [] buffer = new byte [1024];
    int read = in.read(buffer);
    assertEquals(bytes.length, read);
    out.close();
    in.close();


    return f.getAbsolutePath();
  }

  private String readFully(Path file) throws IOException {
    FSDataInputStream in = fs.open(file);
    byte[] b = new byte[in.available()];
    in.readFully(b);
    in.close();
    return new String(b);
  }
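
The readFully() helper above sizes its buffer from available(), which works for the file systems used in these tests but is not guaranteed by the InputStream contract and is capped at Integer.MAX_VALUE. A more defensive sketch (same fs field as above, buffer sized from FileStatus.getLen(), explicit charset; not from the original source):

  // Hedged alternative: size the buffer from file metadata rather than available().
  private String readFully(Path file) throws IOException {
    long len = fs.getFileStatus(file).getLen();   // authoritative file length
    if (len > Integer.MAX_VALUE) {
      throw new IOException("File too large to buffer in memory: " + file);
    }
    byte[] b = new byte[(int) len];
    try (FSDataInputStream in = fs.open(file)) {
      in.readFully(b);                            // DataInput.readFully reads exactly b.length bytes
    }
    return new String(b, java.nio.charset.StandardCharsets.UTF_8);
  }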

        int v;

        v = s2.read();
        assertEquals(v, 32);

        assertEquals(s2.available(), data.length);

        byte[] buf = new byte[bufsz];
        s2.read(buf, 0, buf.length);
        for (int i = 0; i < data.length; i++)
            assertEquals(data[i], buf[i]);

        assertEquals(s2.available(), 0);

        s2.close();

        kosmosFileSystem.delete(file1, true);
        assertFalse(kosmosFileSystem.exists(file1));       

    LOG.debug("Done with bucketing.  Split time!");
    long startTime = System.currentTimeMillis();

    // open the split file and modify it as splits finish
    FSDataInputStream tmpIn = fs.open(splitFile);
    byte[] rawData = new byte[tmpIn.available()];
    tmpIn.readFully(rawData);
    tmpIn.close();
    FSDataOutputStream splitOut = fs.create(splitFile);
    splitOut.write(rawData);

      FSUtils.getInstance(fs, table.getConfiguration())
        .recoverFileLease(fs, splitFile, table.getConfiguration(), null);

      // parse split file and process remaining splits
      FSDataInputStream tmpIn = fs.open(splitFile);
      StringBuilder sb = new StringBuilder(tmpIn.available());
      while (tmpIn.available() > 0) {
        sb.append(tmpIn.readChar());
      }
      tmpIn.close();
      for (String line : sb.toString().split("\n")) {
        String[] cmd = line.split(splitAlgo.separator());
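
The while (tmpIn.available() > 0) loop above drains the split file with readChar(), which consumes exactly two bytes per call (the DataInput contract), so it assumes the file was written as two-byte characters, for example with writeChars(). A self-contained round trip showing that pairing (the path and contents are made up for the sketch; fs is a FileSystem as above):

      // Round-trip sketch: writeChars() pairs with readChar(), two bytes per character.
      Path splitLog = new Path("/tmp/split-log-demo");       // hypothetical path
      try (FSDataOutputStream out = fs.create(splitLog)) {
        out.writeChars("regionA\nregionB\n");                // 2 bytes per char
      }
      StringBuilder sb = new StringBuilder();
      try (FSDataInputStream in = fs.open(splitLog)) {
        while (in.available() > 0) {                         // decreases by 2 per readChar()
          sb.append(in.readChar());
        }
      }
      for (String line : sb.toString().split("\n")) {
        System.out.println("pending split: " + line);
      }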

            "nor Syncable.hflush().");
      }
    }
    syncMethod.invoke(out, new Object[]{});
    FSDataInputStream in = fs.open(p);
    assertTrue(in.available() > 0);
    byte [] buffer = new byte [1024];
    int read = in.read(buffer);
    assertEquals(tableName.getName().length, read);
    out.close();
    in.close();
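
The reflective syncMethod.invoke(out, ...) lets the test call either the legacy sync() or Syncable.hflush(), whichever the Hadoop version on the classpath provides. On Hadoop 2.x and later FSDataOutputStream implements Syncable directly, so the same flush-then-check can be sketched without reflection (p, fs and tableName as in the test above):

    // Sketch without reflection: hflush() makes the written bytes visible to new readers.
    FSDataOutputStream out = fs.create(p);
    out.write(tableName.getName());
    out.hflush();                          // Syncable.hflush(), Hadoop 2.x and later
    try (FSDataInputStream in = fs.open(p)) {
      assertTrue(in.available() > 0);      // the reader already sees the flushed bytes
    }
    out.close();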

    Path lengths = OrcRecordUpdater.getSideFile(deltaFile);
    long result = Long.MAX_VALUE;
    try {
      FSDataInputStream stream = fs.open(lengths);
      result = -1;
      while (stream.available() > 0) {
        result = stream.readLong();
      }
      stream.close();
      return result;
    } catch (IOException ioe) {
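
The loop above reads the side file as a sequence of 8-byte longs and keeps the last value it manages to read. Assuming the file holds only complete entries, an equivalent sketch seeks straight to the final long using the length from getFileStatus() (same fs and lengths variables as above):

    // Sketch: read only the last 8-byte entry instead of scanning the whole side file.
    long fileLen = fs.getFileStatus(lengths).getLen();
    long result = Long.MAX_VALUE;          // unchanged if the side file is empty
    if (fileLen >= 8) {
      try (FSDataInputStream stream = fs.open(lengths)) {
        stream.seek(fileLen - 8);          // position at the start of the last long
        result = stream.readLong();        // the value the available()-guarded loop ends with
      }
    }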

            "nor Syncable.hflush().");
      }
    }
    syncMethod.invoke(out, new Object[]{});
    FSDataInputStream in = fs.open(p);
    assertTrue(in.available() > 0);
    byte [] buffer = new byte [1024];
    int read = in.read(buffer);
    assertEquals(tableName.getName().length, read);
    out.close();
    in.close();
View Full Code Here
