Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.DFSClient
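
The snippets below are collected from Hadoop's own sources and tests. As orientation, here is a minimal sketch (not from the source; the class name and output are illustrative) of the pattern most of them share: build a DFSClient from a Configuration, use it, and close it. The constructor and methods shown are the same ones the snippets use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;

public class DFSClientSketch {
  public static void main(String[] args) throws Exception {
    // Reads core-site.xml/hdfs-site.xml from the classpath.
    Configuration conf = new Configuration();
    // Connects to the default namenode; later snippets show variants
    // that take an explicit InetSocketAddress instead.
    DFSClient client = new DFSClient(conf);
    try {
      // The replication factor used by the saving computations below.
      System.out.println("default replication: "
          + client.getDefaultReplication());
    } finally {
      client.close(); // always release the namenode connection
    }
  }
}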


  /**
   * Get the saving of this code in bytes
   * @return The saving in bytes
   */
  public long getSaving() {
    try {
      DFSClient dfs = new DFSClient(new Configuration());
      Counters raidedCounters = stateToSourceCounters.get(RaidState.RAIDED);
      long physical = raidedCounters.getNumBytes() +
          parityCounters.getNumBytes();
      long logical = raidedCounters.getNumLogical();
      return logical * dfs.getDefaultReplication() - physical;
    } catch (Exception e) {
      return -1;
    }
  }
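
To make the arithmetic concrete with illustrative figures (not from the source): with a default replication of 3, 10 GB of logical raided data, and 12 GB of physical source-plus-parity bytes, the method reports 10 * 3 - 12 = 18 GB saved relative to plain replication. getDoneSaving() below applies the same formula over the estimated post-RAID sizes, also counting files that should be raided but are not yet.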


  /**
   * Get the estimated saving of this code in bytes when RAIDing is done
   * @return The saving in bytes
   */
  public long getDoneSaving() {
    try {
      DFSClient dfs = new DFSClient(new Configuration());
      Counters raidedCounters = stateToSourceCounters.get(RaidState.RAIDED);
      Counters shouldRaidCounters =
          stateToSourceCounters.get(RaidState.NOT_RAIDED_BUT_SHOULD);
      long physical = estimatedDoneSourceSize + estimatedDoneParitySize;
      long logical = raidedCounters.getNumLogical() +
          shouldRaidCounters.getNumLogical();
      return logical * dfs.getDefaultReplication() - physical;
    } catch (Exception e) {
      return -1;
    }
  }

            lastUpdate = now;
            synchronized (this) {
              // This obtains the datanodes from the HDFS cluster in the config file.
              // If we need to support parity file in a different cluster, this
              // has to change.
              DFSClient client = new DFSClient(conf);
              liveNodes =
                  client.namenode.getDatanodeReport(DatanodeReportType.LIVE);
              for (DatanodeInfo n : liveNodes) {
                topology.add(n);
              }
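
The snippet above reaches into the package-private client.namenode field. DFSClient also exposes the same report through its public datanodeReport method, used again in the decommission test near the end of this page. A minimal sketch, assuming the Hadoop 2.x import layout (older releases keep DatanodeReportType in FSConstants):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

public class LiveNodesSketch {
  public static void main(String[] args) throws Exception {
    DFSClient client = new DFSClient(new Configuration());
    try {
      // Same LIVE report as above, without touching client.namenode.
      for (DatanodeInfo dn : client.datanodeReport(DatanodeReportType.LIVE)) {
        System.out.println(dn); // one status dump per live datanode
      }
    } finally {
      client.close();
    }
  }
}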

  /** Get DFSClient for a namenode corresponding to the BPID from a datanode */
  public static DFSClient getDFSClient(final HttpServletRequest request,
    final Configuration conf) throws IOException, InterruptedException {
    final String nnAddr = request.getParameter(JspHelper.NAMENODE_ADDRESS);
    return new DFSClient(DFSUtil.getSocketAddress(nnAddr), conf);
  }
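
Because the namenode address arrives as a request parameter rather than from the datanode's own configuration, the same servlet code can serve any of the namespaces a federated datanode belongs to, which is what the block pool ID (BPID) in the javadoc alludes to.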

    }
  }
 
  private void lostFoundMove(FileStatus file, LocatedBlocks blocks)
    throws IOException {
    final DFSClient dfs = new DFSClient(NameNode.getAddress(conf), conf);
    try {
    if (!lfInited) {
      lostFoundInit(dfs);
    }
    if (!lfInitedOk) {
      return;
    }
    String target = lostFound + file.getPath();
    String errmsg = "Failed to move " + file.getPath() + " to /lost+found";
    try {
      PermissionStatus ps = new PermissionStatus(
          file.getOwner(), file.getGroup(), file.getPermission());
      if (!nn.namesystem.dir.mkdirs(target, ps, false, FSNamesystem.now())) {
        LOG.warn(errmsg);
        return;
      }
      // create chains
      int chain = 0;
      OutputStream fos = null;
      for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
        LocatedBlock lblock = lBlk;
        DatanodeInfo[] locs = lblock.getLocations();
        if (locs == null || locs.length == 0) {
          if (fos != null) {
            fos.flush();
            fos.close();
            fos = null;
          }
          continue;
        }
        if (fos == null) {
          fos = dfs.create(target + "/" + chain, true);
          if (fos != null) chain++;
          else {
            LOG.warn(errmsg + ": could not store chain " + chain);
            // perhaps we should bail out here...
            // return;
            continue;
          }
        }
       
        // copy the block. It's a pity it's not abstracted from DFSInputStream ...
        try {
          copyBlock(dfs, lblock, fos);
        } catch (Exception e) {
          e.printStackTrace();
          // something went wrong copying this block...
          LOG.warn(" - could not copy block " + lblock.getBlock() + " to " + target);
          fos.flush();
          fos.close();
          fos = null;
        }
      }
      if (fos != null) fos.close();
      LOG.warn("\n - moved corrupted file " + file.getPath() + " to /lost+found");
      dfs.delete(file.getPath().toString(), true);
    } catch (Exception e) {
      e.printStackTrace();
      LOG.warn(errmsg + ": " + e.getMessage());
    }
    } finally {
      dfs.close();
    }
  }
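
Note how the chain counter behaves: each contiguous run of blocks that still have live replicas is written to its own file, target + "/" + chain, and the stream is closed whenever an unrecoverable block is hit. A corrupt file with gaps therefore lands in /lost+found as a series of numbered fragments rather than as one file with silent holes.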

      create.write(testData.getBytes());
      create.hflush();
      long fileId = ((DFSOutputStream)create.
          getWrappedStream()).getFileId();
      FileStatus fileStatus = dfs.getFileStatus(filePath);
      DFSClient client = DFSClientAdapter.getClient(dfs);
      // add one dummy block at NN, but not write to DataNode
      ExtendedBlock previousBlock =
          DFSClientAdapter.getPreviousBlock(client, fileId);
      DFSClientAdapter.getNamenode(client).addBlock(
          pathString,
          client.getClientName(),
          new ExtendedBlock(previousBlock),
          new DatanodeInfo[0],
          DFSClientAdapter.getFileId((DFSOutputStream) create
              .getWrappedStream()), null);
      cluster.restartNameNode(0, true);

      final DataNode datanode = (DataNode) context.getAttribute("datanode");
      final Configuration conf =
        new HdfsConfiguration(datanode.getConf());
     
      try {
        final DFSClient dfs = DatanodeJspHelper.getDFSClient(request,
            datanode, conf, getUGI(request, conf));
        final MD5MD5CRC32FileChecksum checksum = dfs.getFileChecksum(path, Long.MAX_VALUE);
        MD5MD5CRC32FileChecksum.write(xml, checksum);
      } catch(IOException ioe) {
        writeXml(ioe, path, xml);
      } catch (InterruptedException e) {
        writeXml(e, path, xml);
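
Outside a servlet, the same checksum is available directly from DFSClient. A minimal sketch, assuming an existing file (the path and class name are illustrative; the two-argument getFileChecksum is the call used above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.hdfs.DFSClient;

public class ChecksumSketch {
  public static void main(String[] args) throws Exception {
    DFSClient dfs = new DFSClient(new Configuration());
    try {
      // Long.MAX_VALUE checksums the whole file; a smaller length
      // covers only a prefix of it.
      MD5MD5CRC32FileChecksum sum =
          dfs.getFileChecksum("/tmp/example.txt", Long.MAX_VALUE);
      System.out.println(sum);
    } finally {
      dfs.close();
    }
  }
}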

  @Test
  public void testDotdotInodePath() throws Exception {
    final Configuration conf = new Configuration();
    MiniDFSCluster cluster = null;
    DFSClient client = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
      cluster.waitActive();
      final DistributedFileSystem hdfs = cluster.getFileSystem();
      final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();

      final Path dir = new Path("/dir");
      hdfs.mkdirs(dir);
      long dirId = fsdir.getINode(dir.toString()).getId();
      long parentId = fsdir.getINode("/").getId();
      String testPath = "/.reserved/.inodes/" + dirId + "/..";

      client = new DFSClient(NameNode.getAddress(conf), conf);
      HdfsFileStatus status = client.getFileInfo(testPath);
      assertTrue(parentId == status.getFileId());
     
      // Test root's parent is still root
      testPath = "/.reserved/.inodes/" + parentId + "/..";
      status = client.getFileInfo(testPath);
      assertTrue(parentId == status.getFileId());
     
    } finally {
      IOUtils.cleanup(LOG, client);
      if (cluster != null) {
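
The test exercises HDFS inode paths: /.reserved/.inodes/<id> addresses a file or directory by inode number instead of by name. The two assertions confirm that ".." under an inode path resolves to the parent directory's inode id, and that the root directory is its own parent.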

  @Test
  public void testDecommissionStatus() throws IOException, InterruptedException {
    InetSocketAddress addr = new InetSocketAddress("localhost", cluster
        .getNameNodePort());
    DFSClient client = new DFSClient(addr, conf);
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
    assertEquals("Number of Datanodes ", 2, info.length);
    DistributedFileSystem fileSys = cluster.getFileSystem();
    DFSAdmin admin = new DFSAdmin(cluster.getConfiguration(0));

    short replicas = 2;

            DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
    ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
        failoverProxyProvider.getInterface().getClassLoader(),
        new Class[] { ClientProtocol.class }, dummyHandler);
   
    DFSClient client = new DFSClient(null, proxy, conf, null);
    return client;
  }
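
Handing a pre-built ClientProtocol proxy to the DFSClient constructor, with a null namenode address, is a test-only injection point: the dummyHandler behind the proxy can simulate failovers, timeouts, and retries without a running namenode.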
