Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.DFSClient
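
DFSClient is the low-level HDFS client that talks directly to the NameNode and DataNodes. DistributedFileSystem wraps one, and server-side code such as the NFS3 gateway, fsck, and the test suites frequently uses it directly. Before the collected snippets, here is a minimal sketch of the two common ways they obtain a client; the NameNode host and port ("localhost", 8020) are placeholder assumptions, and everything else follows the patterns visible in the fragments below.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class DFSClientObtainExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();

    // 1) Construct a client directly against a known NameNode address.
    DFSClient client = new DFSClient(
        new InetSocketAddress("localhost", 8020), conf);
    try {
      System.out.println("default replication: " + client.getDefaultReplication());
    } finally {
      client.close(); // the creator owns the client's lifecycle
    }

    // 2) Borrow the client backing an existing DistributedFileSystem;
    //    closing the file system also tears the client down.
    FileSystem fs = FileSystem.get(conf);
    if (fs instanceof DistributedFileSystem) {
      DFSClient borrowed = ((DistributedFileSystem) fs).getClient();
      System.out.println("borrowed client present: " + (borrowed != null));
    }
    fs.close();
  }
}

The first fragment appears to come from an NFS3 gateway handler: it rejects callers without read access, then pulls a per-user DFSClient out of a cache, answering NFS3ERR_SERVERFAULT when no client can be obtained.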


    if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) {
      response.setStatus(Nfs3Status.NFS3ERR_ACCES);
      return response;
    }
   
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
      response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
      return response;
    }
   
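The same cached-client pattern appears at the top of the NFS3 COMMIT handler: the per-user DFSClient is fetched before any protocol work is attempted.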


  @Override
  public COMMIT3Response commit(XDR xdr, Channel channel, int xid,
      SecurityHandler securityHandler, InetAddress client) {
    COMMIT3Response response = new COMMIT3Response(Nfs3Status.NFS3_OK);
    DFSClient dfsClient = clientCache.getDfsClient(securityHandler.getUser());
    if (dfsClient == null) {
      response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT);
      return response;
    }
   
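The next fragment appears to be NamenodeFsck's lostFoundMove helper. It creates a /lost+found target directory with the file's original owner, group, and permissions, streams each located block into numbered "chain" files via DFSClient.create, and then deletes the corrupt original.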

  private void lostFoundMove(FileStatus file, LocatedBlocks blocks)
    throws IOException {
    DFSClient dfs = new DFSClient(conf);
    if (!lfInited) {
      lostFoundInit(dfs);
    }
    if (!lfInitedOk) {
      return;
    }
    String target = lostFound + file.getPath();
    String errmsg = "Failed to move " + file.getPath() + " to /lost+found";
    try {
      PermissionStatus ps = new PermissionStatus(
          file.getOwner(), file.getGroup(), file.getPermission());
      if (!nn.namesystem.dir.mkdirs(target, ps, false, FSNamesystem.now())) {
        LOG.warn(errmsg);
        return;
      }
      // create chains
      int chain = 0;
      OutputStream fos = null;
      for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
        LocatedBlock lblock = lBlk;
        DatanodeInfo[] locs = lblock.getLocations();
        if (locs == null || locs.length == 0) {
          if (fos != null) {
            fos.flush();
            fos.close();
            fos = null;
          }
          continue;
        }
        if (fos == null) {
          fos = dfs.create(target + "/" + chain, true);
          if (fos != null) chain++;
          else {
            LOG.warn(errmsg + ": could not store chain " + chain);
            // perhaps we should bail out here...
            // return;
            continue;
          }
        }
       
        // copy the block. It's a pity it's not abstracted from DFSInputStream ...
        try {
          copyBlock(dfs, lblock, fos);
        } catch (Exception e) {
          e.printStackTrace();
          // something went wrong copying this block...
          LOG.warn(" - could not copy block " + lblock.getBlock() + " to " + target);
          fos.flush();
          fos.close();
          fos = null;
        }
      }
      if (fos != null) fos.close();
      LOG.warn("\n - moved corrupted file " + file.getPath() + " to /lost+found");
      dfs.delete(file.getPath().toString(), true);
    } catch (Exception e) {
      e.printStackTrace();
      LOG.warn(errmsg + ": " + e.getMessage());
    }
  }
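A test fragment that first asserts fsck reports the filesystem HEALTHY, then uses the DFSClient's namenode proxy to look up a file's first block name so the on-disk replica can be corrupted.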

      String outStr = runFsck(conf, 0, true, "/");
      assertTrue(outStr.contains("HEALTHY"));
     
      // Corrupt a block by deleting it
      String[] fileNames = util.getFileNames(topDir);
      DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                          cluster.getNameNodePort()), conf);
      String block = dfsClient.namenode.
                      getBlockLocations(fileNames[0], 0, Long.MAX_VALUE).
                      get(0).getBlock().getBlockName();
      File baseDir = new File(System.getProperty("test.build.data"),"dfs/data");
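A fuller corruption test: write a file into a three-datanode MiniDFSCluster, scribble over the on-disk block replicas, read the file back to trigger bad-block reporting, and then poll block locations through a fresh DFSClient until the replica count recovers.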

  public void testCorruptBlock() throws Exception {
    Configuration conf = new Configuration();
    conf.setLong("dfs.blockreport.intervalMsec", 1000);
    FileSystem fs = null;
    DFSClient dfsClient = null;
    LocatedBlocks blocks = null;
    int replicaCount = 0;
    Random random = new Random();
    String outStr = null;

    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    cluster.waitActive();
    fs = cluster.getFileSystem();
    Path file1 = new Path("/testCorruptBlock");
    DFSTestUtil.createFile(fs, file1, 1024, (short)3, 0);
    // Wait until file replication has completed
    DFSTestUtil.waitReplication(fs, file1, (short)3);
    String block = DFSTestUtil.getFirstBlock(fs, file1).getBlockName();

    // Make sure filesystem is in healthy state
    outStr = runFsck(conf, 0, true, "/");
    System.out.println(outStr);
    assertTrue(outStr.contains("HEALTHY"));
   
    // corrupt replicas
    File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
    for (int i=0; i < 6; i++) {
      File blockFile = new File(baseDir, "data" + (i+1) + "/current/" +
                                block);
      if (blockFile.exists()) {
        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
        FileChannel channel = raFile.getChannel();
        String badString = "BADBAD";
        int rand = random.nextInt((int)channel.size()/2);
        raFile.seek(rand);
        raFile.write(badString.getBytes());
        raFile.close();
      }
    }
    // Read the file to trigger reportBadBlocks
    try {
      IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf,
                        true);
    } catch (IOException ie) {
      // Ignore exception
    }

    dfsClient = new DFSClient(new InetSocketAddress("localhost",
                               cluster.getNameNodePort()), conf);
    blocks = dfsClient.namenode.
               getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    replicaCount = blocks.get(0).getLocations().length;
    while (replicaCount != 3) {
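A later revision of lostFoundMove builds its DFSClient against the NameNode address and wraps the whole body in try/finally so the client is always closed.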

  private void lostFoundMove(FileStatus file, LocatedBlocks blocks)
    throws IOException {
    final DFSClient dfs = new DFSClient(NameNode.getAddress(conf), conf);
    try {
    if (!lfInited) {
      lostFoundInit(dfs);
    }
    if (!lfInitedOk) {
      return;
    }
    String target = lostFound + file.getPath();
    String errmsg = "Failed to move " + file.getPath() + " to /lost+found";
    try {
      PermissionStatus ps = new PermissionStatus(
          file.getOwner(), file.getGroup(), file.getPermission());
      if (!nn.namesystem.dir.mkdirs(target, ps, false, FSNamesystem.now())) {
        LOG.warn(errmsg);
        return;
      }
      // create chains
      int chain = 0;
      OutputStream fos = null;
      for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
        LocatedBlock lblock = lBlk;
        DatanodeInfo[] locs = lblock.getLocations();
        if (locs == null || locs.length == 0) {
          if (fos != null) {
            fos.flush();
            fos.close();
            fos = null;
          }
          continue;
        }
        if (fos == null) {
          fos = dfs.create(target + "/" + chain, true);
          if (fos != null) chain++;
          else {
            LOG.warn(errmsg + ": could not store chain " + chain);
            // perhaps we should bail out here...
            // return;
            continue;
          }
        }
       
        // copy the block. It's a pity it's not abstracted from DFSInputStream ...
        try {
          copyBlock(dfs, lblock, fos);
        } catch (Exception e) {
          e.printStackTrace();
          // something went wrong copying this block...
          LOG.warn(" - could not copy block " + lblock.getBlock() + " to " + target);
          fos.flush();
          fos.close();
          fos = null;
        }
      }
      if (fos != null) fos.close();
      LOG.warn("\n - moved corrupted file " + file.getPath() + " to /lost+found");
      dfs.delete(file.getPath().toString(), true);
    } catch (Exception e) {
      e.printStackTrace();
      LOG.warn(errmsg + ": " + e.getMessage());
    }
    } finally {
      dfs.close();
    }
  }
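A fragment apparently from the HDFS RAID contrib module: effective replication is computed as total physical bytes over total logical bytes, with DFSClient supplying the namespace disk usage and the default replication factor.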

  public double getEffectiveReplication() {
    if (lastRaidStatistics == null) {
      return -1;
    }
    DFSClient dfs;
    double totalPhysical;
    try {
      dfs = new DFSClient(conf);
      totalPhysical = dfs.getNSDiskStatus().getDfsUsed();
    } catch (IOException e) {
      return -1;
    }
    double notRaidedPhysical = totalPhysical;
    double totalLogical = 0;
    for (Codec codec : Codec.getCodecs()) {
      String code = codec.id;
      Statistics st = lastRaidStatistics.get(code);
      totalLogical += st.getSourceCounters(RaidState.RAIDED).getNumLogical();
      notRaidedPhysical -= st.getSourceCounters(RaidState.RAIDED).getNumBytes();
      notRaidedPhysical -= st.getParityCounters().getNumBytes();
    }
    totalLogical += notRaidedPhysical / dfs.getDefaultReplication();
    if (totalLogical == 0) {
      // avoid dividing by zero
      return -1;
    }
    return totalPhysical / totalLogical;
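A copy-job fragment: chunked copying is enabled only when the destination DistributedFileSystem's DFSClient reports that concat is available.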

        }
        if(dstfs instanceof DistributedFileSystem) {
          DistributedFileSystem dstdistfs = (DistributedFileSystem) dstfs;
          //enable copy-by-chunk only if the concat method is available on
          //the distributed file system
          DFSClient dfsClient = dstdistfs.getClient();
          if (dfsClient.isConcatAvailable())
            copyByChunk = true;
        }
        LOG.debug("After check, copy by chunk is set to: " + copyByChunk);
      }
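A decommissioning test helper that borrows the DFSClient behind an existing DistributedFileSystem and asks it for a report of live datanodes, from which it picks one at random.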

    teardown();
  }
 
  private String decommissionOneNode() throws IOException {
   
    DFSClient client = ((DistributedFileSystem)fileSys).getClient();
    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);

    int index = 0;
    boolean found = false;
    while (!found) {
      index = rand.nextInt(info.length);
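Finally, what looks like a lease-recovery helper's run() method: it checks that the path still names an under-construction INode, then opens the file through a fresh DFSClient and copies the last block's reported length back into the NameNode-side block metadata.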

 
    @Override
    public void run() {
      boolean error = false;
      INodeFile node = null;
      DFSClient client = null;

      try {
        client = new DFSClient(conf);

  LOG.info("Trying to update lease for file at " + path);

        // verify that path exists in namespace
        node = fsNamesys.dir.getFileINode(path);
        if (node == null || !node.isUnderConstruction()) {
          error = true;
        }
      }
      catch (IOException e) {
        LOG.error(StringUtils.stringifyException(e));
        error = true;
      }

      // Could not find inode in FSNamespace, quit now
      if (error) {
        LOG.error("Couldn't update length for leased file at " + path +
                  " because file not in namespace");
        return;
      }

      BlockInfo[] blks = node.getBlocks();

      // If NN has not leased out any block, return
      if (blks.length == 0) return;

      int index = blks.length - 1; // index of last file block

      LOG.info("Block at index " + index + " being written for file at  " +
               path);

      // Pessimistically update last block length from DataNode.
      // File could have been renamed, and a new file created in its place.
      try {
        DFSInputStream stm = client.open(path);
        DFSLocatedBlocks locBlks = stm.fetchLocatedBlocks();

        if (locBlks.locatedBlockCount() >= blks.length) {
          if (blks[index] != null && locBlks.get(index) != null) {
            if (blks[index].getBlockId() == locBlks.get(index).getBlock().getBlockId()) {
              blks[index].setNumBytes(locBlks.get(index).getBlock().getNumBytes());
            }
          }
        }

        stm.close();
        client.close(); // close dfs client
      }
      catch (IOException e) {
        LOG.error(StringUtils.stringifyException(e));
      }
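Pulling the recurring pattern together: a minimal read sketch in which the stream and the client are both released in finally blocks, as the fsck and lease-update fragments above do. The NameNode address and the file path are placeholder assumptions.

import java.io.InputStream;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.io.IOUtils;

public class DFSClientReadExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DFSClient client = new DFSClient(
        new InetSocketAddress("localhost", 8020), conf); // placeholder address
    try {
      InputStream in = client.open("/example/file");     // placeholder path
      try {
        // Copy the file's bytes to stdout; 'false' leaves stdout open.
        IOUtils.copyBytes(in, System.out, conf, false);
      } finally {
        in.close();
      }
    } finally {
      client.close();
    }
  }
}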
