Package org.apache.hadoop.hdfs

Examples of org.apache.hadoop.hdfs.DFSClient$DFSOutputStream$Packet
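Packet is a package-private inner class of DFSOutputStream: each instance buffers a run of data chunks plus their checksums until the streamer thread ships it down the DataNode write pipeline. Client code never constructs a Packet directly; packets are produced whenever a stream obtained from DFSClient#create is written to. A minimal sketch of that path (assumes a reachable cluster configured via fs.defaultFS; the path /demo/file is illustrative):

import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;

public class PacketWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();      // picks up fs.defaultFS
    DFSClient client = new DFSClient(conf);
    OutputStream out = client.create("/demo/file", true);
    try {
      // Bytes written here are chunked and checksummed into Packet
      // instances, which the streamer thread sends to the pipeline.
      out.write("hello, pipeline".getBytes("UTF-8"));
    } finally {
      out.close();   // flushes the final packet and waits for pipeline acks
      client.close();
    }
  }
}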


  @Test (timeout=60000)
  public void testAppend() throws Exception {
    final DFSClient client = genClientWithDummyHandler();
    AtMostOnceOp op = new AppendOp(client, "/testfile");
    testClientRetryWithFailover(op);
  }


  @Test (timeout=60000)
  public void testRename() throws Exception {
    final DFSClient client = genClientWithDummyHandler();
    AtMostOnceOp op = new RenameOp(client, "/file1", "/file2");
    testClientRetryWithFailover(op);
  }

  @Test (timeout=60000)
  public void testRename2() throws Exception {
    final DFSClient client = genClientWithDummyHandler();
    AtMostOnceOp op = new Rename2Op(client, "/file1", "/file2");
    testClientRetryWithFailover(op);
  }

  @Test (timeout=60000)
  public void testConcat() throws Exception {
    final DFSClient client = genClientWithDummyHandler();
    AtMostOnceOp op = new ConcatOp(client, new Path("/test/file"), 5);
    testClientRetryWithFailover(op);
  }

  @Test (timeout=60000)
  public void testDelete() throws Exception {
    final DFSClient client = genClientWithDummyHandler();
    AtMostOnceOp op = new DeleteOp(client, "/testfile");
    testClientRetryWithFailover(op);
  }

  @Test (timeout=60000)
  public void testCreateSymlink() throws Exception {
    final DFSClient client = genClientWithDummyHandler();
    AtMostOnceOp op = new CreateSymlinkOp(client, "/testfile", "/testlink");
    testClientRetryWithFailover(op);
  }

  @Test (timeout=60000)
  public void testUpdatePipeline() throws Exception {
    final DFSClient client = genClientWithDummyHandler();
    AtMostOnceOp op = new UpdatePipelineOp(client, "/testfile");
    testClientRetryWithFailover(op);
  }
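The @Test methods above appear to come from the HDFS HA retry-cache tests: each AtMostOnceOp wraps one non-idempotent RPC, and testClientRetryWithFailover fails the NameNode over mid-call so the client's retried request must be answered from the retry cache instead of executing a second time. A hypothetical sketch of such a wrapper (the real AtMostOnceOp hierarchy is part of the test harness; the class and method names below are illustrative):

import java.io.IOException;
import org.apache.hadoop.hdfs.DFSClient;

// Illustrative stand-in for the RenameOp used above, not the actual test class.
class RenameOpSketch {
  private final DFSClient client;
  private final String src, dst;

  RenameOpSketch(DFSClient client, String src, String dst) {
    this.client = client;
    this.src = src;
    this.dst = dst;
  }

  void invoke() throws IOException {
    // rename2 is a retry-cache-protected ClientProtocol call: if the first
    // attempt committed before failover, the retry must be served the cached
    // result rather than renaming twice.
    client.getNamenode().rename2(src, dst);
  }
}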

  private void copyBlocksToLostFound(String parent, HdfsFileStatus file,
        LocatedBlocks blocks) throws IOException {
    final DFSClient dfs = new DFSClient(NameNode.getAddress(conf), conf);
    final String fullName = file.getFullName(parent);
    OutputStream fos = null;
    try {
      if (!lfInited) {
        lostFoundInit(dfs);
      }
      if (!lfInitedOk) {
        throw new IOException("failed to initialize lost+found");
      }
      String target = lostFound + fullName;
      if (hdfsPathExists(target)) {
        LOG.warn("Fsck: can't copy the remains of " + fullName + " to " +
          "lost+found, because " + target + " already exists.");
        return;
      }
      if (!namenode.getRpcServer().mkdirs(
          target, file.getPermission(), true)) {
        throw new IOException("failed to create directory " + target);
      }
      // Copy readable blocks into numbered "chain" files: consecutive
      // recoverable blocks share a chain; an unrecoverable block closes
      // the current chain so the gap shows up as a file boundary.
      int chain = 0;
      boolean copyError = false;
      for (LocatedBlock lBlk : blocks.getLocatedBlocks()) {
        LocatedBlock lblock = lBlk;
        DatanodeInfo[] locs = lblock.getLocations();
        if (locs == null || locs.length == 0) {
          // No live replicas for this block: end the current chain here.
          if (fos != null) {
            fos.flush();
            fos.close();
            fos = null;
          }
          continue;
        }
        if (fos == null) {
          fos = dfs.create(target + "/" + chain, true);
          if (fos == null) {
            throw new IOException("Failed to copy " + fullName +
                " to /lost+found: could not store chain " + chain);
          }
          chain++;
        }
       
        // copy the block. It's a pity it's not abstracted from DFSInputStream ...
        try {
          copyBlock(dfs, lblock, fos);
        } catch (Exception e) {
          LOG.error("Fsck: could not copy block " + lblock.getBlock() +
              " to " + target, e);
          fos.flush();
          fos.close();
          fos = null;
          internalError = true;
          copyError = true;
        }
      }
      if (copyError) {
        LOG.warn("Fsck: there were errors copying the remains of the " +
          "corrupted file " + fullName + " to /lost+found");
      } else {
        LOG.info("Fsck: copied the remains of the corrupted file " +
          fullName + " to /lost+found");
      }
    } catch (Exception e) {
      LOG.error("copyBlocksToLostFound: error processing " + fullName, e);
      internalError = true;
    } finally {
      if (fos != null) fos.close();
      dfs.close();
    }
  }
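copyBlock (invoked above) reads a single block straight off a DataNode, which is why the inline comment laments that the logic is not abstracted out of DFSInputStream. For the simpler case where the source file is still readable through the normal client API, the copy step reduces to a plain stream copy; a sketch under that assumption, with illustrative paths:

import java.io.InputStream;
import java.io.OutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.io.IOUtils;

public class CopyRemainsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DFSClient dfs = new DFSClient(conf);
    InputStream in = dfs.open("/corrupt/file");                        // source remains
    OutputStream out = dfs.create("/lost+found/corrupt.file/0", true); // "chain 0"
    try {
      IOUtils.copyBytes(in, out, conf, false);  // stream whatever is readable
    } finally {
      IOUtils.closeStream(in);
      IOUtils.closeStream(out);
      dfs.close();
    }
  }
}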

  private static DFSClient getDFSClient(final UserGroupInformation user,
      final String addr, final Configuration conf) throws IOException,
                                                 InterruptedException {
    return
      user.doAs(new PrivilegedExceptionAction<DFSClient>() {
        @Override
        public DFSClient run() throws IOException {
          return new DFSClient(NetUtils.createSocketAddr(addr), conf);
        }
      });
  }
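The doAs wrapper matters because the DFSClient constructor captures the current user: constructing the client inside run() makes every subsequent RPC carry the browsing user's identity rather than the servlet's. A hypothetical caller (the user name and NameNode address are placeholders):

import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsClientSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    final String addr = "namenode.example.com:8020";  // placeholder address
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("webuser");
    DFSClient client = ugi.doAs(new PrivilegedExceptionAction<DFSClient>() {
      @Override
      public DFSClient run() throws IOException {
        return new DFSClient(NetUtils.createSocketAddr(addr), conf);
      }
    });
    client.close();
  }
}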

    if (nnAddr == null) {
      out.print(JspHelper.NAMENODE_ADDRESS + " url param is null");
      return;
    }
   
    DFSClient dfs = getDFSClient(ugi, nnAddr, conf);
    String target = dir;
    final HdfsFileStatus targetStatus = dfs.getFileInfo(target);
    if (targetStatus == null) { // does not exist
      out.print("<h3>File or directory : " + target + " does not exist</h3>");
      JspHelper.printGotoForm(out, namenodeInfoPort, tokenString, target,
          nnAddr);
    } else {
      if (!targetStatus.isDir()) { // a file
        List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(dir, 0, 1)
            .getLocatedBlocks();

        LocatedBlock firstBlock = null;
        DatanodeInfo[] locations = null;
        if (blocks.size() > 0) {
          firstBlock = blocks.get(0);
          locations = firstBlock.getLocations();
        }
        if (locations == null || locations.length == 0) {
          out.print("Empty file");
        } else {
          DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf);
          String fqdn = canonicalize(chosenNode.getIpAddr());
          int datanodePort = chosenNode.getXferPort();
          String redirectLocation = HttpConfig.getSchemePrefix() + fqdn + ":"
              + chosenNode.getInfoPort() + "/browseBlock.jsp?blockId="
              + firstBlock.getBlock().getBlockId() + "&blockSize="
              + firstBlock.getBlock().getNumBytes() + "&genstamp="
              + firstBlock.getBlock().getGenerationStamp() + "&filename="
              + URLEncoder.encode(dir, "UTF-8") + "&datanodePort="
              + datanodePort + "&namenodeInfoPort=" + namenodeInfoPort
              + JspHelper.getDelegationTokenUrlParam(tokenString)
              + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
          resp.sendRedirect(redirectLocation);
        }
        return;
      }
      // directory
      // generate a table and dump the info
      String[] headings = { "Name", "Type", "Size", "Replication",
          "Block Size", "Modification Time", "Permission", "Owner", "Group" };
      out.print("<h3>Contents of directory ");
      JspHelper.printPathWithLinks(dir, out, namenodeInfoPort, tokenString,
          nnAddr);
      out.print("</h3><hr>");
      JspHelper.printGotoForm(out, namenodeInfoPort, tokenString, dir, nnAddr);
      out.print("<hr>");

      File f = new File(dir);
      String parent;
      if ((parent = f.getParent()) != null)
        out.print("<a href=\"" + req.getRequestURL() + "?dir=" + parent
            + "&namenodeInfoPort=" + namenodeInfoPort
            + JspHelper.getDelegationTokenUrlParam(tokenString)
            + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr)
            + "\">Go to parent directory</a><br>");

      DirectoryListing thisListing =
        dfs.listPaths(target, HdfsFileStatus.EMPTY_NAME);
      if (thisListing == null || thisListing.getPartialListing().length == 0) {
        out.print("Empty directory");
      } else {
        JspHelper.addTableHeader(out);
        int row = 0;
        JspHelper.addTableRow(out, headings, row++);
        String cols[] = new String[headings.length];
        do {
          HdfsFileStatus[] files = thisListing.getPartialListing();
          for (int i = 0; i < files.length; i++) {
            String localFileName = files[i].getLocalName();
            // Fill in the display columns for this entry
            if (!files[i].isDir()) {
              cols[1] = "file";
              cols[2] = StringUtils.byteDesc(files[i].getLen());
              cols[3] = Short.toString(files[i].getReplication());
              cols[4] = StringUtils.byteDesc(files[i].getBlockSize());
            } else {
              cols[1] = "dir";
              cols[2] = "";
              cols[3] = "";
              cols[4] = "";
            }
            String datanodeUrl = req.getRequestURL() + "?dir="
              + URLEncoder.encode(files[i].getFullName(target), "UTF-8")
              + "&namenodeInfoPort=" + namenodeInfoPort
              + JspHelper.getDelegationTokenUrlParam(tokenString)
              + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr);
            cols[0] = "<a href=\"" + datanodeUrl + "\">"
              + localFileName + "</a>";
            cols[5] = lsDateFormat.format(new Date((files[i]
              .getModificationTime())));
            cols[6] = files[i].getPermission().toString();
            cols[7] = files[i].getOwner();
            cols[8] = files[i].getGroup();
            JspHelper.addTableRow(out, cols, row++);
          }
          if (!thisListing.hasMore()) {
            break;
          }
          thisListing = dfs.listPaths(target, thisListing.getLastName());
        } while (thisListing != null);
        JspHelper.addTableFooter(out);
      }
    }
    out.print("<br><a href=\"" + HttpConfig.getSchemePrefix()
        + canonicalize(nnAddr) + ":"
        + namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>");
    dfs.close();
  }
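The directory branch above pages through listings rather than fetching everything in one RPC: listPaths returns a partial listing plus a cursor (the last name returned), and the loop re-issues the call until hasMore() is false. The same loop in isolation, listing the root directory (path illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class ListPathsSketch {
  public static void main(String[] args) throws Exception {
    DFSClient dfs = new DFSClient(new Configuration());
    DirectoryListing listing = dfs.listPaths("/", HdfsFileStatus.EMPTY_NAME);
    while (listing != null) {
      for (HdfsFileStatus stat : listing.getPartialListing()) {
        System.out.println(stat.getLocalName());
      }
      if (!listing.hasMore()) {
        break;
      }
      // Resume the listing after the last name already returned.
      listing = dfs.listPaths("/", listing.getLastName());
    }
    dfs.close();
  }
}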
