Package org.apache.hadoop.hdfs.protocol

Examples of org.apache.hadoop.hdfs.protocol.HdfsFileStatus
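HdfsFileStatus is the HDFS-side counterpart of FileStatus: the NameNode returns it from ClientProtocol.getFileInfo, and it bundles a file's length, replication, block size, modification and access times, permissions, owner and group, symlink target, and file ID. Before the excerpts, a minimal sketch of fetching one through DFSClient, assuming a Hadoop 2.x client; the NameNode URI and path below are placeholders, not values taken from the excerpts:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    public class HdfsFileStatusDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder NameNode address: point this at a real cluster.
        DFSClient client = new DFSClient(URI.create("hdfs://localhost:8020"), conf);
        try {
          // getFileInfo returns null if the path does not exist.
          HdfsFileStatus status = client.getFileInfo("/tmp/example.txt");
          if (status != null) {
            System.out.println("len=" + status.getLen()
                + " dir=" + status.isDir()
                + " repl=" + status.getReplication()
                + " blockSize=" + status.getBlockSize());
          }
        } finally {
          client.close();
        }
      }
    }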


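From what looks like the NameNode's edit-log replay of an OP_ADD (the excerpt opens mid-statement): once the new file is in place, a lease is registered for the client, and, if the operation may be retried, an HdfsFileStatus built by the directory's createFileStatus is stored as the retry-cache payload.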
            addCloseOp.clientMachine);
        fsNamesys.leaseManager.addLease(addCloseOp.clientName, path);

        // add the op into retry cache if necessary
        if (toAddRetryCache) {
          HdfsFileStatus stat = fsNamesys.dir.createFileStatus(
              HdfsFileStatus.EMPTY_NAME, newFile, Snapshot.CURRENT_STATE_ID);
          fsNamesys.addCacheEntryWithPayload(addCloseOp.rpcClientId,
              addCloseOp.rpcCallId, stat);
        }
      } else { // This is OP_ADD on an existing file


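A test driver, apparently from a fsck test suite: it builds a NamenodeFsck, fetches the target file's HdfsFileStatus through the NameNode's RPC server, and passes both path and status into fsck.check.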
      InetAddress remoteAddress = InetAddress.getLocalHost();
      NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
          NUM_REPLICAS, (short)1, remoteAddress);
     
      // Run the fsck and check the Result
      final HdfsFileStatus file =
          namenode.getRpcServer().getFileInfo(pathString);
      assertNotNull(file);
      Result res = new Result(conf);
      fsck.check(pathString, file, res);
      // Also print the output from the fsck, for ex post facto sanity checks

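A near-identical run from the same suite, sized by NUM_DN and REPL_FACTOR instead, which goes on to inspect the mis-replicated block count in the Result.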
      InetAddress remoteAddress = InetAddress.getLocalHost();
      NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
          NUM_DN, REPL_FACTOR, remoteAddress);
     
      // Run the fsck and check the Result
      final HdfsFileStatus file =
          namenode.getRpcServer().getFileInfo(pathString);
      assertNotNull(file);
      Result res = new Result(conf);
      fsck.check(pathString, file, res);
      // Check the mis-replicated block count.

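A test can also build an HdfsFileStatus by hand, filling in every field of the constructor, to exercise a Result check without a live NameNode: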
    // string2Bytes already allocates the array; no separate new byte[128] is needed.
    byte[] path = DFSUtil.string2Bytes(pathString);
    long fileId = 312321L;
    int numChildren = 1;

    HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
        blockSize, modTime, accessTime, perms, owner, group, symlink, path,
        fileId, numChildren);
    Result res = new Result(conf);

    try {

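The NameNode's RPC handler for create: after the path length is validated, FSNamesystem.startFile returns the new file's HdfsFileStatus and the file-creation metrics are incremented.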
    }
    if (!checkPathLength(src)) {
      throw new IOException("create: Pathname too long.  Limit "
          + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
    }
    HdfsFileStatus fileStatus = namesystem.startFile(src, new PermissionStatus(
        getRemoteUser().getShortUserName(), null, masked),
        clientName, clientMachine, flag.get(), createParent, replication,
        blockSize);
    metrics.incrFilesCreated();
    metrics.incrCreateFileOps();

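getLinkTarget resolves a symbolic link: getFileInfo(path, false) returns the link's own HdfsFileStatus without following it, which is then checked for existence and isSymlink() before the target is returned.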
  }

  @Override // ClientProtocol
  public String getLinkTarget(String path) throws IOException {
    metrics.incrGetLinkTargetOps();
    HdfsFileStatus stat = null;
    try {
      stat = namesystem.getFileInfo(path, false);
    } catch (UnresolvedPathException e) {
      return e.getResolvedPath().toString();
    } catch (UnresolvedLinkException e) {
      // The NameNode should only throw an UnresolvedPathException
      throw new AssertionError("UnresolvedLinkException thrown");
    }
    if (stat == null) {
      throw new FileNotFoundException("File does not exist: " + path);
    } else if (!stat.isSymlink()) {
      throw new IOException("Path " + path + " is not a symbolic link");
    }
    return stat.getSymlink();
  }

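A test helper that snapshots a file's initial contents: HdfsFileStatus.getLen() sizes the buffer, then a DFSInputStream reads the file fully.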
    public int getTotalMissingBlocks() {
      return blocksToCorrupt.size();
    }

    private byte[] cacheInitialContents() throws IOException {
      HdfsFileStatus status = dfsClient.getFileInfo(name);
      byte[] content = new byte[(int)status.getLen()];
      DFSInputStream in = null;
      try {
        in = dfsClient.open(name);
        IOUtils.readFully(in, content, 0, content.length);
      } finally {

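Its sibling checkSalvagedRemains derives the expected number of blocks from the length reported by HdfsFileStatus before reading the file back block by block.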
      }
    }
   
    public void checkSalvagedRemains() throws IOException {
      int chainIdx = 0;
      HdfsFileStatus status = dfsClient.getFileInfo(name);
      long length = status.getLen();
      int numBlocks = (int)((length + blockSize - 1) / blockSize);
      DFSInputStream in = null;
      byte[] blockBuffer = new byte[blockSize];

      try {

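A NameNode retry-cache test: the same startFile call, retried with an identical client identity, is expected to return an equal HdfsFileStatus instead of failing.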
  @Test
  public void testCreate() throws Exception {
    String src = "/testNamenodeRetryCache/testCreate/file";
    // Two retried calls succeed
    newCall();
    HdfsFileStatus status = namesystem.startFile(src, perm, "holder",
        "clientmachine", EnumSet.of(CreateFlag.CREATE), true, (short) 1, BlockSize);
    Assert.assertEquals(status, namesystem.startFile(src, perm,
        "holder", "clientmachine", EnumSet.of(CreateFlag.CREATE),
        true, (short) 1, BlockSize));
    Assert.assertEquals(status, namesystem.startFile(src, perm,

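From what appears to be the fsck entry point (the excerpt opens inside a loop collecting snapshottable directories): the path's HdfsFileStatus is fetched, and, if the file exists, its corrupt file blocks can be listed.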
            snapshottableDirs.add(dir.getFullPath().toString());
          }
        }
      }

      final HdfsFileStatus file = namenode.getRpcServer().getFileInfo(path);
      if (file != null) {

        if (showCorruptFileBlocks) {
          listCorruptFileBlocks();
          return;