Package org.apache.hadoop

Examples of org.apache.hadoop.HadoopIllegalArgumentException
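
HadoopIllegalArgumentException is Hadoop's unchecked exception for invalid method arguments: it lives in the org.apache.hadoop package (hadoop-common) and extends java.lang.IllegalArgumentException, so existing catch blocks for the JDK type also catch it. The snippets below, drawn from the Hadoop sources, all follow the same guard-clause pattern. First, a minimal standalone sketch of that pattern; the GuardDemo class and its requireNonNull helper are ours for illustration, not part of Hadoop:

import org.apache.hadoop.HadoopIllegalArgumentException;

public class GuardDemo {
  // Hypothetical helper mirroring the guard clauses in the snippets below.
  static void requireNonNull(Object value, String name) {
    if (value == null) {
      throw new HadoopIllegalArgumentException(name + " cannot be null");
    }
  }

  public static void main(String[] args) {
    try {
      requireNonNull(null, "path");
    } catch (IllegalArgumentException e) { // the Hadoop subtype is caught here too
      System.err.println("rejected: " + e.getMessage());
    }
  }
}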


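From FileContext#setOwner, which rejects a call where the username and groupname are both null: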
  public void setOwner(final Path f, final String username,
      final String groupname) throws AccessControlException,
      UnsupportedFileSystemException, FileNotFoundException,
      IOException {
    if ((username == null) && (groupname == null)) {
      throw new HadoopIllegalArgumentException(
          "username and groupname cannot both be null");
    }
    final Path absF = fixRelativePart(f);
    new FSLinkResolver<Void>() {
      @Override
      public Void next(final AbstractFileSystem fs, final Path p)
          throws IOException, UnresolvedLinkException {
        fs.setOwner(p, username, groupname);
        return null;
      }
    }.resolve(this, absF);
  }
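
A hedged sketch of tripping that guard from client code; the path is hypothetical, and FileContext.getFileContext() binds to the default file system from the loaded configuration:

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class SetOwnerDemo {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext();
    // Passing null for both username and groupname trips the guard above.
    fc.setOwner(new Path("/tmp/example"), null, null);
  }
}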


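From the getfattr shell command (XAttrCommands), an excerpt that validates the -d/-n options and the path argument: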
      boolean r = StringUtils.popOption("-R", args);
      setRecursive(r);
      dump = StringUtils.popOption("-d", args);

      if (!dump && name == null) {
        throw new HadoopIllegalArgumentException(
            "Must specify '-n name' or '-d' option.");
      }

      if (args.isEmpty()) {
        throw new HadoopIllegalArgumentException("<path> is missing.");
      }
      if (args.size() > 1) {
        throw new HadoopIllegalArgumentException("Too many arguments.");
      }
    }

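From the companion setfattr shell command (XAttrCommands), an excerpt requiring exactly one of -n (set an attribute) and -x (remove one):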
        value = XAttrCodec.decodeValue(v);
      }
      xname = StringUtils.popOptionWithArgument("-x", args);

      if (name != null && xname != null) {
        throw new HadoopIllegalArgumentException(
            "Can not specify both '-n name' and '-x name' option.");
      }
      if (name == null && xname == null) {
        throw new HadoopIllegalArgumentException(
            "Must specify '-n name' or '-x name' option.");
      }

      if (args.isEmpty()) {
        throw new HadoopIllegalArgumentException("<path> is missing.");
      }
      if (args.size() > 1) {
        throw new HadoopIllegalArgumentException("Too many arguments.");
      }
    }

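From the DataNode's ReplicaMap, whose constructor refuses a null object to synchronize on: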
  private final Map<String, Map<Long, ReplicaInfo>> map =
    new HashMap<String, Map<Long, ReplicaInfo>>();
 
  // Object on which this map's accessors synchronize.
  private final Object mutex;

  ReplicaMap(Object mutex) {
    if (mutex == null) {
      throw new HadoopIllegalArgumentException(
          "Object to synchronize on cannot be null");
    }
    this.mutex = mutex;
  }

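From the getfacl shell command (AclCommands), whose processOptions accepts an optional -R flag and exactly one path: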
    protected void processOptions(LinkedList<String> args) throws IOException {
      CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "R");
      cf.parse(args);
      setRecursive(cf.getOpt("R"));
      if (args.isEmpty()) {
        throw new HadoopIllegalArgumentException("<path> is missing");
      }
      if (args.size() > 1) {
        throw new HadoopIllegalArgumentException("Too many arguments");
      }
    }
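
A hedged sketch of the CommandFormat parsing pattern seen above, standalone and using only the calls visible in the snippet; the argument values are hypothetical:

import java.util.LinkedList;

import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.fs.shell.CommandFormat;

public class ParseDemo {
  public static void main(String[] args) {
    LinkedList<String> argList = new LinkedList<>();
    argList.add("-R");
    argList.add("/user/alice");

    // Accept any number of positional args plus an optional -R flag.
    CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "R");
    cf.parse(argList);                 // removes recognized options from argList
    boolean recursive = cf.getOpt("R");

    if (argList.isEmpty()) {
      throw new HadoopIllegalArgumentException("<path> is missing");
    }
    System.out.println("recursive=" + recursive + ", path=" + argList.getFirst());
  }
}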

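From the setfacl shell command (AclCommands), an excerpt that rejects contradictory mixes of remove, modify (-m/-x), and --set flags before checking the path: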
      boolean oneModifyOption = cf.getOpt("m") || cf.getOpt("x");
      boolean setOption = cf.getOpt("-set");
      if ((bothRemoveOptions || bothModifyOptions)
          || (oneRemoveOption && oneModifyOption)
          || (setOption && (oneRemoveOption || oneModifyOption))) {
        throw new HadoopIllegalArgumentException(
            "Specified flags contains both remove and modify flags");
      }

      // Only -m, -x and --set expect <acl_spec>
      if (oneModifyOption || setOption) {
        if (args.size() < 2) {
          throw new HadoopIllegalArgumentException("<acl_spec> is missing");
        }
        aclEntries = AclEntry.parseAclSpec(args.removeFirst(), !cf.getOpt("x"));
      }

      if (args.isEmpty()) {
        throw new HadoopIllegalArgumentException("<path> is missing");
      }
      if (args.size() > 1) {
        throw new HadoopIllegalArgumentException("Too many arguments");
      }

      // In recursive mode, save a separate list of just the access ACL entries.
      // Only directories may have a default ACL.  When a recursive operation
      // encounters a file under the specified path, it must pass only the
      // access ACL entries.

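From FSNamesystem#getBlockLocationsInt on the NameNode, which rejects negative read offsets and lengths: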
  private LocatedBlocks getBlockLocationsInt(String src, long offset,
      long length, boolean doAccessTime, boolean needBlockToken,
      boolean checkSafeMode)
      throws FileNotFoundException, UnresolvedLinkException, IOException {
    if (offset < 0) {
      throw new HadoopIllegalArgumentException(
          "Negative offset is not supported. File: " + src);
    }
    if (length < 0) {
      throw new HadoopIllegalArgumentException(
          "Negative length is not supported. File: " + src);
    }
    final LocatedBlocks ret = getBlockLocationsUpdateTimes(src,
        offset, length, doAccessTime, needBlockToken);
    logAuditEvent(true, "open", src);
    return ret;
  }

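From the NameNode's concat validation (FSNamesystem), which checks that the target is a non-empty, completed, non-snapshotted file with a full last block, and that every source file is compatible with it: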
    // check the target
    final INodeFile trgInode = INodeFile.valueOf(dir.getINode4Write(target),
        target);
    if(trgInode.isUnderConstruction()) {
      throw new HadoopIllegalArgumentException("concat: target file "
          + target + " is under construction");
    }
    // per design target shouldn't be empty and all the blocks same size
    if(trgInode.numBlocks() == 0) {
      throw new HadoopIllegalArgumentException("concat: target file "
          + target + " is empty");
    }
    if (trgInode.isWithSnapshot()) {
      throw new HadoopIllegalArgumentException("concat: target file "
          + target + " is in a snapshot");
    }

    long blockSize = trgInode.getPreferredBlockSize();

    // check the end block to be full
    final BlockInfo last = trgInode.getLastBlock();
    if(blockSize != last.getNumBytes()) {
      throw new HadoopIllegalArgumentException("The last block in " + target
          + " is not full; last block size = " + last.getNumBytes()
          + " but file block size = " + blockSize);
    }

    si.add(trgInode);
    final short repl = trgInode.getFileReplication();

    // now check the srcs
    boolean endSrc = false; // final src file doesn't have to have full end block
    for(int i=0; i<srcs.length; i++) {
      String src = srcs[i];
      if(i==srcs.length-1)
        endSrc=true;

      final INodeFile srcInode = INodeFile.valueOf(dir.getINode4Write(src), src);
      if(src.isEmpty()
          || srcInode.isUnderConstruction()
          || srcInode.numBlocks() == 0) {
        throw new HadoopIllegalArgumentException("concat: source file " + src
            + " is invalid or empty or underConstruction");
      }

      // check replication and blocks size
      if(repl != srcInode.getBlockReplication()) {
        throw new HadoopIllegalArgumentException("concat: the source file "
            + src + " and the target file " + target
            + " should have the same replication: source replication is "
            + srcInode.getBlockReplication()
            + " but target replication is " + repl);
      }

      //boolean endBlock=false;
      // verify that all the blocks are of the same length as target
      // should be enough to check the end blocks
      final BlockInfo[] srcBlocks = srcInode.getBlocks();
      int idx = srcBlocks.length-1;
      if(endSrc)
        idx = srcBlocks.length-2; // end block of endSrc is OK not to be full
      if(idx >= 0 && srcBlocks[idx].getNumBytes() != blockSize) {
        throw new HadoopIllegalArgumentException("concat: the source file "
            + src + " and the target file " + target
            + " should have the same blocks sizes: target block size is "
            + blockSize + " but the size of source block " + idx + " is "
            + srcBlocks[idx].getNumBytes());
      }

      si.add(srcInode);
    }

    // make sure no two files are the same
    if(si.size() < srcs.length+1) { // trg + srcs
      // it means at least two files are the same
      throw new HadoopIllegalArgumentException(
          "concat: at least two of the source files are the same");
    }

    if(NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* NameSystem.concat: " +
          Arrays.toString(srcs) + " to " + target);
    }
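
A hedged sketch of invoking concat from a client; the paths are hypothetical, and the call only succeeds against a file system that implements it (HDFS does; the FileSystem base class throws UnsupportedOperationException):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ConcatDemo {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path target = new Path("/data/part-full");                      // hypothetical
    Path[] srcs = { new Path("/data/part-1"), new Path("/data/part-2") };
    // The NameNode-side checks above reject empty, under-construction,
    // snapshotted, or block-size-mismatched files.
    fs.concat(target, srcs);
  }
}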

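From the NameNode's extended-attribute validation, which enforces the configured cap on the combined size of an XAttr's name and value: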
    int size = xAttr.getName().getBytes(Charsets.UTF_8).length;
    if (xAttr.getValue() != null) {
      size += xAttr.getValue().length;
    }
    if (size > nnConf.xattrMaxSize) {
      throw new HadoopIllegalArgumentException(
          "The XAttr is too big. The maximum combined size of the"
          + " name and value is " + nnConf.xattrMaxSize
          + ", but the total size is " + size);
    }
  }
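
A quick illustration of the size computation above: the limit applies to the UTF-8 byte length of the name plus the raw length of the value. The attribute used here is hypothetical:

import java.nio.charset.StandardCharsets;

public class XAttrSizeDemo {
  public static void main(String[] args) {
    String name = "user.checksum";
    byte[] value = "sha256:abc123".getBytes(StandardCharsets.UTF_8);
    // Combined size = UTF-8 byte length of the name + raw value length.
    int size = name.getBytes(StandardCharsets.UTF_8).length + value.length;
    System.out.println("combined xattr size = " + size + " bytes");
  }
}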

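From the ActiveStandbyElector constructor (HA automatic failover), an excerpt validating its ZooKeeper connection parameters: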
      List<ZKAuthInfo> authInfo,
      ActiveStandbyElectorCallback app) throws IOException,
      HadoopIllegalArgumentException, KeeperException {
    if (app == null || acl == null || parentZnodeName == null
        || zookeeperHostPorts == null || zookeeperSessionTimeout <= 0) {
      throw new HadoopIllegalArgumentException("Invalid argument");
    }
    zkHostPort = zookeeperHostPorts;
    zkSessionTimeout = zookeeperSessionTimeout;
    zkAcl = acl;
    zkAuthInfo = authInfo;
