Package org.apache.hadoop

Usage examples of org.apache.hadoop.HadoopIllegalArgumentException, collected from the Hadoop code base. Each snippet shows a validation guard that throws this unchecked exception on bad input.


  /**
   * Check that the Uri's scheme matches the scheme this file system supports.
   * @param uri the Uri to check
   * @param supportedScheme the expected scheme
   */
  public void checkScheme(URI uri, String supportedScheme) {
    String scheme = uri.getScheme();
    if (scheme == null) {
      throw new HadoopIllegalArgumentException("Uri without scheme: " + uri);
    }
    if (!scheme.equals(supportedScheme)) {
      throw new HadoopIllegalArgumentException("Uri scheme " + uri
          + " does not match the scheme " + supportedScheme);
    }
  }
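
For context, a minimal sketch of inputs that hit each branch, assuming a supported scheme of "hdfs" (only java.net.URI is involved; the enclosing class looks like AbstractFileSystem, but that is inferred from the snippet):

  URI ok   = URI.create("hdfs://namenode:8020/tmp/x"); // scheme "hdfs": checkScheme(ok, "hdfs") passes
  URI none = URI.create("/tmp/x");                     // no scheme: "Uri without scheme: /tmp/x"
  URI bad  = URI.create("file:///tmp/x");              // scheme "file": mismatch against "hdfs"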


  private URI getUri(URI uri, String supportedScheme,
      boolean authorityNeeded, int defaultPort) throws URISyntaxException {
    checkScheme(uri, supportedScheme);
    // A file system implementation that requires authority must always
    // specify default port
    if (defaultPort < 0 && authorityNeeded) {
      throw new HadoopIllegalArgumentException(
          "FileSystem implementation error -  default port " + defaultPort
              + " is not valid");
    }
    String authority = uri.getAuthority();
    if (authority == null) {
       if (authorityNeeded) {
         throw new HadoopIllegalArgumentException("Uri without authority: " + uri);
       } else {
         return new URI(supportedScheme + ":///");
       }  
    }
    // authority is non-null - authorityNeeded may be true or false.
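
The authority handling above keys off java.net.URI parsing; a quick hedged sketch of what getAuthority() returns for typical inputs (standard library only):

  URI.create("hdfs://nn.example.com:8020/d").getAuthority(); // "nn.example.com:8020"
  URI.create("hdfs:///d").getAuthority();                    // null: rejected when
                                                             // authorityNeeded, else
                                                             // rewritten to "hdfs:///"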

    long blockSize = -1;
    int bufferSize = -1;
    short replication = -1;
    int bytesPerChecksum = -1;
    FsPermission permission = null;
    Progressable progress = null;
    Boolean createParent = null;
    for (CreateOpts iOpt : opts) {
      if (CreateOpts.BlockSize.class.isInstance(iOpt)) {
        if (blockSize != -1) {
          throw new HadoopIllegalArgumentException(
              "BlockSize option is set multiple times");
        }
        blockSize = ((CreateOpts.BlockSize) iOpt).getValue();
      } else if (CreateOpts.BufferSize.class.isInstance(iOpt)) {
        if (bufferSize != -1) {
          throw new HadoopIllegalArgumentException(
              "BufferSize option is set multiple times");
        }
        bufferSize = ((CreateOpts.BufferSize) iOpt).getValue();
      } else if (CreateOpts.ReplicationFactor.class.isInstance(iOpt)) {
        if (replication != -1) {
          throw new HadoopIllegalArgumentException(
              "ReplicationFactor option is set multiple times");
        }
        replication = ((CreateOpts.ReplicationFactor) iOpt).getValue();
      } else if (CreateOpts.BytesPerChecksum.class.isInstance(iOpt)) {
        if (bytesPerChecksum != -1) {
          throw new HadoopIllegalArgumentException(
              "BytesPerChecksum option is set multiple times");
        }
        bytesPerChecksum = ((CreateOpts.BytesPerChecksum) iOpt).getValue();
      } else if (CreateOpts.Perms.class.isInstance(iOpt)) {
        if (permission != null) {
          throw new HadoopIllegalArgumentException(
              "Perms option is set multiple times");
        }
        permission = ((CreateOpts.Perms) iOpt).getValue();
      } else if (CreateOpts.Progress.class.isInstance(iOpt)) {
        if (progress != null) {
          throw new HadoopIllegalArgumentException(
              "Progress option is set multiple times");
        }
        progress = ((CreateOpts.Progress) iOpt).getValue();
      } else if (CreateOpts.CreateParent.class.isInstance(iOpt)) {
        if (createParent != null) {
          throw new HadoopIllegalArgumentException(
              "CreateParent option is set multiple times");
        }
        createParent = ((CreateOpts.CreateParent) iOpt).getValue();
      } else {
        throw new HadoopIllegalArgumentException("Unkown CreateOpts of type " +
            iOpt.getClass().getName());
      }
    }
    if (permission == null) {
      throw new HadoopIllegalArgumentException("no permission supplied");
    }


    FsServerDefaults ssDef = getServerDefaults();
    if (ssDef.getBlockSize() % ssDef.getBytesPerChecksum() != 0) {
      throw new IOException("Internal error: default blockSize is" +
          " not a multiple of default bytesPerChecksum ");
    }
   
    if (blockSize == -1) {
      blockSize = ssDef.getBlockSize();
    }
    if (bytesPerChecksum == -1) {
      bytesPerChecksum = ssDef.getBytesPerChecksum();
    }
    if (bufferSize == -1) {
      bufferSize = ssDef.getFileBufferSize();
    }
    if (replication == -1) {
      replication = ssDef.getReplication();
    }
    if (createParent == null) {
      createParent = false;
    }

    if (blockSize % bytesPerChecksum != 0) {
      throw new HadoopIllegalArgumentException(
             "blockSize should be a multiple of checksumsize");
    }

    return this.createInternal(f, createFlag, permission, bufferSize,
      replication, blockSize, progress, bytesPerChecksum, createParent);
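
Callers typically reach this validation through FileContext.create with CreateOpts varargs; a hedged sketch (path, permission, and sizes are illustrative):

  FileContext fc = FileContext.getFileContext();
  fc.create(new Path("/tmp/demo"),
      EnumSet.of(CreateFlag.CREATE),
      CreateOpts.perms(FsPermission.getDefault()), // satisfies the "no permission supplied" guard
      CreateOpts.blockSize(128 * 1024 * 1024),
      CreateOpts.createParent());
  // Repeating an option, e.g. passing CreateOpts.blockSize(...) twice, raises
  // HadoopIllegalArgumentException("BlockSize option is set multiple times").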

   
    if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf)) &&
        (startOpt == StartupOption.UPGRADE ||
         startOpt == StartupOption.ROLLBACK ||
         startOpt == StartupOption.FINALIZE)) {
      throw new HadoopIllegalArgumentException("Invalid startup option. " +
          "Cannot perform DFS upgrade with HA enabled.");
    }

    switch (startOpt) {
      case FORMAT: {
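
HAUtil.isHAEnabled reports true when the nameservice declares more than one NameNode; a hedged sketch of a configuration that makes -upgrade fail here (standard HA keys, illustrative values):

  Configuration conf = new HdfsConfiguration();
  conf.set("dfs.nameservices", "mycluster");
  conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2"); // two NNs -> HA enabled
  // Starting this NameNode with -upgrade, -rollback, or -finalize now fails with
  // "Invalid startup option. Cannot perform DFS upgrade with HA enabled."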

   * Pathnames with a scheme but a relative path-part are illegal.
   */
  void checkNotSchemeWithRelative() {
    if (toUri().isAbsolute() && !isUriPathAbsolute()) {
      throw new HadoopIllegalArgumentException(
          "Unsupported name: has scheme but relative path-part");
    }
  }


  void checkNotRelative() {
    if (!isAbsolute() && toUri().getScheme() == null) {
      throw new HadoopIllegalArgumentException("Path is relative");
    }
  }
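
Both Path checks reduce to java.net.URI semantics; a hedged sketch of values that trip them:

  new Path("hdfs://nn:8020/a/b"); // scheme and absolute path-part: passes both checks
  new Path("a/b");                // no scheme, not absolute: checkNotRelative() would reject it
  // A value like "xyz:readme.txt" parses with scheme "xyz" but a relative
  // path-part, so checkNotSchemeWithRelative() would reject it.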

  private void parseArgs(String[] args) {
    for (String arg : args) {
      if ("-force".equals(arg)) {
        force = true;
      } else if ("-nonInteractive".equals(arg)) {
        interactive = false;
      } else {
        printUsage();
        throw new HadoopIllegalArgumentException(
            "Illegal argument: " + arg);
      }
    }
  }
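
These flags appear to belong to the standby-bootstrap tooling (see parseConfAndFindOtherNN below), so a typical invocation would look like this (hedged):

  hdfs namenode -bootstrapStandby -force -nonInteractive

Any unrecognized token prints the usage text and aborts with "Illegal argument".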

  private void parseConfAndFindOtherNN() throws IOException {
    Configuration conf = getConf();
    nsId = DFSUtil.getNamenodeNameServiceId(conf);

    if (!HAUtil.isHAEnabled(conf, nsId)) {
      throw new HadoopIllegalArgumentException(
          "HA is not enabled for this namenode.");
    }
    nnId = HAUtil.getNameNodeId(conf, nsId);
    NameNode.initializeGenericKeys(conf, nsId, nnId);

    if (!HAUtil.usesSharedEditsDir(conf)) {
      throw new HadoopIllegalArgumentException(
        "Shared edits storage is not enabled for this namenode.");
    }
   
    Configuration otherNode = HAUtil.getConfForOtherNode(conf);
    otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
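
Besides the HA keys shown earlier, this method insists on a shared edits directory; a hedged sketch of the extra key it checks (QJM URI illustrative):

  conf.set("dfs.namenode.shared.edits.dir",
      "qjournal://jn1:8485;jn2:8485;jn3:8485/mycluster");
  // Without this key, HAUtil.usesSharedEditsDir(conf) is false and the method
  // fails with "Shared edits storage is not enabled for this namenode."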

   * modification time update and space count update are not needed.
   */
  private void addToParent(INodeDirectory parent, INode child) {
    FSDirectory fsDir = namesystem.dir;
    if (parent == fsDir.rootDir && FSDirectory.isReservedName(child)) {
        throw new HadoopIllegalArgumentException("File name \""
            + child.getLocalName() + "\" is reserved. Please "
            + " change the name of the existing file or directory to another "
            + "name before upgrading to this release.");
    }
    // NOTE: This does not update space counts for parents
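
For context (hedged): the reserved root-level name this protects is ".reserved", which newer HDFS releases claim for inode-path access, e.g.:

  Path byInodeId = new Path("/.reserved/.inodes/16386"); // inode-id addressing under /.reserved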

      List<ZKAuthInfo> authInfo,
      ActiveStandbyElectorCallback app) throws IOException,
      HadoopIllegalArgumentException, KeeperException {
    if (app == null || acl == null || parentZnodeName == null
        || zookeeperHostPorts == null || zookeeperSessionTimeout <= 0) {
      throw new HadoopIllegalArgumentException("Invalid argument");
    }
    zkHostPort = zookeeperHostPorts;
    zkSessionTimeout = zookeeperSessionTimeout;
    zkAcl = acl;
    zkAuthInfo = authInfo;
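
A hedged sketch of tripping the guard, assuming the six-argument constructor reconstructed above (a real caller supplies an ActiveStandbyElectorCallback implementation; the constructor's other checked exceptions are omitted for brevity):

  try {
    new ActiveStandbyElector("zk1:2181,zk2:2181", 5000, "/my-election",
        ZooDefs.Ids.OPEN_ACL_UNSAFE, Collections.<ZKAuthInfo>emptyList(),
        null /* callback */);
  } catch (HadoopIllegalArgumentException e) {
    // a null argument or non-positive session timeout lands here: "Invalid argument"
  }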
