
Examples of org.apache.hadoop.fs.Path.toUri()

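Before the collected snippets, here is a minimal standalone sketch of what Path.toUri() returns; the paths and the hdfs://namenode:8020 authority below are illustrative assumptions, not taken from the snippets.

    import java.net.URI;
    import org.apache.hadoop.fs.Path;

    public class PathToUriExample {
      public static void main(String[] args) {
        // A bare local path yields a URI with no scheme.
        Path local = new Path("/tmp/data/part-00000");
        URI localUri = local.toUri();
        System.out.println(localUri);             // /tmp/data/part-00000
        System.out.println(localUri.getScheme()); // null

        // A fully qualified path keeps its scheme and authority.
        Path qualified = new Path("hdfs://namenode:8020/user/alice/input.txt");
        System.out.println(qualified.toUri().getScheme()); // hdfs
        System.out.println(qualified.toUri().getPath());   // /user/alice/input.txt
      }
    }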

    // add a file that will fail to localize
    DistributedCache.addCacheFile(firstCacheFile.toUri(), conf2);
    // add a file that is already localized by different job
    DistributedCache.addCacheFile(secondCacheFile.toUri(), conf2);
    // add a file that is never localized
    DistributedCache.addCacheFile(thirdCacheFile.toUri(), conf2);
   
    TrackerDistributedCacheManager.determineTimestamps(conf2);
    TrackerDistributedCacheManager.determineCacheVisibilities(conf2);

    // Task localizing for second job


      createPrivateTempFile(cacheFile);
    }

    Configuration conf1 = new Configuration(conf);
    conf1.set("user.name", userName);
    DistributedCache.addCacheFile(cacheFile.toUri(), conf1);
    TrackerDistributedCacheManager.determineTimestamps(conf1);
    TrackerDistributedCacheManager.determineCacheVisibilities(conf1);
    dumpState(conf1);

    // Task localizing for job

    String[] finalArr = new String[fileArr.length];
    for (int i = 0; i < fileArr.length; i++) {
      String tmp = fileArr[i];
      String finalPath;
      Path path = new Path(tmp);
      URI pathURI =  path.toUri();
      FileSystem localFs = FileSystem.getLocal(conf);
      if (pathURI.getScheme() == null) {
        //default to the local file system
        //check if the file exists or not first
        if (!localFs.exists(path)) {

         * Read the data from the single map-output file and
         * send it to the reducer.
         */
        //open the map-output file
        mapOutputIn = SecureIOUtils.openForRead(
            new File(mapOutputFileName.toUri().getPath()), runAsUserName, null);

        //seek to the correct offset for the reduce
        mapOutputIn.skip(info.startOffset);
        long rem = info.partLength;
        int len =
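The snippet above turns a Path into a java.io.File with toUri().getPath(), which drops the scheme and authority and keeps only the local path. A minimal sketch of that conversion, with an illustrative file name:

    import java.io.File;
    import org.apache.hadoop.fs.Path;

    public class PathToFileExample {
      public static void main(String[] args) {
        Path mapOutputFileName =
            new Path("file:///tmp/mapred/local/output/file.out");
        // toUri().getPath() strips the "file" scheme, leaving a plain local
        // path that can back a java.io.File.
        File mapOutputFile = new File(mapOutputFileName.toUri().getPath());
        System.out.println(mapOutputFile.getPath()); // /tmp/mapred/local/output/file.out
      }
    }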

      String[] fileArr = files.split(",");
      for (String tmpFile: fileArr) {
        Path tmp = new Path(tmpFile);
        Path newPath = copyRemoteFiles(fs,filesDir, tmp, job, replication);
        try {
          URI pathURI = new URI(newPath.toUri().toString() + "#" + newPath.getName());
          DistributedCache.addCacheFile(pathURI, job);
        } catch(URISyntaxException ue) {
          // should not throw a URI exception
          throw new IOException("Failed to create uri for " + tmpFile);
        }

      String[] libjarsArr = libjars.split(",");
      for (String tmpjars: libjarsArr) {
        Path tmp = new Path(tmpjars);
        Path newPath = copyRemoteFiles(fs, libjarsDir, tmp, job, replication);
        DistributedCache.addFileToClassPath(
          new Path(newPath.toUri().getPath()), job, fs);
      }
    }
   
   
    if (archives != null) {

     String[] archivesArr = archives.split(",");
     for (String tmpArchives: archivesArr) {
       Path tmp = new Path(tmpArchives);
       Path newPath = copyRemoteFiles(fs, archivesDir, tmp, job, replication);
       try {
         URI pathURI = new URI(newPath.toUri().toString() + "#" + newPath.getName());
         DistributedCache.addCacheArchive(pathURI, job);
       } catch(URISyntaxException ue) {
         // should not throw a URI exception
         throw new IOException("Failed to create uri for " + tmpArchives);
       }
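The two copyRemoteFiles snippets above append "#" + newPath.getName() to the URI string; the fragment after '#' is the name DistributedCache uses when linking the cached file into the task's working directory. A minimal sketch of that pattern, with an illustrative path and a plain Configuration standing in for the job:

    import java.net.URI;
    import java.net.URISyntaxException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.filecache.DistributedCache;
    import org.apache.hadoop.fs.Path;

    public class CacheFragmentExample {
      public static void main(String[] args) throws URISyntaxException {
        Configuration job = new Configuration();
        Path newPath = new Path("hdfs://namenode:8020/tmp/staging/lookup.dat");

        // Append "#<name>" so the cached copy is exposed as "lookup.dat" locally.
        URI pathURI = new URI(newPath.toUri().toString() + "#" + newPath.getName());
        DistributedCache.addCacheFile(pathURI, job);
      }
    }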

          localJobFile = null;
        }

        Path tempDir = jobtracker.getSystemDirectoryForJob(getJobID());
        new CleanupQueue().addToQueue(new PathDeletionContext(
            jobtracker.getFileSystem(), tempDir.toUri().getPath()));
      } catch (IOException e) {
        LOG.warn("Error cleaning up "+profile.getJobID()+": "+e);
      }

      cleanUpMetrics();

    TokenCache.setJobToken(token, tokenStorage);
       
    // write TokenStorage out
    tokenStorage.writeTokenStorageFile(keysFile, jobtracker.getConf());
    LOG.info("jobToken generated and stored with users keys in "
        + keysFile.toUri().getPath());
  }
 
  int getMaxCacheLevel() {
    return maxLevel;
  }

    @Override
    protected int execute(String[] args) throws IOException, InterruptedException {
        Configuration conf = getConf();
        Path path = getPath(conf);
        FileSystem fileSystem = FileSystem.get(path.toUri(), conf);
        try {
            LOG.info(MessageFormat.format(
                    "Searching for cleanup target: batchId={0}, flowId={1}, executionId={2}, operationId={3}, path={4}",
                    getBatchId(),
                    getFlowId(),
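The last snippet resolves the FileSystem that owns a path by handing its URI to FileSystem.get(URI, Configuration), which selects the implementation for the path's scheme and authority rather than the configured default. A minimal sketch, with an illustrative hdfs:// path:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FileSystemForPathExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path path = new Path("hdfs://namenode:8020/user/alice/cleanup");

        // Picks the FileSystem matching the path's scheme and authority
        // (hdfs here), not necessarily fs.defaultFS.
        FileSystem fileSystem = FileSystem.get(path.toUri(), conf);
        System.out.println(fileSystem.getUri());
      }
    }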
