Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.DF
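org.apache.hadoop.fs.DF wraps the platform's df command (newer releases also fall back to java.io.File space queries) and reports capacity, usage, and mount information for the partition backing a given directory. A minimal sketch of direct usage, assuming a local directory such as /tmp and a default Configuration:

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;

public class DfProbe {
  public static void main(String[] args) throws IOException {
    // Probe the partition that backs /tmp (any existing local directory works).
    DF df = new DF(new File("/tmp"), new Configuration());

    System.out.println("filesystem: " + df.getFilesystem()); // device, e.g. /dev/sda1
    System.out.println("mount:      " + df.getMount());      // mount point, e.g. /
    System.out.println("capacity:   " + df.getCapacity());   // total bytes on the partition
    System.out.println("used:       " + df.getUsed());       // bytes in use
    System.out.println("available:  " + df.getAvailable());  // bytes available to the process
  }
}

The snippets below show how Hadoop itself uses DF, mostly to cache one probe per local directory and decide whether enough disk space is available.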


    if (minSpace == 0) {
      return true;
    }
    String[] localDirs = fConf.getLocalDirs();
    for (int i = 0; i < localDirs.length; i++) {
      DF df = null;
      if (localDirsDf.containsKey(localDirs[i])) {
        df = localDirsDf.get(localDirs[i]);
      } else {
        df = new DF(new File(localDirs[i]), fConf);
        localDirsDf.put(localDirs[i], df);
      }

      if (df.getAvailable() > minSpace)
        return true;
    }

    return false;
  }


    if (minSpace == 0) {
      return true;
    }
    String[] localDirs = fConf.getLocalDirs();
    for (int i = 0; i < localDirs.length; i++) {
      DF df = null;
      if (localDirsDf.containsKey(localDirs[i])) {
        df = localDirsDf.get(localDirs[i]);
      } else {
        df = new DF(new File(localDirs[i]), fConf);
        localDirsDf.put(localDirs[i], df);
      }

      if (df.getAvailable() < minSpace)
        return false;
    }

    return true;
  }
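Both variants above keep one DF per local directory in the localDirsDf map, so repeated checks reuse the same probe instead of re-running df. The first variant returns true as soon as any directory has more than minSpace available, while the second requires every directory to have at least minSpace. A self-contained sketch of the stricter all-directories check, with the directory list supplied by the caller (the class and field names are illustrative, not Hadoop's):

import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;

public class LocalDirSpaceCheck {
  private final Map<String, DF> dfCache = new HashMap<String, DF>();
  private final Configuration conf = new Configuration();

  /** True only if every local directory has at least minSpace bytes available. */
  boolean enoughFreeSpace(String[] localDirs, long minSpace) throws IOException {
    if (minSpace == 0) {
      return true;
    }
    for (String dir : localDirs) {
      DF df = dfCache.get(dir);
      if (df == null) {
        df = new DF(new File(dir), conf);  // one df probe per directory, reused afterwards
        dfCache.put(dir, df);
      }
      if (df.getAvailable() < minSpace) {
        return false;
      }
    }
    return true;
  }
}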

      if (!detachDir.mkdirs()) {
        if (!detachDir.isDirectory()) {
          throw new IOException("Mkdirs failed to create " + detachDir.toString());
        }
      }
      this.usage = new DF(parent, conf);
      this.dfsUsage = new DU(parent, conf);
      this.dfsUsage.start();
    }
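Here DF tracks free space on the partition holding the volume, while DU runs du on the directory in a background refresh thread started by start(). A small sketch of the DU side as it existed alongside these snippets (the path is just an example):

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DU;

public class DuProbe {
  public static void main(String[] args) throws IOException {
    DU du = new DU(new File("/tmp"), new Configuration()); // any existing local directory
    du.start();                   // background thread keeps the du value refreshed
    try {
      System.out.println("bytes used under /tmp: " + du.getUsed());
    } finally {
      du.shutdown();              // stop the refresh thread
    }
  }
}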

 
  private long getFreeSpace() throws IOException {
    long biggestSeenSoFar = 0;
    String[] localDirs = fConf.getLocalDirs();
    for (int i = 0; i < localDirs.length; i++) {
      DF df = null;
      if (localDirsDf.containsKey(localDirs[i])) {
        df = localDirsDf.get(localDirs[i]);
      } else {
        df = new DF(new File(localDirs[i]), fConf);
        localDirsDf.put(localDirs[i], df);
      }

      long availOnThisVol = df.getAvailable();
      if (availOnThisVol > biggestSeenSoFar) {
        biggestSeenSoFar = availOnThisVol;
      }
    }
    return biggestSeenSoFar;
  }

      try {
        // canonicalize f  
        f = makeAbsolute(f).getCanonicalFile();
     
        // find highest writable parent dir of f on the same device
        String device = new DF(f.toString(), getConf()).getMount();
        File parent = f.getParentFile();
        File dir;
        do {
          dir = parent;
          parent = parent.getParentFile();

            configCapacity));
        assertTrue(percentBpUsed == DFSUtil.getPercentUsed(bpUsed,
            configCapacity));
      }  
     
      DF df = new DF(new File(cluster.getDataDirectory()), conf);
    
      //
      // Currently the data node creates two data directories in the
      // MiniDFSCluster, and each data directory reports the full disk
      // capacity of the partition it resides on. Hence the capacity
      // reported by the data node is twice the disk capacity.
      //
      // So multiply the disk capacity and reserved space by two
      // to account for this.
      //
      int numOfDataDirs = 2;
     
      long diskCapacity = numOfDataDirs * df.getCapacity();
      reserved *= numOfDataDirs;
     
      configCapacity = namesystem.getCapacityTotal();
      used = namesystem.getCapacityUsed();
      nonDFSUsed = namesystem.getNonDfsUsedSpace();

    private DU dfsUsage;
   
    FSVolume(FSDataset dataset, File currentDir, Configuration conf) throws IOException {
      this.currentDir = currentDir;
      File parent = currentDir.getParentFile();
      this.usage = new DF(parent, conf);
      this.reserved = usage.getReserved();
      this.dataset = dataset;
      this.namespaceMap = new NamespaceMap();
      this.dfsUsage = new DU(currentDir, conf);
      this.dfsUsage.start();

    private boolean required;
    private String volume;
   
    public CheckedVolume(File dirToCheck, boolean required)
        throws IOException {
      df = new DF(dirToCheck, conf);
      this.required = required;
      volume = df.getFilesystem();
    }
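CheckedVolume records df.getFilesystem() as its volume identity, so directories backed by the same device can be recognized as one volume rather than checked twice. A hedged sketch of that de-duplication idea (class name and sample paths are illustrative):

import java.io.File;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;

public class VolumeDeDup {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // One DF per distinct device, keyed by df.getFilesystem().
    Map<String, DF> volumes = new HashMap<String, DF>();
    for (String dir : new String[] {"/tmp", "/var/tmp"}) {  // sample local directories
      DF df = new DF(new File(dir), conf);
      String volume = df.getFilesystem();
      if (!volumes.containsKey(volume)) {
        volumes.put(volume, df);
      }
    }
    for (Map.Entry<String, DF> e : volumes.entrySet()) {
      System.out.println(e.getKey() + " available: " + e.getValue().getAvailable());
    }
  }
}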
