Package org.apache.hadoop.dfs

Examples of org.apache.hadoop.dfs.DistributedFileSystem
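Every snippet on this page follows the same basic pattern: obtain a FileSystem from a Configuration, then downcast to DistributedFileSystem when HDFS-specific calls are needed. A minimal, self-contained sketch of that pattern (the NameNode address is a placeholder, not taken from any snippet below):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.dfs.DistributedFileSystem;
    import org.apache.hadoop.fs.FileSystem;

    public class DfsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.default.name", "hdfs://localhost:9000/"); // placeholder address
        FileSystem fs = FileSystem.get(conf);
        if (fs instanceof DistributedFileSystem) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // HDFS-only calls such as dfs.getRawCapacity() go here
        }
        fs.close();
      }
    }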


    // "cluster" here is presumably a MiniDFSCluster started earlier in the test
    final int nameNodePort = cluster.getNameNodePort();

    // Get a reference to its DFS directly
    FileSystem fs = cluster.getFileSystem();
    assertTrue(fs instanceof DistributedFileSystem);
    DistributedFileSystem directDfs = (DistributedFileSystem) fs;

    // Get another reference via network using a specific socket factory
    Configuration cconf = new Configuration();
    cconf.set("fs.default.name", String.format("hdfs://localhost:%s/",
        nameNodePort + 10));
    cconf.set("hadoop.rpc.socket.factory.class.default",
        "org.apache.hadoop.ipc.DummySocketFactory");
    cconf.set("hadoop.rpc.socket.factory.class.ClientProtocol",
        "org.apache.hadoop.ipc.DummySocketFactory");
    cconf.set("hadoop.rpc.socket.factory.class.JobSubmissionProtocol",
        "org.apache.hadoop.ipc.DummySocketFactory");

    fs = FileSystem.get(cconf);
    assertTrue(fs instanceof DistributedFileSystem);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    JobClient client = null;

    try {
      // This will test RPC to the NameNode only.
      // could we test Client-DataNode connections?
      Path filePath = new Path("/dir");

      assertFalse(directDfs.exists(filePath));
      assertFalse(dfs.exists(filePath));

      directDfs.mkdirs(filePath);
      assertTrue(directDfs.exists(filePath));
      assertTrue(dfs.exists(filePath));

      // This will test RPC to a JobTracker
      MiniMRCluster mr = new MiniMRCluster(1, fs.getUri().toString(), 1);
      final int jobTrackerPort = mr.getJobTrackerPort();

      JobConf jconf = new JobConf(cconf);
      jconf.set("mapred.job.tracker", String.format("localhost:%d",
          jobTrackerPort + 10));
      client = new JobClient(jconf);

      JobStatus[] jobs = client.jobsToComplete();
      assertTrue(jobs.length == 0);

    } finally {
      try {
        if (client != null)
          client.close();
      } catch (Exception ignored) {
        // nothing we can do
        ignored.printStackTrace();
      }
      try {
        if (dfs != null)
          dfs.close();

      } catch (Exception ignored) {
        // nothing we can do
        ignored.printStackTrace();
      }
View Full Code Here
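The test above points the client at "real port + 10" and installs org.apache.hadoop.ipc.DummySocketFactory, whose source is not shown here; for the assertions to pass, that factory has to route connections back to the actual ports. A minimal sketch of such a port-shifting factory, assuming a connect-time rewrite (the class name, the fixed offset, and the rewrite strategy are all assumptions, not the real DummySocketFactory):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.Socket;
    import java.net.SocketAddress;

    import org.apache.hadoop.net.StandardSocketFactory;

    // Hypothetical port-shifting factory; mirrors what the test's socket
    // factory would need to do for the "+ 10" addresses above to work.
    public class PortShiftingSocketFactory extends StandardSocketFactory {
      private static final int OFFSET = 10; // must match the offset the test adds

      @Override
      public Socket createSocket() throws IOException {
        // Hadoop's RPC client asks for an unconnected socket and calls
        // connect() later, so the rewrite has to happen at connect time.
        return new Socket() {
          @Override
          public void connect(SocketAddress endpoint, int timeout)
              throws IOException {
            InetSocketAddress addr = (InetSocketAddress) endpoint;
            super.connect(new InetSocketAddress(addr.getHostName(),
                addr.getPort() - OFFSET), timeout);
          }
        };
      }
    }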


  throws IOException {
    if (!(fs instanceof DistributedFileSystem)) {
      return;
    }
    IOException exception = null;
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
      if (dfs.exists(new Path("/"))) {
        return;
      }
    } catch (IOException e) {
      exception = RemoteExceptionHandler.checkIOException(e);
    }
View Full Code Here
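The helper above is cut off right after the root-path probe. A plausible completion, assuming the method should throw a descriptive IOException when the probe fails; the method name checkFileSystemAvailable and the message text are guesses from context, not the original source:

    // Hypothetical full shape of the fragment above; everything after the
    // first catch block is an assumed reconstruction of the truncated tail.
    public static void checkFileSystemAvailable(final FileSystem fs)
        throws IOException {
      if (!(fs instanceof DistributedFileSystem)) {
        return; // only HDFS needs (or supports) this health check
      }
      IOException exception = null;
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      try {
        if (dfs.exists(new Path("/"))) {
          return; // the NameNode answered, so the file system is usable
        }
      } catch (IOException e) {
        exception = RemoteExceptionHandler.checkIOException(e);
      }
      try {
        dfs.close(); // best effort: the handle is useless at this point
      } catch (Exception e) {
        // ignore; we are already reporting a failure
      }
      IOException io = new IOException("File system is not available");
      io.initCause(exception);
      throw io;
    }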

    if (!(fs instanceof DistributedFileSystem)) {
      return true;
    }
    String exception = "";
    boolean available = false;
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    try {
      if (dfs.exists(new Path("/"))) {
        available = true;
      }
    } catch (IOException e) {
      exception = e.getMessage();
    }
View Full Code Here
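A common way such a boolean check gets used is to poll until the DFS comes up. The helper name isFileSystemAvailable, the deadline, and the back-off below are all assumptions for illustration:

    // Hypothetical caller: probe until HDFS responds or a deadline passes.
    // (isFileSystemAvailable is an assumed name for the fragment above; the
    // enclosing method must declare or handle InterruptedException.)
    long deadline = System.currentTimeMillis() + 30000L; // 30-second budget
    while (!isFileSystemAvailable(fs)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IOException("DFS still unavailable after 30 seconds");
      }
      Thread.sleep(1000L); // back off between probes
    }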

        long usedBytes = fs.getUsed();
        stats.put(USED_BYTES_KEY, Long.toString(usedBytes));

        if (fs instanceof DistributedFileSystem) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;

            long rawCapacityBytes = dfs.getRawCapacity();
            stats.put(RAW_CAPACITY_KEY, Long.toString(rawCapacityBytes));

            long rawUsedBytes = dfs.getRawUsed();
            stats.put(RAW_USED_KEY, Long.toString(rawUsedBytes));
        }

        return stats;
    }
View Full Code Here
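The fragment above is the tail of a statistics helper: it records overall usage for any FileSystem, then adds the raw (pre-replication) numbers that only HDFS exposes. A self-contained sketch of how the whole helper might look; the class name, method name, and key constant values are assumptions, since the page only shows the constants being used:

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.dfs.DistributedFileSystem;
    import org.apache.hadoop.fs.FileSystem;

    public class DfsStats {
        // key names are assumed; only the constant identifiers appear above
        static final String USED_BYTES_KEY   = "usedBytes";
        static final String RAW_CAPACITY_KEY = "rawCapacityBytes";
        static final String RAW_USED_KEY     = "rawUsedBytes";

        static Map<String, String> getStats(FileSystem fs) throws IOException {
            Map<String, String> stats = new HashMap<String, String>();
            stats.put(USED_BYTES_KEY, Long.toString(fs.getUsed()));

            // raw capacity/usage are HDFS-specific, so guard the downcast
            if (fs instanceof DistributedFileSystem) {
                DistributedFileSystem dfs = (DistributedFileSystem) fs;
                stats.put(RAW_CAPACITY_KEY, Long.toString(dfs.getRawCapacity()));
                stats.put(RAW_USED_KEY, Long.toString(dfs.getRawUsed()));
            }
            return stats;
        }
    }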
