Package org.apache.hadoop.dfs

Examples of org.apache.hadoop.dfs.MiniDFSCluster
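Every snippet on this page follows the same lifecycle: build a Configuration, construct the cluster, fetch its FileSystem, run the test, and shut the cluster down. The constructor arguments used throughout are the Configuration, the number of DataNodes, whether to format the name space, and an optional rack assignment. A minimal self-contained sketch of that pattern (class and method names are taken from the snippets below; the class name and the /demo path are arbitrary):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.dfs.MiniDFSCluster;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MiniDFSClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster dfs = null;
    try {
      // conf, number of DataNodes, format the name space, rack assignments
      dfs = new MiniDFSCluster(conf, 2, true, (String[]) null);
      FileSystem fs = dfs.getFileSystem();
      fs.mkdirs(new Path("/demo"));         // use it like any other FileSystem
    } finally {
      if (dfs != null) { dfs.shutdown(); }  // always release ports and temp dirs
    }
  }
}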


  public void testMultipleCachefiles()
  {
    try {
      boolean mayExit = false;
      MiniMRCluster mr = null;
      MiniDFSCluster dfs = null;
      try {
        Configuration conf = new Configuration();
        dfs = new MiniDFSCluster(conf, 1, true, null);
        FileSystem fileSys = dfs.getFileSystem();
        String namenode = fileSys.getName();
        mr = new MiniMRCluster(1, namenode, 3);
        // During tests, the default Configuration will use a local mapred
        // So don't specify -config or -cluster
        String strJobtracker = "mapred.job.tracker=" + "localhost:" + mr.getJobTrackerPort();
        String strNamenode = "fs.default.name=" + namenode;
        String argv[] = new String[] {
          "-input", INPUT_FILE,
          "-output", OUTPUT_DIR,
          "-mapper", map,
          "-reducer", reduce,
          //"-verbose",
          //"-jobconf", "stream.debug=set"
          "-jobconf", strNamenode,
          "-jobconf", strJobtracker,
          "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
          "-jobconf", "mapred.child.java.opts=-Dcontrib.name=" + System.getProperty("contrib.name") + " " +
                      "-Dbuild.test=" + System.getProperty("build.test") + " " +
                      conf.get("mapred.child.java.opts",""),
          "-cacheFile", "hdfs://"+fileSys.getName()+CACHE_FILE + "#" + mapString,
          "-cacheFile", "hdfs://"+fileSys.getName()+CACHE_FILE_2 + "#" + mapString2
        };

        fileSys.delete(new Path(OUTPUT_DIR));
       
        DataOutputStream file = fileSys.create(new Path(INPUT_FILE));
        file.writeBytes(mapString + "\n");
        file.writeBytes(mapString2 + "\n");
        file.close();
        file = fileSys.create(new Path(CACHE_FILE));
        file.writeBytes(cacheString);
        file.close();
        file = fileSys.create(new Path(CACHE_FILE_2));
        file.writeBytes(cacheString2);
        file.close();
         
        job = new StreamJob(argv, mayExit);    
        job.go();

        fileSys = dfs.getFileSystem();
        String line = null;
        String line2 = null;
        Path[] fileList = fileSys.listPaths(new Path(OUTPUT_DIR));
        for (int i = 0; i < fileList.length; i++){
          System.out.println(fileList[i].toString());
          BufferedReader bread =
            new BufferedReader(new InputStreamReader(fileSys.open(fileList[i])));
          line = bread.readLine();
          System.out.println(line);
          line2 = bread.readLine();
          System.out.println(line2);
        }
        assertEquals(cacheString + "\t", line);
        assertEquals(cacheString2 + "\t", line2);
      } finally {
        if (dfs != null) { dfs.shutdown(); }
        if (mr != null) { mr.shutdown();}
      }
     
    } catch (Exception e) {
      failTrace(e);
    }
  }
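Note how this streaming test wires the job to the mini clusters entirely through -jobconf overrides (fs.default.name and mapred.job.tracker), and how each -cacheFile URI carries a #fragment naming the symlink under which the cached HDFS file appears in the task's working directory. The finally block guarantees both clusters are shut down even if the assertions fail.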


    this.rows[2] = Bytes.toByteArrays(new Text[] { new Text("row_0110"), new Text("row_0175") });
    this.rows[3] = Bytes.toByteArrays(new Text[] { new Text("row_0525"), new Text("row_0560") });
    this.rows[4] = Bytes.toByteArrays(new Text[] { new Text("row_0050"), new Text("row_1000") });
   
    // Start up dfs
    this.dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
    this.fs = this.dfsCluster.getFileSystem();
    conf.set("fs.default.name", fs.getUri().toString());
    Path parentdir = fs.getHomeDirectory();
    conf.set(HConstants.HBASE_DIR, parentdir.toString());
    fs.mkdirs(parentdir);
    // ...

  /**
   * Test migration
   * @throws IOException
   */
  public void testUpgrade() throws IOException {
    MiniDFSCluster dfsCluster = null;
    try {
      dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
      // Set the hbase.rootdir to be the home directory in mini dfs.
      this.conf.set(HConstants.HBASE_DIR, new Path(
        dfsCluster.getFileSystem().getHomeDirectory(), "hbase").toString());
      FileSystem dfs = dfsCluster.getFileSystem();
      Path rootDir =
        dfs.makeQualified(new Path(conf.get(HConstants.HBASE_DIR)));
      dfs.mkdirs(rootDir);
      loadTestData(dfs, rootDir);
      listPaths(dfs, rootDir, rootDir.toString().length() + 1);
      // ...

  private MiniDFSCluster cluster;

  /** {@inheritDoc} */
  @Override
  public void setUp() throws Exception {
    cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
    // Set the hbase.rootdir to be the home directory in mini dfs.
    this.conf.set(HConstants.HBASE_DIR,
      this.cluster.getFileSystem().getHomeDirectory().toString());
    super.setUp();
    this.dir = new Path("/hbase", getName());
    // ...

  public void setUp() throws Exception {
    this.conf.set("hbase.hstore.compactionThreshold", "2");

    conf.setLong("hbase.hregion.max.filesize", 65536);

    cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
    fs = cluster.getFileSystem();
   
    // Set the hbase.rootdir to be the home directory in mini dfs.
    this.conf.set(HConstants.HBASE_DIR,
      this.cluster.getFileSystem().getHomeDirectory().toString());
    // ...
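This setUp pairs the mini cluster with deliberately tiny HBase limits: a compaction threshold of two store files and a 64 KB (65536-byte) maximum region size, so compactions and region splits can be exercised with only a handful of rows of test data.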

 
  @Override
  protected void setUp() throws Exception {
    super.setUp();
    try {
      this.miniHdfs = new MiniDFSCluster(this.conf, 1, true, null);
      // Set the hbase.rootdir to be the home directory in mini dfs.
      this.conf.set(HConstants.HBASE_DIR,
        this.miniHdfs.getFileSystem().getHomeDirectory().toString());
    } catch (Exception e) {
      LOG.fatal("error starting MiniDFSCluster", e);
      // ...

 
  /** {@inheritDoc} */
  @Override
  public void setUp() throws Exception {
    try {
      this.cluster = new MiniDFSCluster(this.conf, 2, true, (String[])null);
      // Set the hbase.rootdir to be the home directory in mini dfs.
      this.conf.set(HConstants.HBASE_DIR,
        this.cluster.getFileSystem().getHomeDirectory().toString());
      this.dir = new Path(DIR, getName());
    } catch (IOException e) {
      // ...

      // pg. 9 of BigTable paper).
      R = (ONE_GB / 10) * N;
    }
   
    MiniHBaseCluster hbaseMiniCluster = null;
    MiniDFSCluster dfsCluster = null;
    if (this.miniCluster) {
      dfsCluster = new MiniDFSCluster(conf, 2, true, (String[])null);
      // mangle the conf so that the fs parameter points to the minidfs we
      // just started up
      FileSystem fs = dfsCluster.getFileSystem();
      conf.set("fs.default.name", fs.getUri().toString());     
      Path parentdir = fs.getHomeDirectory();
      conf.set(HConstants.HBASE_DIR, parentdir.toString());
      fs.mkdirs(parentdir);
      FSUtils.setVersion(fs, parentdir);
      // ...

  /**
   * the test
   * @throws IOException
   */
  public void testGet() throws IOException {
    MiniDFSCluster cluster = null;
    HRegion region = null;

    try {
     
      // Initialization
     
      cluster = new MiniDFSCluster(conf, 2, true, (String[])null);
      // Set the hbase.rootdir to be the home directory in mini dfs.
      this.conf.set(HConstants.HBASE_DIR,
        cluster.getFileSystem().getHomeDirectory().toString());
     
      HTableDescriptor desc = new HTableDescriptor("test");
      desc.addFamily(new HColumnDescriptor(CONTENTS));
      desc.addFamily(new HColumnDescriptor(HConstants.COLUMN_FAMILY));
     
      // ...

  private MiniDFSCluster miniHdfs;

  @Override
  protected void setUp() throws Exception {
    super.setUp();
    this.miniHdfs = new MiniDFSCluster(this.conf, 1, true, null);
    // Set the hbase.rootdir to be the home directory in mini dfs.
    this.conf.set(HConstants.HBASE_DIR,
      this.miniHdfs.getFileSystem().getHomeDirectory().toString());
  }
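
The setUp methods above all have an implied counterpart: the cluster must be shut down after each test, just as the earlier try/finally examples do inline. A sketch of a matching tearDown for this last snippet (assuming a JUnit 3 TestCase base class, as the setUp/super.setUp() calls above suggest; the original class's tearDown is not shown on this page):

  @Override
  protected void tearDown() throws Exception {
    if (this.miniHdfs != null) {
      this.miniHdfs.shutdown(); // stops the NameNode and DataNodes, releases test directories
    }
    super.tearDown();
  }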
