Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FileSystem


      super(path);

      FSDataInputStream in = hfileLink.open(fs);
      FSDataInputStream inNoChecksum = in;
      if (fs instanceof HFileSystem) {
        // HFileSystem can expose a view of the filesystem that skips
        // HDFS-level checksum verification; open a second stream through it.
        FileSystem noChecksumFs = ((HFileSystem)fs).getNoChecksumFs();
        inNoChecksum = hfileLink.open(noChecksumFs);
      }

      reader = HFile.createReaderWithEncoding(fs, path, in, inNoChecksum,
                  size, cacheConf, preferredEncodingInCache, closeIStream);
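The excerpt above wraps HBase-specific readers around two FSDataInputStreams, one of them opened through a checksum-free view of the filesystem. Reduced to the plain FileSystem API, opening and reading a file looks like the following minimal sketch; the path and method name are hypothetical, not part of the excerpt.

// Requires java.io.IOException, org.apache.hadoop.conf.Configuration,
// and org.apache.hadoop.fs.{FSDataInputStream, FileSystem, Path}.
public static byte[] readHead(Configuration conf, int n) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  Path path = new Path("/tmp/example.dat"); // hypothetical file
  byte[] buf = new byte[n];
  FSDataInputStream in = fs.open(path);
  try {
    in.readFully(0, buf); // positioned read; assumes the file has at least n bytes
  } finally {
    in.close();
  }
  return buf;
}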


    // Ok, now we can start
    this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
      true, null, null, hosts, null);

    // Set this just-started cluster as our filesystem.
    FileSystem fs = this.dfsCluster.getFileSystem();
    this.conf.set("fs.defaultFS", fs.getUri().toString());
    // Do old style too just to be safe.
    this.conf.set("fs.default.name", fs.getUri().toString());

    // Wait for the cluster to be totally up
    this.dfsCluster.waitClusterUp();

    return this.dfsCluster;
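Once fs.defaultFS names the mini cluster, a plain FileSystem.get(conf) resolves to it, so the rest of the test can read and write without mentioning the cluster at all. A minimal write sketch against whatever filesystem the configuration names; the path and method name are hypothetical.

// Requires java.io.IOException, org.apache.hadoop.conf.Configuration,
// and org.apache.hadoop.fs.{FSDataOutputStream, FileSystem, Path}.
public static void writeGreeting(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.get(conf); // resolved via fs.defaultFS
  Path path = new Path("/tmp/greeting.txt"); // hypothetical file
  FSDataOutputStream out = fs.create(path, true); // overwrite if present
  try {
    out.writeBytes("hello\n");
  } finally {
    out.close();
  }
}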

   * Note: this does not cause the root dir to be created.
   * @return Fully qualified path for the default hbase root dir
   * @throws IOException
   */
  public Path getDefaultRootDirPath() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    return new Path(fs.makeQualified(fs.getHomeDirectory()), "hbase");
  }
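makeQualified resolves a path against the filesystem's URI and working directory, which is why the method above can safely build on getHomeDirectory(). A minimal sketch; the path, method name, and printed output are hypothetical.

// Requires java.io.IOException, org.apache.hadoop.conf.Configuration,
// and org.apache.hadoop.fs.{FileSystem, Path}.
public static void qualify(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  Path relative = new Path("data/input"); // no scheme, no authority
  Path qualified = fs.makeQualified(relative);
  System.out.println(qualified); // e.g. hdfs://namenode:8020/user/alice/data/input
}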

   * method if you were doing manual operation.
   * @return Fully qualified path to hbase root dir
   * @throws IOException
   */
  public Path createRootDir() throws IOException {
    FileSystem fs = FileSystem.get(this.conf);
    Path hbaseRootdir = getDefaultRootDirPath();
    this.conf.set(HConstants.HBASE_DIR, hbaseRootdir.toString());
    fs.mkdirs(hbaseRootdir);
    FSUtils.setVersion(fs, hbaseRootdir);
    return hbaseRootdir;
  }
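mkdirs creates the target directory along with any missing parents, much like mkdir -p, and returns true when the directory exists afterwards. A minimal sketch that checks the result; the path and method name are hypothetical.

// Requires java.io.IOException, org.apache.hadoop.conf.Configuration,
// and org.apache.hadoop.fs.{FileSystem, Path}.
public static void ensureDir(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  Path dir = new Path("/tmp/app/work"); // hypothetical directory
  if (!fs.mkdirs(dir)) { // creates missing parents, like mkdir -p
    throw new IOException("Could not create " + dir);
  }
}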

   * @param dir Directory to delete
   * @return True if we deleted it.
   * @throws IOException
   */
  public boolean deleteDir(final Path dir) throws IOException {
    FileSystem fs = getTestFileSystem();
    if (fs.exists(dir)) {
      return fs.delete(dir, true);
    }
    return false;
  }
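delete(path, true) removes a directory tree recursively; with false it refuses non-empty directories. For test scratch space, FileSystem can also defer the cleanup: deleteOnExit marks a path for deletion when the FileSystem is closed, which happens automatically at JVM shutdown. A minimal sketch; the path and method name are hypothetical.

// Requires java.io.IOException, org.apache.hadoop.conf.Configuration,
// and org.apache.hadoop.fs.{FileSystem, Path}.
public static void scratchDir(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  Path scratch = new Path("/tmp/scratch"); // hypothetical directory
  fs.mkdirs(scratch);
  fs.deleteOnExit(scratch); // deleted recursively when the FileSystem closes
}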

   * @throws IOException
   */
  public static User getDifferentUser(final Configuration c,
    final String differentiatingSuffix)
  throws IOException {
    FileSystem currentfs = FileSystem.get(c);
    if (!(currentfs instanceof DistributedFileSystem)) {
      return User.getCurrent();
    }
    // Else distributed filesystem.  Make a new instance per daemon.  Below
    // code is taken from the AppendTestUtil over in hdfs.
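The instanceof check above is one way to tell whether the configuration points at HDFS; inspecting the scheme of the filesystem URI is another. A minimal sketch; the method name is hypothetical.

// Requires java.io.IOException, org.apache.hadoop.conf.Configuration,
// and org.apache.hadoop.fs.FileSystem.
public static boolean isHdfs(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  return "hdfs".equals(fs.getUri().getScheme());
}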

   *   [If something fails here: we still have the table in disabled state]
   * - Update ZooKeeper with the enabled state
   */
  private void handleCreateTable(String tableName) throws IOException, KeeperException {
    Path tempdir = fileSystemManager.getTempDir();
    FileSystem fs = fileSystemManager.getFileSystem();

    // 1. Create Table Descriptor
    FSTableDescriptors.createTableDescriptor(fs, tempdir, this.hTableDescriptor);
    Path tempTableDir = new Path(tempdir, tableName);
    Path tableDir = new Path(fileSystemManager.getRootDir(), tableName);

    // 2. Create Regions
    List<HRegionInfo> regionInfos = handleCreateHdfsRegions(tempdir, tableName);

    // 3. Move Table temp directory to the hbase root location
    if (!fs.rename(tempTableDir, tableDir)) {
      throw new IOException("Unable to move table from temp=" + tempTableDir +
        " to hbase root=" + tableDir);
    }

    if (regionInfos != null && regionInfos.size() > 0) {
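rename moves a file or an entire directory tree and signals most failures by returning false rather than throwing, which is why the handler above checks the return value and raises its own IOException. A minimal sketch of the same move-and-check pattern; the paths and method name are hypothetical.

// Requires java.io.IOException, org.apache.hadoop.conf.Configuration,
// and org.apache.hadoop.fs.{FileSystem, Path}.
public static void publish(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  Path staged = new Path("/tmp/staging/table1"); // hypothetical source
  Path live = new Path("/data/table1");          // hypothetical destination
  if (!fs.rename(staged, live)) {
    throw new IOException("Unable to move " + staged + " to " + live);
  }
}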

    MasterFileSystem mfs = this.masterServices.getMasterFileSystem();
    Path tempTableDir = mfs.moveTableToTemp(tableName);

    try {
      // 4. Delete regions from FS (temp directory)
      FileSystem fs = mfs.getFileSystem();
      for (HRegionInfo hri: regions) {
        LOG.debug("Archiving region " + hri.getRegionNameAsString() + " from FS");
        HFileArchiver.archiveRegion(fs, mfs.getRootDir(),
            tempTableDir, new Path(tempTableDir, hri.getEncodedName()));
      }

      // 5. Delete table from FS (temp directory)
      if (!fs.delete(tempTableDir, true)) {
        LOG.error("Couldn't delete " + tempTableDir);
      }
    } finally {
      // 6. Update table descriptor cache
      this.masterServices.getTableDescriptors().remove(Bytes.toString(tableName));
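Before deleting a subtree wholesale, it can be worth knowing what is under it; getContentSummary aggregates length and file/directory counts for a path. A minimal sketch; the path and method name are hypothetical.

// Requires java.io.IOException, org.apache.hadoop.conf.Configuration,
// and org.apache.hadoop.fs.{ContentSummary, FileSystem, Path}.
public static void report(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  Path dir = new Path("/tmp/table-temp"); // hypothetical directory
  ContentSummary summary = fs.getContentSummary(dir);
  System.out.println(dir + ": " + summary.getFileCount() + " files, "
      + summary.getLength() + " bytes");
}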

    UTIL.loadRegion(region, TEST_FAM);

    // shutdown the table so we can manipulate the files
    admin.disableTable(STRING_TABLE_NAME);

    FileSystem fs = UTIL.getTestFileSystem();

    // now attempt to depose the region
    Path regionDir = HRegion.getRegionDir(region.getTableDir().getParent(), region.getRegionInfo());

    HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());

    // check for the existence of the archive directory and some files in it
    Path archiveDir = HFileArchiveTestingUtil.getRegionArchiveDir(UTIL.getConfiguration(), region);
    assertTrue(fs.exists(archiveDir));

    // check to make sure the store directory was copied
    FileStatus[] stores = fs.listStatus(archiveDir);
    assertEquals(1, stores.length);

    // make sure we archived the store files
    FileStatus[] storeFiles = fs.listStatus(stores[0].getPath());
    assertTrue(storeFiles.length > 0);

    // then ensure the region's directory isn't present
    assertFalse(fs.exists(regionDir));
  }
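listStatus does the heavy lifting in the assertions above: it returns one FileStatus per child, carrying the path, length, and type. A minimal listing sketch; the path and method name are hypothetical.

// Requires java.io.IOException, org.apache.hadoop.conf.Configuration,
// and org.apache.hadoop.fs.{FileStatus, FileSystem, Path}.
public static void list(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  Path dir = new Path("/tmp/archive"); // hypothetical directory
  for (FileStatus status : fs.listStatus(dir)) {
    System.out.println(status.getPath() + " "
        + (status.isDir() ? "<dir>" : status.getLen() + " bytes"));
  }
}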

    List<HRegion> servingRegions = UTIL.getHBaseCluster().getRegions(TABLE_NAME);
    // make sure we only have 1 region serving this table
    assertEquals(1, servingRegions.size());
    HRegion region = servingRegions.get(0);

    FileSystem fs = region.getFilesystem();

    // make sure there are some files in the regiondir
    Path rootDir = FSUtils.getRootDir(fs.getConf());
    Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());
    FileStatus[] regionFiles = FSUtils.listStatus(fs, regionDir, null);
    Assert.assertNotNull("No files in the region directory", regionFiles);
    if (LOG.isDebugEnabled()) {
      List<Path> files = new ArrayList<Path>();
      for (FileStatus file : regionFiles) {
        files.add(file.getPath());
      }
      LOG.debug("Current files:" + files);
    }
    // delete the visible folders so we just have hidden files/folders
    final PathFilter dirFilter = new FSUtils.DirFilter(fs);
    PathFilter nonHidden = new PathFilter() {
      @Override
      public boolean accept(Path file) {
        return dirFilter.accept(file) && !file.getName().startsWith(".");
      }
    };
    FileStatus[] storeDirs = FSUtils.listStatus(fs, regionDir, nonHidden);
    for (FileStatus store : storeDirs) {
      LOG.debug("Deleting store for test");
      fs.delete(store.getPath(), true);
    }

    // then archive the region
    HFileArchiver.archiveRegion(UTIL.getConfiguration(), fs, region.getRegionInfo());

    // and check to make sure the region directory got deleted
    assertFalse("Region directory (" + regionDir + "), still exists.", fs.exists(regionDir));
  }
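The test above filters through HBase's FSUtils wrapper, but the plain FileSystem API covers the same ground: listStatus accepts a PathFilter directly, and globStatus matches wildcard patterns. A minimal sketch; the paths, pattern, and method name are hypothetical.

// Requires java.io.IOException, org.apache.hadoop.conf.Configuration,
// and org.apache.hadoop.fs.{FileStatus, FileSystem, Path, PathFilter}.
public static void listVisible(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.get(conf);
  Path dir = new Path("/tmp/region"); // hypothetical directory

  // listStatus with a PathFilter: keep children whose names are not hidden.
  FileStatus[] visible = fs.listStatus(dir, new PathFilter() {
    @Override
    public boolean accept(Path p) {
      return !p.getName().startsWith(".");
    }
  });

  // globStatus: the same kind of selection via a wildcard pattern.
  FileStatus[] dataFiles = fs.globStatus(new Path(dir, "*.dat"));
}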
