Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FileSystem$UGISelect
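Before the snippets below, here is a minimal, self-contained sketch of the FileSystem pattern they all share: obtain a FileSystem from a Configuration, qualify a Path against it, and delete test data recursively. This is an illustrative assumption rather than code taken from any of the examples; the class name FileSystemUsageSketch and the path "/tmp/fs-usage-sketch" are hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FileSystemUsageSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Resolves to the local "file://" filesystem unless fs.defaultFS points at HDFS.
    FileSystem fs = FileSystem.get(conf);

    // Qualify a path against the filesystem URI and working directory,
    // the same way the test snippets below qualify their data directories.
    Path dir = fs.makeQualified(new Path("/tmp/fs-usage-sketch"));
    fs.mkdirs(dir);

    // Recursive delete, as used by the hbck and snapshot tests below.
    if (fs.exists(dir)) {
      fs.delete(dir, true);
    }
  }
}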


        new String[] {"hadPreBulkLoadHFile", "hadPostBulkLoadHFile"},
        tableName,
        new Boolean[] {false, false}
    );

    FileSystem fs = util.getTestFileSystem();
    final Path dir = util.getDataTestDir(testName).makeQualified(fs);
    Path familyDir = new Path(dir, Bytes.toString(A));

    createHFile(util.getConfiguration(), fs, new Path(familyDir, Bytes.toString(A)), A, A);
View Full Code Here


    conf.setInt("dfs.blocksize", 1024 * 1024);
    conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

    testUtil.startMiniDFSCluster(1);
    MiniDFSCluster cluster = testUtil.getDFSCluster();
    FileSystem fs = cluster.getFileSystem();
    assertEquals("hdfs", fs.getUri().getScheme());

    try {
      testLinkReadDuringRename(fs, testUtil.getDefaultRootDirPath());
    } finally {
      testUtil.shutdownMiniCluster();
View Full Code Here

   * even when the current file gets renamed.
   */
  @Test
  public void testLocalLinkReadDuringRename() throws IOException {
    HBaseTestingUtility testUtil = new HBaseTestingUtility();
    FileSystem fs = testUtil.getTestFileSystem();
    assertEquals("file", fs.getUri().getScheme());
    testLinkReadDuringRename(fs, testUtil.getDataTestDir());
  }
View Full Code Here

    conf.setInt("dfs.blocksize", 1024 * 1024);
    conf.setInt("dfs.client.read.prefetch.size", 2 * 1024 * 1024);

    testUtil.startMiniDFSCluster(1);
    MiniDFSCluster cluster = testUtil.getDFSCluster();
    FileSystem fs = cluster.getFileSystem();
    assertEquals("hdfs", fs.getUri().getScheme());

    try {
      List<Path> files = new ArrayList<Path>();
      for (int i = 0; i < 3; i++) {
        Path path = new Path(String.format("test-data-%d", i));
        writeSomeData(fs, path, 1 << 20, (byte)i);
        files.add(path);
      }

      FileLink link = new FileLink(files);
      FSDataInputStream in = link.open(fs);
      try {
        byte[] data = new byte[8192];
        int n;

        // Switch to file 1
        n = in.read(data);
        dataVerify(data, n, (byte)0);
        fs.delete(files.get(0), true);
        skipBuffer(in, (byte)0);

        // Switch to file 2
        n = in.read(data);
        dataVerify(data, n, (byte)1);
        fs.delete(files.get(1), true);
        skipBuffer(in, (byte)1);

        // Switch to file 3
        n = in.read(data);
        dataVerify(data, n, (byte)2);
        fs.delete(files.get(2), true);
        skipBuffer(in, (byte)2);

        // No more files available
        try {
          n = in.read(data);
View Full Code Here

    // make sure we have the snapshot
    List<SnapshotDescription> snapshots = SnapshotTestingUtils.assertOneSnapshotThatMatches(admin,
      snapshot, TABLE_NAME);

    // make sure it's a valid snapshot
    FileSystem fs = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getFileSystem();
    Path rootDir = UTIL.getHBaseCluster().getMaster().getMasterFileSystem().getRootDir();
    LOG.debug("FS state after snapshot:");
    FSUtils.logFileSystemState(UTIL.getTestFileSystem(),
      FSUtils.getRootDir(UTIL.getConfiguration()), LOG);
View Full Code Here

    byte [] enabledTable = Bytes.toBytes("enabledTable");
    HTableDescriptor htdEnabled = new HTableDescriptor(enabledTable);
    htdEnabled.addFamily(new HColumnDescriptor(FAMILY));

    FileSystem filesystem = FileSystem.get(conf);
    Path rootdir = filesystem.makeQualified(
        new Path(conf.get(HConstants.HBASE_DIR)));
    // Write the .tableinfo
    FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);

    HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(), null, null);
View Full Code Here

    };

    byte [] enabledTable = Bytes.toBytes("enabledTable");
    HTableDescriptor htdEnabled = new HTableDescriptor(enabledTable);
    htdEnabled.addFamily(new HColumnDescriptor(FAMILY));
    FileSystem filesystem = FileSystem.get(conf);
    Path rootdir = filesystem.makeQualified(
           new Path(conf.get(HConstants.HBASE_DIR)));
    // Write the .tableinfo
    FSTableDescriptors.createTableDescriptor(filesystem, rootdir, htdEnabled);
    HRegionInfo hriEnabled = new HRegionInfo(htdEnabled.getName(),
        null, null);
View Full Code Here

        }

        if (regionInfoOnly) {
          LOG.info("deleting hdfs .regioninfo data: " + hri.toString() + hsa.toString());
          Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
          FileSystem fs = rootDir.getFileSystem(conf);
          Path p = new Path(rootDir + "/" + htd.getNameAsString(), hri.getEncodedName());
          Path hriPath = new Path(p, HRegion.REGIONINFO_FILE);
          fs.delete(hriPath, true);
        }

        if (hdfs) {
          LOG.info("deleting hdfs data: " + hri.toString() + hsa.toString());
          Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
          FileSystem fs = rootDir.getFileSystem(conf);
          Path p = new Path(rootDir + "/" + htd.getNameAsString(), hri.getEncodedName());
          HBaseFsck.debugLsr(conf, p);
          boolean success = fs.delete(p, true);
          LOG.info("Deleted " + p + " sucessfully? " + success);
          HBaseFsck.debugLsr(conf, p);
        }

        if (metaRow) {
View Full Code Here

  }

  @Test
  public void testHbckFixOrphanTable() throws Exception {
    String table = "tableInfo";
    FileSystem fs = null;
    Path tableinfo = null;
    try {
      setupTable(table);
      HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();

      Path hbaseTableDir = new Path(conf.get(HConstants.HBASE_DIR) + "/" + table);
      fs = hbaseTableDir.getFileSystem(conf);
      FileStatus status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
      tableinfo = status.getPath();
      fs.rename(tableinfo, new Path("/.tableinfo"));

      // Expect hbck to report an error because .tableinfo is missing.
      HBaseFsck hbck = doFsck(conf, false);
      assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NO_TABLEINFO_FILE });

      // fix OrphanTable with default .tableinfo (htd not yet cached on master)
      hbck = doFsck(conf, true);
      assertNoErrors(hbck);
      status = null;
      status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
      assertNotNull(status);

      HTableDescriptor htd = admin.getTableDescriptor(table.getBytes());
      htd.setValue("NOT_DEFAULT", "true");
      admin.disableTable(table);
      admin.modifyTable(table.getBytes(), htd);
      admin.enableTable(table);
      fs.delete(status.getPath(), true);

      // fix OrphanTable with cache
      htd = admin.getTableDescriptor(table.getBytes()); // warms up cached htd on master
      hbck = doFsck(conf, true);
      assertNoErrors(hbck);
      status = null;
      status = FSTableDescriptors.getTableInfoPath(fs, hbaseTableDir);
      assertNotNull(status);
      htd = admin.getTableDescriptor(table.getBytes());
      assertEquals("true", htd.getValue("NOT_DEFAULT"));
    } finally {
      fs.rename(new Path("/.tableinfo"), tableinfo);
      deleteTable(table);
    }
  }
View Full Code Here

   */
  @Test
  public void testNoVersionFile() throws Exception {
    // delete the hbase.version file
    Path rootDir = new Path(conf.get(HConstants.HBASE_DIR));
    FileSystem fs = rootDir.getFileSystem(conf);
    Path versionFile = new Path(rootDir, HConstants.VERSION_FILE_NAME);
    fs.delete(versionFile, true);

    // test
    HBaseFsck hbck = doFsck(conf, false);
    assertErrors(hbck, new ERROR_CODE[] { ERROR_CODE.NO_VERSION_FILE });
    // fix hbase.version missing
View Full Code Here
