Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.Path.toUri()
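
Path.toUri() converts an org.apache.hadoop.fs.Path into a java.net.URI, exposing its scheme, authority, and path components. The snippets below (drawn from projects such as Apache Blur, HBase's hbck, and the Hadoop RAID tests) show the three common uses: selecting the right FileSystem with FileSystem.get(uri, conf), converting a local Path into a java.io.File, and stripping the scheme with toUri().getPath(). First, a minimal standalone sketch of the conversion itself; the URI value and class name are hypothetical:

import java.net.URI;
import org.apache.hadoop.fs.Path;

public class PathToUriDemo {
  public static void main(String[] args) {
    Path p = new Path("hdfs://namenode:8020/user/blur/tables/t1");
    URI uri = p.toUri();
    System.out.println(uri.getScheme());    // hdfs
    System.out.println(uri.getAuthority()); // namenode:8020
    System.out.println(uri.getPath());      // /user/blur/tables/t1
  }
}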


    // Resolve the table's FileSystem from its URI, verify that the table
    // directory exists, then walk its shard directories.
    checkTable(table);
    List<String> result = new ArrayList<String>();
    try {
      TableDescriptor descriptor = getTableDescriptor(table);
      Path tablePath = new Path(descriptor.tableUri);
      FileSystem fileSystem = FileSystem.get(tablePath.toUri(), _configuration);
      if (!fileSystem.exists(tablePath)) {
        LOG.error("Table [{0}] is missing, defined location [{1}]", table, tablePath.toUri());
        throw new RuntimeException("Table [" + table + "] is missing, defined location [" + tablePath.toUri() + "]");
      }
      FileStatus[] listStatus = fileSystem.listStatus(tablePath);
      for (FileStatus status : listStatus) {
        if (status.isDir()) {
          String name = status.getPath().getName();
          // ... (rest of method elided) ...
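The snippet above hands tablePath.toUri() to FileSystem.get(...) so that the FileSystem matching the table's scheme and authority is returned, rather than the cluster default from fs.defaultFS. A self-contained sketch of the same pattern; the table location and class name are hypothetical:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListShardDirs {
  public static void main(String[] args) throws IOException {
    // Hypothetical table location; any hdfs:// or file:// URI works.
    Path tablePath = new Path("hdfs://namenode:8020/blur/tables/table1");
    Configuration conf = new Configuration();
    // toUri() supplies the scheme and authority, so get() returns the
    // FileSystem that actually owns this path, not the default one.
    FileSystem fs = FileSystem.get(tablePath.toUri(), conf);
    if (!fs.exists(tablePath)) {
      throw new IOException("Missing table directory: " + tablePath.toUri());
    }
    for (FileStatus status : fs.listStatus(tablePath)) {
      if (status.isDirectory()) {
        System.out.println(status.getPath().getName());
      }
    }
  }
}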

  // Returns the total size in bytes of everything stored under the table's path.
  @Override
  public long getTableSize(String table) throws IOException {
    checkTable(table);
    Path tablePath = new Path(getTableUri(table));
    FileSystem fileSystem = FileSystem.get(tablePath.toUri(), _configuration);
    ContentSummary contentSummary = fileSystem.getContentSummary(tablePath);
    return contentSummary.getLength();
  }

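getContentSummary(...) aggregates the directory tree, and ContentSummary.getLength() is the total size in bytes of every file under the path. A sketch of the same pattern as a standalone helper; the class and method names are mine:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TableSize {
  // Sums the bytes of all files under the given directory URI.
  public static long sizeOf(String tableUri, Configuration conf) throws IOException {
    Path tablePath = new Path(tableUri);
    FileSystem fs = FileSystem.get(tablePath.toUri(), conf);
    ContentSummary summary = fs.getContentSummary(tablePath);
    return summary.getLength();
  }
}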


  // Opens one BlurIndex per shard from local disk. Converting the Path's URI
  // to a java.io.File works only when the table lives on the local filesystem.
  private Map<String, BlurIndex> openFromDisk() throws IOException {
    String table = _tableContext.getDescriptor().getName();
    Path tablePath = _tableContext.getTablePath();
    File tableFile = new File(tablePath.toUri());
    if (tableFile.isDirectory()) {
      Map<String, BlurIndex> shards = new ConcurrentHashMap<String, BlurIndex>();
      int shardCount = _tableContext.getDescriptor().getShardCount();
      for (int i = 0; i < shardCount; i++) {
        String shardName = BlurUtil.getShardName(BlurConstants.SHARD_PREFIX, i);
        // ... (rest of method elided) ...
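Note that the java.io.File(URI) constructor accepts only absolute file-scheme URIs, so the conversion above works only for tables stored on the local filesystem; an hdfs:// path would throw IllegalArgumentException. A minimal sketch with a hypothetical local path:

import java.io.File;
import org.apache.hadoop.fs.Path;

public class LocalPathToFile {
  public static void main(String[] args) {
    // Hypothetical local table path; File(URI) requires the "file" scheme.
    Path tablePath = new Path("file:///tmp/blur/tables/table1");
    File tableDir = new File(tablePath.toUri());
    if (tableDir.isDirectory()) {
      for (File shard : tableDir.listFiles()) {
        System.out.println(shard.getName());
      }
    }
  }
}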

  // Lists the shard directory names beneath the table's local directory.
  @Override
  public List<String> getShardList(String table) {
    try {
      List<String> result = new ArrayList<String>();
      Path tablePath = _tableContext.getTablePath();
      File tableFile = new File(new File(tablePath.toUri()), table);
      if (tableFile.isDirectory()) {
        for (File f : tableFile.listFiles()) {
          if (f.isDirectory()) {
            result.add(f.getName());
          }
        // ... (rest of method elided) ...

      // From HBase hbck: print a bulk-load command for each sidelined region dir.
      Path path = entry.getKey();
      System.out.println("This sidelined region dir should be bulk loaded: "
        + path.toString());
      System.out.println("Bulk load command looks like: "
        + "hbase org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles "
        + path.toUri().getPath() + " "+ tableName);
    }
  }

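Here toUri().getPath() drops the scheme and authority, leaving the bare path that the LoadIncrementalHFiles command line expects, whereas Path.toString() keeps the full hdfs://host:port prefix. A small sketch of the difference; the path and class name are hypothetical:

import org.apache.hadoop.fs.Path;

public class SchemeVsRawPath {
  public static void main(String[] args) {
    Path p = new Path("hdfs://namenode:8020/hbase/.hbck/region-1");
    System.out.println(p);                   // hdfs://namenode:8020/hbase/.hbck/region-1
    System.out.println(p.toUri().getPath()); // /hbase/.hbck/region-1
  }
}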

      // Now corrupt and fix the parity file.
      FileStatus parityStat = fileSys.getFileStatus(parityFile);
      long parityCrc = getCRC(fileSys, parityFile);
      locations = RaidDFSUtil.getBlockLocations(
        dfs, parityFile.toUri().getPath(), 0, parityStat.getLen());
      corruptBlock(locations.get(0).getBlock().getBlockName());
      TestBlockFixer.reportCorruptBlocks(fileSys, parityFile, new int[]{0},
        srcStat.getBlockSize());
      waitForCorruptBlocks(1, dfs, parityFile);

      args[1] = parityFile.toUri().getPath();
      ToolRunner.run(shell, args);

      waitForCorruptBlocks(0, dfs, file1);
      assertEquals(parityCrc, getCRC(fileSys, parityFile));
      // ... (rest of test elided) ...

      long parityCRC = getCRC(fileSys, parityFile);

      FileStatus parityStat = fileSys.getFileStatus(parityFile);
      DistributedFileSystem dfs = (DistributedFileSystem)fileSys;
      LocatedBlocks locs = RaidDFSUtil.getBlockLocations(
        dfs, parityFile.toUri().getPath(), 0, parityStat.getLen());

      // No files should be corrupt yet, and the block fixer has fixed nothing.
      String[] corruptFiles = RaidDFSUtil.getCorruptFiles(conf);
      assertEquals(0, corruptFiles.length);
      assertEquals(0, cnode.blockFixer.filesFixed());
      // ... (rest of test elided) ...
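
The RAID tests pass parityFile.toUri().getPath() because RaidDFSUtil's helpers take a raw path string rather than a Path. A rough equivalent using only the public FileSystem API; the file location and class name are hypothetical, and getFileBlockLocations stands in for the project-specific RaidDFSUtil.getBlockLocations:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ParityBlockLocations {
  public static void main(String[] args) throws IOException {
    // Hypothetical parity file location.
    Path parityFile = new Path("hdfs://namenode:8020/raid/parity/file1");
    FileSystem fs = FileSystem.get(parityFile.toUri(), new Configuration());
    FileStatus stat = fs.getFileStatus(parityFile);
    // The scheme-less string form that path-based DFS helpers expect.
    String rawPath = parityFile.toUri().getPath();
    System.out.println("raw path: " + rawPath);
    for (BlockLocation location : fs.getFileBlockLocations(stat, 0, stat.getLen())) {
      System.out.println(location);
    }
  }
}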
