Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.Path
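
The snippets below are drawn from HBase's snapshot-restore and hbck code. They lean on two Path idioms: the (parent, child) constructor to compose directory hierarchies, and getName()/getParent() to take them apart. A minimal standalone sketch of those idioms (all paths and names here are illustrative, not taken from the snippets):

import org.apache.hadoop.fs.Path;

public class PathBasics {
  public static void main(String[] args) {
    // Compose child paths under a parent, as the snippets do for
    // snapshot, region, and column-family directories.
    Path tableDir = new Path("/hbase", "mytable");
    Path regionDir = new Path(tableDir, "1588230740"); // encoded region name
    Path familyDir = new Path(regionDir, "cf");

    System.out.println(familyDir);             // /hbase/mytable/1588230740/cf
    System.out.println(familyDir.getName());   // cf
    System.out.println(familyDir.getParent()); // /hbase/mytable/1588230740
  }
}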


      List<HRegionInfo> regionsToAdd = new LinkedList<HRegionInfo>();

      monitor.rethrowException();
      for (String regionName: snapshotRegionNames) {
        LOG.info("region to add: " + regionName);
        Path regionDir = new Path(snapshotDir, regionName);
        regionsToAdd.add(HRegion.loadDotRegionInfoFileContent(fs, regionDir));
      }

      // Create new regions cloning from the snapshot
      monitor.rethrowException();
View Full Code Here


  /**
   * Restore region by removing files not in the snapshot
   * and adding the missing ones from the snapshot.
   */
  private void restoreRegion(HRegionInfo regionInfo) throws IOException {
    Path snapshotRegionDir = new Path(snapshotDir, regionInfo.getEncodedName());
    Map<String, List<String>> snapshotFiles =
                SnapshotReferenceUtil.getRegionHFileReferences(fs, snapshotRegionDir);
    Path regionDir = new Path(tableDir, regionInfo.getEncodedName());
    String tableName = tableDesc.getNameAsString();

    // Restore families present in the table
    for (Path familyDir: FSUtils.getFamilyDirs(fs, regionDir)) {
      byte[] family = Bytes.toBytes(familyDir.getName());
      Set<String> familyFiles = getTableRegionFamilyFiles(familyDir);
      List<String> snapshotFamilyFiles = snapshotFiles.remove(familyDir.getName());
      if (snapshotFamilyFiles != null) {
        List<String> hfilesToAdd = new LinkedList<String>();
        for (String hfileName: snapshotFamilyFiles) {
          if (familyFiles.contains(hfileName)) {
            // HFile already present
            familyFiles.remove(hfileName);
          } else {
            // HFile missing
            hfilesToAdd.add(hfileName);
          }
        }

        // Restore missing files
        for (String hfileName: hfilesToAdd) {
          LOG.trace("Adding HFileLink " + hfileName +
            " to region=" + regionInfo.getEncodedName() + " table=" + tableName);
          restoreStoreFile(familyDir, regionInfo, hfileName);
        }

        // Remove hfiles not present in the snapshot
        for (String hfileName: familyFiles) {
          Path hfile = new Path(familyDir, hfileName);
          LOG.trace("Removing hfile=" + hfile +
            " from region=" + regionInfo.getEncodedName() + " table=" + tableName);
          HFileArchiver.archiveStoreFile(fs, regionInfo, conf, tableDir, family, hfile);
        }
      } else {
        // Family doesn't exist in the snapshot
        LOG.trace("Removing family=" + Bytes.toString(family) +
          " from region=" + regionInfo.getEncodedName() + " table=" + tableName);
        HFileArchiver.archiveFamily(fs, conf, regionInfo, tableDir, family);
        fs.delete(familyDir, true);
      }
    }

    // Add families not present in the table
    for (Map.Entry<String, List<String>> familyEntry: snapshotFiles.entrySet()) {
      Path familyDir = new Path(regionDir, familyEntry.getKey());
      if (!fs.mkdirs(familyDir)) {
        throw new IOException("Unable to create familyDir=" + familyDir);
      }

      for (String hfileName: familyEntry.getValue()) {
View Full Code Here
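
The reconciliation in restoreRegion above boils down to a two-way set difference: snapshot files missing from the family directory are restored, and leftover family files not referenced by the snapshot are archived. The core step, distilled with plain collections (file names are illustrative):

import java.util.Arrays;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;

public class ReconcileSketch {
  public static void main(String[] args) {
    Set<String> familyFiles = new HashSet<String>(Arrays.asList("a", "b", "c"));
    List<String> snapshotFiles = Arrays.asList("b", "c", "d");

    List<String> hfilesToAdd = new LinkedList<String>();
    for (String hfile : snapshotFiles) {
      if (familyFiles.contains(hfile)) {
        familyFiles.remove(hfile); // already present, keep it
      } else {
        hfilesToAdd.add(hfile);    // missing, restore from the snapshot
      }
    }
    System.out.println("restore: " + hfilesToAdd); // [d]
    System.out.println("archive: " + familyFiles); // [a], not referenced by the snapshot
  }
}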

   * @param region the cloned {@link HRegion}
   * @param snapshotRegionInfo the {@link HRegionInfo} of the region in the snapshot
   */
  private void cloneRegion(final HRegion region, final HRegionInfo snapshotRegionInfo)
      throws IOException {
    final Path snapshotRegionDir = new Path(snapshotDir, snapshotRegionInfo.getEncodedName());
    final Path regionDir = new Path(tableDir, region.getRegionInfo().getEncodedName());
    final String tableName = tableDesc.getNameAsString();
    SnapshotReferenceUtil.visitRegionStoreFiles(fs, snapshotRegionDir,
      new FSVisitor.StoreFileVisitor() {
        public void storeFile (final String region, final String family, final String hfile)
            throws IOException {
          LOG.info("Adding HFileLink " + hfile + " to table=" + tableName);
          Path familyDir = new Path(regionDir, family);
          restoreStoreFile(familyDir, snapshotRegionInfo, hfile);
        }
    });
  }
View Full Code Here

   */
  private void restoreReferenceFile(final Path familyDir, final HRegionInfo regionInfo,
      final String hfileName) throws IOException {
    // Extract the referred information (hfile name and parent region)
    String tableName = snapshotDesc.getTable();
    Path refPath = StoreFile.getReferredToFile(new Path(new Path(new Path(tableName,
        regionInfo.getEncodedName()), familyDir.getName()), hfileName));
    String snapshotRegionName = refPath.getParent().getParent().getName();
    String fileName = refPath.getName();

    // The new reference should have the cloned region name as parent, if it is a clone.
    String clonedRegionName = Bytes.toString(regionsMap.get(Bytes.toBytes(snapshotRegionName)));
    if (clonedRegionName == null) clonedRegionName = snapshotRegionName;

    // The output file should be a reference link table=snapshotRegion-fileName.clonedRegionName
    String refLink = fileName;
    if (!HFileLink.isHFileLink(fileName)) {
      refLink = HFileLink.createHFileLinkName(tableName, snapshotRegionName, fileName);
    }
    Path outPath = new Path(familyDir, refLink + '.' + clonedRegionName);

    // Create the new reference
    Path linkPath = new Path(familyDir,
      HFileLink.createHFileLinkName(tableName, regionInfo.getEncodedName(), hfileName));
    InputStream in = new HFileLink(conf, linkPath).open(fs);
    OutputStream out = fs.create(outPath);
    IOUtils.copyBytes(in, out, conf);
  }
View Full Code Here
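
The nested new Path(...) calls in restoreReferenceFile build a relative path of the form table/region/family/hfile, and two getParent() hops then recover the region component. A small sketch of that decomposition (names are made up for illustration):

import org.apache.hadoop.fs.Path;

public class RefPathSketch {
  public static void main(String[] args) {
    // Mirrors: new Path(new Path(new Path(tableName, encodedRegion), family), hfileName)
    Path refPath = new Path(new Path(new Path("mytable", "region-a"), "cf"), "hfile1");

    System.out.println(refPath);                                   // mytable/region-a/cf/hfile1
    System.out.println(refPath.getParent().getParent().getName()); // region-a
    System.out.println(refPath.getName());                         // hfile1
  }
}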

    try {
      // Recovered edits
      SnapshotReferenceUtil.visitRecoveredEdits(fs, snapshotDir,
          new FSVisitor.RecoveredEditsVisitor() {
        public void recoveredEdits (final String region, final String logfile) throws IOException {
          Path path = SnapshotReferenceUtil.getRecoveredEdits(snapshotDir, region, logfile);
          logSplitter.splitRecoveredEdit(path);
        }
      });

      // Region Server Logs
View Full Code Here

    final AtomicLong logSize = new AtomicLong();
    SnapshotReferenceUtil.visitReferencedFiles(fs, snapshotDir,
      new SnapshotReferenceUtil.FileVisitor() {
        public void storeFile (final String region, final String family, final String hfile)
            throws IOException {
          Path path = new Path(family, HFileLink.createHFileLinkName(table, region, hfile));
          HFileLink link = new HFileLink(conf, path);
          boolean inArchive = false;
          long size = -1;
          try {
            if ((inArchive = fs.exists(link.getArchivePath()))) {
              size = fs.getFileStatus(link.getArchivePath()).getLen();
              hfileArchiveSize.addAndGet(size);
              hfileArchiveCount.addAndGet(1);
            } else {
              size = link.getFileStatus(fs).getLen();
              hfileSize.addAndGet(size);
              hfilesCount.addAndGet(1);
            }
          } catch (FileNotFoundException e) {
            hfilesMissing.addAndGet(1);
          }

          if (showFiles) {
            System.out.printf("%8s %s/%s/%s/%s %s%n",
              (size < 0 ? "-" : StringUtils.humanReadableInt(size)),
              table, region, family, hfile,
              (inArchive ? "(archive)" : (size < 0) ? "(NOT FOUND)" : ""));
          }
        }

        public void recoveredEdits (final String region, final String logfile)
            throws IOException {
          Path path = SnapshotReferenceUtil.getRecoveredEdits(snapshotDir, region, logfile);
          long size = fs.getFileStatus(path).getLen();
          logSize.addAndGet(size);
          logsCount.addAndGet(1);

          if (showFiles) {
View Full Code Here

   * families to identify the min and max keys. The resulting region will
   * likely violate table integrity but will be dealt with by merging
   * overlapping regions.
   */
  private void adoptHdfsOrphan(HbckInfo hi) throws IOException {
    Path p = hi.getHdfsRegionDir();
    FileSystem fs = p.getFileSystem(getConf());
    FileStatus[] dirs = fs.listStatus(p);
    if (dirs == null) {
      LOG.warn("Attempt to adopt ophan hdfs region skipped becuase no files present in " +
          p + ". This dir could probably be deleted.");
      return ;
    }

    String tableName = Bytes.toString(hi.getTableName());
    TableInfo tableInfo = tablesInfo.get(tableName);
    Preconditions.checkNotNull(tableInfo, "Table '" + tableName + "' not present!");
    HTableDescriptor template = tableInfo.getHTD();

    // find min and max key values
    Pair<byte[],byte[]> orphanRegionRange = null;
    for (FileStatus cf : dirs) {
      String cfName = cf.getPath().getName();
      // TODO Figure out what the special dirs are
      if (cfName.startsWith(".") || cfName.equals("splitlog")) continue;

      FileStatus[] hfiles = fs.listStatus(cf.getPath());
      for (FileStatus hfile : hfiles) {
        byte[] start, end;
        HFile.Reader hf = null;
        try {
          CacheConfig cacheConf = new CacheConfig(getConf());
          hf = HFile.createReader(fs, hfile.getPath(), cacheConf);
          hf.loadFileInfo();
          KeyValue startKv = KeyValue.createKeyValueFromKey(hf.getFirstKey());
          start = startKv.getRow();
          KeyValue endKv = KeyValue.createKeyValueFromKey(hf.getLastKey());
          end = endKv.getRow();
        } catch (IOException ioe) {
          LOG.warn("Problem reading orphan file " + hfile + ", skipping");
          continue;
        } catch (NullPointerException npe) {
          LOG.warn("Orphan file " + hfile + " is possibly a corrupted HFile, skipping");
          continue;
        } finally {
          if (hf != null) {
            hf.close();
          }
        }

        // expand the range to include the range of all hfiles
        if (orphanRegionRange == null) {
          // first range
          orphanRegionRange = new Pair<byte[], byte[]>(start, end);
        } else {
          // TODO add test

          // expand range only if the hfile is wider.
          if (Bytes.compareTo(orphanRegionRange.getFirst(), start) > 0) {
            orphanRegionRange.setFirst(start);
          }
          if (Bytes.compareTo(orphanRegionRange.getSecond(), end) < 0) {
            orphanRegionRange.setSecond(end);
          }
        }
      }
    }
    if (orphanRegionRange == null) {
      LOG.warn("No data in dir " + p + ", sidelining data");
      fixes++;
      sidelineRegionDir(fs, hi);
      return;
    }
    LOG.info("Min max keys are : [" + Bytes.toString(orphanRegionRange.getFirst()) + ", " +
        Bytes.toString(orphanRegionRange.getSecond()) + ")");

    // create new region on hdfs.  move data into place.
    HRegionInfo hri = new HRegionInfo(template.getName(),
        orphanRegionRange.getFirst(), orphanRegionRange.getSecond());
    LOG.info("Creating new region : " + hri);
    HRegion region = HBaseFsckRepair.createHDFSRegionDir(getConf(), hri, template);
    Path target = region.getRegionDir();

    // rename all the data to new region
    mergeRegionDirs(target, hi);
    fixes++;
  }
View Full Code Here
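
The range-expansion loop in adoptHdfsOrphan computes the union of the hfiles' key ranges: keep the smallest start key and the largest end key seen. The same comparison distilled with HBase's Bytes utility (keys are illustrative):

import org.apache.hadoop.hbase.util.Bytes;

public class RangeUnionSketch {
  public static void main(String[] args) {
    byte[][] starts = { Bytes.toBytes("f"), Bytes.toBytes("b"), Bytes.toBytes("k") };
    byte[][] ends   = { Bytes.toBytes("m"), Bytes.toBytes("d"), Bytes.toBytes("z") };

    byte[] min = starts[0];
    byte[] max = ends[0];
    for (int i = 1; i < starts.length; i++) {
      if (Bytes.compareTo(min, starts[i]) > 0) min = starts[i]; // smaller start widens the range
      if (Bytes.compareTo(max, ends[i]) < 0)   max = ends[i];   // larger end widens the range
    }
    System.out.println("[" + Bytes.toString(min) + ", " + Bytes.toString(max) + ")"); // [b, z)
  }
}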

   * A lingering reference file prevents a region from opening. It has to
   * be fixed before the cluster can start properly.
   */
  private void offlineReferenceFileRepair() throws IOException {
    Configuration conf = getConf();
    Path hbaseRoot = FSUtils.getRootDir(conf);
    FileSystem fs = hbaseRoot.getFileSystem(conf);
    Map<String, Path> allFiles = FSUtils.getTableStoreFilePathMap(fs, hbaseRoot);
    for (Path path: allFiles.values()) {
      boolean isReference = false;
      try {
        isReference = StoreFile.isReference(path);
      } catch (Throwable t) {
        // Ignore. Some files may not be store files at all.
        // For example, files under .oldlogs folder in .META.
        // Warning message is already logged by
        // StoreFile#isReference.
      }
      if (!isReference) continue;

      Path referredToFile = StoreFile.getReferredToFile(path);
      if (fs.exists(referredToFile)) continue; // good, expected

      // Found a lingering reference file
      errors.reportError(ERROR_CODE.LINGERING_REFERENCE_HFILE,
        "Found lingering reference file " + path);
      if (!shouldFixReferenceFiles()) continue;

      // Now try to fix it, since the fix was requested
      boolean success = false;
      String pathStr = path.toString();

      // A reference file path should be like
      // ${hbase.rootdir}/table_name/region_id/family_name/referred_file.region_name
      // Walk up three directories to reach the table folder,
      // so the file is sidelined under a similar folder structure.
      int index = pathStr.lastIndexOf(Path.SEPARATOR_CHAR);
      for (int i = 0; index > 0 && i < 3; i++) {
        index = pathStr.lastIndexOf(Path.SEPARATOR_CHAR, index - 1); // step past the previous match
      }
      if (index > 0) {
        Path rootDir = getSidelineDir();
        Path dst = new Path(rootDir, pathStr.substring(index));
        fs.mkdirs(dst.getParent());
        LOG.info("Trying to sildeline reference file"
          + path + " to " + dst);
        setShouldRerun();

        success = fs.rename(path, dst);
View Full Code Here
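
The lastIndexOf walk in offlineReferenceFileRepair keeps the last three directory levels (table/region/family) plus the file name, so the sidelined copy mirrors the original layout under the sideline dir. A standalone sketch of that string arithmetic (the path is illustrative):

public class SidelineSuffixSketch {
  public static void main(String[] args) {
    // ${hbase.rootdir}/table_name/region_id/family_name/referred_file.region_name
    String pathStr = "/hbase/mytable/region-a/cf/hfile1.region-b";

    // Walk back over three '/' separators, stepping past each match.
    int index = pathStr.lastIndexOf('/');
    for (int i = 0; index > 0 && i < 3; i++) {
      index = pathStr.lastIndexOf('/', index - 1);
    }
    // Suffix appended to the sideline dir:
    System.out.println(pathStr.substring(index)); // /mytable/region-a/cf/hfile1.region-b
  }
}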

  /**
   * Read the .regioninfo file from the file system.  If there is no
   * .regioninfo file, add the region to the orphan hdfs region list.
   */
  private void loadHdfsRegioninfo(HbckInfo hbi) throws IOException {
    Path regionDir = hbi.getHdfsRegionDir();
    if (regionDir == null) {
      LOG.warn("No HDFS region dir found: " + hbi + " meta=" + hbi.metaEntry);
      return;
    }

    if (hbi.hdfsEntry.hri != null) {
      // already loaded data
      return;
    }

    Path regioninfo = new Path(regionDir, HRegion.REGIONINFO_FILE);
    FileSystem fs = regioninfo.getFileSystem(getConf());

    FSDataInputStream in = fs.open(regioninfo);
    HRegionInfo hri = new HRegionInfo();
    hri.readFields(in);
    in.close();
View Full Code Here

      TableInfo modTInfo = tablesInfo.get(tableName);
      if (modTInfo == null) {
        // only executed once per table.
        modTInfo = new TableInfo(tableName);
        Path hbaseRoot = FSUtils.getRootDir(getConf());
        tablesInfo.put(tableName, modTInfo);
        try {
          HTableDescriptor htd =
              FSTableDescriptors.getTableDescriptor(hbaseRoot.getFileSystem(getConf()),
              hbaseRoot, tableName);
          modTInfo.htds.add(htd);
        } catch (IOException ioe) {
          if (!orphanTableDirs.containsKey(tableName)) {
            LOG.warn("Unable to read .tableinfo from " + hbaseRoot, ioe);
View Full Code Here
