Package org.apache.accumulo.server.fs

Examples of org.apache.accumulo.server.fs.FileRef
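As used in these snippets, a FileRef ties together the reference string kept in the metadata table and the fully qualified Hadoop Path of a tablet data file. Below is a minimal sketch of the constructors and accessor the examples rely on; the variable names and the example file name are hypothetical:

  // Sketch only: names and the example path are hypothetical.
  Path resolved = new Path("hdfs://nn1/accumulo/tables/5/t-0001/F0000001.rf");

  // From a fully qualified path string.
  FileRef byString = new FileRef(resolved.toString());

  // From the reference string kept in the metadata table plus the resolved Path.
  FileRef byPair = new FileRef(resolved.toString(), resolved);

  // A third form, new FileRef(VolumeManager, Key), resolves a metadata table entry
  // directly; see the metadata scans further down.

  // path() exposes the underlying Hadoop Path; FileRef also sorts naturally,
  // which is why several examples use it as a TreeMap/TreeSet key.
  Path p = byString.path();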


public class SizeLimitCompactionStrategyTest {
  // Builds a map of test files; arguments alternate between file name and human-readable size.
  private Map<FileRef,DataFileValue> nfl(String... sa) {

    HashMap<FileRef,DataFileValue> ret = new HashMap<FileRef,DataFileValue>();
    for (int i = 0; i < sa.length; i += 2) {
      ret.put(new FileRef("hdfs://nn1/accumulo/tables/5/t-0001/" + sa[i]),
          new DataFileValue(AccumuloConfiguration.getMemoryInBytes(sa[i + 1]), 1));
    }

    return ret;
  }
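The helper takes alternating file name and size arguments; the sizes are parsed by AccumuloConfiguration.getMemoryInBytes, so suffixes such as K, M, or G are accepted. A hypothetical call:

    // Hypothetical invocation of nfl(); the file names are illustrative.
    Map<FileRef,DataFileValue> files = nfl("file1", "100M", "file2", "100M", "file3", "2G");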


    // Minor compaction: flush the tablet's in-memory map to a new data file, tracing the operation.
    public void run() {
      minorCompactionWaitingToStart = false;
      minorCompactionInProgress = true;
      Span minorCompaction = Trace.on("minorCompaction");
      try {
        FileRef newMapfileLocation = getNextMapFilename(mergeFile == null ? "F" : "M");
        FileRef tmpFileRef = new FileRef(newMapfileLocation.path() + "_tmp");
        Span span = Trace.start("waitForCommits");
        synchronized (Tablet.this) {
          commitSession.waitForCommitsToFinish();
        }
        span.stop();

      KeyExtent extent = extents[i];

      // Register the tablet in the metadata table and track a single data file for it.
      String tdir = ServerConstants.getTablesDirs()[0] + "/" + extent.getTableId().toString() + "/dir_" + i;
      MetadataTableUtil.addTablet(extent, tdir, SystemCredentials.get(), TabletTime.LOGICAL_TIME_ID, zl);
      SortedMap<FileRef,DataFileValue> mapFiles = new TreeMap<FileRef,DataFileValue>();
      mapFiles.put(new FileRef(tdir + "/" + RFile.EXTENSION + "_000_000"), new DataFileValue(1000017 + i, 10000 + i));
     
      if (i == extentToSplit) {
        splitMapFiles = mapFiles;
      }
      int tid = 0;

      if (datafileSizes.size() >= maxFiles) {
        // find the smallest file

        long min = Long.MAX_VALUE;
        FileRef minName = null;

        for (Entry<FileRef,DataFileValue> entry : datafileSizes.entrySet()) {
          if (entry.getValue().getSize() < min && !majorCompactingFiles.contains(entry.getKey())) {
            min = entry.getValue().getSize();
            minName = entry.getKey();

    return fullMajorCompaction;
  }
 
  @Override
  public SortedKeyValueIterator<Key,Value> reserveMapFileReader(String mapFileName) throws IOException {
    FileRef ref = new FileRef(mapFileName, new Path(mapFileName));
    return trm.openFiles(Collections.singletonMap(ref, files.get(ref)), false).get(0);
  }

public class FileUtilTest {

  @Test
  public void testToPathStrings() {
    Collection<FileRef> c = new java.util.ArrayList<FileRef>();
    FileRef r1 = createMock(FileRef.class);
    expect(r1.path()).andReturn(new Path("/foo"));
    replay(r1);
    c.add(r1);
    FileRef r2 = createMock(FileRef.class);
    expect(r2.path()).andReturn(new Path("/bar"));
    replay(r2);
    c.add(r2);

    Collection<String> cs = FileUtil.toPathStrings(c);
    Assert.assertEquals(2, cs.size());
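A hypothetical continuation of the test, assuming toPathStrings simply stringifies each FileRef's path:

    // Hypothetical follow-up assertions; the real test may verify the contents differently.
    Assert.assertTrue(cs.contains("/foo"));
    Assert.assertTrue(cs.contains("/bar"));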

      // Collect the tablet's current data files from the metadata table before computing the split.
      scanner3.fetchColumnFamily(DataFileColumnFamily.NAME);
      scanner3.setRange(new Range(rowKey, rowKey.followingKey(PartialKey.ROW)));

      for (Entry<Key,Value> entry : scanner3) {
        if (entry.getKey().compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
          origDatafileSizes.put(new FileRef(fs, entry.getKey()), new DataFileValue(entry.getValue().get()));
        }
      }
     
      MetadataTableUtil.splitDatafiles(table, metadataPrevEndRow, splitRatio, new HashMap<FileRef,FileUtil.FileInfo>(), origDatafileSizes, lowDatafileSizes,
          highDatafileSizes, highDatafilesToRemove);

      scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
      // Queue data files and tablet directories from the merged tablets for deletion, flushing every 1000 entries.
      Set<FileRef> datafiles = new TreeSet<FileRef>();
      for (Entry<Key,Value> entry : scanner) {
        Key key = entry.getKey();
        if (key.compareColumnFamily(DataFileColumnFamily.NAME) == 0) {
          datafiles.add(new FileRef(this.master.fs, key));
          if (datafiles.size() > 1000) {
            MetadataTableUtil.addDeleteEntries(extent, datafiles, SystemCredentials.get());
            datafiles.clear();
          }
        } else if (TabletsSection.ServerColumnFamily.TIME_COLUMN.hasColumns(key)) {
          timeType = entry.getValue().toString().charAt(0);
        } else if (key.compareColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME) == 0) {
          throw new IllegalStateException("Tablet " + key.getRow() + " is assigned during a merge!");
        } else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
          datafiles.add(new FileRef(entry.getValue().toString(), this.master.fs.getFullPath(FileType.TABLE, entry.getValue().toString())));
          if (datafiles.size() > 1000) {
            MetadataTableUtil.addDeleteEntries(extent, datafiles, SystemCredentials.get());
            datafiles.clear();
          }
        }

  }

  // Allocate a unique file name in the tablet directory, using the table's configured file extension.
  FileRef getNextMapFilename(String prefix) throws IOException {
    String extension = FileOperations.getNewFileExtension(tabletServer.getTableConfiguration(extent));
    checkTabletDir();
    return new FileRef(location.toString() + "/" + prefix + UniqueNameAllocator.getInstance().getNextName() + "." + extension);
  }
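As the minor-compaction snippet near the top shows, the returned reference is usually paired with a temporary sibling that is written first. A sketch reusing that pattern (the variable names are hypothetical):

    // Write to a "_tmp" sibling first, then move it to the final name once complete.
    FileRef newFile = getNextMapFilename("F");
    FileRef tmpFile = new FileRef(newFile.path() + "_tmp");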

      // Root tablet: list data files directly from the tablet directory and clean up any replacement files.
      FileStatus[] files = fs.listStatus(location);
      Collection<String> goodPaths = RootFiles.cleanupReplacement(fs, files, true);
      for (String good : goodPaths) {
        Path path = new Path(good);
        String filename = path.getName();
        FileRef ref = new FileRef(location.toString() + "/" + filename, path);
        DataFileValue dfv = new DataFileValue(0, 0);
        datafiles.put(ref, dfv);
      }
    } else {

      // All other tablets: read the data file entries from the metadata (or root) table.
      Text rowName = extent.getMetadataEntry();

      String tableId = extent.isMeta() ? RootTable.ID : MetadataTable.ID;
      ScannerImpl mdScanner = new ScannerImpl(HdfsZooInstance.getInstance(), SystemCredentials.get(), tableId, Authorizations.EMPTY);

      // When a tablet has no data files, this metadata scan simply returns nothing.
      // The batch size was originally reduced for performance, then raised from 10 to 1000
      // once end keys were implemented.
      mdScanner.setBatchSize(1000);

      // Keep the column family fetch in place; the end key now provides the safety bound.
      mdScanner.fetchColumnFamily(DataFileColumnFamily.NAME);

      mdScanner.setRange(new Range(rowName));

      for (Entry<Key,Value> entry : mdScanner) {

        if (entry.getKey().compareRow(rowName) != 0) {
          break;
        }

        FileRef ref = new FileRef(fs, entry.getKey());
        datafiles.put(ref, new DataFileValue(entry.getValue().get()));
      }
    }
    return datafiles;
  }
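A hypothetical caller can total the tablet's on-disk size from the map returned above (getSize comes from DataFileValue, as in the smallest-file search earlier):

    // Hypothetical use of the returned map; "datafiles" here is the method's return value.
    long totalBytes = 0;
    for (DataFileValue dfv : datafiles.values())
      totalBytes += dfv.getSize();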
