Examples of org.apache.blur.store.hdfs.HdfsDirectory
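
The snippets below are fragments taken from the Apache Blur sources and show HdfsDirectory being used as a Lucene Directory backed by a Hadoop FileSystem path. As a minimal, self-contained sketch of the basic pattern (the class name, host, and shard path here are hypothetical, not taken from the snippets), a directory is built from a Hadoop Configuration and a Path and then read like any other Lucene index:

import org.apache.blur.store.hdfs.HdfsDirectory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.lucene.index.DirectoryReader;

public class HdfsDirectoryExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical shard path; any HDFS or local path that already holds a Lucene index works.
    Path shardPath = new Path("hdfs://localhost:9000/blur/tables/test/shard-00000000");
    Configuration configuration = new Configuration();

    // HdfsDirectory adapts the Hadoop FileSystem path to Lucene's Directory API.
    HdfsDirectory dir = new HdfsDirectory(configuration, shardPath);
    DirectoryReader reader = DirectoryReader.open(dir);
    System.out.println("Documents in shard: " + reader.numDocs());
    reader.close();
    dir.close();
  }
}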

    // The bad-index path should now exist on the file system; afterwards make
    // sure the shard index still opens cleanly.
    assertTrue(_fileSystem.exists(_badIndexPath));
    validateIndex();
  }

  private void validateIndex() throws IOException {
    HdfsDirectory dir = new HdfsDirectory(_configuration, _shardPath);
    // Opening (and immediately closing) a DirectoryReader fails if the index is
    // missing or corrupt, which makes this a cheap validity check.
    DirectoryReader.open(dir).close();
  }

Examples of org.apache.blur.store.hdfs.HdfsDirectory

    // Resolve the shard directory under the table path and guard it with a
    // BlurLockFactory keyed by node name and process id.
    Path tablePath = tableContext.getTablePath();
    Path hdfsDirPath = new Path(tablePath, shard);

    BlurLockFactory lockFactory = new BlurLockFactory(_configuration, hdfsDirPath, _nodeName, BlurUtil.getPid());

    HdfsDirectory longTermStorage = new HdfsDirectory(_configuration, hdfsDirPath);
    longTermStorage.setLockFactory(lockFactory);

    Directory directory;
    URI uri = hdfsDirPath.toUri();
    String scheme = uri.getScheme();
    if (scheme != null && scheme.equals("hdfs")) {

Examples of org.apache.blur.store.hdfs.HdfsDirectory

    // Reduce the existing shards down to newShardCount; each base shard absorbs
    // the other shards assigned to it in this pass.
    int numberOfShardsToMergePerPass = paths.length / newShardCount;
    for (int i = 0; i < newShardCount; i++) {
      System.out.println("Base Index [" + paths[i] + "]");
      IndexWriterConfig lconf = new IndexWriterConfig(LUCENE_VERSION, new KeywordAnalyzer());
      lconf.setCodec(new Blur022Codec());
      HdfsDirectory dir = new HdfsDirectory(getConf(), paths[i]);
      IndexWriter indexWriter = new IndexWriter(dir, lconf);
      Directory[] dirs = new Directory[numberOfShardsToMergePerPass - 1];
      Path[] pathsToDelete = new Path[numberOfShardsToMergePerPass - 1];
      for (int p = 1; p < numberOfShardsToMergePerPass; p++) {
        Path pathToMerge = paths[i + p * newShardCount];
        System.out.println("Merge [" + pathToMerge + "]");
        dirs[p - 1] = new HdfsDirectory(getConf(), pathToMerge);
        pathsToDelete[p - 1] = pathToMerge;
      }
      indexWriter.addIndexes(dirs);
      // Causes a rewrite of the index so that the symlinked files are
      // merged/rewritten.

Examples of org.apache.blur.store.hdfs.HdfsDirectory

  @Override
  protected Directory setupDirectory() throws IOException {
    // A local file:// path works as well, since HdfsDirectory goes through the
    // Hadoop FileSystem API rather than talking to HDFS directly.
    URI uri = new File(file, "hdfs-join").toURI();
    Path hdfsDirPath = new Path(uri.toString());
    Configuration conf = new Configuration();
    // JoinDirectory presents the long-term and short-term HdfsDirectory
    // instances as a single Lucene Directory.
    HdfsDirectory longTerm = new HdfsDirectory(conf, new Path(hdfsDirPath, "long"));
    HdfsDirectory shortTerm = new HdfsDirectory(conf, new Path(hdfsDirPath, "short"));
    return new JoinDirectory(longTerm, shortTerm);
  }

Examples of org.apache.blur.store.hdfs.HdfsDirectory

    System.out.println("Counters: " + ctrs);

    // Shard 0 should contain exactly one committed task directory; open it and
    // verify the expected document count.
    Path path = new Path(tableUri, BlurUtil.getShardName(0));
    Collection<Path> commitedTasks = getCommitedTasks(path);
    assertEquals(1, commitedTasks.size());
    DirectoryReader reader = DirectoryReader.open(new HdfsDirectory(conf, commitedTasks.iterator().next()));
    assertEquals(2, reader.numDocs());
    reader.close();
  }

Examples of org.apache.blur.store.hdfs.HdfsDirectory

    Path path = new Path(tableUri, BlurUtil.getShardName(0));
    Collection<Path> commitedTasks = getCommitedTasks(path);
    assertEquals(1, commitedTasks.size());

    DirectoryReader reader = DirectoryReader.open(new HdfsDirectory(conf, commitedTasks.iterator().next()));
    assertEquals(80000, reader.numDocs());
    reader.close();
  }

Examples of org.apache.blur.store.hdfs.HdfsDirectory

    // Every shard should have exactly one committed task; add up the document
    // counts across all shards.
    for (int i = 0; i < tableDescriptor.getShardCount(); i++) {
      Path path = new Path(tableUri, BlurUtil.getShardName(i));
      Collection<Path> commitedTasks = getCommitedTasks(path);
      assertEquals(1, commitedTasks.size());

      DirectoryReader reader = DirectoryReader.open(new HdfsDirectory(conf, commitedTasks.iterator().next()));
      total += reader.numDocs();
      reader.close();
    }
    assertEquals(80000, total);

Examples of org.apache.blur.store.hdfs.HdfsDirectory

    // The new index for this shard is built in a temporary directory under the
    // job's table output path.
    Path tableOutput = BlurOutputFormat.getOutputPath(_configuration);
    String shardName = BlurUtil.getShardName(BlurConstants.SHARD_PREFIX, shardId);
    Path indexPath = new Path(tableOutput, shardName);
    _newIndex = new Path(indexPath, tmpDirName);
    // ProgressableDirectory wraps the HdfsDirectory so that long index writes
    // keep reporting progress to the Hadoop framework; no locking is needed for
    // a task-private output directory.
    _finalDir = new ProgressableDirectory(new HdfsDirectory(_configuration, _newIndex), getProgressable());
    _finalDir.setLockFactory(NoLockFactory.getNoLockFactory());

    TableContext tableContext = TableContext.create(tableDescriptor);
    _fieldManager = tableContext.getFieldManager();
    Analyzer analyzer = _fieldManager.getAnalyzerForIndex();

Examples of org.apache.blur.store.hdfs.HdfsDirectory

    // Each shard may have been written by up to 'multiple' committed tasks; sum
    // the document counts across every committed task in every shard.
    for (int i = 0; i < tableDescriptor.getShardCount(); i++) {
      Path path = new Path(tableUri, BlurUtil.getShardName(i));
      Collection<Path> commitedTasks = getCommitedTasks(path);
      assertTrue(multiple >= commitedTasks.size());
      for (Path p : commitedTasks) {
        DirectoryReader reader = DirectoryReader.open(new HdfsDirectory(conf, p));
        total += reader.numDocs();
        reader.close();
      }
    }
    assertEquals(80000, total);
