Examples of HdfsDirectory
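
The excerpts below all follow the same basic pattern: construct an org.apache.blur.store.hdfs.HdfsDirectory from a Hadoop Configuration and a Path, then hand it to ordinary Lucene classes such as DirectoryReader and IndexWriter. The following minimal, self-contained sketch shows the read side of that pattern; the class name and index path are made up for illustration, and it assumes Lucene 4.x and the Blur store module on the classpath.

import org.apache.blur.store.hdfs.HdfsDirectory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.lucene.index.DirectoryReader;

public class HdfsDirectoryReadExample {
  public static void main(String[] args) throws Exception {
    // Picks up core-site.xml/hdfs-site.xml from the classpath if present.
    Configuration conf = new Configuration();
    // Hypothetical location of an existing Lucene index stored in HDFS.
    Path indexPath = new Path("hdfs://namenode/blur/tables/table1/shard-00000000");

    // HdfsDirectory adapts the HDFS path to Lucene's Directory API.
    DirectoryReader reader = DirectoryReader.open(new HdfsDirectory(conf, indexPath));
    try {
      System.out.println("numDocs=" + reader.numDocs());
    } finally {
      reader.close();
    }
  }
}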


Examples of org.apache.blur.store.hdfs.HdfsDirectory

    // Each shard should have exactly one committed task directory; open it
    // through HdfsDirectory and accumulate the Lucene document count.
    for (int i = 0; i < tableDescriptor.getShardCount(); i++) {
      Path path = new Path(tableUri, BlurUtil.getShardName(i));
      Collection<Path> commitedTasks = getCommitedTasks(path);
      assertEquals(1, commitedTasks.size());

      DirectoryReader reader = DirectoryReader.open(new HdfsDirectory(conf, commitedTasks.iterator().next()));
      total += reader.numDocs();
      reader.close();
    }
    assertEquals(80000, total);

Examples of org.apache.blur.store.hdfs.HdfsDirectory

    // With multiple committed tasks allowed per shard, open every committed
    // task directory and accumulate the document count across all of them.
    for (int i = 0; i < tableDescriptor.getShardCount(); i++) {
      Path path = new Path(tableUri, BlurUtil.getShardName(i));
      Collection<Path> commitedTasks = getCommitedTasks(path);
      assertTrue(multiple >= commitedTasks.size());
      for (Path p : commitedTasks) {
        DirectoryReader reader = DirectoryReader.open(new HdfsDirectory(conf, p));
        total += reader.numDocs();
        reader.close();
      }
    }
    assertEquals(80000, total);

Examples of org.apache.blur.store.hdfs.HdfsDirectory

    // Working paths for importing an index into the shard: the ".commit"
    // directory receives the new index and ".bad_rowids" records rejected rows.
    Path shardPath = new Path(tablePath, "shard-00000000");
    String indexDirName = "index_" + uuid;
    path = new Path(shardPath, indexDirName + ".commit");
    fileSystem.mkdirs(path);
    badRowIdsPath = new Path(shardPath, indexDirName + ".bad_rowids");
    // Separate HdfsDirectory instances wrap the commit directory and the
    // shard's main directory.
    Directory commitDirectory = new HdfsDirectory(configuration, path);
    Directory mainDirectory = new HdfsDirectory(configuration, shardPath);
    _fieldManager = tableContext.getFieldManager();
    Analyzer analyzerForIndex = _fieldManager.getAnalyzerForIndex();
    IndexWriterConfig conf = new IndexWriterConfig(LUCENE_VERSION, analyzerForIndex);
    commitWriter = new IndexWriter(commitDirectory, conf);

Examples of org.apache.blur.store.hdfs.HdfsDirectory

  }

  private void assertData(int totalShardCount) throws IOException {
    Partitioner<IntWritable, IntWritable> partitioner = new HashPartitioner<IntWritable, IntWritable>();
    // Open each shard's index through HdfsDirectory and walk its documents,
    // retrieving the "id" field of each one for verification.
    for (int i = 0; i < totalShardCount; i++) {
      HdfsDirectory directory = new HdfsDirectory(configuration, new Path(path, BlurUtil.getShardName(i)));
      DirectoryReader reader = DirectoryReader.open(directory);
      int numDocs = reader.numDocs();
      for (int d = 0; d < numDocs; d++) {
        Document document = reader.document(d);
        IndexableField field = document.getField("id");

Examples of org.apache.blur.store.hdfs.HdfsDirectory

    }
  }

  private static void createShard(Configuration configuration, int i, Path path, int totalShardCount)
      throws IOException {
    // Build a shard index directly in HDFS; compound files are disabled so
    // each Lucene index file is written as a separate HDFS file.
    HdfsDirectory hdfsDirectory = new HdfsDirectory(configuration, path);
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new KeywordAnalyzer());
    TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
    mergePolicy.setUseCompoundFile(false);
    IndexWriter indexWriter = new IndexWriter(hdfsDirectory, conf);
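
For comparison with the createShard excerpt above, here is a minimal, self-contained sketch of the write path. The class name and shard path are hypothetical; the sketch assumes the Lucene 4.3-era APIs implied by Version.LUCENE_43 above.

import org.apache.blur.store.hdfs.HdfsDirectory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.TieredMergePolicy;
import org.apache.lucene.util.Version;

public class HdfsDirectoryWriteExample {
  public static void main(String[] args) throws Exception {
    Configuration configuration = new Configuration();
    // Hypothetical shard path in HDFS.
    Path shardPath = new Path("hdfs://namenode/blur/tables/table1/shard-00000000");

    HdfsDirectory directory = new HdfsDirectory(configuration, shardPath);
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new KeywordAnalyzer());
    // As in the excerpt above, write plain index files rather than compound files.
    TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
    mergePolicy.setUseCompoundFile(false);

    IndexWriter writer = new IndexWriter(directory, conf);
    Document doc = new Document();
    doc.add(new StringField("id", "row-1", Field.Store.YES));
    writer.addDocument(doc);
    writer.close();
  }
}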

Examples of org.apache.blur.store.hdfs.HdfsDirectory

    Path[] paths = getPaths();
    int numberOfShardsToMergePerPass = paths.length / newShardCount;
    // For each new shard, open a writer on a base index and merge the other
    // source shard indexes into it with addIndexes.
    for (int i = 0; i < newShardCount; i++) {
      System.out.println("Base Index [" + paths[i] + "]");
      IndexWriterConfig lconf = new IndexWriterConfig(Version.LUCENE_42, new KeywordAnalyzer());
      HdfsDirectory dir = new HdfsDirectory(getConf(), paths[i]);
      IndexWriter indexWriter = new IndexWriter(dir, lconf);
      // Collect the directories to merge and remember their paths so they
      // can be removed once the merge completes.
      Directory[] dirs = new Directory[numberOfShardsToMergePerPass - 1];
      Path[] pathsToDelete = new Path[numberOfShardsToMergePerPass - 1];
      for (int p = 1; p < numberOfShardsToMergePerPass; p++) {
        Path pathToMerge = paths[i + p * newShardCount];
        System.out.println("Merge [" + pathToMerge + "]");
        dirs[p - 1] = new HdfsDirectory(getConf(), pathToMerge);
        pathsToDelete[p - 1] = pathToMerge;
      }
      indexWriter.addIndexes(dirs);
      indexWriter.close();
      FileSystem fileSystem = path.getFileSystem(getConf());
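
The merge loop above condenses into a standalone sketch: open an IndexWriter on one HdfsDirectory and fold the remaining shard indexes into it with addIndexes. The paths and class name below are hypothetical, and the sketch assumes the Lucene 4.2-era APIs implied by Version.LUCENE_42 above.

import org.apache.blur.store.hdfs.HdfsDirectory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.lucene.analysis.core.KeywordAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Version;

public class HdfsDirectoryMergeExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical base index plus the shard indexes to fold into it.
    Path basePath = new Path("hdfs://namenode/blur/merge/shard-00000000");
    Path[] sourcePaths = {
        new Path("hdfs://namenode/blur/merge/shard-00000001"),
        new Path("hdfs://namenode/blur/merge/shard-00000002") };

    IndexWriter writer = new IndexWriter(new HdfsDirectory(conf, basePath),
        new IndexWriterConfig(Version.LUCENE_42, new KeywordAnalyzer()));
    Directory[] sources = new Directory[sourcePaths.length];
    for (int i = 0; i < sourcePaths.length; i++) {
      sources[i] = new HdfsDirectory(conf, sourcePaths[i]);
    }
    // addIndexes copies the segments of the source directories into the base index.
    writer.addIndexes(sources);
    writer.close();
  }
}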

Examples of org.apache.blur.store.hdfs.HdfsDirectory

    Path tableOutput = BlurOutputFormat.getOutputPath(_configuration);
    String shardName = BlurUtil.getShardName(BlurConstants.SHARD_PREFIX, shardId);
    Path indexPath = new Path(tableOutput, shardName);
    _newIndex = new Path(indexPath, tmpDirName);
    // Wrap the HdfsDirectory in a ProgressableDirectory so long index writes
    // report progress to the MapReduce framework, and disable Lucene locking.
    _finalDir = new ProgressableDirectory(new HdfsDirectory(_configuration, _newIndex), getProgressable());
    _finalDir.setLockFactory(NoLockFactory.getNoLockFactory());

    TableContext tableContext = TableContext.create(tableDescriptor);
    _fieldManager = tableContext.getFieldManager();
    Analyzer analyzer = _fieldManager.getAnalyzerForIndex();

Examples of org.apache.blur.store.hdfs.HdfsDirectory

  @Override
  protected Directory setupDirectory() throws IOException {
    // Point HdfsDirectory at a local file:// URI so the directory can be
    // exercised without a running HDFS cluster.
    URI uri = new File(file, "hdfs").toURI();
    Path hdfsDirPath = new Path(uri.toString());
    Configuration conf = new Configuration();
    return new HdfsDirectory(conf, hdfsDirPath);
  }

Examples of org.apache.blur.store.hdfs.HdfsDirectory

          Path file = fileStatus.getPath();
          if (fileStatus.isDir() && file.getName().endsWith(COMMIT)) {
            // Rename the ".commit" directory to ".inuse"; if it contains a
            // valid Lucene index, import it, otherwise mark it as a bad index.
            Path inuse = new Path(file.getParent(), rename(file.getName(), INUSE));
            if (fileSystem.rename(file, inuse)) {
              HdfsDirectory hdfsDirectory = new HdfsDirectory(configuration, inuse);
              if (DirectoryReader.indexExists(hdfsDirectory)) {
                IndexAction indexAction = getIndexAction(hdfsDirectory, fileSystem);
                _blurIndex.process(indexAction);
                return;
              } else {
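
The validation step in the excerpt above, checking that a renamed directory actually holds a Lucene index before importing it, can be reproduced in isolation. A minimal sketch with a hypothetical path and class name, assuming Lucene 4.x:

import org.apache.blur.store.hdfs.HdfsDirectory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.lucene.index.DirectoryReader;

public class HdfsDirectoryIndexCheck {
  public static void main(String[] args) throws Exception {
    Configuration configuration = new Configuration();
    // Hypothetical directory that may or may not contain a committed Lucene index.
    Path inuse = new Path("hdfs://namenode/blur/tables/table1/shard-00000000/index_1.inuse");

    HdfsDirectory hdfsDirectory = new HdfsDirectory(configuration, inuse);
    if (DirectoryReader.indexExists(hdfsDirectory)) {
      System.out.println("Directory holds a usable Lucene index.");
    } else {
      System.out.println("No index found; treat the directory as a bad import.");
    }
  }
}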

Examples of org.apache.blur.store.hdfs.HdfsDirectory

    // Working paths for the shard: ".commit" receives the incoming index,
    // while ".badrowids", ".badindex" and ".inuse" track rejected rows, bad
    // indexes and indexes currently being imported.
    _path = new Path(_shardPath, indexDirName + ".commit");
    _fileSystem.mkdirs(_path);
    _badRowIdsPath = new Path(_shardPath, indexDirName + ".badrowids");
    _badIndexPath = new Path(_shardPath, indexDirName + ".badindex");
    _inUsePath = new Path(_shardPath, indexDirName + ".inuse");
    Directory commitDirectory = new HdfsDirectory(configuration, _path);
    _mainDirectory = new HdfsDirectory(configuration, _shardPath);
    _fieldManager = tableContext.getFieldManager();
    Analyzer analyzerForIndex = _fieldManager.getAnalyzerForIndex();
    IndexWriterConfig conf = new IndexWriterConfig(LUCENE_VERSION, analyzerForIndex);
    // conf.setMergePolicy(NoMergePolicy.NO_COMPOUND_FILES);
    TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();