Examples of FileSystemDirectory


Examples of com.alimama.mdrill.hdfsDirectory.FileSystemDirectory

            FileUtils.forceDelete(fd.getDirectory());
    }
   
    if(m1.d instanceof FileSystemDirectory)
    {
      FileSystemDirectory fd=(FileSystemDirectory)m1.d;
      FileSystem fs=FileSystem.get(conf);
      fs.delete(fd.directory,true);
    }
  }
View Full Code Here

Examples of com.alimama.mdrill.hdfsDirectory.FileSystemDirectory

          for(FileStatus s:list)
          {
            try{
            DirectoryInfo info=new DirectoryInfo();
            info.tp=DirTpe.hdfs;
            info.d=new FileSystemDirectory(fs, s.getPath(), false, conf);
            savedTxid=Math.max(savedTxid, info.readTxid());
            }catch(Throwable e){
              LOG.error("recoverFromEditlog error",e);
            }
          }
View Full Code Here

Examples of com.alimama.mdrill.hdfsDirectory.FileSystemDirectory

                {
                  continue;
                }
                for(FileStatus ssss:sublist)
                {
                  FileSystemDirectory d=new FileSystemDirectory(fstimeout, ssss.getPath(), false, conf_timeout);
                  d.setUsedBlockBuffer(true);
                  DirectoryInfo info=new DirectoryInfo();
                  info.d=d;
                  info.tp=DirectoryInfo.DirTpe.file;
                  diskDirector.put(s1+"/sigment/"+ssss.getPath().getName(), info);
                  ishdfsmode=true;
                  SolrCore.log.info(">>>>>FileSystemDirectory hdfs add links "+ssss.getPath());
                }
               
              }else{

             
              FileSystemDirectory d=new FileSystemDirectory(fs, p, false, conf);
              d.setUsedBlockBuffer(true);
              DirectoryInfo info=new DirectoryInfo();
              info.d=d;
              info.tp=DirectoryInfo.DirTpe.file;
              diskDirector.put(s1, info);
              ishdfsmode=true;
View Full Code Here

Examples of com.alimama.mdrill.hdfsDirectory.FileSystemDirectory

          for(FileStatus s:list)
          {
            try{
            DirectoryInfo info=new DirectoryInfo();
            info.tp=DirTpe.hdfs;
            info.d=new FileSystemDirectory(fs, s.getPath(), false, conf);
            savedTxid=Math.max(savedTxid, info.readTxid());
            }catch(Throwable e){
              LOG.error("recoverFromEditlog error",e);
            }
          }
View Full Code Here

Examples of com.alimama.mdrill.hdfsDirectory.FileSystemDirectory

                {
                  continue;
                }
                for(FileStatus ssss:sublist)
                {
                  FileSystemDirectory d=new FileSystemDirectory(fstimeout, ssss.getPath(), false, conf_timeout);
                  d.setUsedBlockBuffer(true);
                  DirectoryInfo info=new DirectoryInfo();
                  info.d=d;
                  info.tp=DirectoryInfo.DirTpe.file;
                  diskDirector.put(s1+"/sigment/"+ssss.getPath().getName(), info);
                  ishdfsmode=true;
                  SolrCore.log.info(">>>>>FileSystemDirectory hdfs add links "+ssss.getPath());
                }
               
              }else{

             
              FileSystemDirectory d=new FileSystemDirectory(fs, p, false, conf);
              d.setUsedBlockBuffer(true);
              DirectoryInfo info=new DirectoryInfo();
              info.d=d;
              info.tp=DirectoryInfo.DirTpe.file;
              diskDirector.put(s1, info);
              ishdfsmode=true;
View Full Code Here

Examples of com.alimama.mdrill.hdfsDirectory.FileSystemDirectory

      if (dir instanceof FSDirectory) {
        FSDirectory fsd = (FSDirectory) dir;
        sb.append("dir=").append(fsd.getDirectory());
      }else if (dir instanceof FileSystemDirectory) {
        FileSystemDirectory fsd = (FileSystemDirectory) dir;
          sb.append("dir=").append(fsd.directory.toString());
        }else {
        sb.append("dir=").append(dir);
      }
View Full Code Here

Examples of com.alimama.mdrill.hdfsDirectory.FileSystemDirectory

    buffer.append(si.name).append("@");
    if(core.dir instanceof FSDirectory){
        FSDirectory dddir=(FSDirectory)core.dir;
        buffer.append(dddir.getDirectory().getAbsolutePath()).append("@");
      }else if(core.dir instanceof FileSystemDirectory){
        FileSystemDirectory dddir=(FileSystemDirectory)core.dir;
        buffer.append("@hdfs@"+dddir.directory.toString()).append("@");
      }
      else if(core.dir instanceof RAMDirectory){
        RAMDirectory dddir=(RAMDirectory)core.dir;
        buffer.append(dddir.uuid).append("@");
View Full Code Here

Examples of com.senseidb.indexing.hadoop.reduce.FileSystemDirectory

        for (int i = 0; i < shards.length; i++) {
          Path path = new Path(shards[i].getDirectory());
          long generation = -1;

          if (fs.exists(path)) {
            FileSystemDirectory dir = null;

            try {
              dir = new FileSystemDirectory(fs, path, false, conf);
              generation = LuceneUtil.getCurrentSegmentGeneration(dir);
            } finally {
              if (dir != null) {
                dir.close();
              }
            }
          }

          if (generation != shards[i].getGeneration()) {
View Full Code Here

Examples of org.apache.hadoop.contrib.index.lucene.FileSystemDirectory

    // verify the index
    IndexReader[] readers = new IndexReader[shards.length];
    for (int i = 0; i < shards.length; i++) {
      Directory dir =
          new FileSystemDirectory(fs, new Path(shards[i].getDirectory()),
              false, conf);
      readers[i] = IndexReader.open(dir);
    }

    IndexReader reader = new MultiReader(readers);
    IndexSearcher searcher = new IndexSearcher(reader);
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));

    assertEquals(numRuns * numDocsPerRun, hits.length());

    int[] counts = new int[numDocsPerRun];
    for (int i = 0; i < hits.length(); i++) {
      Document doc = hits.doc(i);
      counts[Integer.parseInt(doc.get("id"))]++;
    }

    for (int i = 0; i < numDocsPerRun; i++) {
      assertEquals(numRuns, counts[i]);
    }

    // max field length is 2, so "dot" is also indexed but not "org"
    hits = searcher.search(new TermQuery(new Term("content", "dot")));
    assertEquals(numRuns, hits.length());

    hits = searcher.search(new TermQuery(new Term("content", "org")));
    assertEquals(0, hits.length());

    searcher.close();
    reader.close();

    // open and close an index writer with KeepOnlyLastCommitDeletionPolicy
    // to remove earlier checkpoints
    for (int i = 0; i < shards.length; i++) {
      Directory dir =
          new FileSystemDirectory(fs, new Path(shards[i].getDirectory()),
              false, conf);
      IndexWriter writer =
          new IndexWriter(dir, false, null,
              new KeepOnlyLastCommitDeletionPolicy());
      writer.close();
View Full Code Here

Examples of org.apache.hadoop.contrib.index.lucene.FileSystemDirectory

    // verify the index
    IndexReader[] readers = new IndexReader[shards.length];
    for (int i = 0; i < shards.length; i++) {
      Directory dir =
          new FileSystemDirectory(fs, new Path(shards[i].getDirectory()),
              false, conf);
      readers[i] = IndexReader.open(dir);
    }

    IndexReader reader = new MultiReader(readers);
    IndexSearcher searcher = new IndexSearcher(reader);
    Hits hits = searcher.search(new TermQuery(new Term("content", "apache")));

    assertEquals(numRuns * numDocsPerRun, hits.length());

    int[] counts = new int[numDocsPerRun];
    for (int i = 0; i < hits.length(); i++) {
      Document doc = hits.doc(i);
      counts[Integer.parseInt(doc.get("id"))]++;
    }

    for (int i = 0; i < numDocsPerRun; i++) {
      assertEquals(numRuns, counts[i]);
    }

    // max field length is 2, so "dot" is also indexed but not "org"
    hits = searcher.search(new TermQuery(new Term("content", "dot")));
    assertEquals(numRuns, hits.length());

    hits = searcher.search(new TermQuery(new Term("content", "org")));
    assertEquals(0, hits.length());

    searcher.close();
    reader.close();

    // open and close an index writer with KeepOnlyLastCommitDeletionPolicy
    // to remove earlier checkpoints
    for (int i = 0; i < shards.length; i++) {
      Directory dir =
          new FileSystemDirectory(fs, new Path(shards[i].getDirectory()),
              false, conf);
      IndexWriter writer =
          new IndexWriter(dir, false, null,
              new KeepOnlyLastCommitDeletionPolicy());
      writer.close();
View Full Code Here
TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc and owned by ORACLE Inc. Contact coftware#gmail.com.