Package: org.apache.solr.client.solrj.impl

Examples of org.apache.solr.client.solrj.impl.HttpSolrServer


    {
        if (_server == null)
        {
            try
            {
                _server = new HttpSolrServer(
                        ConfigurationManager.getProperty("oai", "solr.url"));
                log.debug("Solr Server Initialized");
            }           
            catch (Exception e)
            {
View Full Code Here


    jobConf.setBoolean(ExtractingParams.IGNORE_TIKA_EXCEPTION, false);

    MapReduceIndexerTool tool;
    int res;
    QueryResponse results;
    HttpSolrServer server = new HttpSolrServer(cloudJettys.get(0).url);

    String[] args = new String[] {
        "--solr-home-dir=" + MINIMR_CONF_DIR.getAbsolutePath(),
        "--output-dir=" + outDir.toString(),
        "--mappers=3",
        ++numRuns % 2 == 0 ? "--input-list=" + INPATH.toString() : dataDir.toString(),
        "--shard-url", cloudJettys.get(0).url,
        "--shard-url", cloudJettys.get(1).url,
        "--shard-url", cloudJettys.get(2).url,
        "--go-live-threads", Integer.toString(random().nextInt(15) + 1),
        "--verbose",
        "--go-live"
    };
    args = prependInitialArgs(args);
   
    if (true) {
      tool = new MapReduceIndexerTool();
      res = ToolRunner.run(jobConf, tool, args);
      assertEquals(0, res);
      assertTrue(tool.job.isComplete());
      assertTrue(tool.job.isSuccessful());
      results = server.query(new SolrQuery("*:*"));
      assertEquals(20, results.getResults().getNumFound());
    }   
   
    fs.delete(inDir, true);  
    fs.delete(outDir, true)
    fs.delete(dataDir, true);
    assertTrue(fs.mkdirs(inDir));
    INPATH = upAvroFile(fs, inDir, DATADIR, dataDir, inputAvroFile2);
   
    args = new String[] {
        "--solr-home-dir=" + MINIMR_CONF_DIR.getAbsolutePath(),
        "--output-dir=" + outDir.toString(),
        "--mappers=3",
        "--verbose",
        "--go-live",
        ++numRuns % 2 == 0 ? "--input-list=" + INPATH.toString() : dataDir.toString(),
        "--shard-url", cloudJettys.get(0).url,
        "--shard-url", cloudJettys.get(1).url,
        "--shard-url", cloudJettys.get(2).url,
        "--go-live-threads", Integer.toString(random().nextInt(15) + 1)
    };
    args = prependInitialArgs(args);
   
    if (true) {
      tool = new MapReduceIndexerTool();
      res = ToolRunner.run(jobConf, tool, args);
      assertEquals(0, res);
      assertTrue(tool.job.isComplete());
      assertTrue(tool.job.isSuccessful());     
      results = server.query(new SolrQuery("*:*"));
     
      assertEquals(22, results.getResults().getNumFound());
    }   
   
    // try using zookeeper
    String collection = "collection1";
    if (random().nextBoolean()) {
      // sometimes, use an alias
      createAlias("updatealias", "collection1");
      collection = "updatealias";
    }
   
    fs.delete(inDir, true);  
    fs.delete(outDir, true)
    fs.delete(dataDir, true);   
    INPATH = upAvroFile(fs, inDir, DATADIR, dataDir, inputAvroFile3);

    cloudClient.deleteByQuery("*:*");
    cloudClient.commit();
    assertEquals(0, cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound());     

    args = new String[] {
        "--output-dir=" + outDir.toString(),
        "--mappers=3",
        "--reducers=12",
        "--fanout=2",
        "--verbose",
        "--go-live",
        ++numRuns % 2 == 0 ? "--input-list=" + INPATH.toString() : dataDir.toString(),
        "--zk-host", zkServer.getZkAddress(),
        "--collection", collection
    };
    args = prependInitialArgs(args);

    if (true) {
      tool = new MapReduceIndexerTool();
      res = ToolRunner.run(jobConf, tool, args);
      assertEquals(0, res);
      assertTrue(tool.job.isComplete());
      assertTrue(tool.job.isSuccessful());
     
      SolrDocumentList resultDocs = executeSolrQuery(cloudClient, "*:*");     
      assertEquals(RECORD_COUNT, resultDocs.getNumFound());
      assertEquals(RECORD_COUNT, resultDocs.size());
     
      // perform updates
      for (int i = 0; i < RECORD_COUNT; i++) {
          SolrDocument doc = resultDocs.get(i);
          SolrInputDocument update = new SolrInputDocument();
          for (Map.Entry<String, Object> entry : doc.entrySet()) {
              update.setField(entry.getKey(), entry.getValue());
          }
          update.setField("user_screen_name", "Nadja" + i);
          update.removeField("_version_");
          cloudClient.add(update);
      }
      cloudClient.commit();
     
      // verify updates
      SolrDocumentList resultDocs2 = executeSolrQuery(cloudClient, "*:*");  
      assertEquals(RECORD_COUNT, resultDocs2.getNumFound());
      assertEquals(RECORD_COUNT, resultDocs2.size());
      for (int i = 0; i < RECORD_COUNT; i++) {
          SolrDocument doc = resultDocs.get(i);
          SolrDocument doc2 = resultDocs2.get(i);
          assertEquals(doc.getFirstValue("id"), doc2.getFirstValue("id"));
          assertEquals("Nadja" + i, doc2.getFirstValue("user_screen_name"));
          assertEquals(doc.getFirstValue("text"), doc2.getFirstValue("text"));
         
          // perform delete
          cloudClient.deleteById((String)doc.getFirstValue("id"));
      }
      cloudClient.commit();
     
      // verify deletes
      assertEquals(0, executeSolrQuery(cloudClient, "*:*").size());
    }   
   
    cloudClient.deleteByQuery("*:*");
    cloudClient.commit();
    assertEquals(0, cloudClient.query(new SolrQuery("*:*")).getResults().getNumFound());     
    server.shutdown();

   
    // try using zookeeper with replication
    String replicatedCollection = "replicated_collection";
    createCollection(replicatedCollection, 11, 3, 11);
View Full Code Here

          // Asynchronous live-merge task: asks the Solr core at mergeUrl to merge in
          // the index found under dir. The outcome (success flag or the failing
          // exception) is reported through the returned Request object rather than
          // by throwing, so the submitting loop can inspect it via Future.get().
          Callable<Request> task = new Callable<Request>() {
            @Override
            public Request call() {
              Request req = new Request();
              LOG.info("Live merge " + dir.getPath() + " into " + mergeUrl);
              // One short-lived client per task; shut down in finally below.
              final HttpSolrServer server = new HttpSolrServer(mergeUrl);
              try {
                CoreAdminRequest.MergeIndexes mergeRequest = new CoreAdminRequest.MergeIndexes();
                mergeRequest.setCoreName(name);
                // assumes the output shard layout is <dir>/data/index — TODO confirm
                mergeRequest.setIndexDirs(Arrays.asList(dir.getPath().toString() + "/data/index"));
                try {
                  mergeRequest.process(server);
                  req.success = true;
                } catch (SolrServerException e) {
                  // Record the failure for the caller; do not propagate.
                  req.e = e;
                  return req;
                } catch (IOException e) {
                  req.e = e;
                  return req;
                }
              } finally {
                // Always release the HTTP client, whether the merge succeeded or not.
                server.shutdown();
              }
              return req;
            }
          };
          pending.add(completionService.submit(task));
        }
      }
     
      while (pending != null && pending.size() > 0) {
        try {
          Future<Request> future = completionService.take();
          if (future == null) break;
          pending.remove(future);
         
          try {
            Request req = future.get();
           
            if (!req.success) {
              // failed
              LOG.error("A live merge command failed", req.e);
              return false;
            }
           
          } catch (ExecutionException e) {
            LOG.error("Error sending live merge command", e);
            return false;
          }
         
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
          LOG.error("Live merge process interrupted", e);
          return false;
        }
      }
     
      cnt = -1;
     
     
      try {
        LOG.info("Committing live merge...");
        if (options.zkHost != null) {
          CloudSolrServer server = new CloudSolrServer(options.zkHost);
          server.setDefaultCollection(options.collection);
          server.commit();
          server.shutdown();
        } else {
          for (List<String> urls : options.shardUrls) {
            for (String url : urls) {
              // TODO: we should do these concurrently
              HttpSolrServer server = new HttpSolrServer(url);
              server.commit();
              server.shutdown();
            }
          }
        }
        LOG.info("Done committing live merge");
      } catch (Exception e) {
View Full Code Here

        .getSlices(replicatedCollection);
    for (Slice slice : slices) {
      Collection<Replica> replicas = slice.getReplicas();
      long found = -1;
      for (Replica replica : replicas) {
        HttpSolrServer client = new HttpSolrServer(
            new ZkCoreNodeProps(replica).getCoreUrl());
        SolrQuery query = new SolrQuery("*:*");
        query.set("distrib", false);
        QueryResponse replicaResults = client.query(query);
        long count = replicaResults.getResults().getNumFound();
        if (found != -1) {
          assertEquals(slice.getName() + " is inconsistent "
              + new ZkCoreNodeProps(replica).getCoreUrl(), found, count);
        }
View Full Code Here

                preQuery.setFilter(preFilter);
                SolrQuery q = store.count(getSchema(), preQuery);
                if (store.getLogger().isLoggable(Level.FINE)) {
                    store.getLogger().log(Level.FINE, q.toString());
                }
                HttpSolrServer server = store.getSolrServer();
                QueryResponse rsp = server.query(q);
                count = new Long(rsp.getResults().getNumFound()-rsp.getResults().getStart()).intValue();
                //Manage max manually
                if (query.getMaxFeatures() > 0 && query.getMaxFeatures() < Integer.MAX_VALUE) {
                    if(count > query.getMaxFeatures()){
                        count = query.getMaxFeatures();
View Full Code Here

     * @param field SOLR field to query to obtain the store types
     */
    public SolrDataStore(URL url, String field) {
        this.url = url;
        this.field = field;
        this.solrServer = new HttpSolrServer(url.toString());
        this.solrServer.setAllowCompression(true);
        this.solrServer.setConnectionTimeout(10000);
        this.solrServer.setFollowRedirects(true);
        this.solrServer.setSoTimeout(10000);
    }
View Full Code Here

        return solrServer;
    }

    private SolrServer initializeWithExistingHttpServer() throws IOException, SolrServerException {
        // try basic Solr HTTP client
        HttpSolrServer httpSolrServer = new HttpSolrServer(remoteSolrServerConfiguration.getSolrHttpUrls()[0]);
        SolrPingResponse ping = httpSolrServer.ping();
        if (ping != null && 0 == ping.getStatus()) {
            return httpSolrServer;
        } else {
            throw new IOException("the found HTTP Solr server is not alive");
        }
View Full Code Here

      }
    }
  }

  private SolrServer getSolrServer() throws MalformedURLException {
    return new HttpSolrServer(solrDeployPath);
  }
View Full Code Here

      }
    }
  }

  private SolrServer getSolrServer() throws MalformedURLException {
    return new HttpSolrServer(solrDeployPath);
  }
View Full Code Here

  private SolrServer createSolrServer() {
    if (StringUtils.hasText(this.properties.getZkHost())) {
      return new CloudSolrServer(this.properties.getZkHost());
    }
    return new HttpSolrServer(this.properties.getHost());
  }
View Full Code Here

TOP

Related Classes of org.apache.solr.client.solrj.impl.HttpSolrServer

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.