Package org.sonatype.nexus.httpclient

Examples of org.sonatype.nexus.httpclient.Page
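The examples below share one pattern: build a ScrapeContext for the proxy repository, fetch a Page via Page.getPageFor(context, url), check the HTTP status through page.getHttpResponse().getStatusLine().getStatusCode(), and then read the parsed body as a Jsoup Document from page.getDocument(). A minimal sketch of that pattern, assuming a pre-built ScrapeContext (the helper name isReachable is ours, not part of the API):

  // minimal sketch, not part of the Page API; assumes a pre-built ScrapeContext
  static boolean isReachable(final ScrapeContext context, final String remoteUrl)
      throws IOException
  {
    final Page page = Page.getPageFor(context, remoteUrl);
    if (page.getHttpResponse().getStatusLine().getStatusCode() != 200) {
      return false; // anything but 200 OK counts as unreachable here
    }
    return page.getDocument() != null; // body is parsed into a Jsoup Document
  }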


          final ResourceStoreRequest rmRequest = new ResourceStoreRequest("/.meta/repository-metadata.xml");
          final URL nxRepoMetadataUrl = appendQueryString(
              repository, rmRequest,
              getAbsoluteUrlFromBase(repository, rmRequest));
          try {
            final Page page = Page.getPageFor(pageContext, nxRepoMetadataUrl.toExternalForm());
            if (page.getStatusCode() == 200) {
              // this is a Nexus with browsing disabled; report success
              log.debug(
                  "Original GET request for URL {} failed with 404, but GET request for URL {} succeeded; assuming remote is a Nexus repository with browsing disabled.",
                  remoteUrl, nxRepoMetadataUrl);
              return true;
View Full Code Here
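The example above handles a remote that answered 404 to the original browse request: if /.meta/repository-metadata.xml is still fetchable, the remote is taken to be a Nexus instance with browsing disabled. Condensed into a hedged sketch (variable names as in the snippet; the boolean is ours):

    final Page mdPage = Page.getPageFor(pageContext, nxRepoMetadataUrl.toExternalForm());
    final boolean nexusWithBrowsingDisabled = mdPage.getStatusCode() == 200;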


          final int siblingDepth = currentDepth + 1;
          if (siblingDepth < context.getScrapeDepth()) {
            maySleepBeforeSubsequentFetch();
            final String newSiblingEncodedUrl =
                getRemoteUrlForRepositoryPath(context, newSibling.getPathElements()) + "/";
            final Page siblingPage = Page.getPageFor(context, newSiblingEncodedUrl);
            if (siblingPage.getHttpResponse().getStatusLine().getStatusCode() == 200) {
              diveIn(context, siblingPage, siblingDepth, parentOMatic, newSibling);
            }
            else {
              // we strictly expect 200 here
              throw new UnexpectedPageResponse(siblingPage.getUrl(), siblingPage.getHttpResponse().getStatusLine());
View Full Code Here

    final RemoteDetectionResult result = super.detectRemoteRepository(context, page);
    if (RemoteDetectionOutcome.RECOGNIZED_SHOULD_BE_SCRAPED == result.getRemoteDetectionOutcome()) {
      try {
        // so index page looks like Nexus index page, let's see about repo metadata
        // this is not cheap, as we are doing extra HTTP requests to get it
        final Page repoMetadataPage =
            Page.getPageFor(context, context.getRemoteRepositoryRootUrl() + ".meta/repository-metadata.xml");
        if (repoMetadataPage.getHttpResponse().getStatusLine().getStatusCode() == 200) {
          // sanity: all Nexus repo metadata has these elements (see below)
          final Elements url = repoMetadataPage.getDocument().getElementsByTag("url");
          final Elements layout = repoMetadataPage.getDocument().getElementsByTag("layout");
          // only proxies have this element
          final Elements localUrl = repoMetadataPage.getDocument().getElementsByTag("localUrl");
          // only groups have this element
          final Elements memberRepositories =
              repoMetadataPage.getDocument().getElementsByTag("memberRepositories");

          // sanity checks:
          // all of them must have a "url" tag
          // all of them must have a "layout" tag with value "maven2"
          if (!url.isEmpty() && !layout.isEmpty() && "maven2".equals(layout.get(0).text())) {
View Full Code Here
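The comments above imply a simple classification of the remote once the metadata parses: localUrl only appears for proxies and memberRepositories only for groups. A hedged sketch of that decision, inferred from the comments rather than from the full source:

          if (!localUrl.isEmpty()) {
            // remote repository is itself a proxy
          }
          else if (!memberRepositories.isEmpty()) {
            // remote repository is a group
          }
          else {
            // otherwise, presumably a plain hosted repository
          }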

    }

    // get client configured in same way as proxy is using it
    final HttpClient httpClient = createHttpClientFor(mavenProxyRepository);
    final ScrapeContext context = new ScrapeContext(mavenProxyRepository, httpClient, config.getRemoteScrapeDepth());
    final Page rootPage = Page.getPageFor(context, remoteRepositoryRootUrl);
    final ArrayList<Scraper> appliedScrapers = new ArrayList<Scraper>(scrapers);
    Collections.sort(appliedScrapers, new PriorityOrderingComparator<Scraper>());
    for (Scraper scraper : appliedScrapers) {
      log.debug("Remote scraping {} with Scraper {}", mavenProxyRepository, scraper.getId());
      scraper.scrape(context, rootPage);
View Full Code Here
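The loop is cut off above, but the test examples further down check context.isStopped() and context.isSuccessful() after scraping, so presumably each iteration ends once some scraper has settled the outcome. A hedged sketch of that guard inside the loop:

      scraper.scrape(context, rootPage);
      if (context.isStopped()) {
        // one scraper has decided (successfully or not); no point asking the rest
        break;
      }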

  @Override
  protected List<String> diveIn(final ScrapeContext context, final Page page)
      throws IOException
  {
    String prefix = null;
    Page initialPage = page;
    String initialPageUrl = page.getUrl();
    if (initialPage.getHttpResponse().getStatusLine().getStatusCode() != 200) {
      // we probably got the NoSuchKey response from S3, usually returned when the repo root is not at the bucket root
      prefix = getKeyFromNoSuchKeyResponse(initialPage);
      if (prefix == null) {
        log.info("Unexpected S3 response from remote of {}, cannot scrape this: {}", context.getProxyRepository(),
            initialPage.getDocument().outerHtml());
        context.stop("Remote recognized as " + getTargetedServer()
            + ", but unexpected response code and response body received (see logs).");
        return null;
      }
      // repo.remoteUrl does not have query parameters...
View Full Code Here
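The snippet is truncated at the comment about query parameters, but S3's bucket-listing API does accept a prefix query parameter, so the continuation plausibly re-requests the listing with the extracted prefix. A sketch of that step (the URL construction is our assumption, not taken from the source):

      final Page prefixedPage = Page.getPageFor(context, initialPageUrl + "?prefix=" + prefix);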

  protected void diveIn(final ScrapeContext context, final Page firstPage, final String rootUrl, final String prefix,
                        final Set<String> entries)
      throws IOException
  {
    Page page = firstPage;
    boolean truncated;
    do {
      // check for truncation (an "IsTruncated" element means we must "page" through the bucket to get all entries)
      truncated = isTruncated(page);

      // cancellation
      CancelableUtil.checkInterruption();

      // response should be 200 OK; if not, give up
      if (page.getHttpResponse().getStatusLine().getStatusCode() != 200) {
        context.stop("Remote recognized as " + getTargetedServer()
            + ", but cannot be scraped (unexpected response status " + page.getHttpResponse().getStatusLine() + ")");
        return;
      }

      final Elements root = page.getDocument().getElementsByTag("ListBucketResult");
      if (root.size() != 1 || !root.get(0).attr("xmlns").equals("http://s3.amazonaws.com/doc/2006-03-01/")) {
        context.stop("Remote recognized as " + getTargetedServer()
            + ", but unexpected response was received (not \"ListBucketResult\").");
        return;
      }

      log.debug("Processing S3 page response from remote of {} got from URL {}", context.getProxyRepository(), page.getUrl());
      String markerElement = null;
      final Elements elements = page.getDocument().getElementsByTag("Contents");
      for (Element element : elements) {
        final Elements keyElements = element.getElementsByTag("Key");
        if (keyElements.isEmpty()) {
          continue; // skip it
        }
View Full Code Here
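The do/while above pages through the bucket: markerElement collects the last key seen, and with S3's list-type-1 semantics a truncated listing is continued by sending that key back as the marker query parameter. A self-contained sketch of the whole paging loop under those assumptions (listAll and rootUrl are ours):

  static void listAll(final ScrapeContext context, final String rootUrl)
      throws IOException
  {
    String marker = null;
    boolean truncated;
    do {
      final String url = marker == null ? rootUrl : rootUrl + "?marker=" + marker;
      final Page page = Page.getPageFor(context, url);
      // S3 reports truncation in the IsTruncated element of ListBucketResult
      truncated = "true".equals(page.getDocument().getElementsByTag("IsTruncated").text());
      for (Element key : page.getDocument().getElementsByTag("Key")) {
        marker = key.text(); // keys arrive in lexicographic order; the last one becomes the next marker
        // process the entry here
      }
    } while (truncated);
  }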

  {
    final HttpClient httpClient = new DefaultHttpClient();
    final String repoRoot = "http://repository.springsource.com/maven/bundles/external/";
    when(mavenProxyRepository.getRemoteUrl()).thenReturn(repoRoot);
    final ScrapeContext context = new ScrapeContext(mavenProxyRepository, httpClient, 2);
    final Page page = Page.getPageFor(context, repoRoot);
    s3scraper.scrape(context, page);
    assertThat(context.isStopped(), is(true));
    assertThat(context.isSuccessful(), is(true));
    assertThat(context.getPrefixSource(), notNullValue());
    final List<String> entries = context.getPrefixSource().readEntries();
View Full Code Here
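A side note on these tests: DefaultHttpClient is the Apache HttpClient 4.x class and has been deprecated since 4.3. On a newer classpath the equivalent setup would be (a sketch, assuming HttpClient 4.3+ and import org.apache.http.impl.client.HttpClientBuilder):

      final HttpClient httpClient = HttpClientBuilder.create().build();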

  {
    final HttpClient httpClient = new DefaultHttpClient();
    final String remoteUrl = "http://spring-roo-repository.springsource.org/release/";
    when(mavenProxyRepository.getRemoteUrl()).thenReturn(remoteUrl);
    final ScrapeContext context = new ScrapeContext(mavenProxyRepository, httpClient, 2);
    final Page page = Page.getPageFor(context, remoteUrl);
    s3scraper.scrape(context, page);

    if (context.isSuccessful()) {
      System.out.println(context.getPrefixSource().readEntries());
    }
View Full Code Here

    try {
      final HttpClient httpClient = new DefaultHttpClient();
      final String repoRoot = server.getUrl().toString() + "/";
      when(mavenProxyRepository.getRemoteUrl()).thenReturn(repoRoot);
      final ScrapeContext context = new ScrapeContext(mavenProxyRepository, httpClient, 2);
      final Page page = Page.getPageFor(context, repoRoot);
      s3scraper.scrape(context, page);
      assertThat(context.isStopped(), is(true));
      assertThat(context.isSuccessful(), is(true));
      assertThat(context.getPrefixSource(), notNullValue());
      final List<String> entries = context.getPrefixSource().readEntries();
View Full Code Here

    try {
      final HttpClient httpClient = new DefaultHttpClient();
      final String repoRoot = server.getUrl().toString() + "/";
      when(mavenProxyRepository.getRemoteUrl()).thenReturn(repoRoot);
      final ScrapeContext context = new ScrapeContext(mavenProxyRepository, httpClient, 2);
      final Page page = Page.getPageFor(context, repoRoot);
      s3scraper.scrape(context, page);
      assertThat(context.isStopped(), is(true));
      assertThat(context.isSuccessful(), is(false));
    }
    finally {
View Full Code Here
