Package org.apache.nutch.crawl

Examples of org.apache.nutch.crawl.CrawlDatum
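The snippets below are excerpts from the Nutch sources and tests. As a primer, here is a minimal self-contained sketch of the CrawlDatum API they all rely on (an illustration assuming Nutch 1.x on the classpath; the class name is hypothetical):

    import org.apache.hadoop.io.Text;
    import org.apache.nutch.crawl.CrawlDatum;

    public class CrawlDatumPrimer {
      public static void main(String[] args) {
        // status, fetch interval in seconds (30 days), initial score
        CrawlDatum datum = new CrawlDatum(CrawlDatum.STATUS_DB_UNFETCHED,
            30 * 24 * 60 * 60, 1.0f);
        datum.setFetchTime(System.currentTimeMillis());
        // arbitrary Writable key/value metadata travels with the datum
        datum.getMetaData().put(new Text("seed"), new Text("true"));
        // human-readable status name, e.g. "db_unfetched"
        System.out.println(CrawlDatum.getStatusName(datum.getStatus()));
        System.out.println(datum);
      }
    }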


            LOG.debug(" - " + redirType + " redirect to " +
                url + " (fetching now)");
          }
          return url;
        } else {
          CrawlDatum newDatum = new CrawlDatum(CrawlDatum.STATUS_LINKED,
              datum.getFetchInterval(), datum.getScore());
          // transfer existing metadata
          newDatum.getMetaData().putAll(datum.getMetaData());
          try {
            scfilters.initialScore(url, newDatum);
          } catch (ScoringFilterException e) {
            e.printStackTrace();
          }
          if (reprUrl != null) {
            newDatum.getMetaData().put(Nutch.WRITABLE_REPR_URL_KEY,
                new Text(reprUrl));
          }
          output(url, newDatum, null, null, CrawlDatum.STATUS_LINKED);
          if (LOG.isDebugEnabled()) {
            LOG.debug(" - " + redirType + " redirect to " +
                url + " (fetching later)");
          }
        return null;
      }
    }

    private void queueRedirect(Text redirUrl, FetchItem fit) throws ScoringFilterException {
      CrawlDatum newDatum = new CrawlDatum(CrawlDatum.STATUS_DB_UNFETCHED,
          fit.datum.getFetchInterval(), fit.datum.getScore());
      // transfer all existing metadata to the redirect
      newDatum.getMetaData().putAll(fit.datum.getMetaData());
      scfilters.initialScore(redirUrl, newDatum);
      if (reprUrl != null) {
        newDatum.getMetaData().put(Nutch.WRITABLE_REPR_URL_KEY,
            new Text(reprUrl));
      }
      fit = FetchItem.create(redirUrl, newDatum, queueMode);
      if (fit != null) {
        FetchItemQueue fiq =
            fetchQueues.getFetchItemQueue(fit.queueID);
        fiq.addInProgressFetchItem(fit);
      }
    }
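Both redirect paths above follow the same pattern: the scheduling state (fetch interval, score) is carried into a fresh CrawlDatum and the metadata map is copied wholesale. A standalone sketch of that copy step (assuming Nutch 1.x; the class and method names are hypothetical):

    import org.apache.hadoop.io.Text;
    import org.apache.nutch.crawl.CrawlDatum;

    class RedirectCopy {
      // clone scheduling state and metadata from 'original' into a new unfetched datum
      static CrawlDatum forRedirect(CrawlDatum original) {
        CrawlDatum redirected = new CrawlDatum(CrawlDatum.STATUS_DB_UNFETCHED,
            original.getFetchInterval(), original.getScore());
        redirected.getMetaData().putAll(original.getMetaData());
        return redirected;
      }
    }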

      file.setMaxContentLength(maxContentLength);

    // set log level
    //LOG.setLevel(Level.parse((new String(logLevel)).toUpperCase()));

    Content content = file.getProtocolOutput(new Text(urlString), new CrawlDatum()).getContent();

    System.err.println("Content-Type: " + content.getContentType());
    System.err.println("Content-Length: " +
                       content.getMetadata().get(Response.CONTENT_LENGTH));
    System.err.println("Last-Modified: " +
                       content.getMetadata().get(Response.LAST_MODIFIED));

    Metadata metadata = null;
    try {
      String urlString = "file:" + sampleDir + fileSeparator + fileName;
      Protocol protocol = new ProtocolFactory(conf).getProtocol(urlString);
      Content content = protocol.getProtocolOutput(new Text(urlString),
          new CrawlDatum()).getContent();
      Parse parse = new ParseUtil(conf).parse(content).get(content.getUrl());
      metadata = parse.getData().getParseMeta();
    } catch (Exception e) {
      e.printStackTrace();
      Assert.fail(e.toString());
    }
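CrawlDatum is a Hadoop Writable, which is why the protocol and parse code above can hand a fresh instance to getProtocolOutput and to MapReduce jobs alike. A hedged round-trip sketch (assuming Nutch 1.x, where CrawlDatum overrides equals):

    import java.io.*;
    import org.apache.nutch.crawl.CrawlDatum;

    public class DatumRoundTrip {
      public static void main(String[] args) throws IOException {
        CrawlDatum before = new CrawlDatum(CrawlDatum.STATUS_DB_FETCHED, 2592000, 0.5f);
        // serialize, as the MapReduce shuffle or a SequenceFile writer would
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        before.write(new DataOutputStream(buf));
        // deserialize into a fresh instance
        CrawlDatum after = new CrawlDatum();
        after.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
        System.out.println(before.equals(after)); // true: all fields survive the trip
      }
    }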

      FetchSchedule schedule = FetchScheduleFactory
          .getFetchSchedule(new JobConf(conf));
      for (int i = 0; i < fetchDbStatusPairs.length; i++) {
        byte fromDbStatus = fetchDbStatusPairs[i][1];
        for (int j = 0; j < fetchDbStatusPairs.length; j++) {
          byte fetchStatus = fetchDbStatusPairs[j][0];
          CrawlDatum fromDb = null;
          if (fromDbStatus == -1) {
            // nothing yet in CrawlDb
            // CrawlDatum added by FreeGenerator or via outlink
          } else {
            fromDb = new CrawlDatum();
            fromDb.setStatus(fromDbStatus);
            // initialize fetchInterval:
            schedule.initializeSchedule(CrawlDbUpdateUtil.dummyURL, fromDb);
          }
          // expected db status
          byte toDbStatus = fetchDbStatusPairs[j][1];
          if (fetchStatus == -1) {
            if (fromDbStatus == -1) {
              // nothing fetched yet: new document detected via outlink
              toDbStatus = STATUS_DB_UNFETCHED;
            } else {
              // nothing fetched but new inlinks detected: status is unchanged
              toDbStatus = fromDbStatus;
            }
          } else if (fetchStatus == STATUS_FETCH_RETRY) {
            // a simple test of fetch_retry (without retries)
            if (fromDb == null || fromDb.getRetriesSinceFetch() < retryMax) {
              toDbStatus = STATUS_DB_UNFETCHED;
            } else {
              toDbStatus = STATUS_DB_GONE;
            }
          }
          String fromDbStatusName = (fromDbStatus == -1 ? "<not in CrawlDb>"
              : getStatusName(fromDbStatus));
          String fetchStatusName = (fetchStatus == -1 ? "<only inlinks>"
              : CrawlDatum.getStatusName(fetchStatus));
          LOG.info(fromDbStatusName + " + " + fetchStatusName + " => "
              + getStatusName(toDbStatus));
          List<CrawlDatum> values = new ArrayList<CrawlDatum>();
          for (int l = 0; l <= 2; l++) { // number of additional in-links
            CrawlDatum fetch = null;
            if (fetchStatus == -1) {
              // nothing fetched, need at least one in-link
              if (l == 0) continue;
            } else {
              fetch = new CrawlDatum();
              if (fromDb != null) {
                fetch.set(fromDb);
              } else {
                // not yet in CrawlDb: added by FreeGenerator
                schedule.initializeSchedule(CrawlDbUpdateUtil.dummyURL, fetch);
              }
              fetch.setStatus(fetchStatus);
              fetch.setFetchTime(System.currentTimeMillis());
            }
            if (fromDb != null)
              values.add(fromDb);
            if (fetch != null)
              values.add(fetch);
            // ... (remainder elided: run the CrawlDb update and assert the resulting status)
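The transition matrix above is logged through CrawlDatum.getStatusName. A short sketch dumping the same human-readable names for the common status constants (the exact constant set varies slightly across Nutch 1.x releases):

    import org.apache.nutch.crawl.CrawlDatum;

    public class StatusNames {
      public static void main(String[] args) {
        byte[] statuses = { CrawlDatum.STATUS_DB_UNFETCHED, CrawlDatum.STATUS_DB_FETCHED,
            CrawlDatum.STATUS_DB_GONE, CrawlDatum.STATUS_FETCH_SUCCESS,
            CrawlDatum.STATUS_FETCH_RETRY, CrawlDatum.STATUS_LINKED };
        for (byte status : statuses) {
          // e.g. "1 => db_unfetched"
          System.out.println(status + " => " + CrawlDatum.getStatusName(status));
        }
      }
    }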

        byte fromDbStatus = fetchDbStatusPairs[i][1];
        byte toDbStatus = fromDbStatus;
        if (fromDbStatus == -1) {
          toDbStatus = STATUS_DB_UNFETCHED;
        } else {
          CrawlDatum fromDb = new CrawlDatum();
          fromDb.setStatus(fromDbStatus);
          schedule.initializeSchedule(CrawlDbUpdateUtil.dummyURL, fromDb);
          values.add(fromDb);
        }
        LOG.info("inject "
            + (fromDbStatus == -1 ? "<not in CrawlDb>"
                : CrawlDatum.getStatusName(fromDbStatus)) + " + "
            + getStatusName(STATUS_INJECTED) + " => "
            + getStatusName(toDbStatus));
        CrawlDatum injected = new CrawlDatum(STATUS_INJECTED,
            conf.getInt("db.fetch.interval.default", 2592000), 0.1f);
        schedule.initializeSchedule(CrawlDbUpdateUtil.dummyURL, injected);
        try {
          scfilters.injectedScore(CrawlDbUpdateUtil.dummyURL, injected);
        } catch (ScoringFilterException e) {
          // ... (exception handling elided)
        }
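Injection, as tested above, seeds a CrawlDatum with STATUS_INJECTED, the configured default fetch interval, and a fixed score before it is merged into the CrawlDb. A minimal sketch of that construction (assuming Nutch 1.x and its NutchConfiguration helper):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.nutch.crawl.CrawlDatum;
    import org.apache.nutch.util.NutchConfiguration;

    public class InjectSketch {
      public static void main(String[] args) {
        Configuration conf = NutchConfiguration.create();
        // default re-fetch interval: 30 days unless overridden in nutch-site.xml
        int interval = conf.getInt("db.fetch.interval.default", 2592000);
        CrawlDatum injected = new CrawlDatum(CrawlDatum.STATUS_INJECTED, interval, 1.0f);
        System.out.println(injected);
      }
    }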


      ftp.setMaxContentLength(maxContentLength);

    // set log level
    //LOG.setLevel(Level.parse((new String(logLevel)).toUpperCase()));

    Content content = ftp.getProtocolOutput(new Text(urlString), new CrawlDatum()).getContent();

    System.err.println("Content-Type: " + content.getContentType());
    System.err.println("Content-Length: " +
                       content.getMetadata().get(Response.CONTENT_LENGTH));
    System.err.println("Last-Modified: " +
                       content.getMetadata().get(Response.LAST_MODIFIED));

    Inlinks inlinks = new Inlinks();
    inlinks.add(new Inlink("http://test1.com/", "text1"));
    inlinks.add(new Inlink("http://test2.com/", "text2"));
    inlinks.add(new Inlink("http://test3.com/", "text2"));
    try {
      filter.filter(doc, parse, new Text("http://nutch.apache.org/index.html"), new CrawlDatum(), inlinks);
    } catch(Exception e){
      e.printStackTrace();
      Assert.fail(e.getMessage());
    }
    Assert.assertNotNull(doc);
