Package org.apache.nutch.storage

Examples of org.apache.nutch.storage.WebPage
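org.apache.nutch.storage.WebPage is the Gora-backed, Avro-generated data bean that Nutch 2.x uses to represent one row of the web table. The snippets below show how a WebPage is created and populated, parsed, read from and written to the store, partitioned, and indexed.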


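The first example parses a local HTML file with HtmlParser. The raw bytes are wrapped in a ByteBuffer and attached to a fresh WebPage along with a base url and a content type: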
    // file and url are supplied by the enclosing test; read the raw page bytes
    byte[] bytes = new byte[(int) file.length()];
    DataInputStream in = new DataInputStream(new FileInputStream(file));
    in.readFully(bytes);
    in.close();
    Configuration conf = NutchConfiguration.create();
    HtmlParser parser = new HtmlParser();
    parser.setConf(conf);
    WebPage page = new WebPage();
    page.setBaseUrl(new Utf8(url));
    page.setContent(ByteBuffer.wrap(bytes));
    page.setContentType(new Utf8("text/html"));
    Parse parse = parser.getParse(url, page);
    System.out.println("title: " + parse.getTitle());
    System.out.println("text: " + parse.getText());
    System.out.println("outlinks: " + Arrays.toString(parse.getOutlinks()));


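Reading pages back from the web page store: each row is flattened to a line containing its url, its score, and, if present, a custom metadata attribute.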
  private List<String> readDb() throws Exception {
    List<URLWebPage> pages = CrawlTestUtil.readContents(webPageStore, null, fields);
    ArrayList<String> read = new ArrayList<String>();
    for (URLWebPage up : pages) {
      WebPage page = up.getDatum();
      String representation = up.getUrl();
      representation += "\tnutch.score=" + (int) page.getScore();
      ByteBuffer bb = page.getFromMetadata(new Utf8("custom.attribute"));
      if (bb != null) {
        representation += "\tcustom.attribute=" + Bytes.toString(bb);
      }
      read.add(representation);
    }
    return read;
  }
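The custom.attribute metadata read above has to be written somewhere first. A minimal sketch of the writing side, assuming the same webPageStore used by the test; the attribute name and value are illustrative:

    // hypothetical setup: attach a custom metadata attribute to a page and store it
    WebPage page = new WebPage();
    page.setScore(1.0f);
    page.putToMetadata(new Utf8("custom.attribute"),
        ByteBuffer.wrap("some value".getBytes(StandardCharsets.UTF_8)));
    webPageStore.put(TableUtil.reverseUrl("http://www.example.org/"), page);
    webPageStore.flush();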

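The same flow works for arbitrary content types: here the MIME type is detected with MimeUtil and ParseUtil selects the matching parser plugin (the direct TikaParser calls are left commented out for comparison):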
    // as above, file and url come from the enclosing test
    byte[] bytes = new byte[(int) file.length()];
    DataInputStream in = new DataInputStream(new FileInputStream(file));
    in.readFully(bytes);
    in.close();
    Configuration conf = NutchConfiguration.create();
    // TikaParser parser = new TikaParser();
    // parser.setConf(conf);
    WebPage page = new WebPage();
    page.setBaseUrl(new Utf8(url));
    page.setContent(ByteBuffer.wrap(bytes));
    MimeUtil mimeutil = new MimeUtil(conf);
    String mtype = mimeutil.getMimeType(file);
    page.setContentType(new Utf8(mtype));
    // Parse parse = parser.getParse(url, page);

    Parse parse = new ParseUtil(conf).parse(url, page);

    System.out.println("content type: " + mtype);

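A fragment that executes a Gora query against the store and scans the result, expecting at most one matching row: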
    Result<String, WebPage> result = datastore.execute(query);
    boolean found = false;
    // should happen only once
    while (result.next()) {
      try {
        WebPage page = result.get();
        String skey = result.getKey();
        // we should not get to this point but nevermind
        if (page == null || skey == null)
          break;
        found = true;

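The reduce method below, from Nutch's db update job, ties the pieces together: it merges inlink score data into the stored page, adjusts the fetch schedule according to crawl status, maintains a link-distance marker, and finally clears the per-job markers: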
  @Override
  protected void reduce(UrlWithScore key, Iterable<NutchWritable> values,
      Context context) throws IOException, InterruptedException {
    String keyUrl = key.getUrl().toString();

    WebPage page = null;
    inlinkedScoreData.clear();
   
    for (NutchWritable nutchWritable : values) {
      Writable val = nutchWritable.get();
      if (val instanceof WebPageWritable) {
        page = ((WebPageWritable) val).getWebPage();
      } else {
        inlinkedScoreData.add((ScoreDatum) val);
        if (inlinkedScoreData.size() >= maxLinks) {
          LOG.info("Limit reached, skipping further inlinks for " + keyUrl);
          break;
        }
      }
    }
    String url;
    try {
      url = TableUtil.unreverseUrl(keyUrl);
    } catch (Exception e) {
      // this can happen because a newly discovered malformed link
      // may slip by url filters
      // TODO: Find a better solution
      return;
    }

    if (page == null) { // new row
      if (!additionsAllowed) {
        return;
      }
      page = new WebPage();
      schedule.initializeSchedule(url, page);
      page.setStatus(CrawlStatus.STATUS_UNFETCHED);
      try {
        scoringFilters.initialScore(url, page);
      } catch (ScoringFilterException e) {
        page.setScore(0.0f);
      }
    } else {
      byte status = (byte)page.getStatus();
      switch (status) {
      case CrawlStatus.STATUS_FETCHED:         // successful fetch
      case CrawlStatus.STATUS_REDIR_TEMP:      // successful fetch, redirected
      case CrawlStatus.STATUS_REDIR_PERM:
      case CrawlStatus.STATUS_NOTMODIFIED:     // successful fetch, notmodified
        int modified = FetchSchedule.STATUS_UNKNOWN;
        if (status == CrawlStatus.STATUS_NOTMODIFIED) {
          modified = FetchSchedule.STATUS_NOTMODIFIED;
        }
        ByteBuffer prevSig = page.getPrevSignature();
        ByteBuffer signature = page.getSignature();
        if (prevSig != null && signature != null) {
          if (SignatureComparator.compare(prevSig, signature) != 0) {
            modified = FetchSchedule.STATUS_MODIFIED;
          } else {
            modified = FetchSchedule.STATUS_NOTMODIFIED;
          }
        }
        long fetchTime = page.getFetchTime();
        long prevFetchTime = page.getPrevFetchTime();
        long modifiedTime = page.getModifiedTime();
        long prevModifiedTime = page.getPrevModifiedTime();

        schedule.setFetchSchedule(url, page, prevFetchTime, prevModifiedTime,
            fetchTime, modifiedTime, modified);
        if (maxInterval < page.getFetchInterval())
          schedule.forceRefetch(url, page, false);
        break;
      case CrawlStatus.STATUS_RETRY:
        schedule.setPageRetrySchedule(url, page, 0L, page.getPrevModifiedTime(), page.getFetchTime());
        if (page.getRetriesSinceFetch() < retryMax) {
          page.setStatus(CrawlStatus.STATUS_UNFETCHED);
        } else {
          page.setStatus(CrawlStatus.STATUS_GONE);
        }
        break;
      case CrawlStatus.STATUS_GONE:
        schedule.setPageGoneSchedule(url, page, 0L, page.getPrevModifiedTime(), page.getFetchTime());
        break;
      }
    }

    if (page.getInlinks() != null) {
      page.getInlinks().clear();
    }

    // Distance calculation:
    // retrieve the smallest distance among all inlink distances and set this
    // page's distance to that value plus one, but only if it improves on the
    // previously stored distance (or none was stored yet). This loop also
    // repopulates the inlinks map that was just cleared.
    int smallestDist = Integer.MAX_VALUE;
    for (ScoreDatum inlink : inlinkedScoreData) {
      int inlinkDist = inlink.getDistance();
      if (inlinkDist < smallestDist) {
        smallestDist = inlinkDist;
      }
      page.putToInlinks(new Utf8(inlink.getUrl()), new Utf8(inlink.getAnchor()));
    }
    if (smallestDist != Integer.MAX_VALUE) {
      int oldDistance = Integer.MAX_VALUE;
      Utf8 oldDistUtf8 = page.getFromMarkers(DbUpdaterJob.DISTANCE);
      if (oldDistUtf8 != null) {
        oldDistance = Integer.parseInt(oldDistUtf8.toString());
      }
      int newDistance = smallestDist + 1;
      if (newDistance < oldDistance) {
        page.putToMarkers(DbUpdaterJob.DISTANCE, new Utf8(Integer.toString(newDistance)));
      }
    }

    try {
      scoringFilters.updateScore(url, page, inlinkedScoreData);
    } catch (ScoringFilterException e) {
      LOG.warn("Scoring filters failed with exception " +
                StringUtils.stringifyException(e));
    }

    // clear markers
    // But only delete when they exist. This is much faster for the underlying
    // store. The markers are on the input anyway.
    if (page.getFromMetadata(FetcherJob.REDIRECT_DISCOVERED) != null) {
      page.removeFromMetadata(FetcherJob.REDIRECT_DISCOVERED);
    }
    Mark.GENERATE_MARK.removeMarkIfExist(page);
    Mark.FETCH_MARK.removeMarkIfExist(page);
    Utf8 parse_mark = Mark.PARSE_MARK.checkMark(page);
    if (parse_mark != null) {
      // hand the parse mark on as an updatedb mark, then drop it
      Mark.UPDATEDB_MARK.putMark(page, parse_mark);
      Mark.PARSE_MARK.removeMark(page);
    }

    context.write(keyUrl, page);
  }
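The DISTANCE marker maintained above can later be read back, for example to cap how far from the seed urls the crawl is allowed to go. A hedged sketch, with maxDepth as an illustrative configuration value:

    // hypothetical depth cap based on the stored distance marker
    Utf8 distUtf8 = page.getFromMarkers(DbUpdaterJob.DISTANCE);
    int depth = (distUtf8 == null) ? 0 : Integer.parseInt(distUtf8.toString());
    if (depth > maxDepth) {
      return;  // too far from the seeds, skip this page
    }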

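Two partitioner tests check that partitioning by url gives the same result whether the key arrives as a plain string, a SelectorEntry, or a FetchEntry: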
    int numReduceTasks = 100;
   
    int partitionFromRef = refPartitioner.getPartition("http://www.example.org/", numReduceTasks);
    //init selector entry (score shouldn't matter)
    SelectorEntry selectorEntry = new SelectorEntry("http://www.example.org/", 1337);
    WebPage page = new WebPage();
    int partitionFromSig = sigPartitioner.getPartition(selectorEntry, page, numReduceTasks);
   
    assertEquals("partitions should be same",
        partitionFromRef, partitionFromSig);
   

   
    int numReduceTasks = 100;
   
    int partitionFromRef = refPartitioner.getPartition("http://www.example.org/", numReduceTasks);
    IntWritable intWritable = new IntWritable(1337); //doesn't matter
    WebPage page = new WebPage();
    String key = TableUtil.reverseUrl("http://www.example.org/");
    FetchEntry fetchEntry = new FetchEntry(conf, key, page);
    int partitionFromSig = sigPartitioner.getPartition(intWritable, fetchEntry, numReduceTasks);
   
    assertEquals("partitions should be same",

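A small test helper that builds an unfetched page with a given fetch interval and score: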
  /**
   * Constructs an unfetched {@link URLWebPage} with the given values.
   * @param url the page url
   * @param fetchInterval the fetch interval
   * @param score the page score
   * @return Constructed object
   */
  private URLWebPage createURLWebPage(final String url,
      final int fetchInterval, final float score) {
    WebPage page = new WebPage();
    page.setFetchInterval(fetchInterval);
    page.setScore(score);
    page.setStatus(CrawlStatus.STATUS_UNFETCHED);
    return new URLWebPage(url, page);
  }

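A ParserChecker-style fragment: it fetches a url through the protocol plugin, parses the resulting content, and dumps the page's parse metadata: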
      LOG.info("fetching: " + url);
    }

    ProtocolFactory factory = new ProtocolFactory(conf);
    Protocol protocol = factory.getProtocol(url);
    WebPage page = new WebPage();
    Content content = protocol.getProtocolOutput(url, page).getContent();
    page.setBaseUrl(new org.apache.avro.util.Utf8(url));
    page.setContent(ByteBuffer.wrap(content.getContent()));

    if (force) {
      content.setContentType(contentType);
    } else {
      contentType = content.getContentType();
    }

    if (contentType == null) {
      System.err.println("Couldn't determine content type for " + url);
      return (-1);
    }

    if (LOG.isInfoEnabled()) {
      LOG.info("parsing: " + url);
      LOG.info("contentType: " + contentType);
    }

    page.setContentType(new Utf8(contentType));

    Parse parse = new ParseUtil(conf).parse(url, page);

    if (parse == null) {
      System.err.println("Problem with parse - check log");
      return (-1);
    }

    System.out.print("---------\nUrl\n---------------\n");
    System.out.print(url + "\n");
    System.out.print("---------\nMetadata\n---------\n");
    Map<Utf8, ByteBuffer> metadata = page.getMetadata();
    StringBuffer sb = new StringBuffer();
    if (metadata != null) {
      Iterator<Entry<Utf8, ByteBuffer>> iterator = metadata.entrySet()
          .iterator();
      while (iterator.hasNext()) {
        Entry<Utf8, ByteBuffer> entry = iterator.next();
        sb.append(entry.getKey()).append(" : \t")
            .append(Bytes.toString(entry.getValue())).append("\n");
      }
      System.out.println(sb.toString());
    }

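Finally, running the configured indexing filters over a minimal page; the commented-out lines show the equivalent call against the pre-2.x API: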
    IndexingFilters filters = new IndexingFilters(conf);
//    filters.filter(new NutchDocument(), new ParseImpl("text", new ParseData(
//        new ParseStatus(), "title", new Outlink[0], new Metadata())), new Text(
//        "http://www.example.com/"), new CrawlDatum(), new Inlinks());
    WebPage page = new WebPage();
    page.setText(new Utf8("text"));
    page.setTitle(new Utf8("title"));
    filters.filter(new NutchDocument(), "http://www.example.com/", page);
  }
