Examples of Payload


Examples of com.scaleunlimited.cascading.Payload

        // TODO CSc - re-enable this test, when termination really works.
        // Assert.assertEquals(numPages, totalEntries);
    }
   
    protected void testPayloads(BixoPlatform platform) throws Exception {
        Payload payload = new Payload();
        payload.put("key", "value");
        Tap in = makeInputData(platform, "testPayloads", 1, 1, payload);

        Pipe pipe = new Pipe("urlSource");
        BaseFetcher fetcher = new FakeHttpFetcher(false, 10);
        BaseScoreGenerator scorer = new FixedScoreGenerator();
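Taken on its own, the Payload in this test is a small key/value container that is attached to the input data and travels with each URL datum; the FakeHttpFetcher snippet further down reads it back with datum.getPayload(). A minimal sketch of that idea follows; the method name is illustrative, not part of the Bixo sources, and it assumes the com.scaleunlimited.cascading.Payload import named in the header above.

    // Sketch only (not from the Bixo test above): a plain key/value payload.
    // put() is the call shown in testPayloads(); the returned object is what
    // datum.getPayload() hands back in the fetcher snippet further down.
    protected Payload makeCrawlPayload() {
        Payload payload = new Payload();
        payload.put("key", "value");
        return payload;
    }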

Examples of com.scaleunlimited.cascading.Payload

            throw e;
        }
    }

    public FetchedResult fetch(String url) throws BaseFetchException {
        return fetch(new HttpGet(), url, new Payload());
    }

Examples of com.scaleunlimited.cascading.Payload


    @Override
    public FetchedDatum get(ScoredUrlDatum datum) throws BaseFetchException {
        String url = datum.getUrl();
        Payload payload = datum.getPayload();
        logPayload(url, payload);
       
        // Create a simple HTML page here, where we fill in the URL as
        // the field, and return that as the BytesWritable. we could add
        // more of the datum values to the template if we cared.

Examples of com.tll.common.data.Payload

        ForgotPasswordServiceContext.KEY);
  }

  public Payload requestPassword(final String emailAddress) {
    final Status status = new Status();
    final Payload p = new Payload(status);
    final Map<String, Object> data = new HashMap<String, Object>();

    if(StringUtil.isEmpty(emailAddress)) {
      status.addMsg("An email address must be specified.", MsgLevel.ERROR, MsgAttr.STATUS.flag);
    }
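The shape of this service method is worth isolating: a Status collects messages, and the Payload is constructed around that Status so the caller receives the result and its messages together. A hedged sketch of just that pattern, using only the calls visible above (the method name is illustrative; MsgLevel and MsgAttr come from the same com.tll packages as in the original class, whose imports are not shown in the excerpt):

  // Sketch only: the Status-carrying pattern above, reduced to its core.
  public Payload errorPayload(final String message) {
    final Status status = new Status();
    status.addMsg(message, MsgLevel.ERROR, MsgAttr.STATUS.flag);
    return new Payload(status); // the payload wraps the status it reports on
  }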

Examples of gwlpr.protocol.handshake.messages.P000_VerifyClient.Payload

    @Event.Handler
    public void onHandshakeDone(HandShakeDoneEvent event)
    {
        LOGGER.debug("Got a new client to verify.");

        Payload verifyClient = event.getVerifyClient();

        // failcheck
        if (verifyClient == null) { return; }

        // check the server key

Examples of it.freedomotic.gwtclient.client.api.Payload

    public void refreshDisplays() {
        dataProvider.refresh();
    }

    public static void message(String message) {
        Payload payload = FreedomoticStompHelper.parseMessage(message);
//    EnvObject obj = FloorPlanWidget.environmentEnvironmentController.getInstance().getObject(payload.getStatements("object.name").get(0).getValue());            
//    EnvObject obj =
//    Iterator it = payload.iterator();
//        while (it.hasNext()) {
//            Statement st = (Statement) it.next();             
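The commented-out lines above hint at the intended use: the parsed Payload is a collection of Statement objects that can be walked with an iterator or queried by name. A hedged reconstruction of that loop, based only on the calls visible in those comments and assuming the same imports as the snippet above (plus java.util.Iterator):

    // Hedged reconstruction of the commented-out iteration above; the
    // GWT-client Payload API is only visible through those comments.
    public static void dumpStatements(String message) {
        Payload payload = FreedomoticStompHelper.parseMessage(message);
        Iterator it = payload.iterator();
        while (it.hasNext()) {
            Statement st = (Statement) it.next();
            // Inspect st here; the comments suggest statements are addressed
            // by names such as "object.name".
        }
    }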

Examples of it.freedomotic.reactions.Payload

    private void save(Trigger t) {
        if (table.getCellEditor() != null) {
            table.getCellEditor().stopCellEditing();
        }

        Payload payload = new Payload();

        for (int r = 0; r < model.getRowCount(); r++) {
            boolean saveCurrent = true;

            for (int k = 0; k < 4; k++) {
                // Reject the row if any of its four cells is missing or empty.
                Object cell = model.getValueAt(r, k);

                if ((cell == null) || cell.toString().equals("")) {
                    saveCurrent = false;

                    break;
                }
            }

            if (saveCurrent) {
                payload.addStatement(model.getValueAt(r, 0).toString(),
                        model.getValueAt(r, 1).toString(),
                        model.getValueAt(r, 2).toString(),
                        model.getValueAt(r, 3).toString());
            }
        }
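Away from the table, the same Payload can be assembled directly; each addStatement call takes the four values held by the editor's columns. A minimal sketch with illustrative arguments (the interpretation of the four columns as logical connector, attribute, operand and value is assumed here, not stated in the excerpt):

        // Sketch only: building a reactions Payload without the table model.
        // The argument order mirrors the four columns read above; their
        // meaning (logical, attribute, operand, value) is assumed.
        Payload payload = new Payload();
        payload.addStatement("AND", "temperature", "GREATER_THAN", "25");
        payload.addStatement("AND", "zone.name", "EQUALS", "kitchen");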

Examples of it.unimi.dsi.mg4j.index.payload.Payload

    /* This will be set if *all* indices to be merged agree. Moreover, if some
     * indices disagree we will emit a warning. */
    TermProcessor termProcessor = null;
    /* This will be set if *all* indices to be merged agree. Moreover, if some
     * indices disagree we will emit a warning. */
    Payload payload = null;
    String field = null;
    writeGlobCounts = writeSizes = true;
    boolean someGlobCounts = false, someSizes = false, allDataForSizeComputation = true;
   
    for( int i = 0; i < numIndices; i++ ) {
      index[ i ] = (BitStreamIndex)Index.getInstance( inputBasename[ i ], false, requireSizes, false );
      if ( i == 0 ) {
        termProcessor = index[ 0 ].termProcessor.copy();
        payload = index[ 0 ].payload == null ? null : index[ 0 ].payload.copy();
      }
      else {
        if ( ! termProcessor.equals( index[ i ].termProcessor ) ) throw new IllegalStateException( "The term processor of the first index (" + termProcessor + ") is different from the term processor of index " + i + " (" + index[ i ].termProcessor + ")" );
        if ( ( payload == null ) != ( index[ i ].payload == null ) || payload != null && ! payload.compatibleWith( index[ i ].payload ) ) throw new IllegalStateException( "The payload specification of index " + index[ 0 ] + " is not compatible with that of index " + index[ i ] );
      }

      if ( index[ i ].field != null ) {
        if ( field == null ) {
          if ( i != 0 ) LOGGER.warn( "Not all indices specify the field property" );
          field = index[ i ].field;
        }
        else if ( ! field.equals( index[ i ].field ) ) LOGGER.warn( "Index fields disagree: \"" + field + "\", \"" + index[ i ].field + "\"" );
      }


      haveCounts &= index[ i ].hasCounts;
      havePositions &= index[ i ].hasPositions;
      maxCount = Math.max( maxCount, index[ i ].maxCount );
      indexReader[ i ] = index[ i ].getReader( bufferSize );
      if ( index[ i ].properties.getLong( Index.PropertyKeys.OCCURRENCES, -1 ) == -1 ) numberOfOccurrences = -1;
      if ( numberOfOccurrences != -1 ) numberOfOccurrences += index[ i ].properties.getLong( Index.PropertyKeys.OCCURRENCES );
      final File globCountsFile = new File( this.inputBasename[ i ] + DiskBasedIndex.GLOBCOUNTS_EXTENSION );
      writeGlobCounts &= globCountsFile.exists();
      someGlobCounts |= globCountsFile.exists();
      if ( writeGlobCounts ) globCounts[ i ] = new InputBitStream( globCountsFile );

      if ( ! metadataOnly ) {
        final File offsetsFile = new File( this.inputBasename[ i ] + DiskBasedIndex.OFFSETS_EXTENSION );
        allDataForSizeComputation &= offsetsFile.exists();
        if ( quantum < 0 && allDataForSizeComputation ) offsets[ i ] = new InputBitStream( offsetsFile );

        if ( index[ i ].hasPositions ) {
          final File positionsLengthsFile = new File( this.inputBasename[ i ] + DiskBasedIndex.POSITIONS_NUMBER_OF_BITS_EXTENSION );
          allDataForSizeComputation &= positionsLengthsFile.exists();
          if ( quantum < 0 && allDataForSizeComputation ) posNumBits[ i ] = new InputBitStream( positionsLengthsFile );
        }
      }
     
      final File sizesFile = new File( this.inputBasename[ i ] + DiskBasedIndex.SIZES_EXTENSION );
      writeSizes &= sizesFile.exists();
      someSizes |= sizesFile.exists();

      term[ i ] = new MutableString();
      termReader[ i ] = new FastBufferedReader( new InputStreamReader( new FileInputStream( this.inputBasename[ i ] + DiskBasedIndex.TERMS_EXTENSION ), "UTF-8" ) );
      if ( termReader[ i ].readLine( term[ i ] ) != null ) termQueue.enqueue( i ); // If the term list is nonempty, we enqueue it
    }

    if ( writeGlobCounts != someGlobCounts ) LOGGER.warn( "Some (but not all) global-counts files missing" );
    if ( writeSizes != someSizes ) LOGGER.warn( "Some (but not all) sizes files missing" );
   
    additionalProperties = new Properties();
    additionalProperties.setProperty( Index.PropertyKeys.TERMPROCESSOR, ObjectParser.toSpec( termProcessor ) );
    if ( payload != null ) {
      additionalProperties.setProperty( Index.PropertyKeys.PAYLOADCLASS, payload.getClass().getName() );
      //writerFlags.put( Component.PAYLOADS, null );
    }
    additionalProperties.setProperty( Index.PropertyKeys.BATCHES, inputBasename.length );
    if ( field != null ) additionalProperties.setProperty( Index.PropertyKeys.FIELD, field );

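The payload handling in this merge loop reduces to one invariant: take a copy() of the first index's payload specification and require every later index to be compatibleWith() it, treating "both null" as compatible. Stripped down to just that check (a sketch reusing the variable names above):

    // Sketch of the payload-compatibility invariant enforced in the merge loop above.
    Payload reference = index[ 0 ].payload == null ? null : index[ 0 ].payload.copy();
    for( int i = 1; i < numIndices; i++ ) {
      final Payload other = index[ i ].payload;
      if ( ( reference == null ) != ( other == null ) || reference != null && ! reference.compatibleWith( other ) )
        throw new IllegalStateException( "The payload specification of index 0 is not compatible with that of index " + i );
    }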

Examples of it.unimi.dsi.mg4j.index.payload.Payload

    final Index index = curr.top();
    if ( ! index.hasPayloads ) throw new IllegalStateException( "Index " + index + " does not have payloads" );
    try {
      final Object parser = index2Parser.containsKey( index ) ? index2Parser.get( index ) : index.payload;
      final Method method = parser.getClass().getMethod( "parse", String.class );
      final Payload left = index.payload.copy(), right = index.payload.copy();
      if ( node.left != null ) left.set( method.invoke( parser, node.left.toString() ) );
      if ( node.right != null ) right.set( method.invoke( parser, node.right.toString() ) );
      return PayloadPredicateDocumentIterator.getInstance( index.documents( 0 ),
          index.payload.rangeFilter( node.left == null ? null : left, node.right == null ? null : right ) ).weight( weight() );
    }
    catch( InvocationTargetException e ) {

Examples of it.unimi.dsi.mg4j.index.payload.Payload

      indirect[ i ] = new InputBitStream( tempFile[ i ] );
      if ( bloomFilterPrecision != 0 ) bloomFilter[ i ] = new BloomFilter( globalIndex.numberOfTerms, bloomFilterPrecision );
    }
    int usedIndices;
    MutableString currentTerm = new MutableString();
    Payload payload = null;
    int frequency, globalPointer, localIndex, localPointer, count = -1;

    pl.expectedUpdates = globalIndex.numberOfPostings;
    pl.itemsName = "postings";
    pl.logInterval = logInterval;
    pl.start( "Partitioning index..." );

    for ( int t = 0; t < globalIndex.numberOfTerms; t++ ) {
      terms.readLine( currentTerm );
      indexIterator = indexReader.nextIterator();
      usedIndices = 0;
      frequency = indexIterator.frequency();
     
      for ( int j = 0; j < frequency; j++ ) {
        globalPointer = indexIterator.nextDocument();               
        localIndex = strategy.localIndex( globalPointer );

        if ( localFrequency[ localIndex ] == 0 ) {
          // First time we see a document for this index.
          currentTerm.println( localTerms[ localIndex ] );
          numTerms[ localIndex ]++;
          usedIndex[ usedIndices++ ] = localIndex;
          if ( bloomFilterPrecision != 0 ) bloomFilter[ localIndex ].add( currentTerm );
        }
       
        /* Store temporarily posting data; note that we save the global pointer as we
         * will have to access the size list. */
       
        localFrequency[ localIndex ]++;
        numPostings[ localIndex ]++;
        temp[ localIndex ].writeGamma( globalPointer );

        if ( globalIndex.hasPayloads ) payload = indexIterator.payload();
        if ( havePayloads ) payload.write( temp[ localIndex ] );
       
        if ( haveCounts ) {
          count = indexIterator.count();
          temp[ localIndex ].writeGamma( count );
          globCount[ localIndex ] += count;       
          if ( maxDocPos[ localIndex ] < count ) maxDocPos[ localIndex ] = count;        
          if ( havePositions ) {
            final int[] pos = indexIterator.positionArray();
            // TODO: compress this stuff
            for( int p = 0; p < count; p++ ) temp[ localIndex ].writeGamma( pos[ p ] );
          }
        }
      }
     
      // We now run through the indices used by this term and copy from the temporary buffer.

      OutputBitStream obs;
     
      for( int k = 0; k < usedIndices; k++ ) {
        final int i = usedIndex[ k ];

        localFrequencies[ i ].writeGamma( localFrequency[ i ] );
        if ( haveCounts ) numOccurrences[ i ] += globCount[ i ];
        if ( localGlobCounts[ i ] != null ) localGlobCounts[ i ].writeLongGamma( globCount[ i ] );
        globCount[ i ] = 0;
       
        InputBitStream ibs;
        indexWriter[ i ].newInvertedList();

        temp[ i ].align();
        if ( temp[ i ].buffer() != null ) ibs = direct[ i ];
        else {
          // We cannot read directly from the internal buffer.
          ibs = indirect[ i ];
          ibs.flush();
          temp[ i ].flush();
        }

        ibs.position( 0 );
         
        indexWriter[ i ].writeFrequency( localFrequency[ i ] );
        for( int j = 0; j < localFrequency[ i ]; j++ ) {
          obs = indexWriter[ i ].newDocumentRecord();
          globalPointer = ibs.readGamma();
          localPointer = strategy.localPointer( globalPointer );
          indexWriter[ i ].writeDocumentPointer( obs, localPointer );
          if ( havePayloads ) {
            payload.read( ibs );
            indexWriter[ i ].writePayload( obs, payload );
          }
          if ( haveCounts ) indexWriter[ i ].writePositionCount( obs, count = ibs.readGamma() );
          if ( havePositions ) {
            for( int p = 0; p < count; p++ ) position[ p ] = ibs.readGamma();
            indexWriter[ i ].writeDocumentPositions( obs, position, 0, count, sizeList != null ? sizeList.getInt( globalPointer ) : -1 );
          }
         
        }
        temp[ i ].position( 0 );
        temp[ i ].writtenBits( 0 );
        localFrequency[ i ] = 0;
      }
     
      usedIndices = 0;
      pl.count += frequency - 1;
      pl.update();
    }

    pl.done();

    Properties globalProperties = new Properties();
    globalProperties.setProperty( Index.PropertyKeys.FIELD, inputProperties.getProperty( Index.PropertyKeys.FIELD ) );
    globalProperties.setProperty( Index.PropertyKeys.TERMPROCESSOR, inputProperties.getProperty( Index.PropertyKeys.TERMPROCESSOR ) );
   
    for ( int i = 0; i < numIndices; i++ ) {
      localFrequencies[ i ].close();
      if ( localGlobCounts[ i ] != null ) localGlobCounts[ i ].close();
      localTerms[ i ].close();
      indexWriter[ i ].close();
      if ( bloomFilterPrecision != 0 ) BinIO.storeObject( bloomFilter[ i ], localBasename[ i ] + DocumentalCluster.BLOOM_EXTENSION );
      temp[ i ].close();
      tempFile[ i ].delete();
     
      Properties localProperties = indexWriter[ i ].properties();
      localProperties.addAll( globalProperties );
      localProperties.setProperty( Index.PropertyKeys.MAXCOUNT, String.valueOf( maxDocPos[ i ] ) );
      localProperties.setProperty( Index.PropertyKeys.MAXDOCSIZE, maxDocSize[ i ] );
      localProperties.setProperty( Index.PropertyKeys.FIELD, globalProperties.getProperty( Index.PropertyKeys.FIELD ) );
      localProperties.setProperty( Index.PropertyKeys.OCCURRENCES, haveCounts ? numOccurrences[ i ] : -1 );
      localProperties.setProperty( Index.PropertyKeys.POSTINGS, numPostings[ i ] );
      localProperties.setProperty( Index.PropertyKeys.TERMS, numTerms[ i ] );
      if ( havePayloads ) localProperties.setProperty( Index.PropertyKeys.PAYLOADCLASS, payload.getClass().getName() );
      if ( strategyProperties[ i ] != null ) localProperties.addAll( strategyProperties[ i ] );
      localProperties.save( localBasename[ i ] + DiskBasedIndex.PROPERTIES_EXTENSION );
    }

    if ( strategyFilename != null ) globalProperties.setProperty( IndexCluster.PropertyKeys.STRATEGY, strategyFilename );
    for( int i = 0; i < numIndices; i++ ) globalProperties.addProperty( IndexCluster.PropertyKeys.LOCALINDEX, localBasename[ i ] );
    globalProperties.setProperty( DocumentalCluster.PropertyKeys.BLOOM, bloomFilterPrecision != 0 );
    // If we partition an index with a single term, by definition we have a flat cluster
    globalProperties.setProperty( DocumentalCluster.PropertyKeys.FLAT, inputProperties.getInt( Index.PropertyKeys.TERMS ) <= 1 );
    globalProperties.setProperty( Index.PropertyKeys.MAXCOUNT, inputProperties.getProperty( Index.PropertyKeys.MAXCOUNT ) );
    globalProperties.setProperty( Index.PropertyKeys.MAXDOCSIZE, inputProperties.getProperty( Index.PropertyKeys.MAXDOCSIZE ) );
    globalProperties.setProperty( Index.PropertyKeys.POSTINGS, inputProperties.getProperty( Index.PropertyKeys.POSTINGS ) );
    globalProperties.setProperty( Index.PropertyKeys.OCCURRENCES, inputProperties.getProperty( Index.PropertyKeys.OCCURRENCES ) );
    globalProperties.setProperty( Index.PropertyKeys.DOCUMENTS, inputProperties.getProperty( Index.PropertyKeys.DOCUMENTS ) );
    globalProperties.setProperty( Index.PropertyKeys.TERMS, inputProperties.getProperty( Index.PropertyKeys.TERMS ) );
    if ( havePayloads ) globalProperties.setProperty( Index.PropertyKeys.PAYLOADCLASS, payload.getClass().getName() );

    /* For the general case, we must rely on a merged cluster. However, if we detect a contiguous
     * strategy we can optimise a bit. */
   
    globalProperties.setProperty( Index.PropertyKeys.INDEXCLASS,
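The payload plumbing in this pass is a buffer-and-replay round trip: while scanning the global index the payload is serialised with write() into a per-local-index temporary bit stream, and when the local index is written it is read() back and handed to writePayload(). The round trip isolated on an in-memory buffer, as a sketch only: the bit-stream classes are the same ones used by the surrounding code, copy() is shown in the merge example above, and FastByteArrayOutputStream (from fastutil) stands in for the temporary files, which the original does not do.

      // Sketch only: the write()/read() round trip used above, isolated on an
      // in-memory buffer instead of the per-index temporary files.
      FastByteArrayOutputStream buffer = new FastByteArrayOutputStream();
      OutputBitStream out = new OutputBitStream( buffer );
      payload.write( out );          // buffer the current payload value
      out.flush();

      InputBitStream in = new InputBitStream( buffer.array );
      Payload copy = payload.copy(); // fresh instance with the same specification
      copy.read( in );               // replay it when writing the local index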