Package it.unimi.dsi.util

Examples of it.unimi.dsi.util.Properties
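
it.unimi.dsi.util.Properties builds on Apache Commons Configuration (note the ConfigurationMap usage in the snippets below) and adds, among other things, save() methods and overloads that accept enum keys. As a warm-up, here is a minimal round-trip sketch that uses only methods appearing in the extracted examples; the class name and file name are made up for illustration:

    import it.unimi.dsi.util.Properties;

    public class PropertiesRoundTrip {
      public static void main( String[] args ) throws Exception {
        Properties p = new Properties();
        p.setProperty( "documents", 1000 );        // single-valued key: setProperty() replaces
        p.addProperty( "localindex", "index-0" );  // multi-valued key: addProperty() appends
        p.addProperty( "localindex", "index-1" );
        p.save( "example.properties" );            // write to disk

        Properties q = new Properties( "example.properties" );          // reload from disk
        System.out.println( q.getLong( "documents" ) );                 // prints 1000
        System.out.println( q.getStringArray( "localindex" ).length );  // prints 2
      }
    }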


   
    Properties p = new Properties();
    p.addProperty( MetadataKeys.FIELDNAME.name().toLowerCase(), "text,title" );
    p.addProperty( MetadataKeys.KEY.name().toLowerCase(), PropertyBasedDocumentFactory.MetadataKeys.MIMETYPE.name() );
    p.addProperty( MetadataKeys.RULE.name().toLowerCase(), "application/pdf:it.unimi.dsi.mg4j.document.PdfDocumentFactory,text/html:it.unimi.dsi.mg4j.document.HtmlDocumentFactory,?:it.unimi.dsi.mg4j.document.IdentityDocumentFactory" );
    p.addProperty( MetadataKeys.MAP.name().toLowerCase(), "0:-1,0:1,0:-1" );
    p.addProperty( PropertyBasedDocumentFactory.MetadataKeys.ENCODING.name().toLowerCase(), "iso-8859-1" );
   
    DispatchingDocumentFactory factory = new DispatchingDocumentFactory( p );
    DocumentCollection dc = new FileSetDocumentCollection( arg, factory );
    BinIO.storeObject( dc, "test.collection" );
  }
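
The snippet above configures a DispatchingDocumentFactory from a Properties object and serializes the resulting collection with BinIO.storeObject(). The natural counterpart, a sketch assuming fastutil's BinIO API, reads it back with a cast:

    // Restoring the stored collection (exceptions propagate to the caller).
    DocumentCollection dc = (DocumentCollection)BinIO.loadObject( "test.collection" );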


    }

    if ( writeGlobCounts != someGlobCounts ) LOGGER.warn( "Some (but not all) global-counts files missing" );
    if ( writeSizes != someSizes ) LOGGER.warn( "Some (but not all) sizes files missing" );
   
    additionalProperties = new Properties();
    additionalProperties.setProperty( Index.PropertyKeys.TERMPROCESSOR, ObjectParser.toSpec( termProcessor ) );
    if ( payload != null ) {
      additionalProperties.setProperty( Index.PropertyKeys.PAYLOADCLASS, payload.getClass().getName() );
      //writerFlags.put( Component.PAYLOADS, null );
    }
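
Here the term processor is recorded as a parseable specification via ObjectParser.toSpec(). A sketch of the reverse direction, assuming dsiutils' ObjectParser.fromSpec(); the two-argument getString() overload is the one used elsewhere in these examples:

    // Re-instantiating the term processor from the saved spec (sketch).
    String spec = additionalProperties.getString( Index.PropertyKeys.TERMPROCESSOR, null );
    TermProcessor termProcessor = ObjectParser.fromSpec( spec, TermProcessor.class );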

        }
        termReader[ i ].close();
      }
      final long indexSize = indexWriter.writtenBits();
      indexWriter.close();
      final Properties properties = indexWriter.properties();
      additionalProperties.setProperty( Index.PropertyKeys.SIZE, indexSize );
      additionalProperties.setProperty( Index.PropertyKeys.MAXDOCSIZE, maxDocSize );
      additionalProperties.setProperty( Index.PropertyKeys.OCCURRENCES, numberOfOccurrences );
      properties.addAll( additionalProperties );
      logger.debug( "Post-merge properties: " + new ConfigurationMap( properties ) );
      properties.save( outputBasename + DiskBasedIndex.PROPERTIES_EXTENSION );
    }
       
    final PrintStream stats = new PrintStream( new FileOutputStream( outputBasename + DiskBasedIndex.STATS_EXTENSION ) );
    if ( ! metadataOnly ) indexWriter.printStats( stats );
    stats.close();
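
ConfigurationMap, used in the debug message above, comes from Commons Configuration: it wraps a Configuration in the java.util.Map interface, which is what yields the readable key/value dump. In isolation:

    // Viewing a Properties object as a java.util.Map (sketch).
    Map<Object, Object> view = new ConfigurationMap( properties );
    System.out.println( view );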

      throw new IllegalArgumentException( "When invoking " + Combine.class.getName() + " from " + combineClass.getName() + " you cannot choose the combination process" );
   
    final String[] inputBasename;
    if ( jsapResult.getBoolean( "properties" ) ) {
      if ( jsapResult.getStringArray( "inputBasename" ).length > 1 ) throw new IllegalArgumentException( "When using --properties, you must specify exactly one inputBasename" );
      inputBasename = new Properties( jsapResult.getStringArray( "inputBasename" )[ 0 ] + Scan.CLUSTER_PROPERTIES_EXTENSION ).getStringArray( IndexCluster.PropertyKeys.LOCALINDEX );
    }
    else inputBasename = jsapResult.getStringArray( "inputBasename" );
    // TODO: resolve problem of passing default flag values without knowing type of index
    ( combineClass == Paste.class || jsapResult.getBoolean( "duplicates" ) ?
    (Combine)new Paste( jsapResult.getString( "outputBasename" ),
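
The --properties branch above resolves the actual input basenames through a cluster property file whose localindex key is multi-valued: each addProperty() call contributed one entry, and getStringArray() returns them all in insertion order. The same lookup in isolation (file name illustrative):

    Properties cluster = new Properties( "collection.cluster.properties" );
    String[] localIndex = cluster.getStringArray( IndexCluster.PropertyKeys.LOCALINDEX );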

      basename = questionMarkPos == -1 ? uriString : uriString.substring( 0, questionMarkPos );
      query = questionMarkPos == -1 ? null : uriString.substring( questionMarkPos + 1 );
    }
   
    LOGGER.debug( "Searching for an index with basename " + basename + "..." );
    Properties properties = new Properties( basename + DiskBasedIndex.PROPERTIES_EXTENSION );
    LOGGER.debug( "Properties: " + properties );
         
    // We parse the key/value pairs appearing in the query part.
    final EnumMap<UriKeys,String> queryProperties = new EnumMap<UriKeys,String>( UriKeys.class );
    if ( query != null ) {
      String[] keyValue = query.split( ";" );
      for( int i = 0; i < keyValue.length; i++ ) {
        String[] piece = keyValue[ i ].split( "=" );
        if ( piece.length != 2 ) throw new IllegalArgumentException( "Malformed key/value pair: "  + keyValue[ i ] );
        // Convert to standard keys
        boolean found = false;
        for( UriKeys key: UriKeys.values() ) 
          if ( found = PropertyBasedDocumentFactory.sameKey( key, piece[ 0 ] ) ) {
            queryProperties.put( key, piece[ 1 ] );
            break;
          }
        if ( ! found ) throw new IllegalArgumentException( "Unknown key: " + piece[ 0 ] );
      }
    }

    String className = properties.getString( Index.PropertyKeys.INDEXCLASS, "(missing index class)" );
    // Temporary patch
    if ( "it.unimi.dsi.mg4j.index.SkipFileIndex".equals( className ) ) className = FileIndex.class.getName();
    Class<?> indexClass = Class.forName( className );

    // It is a cluster
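
The code above accepts index URIs of the form basename?key=value;key=value, with key names drawn from UriKeys. A hypothetical example (the inmemory key is an assumption for illustration):

    // Opening an index with an option appended as a query.
    Index index = Index.getInstance( "/data/index/text?inmemory=1" );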

      makeEmpty( batchBasename + DiskBasedIndex.GLOBCOUNTS_EXTENSION );
      if ( ! indexingIsVirtual ) sizes.close();
     
      final IndexWriter indexWriter = new BitStreamIndexWriter( batchBasename, totDocuments, true, flags );
      indexWriter.close();
      final Properties properties = indexWriter.properties();
      properties.setProperty( Index.PropertyKeys.TERMPROCESSOR, ObjectParser.toSpec( termProcessor ) );
      properties.setProperty( Index.PropertyKeys.OCCURRENCES, 0 );
      properties.setProperty( Index.PropertyKeys.MAXCOUNT, 0 );
      properties.setProperty( Index.PropertyKeys.MAXDOCSIZE, maxDocSize );
      properties.setProperty( Index.PropertyKeys.SIZE, 0 );
      if ( field != null ) properties.setProperty( Index.PropertyKeys.FIELD, field );
      properties.save( batchBasename + DiskBasedIndex.PROPERTIES_EXTENSION );
      batch = 1;
    }

    termMap = null;

    final Properties properties = new Properties();
    if ( field != null ) properties.setProperty( Index.PropertyKeys.FIELD, field );
    properties.setProperty( Index.PropertyKeys.BATCHES, batch );
    properties.setProperty( Index.PropertyKeys.DOCUMENTS, totDocuments );
    properties.setProperty( Index.PropertyKeys.MAXDOCSIZE, globMaxDocSize );
    properties.setProperty( Index.PropertyKeys.MAXCOUNT, maxCount );
    properties.setProperty( Index.PropertyKeys.OCCURRENCES, totOccurrences );
    properties.setProperty( Index.PropertyKeys.POSTINGS, totPostings );
    properties.setProperty( Index.PropertyKeys.TERMPROCESSOR, termProcessor.getClass().getName() );

    if ( ! indexingIsVirtual ) {
      // This set of batches can be seen as a documental cluster index.
      final Properties clusterProperties = new Properties();
      clusterProperties.addAll( properties );
      clusterProperties.setProperty( Index.PropertyKeys.TERMS, -1 );
      clusterProperties.setProperty( DocumentalCluster.PropertyKeys.BLOOM, false );
      clusterProperties.setProperty( IndexCluster.PropertyKeys.FLAT, false );

      if ( indexingIsStandard ) {
        clusterProperties.setProperty( Index.PropertyKeys.INDEXCLASS, DocumentalConcatenatedCluster.class.getName() );
        BinIO.storeObject( new ContiguousDocumentalStrategy( cutPoints.toIntArray() ), basename + CLUSTER_STRATEGY_EXTENSION );
      }
      else { // Remapped
        clusterProperties.setProperty( Index.PropertyKeys.INDEXCLASS, DocumentalMergedCluster.class.getName() );
        BinIO.storeObject( new IdentityDocumentalStrategy( batch, totDocuments ), basename + CLUSTER_STRATEGY_EXTENSION );
      }
      clusterProperties.setProperty( IndexCluster.PropertyKeys.STRATEGY, basename + CLUSTER_STRATEGY_EXTENSION );
      for ( int i = 0; i < batch; i++ )
        clusterProperties.addProperty( IndexCluster.PropertyKeys.LOCALINDEX, batchBasename( i, basename, batchDir ) );
      clusterProperties.save( basename + CLUSTER_PROPERTIES_EXTENSION );

    }

    properties.save( basename + DiskBasedIndex.PROPERTIES_EXTENSION );
  }
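
Since these examples consistently derive property keys from lowercased enum names (see the first snippet), the file saved above can presumably be read back the same way. A sketch under that assumption:

    Properties saved = new Properties( basename + DiskBasedIndex.PROPERTIES_EXTENSION );
    int batches = saved.getInt( Index.PropertyKeys.BATCHES.name().toLowerCase() );
    long documents = saved.getLong( Index.PropertyKeys.DOCUMENTS.name().toLowerCase() );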

  }
 
  public Properties[] properties() {
    Properties[] properties = new Properties[ k ];
    for( int i = 0; i < k; i++ ) {
      properties[ i ] = new Properties();
      properties[ i ].addProperty( "pointerfrom", cutPoint[ i ] );
      properties[ i ].addProperty( "pointerto", cutPoint[ i + 1 ] );
    }
    return properties;
  }
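
A hypothetical use of the method above, where strategy is an instance of the enclosing class:

    // Each returned Properties object describes one slice of the partition.
    for( Properties q : strategy.properties() )
      System.out.println( q.getLong( "pointerfrom" ) + ".." + q.getLong( "pointerto" ) );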

          maxDocInBatch = 0;
        }

        indexWriter.close();

        final Properties properties = indexWriter.properties();
        totPostings += properties.getLong( "postings" );
        properties.setProperty( Index.PropertyKeys.OCCURRENCES, -1 );
        properties.setProperty( Index.PropertyKeys.MAXDOCSIZE, -1 );
        properties.setProperty( Index.PropertyKeys.SIZE, indexWriter.writtenBits() );
        properties.setProperty( Index.PropertyKeys.TERMPROCESSOR, NullTermProcessor.class.getName() );
        properties.setProperty( Index.PropertyKeys.PAYLOADCLASS, payload.getClass().getName() );
        if ( field != null ) properties.setProperty( Index.PropertyKeys.FIELD, field );
        properties.save( batchBasename + DiskBasedIndex.PROPERTIES_EXTENSION );

        // We *must* generate a fake term file, or index combination won't work.
        final PrintWriter termWriter = new PrintWriter( new FileWriter( batchBasename + DiskBasedIndex.TERMS_EXTENSION ) );
        termWriter.println( "#" );
        termWriter.close();

        final String batchBasename = batchBasename( 0, basename, batchDir );
        LOGGER.debug( "Generating empty index " + batchBasename );

        final IndexWriter indexWriter = new BitStreamIndexWriter( batchBasename, 0, true, flags );
        indexWriter.close();
        final Properties properties = indexWriter.properties();
        properties.setProperty( Index.PropertyKeys.SIZE, 0 );
        properties.setProperty( Index.PropertyKeys.OCCURRENCES, -1 );
        properties.setProperty( Index.PropertyKeys.MAXCOUNT, -1 );
        properties.setProperty( Index.PropertyKeys.MAXDOCSIZE, -1 );
        properties.setProperty( Index.PropertyKeys.TERMPROCESSOR, NullTermProcessor.class.getName() );
        properties.setProperty( Index.PropertyKeys.PAYLOADCLASS, payload.getClass().getName() );
        if ( field != null ) properties.setProperty( Index.PropertyKeys.FIELD, field );
        properties.save( batchBasename + DiskBasedIndex.PROPERTIES_EXTENSION );
        new FileOutputStream( batchBasename + DiskBasedIndex.TERMS_EXTENSION ).close();
        batch = 1;
      }

      accumulator = null;
      accumulatorStream = null;
      position = null;

      final Properties properties = new Properties();
      if ( field != null ) properties.setProperty( Index.PropertyKeys.FIELD, field );
      properties.setProperty( Index.PropertyKeys.BATCHES, batch );
      properties.setProperty( Index.PropertyKeys.DOCUMENTS, totDocuments );
      properties.setProperty( Index.PropertyKeys.POSTINGS, totPostings );
      properties.setProperty( Index.PropertyKeys.OCCURRENCES, -1 );
      properties.setProperty( Index.PropertyKeys.MAXCOUNT, -1 );
      properties.setProperty( Index.PropertyKeys.MAXDOCSIZE, -1 );
      properties.setProperty( Index.PropertyKeys.TERMPROCESSOR, NullTermProcessor.class.getName() );
      properties.setProperty( Index.PropertyKeys.PAYLOADCLASS, payload.getClass().getName() );

      // This set of batches can be seen as a documental cluster index.
      final Properties clusterProperties = new Properties();
      clusterProperties.addAll( properties );
      clusterProperties.setProperty( Index.PropertyKeys.TERMS, 1 );
      clusterProperties.setProperty( IndexCluster.PropertyKeys.BLOOM, false );
      clusterProperties.setProperty( IndexCluster.PropertyKeys.FLAT, true );

      if ( indexingType == IndexingType.STANDARD ) {
        clusterProperties.setProperty( Index.PropertyKeys.INDEXCLASS, DocumentalConcatenatedCluster.class.getName() );
        BinIO.storeObject( new ContiguousDocumentalStrategy( cutPoints.toIntArray() ), basename + CLUSTER_STRATEGY_EXTENSION );
      }
      else {
        clusterProperties.setProperty( Index.PropertyKeys.INDEXCLASS, DocumentalMergedCluster.class.getName() );
        BinIO.storeObject( new IdentityDocumentalStrategy( batch, totDocuments ), basename + CLUSTER_STRATEGY_EXTENSION );
      }
      clusterProperties.setProperty( IndexCluster.PropertyKeys.STRATEGY, basename + CLUSTER_STRATEGY_EXTENSION );
      for ( int i = 0; i < batch; i++ )
        clusterProperties.addProperty( IndexCluster.PropertyKeys.LOCALINDEX, batchBasename( i, basename, batchDir ) );
      clusterProperties.save( basename + CLUSTER_PROPERTIES_EXTENSION );

      properties.save( basename + DiskBasedIndex.PROPERTIES_EXTENSION );
    }

    final PrintWriter[] localTerms = new PrintWriter[ numIndices ];
    final int[] numTerms = new int[ numIndices ];
    final long[] numberOfOccurrences = new long[ numIndices ];
    final long[] numberOfPostings = new long[ numIndices ];
   
    final boolean isHighPerformance = BitStreamHPIndex.class.isAssignableFrom( Class.forName( new Properties( inputBasename + DiskBasedIndex.PROPERTIES_EXTENSION ).getString( Index.PropertyKeys.INDEXCLASS ) ) );
   
    final InputBitStream globalIndex = new InputBitStream( inputBasename + DiskBasedIndex.INDEX_EXTENSION, bufferSize );
    final long globalPositionsLength = new File( inputBasename + DiskBasedIndex.POSITIONS_EXTENSION ).length();
    final InputBitStream globalPositions = isHighPerformance ? new InputBitStream( inputBasename + DiskBasedIndex.POSITIONS_EXTENSION, bufferSize ) : null;
    final FastBufferedReader terms = new FastBufferedReader( new InputStreamReader( new FileInputStream( inputBasename + DiskBasedIndex.TERMS_EXTENSION ), "UTF-8" ) );
    final InputBitStream offsets = new InputBitStream( inputBasename + DiskBasedIndex.OFFSETS_EXTENSION );
   
    final File posNumBitsFile = new File( inputBasename + DiskBasedIndex.POSITIONS_NUMBER_OF_BITS_EXTENSION );
    final InputBitStream posNumBits = posNumBitsFile.exists() ? new InputBitStream( inputBasename + DiskBasedIndex.POSITIONS_NUMBER_OF_BITS_EXTENSION ) : null;
    final InputBitStream frequencies = new InputBitStream( inputBasename + DiskBasedIndex.FREQUENCIES_EXTENSION );
    final InputBitStream globCounts = new InputBitStream( inputBasename + DiskBasedIndex.GLOBCOUNTS_EXTENSION );
    offsets.readGamma();
   
    for( int i = 0; i < numIndices; i++ ) {
      localIndexStream[ i ] = new OutputBitStream( localBasename[ i ] + DiskBasedIndex.INDEX_EXTENSION, bufferSize );
      if ( isHighPerformance ) localPositionsStream[ i ] = new OutputBitStream( localBasename[ i ] + DiskBasedIndex.POSITIONS_EXTENSION, bufferSize );
      localFrequencies[ i ] = new OutputBitStream( localBasename[ i ] + DiskBasedIndex.FREQUENCIES_EXTENSION );
      localGlobCounts[ i ] = new OutputBitStream( localBasename[ i ] + DiskBasedIndex.GLOBCOUNTS_EXTENSION );
      localTerms[ i ] = new PrintWriter( new OutputStreamWriter( new FastBufferedOutputStream( new FileOutputStream( localBasename[ i ] + DiskBasedIndex.TERMS_EXTENSION ) ), "UTF-8" ) );
      localOffsets[ i ] = new OutputBitStream( localBasename[ i ] + DiskBasedIndex.OFFSETS_EXTENSION );
      if ( posNumBits != null ) localPosNumBits[ i ] = new OutputBitStream( localBasename[ i ] + DiskBasedIndex.POSITIONS_NUMBER_OF_BITS_EXTENSION );
      localOffsets[ i ].writeGamma( 0 );
    }

    // The current term
    final MutableString currTerm = new MutableString();
   
    pl.expectedUpdates = ( new File( inputBasename + DiskBasedIndex.INDEX_EXTENSION ).length() + ( isHighPerformance ? new File( inputBasename + DiskBasedIndex.POSITIONS_EXTENSION ).length() : 0 ) ) * 8;
    pl.itemsName = "bits";
    pl.logInterval = logInterval;
    pl.start( "Partitioning index..." );

    int termNumber = 0, k, prevK = -1, previousHeaderLength = 0, newHeaderLength = 0;
    long length, count, positionsOffset = 0;
    int res, frequency;
   
    while( terms.readLine( currTerm ) != null ) {
      k = strategy.localIndex( termNumber ); // The local index for this term
      if ( numTerms[ k ] != strategy.localNumber( termNumber ) ) throw new IllegalStateException();
      numTerms[ k ]++;
     
      if ( isHighPerformance ) {
        final long temp = globalIndex.readBits();
        positionsOffset = globalIndex.readLongDelta();
        previousHeaderLength = (int)( globalIndex.readBits() - temp );
        if ( prevK != -1 ) {
          length = positionsOffset - globalPositions.readBits();
          pl.count += length;
          while( length > 0 ) {
            res = (int)Math.min( bufferSize * 8, length );
            globalPositions.read( buffer, res );
            localPositionsStream[ prevK ].write( buffer, res );
            length -= res;
          }
        }
        newHeaderLength = localIndexStream[ k ].writeLongDelta( localPositionsStream[ k ].writtenBits() );
      }
     
     
      frequency = frequencies.readGamma();
      localFrequencies[ k ].writeGamma( frequency );
      numberOfPostings[ k ] += frequency;

      if ( posNumBits != null ) localPosNumBits[ k ].writeGamma( posNumBits.readGamma() );
     
      count = globCounts.readLongGamma();
      numberOfOccurrences[ k ] += count;
      localGlobCounts[ k ].writeLongGamma( count );
     
      currTerm.println( localTerms[ k ] );
     
      length = offsets.readLongGamma() - previousHeaderLength;
      localOffsets[ k ].writeLongGamma( length + newHeaderLength );
      pl.count += length + previousHeaderLength - 1;
     
      while( length > 0 ) {
        res = (int)Math.min( bufferSize * 8, length );
        globalIndex.read( buffer, res );
        localIndexStream[ k ].write( buffer, res );
        length -= res;
      }
     
      pl.update();
      prevK = k;
      termNumber++;
    }

    // We pour the last piece of positions
    if ( isHighPerformance ) {
      if ( prevK != -1 ) {
        length = globalPositionsLength * 8 - globalPositions.readBits();
        while( length > 0 ) {
          res = (int)Math.min( bufferSize * 8, length );
          globalPositions.read( buffer, res );
          localPositionsStream[ prevK ].write( buffer, res );
          length -= res;
        }
      }
    }

    pl.done();

    terms.close();
    offsets.close();
    frequencies.close();
    globCounts.close();
    globalIndex.close();
    if ( posNumBits != null ) posNumBits.close();
    if ( isHighPerformance ) globalPositions.close();
   
    // We copy the relevant properties from the original
    Properties properties = new Properties( inputBasename + DiskBasedIndex.PROPERTIES_EXTENSION );
    Properties globalProperties = new Properties();
    if ( strategyFilename != null ) globalProperties.setProperty( IndexCluster.PropertyKeys.STRATEGY, strategyFilename );
    globalProperties.setProperty( DocumentalCluster.PropertyKeys.BLOOM, false );
    globalProperties.setProperty( Index.PropertyKeys.INDEXCLASS, LexicalCluster.class.getName() );
    for( int i = 0; i < numIndices; i++ ) globalProperties.addProperty( IndexCluster.PropertyKeys.LOCALINDEX, localBasename[ i ] );
    globalProperties.setProperty( Index.PropertyKeys.FIELD, properties.getProperty( Index.PropertyKeys.FIELD ) );
    globalProperties.setProperty( Index.PropertyKeys.POSTINGS, properties.getProperty( Index.PropertyKeys.POSTINGS ) );
    globalProperties.setProperty( Index.PropertyKeys.OCCURRENCES, properties.getProperty( Index.PropertyKeys.OCCURRENCES ) );
    globalProperties.setProperty( Index.PropertyKeys.DOCUMENTS, properties.getProperty( Index.PropertyKeys.DOCUMENTS ) );
    globalProperties.setProperty( Index.PropertyKeys.TERMS, properties.getProperty( Index.PropertyKeys.TERMS ) );
    globalProperties.setProperty( Index.PropertyKeys.TERMPROCESSOR, properties.getProperty( Index.PropertyKeys.TERMPROCESSOR ) );
    globalProperties.setProperty( Index.PropertyKeys.MAXCOUNT, properties.getProperty( Index.PropertyKeys.MAXCOUNT ) );
    globalProperties.setProperty( Index.PropertyKeys.MAXDOCSIZE, properties.getProperty( Index.PropertyKeys.MAXDOCSIZE ) );
    globalProperties.save( outputBasename + DiskBasedIndex.PROPERTIES_EXTENSION );
    LOGGER.debug( "Properties for clustered index " + outputBasename + ": " + new ConfigurationMap( globalProperties ) );
   
    for( int i = 0; i < numIndices; i++ ) {
      localIndexStream[ i ].close();
      if ( isHighPerformance ) localPositionsStream[ i ].close();
      localOffsets[ i ].close();
      if ( posNumBits != null ) localPosNumBits[ i ].close();
      localFrequencies[ i ].close();
      localGlobCounts[ i ].close();
      localTerms[ i ].close();
      final InputStream input = new FileInputStream( inputBasename + DiskBasedIndex.SIZES_EXTENSION );
      final OutputStream output = new FileOutputStream( localBasename[ i ] + DiskBasedIndex.SIZES_EXTENSION );
      IOUtils.copy( input, output );
      input.close();
      output.close();
      Properties localProperties = new Properties();
      localProperties.addAll( globalProperties );
      localProperties.setProperty( Index.PropertyKeys.TERMS, numTerms[ i ] );
      localProperties.setProperty( Index.PropertyKeys.OCCURRENCES, numberOfOccurrences[ i ] );
      localProperties.setProperty( Index.PropertyKeys.POSTINGS, numberOfPostings[ i ] );
      localProperties.setProperty( Index.PropertyKeys.INDEXCLASS, properties.getProperty( Index.PropertyKeys.INDEXCLASS ) );
      localProperties.addProperties( Index.PropertyKeys.CODING, properties.getStringArray( Index.PropertyKeys.CODING ) );
      localProperties.setProperty( BitStreamIndex.PropertyKeys.SKIPQUANTUM, properties.getProperty( BitStreamIndex.PropertyKeys.SKIPQUANTUM ) );
      localProperties.setProperty( BitStreamIndex.PropertyKeys.SKIPHEIGHT, properties.getProperty( BitStreamIndex.PropertyKeys.SKIPHEIGHT ) );
      if ( strategyProperties[ i ] != null ) localProperties.addAll( strategyProperties[ i ] );
      localProperties.save( localBasename[ i ] + DiskBasedIndex.PROPERTIES_EXTENSION );
      LOGGER.debug( "Post-partitioning properties for index " + localBasename[ i ] + ": " + new ConfigurationMap( localProperties ) );
    }
  }
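
A recurring detail worth isolating: setProperty() overwrites the current value, while addProperty() (and, presumably, addProperties() for whole arrays) appends, which is what keeps multi-valued keys such as localindex and coding intact when properties are copied around. A sketch with illustrative values:

    Properties q = new Properties();
    q.setProperty( "coding", "POINTERS:DELTA" );     // replaces any previous value
    q.addProperty( "coding", "COUNTS:GAMMA" );       // appends a second value
    String[] codings = q.getStringArray( "coding" ); // => { "POINTERS:DELTA", "COUNTS:GAMMA" }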
