Package org.hibernate.search.exception

Examples of org.hibernate.search.exception.ErrorHandler


    Assert.assertEquals( "failed work message", exception.getMessage() );
  }

  private MockErrorHandler getErrorHandlerAndAssertCorrectTypeIsUsed() {
    SearchFactoryImplementor searchFactory = getSearchFactoryImpl();
    ErrorHandler errorHandler = searchFactory.getErrorHandler();
    Assert.assertTrue( errorHandler instanceof MockErrorHandler );
    return (MockErrorHandler) errorHandler;
  }
View Full Code Here
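
The tests above fetch the ErrorHandler configured on the SearchFactory and assert that the test-specific MockErrorHandler was picked up. Hibernate Search routes failures from the (often asynchronous) indexing backend to this handler instead of propagating them to the application thread, so a test handler simply records what was reported. Below is a minimal sketch of such a recording handler; it assumes the 4.x-era ErrorHandler contract with handle(ErrorContext) and handleException(String, Throwable), and the class name RecordingErrorHandler is illustrative rather than part of Hibernate Search.

import org.hibernate.search.exception.ErrorContext;
import org.hibernate.search.exception.ErrorHandler;

/**
 * Illustrative stand-in for the MockErrorHandler used in these tests:
 * instead of logging, it records the last reported failure so that
 * assertions can inspect it once the indexing work has completed.
 */
public class RecordingErrorHandler implements ErrorHandler {

  private volatile String lastErrorMessage;
  private volatile Throwable lastException;

  @Override
  public void handle(ErrorContext context) {
    // Invoked by the backend with a context describing the failed work.
    lastException = context.getThrowable();
    lastErrorMessage = lastException != null ? lastException.getMessage() : null;
  }

  @Override
  public void handleException(String errorMsg, Throwable exception) {
    // Invoked by components such as the MassIndexer, as in the snippets below.
    lastErrorMessage = errorMsg;
    lastException = exception;
  }

  public String getLastErrorMessage() {
    return lastErrorMessage;
  }

  public Throwable getLastException() {
    return lastException;
  }
}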


    session.close();
  }

  private MockErrorHandler getErrorHandlerAndAssertCorrectTypeIsUsed() {
    SearchFactoryImplementor searchFactory = getSearchFactoryImpl();
    ErrorHandler errorHandler = searchFactory.getErrorHandler();
    Assert.assertTrue( errorHandler instanceof MockErrorHandler );
    return (MockErrorHandler) errorHandler;
  }
View Full Code Here


    assertEquals( 0, getDocumentNbrFromReaderProvider( indexManager ) );

    s.getTransaction().commit();
    s.close();

    ErrorHandler errorHandler = searchFactoryBySFI.getErrorHandler();
    Assert.assertTrue( errorHandler instanceof MockErrorHandler );
    MockErrorHandler mockErrorHandler = (MockErrorHandler) errorHandler;
    Assert.assertNull( "Errors detected in the backend!", mockErrorHandler.getLastException() );
  }
View Full Code Here
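
For the SearchFactory to hand back a custom handler like this, the handler has to be registered in the configuration, typically through the hibernate.search.error_handler property whose value is the fully qualified class name of the ErrorHandler implementation. The following sketch shows that wiring, reusing the illustrative RecordingErrorHandler from above; the rest of the bootstrap depends on how the SessionFactory is built in your setup.

import org.hibernate.SessionFactory;
import org.hibernate.cfg.Configuration;

public class ErrorHandlerBootstrapExample {

  // Sketch: register a custom ErrorHandler before building the SessionFactory.
  // "hibernate.search.error_handler" is the Hibernate Search property key;
  // RecordingErrorHandler is the illustrative handler sketched earlier.
  public static SessionFactory buildFactory() {
    Configuration cfg = new Configuration();
    cfg.setProperty( "hibernate.search.error_handler", RecordingErrorHandler.class.getName() );
    // ... add annotated entities and connection settings as usual ...
    return cfg.buildSessionFactory();
  }
}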

    this.monitor = monitor;
    this.objectsLimit = objectsLimit;
  }

  public void run() {
    ErrorHandler errorHandler = searchFactory.getErrorHandler();
    try {

      //first start the consumers, then the producers (reverse order):
      for ( int i = 0; i < luceneWorkerBuildingThreadNum; i++ ) {
        //from entity to LuceneWork:
        final EntityConsumerLuceneWorkProducer producer = new EntityConsumerLuceneWorkProducer(
            indexedType,
            fromEntityToAddwork, monitor,
            sessionFactory, producerEndSignal, searchFactory,
            cacheMode, backend, errorHandler
        );
        execDocBuilding.execute( new OptionallyWrapInJTATransaction( sessionFactory, errorHandler, producer ) );
      }
      for ( int i = 0; i < objectLoadingThreadNum; i++ ) {
        //from primary key to loaded entity:
        final IdentifierConsumerEntityProducer producer = new IdentifierConsumerEntityProducer(
            fromIdentifierListToEntities, fromEntityToAddwork, monitor,
            sessionFactory, cacheMode, indexedType, idNameOfIndexedType, errorHandler
        );
        execFirstLoader.execute( new OptionallyWrapInJTATransaction( sessionFactory, errorHandler, producer ) );
      }
      //from class definition to all primary keys:
      final IdentifierProducer producer = new IdentifierProducer(
          fromIdentifierListToEntities, sessionFactory,
          objectLoadingBatchSize, indexedType, monitor,
          objectsLimit, errorHandler, idFetchSize
      );
      execIdentifiersLoader.execute( new OptionallyWrapInJTATransaction( sessionFactory, errorHandler, producer ) );

      //shutdown all executors:
      execIdentifiersLoader.shutdown();
      execFirstLoader.shutdown();
      execDocBuilding.shutdown();
      try {
        producerEndSignal.await(); //wait for all work to be sent to the backend
        log.debugf( "All work for type %s has been produced", indexedType.getName() );
      }
      catch ( InterruptedException e ) {
        //restore interruption signal:
        Thread.currentThread().interrupt();
        throw new SearchException( "Interrupted on batch Indexing; index will be left in unknown state!", e );
      }
    }
    catch ( RuntimeException re ) {
      //this runs on an async thread, so make sure any failure gets reported somewhere
      errorHandler.handleException( log.massIndexerUnexpectedErrorMessage(), re );
    }
    finally {
      endAllSignal.countDown();
    }
  }
View Full Code Here
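
The run() method above also shows the error-reporting pattern that recurs throughout the MassIndexer code in the remaining snippets: any RuntimeException escaping an asynchronous indexing thread is passed to the ErrorHandler rather than silently lost, and the coordinating CountDownLatch is always decremented in a finally block so the controlling thread cannot be left waiting. Here is a stripped-down sketch of that pattern, with illustrative names (ReportingWorker, doIndexingWork, coordinatorLatch) that are not part of Hibernate Search.

import java.util.concurrent.CountDownLatch;
import org.hibernate.search.exception.ErrorHandler;

// Stripped-down version of the error-reporting pattern used by the
// MassIndexer workers shown in these snippets.
public class ReportingWorker implements Runnable {

  private final ErrorHandler errorHandler;
  private final CountDownLatch coordinatorLatch;

  public ReportingWorker(ErrorHandler errorHandler, CountDownLatch coordinatorLatch) {
    this.errorHandler = errorHandler;
    this.coordinatorLatch = coordinatorLatch;
  }

  @Override
  public void run() {
    try {
      doIndexingWork();
    }
    catch (RuntimeException re) {
      // On an async thread nobody observes the exception directly,
      // so report it through the ErrorHandler instead of losing it.
      errorHandler.handleException( "Unexpected error during indexing", re );
    }
    finally {
      // Always release the coordinator, even after a failure.
      coordinatorLatch.countDown();
    }
  }

  private void doIndexingWork() {
    // placeholder for the actual producer/consumer work
  }
}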

    this.monitor = monitor;
    this.objectsLimit = objectsLimit;
  }

  public void run() {
    ErrorHandler errorHandler = searchFactory.getErrorHandler();
    try {

      //first start the consumers, then the producers (reverse order):
      for ( int i = 0; i < luceneWorkerBuildingThreadNum; i++ ) {
        //from entity to LuceneWork:
        final EntityConsumerLuceneWorkProducer producer = new EntityConsumerLuceneWorkProducer(
            fromEntityToAddwork, monitor,
            sessionFactory, producerEndSignal, searchFactory,
            cacheMode, backend, errorHandler
        );
        execDocBuilding.execute( new OptionallyWrapInJTATransaction( sessionFactory, errorHandler, producer ) );
      }
      for ( int i = 0; i < objectLoadingThreadNum; i++ ) {
        //from primary key to loaded entity:
        final IdentifierConsumerEntityProducer producer = new IdentifierConsumerEntityProducer(
            fromIdentifierListToEntities, fromEntityToAddwork, monitor,
            sessionFactory, cacheMode, indexedType, idNameOfIndexedType, errorHandler
        );
        execFirstLoader.execute( new OptionallyWrapInJTATransaction( sessionFactory, errorHandler, producer ) );
      }
      //from class definition to all primary keys:
      final IdentifierProducer producer = new IdentifierProducer(
          fromIdentifierListToEntities, sessionFactory,
          objectLoadingBatchSize, indexedType, monitor,
          objectsLimit, errorHandler
      );
      execIdentifiersLoader.execute( new OptionallyWrapInJTATransaction( sessionFactory, errorHandler, producer ) );

      //shutdown all executors:
      execIdentifiersLoader.shutdown();
      execFirstLoader.shutdown();
      execDocBuilding.shutdown();
      try {
        producerEndSignal.await(); //wait for all work to be sent to the backend
        log.debugf( "All work for type %s has been produced", indexedType.getName() );
      }
      catch ( InterruptedException e ) {
        //restore interruption signal:
        Thread.currentThread().interrupt();
        throw new SearchException( "Interrupted on batch Indexing; index will be left in unknown state!", e );
      }
    }
    catch ( RuntimeException re ) {
      //this runs on an async thread, so make sure any failure gets reported somewhere
      errorHandler.handleException( log.massIndexerUnexpectedErrorMessage(), re );
    }
    finally {
      endAllSignal.countDown();
    }
  }
View Full Code Here

    return new EntityKeyMetadata( persister.getTableName(), persister.getRootTableIdentifierColumnNames() );
  }

  @Override
  public void run() {
    ErrorHandler errorHandler = searchFactory.getErrorHandler();
    try {
      final EntityKeyMetadata keyMetadata = getEntityKeyMetadata();
      final SessionAwareRunnable consumer = new TupleIndexer( indexedType, monitor, sessionFactory, searchFactory, cacheMode, batchBackend, errorHandler );
      gridDialect.forEachTuple( new OptionallyWrapInJTATransaction( sessionFactory, errorHandler, consumer ), keyMetadata );
    }
    catch ( RuntimeException re ) {
      // this runs on an async thread, so make sure any failure gets reported somewhere
      errorHandler.handleException( log.massIndexerUnexpectedErrorMessage(), re );
    }
    finally {
      endAllSignal.countDown();
    }
  }
View Full Code Here

    OgmEntityPersister persister = (OgmEntityPersister) ( (SessionFactoryImplementor) sessionFactory ).getEntityPersister( indexedType.getName() );
    return new EntityKeyMetadata( persister.getTableName(), persister.getRootTableIdentifierColumnNames() );
  }

  public void run() {
    ErrorHandler errorHandler = searchFactory.getErrorHandler();
    try {
      final EntityKeyMetadata keyMetadata = metadata( sessionFactory, indexedType );
      final SessionAwareRunnable consumer = new TupleIndexer( indexedType, monitor, sessionFactory, searchFactory, cacheMode, batchBackend, errorHandler );
      gridDialect.forEachTuple( new OptionallyWrapInJTATransaction( sessionFactory, errorHandler, consumer ), keyMetadata );
    }
    catch ( RuntimeException re ) {
      // this runs on an async thread, so make sure any failure gets reported somewhere
      errorHandler.handleException( log.massIndexerUnexpectedErrorMessage(), re );
    }
    finally {
      endAllSignal.countDown();
    }
  }
View Full Code Here

    this.objectsLimit = objectsLimit;
  }

  @Override
  public void run() {
    ErrorHandler errorHandler = searchFactory.getErrorHandler();
    try {
      final BatchTransactionalContext btctx = new BatchTransactionalContext( sessionFactory, errorHandler );

      //first start the consumers, then the producers (reverse order):
      for ( int i = 0; i < luceneWorkerBuildingThreadNum; i++ ) {
        //from entity to LuceneWork:
        final EntityConsumerLuceneWorkProducer producer = new EntityConsumerLuceneWorkProducer(
            fromEntityToAddwork, monitor,
            sessionFactory, producerEndSignal, searchFactory,
            cacheMode, backend, errorHandler
        );
        execDocBuilding.execute( new OptionallyWrapInJTATransaction( btctx, producer ) );
      }
      for ( int i = 0; i < objectLoadingThreadNum; i++ ) {
        //from primary key to loaded entity:
        final IdentifierConsumerEntityProducer producer = new IdentifierConsumerEntityProducer(
            fromIdentifierListToEntities, fromEntityToAddwork, monitor,
            sessionFactory, cacheMode, indexedType, idNameOfIndexedType, errorHandler
        );
        execFirstLoader.execute( new OptionallyWrapInJTATransaction( btctx, producer ) );
      }
      //from class definition to all primary keys:
      final IdentifierProducer producer = new IdentifierProducer(
          fromIdentifierListToEntities, sessionFactory,
          objectLoadingBatchSize, indexedType, monitor,
          objectsLimit, errorHandler, idFetchSize
      );
      execIdentifiersLoader.execute( new OptionallyWrapInJTATransaction( btctx, producer ) );

      //shutdown all executors:
      execIdentifiersLoader.shutdown();
      execFirstLoader.shutdown();
      execDocBuilding.shutdown();
      try {
        producerEndSignal.await(); //wait for all work to be sent to the backend
        log.debugf( "All work for type %s has been produced", indexedType.getName() );
      }
      catch (InterruptedException e) {
        //restore interruption signal:
        Thread.currentThread().interrupt();
        throw new SearchException( "Interrupted on batch Indexing; index will be left in unknown state!", e );
      }
    }
    catch (RuntimeException re) {
      //this runs on an async thread, so make sure any failure gets reported somewhere
      errorHandler.handleException( log.massIndexerUnexpectedErrorMessage(), re );
    }
    finally {
      endAllSignal.countDown();
    }
  }
View Full Code Here

  private final PerDirectoryWorkProcessor syncWorker = new SyncBatchPerDirectoryWorkProcessor();

  public void initialize(Properties cfg, MassIndexerProgressMonitor monitor, WorkerBuildContext context) {
    this.searchFactoryImplementor = context.getUninitializedSearchFactory();
    final int maxThreadsPerIndex = definedIndexWriters( cfg );
    ErrorHandler errorHandler = searchFactoryImplementor.getErrorHandler();
    for ( DirectoryProvider<?> dp : context.getDirectoryProviders() ) {
      DirectoryProviderWorkspace resources = new DirectoryProviderWorkspace( context, dp, monitor, maxThreadsPerIndex, errorHandler );
      resourcesMap.put( dp, resources );
    }
  }
View Full Code Here
