Package: com.github.neuralnetworks.training

Examples of com.github.neuralnetworks.training.TrainingInputDataImpl


      Matrix input = getImages(indexes);

      Matrix target = targetConverter.convert(getLabels(indexes));

      result = new TrainingInputDataImpl(input, target);
  }

  return result;
    }
View Full Code Here


  this.random = new Random();
  this.useRandom = useRandom;
  this.scale = scale;
  this.attachTargetToInput = attachTargetToInput;
  this.dataset = createDataset();
  this.currentExample = new TrainingInputDataImpl(new Matrix(dataset.getRows() - 1 + (attachTargetToInput == true ? 1 : 0), batchSize));
  reset();
    }
View Full Code Here

    // Reusable holder for one training example: a 2x1 input matrix (the two
    // XOR inputs) paired with a 1x1 target matrix (the expected XOR output).
    // Built once here and presumably repopulated per example by the provider.
    private TrainingInputData input;

    /**
     * Creates an input provider for the XOR problem.
     *
     * @param inputSize number of examples this provider will serve —
     *                  NOTE(review): semantics inferred from the name; confirm
     *                  against the getInputSize() callers.
     */
    public XorInputProvider(int inputSize) {
  super();
  this.inputSize = inputSize;
  // Fixed-shape buffers: XOR always has 2 inputs and 1 output per example.
  this.input = new TrainingInputDataImpl(new Matrix(2, 1), new Matrix(1, 1));
    }
View Full Code Here

    }

    @Override
    protected TrainingInputData getInput() {
  if (input == null) {
      input = new TrainingInputDataImpl(getLayerCalculator().getPositivePhaseVisible());
  }

  return input;
    }
View Full Code Here

    }

    @Override
    protected TrainingInputData getInput() {
  if (input == null) {
      input = new TrainingInputDataImpl(activations.get(getNeuralNetwork().getInputLayer()), activations.get(getProperties().getParameter(Constants.OUTPUT_ERROR_DERIVATIVE)));
  }

  return input;
    }
View Full Code Here

      vp = TensorFactory.tensorProvider(n, 1, Environment.getInstance().getUseDataSharedMemory());
        }
        if (vp.get(outputError) == null) {
      vp.add(outputError, vp.get(n.getOutputLayer()).getDimensions());
        }
        TrainingInputData input = new TrainingInputDataImpl(vp.get(n.getInputLayer()), vp.get(outputError));

        Set<Layer> calculatedLayers = new UniqueList<>();
        for (int i = 0; i < inputProvider.getInputSize(); i++) {
      inputProvider.populateNext(input);
      calculatedLayers.clear();
      calculatedLayers.add(n.getInputLayer());

      n.getLayerCalculator().calculate(n, n.getOutputLayer(), calculatedLayers, vp);

      outputError.addItem(vp.get(n.getOutputLayer()), input.getTarget());
        }

        float e = outputError.getTotalNetworkError();
        if (e <= acceptanceError) {
      System.out.println("Stopping at error " + e + " (" + (e * 100) + "%) for " + mbe.getBatchCount() + " minibatches");
View Full Code Here

TOP

Related Classes of com.github.neuralnetworks.training.TrainingInputDataImpl

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by ORACLE Inc. Contact coftware#gmail.com.