Examples of AdagradLearningRate


Examples of tv.floe.metronome.classification.neuralnetworks.learning.adagrad.AdagradLearningRate

           
for (Neuron neuron : this.nn.getLayerByIndex(x).getNeurons()) {

    for (Connection connection : neuron.getInConnections()) {

        // Attach a per-weight AdaGrad accumulator, seeded with the configured initial learning rate.
        connection.getWeight().trainingMetaData.put("adagrad", new AdagradLearningRate(this.adagradInitLearningRate));

    }

}
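The snippet above hangs one AdagradLearningRate instance off every weight's trainingMetaData, so each connection adapts its learning rate independently. As a rough mental model, a per-weight AdaGrad accumulator of this kind can be sketched as below; the method names match the calls seen in these snippets, but the fields, the epsilon guard, and the choice to seed the accumulator at 1.0 are assumptions, not the Metronome implementation.

// Hypothetical sketch of a per-weight AdaGrad accumulator (not the library's actual source).
public class AdagradLearningRate {

    private final double baseLearningRate;
    // Seeded at 1.0 here so the first compute() returns the base rate; this is an assumption.
    private double sumSquaredGradients = 1.0;
    private static final double EPSILON = 1e-8; // guards against division by zero

    public AdagradLearningRate(double baseLearningRate) {
        this.baseLearningRate = baseLearningRate;
    }

    // Standard AdaGrad scaling: shrink the rate as squared gradients accumulate.
    public double compute() {
        return baseLearningRate / (Math.sqrt(sumSquaredGradients) + EPSILON);
    }

    // Record the last update so weights with large, noisy gradients cool down faster.
    public void addLastIterationGradient(double gradient) {
        sumSquaredGradients += gradient * gradient;
    }
}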

Examples of tv.floe.metronome.classification.neuralnetworks.learning.adagrad.AdagradLearningRate

@Override
protected void updateNeuronWeights(Neuron neuron) {

    double neuronError = neuron.getError();
    double lrTemp = 0;
    AdagradLearningRate alr = null;

    for (Connection connection : neuron.getInConnections()) {

        // Use the per-connection AdaGrad rate when enabled, otherwise fall back to the fixed global rate.
        if (this.adagradLearningOn) {
            alr = (AdagradLearningRate) connection.getWeight().trainingMetaData.get("adagrad");
            lrTemp = alr.compute();
        } else {
            lrTemp = this.learningRate;
        }

        double input = connection.getInput();
        //double weightChange = this.learningRate * neuronError * input;
        double weightChange = lrTemp * neuronError * input;

        Weight weight = connection.getWeight();

        if (!this.isInBatchMode()) {
            // Online mode: apply the change immediately.
            weight.weightChange = weightChange;
            weight.value += weightChange;
        } else {
            // Batch mode: accumulate the change until the batch is applied.
            weight.weightChange += weightChange;
        }

        // Feed this step's change back into the accumulator so future rates shrink accordingly.
        if (this.adagradLearningOn) {
            alr = (AdagradLearningRate) connection.getWeight().trainingMetaData.get("adagrad");
            alr.addLastIterationGradient(weightChange);
        }

        if (this.isMetricCollectionOn()) {
            this.metrics.incWeightOpCount();
        }
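Two details of updateNeuronWeights are worth noting: the adapted rate is read with compute() before the weight is moved, and the resulting weightChange (not the raw gradient) is what gets fed back via addLastIterationGradient(). Using the hypothetical sketch above, the per-connection rate decays over iterations roughly like this (the numbers are illustrative only):

AdagradLearningRate alr = new AdagradLearningRate(0.1);

System.out.println(alr.compute());   // 0.1  (accumulator still at its seed value)
alr.addLastIterationGradient(0.5);   // accumulator: 1.0 + 0.25 = 1.25
System.out.println(alr.compute());   // 0.1 / sqrt(1.25) ~= 0.089
alr.addLastIterationGradient(2.0);   // accumulator: 1.25 + 4.0 = 5.25
System.out.println(alr.compute());   // 0.1 / sqrt(5.25) ~= 0.044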

Examples of tv.floe.metronome.classification.neuralnetworks.learning.adagrad.AdagradLearningRate

        out += "n" + n + "=";
       
            for (Connection connection : neuron.getInConnections()) {

              if (this.adagradLearningOn) {
                AdagradLearningRate alr = (AdagradLearningRate)connection.getWeight().trainingMetaData.get("adagrad");
                //lrTemp = alr.compute();
                out += "" + alr.compute() +",";
              }

            }
           
            n++;

Examples of tv.floe.metronome.classification.neuralnetworks.learning.adagrad.AdagradLearningRate

       

       
// Report the adapted rates of the first two incoming connections.
Connection c = neuron.getInConnections().get(0);

AdagradLearningRate alr = (AdagradLearningRate) c.getWeight().trainingMetaData.get("adagrad");
//lrTemp = alr.compute();
out += "[Ada: " + alr.compute() + " ]";

c = neuron.getInConnections().get(1);

alr = (AdagradLearningRate) c.getWeight().trainingMetaData.get("adagrad");
out += "[Ada: " + alr.compute() + " ]";

}

return out;

Examples of tv.floe.metronome.deeplearning.neuralnetwork.core.learning.AdagradLearningRate

} else {
    this.connectionWeights = weights;
}

// One AdaGrad accumulator per parameter matrix: weights, hidden bias, and visible bias.
this.wAdagrad = new AdagradLearningRate(this.connectionWeights.numRows(), this.connectionWeights.numCols());

if (hBias == null) {
    // TODO: recheck if this column vector is correctly oriented
    this.hiddenBiasNeurons = new DenseMatrix(1, nHidden); //Matrix.zeros(nHidden);
    //} else if (hBias.numRows() != nHidden) {
    //    throw new IllegalArgumentException("Hidden bias must have a length of " + nHidden + " but length was " + hBias.numRows());
} else {
    this.hiddenBiasNeurons = hBias;
}

// this.hBiasAdaGrad = new AdaGrad(hBias.rows, hBias.columns);
this.hBiasAdaGrad = new AdagradLearningRate(this.hiddenBiasNeurons.numRows(), this.hiddenBiasNeurons.numCols());

if (vBias == null) {
    this.visibleBiasNeurons = new DenseMatrix(1, nVisible); //Matrix.zeros(nVisible);
    this.visibleBiasNeurons.assign(0);

} else if (vBias.numRows() != nVisible) {
    throw new IllegalArgumentException("Visible bias must have a length of " + nVisible + " but length was " + vBias.numRows());

} else {
    this.visibleBiasNeurons = vBias;
}

// this.vBiasAdaGrad = new AdaGrad(vBias.rows, vBias.columns);
this.vBiasAdaGrad = new AdagradLearningRate(this.visibleBiasNeurons.numRows(), this.visibleBiasNeurons.numCols());

}
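The deeplearning variant is constructed with (rows, cols), i.e., one accumulator cell per entry of the parameter matrix it is paired with (wAdagrad for the weights, hBiasAdaGrad and vBiasAdaGrad for the bias rows). Only the constructor and load(...) appear in these snippets, so as a purely illustrative sketch of the idea, a matrix-shaped AdaGrad helper could look like the following; the class name, method, and fields here are hypothetical, not the Metronome API:

// Hypothetical matrix-shaped AdaGrad helper: one squared-gradient cell per parameter.
public class MatrixAdagradSketch {

    private final double baseLearningRate;
    private final double[][] sumSquaredGradients; // same shape as the parameter matrix
    private static final double EPSILON = 1e-8;

    public MatrixAdagradSketch(int rows, int cols, double baseLearningRate) {
        this.baseLearningRate = baseLearningRate;
        this.sumSquaredGradients = new double[rows][cols];
    }

    // Accumulate the squared gradient and return the AdaGrad-scaled step, elementwise.
    public double[][] getAdjustedGradient(double[][] gradient) {
        double[][] step = new double[gradient.length][gradient[0].length];
        for (int r = 0; r < gradient.length; r++) {
            for (int c = 0; c < gradient[0].length; c++) {
                sumSquaredGradients[r][c] += gradient[r][c] * gradient[r][c];
                step[r][c] = baseLearningRate * gradient[r][c]
                        / (Math.sqrt(sumSquaredGradients[r][c]) + EPSILON);
            }
        }
        return step;
    }
}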

Examples of tv.floe.metronome.deeplearning.neuralnetwork.core.learning.AdagradLearningRate

    }

}

this.wAdagrad = new AdagradLearningRate(this.connectionWeights.numRows(), this.connectionWeights.numCols());

//if(this.hBias == null) {
if (this.hiddenBiasNeurons == null) {

    //this.hBias = Matrix.zeros(nHidden);
    this.hiddenBiasNeurons = new DenseMatrix(1, this.numberHiddenNeurons); // Matrix.zeros(nHidden);
    this.hiddenBiasNeurons.assign(0.0);

    /*
     * Encourage sparsity.
     * See Hinton's Practical guide to RBMs
     */
    //this.hBias.subi(4);
}

this.hBiasAdaGrad = new AdagradLearningRate(this.hiddenBiasNeurons.numRows(), this.hiddenBiasNeurons.numCols());

if (this.visibleBiasNeurons == null) {

    if (this.trainingDataset != null) {

        this.visibleBiasNeurons = new DenseMatrix(1, this.numberVisibleNeurons); // Matrix.zeros(nVisible);
        this.visibleBiasNeurons.assign(0.0);

    } else {
        // this.vBias = Matrix.zeros(nVisible);
        this.visibleBiasNeurons = new DenseMatrix(1, this.numberVisibleNeurons); // Matrix.zeros(nVisible);
        this.visibleBiasNeurons.assign(0.0);

    }

}

this.vBiasAdaGrad = new AdagradLearningRate(this.visibleBiasNeurons.numRows(), this.visibleBiasNeurons.numCols());

}

Examples of tv.floe.metronome.deeplearning.neuralnetwork.core.learning.AdagradLearningRate

        this.hiddenBiasNeurons.assign(0.0);

        //  this.visibleBiasNeurons = new DenseMatrix(1, this.numberVisibleNeurons); // Matrix.zeros(nVisible);
        this.visibleBiasNeurons.assign(0.0);

        this.wAdagrad = new AdagradLearningRate( this.connectionWeights.numRows(), this.connectionWeights.numCols() );


    }

Examples of tv.floe.metronome.deeplearning.neuralnetwork.core.learning.AdagradLearningRate

this.connectionWeights = new DenseMatrix(nIn, nOut);
this.connectionWeights.assign(0.0);
this.biasTerms = new DenseMatrix(1, nOut); //Matrix.zeros(nOut);
this.biasTerms.assign(0.0);

// Separate AdaGrad accumulators for the weight matrix and the bias row vector.
this.adaLearningRates = new AdagradLearningRate(nIn, nOut);
this.biasAdaGrad = new AdagradLearningRate(this.biasTerms.numRows(), this.biasTerms.numCols());

optimizationAlgorithm = OptimizationAlgorithm.CONJUGATE_GRADIENT;

}
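Continuing the hypothetical sketch above, a layer initialized this way would apply the elementwise-adapted step to each parameter matrix during an update, roughly along these lines (the shapes mirror the snippet; the gradient values are made up):

// Illustrative elementwise AdaGrad update for a small nIn x nOut weight matrix.
int nIn = 2, nOut = 3;
MatrixAdagradSketch adaLearningRates = new MatrixAdagradSketch(nIn, nOut, 0.01);
double[][] weights = new double[nIn][nOut];

double[][] gradient = { { 0.5, -0.2, 0.1 }, { -0.4, 0.3, 0.0 } }; // made-up gradients
double[][] step = adaLearningRates.getAdjustedGradient(gradient);

for (int r = 0; r < nIn; r++) {
    for (int c = 0; c < nOut; c++) {
        weights[r][c] -= step[r][c]; // each cell moves at its own adapted rate
    }
}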

Examples of tv.floe.metronome.deeplearning.neuralnetwork.core.learning.AdagradLearningRate

     
this.useAdaGrad = di.readBoolean();

// Lazily create the AdaGrad accumulators if they have not been built yet, then restore their state.
if (null == this.adaLearningRates) {
    this.adaLearningRates = new AdagradLearningRate(nIn, nOut);
}

this.adaLearningRates.load(is);

if (null == this.biasAdaGrad) {
    this.biasAdaGrad = new AdagradLearningRate(this.biasTerms.numRows(), this.biasTerms.numCols());
}

this.biasAdaGrad.load(is);