Package ca.nengo.math.impl

Examples of ca.nengo.math.impl.GradientDescentApproximator


      public Constraints clone() throws CloneNotSupportedException {
        return (Constraints) super.clone();
      }
    };
   
    GradientDescentApproximator approximator = new GradientDescentApproximator(evalPoints, values, constraints, true);
    float[] coefficients = approximator.findCoefficients(target);
   
    float approx;
    for (int j = 0; j < evalPoints.length; j++) {
      approx = 0f;
      for (int i = 0; i < frequencies.length; i++) {
View Full Code Here


      public Constraints clone() throws CloneNotSupportedException {
        return (Constraints) super.clone();
      }     
    };
   
    GradientDescentApproximator approximator = new GradientDescentApproximator(new float[][]{{1f},{2f},{3f}}, new float[][]{{1f},{2f},{3f}}, constraints, true);
    assertEquals(1000, approximator.getMaxIterations());
    approximator.setMaxIterations(500);
    assertEquals(500, approximator.getMaxIterations());
   
  }
View Full Code Here

      public Constraints clone() throws CloneNotSupportedException {
        return (Constraints) super.clone();
      }
    };
   
    GradientDescentApproximator approximator = new GradientDescentApproximator(new float[][]{{1f},{2f},{3f}}, new float[][]{{1f},{2f},{3f}}, constraints, true);
    assertEquals(.000000001f, approximator.getTolerance());
    approximator.setTolerance(.000001f);
    assertEquals(.000001f, approximator.getTolerance());
   
  }
View Full Code Here

   * @param excitatory If true, weights are to be kept positive (otherwise negative)
   */
  public void optimizeDecoders(float[][] baseWeights, float[] biasEncoders, boolean excitatory) {
    float[][] evalPoints = MU.transpose(new float[][]{new float[myConstantOutputs[0].length]}); //can use anything here because target function is constant
    GradientDescentApproximator.Constraints constraints = new BiasEncodersMaintained(baseWeights, biasEncoders, excitatory);
    GradientDescentApproximator approximator = new GradientDescentApproximator(evalPoints, MU.clone(myConstantOutputs), constraints, true);
    approximator.setStartingCoefficients(MU.transpose(getDecoders())[0]);
    float[] newDecoders = approximator.findCoefficients(new ConstantFunction(1, 0));
    super.setDecoders(MU.transpose(new float[][]{newDecoders}));
  }
View Full Code Here

TOP

Related Classes of ca.nengo.math.impl.GradientDescentApproximator

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and owned by ORACLE Inc. Contact coftware#gmail.com.