Package org.apache.hama.ml.math

Examples of org.apache.hama.ml.math.DoubleVector


        ann.trainOnline(new DenseDoubleVector(instances[j % instances.length]));
      }
    }

    for (int i = 0; i < instances.length; ++i) {
      DoubleVector input = new DenseDoubleVector(instances[i]).slice(2);
      // the expected output is the last element in array
      double result = instances[i][2];
      double actual = ann.getOutput(input).get(0);
      if (result < 0.5 && actual >= 0.5 || result >= 0.5 && actual < 0.5) {
        Log.info("Neural network failes to lear the XOR.");
      }
    }

    // write model into file and read out
    String modelPath = "/tmp/testSmallLayeredNeuralNetworkXORLocalWithMomentum";
    ann.setModelPath(modelPath);
    try {
      ann.writeModelToFile();
    } catch (IOException e) {
      e.printStackTrace();
    }
    SmallLayeredNeuralNetwork annCopy = new SmallLayeredNeuralNetwork(modelPath);
    // test on instances
    for (int i = 0; i < instances.length; ++i) {
      DoubleVector input = new DenseDoubleVector(instances[i]).slice(2);
      // the expected output is the last element in array
      double result = instances[i][2];
      double actual = annCopy.getOutput(input).get(0);
      if (result < 0.5 && actual >= 0.5 || result >= 0.5 && actual < 0.5) {
        Log.info("Neural network failes to lear the XOR.");
View Full Code Here


        ann.trainOnline(new DenseDoubleVector(instances[j % instances.length]));
      }
    }

    for (int i = 0; i < instances.length; ++i) {
      DoubleVector input = new DenseDoubleVector(instances[i]).slice(2);
      // the expected output is the last element in array
      double result = instances[i][2];
      double actual = ann.getOutput(input).get(0);
      if (result < 0.5 && actual >= 0.5 || result >= 0.5 && actual < 0.5) {
        Log.info("Neural network failes to lear the XOR.");
      }
    }

    // write model into file and read out
    String modelPath = "/tmp/testSmallLayeredNeuralNetworkXORLocalWithRegularization";
    ann.setModelPath(modelPath);
    try {
      ann.writeModelToFile();
    } catch (IOException e) {
      e.printStackTrace();
    }
    SmallLayeredNeuralNetwork annCopy = new SmallLayeredNeuralNetwork(modelPath);
    // test on instances
    for (int i = 0; i < instances.length; ++i) {
      DoubleVector input = new DenseDoubleVector(instances[i]).slice(2);
      // the expected output is the last element in array
      double result = instances[i][2];
      double actual = annCopy.getOutput(input).get(0);
      if (result < 0.5 && actual >= 0.5 || result >= 0.5 && actual < 0.5) {
        Log.info("Neural network failes to lear the XOR.");
View Full Code Here

        (double) (end - start) / 1000));

    double errorRate = 0;
    // calculate the error on test instance
    for (double[] testInstance : testInstances) {
      DoubleVector instance = new DenseDoubleVector(testInstance);
      double expected = instance.get(instance.getDimension() - 1);
      instance = instance.slice(instance.getDimension() - 1);
      double actual = ann.getOutput(instance).get(0);
      if (actual < 0.5 && expected >= 0.5 || actual >= 0.5 && expected < 0.5) {
        ++errorRate;
      }
    }
View Full Code Here

          instanceList.size()));
      trainingInstances = instanceList.subList(0, instanceList.size()
          - testSize);

      for (double[] instance : trainingInstances) {
        DoubleVector vec = new DenseDoubleVector(instance);
        writer.append(new LongWritable(count++), new VectorWritable(vec));
      }
      writer.close();
    } catch (FileNotFoundException e) {
      e.printStackTrace();
    } catch (IOException e) {
      e.printStackTrace();
    } catch (URISyntaxException e) {
      e.printStackTrace();
    }

    // create model
    int dimension = 8;
    SmallLayeredNeuralNetwork ann = new SmallLayeredNeuralNetwork();
    ann.setLearningRate(0.7);
    ann.setMomemtumWeight(0.5);
    ann.setRegularizationWeight(0.1);
    ann.addLayer(dimension, false,
        FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.addLayer(dimension, false,
        FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.addLayer(dimension, false,
        FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.addLayer(1, true, FunctionFactory.createDoubleFunction("Sigmoid"));
    ann.setCostFunction(FunctionFactory
        .createDoubleDoubleFunction("CrossEntropy"));
    ann.setModelPath(modelPath);

    long start = new Date().getTime();
    Map<String, String> trainingParameters = new HashMap<String, String>();
    trainingParameters.put("tasks", "5");
    trainingParameters.put("training.max.iterations", "2000");
    trainingParameters.put("training.batch.size", "300");
    trainingParameters.put("convergence.check.interval", "1000");
    ann.train(tmpDatasetPath, trainingParameters);

    long end = new Date().getTime();

    // validate results
    double errorRate = 0;
    // calculate the error on test instance
    for (double[] testInstance : testInstances) {
      DoubleVector instance = new DenseDoubleVector(testInstance);
      double expected = instance.get(instance.getDimension() - 1);
      instance = instance.slice(instance.getDimension() - 1);
      double actual = ann.getOutput(instance).get(0);
      if (actual < 0.5 && expected >= 0.5 || actual >= 0.5 && expected < 0.5) {
        ++errorRate;
      }
    }
View Full Code Here

        encoder.trainOnline(new DenseDoubleVector(instances[rnd.nextInt(instances.length)]));
      }
    }

    for (int i = 0; i < instances.length; ++i) {
      DoubleVector encodeVec = encoder.encode(new DenseDoubleVector(
          instances[i]));
      DoubleVector decodeVec = encoder.decode(encodeVec);
      for (int d = 0; d < instances[i].length; ++d) {
        assertEquals(instances[i][d], decodeVec.get(d), 0.1);
      }
    }

  }
View Full Code Here

      }
    }

    double errorInstance = 0;
    for (DoubleVector vector : vecInstanceList) {
      DoubleVector decoded = encoder.getOutput(vector);
      DoubleVector diff = vector.subtract(decoded);
      double error = diff.dot(diff);
      if (error > 0.1) {
        ++errorInstance;
      }
    }
    Log.info(String.format("Autoecoder error rate: %f%%\n", errorInstance * 100 / vecInstanceList.size()));
View Full Code Here

      // normalize instances
      zeroOneNormalization(instanceList, instanceList.get(0).length);
     
      SequenceFile.Writer writer = new SequenceFile.Writer(fs, conf, path, LongWritable.class, VectorWritable.class);
      for (int i = 0; i < instanceList.size(); ++i) {
        DoubleVector vector = new DenseDoubleVector(instanceList.get(i));
        writer.append(new LongWritable(i), new VectorWritable(vector));
      }
     
      writer.close();
    } catch (FileNotFoundException e) {
      e.printStackTrace();
    } catch (IOException e) {
      e.printStackTrace();
    } catch (URISyntaxException e) {
      e.printStackTrace();
    }
   
    AutoEncoder encoder = new AutoEncoder(3, 2);
    String modelPath = "/tmp/autoencoder-modelpath";
    encoder.setModelPath(modelPath);
    Map<String, String> trainingParams = new HashMap<String, String>();
    encoder.setLearningRate(0.5);
    trainingParams.put("tasks", "5");
    trainingParams.put("training.max.iterations", "3000");
    trainingParams.put("training.batch.size", "200");
    encoder.train(path, trainingParams);
   
    double errorInstance = 0;
    for (double[] instance : instanceList) {
      DoubleVector vector = new DenseDoubleVector(instance);
      DoubleVector decoded = encoder.getOutput(vector);
      DoubleVector diff = vector.subtract(decoded);
      double error = diff.dot(diff);
      if (error > 0.1) {
        ++errorInstance;
      }
    }
    Log.info(String.format("Autoecoder error rate: %f%%\n", errorInstance * 100 / instanceList.size()));
View Full Code Here

      e.printStackTrace();
    }

    // initial the mlp with existing model meta-data and get the output
    MultiLayerPerceptron mlp = new SmallMultiLayerPerceptron(modelPath);
    DoubleVector input = new DenseDoubleVector(new double[] { 1, 2, 3 });
    try {
      DoubleVector result = mlp.output(input);
      assertArrayEquals(new double[] { 0.6636557, 0.7009963, 0.7213835 },
          result.toArray(), 0.0001);
    } catch (Exception e1) {
      e1.printStackTrace();
    }

    // delete meta-data
View Full Code Here

    for (int recordsRead = 0; recordsRead < batchSize; ++recordsRead) {
      if (peer.readNext(key, value) == false) {
        peer.reopenInput();
        peer.readNext(key, value);
      }
      DoubleVector trainingInstance = value.getVector();
      SmallLayeredNeuralNetwork.matricesAdd(weightUpdates,
          this.inMemoryModel.trainByInstance(trainingInstance));
      avgTrainingError += this.inMemoryModel.trainingError;
    }
    avgTrainingError /= batchSize;
View Full Code Here

   *
   * @param inputInstance
   * @return The compressed information.
   */
  private DoubleVector transform(DoubleVector inputInstance, int inputLayer) {
    DoubleVector internalInstance = new DenseDoubleVector(inputInstance.getDimension() + 1);
    internalInstance.set(0, 1);
    for (int i = 0; i < inputInstance.getDimension(); ++i) {
      internalInstance.set(i + 1, inputInstance.get(i));
    }
    DoubleFunction squashingFunction = model
        .getSquashingFunction(inputLayer);
    DoubleMatrix weightMatrix = null;
    if (inputLayer == 0) {
      weightMatrix = this.getEncodeWeightMatrix();
    } else {
      weightMatrix = this.getDecodeWeightMatrix();
    }
    DoubleVector vec = weightMatrix.multiplyVectorUnsafe(internalInstance);
    vec = vec.applyToElements(squashingFunction);
    return vec;
  }
View Full Code Here

TOP

Related Classes of org.apache.hama.ml.math.DoubleVector

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., owned by Oracle Inc. Contact coftware#gmail.com.