Package com.github.neuralnetworks.architecture

Examples of com.github.neuralnetworks.architecture.NeuralNetworkImpl
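
NeuralNetworkImpl is the library's basic layered network implementation; instances are normally assembled through the NNFactory helpers shown in the snippets below. A minimal usage sketch, put together only from calls that appear elsewhere on this page (the 2-1 layer sizes are arbitrary):

    // build a 2-1 sigmoid MLP and run a single feedforward pass
    Environment.getInstance().setUseWeightsSharedMemory(true);
    NeuralNetworkImpl nn = NNFactory.mlpSigmoid(new int[] { 2, 1 }, false);

    // a ValuesProvider holds one tensor per layer; write the inputs into the input layer's tensor
    ValuesProvider vp = TensorFactory.tensorProvider(nn, 1, true);
    Tensor in = vp.get(nn.getInputLayer());
    in.set(0.2f, 0, 0);
    in.set(0.6f, 1, 0);

    // propagate from the input layer to the output layer
    Set<Layer> calculated = new HashSet<>();
    calculated.add(nn.getInputLayer());
    nn.getLayerCalculator().calculate(nn, nn.getOutputLayer(), calculated, vp);
    Tensor out = vp.get(nn.getOutputLayer());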


    @Test
    public void testCNNBackpropagation3() {
        // Environment.getInstance().setExecutionMode(EXECUTION_MODE.SEQ);

        Environment.getInstance().setUseWeightsSharedMemory(true);
        NeuralNetworkImpl nn = NNFactory.convNN(new int[][] { { 3, 3, 2 }, { 2, 2, 1, 1 } }, true);
        nn.setLayerCalculator(NNFactory.lcSigmoid(nn, null));

        // initialize the convolution kernel weights to 0.1, 0.2, 0.3, ...
        Conv2DConnection c = (Conv2DConnection) nn.getInputLayer().getConnections().get(0);
        TensorIterator it = c.getWeights().iterator();
        float x = 0.1f;
        while (it.hasNext()) {
            c.getWeights().getElements()[it.next()] = x;
            x += 0.1f;
        }

        // initialize the bias weight
        Conv2DConnection b = (Conv2DConnection) nn.getOutputLayer().getConnections().get(1);
        b.getWeights().getElements()[b.getWeights().getStartIndex()] = -3f;

        // two identical 18-value input samples, each with four target values
        SimpleInputProvider ts = new SimpleInputProvider(new float[][] { { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f }, { 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f, 0.9f, 1, 1.1f, 1.2f, 1.3f, 1.4f, 1.5f, 1.6f, 1.7f, 1.8f } }, new float[][] { { 1, 1, 1, 1 }, { 1, 1, 1, 1 } });
        BackPropagationTrainer<?> t = TrainerFactory.backPropagation(nn, ts, null, null, null, 0.5f, 0f, 0f, 0f, 0f, 1, 1, 1);
        t.train();
    }
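
Each training sample carries 3 * 3 * 2 = 18 input values, and the 2x2 kernel over a 3x3 feature map at stride 1 produces a 2x2x1 output, which is why every sample has exactly four target values.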


    @Test
    public void testCNNStride() {
        // Environment.getInstance().setExecutionMode(EXECUTION_MODE.SEQ);

        Environment.getInstance().setUseWeightsSharedMemory(true);
        NeuralNetworkImpl nn = NNFactory.convNN(new int[][] { { 5, 5, 1 }, { 2, 2, 1, 2 } }, false);
        nn.setLayerCalculator(NNFactory.lcWeightedSum(nn, null));

        // set all kernel weights to 1, so each output unit is the sum of its input patch
        Conv2DConnection cc = (Conv2DConnection) nn.getInputLayer().getConnections().get(0);
        cc.getWeights().forEach(i -> cc.getWeights().getElements()[i] = 1);

        // feed the values 1..25 into the 5x5 input feature map
        ValuesProvider vp = TensorFactory.tensorProvider(nn, 1, true);
        float[] src = new float[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 };
        System.arraycopy(src, 0, vp.get(nn.getInputLayer()).getElements(), vp.get(nn.getInputLayer()).getStartIndex(), src.length);

        Set<Layer> calculatedLayers = new HashSet<>();
        calculatedLayers.add(nn.getInputLayer());
        nn.getLayerCalculator().calculate(nn, nn.getOutputLayer(), calculatedLayers, vp);

        Tensor o = vp.get(nn.getOutputLayer());
        assertEquals(16, o.get(0, 0, 0, 0), 0.00001);
        assertEquals(24, o.get(0, 0, 1, 0), 0.00001);
        assertEquals(56, o.get(0, 1, 0, 0), 0.00001);
        assertEquals(64, o.get(0, 1, 1, 0), 0.00001);
    }
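
With all kernel weights set to 1 and a stride of 2, each output unit is the sum of one non-overlapping 2x2 patch of the 5x5 input, which is where the expected values come from: 1 + 2 + 6 + 7 = 16, 3 + 4 + 8 + 9 = 24, 11 + 12 + 16 + 17 = 56, and 13 + 14 + 18 + 19 = 64.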

The following excerpt compares the feedforward pass of a minimal CNN (2x1x1 input, 1x1 max pooling, one fully connected output unit) with an equivalent 2-1 MLP:

        // Environment.getInstance().setExecutionMode(EXECUTION_MODE.SEQ);

        Environment.getInstance().setUseDataSharedMemory(false);

        // CNN: 2x1x1 input -> 1x1 subsampling (max pooling, effectively identity) -> 1 fully connected unit
        NeuralNetworkImpl cnn = NNFactory.convNN(new int[][] { { 2, 1, 1 }, { 1, 1 }, { 1 } }, false);
        cnn.setLayerCalculator(NNFactory.lcSigmoid(cnn, null));
        NNFactory.lcMaxPooling(cnn);
        FullyConnected cnnfc = (FullyConnected) cnn.getOutputLayer().getConnections().get(0);
        cnnfc.getWeights().set(0.05f, 0, 0);
        cnnfc.getWeights().set(0.08f, 0, 1);
        ValuesProvider cnnvp = TensorFactory.tensorProvider(cnn, 1, Environment.getInstance().getUseDataSharedMemory());
        Tensor cnnin = cnnvp.get(cnn.getInputLayer());
        cnnin.set(0.2f, 0, 0, 0, 0);
        cnnin.set(0.6f, 0, 1, 0, 0);

        // MLP with the same weights and inputs
        NeuralNetworkImpl mlp = NNFactory.mlpSigmoid(new int[] { 2, 1 }, false);
        FullyConnected mlpfc = (FullyConnected) mlp.getOutputLayer().getConnections().get(0);
        mlpfc.getWeights().set(0.05f, 0, 0);
        mlpfc.getWeights().set(0.08f, 0, 1);
        ValuesProvider mlpvp = TensorFactory.tensorProvider(mlp, 1, Environment.getInstance().getUseDataSharedMemory());
        Tensor mlpin = mlpvp.get(mlp.getInputLayer());
        mlpin.set(0.2f, 0, 0);
        mlpin.set(0.6f, 1, 0);

        // compare the feedforward results
        Set<Layer> calculated = new HashSet<>();
        calculated.add(cnn.getInputLayer());
        cnn.getLayerCalculator().calculate(cnn, cnn.getOutputLayer(), calculated, cnnvp);

        calculated = new HashSet<>();
        calculated.add(mlp.getInputLayer());
        mlp.getLayerCalculator().calculate(mlp, mlp.getOutputLayer(), calculated, mlpvp);

        assertTrue(Arrays.equals(cnnvp.get(cnn.getOutputLayer()).getElements(), mlpvp.get(mlp.getOutputLayer()).getElements()));
    }
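
Both networks compute the same weighted sum over the two inputs, so the element-wise comparison holds: sigmoid(0.05 * 0.2 + 0.08 * 0.6) = sigmoid(0.058) ≈ 0.5145 in both output tensors.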

    @Test
    public void testCNNConstruction() {
        Environment.getInstance().setUseWeightsSharedMemory(true);
        // LeNet-style architecture: 32x32 input, two convolution/subsampling pairs, a third convolution and two fully connected layers
        NeuralNetworkImpl nn = NNFactory.convNN(new int[][] { { 32, 32, 1 }, { 5, 5, 6, 1 }, { 2, 2 }, { 5, 5, 16, 1 }, { 2, 2 }, { 5, 5, 120, 1 }, { 84 }, { 10 } }, true);
        assertEquals(13, nn.getLayers().size(), 0);

        Layer l = nn.getInputLayer().getConnections().get(0).getOutputLayer();
        Conv2DConnection cc = (Conv2DConnection) nn.getInputLayer().getConnections().get(0);
        assertEquals(28, cc.getOutputFeatureMapRows(), 0);
        assertEquals(28, cc.getOutputFeatureMapColumns(), 0);
        assertEquals(6, cc.getOutputFilters(), 0);

        Subsampling2DConnection sc = (Subsampling2DConnection) l.getConnections().get(2);
        // ... (assertions on the remaining layers elided)
    }
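
The expected layer count follows from the construction: the input layer, one layer per specification entry (three convolutional, two subsampling, two fully connected), and one bias layer for each convolutional and fully connected layer since addBias is true, giving 1 + 7 + 5 = 13. The 28x28 feature map is the standard valid-convolution size: 32 - 5 + 1 = 28.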

The tests above rely on the NNFactory.convNN factory. Each int[] entry describes one layer: the mandatory 3-element first entry gives the input dimensions (rows, columns, filters); thereafter a 4-element entry adds a convolutional layer (kernel rows, kernel columns, filters, stride), a 2-element entry a subsampling layer (region rows, region columns), and a 1-element entry a fully connected layer (unit count). The signature shown here is reconstructed from the call sites above:

    public static NeuralNetworkImpl convNN(int[][] layers, boolean addBias) {
        if (layers[0].length != 3) {
            throw new IllegalArgumentException("first layer must be convolutional");
        }

        NeuralNetworkImpl result = new NeuralNetworkImpl();
        ConnectionFactory cf = new ConnectionFactory();
        result.setProperties(new Properties());
        result.getProperties().setParameter(Constants.CONNECTION_FACTORY, cf);

        Layer prev = null;
        int prevUnitCount = layers[0][0] * layers[0][1] * layers[0][2];
        result.addLayer(prev = new Layer());
        for (int i = 1; i < layers.length; i++) {
            int[] l = layers[i];
            Layer newLayer = null;
            Layer biasLayer = null;
            if (l.length == 1) {
                // fully connected layer with l[0] units
                cf.fullyConnected(prev, newLayer = new Layer(), prevUnitCount, l[0]);
                if (addBias) {
                    cf.fullyConnected(biasLayer = new Layer(), newLayer, 1, l[0]);
                }

                prevUnitCount = l[0];
            } else if (l.length == 4 || l.length == 2) {
                // determine the input feature map dimensions from the connection feeding the previous layer
                Integer inputFMRows = null;
                Integer inputFMCols = null;
                Integer filters = null;
                if (i == 1) {
                    inputFMRows = layers[0][0];
                    inputFMCols = layers[0][1];
                    filters = layers[0][2];
                } else {
                    for (Connections c : prev.getConnections()) {
                        if (c.getOutputLayer() == prev) {
                            if (c instanceof Conv2DConnection) {
                                Conv2DConnection cc = (Conv2DConnection) c;
                                inputFMRows = cc.getOutputFeatureMapRows();
                                inputFMCols = cc.getOutputFeatureMapColumns();
                                filters = cc.getOutputFilters();
                                break;
                            } else if (c instanceof Subsampling2DConnection) {
                                Subsampling2DConnection sc = (Subsampling2DConnection) c;
                                inputFMRows = sc.getOutputFeatureMapRows();
                                inputFMCols = sc.getOutputFeatureMapColumns();
                                filters = sc.getFilters();
                                break;
                            }
                        }
                    }
                }

                if (l.length == 4) {
                    // convolutional layer: l = { kernelRows, kernelCols, filters, stride }
                    Conv2DConnection c = cf.conv2d(prev, newLayer = new Layer(), inputFMRows, inputFMCols, filters, l[0], l[1], l[2], l[3]);
                    if (addBias) {
                        cf.conv2d(biasLayer = new Layer(), newLayer, c.getOutputFeatureMapRows(), c.getOutputFeatureMapColumns(), 1, 1, 1, l[2], l[3]);
                    }

                    prevUnitCount = c.getOutputUnitCount();
                } else if (l.length == 2) {
                    // subsampling layer: l = { regionRows, regionCols }
                    Subsampling2DConnection c = cf.subsampling2D(prev, newLayer = new Layer(), inputFMRows, inputFMCols, l[0], l[1], filters);
                    prevUnitCount = c.getOutputUnitCount();
                }
            }

            result.addLayer(newLayer);
            if (biasLayer != null) {
                result.addLayer(biasLayer);
            }

            prev = newLayer;
        }

        return result;
    }
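
For example, the call from testCNNStride above, NNFactory.convNN(new int[][] { { 5, 5, 1 }, { 2, 2, 1, 2 } }, false), describes a single-channel 5x5 input followed by one convolutional layer with a 2x2 kernel, one filter and stride 2, which yields the 2x2 output feature map asserted in that test.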

    /**
     * @param layers the number of units in each layer
     * @param addBias whether to add a bias layer for each layer after the input
     * @return a multi-layer perceptron with the given layer sizes
     */
    public static NeuralNetworkImpl mlp(int[] layers, boolean addBias) {
        NeuralNetworkImpl result = new NeuralNetworkImpl();
        mlp(result, new ConnectionFactory(), layers, addBias);
        return result;
    }
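
Note that mlp only wires up the layers; no LayerCalculator is attached, so one has to be set before the network can be evaluated, which is exactly what the convenience factories below do. A minimal sketch using only calls that appear elsewhere on this page:

    NeuralNetworkImpl net = NNFactory.mlp(new int[] { 2, 4, 1 }, true); // 2-4-1 MLP with bias layers
    net.setLayerCalculator(NNFactory.lcSigmoid(net, null));             // same effect as NNFactory.mlpSigmoid(new int[] { 2, 4, 1 }, true)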

    public static NeuralNetworkImpl mlpSigmoid(int[] layers, boolean addBias) {
        NeuralNetworkImpl result = mlp(layers, addBias);
        result.setLayerCalculator(lcSigmoid(result, null));
        return result;
    }

    public static NeuralNetworkImpl mlpSoftRelu(int[] layers, boolean addBias, ConnectionCalculator outputCC) {
        NeuralNetworkImpl result = mlp(layers, addBias);
        result.setLayerCalculator(lcSoftRelu(result, outputCC));
        return result;
    }

    public static NeuralNetworkImpl mlpRelu(int[] layers, boolean addBias, ConnectionCalculator outputCC) {
        NeuralNetworkImpl result = mlp(layers, addBias);
        result.setLayerCalculator(lcRelu(result, outputCC));
        return result;
    }

    public static NeuralNetworkImpl mlpTanh(int[] layers, boolean addBias, ConnectionCalculator outputCC) {
        NeuralNetworkImpl result = mlp(layers, addBias);
        result.setLayerCalculator(lcTanh(result, outputCC));
        return result;
    }
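
All four convenience factories follow the same pattern: build the bare network with mlp and attach the matching layer calculator (lcSigmoid, lcSoftRelu, lcRelu or lcTanh). The outputCC argument, where present, is forwarded to the layer-calculator factory, presumably to substitute a custom ConnectionCalculator for the output layer.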
