private void reComputeFunctionValueAndGradientHelper(double[] weightsForTheta) {
    MinRiskDAGradientSemiringParser gradientSemiringParser
        = new MinRiskDAGradientSemiringParser(1, 0, scalingFactor, temperature);
    MinRiskDAFuncValSemiringParser funcValSemiringParser
        = new MinRiskDAFuncValSemiringParser(1, 0, scalingFactor, temperature);
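    // For each sentence: fetch its feature forest, attach the current weights and scale,
    // then accumulate the (negated, since we maximize) gradient, function value, gain
    // (negative risk), and entropy into the corresponding fields. Those accumulator
    // fields are presumably reset by the caller before this helper is invoked.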
    for (int sentID = 0; sentID < numSentence; sentID++) {
        //System.out.println("#Process sentence " + sentID);
        FeatureForest fForest = hgFactory.nextHG(sentID);
        fForest.setFeatureWeights(weightsForTheta);
        fForest.setScale(scalingFactor);

        /* Based on the model and a test hypergraph (which provides the topology and the
         * feature/risk annotation), compute the gradient. */
        //TODO: check whether hg_test is a feature forest or not
        gradientSemiringParser.setHyperGraph(fForest);
        HashMap<Integer, Double> gradients = gradientSemiringParser.computeGradientForTheta();
        for (Map.Entry<Integer, Double> feature : gradients.entrySet()) {
            gradientsForTheta[feature.getKey()] -= feature.getValue(); // negate: we are maximizing, not minimizing
        }
        if (this.fixFirstFeature) // do not tune the baseline feature
            gradientsForTheta[0] = 0;
        if (shouldComputeGradientForScalingFactor)
            gradientForScalingFactor -= computeGradientForScalingFactor(gradients, weightsForTheta, scalingFactor); // negate: we are maximizing, not minimizing
        //== compute function value
        funcValSemiringParser.setHyperGraph(fForest);
        functionValue -= funcValSemiringParser.computeFunctionVal(); // negate: we are maximizing, not minimizing
        sumGain += -funcValSemiringParser.getRisk();
        sumEntropy += funcValSemiringParser.getEntropy();
        if (sentID % 1000 == 0) {
            logger.info("======processed sentID =" + sentID);
        }
    }