/**
 * Computes the value of the splitting criterion after the split.
 *
 * @param dist the class distributions resulting from the split (one row per
 *          branch, one column per class) — description was missing
 * @param priorVal the value of the splitting criterion before the split
 * @return the gain after splitting (prior value minus the entropy of the
 *         class conditioned on the rows of {@code dist})
 */
protected double gain(double[][] dist, double priorVal) {
  return priorVal - ContingencyTables.entropyConditionedOnRows(dist);
}
/**
 * Computes value of splitting criterion after split.
 *
 * @param dist the distributions
 * @param priorVal the splitting criterion
 * @return the gain after the split
 */
protected double gain(double[][] dist, double priorVal) {
  // Gain = criterion before the split minus the conditional entropy after it.
  double postSplitEntropy = ContingencyTables.entropyConditionedOnRows(dist);
  return priorVal - postSplitEntropy;
}
/**
 * Computes value of splitting criterion after split.
 *
 * @param dist the distributions
 * @param priorVal the splitting criterion
 * @return the gain after the split
 */
protected double gain(double[][] dist, double priorVal) {
  // Subtract the row-conditioned entropy from the pre-split criterion value.
  return -(ContingencyTables.entropyConditionedOnRows(dist) - priorVal);
}
/**
 * Computes the value of the splitting criterion after the split.
 *
 * @param dist the class distributions resulting from the split (one row per
 *          branch, one column per class) — description was missing
 * @param priorVal the value of the splitting criterion before the split
 * @return the gain after splitting (prior value minus the entropy of the
 *         class conditioned on the rows of {@code dist})
 */
protected double gain(double[][] dist, double priorVal) {
  return priorVal - ContingencyTables.entropyConditionedOnRows(dist);
}
/**
 * Returns the information gain for the contingency table: the entropy over
 * the columns minus the entropy of the columns conditioned on the rows.
 *
 * @return the information gain
 */
public double getInfoGain() {
  // Fetch the table once instead of calling getContingencyTable() for each
  // term; getContingencyTable() may rebuild the table — TODO confirm cost.
  double[][] table = getContingencyTable();
  return ContingencyTables.entropyOverColumns(table)
    - ContingencyTables.entropyConditionedOnRows(table);
}
// NOTE(review): fragment — the first tokens close a println started on an
// earlier (unseen) line. Prints several entropy measures of `matrix`; the
// three-argument call presumably applies a Laplace correction with 3
// classes — TODO confirm against the ContingencyTables Javadoc.
ContingencyTables.entropyConditionedOnColumns(matrix)); System.out.println("Entropy conditioned on rows: " + ContingencyTables.entropyConditionedOnRows(matrix)); System.out.println("Entropy conditioned on rows (with Laplace): " + ContingencyTables.entropyConditionedOnRows(matrix, matrix, 3)); System.out.println("Entropy of rows: " + ContingencyTables.entropyOverRows(matrix));
// NOTE(review): fragment — continuation of a println begun on a previous
// line. Dumps column-conditioned, row-conditioned (plain and Laplace-
// corrected), and row entropies of `matrix` to standard output.
ContingencyTables.entropyConditionedOnColumns(matrix)); System.out.println("Entropy conditioned on rows: " + ContingencyTables.entropyConditionedOnRows(matrix)); System.out.println("Entropy conditioned on rows (with Laplace): " + ContingencyTables.entropyConditionedOnRows(matrix, matrix, 3)); System.out.println("Entropy of rows: " + ContingencyTables.entropyOverRows(matrix));
// Conditional entropy of the class given the best split's counts; lower is
// better. Presumably cached as the quality of the chosen split — verify
// against the surrounding (unseen) code.
entropy = ContingencyTables.entropyConditionedOnRows(bestCounts);
// Recomputes the row-conditioned entropy for the best counts found so far.
// NOTE(review): fragment — the role of `entropy` depends on unseen context.
entropy = ContingencyTables.entropyConditionedOnRows(bestCounts);
// NOTE(review): fragment — the `if` body is cut off at the end. For each
// pair of adjacent instances with distinct values on attribute `index`, the
// candidate cut point is their midpoint; the split is kept when its
// row-conditioned entropy improves on the best seen so far.
if (inst.value(index) < instPlusOne.value(index)) { currCutPoint = (inst.value(index) + instPlusOne.value(index)) / 2.0; currVal = ContingencyTables.entropyConditionedOnRows(m_Distribution); if (currVal < bestVal) { m_SplitPoint = currCutPoint;
// NOTE(review): truncated fragment. Midpoint between two adjacent distinct
// attribute values is tried as a cut point; `m_Distribution` is presumably
// updated elsewhere before the entropy call — confirm in the full loop.
if (inst.value(index) < instPlusOne.value(index)) { currCutPoint = (inst.value(index) + instPlusOne.value(index)) / 2.0; currVal = ContingencyTables.entropyConditionedOnRows(m_Distribution); if (currVal < bestVal) { m_SplitPoint = currCutPoint;
// NOTE(review): truncated fragment. Candidate cut point is the midpoint of
// two consecutive sorted instances on `attIndex`; it replaces the best cut
// point when the row-conditioned entropy of `counts` is strictly lower.
currentCutPoint = (instances.instance(i).value(attIndex) + instances .instance(i + 1).value(attIndex)) / 2.0; currentEntropy = ContingencyTables.entropyConditionedOnRows(counts); if (currentEntropy < bestEntropy) { bestCutPoint = currentCutPoint;
// NOTE(review): truncated fragment — same midpoint cut-point search as the
// sibling snippet; `counts` is presumably maintained incrementally as `i`
// advances — confirm against the enclosing loop.
currentCutPoint = (instances.instance(i).value(attIndex) + instances .instance(i + 1).value(attIndex)) / 2.0; currentEntropy = ContingencyTables.entropyConditionedOnRows(counts); if (currentEntropy < bestEntropy) { bestCutPoint = currentCutPoint;
// NOTE(review): truncated fragment. Second row of the binary distribution is
// the complement (total minus the counts of branch i); the candidate split
// is kept when its row-conditioned entropy beats the best so far.
m_Distribution[1][j] = sumCounts[j] - counts[i][j]; currVal = ContingencyTables.entropyConditionedOnRows(m_Distribution); if (currVal < bestVal) { bestVal = currVal;
// NOTE(review): truncated fragment — builds the "everything else" row of a
// two-way distribution and scores it by conditional entropy; loop bounds and
// the update of bestVal's companions are outside this view.
m_Distribution[1][j] = sumCounts[j] - counts[i][j]; currVal = ContingencyTables.entropyConditionedOnRows(m_Distribution); if (currVal < bestVal) { bestVal = currVal;
// NOTE(review): truncated fragment. For every attribute except the class,
// the information gain is entropyOverColumns(counts[i]) minus
// entropyConditionedOnRows(counts[i]).
if (i != classIndex) { m_InfoGains[i] = (ContingencyTables.entropyOverColumns(counts[i]) - ContingencyTables .entropyConditionedOnRows(counts[i]));
// NOTE(review): truncated fragment — per-attribute information gain; the
// class attribute itself is skipped. Closing braces lie outside this view.
if (i != classIndex) { m_InfoGains[i] = (ContingencyTables.entropyOverColumns(counts[i]) - ContingencyTables .entropyConditionedOnRows(counts[i]));