Artificial Neural Networks (namespace Nn)


ConflictInfo: It stores information about the conflict between two rows in a data set
ConflictInfo()
bool operator <(const ConflictInfo& init)const
~ ConflictInfo()

KohoNet: An artificial neural network without supervision (Kohonen ANN)
KohoNet& operator =(const KohoNet& init)
KohoNet()
KohoNet(const KohoNet& init)
bool AutoSetInputScaler(MATRIX& input)
bool Create(int numInputs, int numOutputs, int inputNormType)
bool GetInputScaler(int index, double& minimum, double& maximum)
bool SetInputScaler(int index, double minimum, double maximum)
bool SetWeights(const MATRIX& weights)
const wchar_t* ComputeWinner(const MATRIX& input, valarray<double >& output)
const wchar_t* ComputeWinner(const MATRIX& input, valarray<int >& output)
const wchar_t* Load(const wchar_t* filename)
const wchar_t* Run(const MATRIX& input, MATRIX& output)
const wchar_t* Save(const wchar_t* filename)
const wchar_t* ScaleInputDataSet(const MATRIX& input, MATRIX& scaledInput, bool ignoreWarnings)
const wchar_t* SetTrainingSet(const MATRIX& trainSetIn, bool ignoreWarnings)
const wchar_t* TrainAdditive(Mt::BoolTs& running, Mt::DecimalTs& progress, Mt::DoubleTs& error, double learningRate, int numIterations)
const wchar_t* TrainSubtractive(Mt::BoolTs& running, Mt::DecimalTs& progress, Mt::DoubleTs& error, double learningRate, int numIterations)
int ComputeWinner(int trainCaseIndex)
int GetInputCount() const
int GetOutputCount() const
void Copy(const KohoNet& init)
void Delete()
void GetDescription(wchar_t* description, int length)
void GetNormalizedInput(MATRIX& normInput)
void GetWeights(MATRIX& weights)
void Unlearn()
~ KohoNet()

Layer: One layer of an artificial neural network
Layer& operator =(const Layer& init)
Layer(const Layer& init)
Layer(void)
double OutputDerivative(const int index)
void Agitate(double perturbRatio, Nn::Layer& source)
void ComputeOutput(const MATRIX& input, int rowInputIndex)
void Copy(const Layer& init)
void Delete()
void Initialize()
~ Layer(void)

LayerC: One layer of an artificial neural network that works in the domain of the complex numbers
LayerC& operator =(const LayerC& init)
LayerC(const LayerC& init)
LayerC(void)
void Agitate(double perturbRatio, Nn::LayerC& source)
void ComputeOutput(const MATRIXC& input, int rowInputIndex)
void ComputeOutputAndDerivative(const MATRIXC& input, int rowInputIndex, valarray<Nn::ComplexDerivative >& out_derivative)
void Copy(const LayerC& init)
void Delete()
void Initialize()
void InverseActivationFunction(const complex<double >& output, complex<double >& input)
~ LayerC(void)

LayerNet: A multi-layer artificial neural network
LayerNet& operator =(const LayerNet& init)
LayerNet()
LayerNet(const LayerNet& init)
bool AutoSetInputScaler(MATRIX& input)
bool AutoSetOutputScaler(MATRIX& output)
bool Create(int inputCount, int hidden1Count, int hidden2Count, int outputCount)
bool GetActivation(int layerIndex, valarray<double >& activation)
bool GetInputScaler(int index, double& minimum, double& maximum)
bool GetOutputScaler(int index, double& minimum, double& maximum)
bool GetWeights(int layerIndex, MATRIX& out_weights)
bool Run(const MATRIX& input, MATRIX& output)
bool SetInputName(int index, const wchar_t* name)
bool SetInputScaler(int index, double minimum, double maximum)
bool SetOutputName(int index, const wchar_t* name)
bool SetOutputScaler(int index, double minimum, double maximum)
bool SetWeights(int layerIndex, const MATRIX& weights)
const wchar_t* GetInputName(int index)
const wchar_t* GetOutputName(int index)
const wchar_t* Load(const wchar_t* filename)
const wchar_t* Save(const wchar_t* filename)
const wchar_t* ScaleInputDataSet(const MATRIX& input, MATRIX& scaledInput, bool ignoreWarnings)
const wchar_t* ScaleOutputDataSet(const MATRIX& output, MATRIX& scaledOutput, bool ignoreWarnings)
const wchar_t* SetTrainingSet(const MATRIX& trainSetIn, const MATRIX& trainSetTarget, bool ignoreWarnings)
const wchar_t* TrainConjugateGradient(Mt::BoolTs& cancel, Mt::DecimalTs& progress, Mt::DoubleTs& mse, int epochs, double goal)
const wchar_t* TrainGenetic(Mt::BoolTs& cancel, Mt::DecimalTs& progress, Mt::DoubleTs& mse, Math::GeneticParam& param)
const wchar_t* TrainLevenbergMarquardt(Mt::BoolTs& cancel, Mt::DecimalTs& progress, Mt::DoubleTs& mse, int epochs, double goal)
const wchar_t* TrainRegression(Mt::BoolTs& cancel, Mt::DecimalTs& progress, Mt::DoubleTs& mse)
const wchar_t* TrainSimAnneal(Mt::BoolTs& cancel, Mt::DecimalTs& progress, Mt::DoubleTs& mse, Math::SimAnnealParam& param)
const wchar_t* TrainVariableMetric(Mt::BoolTs& cancel, Mt::DecimalTs& progress, Mt::DoubleTs& mse, int epochs, double goal)
double ComputeCurrentTrueMse()
double ComputeTrueMse(const MATRIX& trainSet_in, const MATRIX& trainSet_target)
double EvaluateFunc(const valarray<double >& x)
double GeneticGetError()
double LevenMar(MATRIX& input, int inputRow, int idep, double target, MATRIX& alpha, valarray<double >& beta, valarray<double >& hid2delta, valarray<double >& grad)
double LevenMarComputeHessianAndGradient(valarray<double >& hid2delta, valarray<double >& grad, MATRIX& hessian, valarray<double >& beta, Mt::BoolTs& cancel)
double SimAnnealGetError()
int GetHidden1NeuronCount() const
int GetHidden2NeuronCount() const
int GetInputCount() const
int GetLayerCount() const
int GetMinNumTrainCases()
int GetOutputCount() const
static bool IsPredictionOverfitting(int seriesLength, int numInputs, int numHid)
static void ComputeBestPrediction(int seriesLength, const MATRIX& mse, int& out_row, int& out_col)
void ComputeOutput(const MATRIX& input, int inputRowIndex, int layerCount)
void Copy(const LayerNet& init)
void Delete()
void EvaluateFuncAndGrad(const valarray<double >& x, double& Fx, valarray<double >& gradient)
void EvaluateGrad(const valarray<double >& x, valarray<double >& outGrad)
void GeneticInitialize(Sys::BoolArray& bits)
void GeneticSetFromBits(const Sys::BoolArray& bits)
void GetDescription(wchar_t* description, int length)
void LevenMarMove(double step, valarray<double >& direction)
void SimAnnealCopy(const Math::ISimAnneal& source)
void SimAnnealInitialize()
void SimAnnealPerturb(Math::ISimAnneal& original, double temperature, double initialTemperature)
void Unlearn()
wchar_t* GetScaledOutput(MATRIX& scaledOutput)
~ LayerNet()

LayerNetC: A multi-layer artificial neural network with inputs and outputs in the domain of the complex numbers
LayerNetC& operator =(const LayerNetC& init)
LayerNetC()
LayerNetC(const LayerNetC& init)
bool AutoSetInputScaler(MATRIXC& input)
bool AutoSetOutputScaler(MATRIXC& output)
bool Create(int inputCount, int hidden1Count, int outputCount)
bool GetActivation(int layerIndex, valarray<complex<double>>& activation)
bool GetInputScaler(int index, double& minimum, double& maximum)
bool GetOutputScaler(int index, double& minimum, double& maximum)
bool GetWeights(int layerIndex, MATRIXC& weights)
bool Run(const MATRIXC& input, MATRIXC& output)
bool SetInputScaler(int index, double minimum, double maximum)
bool SetOutputScaler(int index, double minimum, double maximum)
bool SetWeights(int layerIndex, const MATRIXC& weights)
const wchar_t* Load(const wchar_t* filename)
const wchar_t* Save(const wchar_t* filename)
const wchar_t* ScaleInputDataSet(const MATRIXC& input, MATRIXC& scaledInput, bool ignoreWarnings)
const wchar_t* ScaleOutputDataSet(const MATRIXC& output, MATRIXC& scaledOutput, bool ignoreWarnings)
const wchar_t* SetTrainingSet(const MATRIXC& trainSetIn, const MATRIXC& trainSetTarget, bool ignoreWarnings)
const wchar_t* TrainConjugateGradient(Mt::BoolTs& running, Mt::DecimalTs& progress, Mt::DoubleTs& mse, int epochs, double goal)
const wchar_t* TrainGenetic(Mt::BoolTs& running, Mt::DecimalTs& progress, Mt::DoubleTs& mse, Math::GeneticParam& param)
const wchar_t* TrainSimAnneal(Mt::BoolTs& running, Mt::DecimalTs& progress, Mt::DoubleTs& mse, Math::SimAnnealParam& param)
const wchar_t* TrainVariableMetric(Mt::BoolTs& running, Mt::DecimalTs& progress, Mt::DoubleTs& mse, int epochs, double goal)
double ComputeCurrentTrueMse()
double ComputeTrueMse(const MATRIXC& trainSet_in, const MATRIXC& trainSet_target)
double EvaluateFunc(const valarray<double >& x)
double GeneticGetError()
double SimAnnealGetError()
int GetHidden1NeuronCount() const
int GetInputCount() const
int GetLayerCount() const
int GetMinNumTrainCases()
int GetOutputCount() const
void ComputeOutput(const MATRIXC& input, int inputRowIndex, int layerCount)
void Copy(const LayerNetC& init)
void Delete()
void EvaluateFuncAndGrad(const valarray<double >& x, double& Fx, valarray<double >& gradient)
void EvaluateGrad(const valarray<double >& x, valarray<double >& outGrad)
void GeneticInitialize(Sys::BoolArray& bits)
void GeneticSetFromBits(const Sys::BoolArray& bits)
void GetDescription(wchar_t* description, int length)
void SimAnnealCopy(const Math::ISimAnneal& source)
void SimAnnealInitialize()
void SimAnnealPerturb(Math::ISimAnneal& original, double temperature, double initialTemperature)
void Unlearn()
wchar_t* GetScaledOutput(MATRIXC& scaledOutput)
~ LayerNetC()

ProbNet: A probabilistic artificial neural network
ProbNet& operator =(const ProbNet& init)
ProbNet()
ProbNet(const ProbNet& init)
const wchar_t* Load(const wchar_t* filename)
const wchar_t* Run(const MATRIX& trainSetInput, const MATRIX& trainSetTarget, const MATRIX& input, MATRIX& output)
const wchar_t* Save(const wchar_t* filename)
const wchar_t* TrainConjugateGradient(Mt::BoolTs& running, Mt::DecimalTs& progress, Mt::DoubleTs& mse, const MATRIX& trainSetInput, const MATRIX& trainSetTarget, int epochs, double goal)
const wchar_t* TrainVariableMetric(Mt::BoolTs& running, Mt::DecimalTs& progress, Mt::DoubleTs& mse, const MATRIX& trainSetInput, const MATRIX& trainSetTarget, int epochs, double goal)
double EvaluateFunc(const double x)
double EvaluateFunc(const valarray<double >& x)
int GetInputCount()
int GetOutputCount()
void Copy(const ProbNet& init)
void Delete()
void EvaluateFuncAndDeriv(const double x, double& Fx, double& dFx)
void EvaluateFuncAndGrad(const valarray<double >& x, double& Fx, valarray<double >& gradient)
void GetDescription(wchar_t* description, int length)
void GetWeights(valarray<double >& weights)
void SetWeights(const valarray<double >& weights)
~ ProbNet()

Scaler: It provides support to scale the input or output of an artificial neural network
Scaler& operator =(const Scaler& init)
Scaler()
Scaler(const Scaler& init)
bool AutoSet(MATRIX& matrix)
bool AutoSet(MATRIXC& matrix)
bool Create(int count)
bool Get(int index, double& minimum, double& maximum)
bool GetScalingInfo(int index, ScalingInfo& si)
bool Load(Sys::File& file)
bool Load(const wchar_t* filename)
bool Save(Sys::File& file)
bool Save(const wchar_t* filename)
bool Set(int index, ScalingInfo& si)
bool Set(int index, double minimum, double maximum)
bool SetName(int index, const wchar_t* name)
bool operator !=(const Scaler& init)const
bool operator ==(const Scaler& init)const
const wchar_t* GetName(int index)
int GetSize()
void Copy(const Scaler& init)
void Delete()
void GetDescription(wchar_t* description, int length)
void GetText(int index, wchar_t* buffer, int buffer_length)
wchar_t* ScaleFromStandardRange(const MATRIX& input, MATRIX& output)
wchar_t* ScaleFromStandardRange(const MATRIXC& input, MATRIXC& output)
wchar_t* ScaleFromStandardRange(const valarray<complex<double>>& input, valarray<complex<double>>& output)
wchar_t* ScaleFromStandardRange(const valarray<double >& input, valarray<double >& output)
wchar_t* ScaleTo11(const MATRIX& input, MATRIX& output)
wchar_t* ScaleTo11(const valarray<double >& input, valarray<double >& output)
wchar_t* ScaleToStandardRange(const MATRIX& input, MATRIX& output)
wchar_t* ScaleToStandardRange(const MATRIXC& input, MATRIXC& output)
wchar_t* ScaleToStandardRange(const valarray<complex<double>>& input, valarray<complex<double>>& output)
wchar_t* ScaleToStandardRange(const valarray<double >& input, valarray<double >& output)
~ Scaler()

ScalingInfo
ScalingInfo()
bool Load(Sys::File& file)
bool Save(Sys::File& file)
~ ScalingInfo()

Tanh: High performance class to compute y = tanh(x)
Tanh()
double Derivative(double y)
double Func(double x)
double GetClassificationThreshold()
double GetExactValue(double x)
double GetNeuronOff()
double GetNeuronOn()
static double InverseFunc(double y)
void ShowError(HWND hWnd)
~ Tanh()
© Copyright 2000-2019 selo. All Rights Reserved. Sep 05 2019.