Merged
engine/engine.go (22 changes: 19 additions & 3 deletions)
@@ -110,6 +110,22 @@ func (v *Value) Backward() {
 	}
 }
 
+func (v *Value) Data() float64 {
+	return v.data
+}
+
+func (v *Value) SetData(data float64) {
+	v.data = data
+}
+
+func (v *Value) Grad() float64 {
+	return v.grad
+}
+
+func (v *Value) ZeroGrad() {
+	v.grad = 0
+}
+
 func (v Value) String() string {
 	return fmt.Sprintf("Value(label=%s, data=%f, children=(%v), op=%s, grad=%f)", v.label, v.data, v.children, v.op, v.grad)
 }
@@ -126,7 +142,7 @@ func validateValue(candidate interface{}) *Value {
 }
 
 // Helper functions to create Value slices and matrices (Tensor like objects)
-func makeValues(data []float64) []*Value {
+func MakeValues(data []float64) []*Value {
 	/**
 	 * Create a slice of Value pointers from a slice of float64.
 	 **/
@@ -137,13 +153,13 @@ func makeValues(data []float64) []*Value {
 	return ans
 }
 
-func makeValueMatrix(data [][]float64) [][]*Value {
+func MakeValueMatrix(data [][]float64) [][]*Value {
 	/**
 	 * Create a matrix of Value pointers from a matrix of float64.
 	 **/
 	ans := make([][]*Value, len(data))
 	for i := 0; i < len(data); i++ {
-		ans[i] = makeValues(data[i])
+		ans[i] = MakeValues(data[i])
 	}
 	return ans
 }
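
The accessors exported above (Data, SetData, Grad, ZeroGrad) are what let code outside the engine package drive training. Below is a minimal sketch of a single gradient-descent step built only on methods visible in this diff; the package name and the SGDStep helper are illustrative, not part of the PR:

package train

import (
	e "github.com/daniel4x/GoGrad/engine"
)

// SGDStep performs one plain gradient-descent update: clear stale
// gradients, backpropagate from the loss, then move each parameter
// against its gradient, scaled by the learning rate alpha.
func SGDStep(params []*e.Value, loss *e.Value, alpha float64) {
	for _, p := range params {
		p.ZeroGrad() // avoid accumulating gradients across steps
	}
	loss.Backward() // fills in Grad() for every parameter upstream of loss
	for _, p := range params {
		p.SetData(p.Data() - alpha*p.Grad())
	}
}

This is exactly the sequence the xor.go example below performs inline.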
engine/nn.go (29 changes: 28 additions & 1 deletion)
@@ -1,6 +1,9 @@
 package engine
 
-import "math/rand"
+import (
+	"fmt"
+	"math/rand"
+)
 
 /**
  * The structs and functions in this file are used to create a simple feedforward neural network (MLP).
@@ -41,6 +44,12 @@ func NewNeuron(in int) *Neuron {
 	return &Neuron{weights: weights, bias: bias}
 }
 
+func (n *Neuron) String() string {
+	ans := "Neuron{"
+	ans += fmt.Sprintf("Weights=%v, ", len(n.weights))
+	return ans + "Bias}"
+}
+
 type Layer struct {
 	neurons []*Neuron
 }
@@ -70,6 +79,15 @@ func NewLayer(in, out int) *Layer {
 	return &Layer{neurons: neurons}
 }
 
+func (l *Layer) String() string {
+	ans := "Layer{\n"
+	for i := 0; i < len(l.neurons); i++ {
+		ans += "\t\t" + l.neurons[i].String() + ",\n"
+	}
+	ans += "\t}"
+	return ans
+}
+
 type MLP struct {
 	layers []*Layer
 }
@@ -106,3 +124,12 @@ func NewMLP(in int, outs []int) *MLP {

 	return &MLP{layers: layers}
 }
+
+func (m *MLP) String() string {
+	ans := "MLP{\n"
+	for i := 0; i < len(m.layers); i++ {
+		ans += "\t" + m.layers[i].String() + ",\n"
+	}
+	ans += "}"
+	return ans
+}
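
For reference, the three Stringers added above nest tab-indented summaries, and each Neuron reports its weight count rather than the weight values. Tracing the code, printing e.NewMLP(2, []int{4, 4, 1}) (the network the xor.go example below builds) should render as:

MLP{
	Layer{
		Neuron{Weights=2, Bias},
		Neuron{Weights=2, Bias},
		Neuron{Weights=2, Bias},
		Neuron{Weights=2, Bias},
	},
	Layer{
		Neuron{Weights=4, Bias},
		Neuron{Weights=4, Bias},
		Neuron{Weights=4, Bias},
		Neuron{Weights=4, Bias},
	},
	Layer{
		Neuron{Weights=4, Bias},
	},
}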
engine/nn_test.go (2 changes: 1 addition & 1 deletion)
@@ -6,7 +6,7 @@ import (
 )
 
 func TestMLPSimpleScenario(t *testing.T) {
-	x := makeValueMatrix(
+	x := MakeValueMatrix(
 		[][]float64{
 			{2.0, 3.0, -1.0},
 			{3.0, -1.0, 0.5},
examples/xor.go (94 changes: 94 additions & 0 deletions)
@@ -0,0 +1,94 @@
+package main
+
+import (
+	"fmt"
+	"math"
+
+	e "github.com/daniel4x/GoGrad/engine"
+)
+
+func createXORData() ([][]*e.Value, []float64) {
+	x := [][]float64{
+		{0, 0},
+		{0, 1},
+		{1, 0},
+		{1, 1},
+	}
+	y := []float64{-1, 1, 1, -1} // encode false as -1 and true as 1 to get cleaner outputs from the model
+
+	return e.MakeValueMatrix(x), y
+}
+
+func printData(X [][]*e.Value, y []float64) {
+	for i := 0; i < len(X); i++ {
+		fmt.Printf("(%v, %v) -> %v\n", X[i][0].Data(), X[i][1].Data(), y[i])
+	}
+}
+
+func main() {
+	// Create the XOR dataset
+	X, y := createXORData()
+	fmt.Println("XOR dataset:")
+	printData(X, y)
+
+	// Define an MLP with 2 inputs, two hidden layers of 4 neurons each, and 1 output neuron
+	nn := e.NewMLP(2, []int{4, 4, 1})
+	fmt.Println("\nMulti-layer Perceptron Definition:\n", nn)
+
+	// Train the model
+	epochs := 2000
+	alpha := 0.01
+
+	for i := 0; i < epochs; i++ {
+		yModel := make([]*e.Value, len(X))
+
+		// Forward pass
+		// Feed each data point through the network
+		for j := 0; j < len(X); j++ {
+			yModel[j] = nn.Call(X[j])
+		}
+
+		// Compute the loss as the sum of squared errors
+		loss := yModel[0].Sub(y[0]).Pow(2)
+		for j := 1; j < len(yModel); j++ {
+			loss = loss.Add(yModel[j].Sub(y[j]).Pow(2))
+		}
+
+		// Backward pass
+		// Zero the gradients first to avoid accumulation between epochs
+		params := nn.Parameters()
+		for j := 0; j < len(params); j++ {
+			params[j].ZeroGrad()
+		}
+
+		loss.Backward()
+
+		// Update the parameters with plain gradient descent
+		for j := 0; j < len(params); j++ {
+			params[j].SetData(params[j].Data() - alpha*params[j].Grad())
+		}
+
+		if (i+1)%100 == 0 {
+			// Print the loss every 100 epochs
+			fmt.Println("epoch", i+1, "loss", loss.Data())
+		}
+	}
+
+	// Test the model
+	predictions := make([]float64, len(X))
+	for i := 0; i < len(X); i++ {
+		predictions[i] = nn.Call(X[i]).Data()
+	}
+
+	fmt.Println("\nTesting the model:")
+	for i := 0; i < len(X); i++ {
+		fmt.Printf("(%v, %v) -> Actual: %v Prediction: %v\n", X[i][0].Data(), X[i][1].Data(), y[i], predictions[i])
+	}
+
+	// Raise an error if the difference between an actual and predicted value exceeds 0.1
+	for i := 0; i < len(X); i++ {
+		if math.Abs(y[i]-predictions[i]) > 0.1 {
+			panic(fmt.Sprintf("\nTest failed: (%v, %v) -> Actual: %v Prediction: %v\n", X[i][0].Data(), X[i][1].Data(), y[i], predictions[i]))
+		}
+	}
+}
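
The loss here is the sum of squared errors over the four XOR points, loss = sum_j (pred_j - y_j)^2, so once it drops below 0.01 every individual prediction is guaranteed to be within 0.1 of its +-1 target and the final check cannot panic. Assuming a standard checkout of the module, the example should run with:

go run examples/xor.go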