Bladeren bron

Flow package restructuring

luis 7 jaren geleden
bovenliggende
commit
aad2602816

+ 1 - 1
browser/vue-flow/src/store/flow/default-registry.js

@@ -7,7 +7,7 @@ export default{
   },
   'Variable': {
     categories: ['core'],
-    inputs: [{type: 'string', name: 'name'}, {type: 'interface {}', name: 'initial'}],
+    inputs: [{type: 'interface {}', name: 'initial'}],
     output: {type: 'interface {}'},
     style: { color: '#88a', shape: 'circle' }
   },

+ 60 - 22
go/src/demos/cmd/noui/flow.go

@@ -1,41 +1,79 @@
 package main
 
 import (
+	"demos/ops/ml"
 	"flow"
-	"flow/registry"
 	"log"
-	"os"
-	"strings"
 )
 
 func main() {
 
-	r := registry.New()
-	r.Add(strings.Split, strings.Join)
+	r := ml.New()
 
 	f := flow.New()
 	f.UseRegistry(r)
 
-	first := f.Op("Join",
-		f.Op("Split", "hello world", " "),
-		"|",
-	)
+	samples := [][]float64{
+		{0, 0},
+		{0, 1},
+		{1, 0},
+		{1, 1},
+	}
+	labels := []float64{1, 0, 0, 1}
+	_ = samples
+	_ = labels
 
-	// Find a way to process this
-	v := f.Var("x", first)
+	// Try binary operations first
+	// 2 inputs 1 output
+	// Load mnist somehow 28x28 784
+	nSample := 1
 
-	res, err := v.Process()
-	log.Println("P:", res, err)
+	// Load mnist into floats array
 
-	opSplit := f.Op("Split", v, " ")
-	opJoin := f.Op("Join", opSplit, ",")
+	// Make a matrix from the weights variable
+	// 5 outputs
 
-	log.Println("Flow:", f)
-	f.Analyse(os.Stdout)
-	res, err = opJoin.Process()
-	if err != nil {
-		log.Fatal(err)
-	}
-	log.Println("res:", res)
+	// Layer 1
+	//
+	sample := samples[nSample]
+
+	// Define input
+	// Make a matrix out of the input
+	inp := f.Op("matNew", 2, 1, f.In(0))
+
+	// Layer1 weights
+	w1 := f.Op("matNew", 5, 2, f.Var("w1", f.Op("normFloat", 5*2)))
+
+	l1op := f.Op("matSigmoid", f.Op("matMul", w1, inp))
+
+	// 5 inputs 2 output
+	w2 := f.Op("matNew", 2, 5, f.Var("w2", f.Op("normFloat", 2*5)))
+
+	// Previous layer result
+	l2op := f.Op("matSigmoid", f.Op("matMul", w2, l1op))
+
+	// Do we need this?
+	netOp := f.Op("toFloatArr", l2op)
+
+	netResI, _ := netOp.Process(sample)
+	res := netResI.([]float64)
+	log.Println("Network result:", res)
+	log.Println("Loss", res[0]-sample[0], res[1]-sample[1])
+	// Back propagation
+
+	//log.Println(layerRes.Process(samples[0]))
+
+	// Operation
+	/*mulRes := f.Op("matrixMul", w1, inp)
+	res, _ := mulRes.Process([]float64{0, 0})
+
+	sig := f.Op("matrixSigmoid", mulRes)
+
+	res, _ = sig.Process(samples[0])
+
+	log.Println("Result:", res)*/
+	//f.Analyse(os.Stdout, []float64{0, 1})
+
+	// weights
 
 }

+ 38 - 23
go/src/demos/ops/ml/gonumops.go

@@ -2,19 +2,15 @@
 package ml
 
 import (
-	"flow"
 	"flow/registry"
-	"log"
 	"math"
 	"math/rand"
 
 	"gonum.org/v1/gonum/mat"
 )
 
-// Tensor wrapper
-type Tensor interface {
-	mat.Matrix
-}
+// Matrix wrapper
+type Matrix = mat.Matrix
 
 // New registry
 func New() *registry.R {
@@ -22,12 +18,15 @@ func New() *registry.R {
 	r := registry.New()
 
 	registry.Describer(
-		r.Add(tensorNew).Inputs("rows", "columns", "data"),
+		r.Add(matNew).Inputs("rows", "columns", "data"),
 		r.Add(
 			normFloat,
-			tensorMul,
-			tensorTranspose,
-			logistic),
+			matMul,
+			matTranspose,
+			matSigmoid,
+			matSigmoidD,
+			toFloatArr,
+		),
 	).Description("gonum functions").
 		Tags("gonum").
 		Extra("style", registry.M{"color": "#953"})
@@ -35,14 +34,6 @@ func New() *registry.R {
 	return r
 }
 
-func myVar(f *flow.Flow, placeholder string, init flow.Data) flow.Data {
-
-	if _, ok := f.Data[placeholder]; !ok {
-		f.Data[placeholder] = init
-	}
-	return f.Data[placeholder]
-}
-
 func normFloat(n int) []float64 {
 	data := make([]float64, n)
 	for i := range data {
@@ -51,25 +42,24 @@ func normFloat(n int) []float64 {
 	return data
 }
 
-func tensorNew(r, c int, data []float64) Tensor {
+func matNew(r, c int, data []float64) Matrix {
 	return mat.NewDense(r, c, data)
 }
 
-func tensorMul(a Tensor, b Tensor) Tensor {
+func matMul(a Matrix, b Matrix) Matrix {
 	var c mat.Dense
 	c.Mul(a, b)
 	return &c
 }
 
-func tensorTranspose(a Tensor) Tensor {
+func matTranspose(a Matrix) Matrix {
 	return a.T()
 }
 
-func logistic(a Tensor) Tensor {
+func matSigmoid(a Matrix) Matrix {
 	// Should be a vector perhaps
 	r, c := a.Dims()
 	ret := mat.NewDense(r, c, nil)
-	log.Println("Dimentions", r, c)
 	for c--; c >= 0; c-- {
 		for r--; r >= 0; r-- {
 			ret.Set(r, c, activator(a.At(r, c)))
@@ -78,6 +68,31 @@ func logistic(a Tensor) Tensor {
 	return ret
 }
 
+func matSigmoidD(a Matrix) Matrix {
+	r, c := a.Dims()
+	ret := mat.NewDense(r, c, nil)
+	for c--; c >= 0; c-- {
+		for r--; r >= 0; r-- {
+			ret.Set(r, c, derivative(a.At(r, c)))
+		}
+	}
+	return ret
+}
+func toFloatArr(a Matrix) []float64 {
+	r, c := a.Dims()
+	ret := make([]float64, r*c)
+	for i := 0; i < c; i++ {
+		for j := 0; j < r; j++ {
+			ret[i*r+j] = a.At(j, i)
+		}
+	}
+	return ret
+
+}
+
 func activator(v float64) float64 {
 	return 1 / (1 + math.Exp(-v))
 }
+func derivative(v float64) float64 {
+	return v * (1 - v)
+}

+ 1 - 1
go/src/flow/flow.go

@@ -63,7 +63,7 @@ func (f *Flow) Analyse(w io.Writer, params ...Data) {
 				fmt.Fprintf(w, "Operator: %s error#%s\n", op.name, err)
 				break
 			}
-			fmt.Fprintf(fw, " %s[%v](%v)", op.kind, k, ires)
+			fmt.Fprintf(fw, " %s(%v)", in.kind, ires)
 		}
 		fmt.Fprintf(fw, ") - ")
 		// Create OpProcessor and execute

+ 4 - 68
go/src/flow/flow_test.go

@@ -3,7 +3,6 @@ package flow_test
 import (
 	"bytes"
 	"encoding/json"
-	"fmt"
 	"testing"
 	"time"
 
@@ -40,69 +39,6 @@ func TestInput(t *testing.T) {
 	a.Eq(d, []float32{3, 3, 3}, "array should be equal")
 
 }
-func TestDefOp(t *testing.T) {
-	a := assert.A(t)
-	f := flow.New()
-
-	var err error
-	_, err = f.DefOp("2", "vecadd", []float32{1, 1, 1}, []float32{2, 2, 2}) // r:3 3 3
-	a.Eq(err, nil, fmt.Sprintf("doing DefOp\n%v", f))
-
-	_, err = f.DefOp("1", "vecadd", []float32{1, 2, 3}, f.GetOp("2")) // r: 4 5 6
-	a.Eq(err, nil, "doing DefOp")
-
-	op := f.Op("vecmul", f.GetOp("1"), []float32{2, 2, 2}) //r:8 10 12
-	a.NotEq(op, nil, "operation not nil")
-
-	_, err = op.Process()
-	a.Eq(err, nil, "mul operation")
-
-	desired := []float32{8, 10, 12}
-	res, _ := op.Process()
-	a.Eq(res, desired, fmt.Sprintf("vector result should match:\n%v", f))
-
-	op, err = f.DefOp("123", "none")
-	a.NotEq(err, nil, "Error should not be nil")
-}
-func TestGetOp(t *testing.T) {
-	a := assert.A(t)
-	f := flow.New()
-
-	op := f.GetOp("1")
-	a.Eq(op, nil, "op should be nil")
-
-}
-
-/*func TestIDGen(t *testing.T) {
-	a := assert.A(t)
-	idTable := []string{"2", "1", "1"}
-
-	f := flow.New()
-	f.SetIDGen(func() string {
-		if len(idTable) == 0 {
-			return "0"
-		}
-		newID := idTable[len(idTable)-1]
-		idTable = idTable[:len(idTable)-1]
-		return newID
-	})
-
-	i1 := f.In(0)
-	a.NotEq(i1, nil, "i1 should not be nil")
-	a.Eq(i1.ID(), "1", "id should be 1")
-
-	i2 := f.In(1)
-	a.NotEq(i2, nil, "i2 should not be nil")
-	a.Eq(i2.ID(), "2", "id should be 2")
-
-	o := f.Op("vecadd", i1, i2)
-	a.NotEq(o, nil, "Should not nil")
-	a.Eq(o.ID(), "0", "id should be 0")
-
-	o = f.Op("vecadd", i1, i2)
-	a.Eq(o, nil, "Should be nil, id generation exausted")
-
-}*/
 
 func TestSerialize(t *testing.T) {
 	// Does not test yet
@@ -215,10 +151,10 @@ func TestCache(t *testing.T) {
 func TestHandler(t *testing.T) {
 	f, op := prepareComplex()
 	f.Hook(flow.Hook{
-		Wait:   func(ID string, triggerTime time.Time) { t.Logf("[%s] Wait", ID) },
-		Start:  func(ID string, triggerTime time.Time) { t.Logf("[%s]Start", ID) },
-		Finish: func(ID string, triggerTime time.Time, res flow.Data) { t.Logf("[%s] Finish %v", ID, res) },
-		Error:  func(ID string, triggerTime time.Time, err error) { t.Logf("[%s] Error %v", ID, err) },
+		Wait:   func(op flow.Operation, triggerTime time.Time) { t.Logf("[%s] Wait", op) },
+		Start:  func(op flow.Operation, triggerTime time.Time) { t.Logf("[%s]Start", op) },
+		Finish: func(op flow.Operation, triggerTime time.Time, res flow.Data) { t.Logf("[%s] Finish %v", op, res) },
+		Error:  func(op flow.Operation, triggerTime time.Time, err error) { t.Logf("[%s] Error %v", op, err) },
 	})
 	op.Process()
 }

+ 11 - 18
go/src/flow/flowserver/flowbuilder/builder.go

@@ -106,6 +106,12 @@ func (fb *FlowBuilder) Build(ID string) flow.Operation {
 		op := f.In(inputID) // By id perhaps
 		fb.OperationMap[node.ID] = op
 		return op
+	case "Variable":
+		log.Println("Source is a variable")
+		var t interface{}
+		inputs = []reflect.Type{
+			reflect.TypeOf(t),
+		}
 	default:
 		log.Println("Loading entry:", node.Src)
 		entry, err := r.Entry(node.Src)
@@ -117,23 +123,6 @@ func (fb *FlowBuilder) Build(ID string) flow.Operation {
 		inputs = entry.Inputs
 
 	}
-	/*case "Variable":
-	// Input 1 is the var
-	raw := node.Prop["init"]
-	val, err := parseValue(nil, raw)
-	if err != nil {
-		op, _ = f.DefErrOp(node.ID, err)
-	} else {
-		op = f.DefVar(node.ID, node.Label, val)
-	}
-	case "Const":
-		raw := node.Label
-		val, err := parseValue(nil, raw)
-		if err != nil {
-			return f.DefErrOp(node.ID, err)
-		}
-		return f.DefConst(node.ID, val)*/
-	// Load entry
 
 	//// Build inputs ////
 	param := make([]flow.Data, len(inputs))
@@ -152,7 +141,11 @@ func (fb *FlowBuilder) Build(ID string) flow.Operation {
 		param[i] = fb.Build(l.From)
 	}
 
-	op = f.Op(node.Src, param...)
+	if node.Src == "Variable" {
+		op = f.Var(node.Label, param[0])
+	} else {
+		op = f.Op(node.Src, param...)
+	}
 	fb.OperationMap[node.ID] = op
 	fb.addTriggersTo(node, op)
 

+ 2 - 2
go/src/flow/flowserver/session.go

@@ -198,14 +198,14 @@ func (s *FlowSession) NodeProcess(c *websocket.Conn, data []byte) error {
 			return s
 		})
 		// Special func
-		localR.Add("Variable", func(name string, initial flow.Data) flow.Data {
+		/*localR.Add("Variable", func(name string, initial flow.Data) flow.Data {
 			log.Println("Loading variable:", name)
 			_, ok := s.flow.Data[name]
 			if !ok {
 				s.flow.Data[name] = initial
 			}
 			return s.flow.Data[name]
-		})
+		})*/
 		localR.Add("Output", func(d interface{}) {
 
 			r := fmt.Sprint("Result:", d)

+ 117 - 238
go/src/flow/operation.go

@@ -8,13 +8,12 @@ package flow
 import (
 	"errors"
 	"fmt"
-	"log"
 	"reflect"
 	"runtime/debug"
 	"sync"
 )
 
-type executorFunc func(OpCtx, ...Data) (Data, error)
+type executorFunc func(*Session, ...Data) (Data, error)
 
 // Operation interface
 type Operation interface { // Id perhaps?
@@ -32,44 +31,48 @@ type operation struct {
 	// Could be a simple ID for operation, but it will depend on flow
 	inputs []*operation // still figuring, might be Operation
 
-	fn       interface{} // Any func
-	executor executorFunc
+	fn       interface{}  // the registry func
+	executor executorFunc // the executor?
+	//processor func(OpCtx, params ...Data) (Data, error)
 }
 
-// OpCtx operation Context
-type OpCtx = *sync.Map
+// NewOperation creates an operation
+func (f *Flow) newOperation(kind string, inputs []*operation) *operation {
+	return &operation{
+		Mutex:  sync.Mutex{},
+		flow:   f,
+		kind:   kind,
+		inputs: inputs,
+		//name:   fmt.Sprintf("(var)<%s>", name),
+	}
 
-// NewOpCtx creates a running context
-func newOpCtx() OpCtx {
-	return &sync.Map{}
 }
 
-//func (o *operation) ID() string { return o.id }
-
-func (o *operation) Process(params ...Data) (Data, error) {
+// Process params are the global inputs
+func (o *operation) Process(ginputs ...Data) (Data, error) {
 	// Create CTX
-	ctx := newOpCtx()
-	return o.executor(ctx, params...)
-
+	s := o.flow.NewSession()
+	return s.Run(o, ginputs...)
 }
 
 // make Executor for func
-func (f *Flow) asTrigger(op *operation, fn executorFunc) executorFunc {
-	return func(ctx OpCtx, params ...Data) (Data, error) {
+// safe run a func
+func (f *Flow) asTrigger(op *operation) executorFunc {
+	return func(sess *Session, params ...Data) (Data, error) {
 		f.hooks.start(op)
 
 		//panic recoverer, since nodes are not our functions
 		var err error
 		var res Data
+		// Safe thing
 		func() {
 			defer func() {
 				if r := recover(); r != nil {
-					log.Println("Panic:", r)
 					debug.PrintStack()
 					err = fmt.Errorf("%v", r)
 				}
 			}()
-			res, err = fn(ctx, params...)
+			res, err = op.executor(sess, params...)
 		}()
 
 		if err != nil {
@@ -82,235 +85,62 @@ func (f *Flow) asTrigger(op *operation, fn executorFunc) executorFunc {
 	}
 }
 
-// safe makeExecutor with a bunch of type checks
-// we will create a less safer functions to get performance
-func (f *Flow) makeExecutor(op *operation, fn interface{}) executorFunc {
-	// ExecutorFunc
-	return func(ctx OpCtx, params ...Data) (Data, error) {
-
-		/*op := f.GetOp(id)
-		if op == nil {
-			return nil, fmt.Errorf("invalid operation '%s'", id)
-		}*/
-		op.Lock()
-		defer op.Unlock()
-
-		// Load from cache if any
-		if ctx != nil {
-			if v, ok := ctx.Load(op); ok {
-				return v, nil
-			}
-		}
-		// Change to wait to wait for the inputs
-		f.hooks.wait(op)
-
-		fnval := reflect.ValueOf(fn)
-		callParam, err := f.processInputs(ctx, op, fnval, params...)
-		if err != nil {
-			return nil, err
-		}
-
-		// Start again and execute function
-		f.hooks.start(op)
-		fnret := fnval.Call(callParam)
-		if len(fnret) == 0 {
-			return nil, nil
-		}
-		// Output erroring
-		if len(fnret) > 1 && (fnret[len(fnret)-1].Interface() != nil) {
-			err, ok := fnret[len(fnret)-1].Interface().(error)
-			if !ok {
-				err = errors.New("unknown error")
-			}
-			return nil, err
-		}
-
-		// THE RESULT
-		ret := fnret[0].Interface()
-		// Store in the cache
-		if ctx != nil {
-			ctx.Store(op, ret)
-		}
-		return ret, nil
-	}
-}
-
 // processInputs will run a list of operations and return reflect values
 // to be processed next
 // NEW PARALLEL PROCESSING
-func (f *Flow) processInputs(ctx OpCtx, op *operation, fnval reflect.Value, params ...Data) ([]reflect.Value, error) {
-	nInputs := fnval.Type().NumIn()
-
-	// Total inputs
-	callParam := make([]reflect.Value, nInputs)
-
-	if nInputs != len(op.inputs) {
-		return nil, fmt.Errorf("expect %d inputs got %d", nInputs, len(op.inputs))
-	} //Wait
-
-	callErrors := ""
-	paramMutex := sync.Mutex{}
-	// Parallel processing if inputs
-	wg := sync.WaitGroup{}
-	wg.Add(len(op.inputs))
-	for i, in := range op.inputs {
-		go func(i int, in *operation) {
-			defer wg.Done()
-			inTyp := fnval.Type().In(i)
-			/////////////////
-			// Executor
-			fr, err := in.executor(ctx, params...)
-			//log.Println("Executing:", in.id, in.name, fr)
-
-			paramMutex.Lock()
-			defer paramMutex.Unlock()
-			if err != nil {
-				callErrors += err.Error() + "\n"
-				return
-			}
-			if fr == nil {
-				callParam[i] = reflect.Zero(inTyp)
-				return
-			}
-			res := reflect.ValueOf(fr)
-			var cres reflect.Value
-
-			// Conversion effort
-			switch {
-			case !res.IsValid():
-				callErrors += fmt.Sprintf("Input %d invalid\n", i)
-				return
-			case !res.Type().ConvertibleTo(inTyp):
-				if inTyp.Kind() != reflect.String {
-					callErrors += fmt.Sprintf("Input %d type: %v(%v) cannot be converted to %v\n", i, res.Type(), res.Interface(), inTyp)
-					log.Println(f)
-					return
-				}
-				cres = reflect.ValueOf(fmt.Sprint(res.Interface()))
-			default:
-				cres = res.Convert(inTyp)
-			}
-			// CheckError and safelly append
-			callParam[i] = cres
-		}(i, in)
-	}
-	wg.Wait()
-
-	// Check for any error
-	if callErrors != "" {
-		log.Println("Call errors:", callErrors)
-		return nil, errors.New(callErrors)
-	}
-
-	return callParam, nil
-}
 
 // Var create a operation
 func (f *Flow) Var(name string, initial Data) Operation {
-	// Input from params
-	var input *operation
-	switch v := initial.(type) {
-	case *operation:
-		input = v
-	default:
-		c := f.Const(v)
-		input = c.(*operation)
-	}
-	op := &operation{
-		Mutex:  sync.Mutex{},
-		flow:   f,
-		name:   fmt.Sprintf("(var)<%s>", name),
-		kind:   "var",
-		inputs: []*operation{input},
-	}
+	inputs := f.makeInputs(initial)
 
-	op.executor = f.makeExecutor(op, func(initial Data) Data {
+	op := f.newOperation("var", inputs)
+	op.name = fmt.Sprintf("(var)<%s>", name)
+
+	op.executor = func(sess *Session, ginputs ...Data) (Data, error) {
 		val, ok := f.Data[name]
 		if !ok {
+			var initial Data
+			f.hooks.wait(op)
+			res, err := sess.RunList(op.inputs, ginputs...)
+			if err != nil {
+				return nil, err
+			}
+			if len(res) > 0 {
+				initial = res[0]
+			}
+
 			val = initial
 			f.Data[name] = val
 		}
-		return val
-	})
+		return val, nil
+	}
 
 	return op
 
 }
 
-// Var define var operation with optional initial
-/*func (f *Flow) Var(name string, initial ...Data) Operation {
-	// Unique
-	if _, ok := f.Data[name]; !ok {
-		var v interface{}
-		if len(initial) > 0 {
-			v = initial[0]
-		}
-		f.Data[name] = v
-	}
-
-	op := &operation{
-		Mutex:  sync.Mutex{},
-		flow:   f,
-		name:   fmt.Sprintf("(var)<%s>", name),
-		kind:   "var",
-		inputs: nil,
-	}
-	op.executor = f.asTrigger(op, func(OpCtx, ...Data) (Data, error) {
-		// if f.data == nil we set from the init operation
-		return f.Data[name], nil
-	})
-	f.operations = append(f.operations, op)
-	//f.operations.Store(id, op)
-	return op
-}*/
-
-// Op Manual tag an Operation
-// Define operation for ID
+// Op operation from registry
 func (f *Flow) Op(name string, params ...interface{}) Operation {
-	inputs := make([]*operation, len(params))
-	for i, p := range params {
-		switch v := p.(type) {
-		case *operation:
-			inputs[i] = v
-		default:
-			c := f.Const(v)
-			inputs[i], _ = c.(*operation)
-		}
-	}
-	// If special executor we attach our func
+	inputs := f.makeInputs(params...)
 
 	// Grab executor here
 	registryFn, err := f.registry.Get(name)
 	if err != nil {
 		return f.ErrOp(err)
 	}
-	op := &operation{
-		Mutex:  sync.Mutex{},
-		flow:   f,
-		name:   name,
-		kind:   "func",
-		inputs: inputs,
-	}
-	executor := f.makeExecutor(op, registryFn)
-	op.executor = f.asTrigger(op, executor)
+	op := f.newOperation("func", inputs)
+	op.name = name
+	// make executor from registry func
+	op.executor = makeExecutor(op, registryFn)
 	f.operations = append(f.operations, op)
-
 	return op
 }
 
 // ErrOp define a nil operation that will return error
 // Useful for builders
 func (f *Flow) ErrOp(err error) Operation {
-	op := &operation{
-		Mutex: sync.Mutex{},
-		//id:     id,
-		flow:   f,
-		name:   fmt.Sprintf("(error)<%v>", err),
-		kind:   "error",
-		inputs: nil,
-	}
-	op.executor = f.asTrigger(op, func(OpCtx, ...Data) (Data, error) { return nil, err })
-	//f.operations = append(f.operations, op)
+	op := f.newOperation("error", nil)
+	op.executor = func(*Session, ...Data) (Data, error) { return nil, err }
 	return op
 }
 
@@ -319,7 +149,7 @@ func (f *Flow) Const(value Data) Operation {
 	// Optimize this definition
 	constID := -1
 	for k, v := range f.consts {
-		if v == value {
+		if reflect.DeepEqual(v, value) {
 			constID = k
 			break
 		}
@@ -329,14 +159,9 @@ func (f *Flow) Const(value Data) Operation {
 		f.consts = append(f.consts, value)
 	}
 
-	op := &operation{
-		Mutex:  sync.Mutex{},
-		flow:   f,
-		name:   fmt.Sprintf("(const)<%v:%v>", constID, value),
-		kind:   "const",
-		inputs: nil,
-	}
-	op.executor = f.asTrigger(op, func(OpCtx, ...Data) (Data, error) { return f.consts[constID], nil })
+	op := f.newOperation("const", nil)
+	op.name = fmt.Sprintf("(const)<%v:%v>", constID, value)
+	op.executor = func(*Session, ...Data) (Data, error) { return f.consts[constID], nil }
 	//f.operations = append(f.operations, op)
 
 	return op
@@ -344,19 +169,73 @@ func (f *Flow) Const(value Data) Operation {
 
 // In define input operation
 func (f *Flow) In(paramID int) Operation {
-	op := &operation{
-		Mutex:  sync.Mutex{},
-		flow:   f,
-		name:   fmt.Sprintf("(in)<%d>", paramID),
-		kind:   "in",
-		inputs: nil,
-	}
-	op.executor = f.asTrigger(op, func(ctx OpCtx, params ...Data) (Data, error) {
-		if paramID < 0 || paramID >= len(params) {
+
+	op := f.newOperation("in", nil)
+	op.name = fmt.Sprintf("(in)<%d>", paramID)
+	op.executor = func(sess *Session, ginputs ...Data) (Data, error) {
+		if paramID < 0 || paramID >= len(ginputs) {
 			return nil, ErrInput
 		}
-		return params[paramID], nil
-	})
-	f.operations = append(f.operations, op)
+		return ginputs[paramID], nil
+	}
+	//f.operations = append(f.operations, op)
 	return op
 }
+
+func (f *Flow) makeInputs(params ...Data) []*operation {
+	inputs := make([]*operation, len(params))
+	for i, p := range params {
+		switch v := p.(type) {
+		case *operation:
+			inputs[i] = v
+		default:
+			c := f.Const(v)
+			inputs[i], _ = c.(*operation)
+		}
+	}
+	return inputs
+}
+
+// make any go func as an executor
+// Trigger
+func makeExecutor(op *operation, fn interface{}) executorFunc {
+	// ExecutorFunc
+	return func(sess *Session, ginput ...Data) (Data, error) {
+		// Change to wait to wait for the inputs
+
+		op.flow.hooks.wait(op)
+		inRes, err := sess.RunList(op.inputs, ginput...)
+		if err != nil {
+			return nil, err
+		}
+
+		fnval := reflect.ValueOf(fn)
+		callParam := make([]reflect.Value, len(inRes))
+		for i, r := range inRes {
+			if r == nil {
+				callParam[i] = reflect.Zero(fnval.Type().In(i))
+			} else {
+				callParam[i] = reflect.ValueOf(r)
+			}
+		}
+
+		// Start again and execute function
+		op.flow.hooks.start(op)
+		fnret := fnval.Call(callParam)
+		if len(fnret) == 0 {
+			return nil, nil
+		}
+		// Output erroring
+		if len(fnret) > 1 && (fnret[len(fnret)-1].Interface() != nil) {
+			err, ok := fnret[len(fnret)-1].Interface().(error)
+			if !ok {
+				err = errors.New("unknown error")
+			}
+			return nil, err
+		}
+
+		// THE RESULT
+		ret := fnret[0].Interface()
+		return ret, nil
+	}
+}

+ 54 - 35
go/src/flow/session.go

@@ -1,55 +1,74 @@
-// TODO operations shoudl be run in a session to store variables structures etc
-
 package flow
 
-import "sync"
-
-// RunCtx operation Context
-type RunCtx = *sync.Map
+import (
+	"errors"
+	"sync"
+)
 
-// NewOpCtx creates a running context
-func newRunCtx() RunCtx {
-	return &sync.Map{}
-}
-
-// Session running session
-// should hold variables and operation clones?
+// Session operation session
 type Session struct {
+	*sync.Map
 	flow *Flow
-	data map[string]Data
 }
 
-// Run operation runner
-func (s *Session) Run(op *operation) (Data, error) {
-	runCtx := newRunCtx()
-	return s.run(runCtx, op)
+// NewSession creates a running context
+func (f *Flow) NewSession() *Session {
+	return &Session{
+		Map:  &sync.Map{},
+		flow: f,
+	}
 }
 
-// internal run
-func (s *Session) run(runCtx RunCtx, op *operation) (Data, error) {
-	// If cache return cache
-	// Cache check
-	if val, ok := runCtx.Load(op); ok {
-		return val, nil
+// Run run operation
+func (s *Session) Run(op *operation, ginputs ...Data) (Data, error) {
+	op.Lock()
+	defer op.Unlock()
+	// Load from cache if any
+	if v, ok := s.Load(op); ok {
+		return v, nil
 	}
 
-	// Process inputs?
-	//
+	res, err := s.flow.asTrigger(op)(s, ginputs...)
+	if err != nil {
+		return nil, err
+	}
+
+	s.Store(op, res)
+	return res, nil
 
-	return nil, nil
 }
 
-// Process inputs for operation
-func (s *Session) processInputs(RunCtx, op *operation, params ...Data) ([]Data, error) {
-	ret := make([]Data, len(op.inputs))
+// RunList more than one operations in this session
+func (s *Session) RunList(ops []*operation, ginputs ...Data) ([]Data, error) {
+	nOps := len(ops)
+
+	// Total inputs
+	callParam := make([]Data, nOps)
 
+	callErrors := ""
+	paramMutex := sync.Mutex{}
+	// Parallel processing if inputs
 	wg := sync.WaitGroup{}
-	wg.Add(len(op.inputs))
-	for i, in := range op.inputs {
-		go func(i, in *operation) {
-			r, err := in.executor(params...)
+	wg.Add(nOps)
+	for i, op := range ops {
+		go func(i int, op *operation) {
+			defer wg.Done()
 
-		}(i, in)
+			res, err := s.Run(op, ginputs...)
+			paramMutex.Lock()
+			defer paramMutex.Unlock()
+			if err != nil {
+				callErrors += err.Error() + "\n"
+				return
+			}
+			callParam[i] = res
+		}(i, op)
+	}
+	wg.Wait()
 
+	if callErrors != "" {
+		return nil, errors.New(callErrors)
 	}
+
+	return callParam, nil
 }