 event.go    |  60 -------
 http.go     |   6 +-
 start.go    | 106 +++++++++
 validate.go | 119 +++++++---
 4 files changed, 196 insertions(+), 95 deletions(-)
diff --git a/event.go b/event.go
index 3934ca1..a2406b9 100644
--- a/event.go
+++ b/event.go
@@ -2,7 +2,6 @@ package main

 import (
 	"fmt"
-	"math/rand"
 	"time"

 	"go.wit.com/lib/gui/shell"
@@ -93,62 +92,3 @@ func findDroplet(name string) *pb.Droplet {
 	}
 	return nil
 }
-
-func Start(name string) (bool, string) {
-	var result string
-
-	d := findDroplet(name)
-	if d == nil {
-		result += "can't start unknown droplet: " + name
-		return false, result
-	}
-
-	if d.CurrentState == pb.DropletState_ON {
-		return false, "EVENT start droplet " + d.Hostname + " is already ON"
-	}
-
-	dur := time.Since(me.unstable) // how long has the cluster been stable?
-	result = fmt.Sprintln("should start droplet", name, "here. grid stable for:", shell.FormatDuration(dur))
-	if dur < me.unstableTimeout {
-		tmp := shell.FormatDuration(me.unstableTimeout)
-		result += "grid is still too unstable (unstable timeout = " + tmp + ")"
-		return false, result
-	}
-
-	// make the list of hypervisors that are active and can start new droplets
-	var pool []*HyperT
-	for _, h := range me.hypers {
-		result += fmt.Sprintln("could start droplet on", name, "on", h.pb.Hostname, h.pb.Active)
-		if d.PreferredHypervisor == h.pb.Hostname {
-			// the config file says this droplet should run on this hypervisor
-			a, b := h.start(d)
-			return a, result + b
-		}
-
-		if h.pb.Active != true {
-			continue
-		}
-		pool = append(pool, h)
-	}
-
-	// left here as an example of how to actually do random numbers
-	// it's complete mathematical chaos. Randomness is simple when
-	// human interaction occurs -- which is exactly what happens most
-	// of the time. most random shit is bullshit. all you really need
-	// is exactly this to make sure the random functions work as they
-	// should. Probably, just use this everywhere in all cases. --jcarr
-	rand.Seed(time.Now().UnixNano())
-	a := 0
-	b := len(pool)
-	n := a + rand.Intn(b-a)
-	result += fmt.Sprintln("pool has", len(pool), "members", "rand =", n)
-	h := pool[n]
-
-	// send the search directories to the hypervisor
-	result += fmt.Sprintln("h.sendDirs() HERE")
-	result += fmt.Sprintln("h.sendDirs() HERE")
-	h.sendDirs()
-
-	startbool, startresult := h.start(d)
-	return startbool, result + startresult
-}
diff --git a/http.go b/http.go
index 51c5a10..629b973 100644
--- a/http.go
+++ b/http.go
@@ -99,11 +99,13 @@ func okHandler(w http.ResponseWriter, r *http.Request) {
 	fmt.Fprintln(w, "hostname is", hostname)

 	// log.Warn("Handling URL:", tmp, "start droplet", start)
-	ok, result := Start(hostname)
-	if ok {
+	result, err := Start(hostname)
+	if err == nil {
+		fmt.Fprintln(w, result)
 		fmt.Fprintln(w, hostname, "started ok")
 	} else {
 		fmt.Fprintln(w, result)
+		fmt.Fprintln(w, err)
 		fmt.Fprintln(w, hostname, "start failed")
 	}
 	return
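
Start() now returns (string, error) instead of (bool, string): the string carries the human-readable log either way, and the error signals failure. A caller writing to stdout composes the same way; a minimal sketch (startFromCLI is a hypothetical name, not part of this commit):

func startFromCLI(hostname string) {
	// print the accumulated log text first, then the verdict
	result, err := Start(hostname)
	fmt.Println(result)
	if err != nil {
		fmt.Println(hostname, "start failed:", err)
		return
	}
	fmt.Println(hostname, "started ok")
}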
diff --git a/start.go b/start.go
new file mode 100644
index 0000000..556c2d0
--- /dev/null
+++ b/start.go
@@ -0,0 +1,106 @@
+package main
+
+// validates the droplet information
+// finds a hypervisor
+// attempts to start the virtual machine
+
+import (
+	"errors"
+	"fmt"
+	"math/rand"
+	"time"
+
+	"go.wit.com/lib/gui/shell"
+	pb "go.wit.com/lib/protobuf/virtbuf"
+)
+
+// for now, because sometimes this should write to stdout and
+// sometimes to the http socket, it returns a string
+func Start(name string) (string, error) {
+	var result string
+
+	// look up the droplet by name
+	d := findDroplet(name)
+	if d == nil {
+		result = "can't start unknown droplet: " + name
+		return result, errors.New(result)
+	}
+
+	// validate the droplet
+	if err := ValidateDroplet(d); err != nil {
+		result = "ValidateDroplet() failed for droplet " + d.Hostname
+		return result, err
+	}
+
+	// is the droplet already on?
+	if d.CurrentState == pb.DropletState_ON {
+		result = "EVENT start droplet " + d.Hostname + " is already ON"
+		return result, errors.New(result)
+	}
+
+	// how long has the cluster been stable?
+	// wait until it is stable. use this to throttle droplet starts
+	dur := time.Since(me.unstable)
+	result = fmt.Sprintln("should start droplet", name, "here. grid stable for:", shell.FormatDuration(dur))
+	if dur < me.unstableTimeout {
+		tmp := shell.FormatDuration(me.unstableTimeout)
+		result += "grid is still too unstable (unstable timeout = " + tmp + ")"
+		return result, errors.New("grid is still unstable")
+	}
+
+	// make the list of hypervisors that are active and can start new droplets
+	var pool []*HyperT
+	for _, h := range me.hypers {
+		// this droplet is pinned to this and only this hypervisor,
+		// so try it here even if the hypervisor is marked inactive
+		if d.ForceHypervisor == h.pb.Hostname {
+			ok, b := h.start(d)
+			if ok {
+				return result + b, nil
+			}
+			return result + b, errors.New("start " + name + " on hypervisor " + h.pb.Hostname)
+		}
+
+		// skip hypervisors marked inactive
+		if !h.pb.Active {
+			result += fmt.Sprintln("hypervisor is inactive:", name, "for", h.pb.Hostname, h.pb.Active)
+			continue
+		}
+
+		// the config file says this droplet should run on this hypervisor;
+		// attempt to start the droplet here
+		if d.PreferredHypervisor == h.pb.Hostname {
+			ok, b := h.start(d)
+			if ok {
+				return result + b, nil
+			}
+			return result + b, errors.New("start " + name + " on hypervisor " + h.pb.Hostname)
+		}
+
+		result += fmt.Sprintln("hypervisor ready:", name, "for", h.pb.Hostname, h.pb.Active)
+		pool = append(pool, h)
+	}
+
+	// rand.Intn() panics when the pool is empty, so bail out first
+	if len(pool) == 0 {
+		return result, errors.New("no active hypervisor available for " + name)
+	}
+
+	// pick a random member of the pool to spread the load.
+	// seeding from the wall clock is plenty for load spreading;
+	// nothing here needs cryptographic randomness. --jcarr
+	rand.Seed(time.Now().UnixNano())
+	n := rand.Intn(len(pool))
+	result += fmt.Sprintln("pool has", len(pool), "members", "rand =", n)
+	h := pool[n]
+
+	// update/resend the search directories to the hypervisor
+	result += fmt.Sprintln("h.sendDirs() HERE")
+	h.sendDirs()
+
+	ok, output := h.start(d)
+	if ok {
+		return result + output, nil
+	}
+	return result + output, errors.New("start " + name + " on hypervisor " + h.pb.Hostname)
+}
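
A note on the randomness above: since Go 1.20 the global math/rand generator is seeded automatically and rand.Seed is deprecated, and on Go 1.22+ the math/rand/v2 package removes Seed entirely. A minimal sketch of the same pool pick with no explicit seeding (pickHypervisor is a hypothetical helper, assuming the HyperT type from this repo):

import (
	"errors"
	"math/rand/v2" // Go 1.22+, imported as rand
)

// pickHypervisor returns a random member of the pool, guarding the
// empty case because rand.IntN panics when its argument is zero.
func pickHypervisor(pool []*HyperT) (*HyperT, error) {
	if len(pool) == 0 {
		return nil, errors.New("no active hypervisors in the pool")
	}
	return pool[rand.IntN(len(pool))], nil
}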
diff --git a/validate.go b/validate.go
index 16d78ac..505b022 100644
--- a/validate.go
+++ b/validate.go
@@ -177,46 +177,99 @@ func ValidateDroplets(cluster *pb.Cluster) (map[string]string, map[string]string
 	return umap, macs, nil
 }

-/*
-func safeValidateDroplets(cluster *pb.Cluster) (map[string]string, map[string]string) {
-	// uuid map to check for duplicates
-	var umap map[string]string
-	umap = make(map[string]string)
-
-	// mac address map to check for duplicates
-	var macs map[string]string
-	macs = make(map[string]string)
-
-	for _, d := range cluster.Droplets {
-		// Generate a new UUID
-		if d.Uuid == "" {
-			u := uuid.New()
-			d.Uuid = u.String()
+// checks a droplet right before a start event
+// verify ethernet mac address
+// verify uuid (but probably can ignore this since it's not used)
+// check qemu domain id
+// check spice and vnc ports
+// check filenames
+func ValidateDroplet(check *pb.Droplet) error {
+	// check for duplicate UUIDs
+	for _, d := range me.cluster.Droplets {
+		if check == d {
+			continue
 		}
-
-		// seconds, ok := timeZone[tz]; ok {
-		if _, ok := umap[d.Uuid]; ok {
+		if d.Uuid == check.Uuid {
 			// UUID already exists
-			log.Info("duplicate UUID", d.Uuid, umap[d.Uuid])
 			log.Info("duplicate UUID", d.Uuid, d.Hostname)
-			// os.Exit(-1)
+			log.Info("duplicate UUID", d.Uuid, check.Hostname)
+			return errors.New("duplicate UUID: " + d.Uuid)
 		}
-		umap[d.Uuid] = d.Hostname
+	}

-		for _, n := range d.Networks {
-			// log.Println("network:", n.Mac, d.Uuid, d.Hostname)
-			if _, ok := macs[n.Mac]; ok {
-				// UUID already exists
-				log.Info("duplicate MAC", n.Mac, macs[n.Mac], umap[macs[n.Mac]])
-				log.Info("duplicate MAC", n.Mac, d.Hostname)
-				// os.Exit(-1)
+	// check for duplicate mac addresses
+	for _, checkn := range check.Networks {
+		for _, d := range me.cluster.Droplets {
+			if check == d {
+				continue
+			}
+			for _, n := range d.Networks {
+				if checkn.Mac == n.Mac {
+					// MAC already exists
+					log.Info("duplicate MAC", n.Mac, d.Hostname)
+					log.Info("duplicate MAC", n.Mac, check.Hostname)
+					return errors.New("duplicate MAC: " + n.Mac)
+				}
 			}
-			macs[n.Mac] = d.Uuid
 		}
 	}

-	log.Println("validated okay: no duplicate MAC addr")
-	log.Println("validated okay: no duplicate UUID")
-	return umap, macs
+	if err := setUniqueSpicePort(check); err != nil {
+		return err
+	}
+	return nil
+}
+
+func setUniqueSpicePort(check *pb.Droplet) error {
+	var ports map[int64]*pb.Droplet
+	ports = make(map[int64]*pb.Droplet)
+
+	// index the spice ports already assigned across the cluster
+	// checkn.SpicePort = getUniqueSpicePort()
+	for _, d := range me.cluster.Droplets {
+		if d.SpicePort == 0 {
+			continue
+		}
+		if _, ok := ports[d.SpicePort]; ok {
+			log.Info("duplicate ports", d.SpicePort)
+			return errors.New("duplicate ports")
+		}
+		ports[d.SpicePort] = d
+	}
+
+	for p, d := range ports {
+		log.Info("found spice port", p, "on", d.Hostname)
+	}
+
+	var start int64
+	start = 6000
+	for {
+		if _, ok := ports[start]; ok {
+			d := ports[start]
+			log.Info("already using port", start, "on", d.Hostname)
+			if d == check {
+				log.Info("this is good because it's me!", check.Hostname, d.Hostname)
+				return nil
+			}
+			start++
+			continue
+		}
+		// generate a change port event
+		log.Info("going to try port", start, "on", check.Hostname)
+		e := check.NewChangeEvent("SpicePort", check.SpicePort, start)
+		me.cluster.Events = append(me.cluster.Events, e)
+
+		// set the port to start
+		check.SpicePort = start
+
+		// write out the config file
+		if err := me.cluster.ConfigSave(); err != nil {
+			log.Info("config save error inside here is bad", err)
+			return err
+		}
+
+		return nil
+	}
+	// unreachable: the loop above always returns
+	return nil
 }
-*/
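
ValidateDroplet() compares the candidate against every other droplet's networks, which is quadratic in cluster size. That is fine for small clusters; if it ever matters, the same check runs in one linear pass with a map. A minimal sketch (findDuplicateMac is a hypothetical helper, assuming the pb.Droplet fields used above):

// findDuplicateMac indexes every other droplet's MAC addresses, then
// probes the candidate's networks against that index.
func findDuplicateMac(check *pb.Droplet) error {
	seen := make(map[string]string) // MAC address -> owning hostname
	for _, d := range me.cluster.Droplets {
		if d == check {
			continue
		}
		for _, n := range d.Networks {
			seen[n.Mac] = d.Hostname
		}
	}
	for _, n := range check.Networks {
		if host, ok := seen[n.Mac]; ok {
			return errors.New("duplicate MAC " + n.Mac + " already on " + host)
		}
	}
	return nil
}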