-rw-r--r--  dump.go      22
-rw-r--r--  event.go      3
-rw-r--r--  http.go      26
-rw-r--r--  main.go       7
-rw-r--r--  structs.go   18
-rw-r--r--  validate.go  56
6 files changed, 100 insertions, 32 deletions
diff --git a/dump.go b/dump.go
new file mode 100644
index 0000000..1e6a596
--- /dev/null
+++ b/dump.go
@@ -0,0 +1,22 @@
+package main
+
+import (
+ "fmt"
+ "net/http"
+)
+
+/*
+ debugging code to see the state of the
+ cluster via http
+*/
+
+func dumpCluster(w http.ResponseWriter) {
+ umap, macs := safeValidateDroplets(me.cluster)
+ for u, hostname := range umap {
+ fmt.Fprintln(w, "uuid:", u, "hostname:", hostname)
+ }
+
+ for mac, uuid := range macs {
+ fmt.Fprintln(w, "mac:", mac, "uuid", uuid, "hostname:", umap[uuid])
+ }
+}
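
(A minimal client sketch for exercising this new debug output once virtigo is running. The /dumpcluster route itself is wired up in http.go further down; the listen address below is only a placeholder, since the real port is not part of this diff.)

    // hypothetical client sketch -- fetches the cluster dump from a running virtigo.
    // "localhost:8080" is a placeholder; use whatever address virtigo actually listens on.
    package main

    import (
    	"fmt"
    	"io"
    	"net/http"
    )

    func main() {
    	resp, err := http.Get("http://localhost:8080/dumpcluster")
    	if err != nil {
    		fmt.Println("request failed:", err)
    		return
    	}
    	defer resp.Body.Close()
    	body, _ := io.ReadAll(resp.Body)
    	fmt.Print(string(body)) // one "uuid: ... hostname: ..." line per droplet, then the MAC table
    }
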
diff --git a/event.go b/event.go
index 0a8ea63..3934ca1 100644
--- a/event.go
+++ b/event.go
@@ -110,7 +110,8 @@ func Start(name string) (bool, string) {
dur := time.Since(me.unstable) // how long has the cluster been stable?
result = fmt.Sprintln("should start droplet", name, "here. grid stable for:", shell.FormatDuration(dur))
if dur < me.unstableTimeout {
- result += "grid is still too unstable"
+ tmp := shell.FormatDuration(me.unstableTimeout)
+ result += "grid is still too unstable (unstable timeout = " + tmp + ")"
return false, result
}
diff --git a/http.go b/http.go
index d00117b..8d819f5 100644
--- a/http.go
+++ b/http.go
@@ -76,11 +76,6 @@ func okHandler(w http.ResponseWriter, r *http.Request) {
return
}
- if route == "/dumplibvirtxml" {
- virtigoxml.DumpLibvirtxmlDomainNames()
- return
- }
-
if route == "/uptime" {
ok, s := clusterHealthy()
if ok {
@@ -104,10 +99,23 @@ func okHandler(w http.ResponseWriter, r *http.Request) {
fmt.Fprintln(w, "hostname is", hostname)
// log.Warn("Handling URL:", tmp, "start droplet", start)
- b, result := Start(hostname)
- log.Warn("Start returned =", b, "result =", result)
- fmt.Fprintln(w, "Start() returned", b)
- fmt.Fprintln(w, "result:", result)
+ ok, result := Start(hostname)
+ if ok {
+ fmt.Fprintln(w, hostname, "started ok")
+ } else {
+ fmt.Fprintln(w, result)
+ fmt.Fprintln(w, hostname, "start failed")
+ }
+ return
+ }
+
+ if route == "/dumpcluster" {
+ dumpCluster(w)
+ return
+ }
+
+ if route == "/dumplibvirtxml" {
+ virtigoxml.DumpLibvirtxmlDomainNames()
return
}
diff --git a/main.go b/main.go
index a36c0bc..794ee83 100644
--- a/main.go
+++ b/main.go
@@ -48,7 +48,7 @@ func main() {
me.unstableTimeout = 17 * time.Second
// how long the cluster must be stable before new droplets can be started
- me.clusterStableDuration = 37 * time.Second
+ me.clusterStableDuration = 37 * time.Second
// read in the config file
me.cluster = new(pb.Cluster)
@@ -72,7 +72,10 @@ func main() {
var newEvents []*pb.Event
// sanity check the cluster & droplets
- ValidateDroplets(me.cluster, false)
+ if err := ValidateDroplets(me.cluster); err != nil {
+ log.Info("todo: add flag to ignore. for now, fix problems in the config file.")
+ os.Exit(0)
+ }
newe := ValidateDiskFilenames(me.cluster)
for _, e := range newe {
newEvents = append(newEvents, e)
diff --git a/structs.go b/structs.go
index 81d4d00..19b5c86 100644
--- a/structs.go
+++ b/structs.go
@@ -20,15 +20,15 @@ func (b *virtigoT) Enable() {
// this app's variables
type virtigoT struct {
- cluster *pb.Cluster // basic cluster settings
- delay time.Duration // how often to poll the hypervisors
- hmap map[*pb.Hypervisor]*HyperT // map to the local struct
- names []string
- hypers []*HyperT
- killcount int
- unstable time.Time // the last time the cluster was incorrect
- changed bool
- unstableTimeout time.Duration // how long a droplet can be missing until it's declared dead
+ cluster *pb.Cluster // basic cluster settings
+ delay time.Duration // how often to poll the hypervisors
+ hmap map[*pb.Hypervisor]*HyperT // map to the local struct
+ names []string
+ hypers []*HyperT
+ killcount int
+ unstable time.Time // the last time the cluster was incorrect
+ changed bool
+ unstableTimeout time.Duration // how long a droplet can be missing until it's declared dead
clusterStableDuration time.Duration // how long the cluster must be stable before new droplets can be started
}
diff --git a/validate.go b/validate.go
index 5343c34..274033c 100644
--- a/validate.go
+++ b/validate.go
@@ -15,7 +15,6 @@ package main
import (
"errors"
- "os"
"path/filepath"
"github.com/google/uuid"
@@ -72,6 +71,7 @@ func lookupFilename(cluster *pb.Cluster, filename string) *pb.Droplet {
return nil
}
+/*
func InsertFilename(cluster *pb.Cluster, d *pb.Droplet, filename string) (*pb.Event, error) {
dupd := lookupFilename(cluster, filename)
if dupd != nil {
@@ -104,6 +104,7 @@ func InsertFilename(cluster *pb.Cluster, d *pb.Droplet, filename string) (*pb.Ev
log.Info("New filename", filebase, dir)
return e, nil
}
+*/
func ValidateUniqueFilenames(cluster *pb.Cluster) bool {
var ok bool = true
@@ -167,8 +168,9 @@ func ValidateDiskFilenames(cluster *pb.Cluster) []*pb.Event {
return alle
}
-// this doesn't run often
-func ValidateDroplets(cluster *pb.Cluster, dump bool) bool {
+// runs at startup. returns an error if there are duplicate UUIDs or MAC addrs;
+// the caller should exit so the config file can be fixed by hand
+func ValidateDroplets(cluster *pb.Cluster) error {
// uuid map to check for duplicates
var umap map[string]string
umap = make(map[string]string)
@@ -189,7 +191,7 @@ func ValidateDroplets(cluster *pb.Cluster, dump bool) bool {
// UUID already exists
log.Info("duplicate UUID", d.Uuid, umap[d.Uuid])
log.Info("duplicate UUID", d.Uuid, d.Hostname)
- os.Exit(-1)
+ return errors.New("duplicate UUID: " + d.Uuid)
}
umap[d.Uuid] = d.Hostname
@@ -199,7 +201,7 @@ func ValidateDroplets(cluster *pb.Cluster, dump bool) bool {
// UUID already exists
log.Info("duplicate MAC", n.Mac, macs[n.Mac], umap[macs[n.Mac]])
log.Info("duplicate MAC", n.Mac, d.Hostname)
- os.Exit(-1)
+ return errors.New("duplicate MAC: " + n.Mac)
}
macs[n.Mac] = d.Uuid
}
@@ -207,15 +209,47 @@ func ValidateDroplets(cluster *pb.Cluster, dump bool) bool {
log.Println("validated okay: no duplicate MAC addr")
log.Println("validated okay: no duplicate UUID")
- if dump {
- for u, hostname := range umap {
- log.Println("uuid:", u, "hostname:", hostname)
+ return nil
+}
+
+func safeValidateDroplets(cluster *pb.Cluster) (map[string]string, map[string]string) {
+ // uuid map to check for duplicates
+ var umap map[string]string
+ umap = make(map[string]string)
+
+ // mac address map to check for duplicates
+ var macs map[string]string
+ macs = make(map[string]string)
+
+ for _, d := range cluster.Droplets {
+ // Generate a new UUID
+ if d.Uuid == "" {
+ u := uuid.New()
+ d.Uuid = u.String()
+ }
+
+ // comma-ok check: has this UUID been seen already?
+ if _, ok := umap[d.Uuid]; ok {
+ // UUID already exists
+ log.Info("duplicate UUID", d.Uuid, umap[d.Uuid])
+ log.Info("duplicate UUID", d.Uuid, d.Hostname)
+ // os.Exit(-1)
}
+ umap[d.Uuid] = d.Hostname
- for mac, uuid := range macs {
- log.Println("mac:", mac, "uuid", uuid, "hostname:", umap[uuid])
+ for _, n := range d.Networks {
+ // log.Println("network:", n.Mac, d.Uuid, d.Hostname)
+ if _, ok := macs[n.Mac]; ok {
+ // MAC already exists
+ log.Info("duplicate MAC", n.Mac, macs[n.Mac], umap[macs[n.Mac]])
+ log.Info("duplicate MAC", n.Mac, d.Hostname)
+ // os.Exit(-1)
+ }
+ macs[n.Mac] = d.Uuid
}
}
+ log.Println("validated okay: no duplicate MAC addr")
+ log.Println("validated okay: no duplicate UUID")
- return false
+ return umap, macs
}
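
(A sketch of how the new error-returning contract of ValidateDroplets() could be covered by a test in the same package. The pb import path is only a placeholder, since it is not shown in this diff, and the pb.Droplet literal assumes the exported Uuid/Hostname fields used above; the hostnames are made up.)

    // validate_test.go -- hypothetical sketch of a duplicate-UUID check.
    package main

    import (
    	"testing"

    	pb "example.com/virtbuf" // placeholder: use the same pb import path as validate.go
    )

    func TestValidateDropletsDuplicateUuid(t *testing.T) {
    	c := new(pb.Cluster)
    	c.Droplets = append(c.Droplets,
    		&pb.Droplet{Hostname: "vm-a", Uuid: "00000000-0000-0000-0000-000000000001"},
    		&pb.Droplet{Hostname: "vm-b", Uuid: "00000000-0000-0000-0000-000000000001"},
    	)
    	if err := ValidateDroplets(c); err == nil {
    		t.Fatal("expected a duplicate-UUID error, got nil")
    	}
    }
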