-rw-r--r--  argv.go                            | 17
-rw-r--r--  create.go                          |  7
-rw-r--r--  dump.go                            |  8
-rw-r--r--  main.go                            | 33
-rw-r--r--  poll.go                            |  4
-rw-r--r--  resources/initcfg/cluster.text     |  5
-rw-r--r--  resources/initcfg/droplets.text    | 44
-rw-r--r--  resources/initcfg/events.text      | 10
-rw-r--r--  resources/initcfg/hypervisors.text | 17
-rw-r--r--  start.go                           |  5
-rw-r--r--  structs.go                         |  3
-rw-r--r--  validate.go                        | 85
-rw-r--r--  watchdog.go                        | 24
13 files changed, 180 insertions(+), 82 deletions(-)
diff --git a/argv.go b/argv.go
index 8e966cf..218b4ca 100644
--- a/argv.go
+++ b/argv.go
@@ -11,15 +11,16 @@ import "go.wit.com/log"
var argv args
type args struct {
- Xml []string `arg:"--libvirt" help:"import qemu xml files: --libvirt /etc/libvirt/qemu/*.xml"`
- IgnoreCpu bool `arg:"--xml-ignore-cpu" default:"true" help:"ignore non-standard libvirt xml cpus"`
- IgnoreBr bool `arg:"--xml-ignore-net" default:"true" help:"ignore network bridge name changes"`
- IgnDisk bool `arg:"--xml-ignore-disk" default:"false" help:"ignore duplicate disk names"`
- Config string `arg:"env:VIRTIGO_HOME" help:"defaults to ~/.config/virtigo/"`
- Port int `arg:"--port" default:"8080" help:"allow droplet events via http"`
- Daemon bool `arg:"--daemon" help:"run in daemon mode"`
+ Xml []string `arg:"--libvirt" help:"import qemu xml files: --libvirt /etc/libvirt/qemu/*.xml"`
+ Config string `arg:"env:VIRTIGO_HOME" help:"defaults to ~/.config/virtigo/"`
+ Port int `arg:"--port" default:"8080" help:"allow droplet events via http"`
}
+// Daemon bool `arg:"--daemon" help:"run in daemon mode"`
+// IgnoreCpu bool `arg:"--xml-ignore-cpu" default:"true" help:"ignore non-standard libvirt xml cpus"`
+// IgnoreBr bool `arg:"--xml-ignore-net" default:"true" help:"ignore network bridge name changes"`
+// IgnDisk bool `arg:"--xml-ignore-disk" default:"false" help:"ignore duplicate disk names"`
+
// Save bool `arg:"--save" default:"false" help:"save protobuf config after import"`
// Start string `arg:"--start" help:"start a droplet"`
// Uptime bool `arg:"--uptime" default:"true" help:"allow uptime checks for things like Kuma"`
@@ -35,7 +36,7 @@ This app talks to your hypervisors via the virtigod daemon.
Import your existing libvirt xml files with:
- virtigo --libvirt /etc/libvirt/qemu/*.xml --save
+ virtigo --libvirt /etc/libvirt/qemu/*.xml
This runs an http server so you can control your virtual machines.
For example, to start a vm called 'www.wit.com' in your cluster 'foo.bar.com':
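
Note: the arg:"..." struct tags above follow the go-arg convention
(presumably github.com/alexflint/go-arg). A minimal sketch of how such a
struct is parsed; the parse call itself is not part of this diff:

    package main

    import arg "github.com/alexflint/go-arg"

    type args struct {
            Xml    []string `arg:"--libvirt" help:"import qemu xml files: --libvirt /etc/libvirt/qemu/*.xml"`
            Config string   `arg:"env:VIRTIGO_HOME" help:"defaults to ~/.config/virtigo/"`
            Port   int      `arg:"--port" default:"8080" help:"allow droplet events via http"`
    }

    var argv args

    func main() {
            arg.MustParse(&argv) // fills argv from command-line flags and the environment
    }
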
diff --git a/create.go b/create.go
index 9cdde7e..d77daad 100644
--- a/create.go
+++ b/create.go
@@ -71,7 +71,7 @@ func create(w http.ResponseWriter, r *http.Request) (string, error) {
d.Networks = append(d.Networks, newNet)
// d.AddDefaultNetwork(mac)
}
- me.cluster.Droplets = append(me.cluster.Droplets, d)
+ me.cluster.AddDroplet(d)
result, err := startDroplet(d)
if err != nil {
@@ -158,11 +158,6 @@ func startDroplet(d *pb.Droplet) (string, error) {
result += fmt.Sprintln("pool has", len(pool), "members", "rand =", n)
h := pool[n]
- // update/resend the search directories to the hypervisor
- result += fmt.Sprintln("h.sendDirs() HERE")
- result += fmt.Sprintln("h.sendDirs() HERE")
- h.sendDirs()
-
ok, output := h.start(d)
if ok {
return result + output, nil
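
Note: startDroplet picks the target hypervisor at random from the pool of
eligible machines; from the context lines above, the selection reduces to
the sketch below (pool construction is elided in the diff, and the
rand.Intn call is an assumption):

    n := rand.Intn(len(pool)) // index of a random eligible hypervisor
    h := pool[n]
    ok, output := h.start(d) // ask that hypervisor to start the droplet
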
diff --git a/dump.go b/dump.go
index 51d4cf3..e3ab9f7 100644
--- a/dump.go
+++ b/dump.go
@@ -16,7 +16,7 @@ import (
*/
func dumpCluster(w http.ResponseWriter) {
- umap, macs, err := ValidateDroplets(me.cluster)
+ umap, macs, err := ValidateDroplets()
for u, hostname := range umap {
fmt.Fprintln(w, "uuid:", u, "hostname:", hostname)
}
@@ -31,7 +31,11 @@ func dumpCluster(w http.ResponseWriter) {
// list running droplets and droplets that should be running
func dumpDroplets(w http.ResponseWriter, full bool) {
- for _, d := range me.cluster.Droplets {
+ loop := me.cluster.DropletsAll() // get the list of droplets
+ for loop.Scan() {
+ d := loop.Droplet()
+ fmt.Fprintln(w, "Droplet UUID:", d.Uuid)
+
var macs []string
for _, n := range d.Networks {
macs = append(macs, n.Mac)
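
Note: DropletsAll()/Scan()/Droplet() is the new iterator API that replaces
ranging over me.cluster.Droplets directly; every loop converted in this
commit follows the same shape, taken from the hunks here:

    loop := me.cluster.DropletsAll() // iterator over all known droplets
    for loop.Scan() {                // advance; returns false when exhausted
            d := loop.Droplet()      // the current *pb.Droplet
            fmt.Fprintln(w, "Droplet UUID:", d.Uuid)
    }
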
diff --git a/main.go b/main.go
index 69a8f45..98af0d0 100644
--- a/main.go
+++ b/main.go
@@ -4,6 +4,7 @@ package main
import (
"embed"
+ "fmt"
"os"
"path/filepath"
"time"
@@ -33,9 +34,9 @@ func main() {
os.Exit(0)
}
- if argv.Daemon {
- log.DaemonMode(true)
- }
+ // if argv.Daemon {
+ // log.DaemonMode(true)
+ // }
// set defaults
me.unstable = time.Now() // initialize the grid as unstable
@@ -52,20 +53,25 @@ func main() {
// how long the cluster must be stable before new droplets can be started
me.clusterStableDuration = 37 * time.Second
- // read in the config file
- me.cluster = new(pb.Cluster)
- me.cluster.E = new(pb.Events)
+ me.cluster = pb.InitCluster()
if err := me.cluster.ConfigLoad(); err != nil {
log.Info("config load error", err)
os.Exit(-1)
}
- for i, d := range me.cluster.Droplets {
+ loop := me.cluster.DropletsAll() // get the list of droplets
+ for loop.Scan() {
+ d := loop.Droplet()
+ if d == nil {
+ fmt.Println("d == nil")
+ os.Exit(-1)
+ }
+ fmt.Println("Droplet UUID:", d.Uuid)
if d.Current == nil {
d.Current = new(pb.Current)
}
d.Current.State = pb.DropletState_OFF
- log.Info(i, "droplet", d.Hostname)
+ log.Info("droplet", d.Hostname)
}
hmm := "pihole.wit.com"
d := me.cluster.FindDropletByName(hmm)
@@ -78,11 +84,11 @@ func main() {
var newEvents []*pb.Event
// sanity check the cluster & droplets
- if _, _, err := ValidateDroplets(me.cluster); err != nil {
+ if _, _, err := ValidateDroplets(); err != nil {
log.Info("todo: add flag to ignore. for now, fix problems in the config file.")
os.Exit(0)
}
- newe, err := ValidateDiskFilenames(me.cluster)
+ newe, err := ValidateDiskFilenames()
if err != nil {
log.Info(err)
os.Exit(-1)
@@ -91,7 +97,7 @@ func main() {
for _, e := range newe {
newEvents = append(newEvents, e)
}
- ValidateUniqueFilenames(me.cluster)
+ ValidateUniqueFilenames()
for _, filename := range argv.Xml {
domcfg, err := virtigolib.ReadXml(filename)
@@ -138,7 +144,7 @@ func main() {
}
// initialize each hypervisor
- for _, pbh := range me.cluster.Hypervisors {
+ for _, pbh := range me.cluster.H.Hypervisors {
// this is a new unknown droplet (not in the config file)
var h *HyperT
h = new(HyperT)
@@ -155,9 +161,6 @@ func main() {
for _, h := range me.hypers {
log.Info("starting polling on", h.pb.Hostname)
- // inititialize the search directories on each hypervisor
- h.sendDirs()
-
// start a watchdog on each hypervisor
go h.NewWatchdog()
}
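
Note: pb.InitCluster() replaces the manual new(pb.Cluster)/new(pb.Events)
wiring. Its body is not shown in this diff; judging from the fields used
elsewhere in the commit (cluster.E via AddEvent, cluster.H.Hypervisors,
cluster.Dirs), a constructor of roughly this shape is implied (a sketch,
not the actual virtbuf code):

    func InitCluster() *NewCluster {
            c := new(NewCluster)
            c.E = new(Events)      // event log, appended to via AddEvent()
            c.H = new(Hypervisors) // hypervisor list, walked during startup
            return c
    }
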
diff --git a/poll.go b/poll.go
index 0cf9709..dc5235f 100644
--- a/poll.go
+++ b/poll.go
@@ -139,7 +139,9 @@ func uptimeCheck() (bool, string) {
var unknown int
var unknownList []string
- for _, d := range me.cluster.Droplets {
+ loop := me.cluster.DropletsAll() // get the list of droplets
+ for loop.Scan() {
+ d := loop.Droplet()
total += 1
if d.StartState != pb.DropletState_ON {
continue
diff --git a/resources/initcfg/cluster.text b/resources/initcfg/cluster.text
new file mode 100644
index 0000000..9d94cad
--- /dev/null
+++ b/resources/initcfg/cluster.text
@@ -0,0 +1,5 @@
+dirs: "/var/lib/libvirt/images"
+dirs: "/home/isos"
+dirs: "/home/nfs"
+dirs: "/home/ceph"
+dirs: "/home"
diff --git a/resources/initcfg/droplets.text b/resources/initcfg/droplets.text
new file mode 100644
index 0000000..fae130a
--- /dev/null
+++ b/resources/initcfg/droplets.text
@@ -0,0 +1,44 @@
+droplets: {
+ hostname: "git.wit.org"
+ cpus: 16
+ memory: 103079215104
+ preferred_hypervisor: "farm04"
+ qemu_machine: "pc-q35-9.0"
+ networks: {
+ mac: "22:22:22:22:22:03"
+ name: ""
+ }
+ disks: {
+ filename: "git.wit.org.qcow2"
+ filepath: "/home/nfs3"
+ }
+}
+droplets: {
+ hostname: "go.wit.com"
+ cpus: 2
+ memory: 2147483648
+ preferred_hypervisor: "farm04"
+ qemu_machine: "pc-q35-9.0"
+ networks: {
+ mac: "22:22:22:22:22:05"
+ name: ""
+ }
+ disks: {
+ filename: "go.wit.com.qcow2"
+ filepath: "/home/nfs"
+ }
+}
+droplets: {
+ hostname: "wekan.foo.com"
+ cpus: 2
+ memory: 2147483648
+ qemu_machine: "pc-q35-9.1"
+ networks: {
+ mac: "22:22:22:22:22:08"
+ name: ""
+ }
+ disks: {
+ filename: "wekan.foo.com.qcow2"
+ filepath: "/home/nfs"
+ }
+}
diff --git a/resources/initcfg/events.text b/resources/initcfg/events.text
new file mode 100644
index 0000000..12a7515
--- /dev/null
+++ b/resources/initcfg/events.text
@@ -0,0 +1,10 @@
+events: {
+ droplet: "www.foo.org"
+ start: {
+ seconds: 1729895589
+ nanos: 425114400
+ }
+ field_name: "Droplet.Memory"
+ orig_val: "1073741824"
+ new_val: "2147483648"
+}
diff --git a/resources/initcfg/hypervisors.text b/resources/initcfg/hypervisors.text
new file mode 100644
index 0000000..7817eb0
--- /dev/null
+++ b/resources/initcfg/hypervisors.text
@@ -0,0 +1,17 @@
+hypervisors: {
+ uuid: "11111111-2222-3333-4444-555555555555"
+ hostname: "hyper01"
+ active: true
+ cpus: 16
+ memory: 8796093022208
+ comment: "this is a fake hypervisor"
+ autoscan: true
+}
+hypervisors: {
+ hostname: "hyper02"
+ active: true
+ cpus: 16
+ memory: 8796093022208
+ comment: "this is a fake hypervisor"
+ autoscan: true
+}
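
Note: the resources/initcfg/*.text files are protobuf text format; given
the embed import in main.go, they are presumably compiled in with go:embed
to seed a fresh config. A sketch of parsing one with the standard protobuf
module (the real loader is ConfigLoad() from go.wit.com/lib/protobuf/virtbuf,
whose internals are not in this diff):

    import (
            "os"

            "google.golang.org/protobuf/encoding/prototext"
    )

    func loadDroplets(path string) (*pb.Droplets, error) {
            data, err := os.ReadFile(path)
            if err != nil {
                    return nil, err
            }
            var all pb.Droplets
            if err := prototext.Unmarshal(data, &all); err != nil {
                    return nil, err
            }
            return &all, nil
    }
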
diff --git a/start.go b/start.go
index cf82d36..eaad2c5 100644
--- a/start.go
+++ b/start.go
@@ -101,11 +101,6 @@ func Start(name string) (string, error) {
result += fmt.Sprintln("pool has", len(pool), "members", "rand =", n)
h := pool[n]
- // update/resend the search directories to the hypervisor
- result += fmt.Sprintln("h.sendDirs() HERE")
- result += fmt.Sprintln("h.sendDirs() HERE")
- h.sendDirs()
-
ok, output := h.start(d)
if ok {
return result + output, nil
diff --git a/structs.go b/structs.go
index d8c0823..725b424 100644
--- a/structs.go
+++ b/structs.go
@@ -20,7 +20,8 @@ func (b *virtigoT) Enable() {
// this app's variables
type virtigoT struct {
- cluster *pb.Cluster // basic cluster settings
+ cluster *pb.NewCluster // basic cluster settings
+ // newc *pb.NewCluster // basic cluster settings
e *pb.Events // virtbuf events
hmap map[*pb.Hypervisor]*HyperT // map to the local struct
names []string
diff --git a/validate.go b/validate.go
index 49b2088..ac2712d 100644
--- a/validate.go
+++ b/validate.go
@@ -16,6 +16,7 @@ package main
import (
"errors"
"fmt"
+ "os"
"path/filepath"
"strings"
@@ -26,8 +27,10 @@ import (
)
// will make sure the mac address is unique
-func ValidateUniqueMac(cluster *pb.Cluster, mac string) bool {
- for _, d := range cluster.Droplets {
+func ValidateUniqueMac(mac string) bool {
+ loop := me.cluster.DropletsAll() // get the list of droplets
+ for loop.Scan() {
+ d := loop.Droplet()
for _, n := range d.Networks {
if n.Mac == mac {
log.Info("duplicate MAC", n.Mac, "in droplet", d.Hostname)
@@ -39,10 +42,10 @@ func ValidateUniqueMac(cluster *pb.Cluster, mac string) bool {
}
// records all the known paths. this should go in the protobuf
-func addClusterFilepath(cluster *pb.Cluster, dir string) *pb.Event {
+func addClusterFilepath(dir string) *pb.Event {
var found bool = false
var e *pb.Event
- for _, d := range cluster.Dirs {
+ for _, d := range me.cluster.Dirs {
if d == dir {
// found dir
found = true
@@ -53,17 +56,19 @@ func addClusterFilepath(cluster *pb.Cluster, dir string) *pb.Event {
if dir != "." {
// make a new Add Event
e = pb.NewAddEvent(nil, "Add Cluster Directory", dir)
- cluster.Dirs = append(cluster.Dirs, dir)
+ me.cluster.Dirs = append(me.cluster.Dirs, dir)
}
}
return e
}
// returns the droplet using a filename
-func lookupFilename(cluster *pb.Cluster, filename string) *pb.Droplet {
+func lookupFilename(filename string) *pb.Droplet {
filebase := filepath.Base(filename)
- for _, d := range cluster.Droplets {
+ loop := me.cluster.DropletsAll() // get the list of droplets
+ for loop.Scan() {
+ d := loop.Droplet()
for _, disk := range d.Disks {
if filebase == disk.Filename {
return d
@@ -73,15 +78,17 @@ func lookupFilename(cluster *pb.Cluster, filename string) *pb.Droplet {
return nil
}
-func ValidateUniqueFilenames(cluster *pb.Cluster) bool {
+func ValidateUniqueFilenames() bool {
var ok bool = true
var disks map[string]string
disks = make(map[string]string)
- for _, d := range cluster.Droplets {
+ loop := me.cluster.DropletsAll() // get the list of droplets
+ for loop.Scan() {
+ d := loop.Droplet()
for _, disk := range d.Disks {
filename := disk.Filename
- addClusterFilepath(cluster, disk.Filepath)
+ addClusterFilepath(disk.Filepath)
if _, ok := disks[filename]; ok {
/*
if argv.IgnDisk {
@@ -103,16 +110,18 @@ func ValidateUniqueFilenames(cluster *pb.Cluster) bool {
return ok
}
-func ValidateDiskFilenames(cluster *pb.Cluster) ([]*pb.Event, error) {
+func ValidateDiskFilenames() ([]*pb.Event, error) {
var alle []*pb.Event
- for _, d := range cluster.Droplets {
+ loop := me.cluster.DropletsAll() // get the list of droplets
+ for loop.Scan() {
+ d := loop.Droplet()
var found bool = false
for _, disk := range d.Disks {
filename := disk.Filename
filebase := filepath.Base(filename)
dir := filepath.Dir(filename)
- addClusterFilepath(cluster, dir)
+ addClusterFilepath(dir)
if disk.Filename != filebase {
// update filename
e := d.NewChangeEvent("Disk.Filename", disk.Filename, filebase)
@@ -159,7 +168,9 @@ func getNewMac() string {
var macs map[string]string
macs = make(map[string]string)
- for _, d := range me.cluster.Droplets {
+ loop := me.cluster.DropletsAll() // get the list of droplets
+ for loop.Scan() {
+ d := loop.Droplet()
for _, n := range d.Networks {
// log.Println("network:", n.Mac, d.Uuid, d.Hostname)
if _, ok := macs[n.Mac]; ok {
@@ -189,7 +200,7 @@ func getNewMac() string {
// runs on startup. dies if there are duplicates
// the config file must then be edited by hand
-func ValidateDroplets(cluster *pb.Cluster) (map[string]string, map[string]string, error) {
+func ValidateDroplets() (map[string]string, map[string]string, error) {
// uuid map to check for duplicates
var umap map[string]string
umap = make(map[string]string)
@@ -198,7 +209,9 @@ func ValidateDroplets(cluster *pb.Cluster) (map[string]string, map[string]string
var macs map[string]string
macs = make(map[string]string)
- for _, d := range cluster.Droplets {
+ loop := me.cluster.DropletsAll() // get the list of droplets
+ for loop.Scan() {
+ d := loop.Droplet()
// Generate a new UUID
if d.Uuid == "" {
u := uuid.New()
@@ -231,6 +244,26 @@ func ValidateDroplets(cluster *pb.Cluster) (map[string]string, map[string]string
return umap, macs, nil
}
+func searchForDuplicateUUIDs() {
+ // var broken int
+}
+
+/*
+// remove from the slice
+func deleteDroplet(bad int) {
+ var all *pb.Droplets
+ all = me.cluster.DeleteDroplet(b *pb.Droplet)
+
+ fmt.Println("deleting", bad, all.Droplets[bad].Hostname)
+
+ // Check if the index is within bounds
+ if bad >= 0 && bad < len(all.Droplets) {
+ // Remove element at targetIndex
+ all.Droplets = append(all.Droplets[:bad], all.Droplets[bad+1:]...)
+ }
+}
+*/
+
// checks a droplet right before a start event
// verify ethernet mac address
// verify uuid (but probably can ignore this since it's not used)
@@ -239,7 +272,9 @@ func ValidateDroplets(cluster *pb.Cluster) (map[string]string, map[string]string
// check filenames
func ValidateDroplet(check *pb.Droplet) error {
// check for duplicate uuid's
- for _, d := range me.cluster.Droplets {
+ loop := me.cluster.DropletsAll() // get the list of droplets
+ for loop.Scan() {
+ d := loop.Droplet()
if check == d {
continue
}
@@ -247,13 +282,21 @@ func ValidateDroplet(check *pb.Droplet) error {
// UUID already exists
log.Info("duplicate UUID", d.Uuid, d.Hostname)
log.Info("duplicate UUID", d.Uuid, check.Hostname)
+ // d.Archive = new(pb.DropletArchive)
+ if d.Archive == nil {
+ log.Info("d.Archive == nil")
+ os.Exit(-1)
+ }
+ d.Archive.Reason = pb.DropletArchive_DUP
return errors.New("duplicate UUID: " + d.Uuid)
}
}
// check for duplicate mac addresses
for _, checkn := range check.Networks {
- for _, d := range me.cluster.Droplets {
+ loop := me.cluster.DropletsAll() // get the list of droplets
+ for loop.Scan() {
+ d := loop.Droplet()
if check == d {
continue
}
@@ -280,7 +323,9 @@ func setUniqueSpicePort(check *pb.Droplet) error {
// check spice ports
// checkn.SpicePort = getUniqueSpicePort()
- for _, d := range me.cluster.Droplets {
+ loop := me.cluster.DropletsAll() // get the list of droplets
+ for loop.Scan() {
+ d := loop.Droplet()
if d.SpicePort == 0 {
continue
}
@@ -330,7 +375,7 @@ func setUniqueSpicePort(check *pb.Droplet) error {
// generate change port event
log.Info("going to try port", start, "on", check.Hostname)
e := check.NewChangeEvent("SpicePort", check.SpicePort, start)
- me.cluster.E.Events = append(me.cluster.E.Events, e)
+ me.cluster.AddEvent(e)
// set port to start
check.SpicePort = start
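
Note: setUniqueSpicePort scans every droplet's port before assigning one,
and records the assignment as a change event instead of silently editing
the config. The scan-then-pick strategy reduces to something like this
self-contained sketch (the 5900 base port and the helper name are
assumptions, not taken from the code):

    // take the first port at or above the base that no droplet claims
    func pickSpicePort(used map[int64]bool) int64 {
            var port int64 = 5900 // conventional spice base port (assumed)
            for used[port] {
                    port++
            }
            return port
    }
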
diff --git a/watchdog.go b/watchdog.go
index 97baac4..ff1c68b 100644
--- a/watchdog.go
+++ b/watchdog.go
@@ -4,7 +4,6 @@ import (
"fmt"
"time"
- pb "go.wit.com/lib/protobuf/virtbuf"
"go.wit.com/log"
)
@@ -15,29 +14,6 @@ func TimeFunction(f func()) time.Duration {
return time.Since(startTime) // Calculate the elapsed time
}
-func (h *HyperT) sendDirs() {
- url := "http://" + h.pb.Hostname + ":2520/cluster"
- var msg string
- var data []byte
-
- var c *pb.Cluster
- c = new(pb.Cluster)
- for _, dir := range me.cluster.Dirs {
- c.Dirs = append(c.Dirs, dir)
- }
- msg = c.FormatJSON()
- data = []byte(msg) // Convert the string to []byte
- req, err := httpPost(url, data)
- if err != nil {
- log.Info("error:", err)
- return
- }
- // log.Info("http post url:", url)
- // log.Info("http post data:", msg)
-
- log.Info("EVENT start droplet response: " + string(req))
-}
-
func (h *HyperT) NewWatchdog() {
h.dog = time.NewTicker(me.hyperPollDelay)
defer h.dog.Stop()
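
Note: NewWatchdog, which continues past the end of this hunk, is a
per-hypervisor ticker loop; with sendDirs() removed, what remains is the
standard time.Ticker polling pattern. A minimal sketch with the loop body
as a placeholder (pollHypervisor is hypothetical):

    func (h *HyperT) NewWatchdog() {
            h.dog = time.NewTicker(me.hyperPollDelay)
            defer h.dog.Stop()
            for range h.dog.C {
                    pollHypervisor(h) // assumed poll entry point, runs once per tick
            }
    }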