summaryrefslogtreecommitdiff
path: root/poll.go
blob: dc9d1710d82d85166f4ccfdd7ac9e8dee24d9245 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
package main

import (
	"fmt"
	"strings"
	"time"

	"go.wit.com/lib/gui/shell"
	"go.wit.com/log"
)

// pollHypervisor fetches the list of VMs from the hypervisor's status
// endpoint (port 2520, one VM per line: "<STATE> <NAME> ...") and
// reconciles it against the in-memory droplet list. Droplets already
// recorded get their poll timestamp and CurrentState refreshed — and are
// re-homed if they now report from a different hypervisor — while unknown
// droplets are appended as new. On a successful poll the hypervisor's own
// lastpoll/killcount bookkeeping is reset.
func (h *HyperT) pollHypervisor() {
	url := "http://" + h.Hostname + ":2520/vms"
	log.Log(POLL, "wget url =", url)
	s := shell.Wget(url)
	if s == nil {
		// endpoint unreachable; leave lastpoll/killcount untouched so
		// the caller can notice the hypervisor going stale
		return
	}
	for _, line := range strings.Split(string(s.Bytes()), "\n") {
		if line == "" {
			continue
		}
		fields := strings.Fields(line)
		if len(fields) < 2 {
			// need at least "<STATE> <NAME>"
			continue
		}
		state := fields[0]
		name := fields[1]
		if state != "ON" {
			// only running VMs are tracked here
			continue
		}
		log.Log(POLL, h.Hostname, "STATE:", state, "HOST:", name, "rest:", fields[2:])
		d := findDroplet(name)
		if d == nil {
			// this is a new unknown droplet (not in the config file)
			d = new(DropletT)
			d.Hostname = name
			d.hname = h.Hostname
			d.lastpoll = time.Now()
			d.CurrentState = "ON"
			me.droplets = append(me.droplets, d)
			log.Log(EVENT, name, "IS NEW. ADDED ON", h.Hostname)
			continue
		}
		log.Log(INFO, "ALREADY RECORDED", d.Hostname)
		d.lastpoll = time.Now()
		d.CurrentState = "ON"
		if d.hname == "" {
			log.Log(EVENT, "DROPLET", d.Hostname, "PROBABLY WAS NEVER POLLED YET")
		}
		if d.hname != h.Hostname {
			log.Log(EVENT, "DROPLET", d.Hostname, "MOVED FROM", d.hname, "TO", h.Hostname)
			d.hname = h.Hostname
		}
	}
	h.lastpoll = time.Now()
	h.killcount = 0 // poll worked. reset killcount
}

// findDroplet returns the recorded droplet whose Hostname matches name,
// or nil when no such droplet exists in me.droplets.
func findDroplet(name string) *DropletT {
	for i := range me.droplets {
		if me.droplets[i].Hostname == name {
			return me.droplets[i]
		}
	}
	return nil
}

// clusterHealthy checks the state of the cluster and returns a summary
// string intended to be sent to an uptime monitor like Kuma. The bool is
// true only when every droplet configured ON is currently reporting ON
// and has been polled within the last minute.
func clusterHealthy() (bool, string) {
	var good bool = true
	var working int
	var failed int
	for _, d := range me.droplets {
		if d.State != "ON" {
			// not configured to be running; ignore
			continue
		}
		if d.CurrentState == "" {
			// hostname has not been polled yet; skip rather than fail
			continue
		}
		dur := time.Since(d.lastpoll) // elapsed time since the last poll
		if d.CurrentState != "ON" {
			log.Info("BAD  STATE", d.State, d.Hostname, d.hname, "CurrentState =", d.CurrentState, shell.FormatDuration(dur))
			good = false
			// previously uncounted, which made the summary undercount failures
			failed += 1
			continue
		}
		if dur > time.Minute {
			// reported ON, but not seen by any poll for over a minute
			log.Info("GOOD STATE MISSING", d.Hostname, d.hname, shell.FormatDuration(dur))
			good = false
			d.CurrentState = "MISSING"
			failed += 1
			continue
		}
		l := shell.FormatDuration(dur)
		if l == "" {
			// unformattable duration: counted neither working nor failed
			log.Info("DUR IS EMPTY", dur)
			continue
		}
		working += 1
	}
	var summary string = "("
	if working > 0 {
		summary += fmt.Sprintf("working = %d", working)
	}
	if failed > 0 {
		if working > 0 {
			summary += " " // separate the two counts
		}
		summary += fmt.Sprintf("failed = %d", failed)
	}
	summary += ")"
	if good {
		return good, "GOOD=true " + summary
	}
	return good, "GOOD=false " + summary
}