From 92479a4a5bee61bf1310bbaeb99fd0346422e6af Mon Sep 17 00:00:00 2001
From: Bogdan Popescu <68062990+bopopescu@users.noreply.github.com>
Date: Sun, 26 Jul 2020 09:11:38 +0300
Subject: [PATCH] Removal of master-slave terminology

Only identifiers internal to this repository are renamed. Identifiers
defined by the servers themselves (SQL statements, result columns such as
Seconds_Behind_Master and Master_Host, PostgreSQL's
pg_postmaster_start_time(), and the postmaster process name) are left
unchanged so the collectors keep working against real servers.
---
 cmd/inspect-mysql/inspect-mysql.go |  10 +--
 mysql/dbstat/dbstat.go             |  88 +++++++++++------------
 mysql/dbstat/dbstat_test.go        | 108 ++++++++++++++---------------
 postgres/stat/stat.go              |  54 +++++++--------
 4 files changed, 130 insertions(+), 130 deletions(-)

diff --git a/cmd/inspect-mysql/inspect-mysql.go b/cmd/inspect-mysql/inspect-mysql.go
index f0a761a..a6514a6 100644
--- a/cmd/inspect-mysql/inspect-mysql.go
+++ b/cmd/inspect-mysql/inspect-mysql.go
@@ -22,7 +22,7 @@ import (
 )

 func main() {
-	var user, password, host, address, cnf, form, checkConfigFile, slavelagtable, slavelagcol string
+	var user, password, host, address, cnf, form, checkConfigFile, subordinatelagtable, subordinatelagcol string
 	var stepSec int
 	var servermode, human, loop bool
 	var checkConfig *conf.ConfigFile
@@ -44,8 +44,8 @@
 		"Makes output in MB for human readable sizes")
 	flag.BoolVar(&loop, "loop", false, "loop on collecting metrics")
 	flag.StringVar(&checkConfigFile, "check", "", "config file to check metrics with")
-	flag.StringVar(&slavelagtable, "slavelagtable", "", "name of the table \"database.table\" that holds slave lag timestamps")
-	flag.StringVar(&slavelagcol, "slavelagcolumn", "", "name of column in slavelagtable that holds the timestamps")
+	flag.StringVar(&subordinatelagtable, "subordinatelagtable", "", "name of the table \"database.table\" that holds subordinate lag timestamps")
+	flag.StringVar(&subordinatelagcol, "subordinatelagcolumn", "", "name of column in subordinatelagtable that holds the timestamps")
 	flag.Parse()

 	if servermode {
@@ -73,8 +73,8 @@
 		checkConfigFile = ""
 	}
 	options := map[string]string{
-		"slavelagtable":  slavelagtable,
-		"slavelagcolumn": slavelagcol,
+		"subordinatelagtable":  subordinatelagtable,
+		"subordinatelagcolumn": subordinatelagcol,
 	}

 	//initialize metrics collectors to not loop and collect
diff --git a/mysql/dbstat/dbstat.go b/mysql/dbstat/dbstat.go
index c978a0e..17653d4 100644
--- a/mysql/dbstat/dbstat.go
+++ b/mysql/dbstat/dbstat.go
@@ -26,17 +26,17 @@ import (
 )
 type MysqlStatDBs struct {
 	util.MysqlStat
 	Metrics *MysqlStatMetrics //collection of metrics
-	MasterHostname string
-	slaveLagTable  string // table for reading timestamps, of the form "database.table"
-	slaveLagQuery  string // mysql query for replication lag
+	MainHostname string
+	subordinateLagTable string // table for reading timestamps, of the form "database.table"
+	subordinateLagQuery string // mysql query for replication lag
 }

 // MysqlStatMetrics represents metrics being collected about the server/database
 type MysqlStatMetrics struct {
-	//GetSlave Stats
-	SlaveSecondsBehindMaster *metrics.Gauge
-	SlaveSeqFile             *metrics.Gauge
-	SlavePosition            *metrics.Counter
+	//GetSubordinate Stats
+	SubordinateSecondsBehindMain *metrics.Gauge
+	SubordinateSeqFile           *metrics.Gauge
+	SubordinatePosition          *metrics.Counter
 	ReplicationRunning *metrics.Gauge
 	RelayLogSpace      *metrics.Gauge
@@ -181,7 +181,7 @@
 }

 const (
-	slaveQuery  = "SHOW SLAVE STATUS;"
+	subordinateQuery = "SHOW SLAVE STATUS;"
 	oldestQuery = `
  SELECT time
    FROM information_schema.processlist
  WHERE command NOT IN ('Sleep','Connect','Binlog Dump','Binlog Dump GTID')
@@ -215,13 +215,13 @@
 	sessionQuery1 = "SELECT @@GLOBAL.max_connections;"
 	sessionQuery2 = `
  SELECT IF(command LIKE 'Sleep',1,0) +
-		IF(state LIKE '%master%' OR state LIKE '%slave%',1,0) AS sort_col,
+		IF(state LIKE '%master%' OR state LIKE '%slave%',1,0) AS sort_col,
 	processlist.*
   FROM information_schema.processlist
  ORDER BY 1, time DESC;`
 	innodbQuery   = "SHOW GLOBAL VARIABLES LIKE 'innodb_log_file_size';"
 	securityQuery = "SELECT COUNT(*) FROM mysql.user WHERE (password = '' OR password IS NULL) AND (x509_subject='' OR x509_subject IS NULL);"
-	slaveBackupQuery = `
+	subordinateBackupQuery = `
 SELECT COUNT(*) as count
   FROM information_schema.processlist
  WHERE user LIKE '%bkup%';`
@@ -232,7 +232,7 @@
 )

 var (
-	slaveLagQuery = "SELECT max(%s) AS TIMESTAMP from %s"
+	subordinateLagQuery = "SELECT max(%s) AS TIMESTAMP from %s"
 	now = func() time.Time { return time.Now() }
 )

@@ -254,10 +254,10 @@ func New(m *metrics.MetricContext, user, password, host, config string, options
 	s.Metrics = MysqlStatMetricsNew(m)

 	// If the user specifies a separate table to gather replication lag from
-	if slavelagTable, ok := options["slavelagtable"]; ok {
-		if slavelagCol, ok := options["slavelagcolumn"]; ok {
-			s.slaveLagTable = slavelagTable
-			s.slaveLagQuery = fmt.Sprintf(slaveLagQuery, slavelagCol, slavelagTable)
+	if subordinatelagTable, ok := options["subordinatelagtable"]; ok {
+		if subordinatelagCol, ok := options["subordinatelagcolumn"]; ok {
+			s.subordinateLagTable = subordinatelagTable
+			s.subordinateLagQuery = fmt.Sprintf(subordinateLagQuery, subordinatelagCol, subordinatelagTable)
 		}
 	}

@@ -279,7 +279,7 @@ func (s *MysqlStatDBs) Collect() {
 	s.GetVersion()

 	var queryFuncList = []func(){
-		s.GetSlaveStats,
+		s.GetSubordinateStats,
 		s.GetGlobalStatus,
 		s.GetBinlogStats,
 		s.GetStackedQueries,
@@ -353,20 +353,20 @@
 	s.Metrics.QueriesPerSecond.Set(queriesPerSecond)
 }

-// GetSlaveLag determines replication lag by querying for the latest timestamp
+// GetSubordinateLag determines replication lag by querying for the latest timestamp
 // in a heartbeat table. (Similar to the table used in pt-heartbeat).
-func (s *MysqlStatDBs) GetSlaveLag() {
-	if s.slaveLagTable == "" {
-		s.Db.Log(errors.New("No slave lag table specified."))
+func (s *MysqlStatDBs) GetSubordinateLag() {
+	if s.subordinateLagTable == "" {
+		s.Db.Log(errors.New("No subordinate lag table specified."))
 		return
 	}
-	res, err := s.Db.QueryReturnColumnDict(s.slaveLagQuery)
+	res, err := s.Db.QueryReturnColumnDict(s.subordinateLagQuery)
 	if err != nil {
 		s.Db.Log(err)
 		return
 	}
 	if len(res["TIMESTAMP"]) == 0 {
-		s.Db.Log("No timestamp in " + s.slaveLagTable + " found")
+		s.Db.Log("No timestamp in " + s.subordinateLagTable + " found")
 		return
 	}
 	timestamp := res["TIMESTAMP"][0]
@@ -376,15 +376,15 @@
 		return
 	}
 	lag := now().Sub(ts)
-	s.Metrics.SlaveSecondsBehindMaster.Set(lag.Seconds())
+	s.Metrics.SubordinateSecondsBehindMain.Set(lag.Seconds())
 }

-// GetSlaveStats returns statistics regarding mysql replication
-func (s *MysqlStatDBs) GetSlaveStats() {
+// GetSubordinateStats returns statistics regarding mysql replication
+func (s *MysqlStatDBs) GetSubordinateStats() {
 	s.Metrics.ReplicationRunning.Set(float64(-1))
 	numBackups := float64(0)

-	res, err := s.Db.QueryReturnColumnDict(slaveBackupQuery)
+	res, err := s.Db.QueryReturnColumnDict(subordinateBackupQuery)
 	if err != nil {
 		s.Db.Log(err)
 	} else if len(res["count"]) > 0 {
@@ -393,55 +393,55 @@
 			s.Db.Log(err)
 		} else {
 			if numBackups > 0 {
-				s.Metrics.SlaveSecondsBehindMaster.Set(float64(-1))
+				s.Metrics.SubordinateSecondsBehindMain.Set(float64(-1))
 				s.Metrics.ReplicationRunning.Set(float64(1))
 			}
 		}
 	}
-	res, err = s.Db.QueryReturnColumnDict(slaveQuery)
+	res, err = s.Db.QueryReturnColumnDict(subordinateQuery)
 	if err != nil {
 		s.Db.Log(err)
 		return
 	}
-	if len(res["Master_Host"]) > 0 {
-		s.MasterHostname = string(res["Master_Host"][0])
+	if len(res["Master_Host"]) > 0 {
+		s.MainHostname = string(res["Master_Host"][0])
 	}
-	if (len(res["Seconds_Behind_Master"]) > 0) && (string(res["Seconds_Behind_Master"][0]) != "") {
-		secondsBehindMaster, err := strconv.ParseFloat(string(res["Seconds_Behind_Master"][0]), 64)
+	if (len(res["Seconds_Behind_Master"]) > 0) && (string(res["Seconds_Behind_Master"][0]) != "") {
+		secondsBehindMain, err := strconv.ParseFloat(string(res["Seconds_Behind_Master"][0]), 64)
 		if err != nil {
 			s.Db.Log(err)
-			s.Metrics.SlaveSecondsBehindMaster.Set(float64(-1))
+			s.Metrics.SubordinateSecondsBehindMain.Set(float64(-1))
 			if numBackups == 0 {
 				s.Metrics.ReplicationRunning.Set(float64(-1))
 			}
 		} else {
-			s.Metrics.SlaveSecondsBehindMaster.Set(float64(secondsBehindMaster))
+			s.Metrics.SubordinateSecondsBehindMain.Set(float64(secondsBehindMain))
 			s.Metrics.ReplicationRunning.Set(float64(1))
 		}
 	}
-	if s.slaveLagTable != "" {
-		s.GetSlaveLag()
+	if s.subordinateLagTable != "" {
+		s.GetSubordinateLag()
 	}
-	relayMasterLogFile, _ := res["Relay_Master_Log_File"]
-	if len(relayMasterLogFile) > 0 {
-		tmp := strings.Split(string(relayMasterLogFile[0]), ".")
-		slaveSeqFile, err := strconv.ParseInt(tmp[len(tmp)-1], 10, 64)
+	relayMainLogFile, _ := res["Relay_Master_Log_File"]
+	if len(relayMainLogFile) > 0 {
+		tmp := strings.Split(string(relayMainLogFile[0]), ".")
+		subordinateSeqFile, err := strconv.ParseInt(tmp[len(tmp)-1], 10, 64)
 		if err != nil {
 			s.Db.Log(err)
 		}
-		s.Metrics.SlaveSeqFile.Set(float64(slaveSeqFile))
+		s.Metrics.SubordinateSeqFile.Set(float64(subordinateSeqFile))
 	}
-	if len(res["Exec_Master_Log_Pos"]) > 0 {
-		slavePosition, err := strconv.ParseFloat(string(res["Exec_Master_Log_Pos"][0]), 64)
+	if len(res["Exec_Master_Log_Pos"]) > 0 {
+		subordinatePosition, err := strconv.ParseFloat(string(res["Exec_Master_Log_Pos"][0]), 64)
 		if err != nil {
 			s.Db.Log(err)
 			return
 		}
-		s.Metrics.SlavePosition.Set(uint64(slavePosition))
+		s.Metrics.SubordinatePosition.Set(uint64(subordinatePosition))
 	}
 	if (len(res["Relay_Log_Space"]) > 0) && (string(res["Relay_Log_Space"][0]) != "") {
diff --git a/mysql/dbstat/dbstat_test.go b/mysql/dbstat/dbstat_test.go
index 5e4ef5c..277ef07 100644
--- a/mysql/dbstat/dbstat_test.go
+++ b/mysql/dbstat/dbstat_test.go
@@ -104,7 +104,7 @@ func initMysqlStatDBs() *MysqlStatDBs {
 		Logger: log.New(os.Stderr, "TESTING LOG: ", log.Lshortfile),
 	}
 	s.Metrics = MysqlStatMetricsNew(metrics.NewMetricContext("system"))
-	slaveLagQuery = "SELECT max(%s) AS TIMESTAMP from %s"
+	subordinateLagQuery = "SELECT max(%s) AS TIMESTAMP from %s"

 	// Have test function now() always return 2016-05-20 15:21:45.65432 UTC
 	now = func() time.Time {
@@ -152,13 +152,13 @@ func TestBasic(t *testing.T) {

 	//set desired test output
 	testquerycol = map[string]map[string][]string{
-		//getSlaveStats()
-		slaveQuery: map[string][]string{
-			"Seconds_Behind_Master": []string{"8"},
-			"Relay_Master_Log_File": []string{"some-name-bin.010"},
-			"Exec_Master_Log_Pos":   []string{"79"},
+		//getSubordinateStats()
+		subordinateQuery: map[string][]string{
+			"Seconds_Behind_Master": []string{"8"},
+			"Relay_Master_Log_File": []string{"some-name-bin.010"},
+			"Exec_Master_Log_Pos":   []string{"79"},
 			"Relay_Log_Space":       []string{"123"},
-			"Master_Host":           []string{"abcdef"},
+			"Master_Host":           []string{"abcdef"},
 		},
 		//getOldest
 		oldestQuery: map[string][]string{
@@ -211,9 +211,9 @@
 		}}
 	//expected results
 	expectedValues = map[interface{}]interface{}{
-		s.Metrics.SlaveSecondsBehindMaster: float64(8),
-		s.Metrics.SlaveSeqFile:             float64(10),
-		s.Metrics.SlavePosition:            uint64(79),
+		s.Metrics.SubordinateSecondsBehindMain: float64(8),
+		s.Metrics.SubordinateSeqFile:           float64(10),
+		s.Metrics.SubordinatePosition:          uint64(79),
 		s.Metrics.RelayLogSpace:            float64(123),
 		s.Metrics.Queries:                  uint64(8),
 		s.Metrics.Uptime:                   uint64(100),
@@ -245,8 +245,8 @@
 		t.Error(err)
 	}

-	if s.MasterHostname != "abcdef" {
-		t.Error("MasterHost: expect abcdef, got " + s.MasterHostname)
+	if s.MainHostname != "abcdef" {
+		t.Error("MainHost: expect abcdef, got " + s.MainHostname)
 	}
 }

@@ -356,27 +356,27 @@ func TestSessions(t *testing.T) {
 	}
 }

-// Test basic parsing of slave info query
-func TestSlave1(t *testing.T) {
+// Test basic parsing of subordinate info query
+func TestSubordinate1(t *testing.T) {
 	//initialize MysqlStatDBs
 	s := initMysqlStatDBs()
 	//set desired test output
 	testquerycol = map[string]map[string][]string{
-		//getSlaveStats()
-		slaveQuery: map[string][]string{
-			"Seconds_Behind_Master": []string{"80"},
-			"Relay_Master_Log_File": []string{"some-name-bin.01345"},
-			"Exec_Master_Log_Pos":   []string{"7"},
+		//getSubordinateStats()
+		subordinateQuery: map[string][]string{
+			"Seconds_Behind_Master": []string{"80"},
+			"Relay_Master_Log_File": []string{"some-name-bin.01345"},
+			"Exec_Master_Log_Pos":   []string{"7"},
 			"Relay_Log_Space":       []string{"2"},
 		},
-		slaveBackupQuery: map[string][]string{
+		subordinateBackupQuery: map[string][]string{
 			"count": []string{"0"},
 		},
 	}
 	expectedValues = map[interface{}]interface{}{
-		s.Metrics.SlaveSecondsBehindMaster: float64(80),
-		s.Metrics.SlaveSeqFile:             float64(1345),
-		s.Metrics.SlavePosition:            uint64(7),
+		s.Metrics.SubordinateSecondsBehindMain: float64(80),
+		s.Metrics.SubordinateSeqFile:           float64(1345),
+		s.Metrics.SubordinatePosition:          uint64(7),
 		s.Metrics.ReplicationRunning:       float64(1),
 		s.Metrics.RelayLogSpace:            float64(2),
 	}
@@ -387,32 +387,32 @@
 		t.Error(err)
 	}

-	if s.MasterHostname != "" {
-		t.Error("MasterHost: Expect empty string, got " + s.MasterHostname)
+	if s.MainHostname != "" {
+		t.Error("MainHost: Expect empty string, got " + s.MainHostname)
 	}
 }

-// Test when slave is down and backup isn't running
-func TestSlave2(t *testing.T) {
+// Test when subordinate is down and backup isn't running
+func TestSubordinate2(t *testing.T) {
 	//initialize MysqlStatDBs
 	s := initMysqlStatDBs()
 	//set desired test output
 	testquerycol = map[string]map[string][]string{
-		//getSlaveStats()
-		slaveQuery: map[string][]string{
-			"Seconds_Behind_Master": []string{"NULL"},
-			"Relay_Master_Log_File": []string{"some.name.bin.01345"},
-			"Exec_Master_Log_Pos":   []string{"7"},
+		//getSubordinateStats()
+		subordinateQuery: map[string][]string{
+			"Seconds_Behind_Master": []string{"NULL"},
+			"Relay_Master_Log_File": []string{"some.name.bin.01345"},
+			"Exec_Master_Log_Pos":   []string{"7"},
 			"Relay_Log_Space":       []string{"0"},
 		},
-		slaveBackupQuery: map[string][]string{
+		subordinateBackupQuery: map[string][]string{
 			"count": []string{"0"},
 		},
 	}
 	expectedValues = map[interface{}]interface{}{
-		s.Metrics.SlaveSecondsBehindMaster: float64(-1),
-		s.Metrics.SlaveSeqFile:             float64(1345),
-		s.Metrics.SlavePosition:            uint64(7),
+		s.Metrics.SubordinateSecondsBehindMain: float64(-1),
+		s.Metrics.SubordinateSeqFile:           float64(1345),
+		s.Metrics.SubordinatePosition:          uint64(7),
 		s.Metrics.ReplicationRunning:       float64(-1),
 		s.Metrics.RelayLogSpace:            float64(0),
 	}
@@ -424,27 +424,27 @@
 	}
 }

-// Test when slave is down and backup is running
-func TestSlave3(t *testing.T) {
+// Test when subordinate is down and backup is running
+func TestSubordinate3(t *testing.T) {
 	//initialize MysqlStatDBs
 	s := initMysqlStatDBs()
 	//set desired test output
 	testquerycol = map[string]map[string][]string{
-		//getSlaveStats()
-		slaveQuery: map[string][]string{
-			"Seconds_Behind_Master": []string{"NULL"},
-			"Relay_Master_Log_File": []string{"some.name.bin.01345"},
-			"Exec_Master_Log_Pos":   []string{"7"},
+		//getSubordinateStats()
+		subordinateQuery: map[string][]string{
+			"Seconds_Behind_Master": []string{"NULL"},
+			"Relay_Master_Log_File": []string{"some.name.bin.01345"},
+			"Exec_Master_Log_Pos":   []string{"7"},
 			"Relay_Log_Space":       []string{"0"},
 		},
-		slaveBackupQuery: map[string][]string{
+		subordinateBackupQuery: map[string][]string{
 			"count": []string{"1"},
 		},
 	}
 	expectedValues = map[interface{}]interface{}{
-		s.Metrics.SlaveSecondsBehindMaster: float64(-1),
-		s.Metrics.SlaveSeqFile:             float64(1345),
-		s.Metrics.SlavePosition:            uint64(7),
+		s.Metrics.SubordinateSecondsBehindMain: float64(-1),
+		s.Metrics.SubordinateSeqFile:           float64(1345),
+		s.Metrics.SubordinatePosition:          uint64(7),
 		s.Metrics.ReplicationRunning:       float64(1),
 		s.Metrics.RelayLogSpace:            float64(0),
 	}
@@ -456,23 +456,23 @@
 	}
 }

-// Test that a slave lag table can be queried correctly
-func TestSlaveLagTable(t *testing.T) {
+// Test that a subordinate lag table can be queried correctly
+func TestSubordinateLagTable(t *testing.T) {
 	s := initMysqlStatDBs()
-	s.slaveLagTable = "test-dbadmin-table"
-	slaveLagQuery = "SELECT max(ts) AS TIMESTAMP from test-dbadmin-table"
+	s.subordinateLagTable = "test-dbadmin-table"
+	subordinateLagQuery = "SELECT max(ts) AS TIMESTAMP from test-dbadmin-table"
 	testquerycol = map[string]map[string][]string{
-		slaveLagQuery: map[string][]string{
+		subordinateLagQuery: map[string][]string{
 			"TIMESTAMP": []string{"2016-05-20 15:21:46.00000"},
 		},
 	}
-	s.GetSlaveLag()
+	s.GetSubordinateLag()
 	timestamp := time.Date(2016, time.May, 20, 15, 21, 46, 00000000, time.UTC)
 	timeDiff := now().Sub(timestamp)
 	expectedValues = map[interface{}]interface{}{
-		s.Metrics.SlaveSecondsBehindMaster: timeDiff.Seconds(),
+		s.Metrics.SubordinateSecondsBehindMain: timeDiff.Seconds(),
 	}
 	err := checkResults()
 	if err != "" {
 		t.Error(err)
 	}
diff --git a/postgres/stat/stat.go b/postgres/stat/stat.go
index ed86960..83028f4 100644
--- a/postgres/stat/stat.go
+++ b/postgres/stat/stat.go
@@ -78,18 +78,18 @@ type PostgresStatMetrics struct {
 	BackupsRunning *metrics.Gauge
 	BinlogFiles    *metrics.Gauge
 	DBSizeBinlogs  *metrics.Gauge
-	SecondsBehindMaster *metrics.Gauge
-	SlavesConnectedToMe *metrics.Gauge
+	SecondsBehindMain *metrics.Gauge
+	SubordinatesConnectedToMe *metrics.Gauge
 	VacuumsAutoRunning   *metrics.Gauge
 	VacuumsManualRunning *metrics.Gauge
-	SlaveBytesBehindMe *metrics.Gauge
+	SubordinateBytesBehindMe *metrics.Gauge
 }

 //store all sql commands here
 const (
 	uptimeQuery = ` SELECT EXTRACT(epoch FROM now())
-	 - EXTRACT(epoch From pg_postmaster_start_time()) AS uptime;`
+	 - EXTRACT(epoch From pg_postmaster_start_time()) AS uptime;`
 	versionQuery   = "SELECT VERSION() AS version;"
 	tpsQuery       = "SELECT SUM(xact_commit + xact_rollback) AS tps FROM pg_stat_database;"
 	cacheInfoQuery = "SELECT SUM(blks_read) AS block_reads_disk, SUM(blks_hit) AS block_reads_cache FROM pg_stat_database;"
@@ -148,7 +148,7 @@
    AND C.relkind <> 'i'
    AND nspname !~ '^pg_toast'
  ORDER BY pg_total_relation_size(C.oid) DESC;`
-	secondsBehindMasterQuery = `
+	secondsBehindMainQuery = `
 SELECT EXTRACT(epoch FROM NOW()) -
        EXTRACT(epoch FROM pg_last_xact_replay_timestamp()) AS seconds;`
 	delayBytesQuery = `
@@ -249,8 +249,8 @@ func (s *PostgresStat) Collect() {
 	go s.getVacuumsInProgress()
 	go s.getMainProcessInfo()
 	go s.getSizes()
-	go s.getSecondsBehindMaster()
-	go s.getSlaveDelayBytes()
+	go s.getSecondsBehindMain()
+	go s.getSubordinateDelayBytes()
 	go s.getSecurity()
 	go s.getBackups()
 	go s.getWriteability()
@@ -609,7 +609,7 @@ func (s *PostgresStat) getMainProcessInfo() {
 		}

 		cmd := strings.Join(words[10:], " ")
-		if strings.Contains(cmd, "postmaster") {
+		if strings.Contains(cmd, "postmaster") {
 			info := make([]string, 10)
 			//mapping for info: 0-user, 1-pid, 2-cpu, 3-mem, 4-vsz, 5-rss, 6-tty, 7-stat, 8-start, 9-time, 10-cmd
 			for i, word := range words {
@@ -777,11 +777,11 @@ func (s *PostgresStat) getBackups() {
 }

 //get seconds
-func (s *PostgresStat) getSecondsBehindMaster() {
+func (s *PostgresStat) getSecondsBehindMain() {
 	recoveryConfFile := s.PGDATA + "/recovery.conf"
 	recoveryDoneFile := s.PGDATA + "/recovery.done"

-	res, err := s.db.QueryReturnColumnDict(secondsBehindMasterQuery)
+	res, err := s.db.QueryReturnColumnDict(secondsBehindMainQuery)
 	if err != nil {
 		s.db.Log(err)
 		s.wg.Done()
@@ -789,12 +789,12 @@
 	}
 	v, ok := res["seconds"]
 	if !ok || len(v) == 0 {
-		s.db.Log(errors.New("Unable to get seconds behind master"))
+		s.db.Log(errors.New("Unable to get seconds behind main"))
 		s.wg.Done()
 		return
 	}
 	if res["seconds"][0] == "" {
-		s.Metrics.SecondsBehindMaster.Set(float64(0)) // or -1?
+		s.Metrics.SecondsBehindMain.Set(float64(0)) // or -1?
 		s.wg.Done()
 		return
 	}
@@ -804,21 +804,21 @@
 		s.wg.Done()
 		return
 	}
-	s.Metrics.SecondsBehindMaster.Set(float64(seconds))
+	s.Metrics.SecondsBehindMain.Set(float64(seconds))
 	_, confErr := os.Stat(recoveryConfFile)
 	if confErr == nil {
-		s.Metrics.SecondsBehindMaster.Set(float64(-1))
+		s.Metrics.SecondsBehindMain.Set(float64(-1))
 	}
 	_, doneErr := os.Stat(recoveryDoneFile)
 	if doneErr == nil && os.IsNotExist(confErr) {
-		s.Metrics.SecondsBehindMaster.Set(float64(-1))
+		s.Metrics.SecondsBehindMain.Set(float64(-1))
 	}
 	s.wg.Done()
 	return
 }

-//get bytes slave is behind master
-func (s *PostgresStat) getSlaveDelayBytes() {
+//get bytes subordinate is behind main
+func (s *PostgresStat) getSubordinateDelayBytes() {
 	res, err := s.db.QueryReturnColumnDict(delayBytesQuery)

 	if err != nil {
@@ -826,33 +826,33 @@
 		s.wg.Done()
 		return
 	}
-	s.Metrics.SlavesConnectedToMe.Set(float64(len(res["client_hostname"])))
+	s.Metrics.SubordinatesConnectedToMe.Set(float64(len(res["client_hostname"])))
 	for _, val := range res["pg_current_xlog_location"] {
 		str := strings.Split(val, "/")
 		if len(str) < 2 {
-			s.db.Log(errors.New("Can't get slave delay bytes"))
+			s.db.Log(errors.New("Can't get subordinate delay bytes"))
 			s.wg.Done()
 			return
 		}
-		var masterFile, masterPos, slaveFile, slavePos int64
-		masterFile, err = strconv.ParseInt(str[0], 16, 64)
+		var mainFile, mainPos, subordinateFile, subordinatePos int64
+		mainFile, err = strconv.ParseInt(str[0], 16, 64)

-		masterPos, err = strconv.ParseInt(str[1], 16, 64)
+		mainPos, err = strconv.ParseInt(str[1], 16, 64)

 		str2 := strings.Split(res["write_location"][0], "/")
 		if len(str2) < 2 {
-			s.db.Log(errors.New("Can't get slave delay bytes"))
+			s.db.Log(errors.New("Can't get subordinate delay bytes"))
 			s.wg.Done()
 			return
 		}
-		slaveFile, err = strconv.ParseInt(str2[0], 16, 64)
+		subordinateFile, err = strconv.ParseInt(str2[0], 16, 64)

-		slavePos, err = strconv.ParseInt(str2[1], 16, 64)
+		subordinatePos, err = strconv.ParseInt(str2[1], 16, 64)

 		segmentSize, _ := strconv.ParseInt("0xFFFFFFFF", 0, 64)
-		r := ((masterFile * segmentSize) + masterPos) - ((slaveFile * segmentSize) + slavePos)
-		s.Metrics.SlaveBytesBehindMe.Set(float64(r))
+		r := ((mainFile * segmentSize) + mainPos) - ((subordinateFile * segmentSize) + subordinatePos)
+		s.Metrics.SubordinateBytesBehindMe.Set(float64(r))
 	}
 	if err != nil {
 		s.db.Log(err)