Skip to content

Commit 5ed189a

Browse files
Fix all go vet non-constant format string errors
1 parent f421180 commit 5ed189a

File tree

10 files changed

+78
-56
lines changed

10 files changed

+78
-56
lines changed

event/event.go

Lines changed: 21 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -58,8 +58,10 @@ func SetReceiver(r Receiver) {
5858
return
5959
}
6060

61-
var subscribers = []Receiver{}
62-
var submux = &sync.Mutex{}
61+
var (
62+
subscribers = []Receiver{}
63+
submux = &sync.Mutex{}
64+
)
6365

6466
func Subscribe(r Receiver) {
6567
submux.Lock()
@@ -90,6 +92,15 @@ func Sendf(eventName string, msg string, args ...interface{}) {
9092
})
9193
}
9294

95+
func Error(eventName string, msg string) {
96+
send(Event{
97+
Ts: time.Now(),
98+
Event: eventName,
99+
Message: msg,
100+
Error: true,
101+
})
102+
}
103+
93104
// Errorf sends an event flagged as an error with a formatted message.
94105
func Errorf(eventName string, msg string, args ...interface{}) {
95106
send(Event{
@@ -139,6 +150,10 @@ func (s MonitorReceiver) Sendf(eventName string, msg string, args ...interface{}
139150
})
140151
}
141152

153+
func (s MonitorReceiver) Error(eventName string, msg string) {
154+
s.Errorf(eventName, "%s", msg)
155+
}
156+
142157
func (s MonitorReceiver) Errorf(eventName string, msg string, args ...interface{}) {
143158
send(Event{
144159
Ts: time.Now(),
@@ -151,8 +166,10 @@ func (s MonitorReceiver) Errorf(eventName string, msg string, args ...interface{
151166

152167
// --------------------------------------------------------------------------
153168

154-
var stdout = log.New(os.Stdout, "", log.LstdFlags|log.Lmicroseconds)
155-
var stderr = log.New(os.Stderr, "", log.LstdFlags|log.Lmicroseconds)
169+
var (
170+
stdout = log.New(os.Stdout, "", log.LstdFlags|log.Lmicroseconds)
171+
stderr = log.New(os.Stderr, "", log.LstdFlags|log.Lmicroseconds)
172+
)
156173

157174
// Log is the default Receiver that uses the Go built-in log package to print
158175
// certain events to STDOUT and error events to STDERR. Call SetReceiver to

heartbeat/reader.go

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -33,10 +33,12 @@ type Lag struct {
3333
Replica bool
3434
}
3535

36-
var ReadTimeout = 2 * time.Second
37-
var ReadErrorWait = 1 * time.Second
38-
var NoHeartbeatWait = 3 * time.Second
39-
var ReplCheckWait = 3 * time.Second
36+
var (
37+
ReadTimeout = 2 * time.Second
38+
ReadErrorWait = 1 * time.Second
39+
NoHeartbeatWait = 3 * time.Second
40+
ReplCheckWait = 3 * time.Second
41+
)
4042

4143
// BlipReader reads heartbeats from BlipWriter.
4244
type BlipReader struct {
@@ -165,7 +167,7 @@ func (r *BlipReader) run() {
165167
r.Unlock()
166168
msg := fmt.Sprintf("not a replica: %s=%d (retry in %s)", r.replCheck, isRepl, ReplCheckWait)
167169
blip.Debug("%s: %s", r.monitorId, msg)
168-
status.Monitor(r.monitorId, status.HEARTBEAT_READER, msg)
170+
status.Monitor(r.monitorId, status.HEARTBEAT_READER, "%s", msg)
169171
time.Sleep(ReplCheckWait)
170172
continue
171173
}

monitor/engine.go

Lines changed: 12 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -93,16 +93,16 @@ func (e *Engine) DB() *sql.DB {
9393
// calls. Serialization is handled by the only caller: LevelCollector.ChangePlan().
9494
func (e *Engine) Prepare(ctx context.Context, plan blip.Plan, before, after func()) error {
9595
blip.Debug("%s: prepare %s (%s)", e.monitorId, plan.Name, plan.Source)
96-
e.event.Sendf(event.ENGINE_PREPARE, plan.Name)
97-
status.Monitor(e.monitorId, status.ENGINE_PREPARE, plan.Name)
96+
e.event.Sendf(event.ENGINE_PREPARE, "%s", plan.Name)
97+
status.Monitor(e.monitorId, status.ENGINE_PREPARE, "%s", plan.Name)
9898
defer status.RemoveComponent(e.monitorId, status.ENGINE_PREPARE)
9999

100100
// Report last error, if any
101101
var lerr error
102102
defer func() {
103103
if lerr != nil {
104-
e.event.Errorf(event.ENGINE_PREPARE_ERROR, lerr.Error())
105-
status.Monitor(e.monitorId, "error:"+status.ENGINE_PREPARE, lerr.Error())
104+
e.event.Error(event.ENGINE_PREPARE_ERROR, lerr.Error())
105+
status.Monitor(e.monitorId, "error:"+status.ENGINE_PREPARE, "%s", lerr.Error())
106106
} else {
107107
// success
108108
status.RemoveComponent(e.monitorId, "error:"+status.ENGINE_PREPARE)
@@ -233,8 +233,8 @@ func (e *Engine) Prepare(ctx context.Context, plan blip.Plan, before, after func
233233

234234
e.Unlock() // UNLOCK plan ---------------------------------------
235235

236-
status.Monitor(e.monitorId, status.ENGINE_PLAN, plan.Name)
237-
e.event.Sendf(event.ENGINE_PREPARE_SUCCESS, plan.Name)
236+
status.Monitor(e.monitorId, status.ENGINE_PLAN, "%s", plan.Name)
237+
e.event.Sendf(event.ENGINE_PREPARE_SUCCESS, "%s", plan.Name)
238238

239239
status.Monitor(e.monitorId, status.ENGINE_PREPARE, "%s: level-collector after callback", plan.Name)
240240
after() // notify caller (lco.changePlan) that we have swapped the plan
@@ -271,10 +271,10 @@ func (e *Engine) Collect(emrCtx context.Context, interval uint, levelName string
271271
}
272272
if domains == nil {
273273
blip.Debug("Engine.Stop was called, dropping interval %d level %s", interval, levelName)
274-
return []*blip.Metrics{&blip.Metrics{Values: map[string][]blip.MetricValue{}}}, nil // see return guarantee in Collect comment
274+
return []*blip.Metrics{{Values: map[string][]blip.MetricValue{}}}, nil // see return guarantee in Collect comment
275275
}
276276
blip.Debug("%s: %s: collect", e.monitorId, coId)
277-
status.Monitor(e.monitorId, status.ENGINE_COLLECT, coId+": collecting")
277+
status.Monitor(e.monitorId, status.ENGINE_COLLECT, "%s", coId+": collecting")
278278

279279
// Collect metrics for each domain in parallel (limit: CollectParallel)
280280
sem := make(chan bool, CollectParallel) // semaphore for CollectParallel
@@ -362,7 +362,7 @@ SWEEP:
362362
metrics[0].End = time.Now()
363363

364364
// Log collector errors and update collector status
365-
status.Monitor(e.monitorId, status.ENGINE_COLLECT, coId+": logging errors")
365+
status.Monitor(e.monitorId, status.ENGINE_COLLECT, "%s", coId+": logging errors")
366366
errCount := 0
367367
for domain, err := range errs {
368368
switch err {
@@ -376,7 +376,7 @@ SWEEP:
376376
errCount += 1
377377
errMsg := fmt.Sprintf("%s/%s: %s", coId, domain, err)
378378
status.Monitor(e.monitorId, "error:"+domain, "at %s: %s", metrics[0].Begin, errMsg)
379-
e.event.Errorf(event.COLLECTOR_ERROR, errMsg) // log by default
379+
e.event.Error(event.COLLECTOR_ERROR, errMsg) // log by default
380380
}
381381
}
382382

@@ -493,7 +493,7 @@ func (cl *clutch) collect(m blip.Metrics, sem chan bool) {
493493

494494
if cl.running {
495495
// Collector fault: it didn't terminate itself at CMR
496-
cl.event.Errorf(event.COLLECTOR_FAULT, fmt.Sprintf("%s: metrics from interval %d will be dropped if the collector recovers: %+v", cl.domain, cl.m.Interval, cl))
496+
cl.event.Errorf(event.COLLECTOR_FAULT, "%s: metrics from interval %d will be dropped if the collector recovers: %+v", cl.domain, cl.m.Interval, cl)
497497
cl.fence = m.Interval
498498
if cl.cancel != nil {
499499
cl.cancel()
@@ -546,7 +546,7 @@ func (cl *clutch) collect(m blip.Metrics, sem chan bool) {
546546
b := make([]byte, 4096)
547547
n := runtime.Stack(b, false)
548548
perr := fmt.Errorf("PANIC: monitor ID %s: %s: %v\n%s", cl.m.MonitorId, cl.domain, r, string(b[0:n]))
549-
cl.event.Errorf(event.COLLECTOR_PANIC, perr.Error())
549+
cl.event.Error(event.COLLECTOR_PANIC, perr.Error())
550550
}
551551
cancel() // local cancel, not cl.cancel, in case goroutine is behind the fence
552552
cl.Lock()
@@ -562,7 +562,6 @@ func (cl *clutch) collect(m blip.Metrics, sem chan bool) {
562562
cl.domain, cl.stopTime.Sub(cl.startTime), cl.pending, cl.err)
563563
}
564564
cl.Unlock()
565-
566565
}()
567566

568567
// ----------------------------------------------------------------------

monitor/level_collector.go

Lines changed: 16 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -108,9 +108,11 @@ func TickerDuration(d, e time.Duration) {
108108
// act like 1s has elapsed. This is for test plans with realistic whole-second
109109
// durations. The RBB tests use 100ms/100ms because that test plan is design for
110110
// 100ms intervals, so it can run through 5 intervals in about 500ms.
111-
var tickerMux = &sync.Mutex{} // make go test -race happy
112-
var tickerDuration = 1 * time.Second // used for testing
113-
var timeElapsed = 1 * time.Second // used for testing
111+
var (
112+
tickerMux = &sync.Mutex{} // make go test -race happy
113+
tickerDuration = 1 * time.Second // used for testing
114+
timeElapsed = 1 * time.Second // used for testing
115+
)
114116

115117
// recvMetrics receives metrics on metricsChan and send them to all the sinks.
116118
// This is a goroutine run by keepRecvMetrics and restarted by keepRecvMetrics
@@ -123,7 +125,7 @@ func (c *lco) recvMetrics(stopSinksChan, doneChan chan struct{}) {
123125
b := make([]byte, 4096)
124126
n := runtime.Stack(b, false)
125127
perr := fmt.Errorf("PANIC: sinks: %s: %v\n%s", c.monitorId, r, string(b[0:n]))
126-
c.event.Errorf(event.LCO_RECEIVER_PANIC, perr.Error())
128+
c.event.Error(event.LCO_RECEIVER_PANIC, perr.Error())
127129
}
128130
}()
129131
RECV:
@@ -145,11 +147,11 @@ RECV:
145147
coId := fmt.Sprintf("%s/%s/%d", m.Plan, m.Level, m.Interval)
146148
for _, sink := range c.sinks {
147149
sinkName := sink.Name()
148-
status.Monitor(c.monitorId, status.LEVEL_SINKS, coId+": sending to "+sinkName)
150+
status.Monitor(c.monitorId, status.LEVEL_SINKS, "%s", coId+": sending to "+sinkName)
149151
err := sink.Send(context.Background(), m) // @todo ctx with timeout
150152
if err != nil {
151153
c.event.Errorf(event.SINK_SEND_ERROR, "%s :%s", sinkName, err) // log by default
152-
status.Monitor(c.monitorId, "error:"+sinkName, err.Error())
154+
status.Monitor(c.monitorId, "error:"+sinkName, "%s", err.Error())
153155
} else {
154156
status.RemoveComponent(c.monitorId, "error:"+sinkName)
155157
}
@@ -279,8 +281,8 @@ func (c *lco) collect(interval uint, levelName string, startTime time.Time) {
279281
blip.Debug("%s: level %s: done in %s", c.monitorId, levelName, metrics[0].End.Sub(metrics[0].Begin))
280282

281283
if err != nil {
282-
status.Monitor(c.monitorId, "error:collect", err.Error())
283-
c.event.Errorf(event.ENGINE_COLLECT_ERROR, err.Error())
284+
status.Monitor(c.monitorId, "error:collect", "%s", err.Error())
285+
c.event.Error(event.ENGINE_COLLECT_ERROR, err.Error())
284286
} else {
285287
status.RemoveComponent(c.monitorId, "error:collect")
286288
}
@@ -368,7 +370,7 @@ func (c *lco) changePlan(ctx context.Context, doneChan chan struct{}, newState,
368370
oldPlanName := c.plan.Name
369371
c.stateMux.Unlock()
370372
change := fmt.Sprintf("state:%s plan:%s -> state:%s plan:%s", oldState, oldPlanName, newState, newPlanName)
371-
c.event.Sendf(event.CHANGE_PLAN, change)
373+
c.event.Sendf(event.CHANGE_PLAN, "%s", change)
372374

373375
// Load new plan from plan loader, which contains all plans. Try forever because
374376
// that's what this func/gouroutine does: try forever (caller's expect that).
@@ -385,8 +387,8 @@ func (c *lco) changePlan(ctx context.Context, doneChan chan struct{}, newState,
385387
}
386388

387389
errMsg := fmt.Sprintf("%s: error loading new plan %s: %s (retrying)", change, newPlanName, err)
388-
status.Monitor(c.monitorId, status.LEVEL_CHANGE_PLAN, errMsg)
389-
c.event.Sendf(event.CHANGE_PLAN_ERROR, errMsg)
390+
status.Monitor(c.monitorId, status.LEVEL_CHANGE_PLAN, "%s", errMsg)
391+
c.event.Sendf(event.CHANGE_PLAN_ERROR, "%s", errMsg)
390392
time.Sleep(2 * time.Second)
391393
}
392394

@@ -428,8 +430,8 @@ func (c *lco) changePlan(ctx context.Context, doneChan chan struct{}, newState,
428430
// Changing state/plan always resumes (if paused); in fact, it's the
429431
// only way to resume after Pause is called
430432
c.paused = false
431-
status.Monitor(c.monitorId, status.LEVEL_STATE, newState)
432-
status.Monitor(c.monitorId, status.LEVEL_PLAN, newPlan.Name)
433+
status.Monitor(c.monitorId, status.LEVEL_STATE, "%s", newState)
434+
status.Monitor(c.monitorId, status.LEVEL_PLAN, "%s", newPlan.Name)
433435
status.Monitor(c.monitorId, status.LEVEL_COLLECTOR, "running since %s", blip.FormatTime(time.Now()))
434436
blip.Debug("%s: resume", c.monitorId)
435437

@@ -466,7 +468,7 @@ func (c *lco) changePlan(ctx context.Context, doneChan chan struct{}, newState,
466468
}
467469

468470
status.RemoveComponent(c.monitorId, status.LEVEL_CHANGE_PLAN)
469-
c.event.Sendf(event.CHANGE_PLAN_SUCCESS, change)
471+
c.event.Sendf(event.CHANGE_PLAN_SUCCESS, "%s", change)
470472
}
471473

472474
// Pause pauses metrics collection until ChangePlan is called. Run still runs,

monitor/loader.go

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -168,8 +168,10 @@ func (ml *Loader) start(m *loadedMonitor) error {
168168
// [ 1, 10) = <1s startup
169169
// [10, 50] = ~1s
170170
// (50,inf) = >1s + 1s per 50 [e.g. 100=2s, 200=4s]
171-
const min_wait = 20
172-
const max_wait = 100
171+
const (
172+
min_wait = 20
173+
max_wait = 100
174+
)
173175

174176
func wait(n int) int {
175177
if n <= 1 {
@@ -243,7 +245,7 @@ func (ml *Loader) Load(ctx context.Context) error {
243245
}
244246
}
245247
if errMsg != "" {
246-
event.Errorf(event.MONITORS_STOPLOSS, errMsg)
248+
event.Error(event.MONITORS_STOPLOSS, errMsg)
247249
return ErrStopLoss
248250
}
249251
}

monitor/monitor.go

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -155,7 +155,7 @@ func (m *Monitor) Stop() error {
155155
m.db.Close()
156156
}
157157

158-
event.Sendf(event.MONITOR_STOPPED, m.monitorId)
158+
event.Sendf(event.MONITOR_STOPPED, "%s", m.monitorId)
159159
status.Monitor(m.monitorId, status.MONITOR, "stopped at %s", blip.FormatTime(time.Now()))
160160
return nil
161161
}
@@ -259,7 +259,7 @@ func (m *Monitor) startup() error {
259259
m.runMux.Lock()
260260
m.db = db
261261
m.dsn = dsnRedacted
262-
status.Monitor(m.monitorId, status.MONITOR_DSN, dsnRedacted)
262+
status.Monitor(m.monitorId, status.MONITOR_DSN, "%s", dsnRedacted)
263263
m.runMux.Unlock()
264264
break
265265
}
@@ -377,7 +377,7 @@ func (m *Monitor) startup() error {
377377
if m.cfg.Exporter.Mode == blip.EXPORTER_MODE_LEGACY {
378378
blip.Debug("%s: legacy mode", m.monitorId)
379379
status.Monitor(m.monitorId, status.MONITOR, "running in exporter legacy mode")
380-
m.event.Sendf(event.MONITOR_STARTED, m.dsn)
380+
m.event.Sendf(event.MONITOR_STARTED, "%s", m.dsn)
381381
return nil
382382
}
383383
}
@@ -452,7 +452,7 @@ func (m *Monitor) startup() error {
452452
m.lco.ChangePlan(blip.STATE_ACTIVE, m.cfg.Plan) // start LCO directly
453453
}
454454

455-
m.event.Sendf(event.MONITOR_STARTED, m.dsn)
455+
m.event.Sendf(event.MONITOR_STARTED, "%s", m.dsn)
456456
return nil
457457
}
458458

@@ -491,7 +491,7 @@ func (m *Monitor) stop(lock bool, caller string) {
491491

492492
func (m *Monitor) setErr(err error, isPanic bool) {
493493
if err != nil {
494-
m.event.Errorf(event.MONITOR_ERROR, err.Error())
494+
m.event.Error(event.MONITOR_ERROR, err.Error())
495495
status.Monitor(m.monitorId, "error:"+status.MONITOR, "error: %s", err)
496496
} else {
497497
status.RemoveComponent(m.monitorId, "error:"+status.MONITOR)

monitor/plan_changer.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ func NewPlanChanger(args PlanChangerArgs) *planChanger {
115115
// setErr sets the last internal error reported by Status.
116116
func (pch *planChanger) setErr(err error) {
117117
if err != nil {
118-
status.Monitor(pch.monitorId, "error:"+status.PLAN_CHANGER, err.Error())
118+
status.Monitor(pch.monitorId, "error:"+status.PLAN_CHANGER, "%s", err.Error())
119119
} else {
120120
status.RemoveComponent(pch.monitorId, "error:"+status.PLAN_CHANGER)
121121
}

plan/loader.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -171,7 +171,7 @@ func (pl *Loader) LoadShared(cfg blip.ConfigPlans, dbMaker blip.DbFactory) error
171171

172172
// LoadMonitor loads monitor plans: config.monitors.*.Plans.
173173
func (pl *Loader) LoadMonitor(mon blip.ConfigMonitor, dbMaker blip.DbFactory) error {
174-
event.Sendf(event.PLANS_LOAD_MONITOR, mon.MonitorId)
174+
event.Sendf(event.PLANS_LOAD_MONITOR, "%s", mon.MonitorId)
175175

176176
if mon.Plans.Table == "" && len(mon.Plans.Files) == 0 {
177177
blip.Debug("monitor %s uses only shared plans", mon.MonitorId)

server/api.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,7 @@ func (api *API) Run() error {
8989
blip.Debug("shutdown")
9090
return nil
9191
default:
92-
event.Errorf(event.SERVER_API_ERROR, err.Error())
92+
event.Errorf(event.SERVER_API_ERROR, "%s", err.Error())
9393
time.Sleep(1 * time.Second) // between crashes
9494
}
9595
}

0 commit comments

Comments (0)