// Package deadlock provides drop-in replacements for sync.Mutex and
// sync.RWMutex that can detect potential deadlocks at runtime.
//
// Forked from sasha-s/go-deadlock.
package deadlock

import (
    "bufio"
    "bytes"
    "fmt"
    "io"
    "os"
    "sync"
    "time"

    "github.com/petermattis/goid"
)

// Opts control how deadlock detection behaves.
// Options are supposed to be set once at startup (say, when parsing flags).
var Opts = struct {
    // If Disable is true, Mutex/RWMutex work exactly like their sync
    // counterparts: almost no runtime penalty and no deadlock detection.
    Disable bool
    // If DisableLockOrderDetection is true, lock-order-based deadlock
    // detection is disabled.
    DisableLockOrderDetection bool
    // Waiting for a lock for longer than DeadlockTimeout is considered a deadlock.
    // Ignored if DeadlockTimeout <= 0.
    DeadlockTimeout time.Duration
    // OnPotentialDeadlock is called each time a potential deadlock is detected -- either based on
    // lock order or on lock wait time.
    OnPotentialDeadlock func()
    // Will keep up to MaxMapSize lock pairs (happened-before / happened-after) in the map.
    // The map resets once the threshold is reached.
    MaxMapSize int
    // Will dump stacktraces of all goroutines when inconsistent locking is detected.
    PrintAllCurrentGoroutines bool
    mu                        *sync.Mutex // Protects the LogBuf.
    // Will print deadlock info to the log buffer.
    LogBuf io.Writer
}{
    DeadlockTimeout: time.Second * 30,
    OnPotentialDeadlock: func() {
        os.Exit(2)
    },
    MaxMapSize: 1024 * 64,
    mu:         &sync.Mutex{},
    LogBuf:     os.Stderr,
}
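
// A minimal configuration sketch (illustrative only; the timeout value and
// the logging callback below are assumptions, not defaults of this package):
//
//    func init() {
//        deadlock.Opts.DeadlockTimeout = 10 * time.Second
//        deadlock.Opts.OnPotentialDeadlock = func() {
//            log.Println("potential deadlock detected") // report instead of exiting
//        }
//    }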

type lockID uint64

// counterMu protects currID, which hands out a unique lockID to each
// Mutex/RWMutex the first time it is locked.
var counterMu sync.Mutex
var currID = lockID(1)

type identifiable interface {
    id() lockID
}

// A Mutex is a drop-in replacement for sync.Mutex.
// Performs deadlock detection unless disabled in Opts.
type Mutex struct {
    muId lockID
    mu   sync.Mutex
}

func (m *Mutex) id() lockID {
    return m.muId
}

// Lock locks the mutex.
// If the lock is already in use, the calling goroutine
// blocks until the mutex is available.
//
// Unless deadlock detection is disabled, logs potential deadlocks to Opts.LogBuf,
// calling Opts.OnPotentialDeadlock on each occasion.
func (m *Mutex) Lock() {
    // Fast path when detection is disabled; avoids capturing m.mu.Lock in a
    // closure, which would escape to the heap.
    if Opts.Disable {
        m.mu.Lock()
        return
    }
    counterMu.Lock()
    if m.muId == 0 {
        m.muId = currID
        currID++
    }
    counterMu.Unlock()
    lockEnabled(m.mu.Lock, m, false)
}

// Unlock unlocks the mutex.
// It is a run-time error if m is not locked on entry to Unlock.
//
// A locked Mutex is not associated with a particular goroutine.
// It is allowed for one goroutine to lock a Mutex and then
// arrange for another goroutine to unlock it.
func (m *Mutex) Unlock() {
    m.mu.Unlock()
    if !Opts.Disable {
        postUnlock(m)
    }
}
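
// Usage sketch (drop-in for sync.Mutex; illustrative only):
//
//    var mu deadlock.Mutex
//    mu.Lock()
//    defer mu.Unlock()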

// An RWMutex is a drop-in replacement for sync.RWMutex.
// Performs deadlock detection unless disabled in Opts.
type RWMutex struct {
    muId lockID
    mu   sync.RWMutex
}

func (m *RWMutex) id() lockID {
    return m.muId
}

// Lock locks rw for writing.
// If the lock is already locked for reading or writing,
// Lock blocks until the lock is available.
// To ensure that the lock eventually becomes available,
// a blocked Lock call excludes new readers from acquiring
// the lock.
//
// Unless deadlock detection is disabled, logs potential deadlocks to Opts.LogBuf,
// calling Opts.OnPotentialDeadlock on each occasion.
func (m *RWMutex) Lock() {
    if Opts.Disable {
        m.mu.Lock()
        return
    }
    counterMu.Lock()
    if m.muId == 0 {
        m.muId = currID
        currID++
    }
    counterMu.Unlock()
    lockEnabled(m.mu.Lock, m, false)
}

// Unlock unlocks the mutex for writing. It is a run-time error if rw is
// not locked for writing on entry to Unlock.
//
// As with Mutexes, a locked RWMutex is not associated with a particular
// goroutine. One goroutine may RLock (Lock) an RWMutex and then
// arrange for another goroutine to RUnlock (Unlock) it.
func (m *RWMutex) Unlock() {
    m.mu.Unlock()
    if !Opts.Disable {
        postUnlock(m)
    }
}

// RLock locks the mutex for reading.
//
// Unless deadlock detection is disabled, logs potential deadlocks to Opts.LogBuf,
// calling Opts.OnPotentialDeadlock on each occasion.
func (m *RWMutex) RLock() {
    if Opts.Disable {
        m.mu.RLock()
        return
    }
    counterMu.Lock()
    if m.muId == 0 {
        m.muId = currID
        currID++
    }
    counterMu.Unlock()
    lockEnabled(m.mu.RLock, m, true)
}

// RUnlock undoes a single RLock call;
// it does not affect other simultaneous readers.
// It is a run-time error if rw is not locked for reading
// on entry to RUnlock.
func (m *RWMutex) RUnlock() {
    m.mu.RUnlock()
    if !Opts.Disable {
        postUnlock(m)
    }
}
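
// Usage sketch (drop-in for sync.RWMutex; illustrative only):
//
//    var rw deadlock.RWMutex
//    rw.RLock()
//    defer rw.RUnlock()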

// RLocker returns a Locker interface that implements
// the Lock and Unlock methods by calling RLock and RUnlock.
func (m *RWMutex) RLocker() sync.Locker {
    return (*rlocker)(m)
}
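
// A sketch of pairing RLocker with sync.Cond (illustrative only):
//
//    var rw deadlock.RWMutex
//    cond := sync.NewCond(rw.RLocker())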

// The free functions below forward to the package-global lock-order state lo.

func preLock(skip int, p identifiable, gid int64, checkRecursiveLocking bool) {
    lo.preLock(skip, p, gid, checkRecursiveLocking)
}

func postLock(skip int, p identifiable, gid int64) {
    lo.postLock(skip, p, gid)
}

func postUnlock(p identifiable) {
    lo.postUnlock(p)
}

func checkRecursiveLocking(skip int, p identifiable, gid int64) {
    lo.checkRecursiveLocking(skip, p, gid)
}

func checkLockOrdering(skip int, p identifiable, gid int64) {
    lo.checkLockOrdering(skip, p, gid)
}

func lockEnabled(lockFn func(), ptr identifiable, preLockCheckRecursiveLocking bool) {
    // Grab the current goroutine identifier.
    gid := goid.Get()
    preLock(4, ptr, gid, preLockCheckRecursiveLocking)
    if Opts.DeadlockTimeout <= 0 {
        lockFn()
    } else {
        ch := make(chan struct{})
        go func() {
            lockFn()
            close(ch)
        }()
        t := time.NewTimer(Opts.DeadlockTimeout)
        defer t.Stop() // Created once, outside the loop, so timers do not pile up on retries.
        for {
            select {
            case <-t.C:
                if !preLockCheckRecursiveLocking {
                    checkRecursiveLocking(4, ptr, gid)
                }
                checkLockOrdering(4, ptr, gid)
                lo.mu.Lock()
                prev, ok := lo.cur[ptr.id()]
                if !ok {
                    lo.mu.Unlock()
                    // Nobody seems to be holding the lock; rearm the timer and try again.
                    t.Reset(Opts.DeadlockTimeout)
                    break
                }
                Opts.mu.Lock()
                fmt.Fprintln(Opts.LogBuf, header)
                fmt.Fprintln(Opts.LogBuf, "Previous place where the lock was grabbed")
                fmt.Fprintf(Opts.LogBuf, "goroutine %v lock %p\n", prev.gid, ptr)
                printStack(Opts.LogBuf, prev.stack)
                fmt.Fprintln(Opts.LogBuf, "Have been trying to lock it again for more than", Opts.DeadlockTimeout)
                fmt.Fprintf(Opts.LogBuf, "goroutine %v lock %p\n", gid, ptr)
                printStack(Opts.LogBuf, callers(2))
                stacks := stacks()
                grs := bytes.Split(stacks, []byte("\n\n"))
                for _, g := range grs {
                    if goid.ExtractGID(g) == prev.gid {
                        fmt.Fprintln(Opts.LogBuf, "Here is what goroutine", prev.gid, "is doing now")
                        Opts.LogBuf.Write(g)
                        fmt.Fprintln(Opts.LogBuf)
                    }
                }
                lo.other(ptr)
                if Opts.PrintAllCurrentGoroutines {
                    fmt.Fprintln(Opts.LogBuf, "All current goroutines:")
                    Opts.LogBuf.Write(stacks)
                }
                fmt.Fprintln(Opts.LogBuf)
                if buf, ok := Opts.LogBuf.(*bufio.Writer); ok {
                    buf.Flush()
                }
                Opts.mu.Unlock()
                lo.mu.Unlock()
                Opts.OnPotentialDeadlock()
                <-ch
                postLock(4, ptr, gid)
                return
            case <-ch:
                postLock(4, ptr, gid)
                return
            }
        }
    }
    postLock(4, ptr, gid)
}
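
// What the timeout path above catches, sketched (illustrative; with the
// default Opts this prints a report and exits after DeadlockTimeout):
//
//    var mu deadlock.Mutex
//    mu.Lock()
//    mu.Lock() // Blocks forever; reported as a potential deadlock.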

type lockOrder struct {
    mu    sync.Mutex
    cur   map[lockID]stackGID // Stacktraces + gids for the locks currently taken.
    order map[beforeAfter]ss  // Expected order of locks.
}

type stackGID struct {
    stack []uintptr
    gid   int64
}

type beforeAfter struct {
    before lockID
    after  lockID
}

type ss struct {
    before []uintptr
    after  []uintptr
}

var lo = newLockOrder()

func newLockOrder() *lockOrder {
    return &lockOrder{
        cur:   map[lockID]stackGID{}, // Maps each held lock to the stack and goroutine that acquired it.
        order: map[beforeAfter]ss{},
    }
}

func (l *lockOrder) postLock(skip int, p identifiable, gid int64) {
    stack := callers(skip)
    l.mu.Lock()
    l.cur[p.id()] = stackGID{stack, gid}
    l.mu.Unlock()
}

func (l *lockOrder) printRecursiveLocking(currentGoRoutineID int64, otherLockID lockID, currentStack []uintptr, otherStack []uintptr, p identifiable) {
    Opts.mu.Lock()
    fmt.Fprintln(Opts.LogBuf, header, "Recursive locking:")
    fmt.Fprintf(Opts.LogBuf, "current goroutine %d lock %x\n", currentGoRoutineID, otherLockID)
    printStack(Opts.LogBuf, currentStack)
    fmt.Fprintln(Opts.LogBuf, "Previous place where the lock was grabbed (same goroutine)")
    printStack(Opts.LogBuf, otherStack)
    l.other(p)
    if buf, ok := Opts.LogBuf.(*bufio.Writer); ok {
        buf.Flush()
    }
    Opts.mu.Unlock()
    Opts.OnPotentialDeadlock()
}

func (l *lockOrder) printLockOrdering(currentGoRoutineID int64, otherLockID lockID, currentStack []uintptr, otherStack []uintptr, p identifiable, s ss) {
    Opts.mu.Lock()
    fmt.Fprintln(Opts.LogBuf, header, "Inconsistent locking. Saw this ordering in one goroutine:")
    fmt.Fprintln(Opts.LogBuf, "happened before")
    printStack(Opts.LogBuf, s.before)
    fmt.Fprintln(Opts.LogBuf, "happened after")
    printStack(Opts.LogBuf, s.after)
    fmt.Fprintln(Opts.LogBuf, "in another goroutine: happened before")
    printStack(Opts.LogBuf, otherStack)
    fmt.Fprintln(Opts.LogBuf, "happened after")
    printStack(Opts.LogBuf, currentStack)
    l.other(p)
    fmt.Fprintln(Opts.LogBuf)
    if buf, ok := Opts.LogBuf.(*bufio.Writer); ok {
        buf.Flush()
    }
    Opts.mu.Unlock()
    Opts.OnPotentialDeadlock()
}

func (l *lockOrder) checkRecursiveLocking(skip int, p identifiable, gid int64) {
    l.mu.Lock()
    defer l.mu.Unlock()
    lockID := p.id()
    for otherLockID, otherLockStack := range l.cur {
        if otherLockStack.gid != gid { // We want locks taken in the same goroutine only.
            continue
        }
        if otherLockID == lockID {
            // Same lock, same goroutine: recursive locking.
            // (We may have waited up to Opts.DeadlockTimeout before getting here.)
            stack := callers(skip)
            l.printRecursiveLocking(gid, otherLockID, stack, otherLockStack.stack, p)
        }
    }
}

func (l *lockOrder) checkLockOrdering(skip int, p identifiable, gid int64) {
    if Opts.DisableLockOrderDetection {
        return
    }
    lockID := p.id()
    l.mu.Lock()
    defer l.mu.Unlock()
    for otherLockID, otherLockStack := range l.cur {
        if otherLockStack.gid != gid { // We want locks taken in the same goroutine only.
            continue
        }
        if otherLockID == lockID {
            // Recursive locking; handled by checkRecursiveLocking, which waits
            // up to Opts.DeadlockTimeout inside the lock() path.
            continue
        }
        // If lockID has previously been taken before otherLockID, taking it
        // while holding otherLockID now is an ordering inversion.
        if s, ok := l.order[beforeAfter{lockID, otherLockID}]; ok {
            stack := callers(skip)
            l.printLockOrdering(gid, otherLockID, stack, otherLockStack.stack, p, s)
        }
    }
}
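
// The inversion checkLockOrdering reports, sketched (illustrative only):
//
//    var a, b deadlock.Mutex
//    // Goroutine 1 establishes the order a -> b:
//    a.Lock(); b.Lock(); b.Unlock(); a.Unlock()
//    // Goroutine 2 takes them in the opposite order, b -> a: reported.
//    b.Lock(); a.Lock()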

func (l *lockOrder) storeLockOrder(skip int, p identifiable, gid int64) {
    if Opts.DisableLockOrderDetection {
        return
    }
    stack := callers(skip)
    lockID := p.id()
    l.mu.Lock()
    defer l.mu.Unlock()
    for otherLockID, otherLockStack := range l.cur {
        if otherLockStack.gid != gid { // We want locks taken in the same goroutine only.
            continue
        }
        if otherLockID == lockID {
            // Recursive locking; handled by checkRecursiveLocking, which waits
            // up to Opts.DeadlockTimeout inside the lock() path.
            continue
        }
        l.order[beforeAfter{otherLockID, lockID}] = ss{otherLockStack.stack, stack}
        if len(l.order) == Opts.MaxMapSize { // Reset the map to keep the memory footprint bounded.
            l.order = map[beforeAfter]ss{}
        }
    }
}

func (l *lockOrder) preLock(skip int, p identifiable, gid int64, checkRecursiveLocking bool) {
    // With no timeout watchdog (DeadlockTimeout <= 0) the checks cannot run
    // later, so they must happen up front; otherwise run them only when asked.
    if Opts.DeadlockTimeout <= 0 || checkRecursiveLocking {
        l.checkRecursiveLocking(skip, p, gid)
        l.checkLockOrdering(skip, p, gid)
    }
    l.storeLockOrder(skip, p, gid)
}

// pruneLockOrder drops every order entry in which p appears on the "after"
// side. Must be called with l.mu held.
func (l *lockOrder) pruneLockOrder(p identifiable) {
    prunedBa := make([]beforeAfter, 0)
    for ba := range l.order {
        if ba.after == p.id() {
            // Remove this entry.
            prunedBa = append(prunedBa, ba)
        }
    }
    for _, ba := range prunedBa {
        delete(l.order, ba)
    }
}

func (l *lockOrder) postUnlock(p identifiable) {
    l.mu.Lock()
    delete(l.cur, p.id())
    l.pruneLockOrder(p)
    l.mu.Unlock()
}

type rlocker RWMutex

func (r *rlocker) Lock()   { (*RWMutex)(r).RLock() }
func (r *rlocker) Unlock() { (*RWMutex)(r).RUnlock() }

// other must be called with l.mu held.
func (l *lockOrder) other(ptr identifiable) {
    empty := true
    for k := range l.cur {
        if k == ptr.id() {
            continue
        }
        empty = false
        break
    }
    if empty {
        return
    }
    fmt.Fprintln(Opts.LogBuf, "Other goroutines holding locks:")
    for k, pp := range l.cur {
        if k == ptr.id() {
            continue
        }
        fmt.Fprintf(Opts.LogBuf, "goroutine %v lock %x\n", pp.gid, k)
        printStack(Opts.LogBuf, pp.stack)
    }
    fmt.Fprintln(Opts.LogBuf)
}

const header = "POTENTIAL DEADLOCK:"