runtime: eliminate blocking GC work drains
Now work.helperDrainBlock is always false, so we can remove it and
code paths that only ran when it was true. That means we no longer use
the gcDrainBlock mode of gcDrain, so we can eliminate that. That means
we no longer use gcWork.get, so we can eliminate that. That means we
no longer use getfull, so we can eliminate that.

Updates #26903. This is a follow-up to unifying STW GC and concurrent GC.

Change-Id: I8dbcf8ce24861df0a6149e0b7c5cd0eadb5c13f6
Reviewed-on: https://go-review.googlesource.com/c/134782
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
aclements committed Oct 2, 2018
1 parent 143b13a commit 457c8f4
Showing 3 changed files with 18 additions and 139 deletions.
src/runtime/mgc.go: 25 changes (4 additions & 21 deletions)
@@ -944,14 +944,6 @@ var work struct {
 	ndone   uint32
 	alldone note
 
-	// helperDrainBlock indicates that GC mark termination helpers
-	// should pass gcDrainBlock to gcDrain to block in the
-	// getfull() barrier. Otherwise, they should pass gcDrainNoBlock.
-	//
-	// TODO: This is a temporary fallback to work around races
-	// that cause early mark termination.
-	helperDrainBlock bool
-
 	// Number of roots of various root types. Set by gcMarkRootPrepare.
 	nFlushCacheRoots                               int
 	nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int
@@ -1528,7 +1520,7 @@ func gcMarkTermination(nextTriggerRatio float64) {
 		gcResetMarkState()
 		initCheckmarks()
 		gcw := &getg().m.p.ptr().gcw
-		gcDrain(gcw, gcDrainNoBlock)
+		gcDrain(gcw, 0)
 		wbBufFlush1(getg().m.p.ptr())
 		gcw.dispose()
 		clearCheckmarks()
@@ -1814,7 +1806,7 @@ func gcBgMarkWorker(_p_ *p) {
 				}
 				// Go back to draining, this time
 				// without preemption.
-				gcDrain(&_p_.gcw, gcDrainNoBlock|gcDrainFlushBgCredit)
+				gcDrain(&_p_.gcw, gcDrainFlushBgCredit)
 			case gcMarkWorkerFractionalMode:
 				gcDrain(&_p_.gcw, gcDrainFractional|gcDrainUntilPreempt|gcDrainFlushBgCredit)
 			case gcMarkWorkerIdleMode:
@@ -1905,7 +1897,6 @@ func gcMark(start_time int64) {
 	work.nwait = 0
 	work.ndone = 0
 	work.nproc = uint32(gcprocs())
-	work.helperDrainBlock = false
 
 	// Check that there's no marking work remaining.
 	if work.full != 0 || work.nDataRoots+work.nBSSRoots+work.nSpanRoots+work.nStackRoots != 0 {
@@ -1921,11 +1912,7 @@
 	gchelperstart()
 
 	gcw := &getg().m.p.ptr().gcw
-	if work.helperDrainBlock {
-		gcDrain(gcw, gcDrainBlock)
-	} else {
-		gcDrain(gcw, gcDrainNoBlock)
-	}
+	gcDrain(gcw, 0)
 
 	if debug.gccheckmark > 0 {
 		// This is expensive when there's a large number of
@@ -2119,11 +2106,7 @@ func gchelper() {
 	// Parallel mark over GC roots and heap
 	if gcphase == _GCmarktermination {
 		gcw := &_g_.m.p.ptr().gcw
-		if work.helperDrainBlock {
-			gcDrain(gcw, gcDrainBlock) // blocks in getfull
-		} else {
-			gcDrain(gcw, gcDrainNoBlock)
-		}
+		gcDrain(gcw, 0)
 	}
 
 	nproc := atomic.Load(&work.nproc) // work.nproc can change right after we increment work.ndone
src/runtime/mgcmark.go: 44 changes (13 additions & 31 deletions)
@@ -771,34 +771,26 @@ type gcDrainFlags int
 
 const (
 	gcDrainUntilPreempt gcDrainFlags = 1 << iota
-	gcDrainNoBlock
 	gcDrainFlushBgCredit
 	gcDrainIdle
 	gcDrainFractional
-
-	// gcDrainBlock means neither gcDrainUntilPreempt or
-	// gcDrainNoBlock. It is the default, but callers should use
-	// the constant for documentation purposes.
-	gcDrainBlock gcDrainFlags = 0
 )
 
 // gcDrain scans roots and objects in work buffers, blackening grey
-// objects until all roots and work buffers have been drained.
+// objects until it is unable to get more work. It may return before
+// GC is done; it's the caller's responsibility to balance work from
+// other Ps.
 //
 // If flags&gcDrainUntilPreempt != 0, gcDrain returns when g.preempt
-// is set. This implies gcDrainNoBlock.
+// is set.
 //
 // If flags&gcDrainIdle != 0, gcDrain returns when there is other work
-// to do. This implies gcDrainNoBlock.
+// to do.
 //
 // If flags&gcDrainFractional != 0, gcDrain self-preempts when
-// pollFractionalWorkerExit() returns true. This implies
-// gcDrainNoBlock.
-//
-// If flags&gcDrainNoBlock != 0, gcDrain returns as soon as it is
-// unable to get more work. Otherwise, it will block until all
-// blocking calls are blocked in gcDrain.
+// pollFractionalWorkerExit() returns true.
 //
 // If flags&gcDrainFlushBgCredit != 0, gcDrain flushes scan work
 // credit to gcController.bgScanCredit every gcCreditSlack units of
 // scan work.
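
With gcDrainBlock and gcDrainNoBlock gone, the remaining flags compose as ordinary bits, and the zero value now means "drain until no more work is available". As an orientation aid, here is a small runnable model of the bitmask; the drainFlags type and names below only imitate the constants above (this is not runtime code), and the combinations mirror the call sites visible in this commit:

    package main

    import "fmt"

    // drainFlags models the gcDrainFlags bitmask for illustration only.
    type drainFlags int

    const (
        drainUntilPreempt drainFlags = 1 << iota
        drainFlushBgCredit
        drainIdle
        drainFractional
    )

    func main() {
        // These combinations mirror the call sites in this commit;
        // flags == 0 is the new default non-blocking drain.
        calls := []struct {
            who   string
            flags drainFlags
        }{
            {"mark termination helper", 0},
            {"dedicated worker (no-preempt phase)", drainFlushBgCredit},
            {"fractional worker", drainFractional | drainUntilPreempt | drainFlushBgCredit},
        }
        for _, c := range calls {
            fmt.Printf("%-36s preempt=%v idle=%v fractional=%v credit=%v\n",
                c.who,
                c.flags&drainUntilPreempt != 0,
                c.flags&drainIdle != 0,
                c.flags&drainFractional != 0,
                c.flags&drainFlushBgCredit != 0)
        }
    }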
@@ -811,7 +803,6 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
 
 	gp := getg().m.curg
 	preemptible := flags&gcDrainUntilPreempt != 0
-	blocking := flags&(gcDrainUntilPreempt|gcDrainIdle|gcDrainFractional|gcDrainNoBlock) == 0
 	flushBgCredit := flags&gcDrainFlushBgCredit != 0
 	idle := flags&gcDrainIdle != 0
 
@@ -855,24 +846,19 @@
 			gcw.balance()
 		}
 
-		var b uintptr
-		if blocking {
-			b = gcw.get()
-		} else {
-			b = gcw.tryGetFast()
-			if b == 0 {
-				b = gcw.tryGet()
-				if b == 0 {
-					// Flush the write barrier
-					// buffer; this may create
-					// more work.
-					wbBufFlush(nil, 0)
-					b = gcw.tryGet()
-				}
-			}
-		}
+		b := gcw.tryGetFast()
+		if b == 0 {
+			b = gcw.tryGet()
+			if b == 0 {
+				// Flush the write barrier
+				// buffer; this may create
+				// more work.
+				wbBufFlush(nil, 0)
+				b = gcw.tryGet()
+			}
+		}
 		if b == 0 {
-			// work barrier reached or tryGet failed.
+			// Unable to get work.
 			break
 		}
 		scanobject(b, gcw)
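
The replacement loop body is the core of the change: rather than parking in the getfull() barrier, the drain tries progressively more expensive non-blocking sources, flushes the write-barrier buffer once (which can create new work), and otherwise gives up so the caller can rebalance. Below is a self-contained sketch of that cascade; workCache and its methods are stand-ins invented for this example, not the runtime's gcWork API:

    package main

    import "fmt"

    // workCache loosely models the per-P gcWork.
    type workCache struct {
        local  []uintptr  // stands in for the cached wbuf1/wbuf2
        global *[]uintptr // stands in for the shared work.full list
        wbBuf  []uintptr  // pointers queued by write barriers, not yet mark work
    }

    // tryGetFast models gcWork.tryGetFast: local cache only.
    func (w *workCache) tryGetFast() uintptr {
        if n := len(w.local); n > 0 {
            p := w.local[n-1]
            w.local = w.local[:n-1]
            return p
        }
        return 0
    }

    // tryGet models gcWork.tryGet: local cache, then the global list.
    func (w *workCache) tryGet() uintptr {
        if p := w.tryGetFast(); p != 0 {
            return p
        }
        if n := len(*w.global); n > 0 {
            p := (*w.global)[n-1]
            *w.global = (*w.global)[:n-1]
            return p
        }
        return 0
    }

    // flush models wbBufFlush(nil, 0): draining the write-barrier
    // buffer can create new mark work.
    func (w *workCache) flush() {
        w.local = append(w.local, w.wbBuf...)
        w.wbBuf = nil
    }

    // acquire mirrors the new loop body in gcDrain: fast path, slow
    // path, flush-and-retry, then give up (0 makes the caller break).
    func (w *workCache) acquire() uintptr {
        b := w.tryGetFast()
        if b == 0 {
            b = w.tryGet()
            if b == 0 {
                w.flush()
                b = w.tryGet()
            }
        }
        return b
    }

    func main() {
        global := []uintptr{0x300}
        w := &workCache{local: []uintptr{0x100}, global: &global, wbBuf: []uintptr{0x200}}
        for b := w.acquire(); b != 0; b = w.acquire() {
            fmt.Printf("got work %#x\n", b)
        }
    }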
@@ -898,10 +884,6 @@
 		}
 	}
 
-	// In blocking mode, write barriers are not allowed after this
-	// point because we must preserve the condition that the work
-	// buffers are empty.
-
 done:
 	// Flush remaining scan work credit.
 	if gcw.scanWork > 0 {
src/runtime/mgcwork.go: 88 changes (1 addition & 87 deletions)
@@ -46,7 +46,7 @@ func init() {
 //
 //	(preemption must be disabled)
 //	gcw := &getg().m.p.ptr().gcw
-//	.. call gcw.put() to produce and gcw.get() to consume ..
+//	.. call gcw.put() to produce and gcw.tryGet() to consume ..
 //
 // It's important that any use of gcWork during the mark phase prevent
 // the garbage collector from transitioning to mark termination since
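
Spelled out, the produce/consume contract in that comment looks roughly like the sketch below. This is runtime-internal pseudocode that will not compile outside the runtime; obj is a placeholder, and the loop is illustrative rather than a quote of any call site in this commit:

    // Sketch only (runtime-internal, illustrative).
    gcw := &getg().m.p.ptr().gcw // per-P work cache; preemption must be disabled
    gcw.put(obj)                 // produce: publish a grey object
    for {
        b := gcw.tryGet() // consume: 0 means no work is currently visible
        if b == 0 {
            break // does not mean global marking is done; see gcDrain's doc
        }
        scanobject(b, gcw) // scanning may gcw.put() more grey objects
    }
    gcw.dispose() // return cached buffers to the global lists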
@@ -236,37 +236,6 @@ func (w *gcWork) tryGetFast() uintptr {
 	return wbuf.obj[wbuf.nobj]
 }
 
-// get dequeues a pointer for the garbage collector to trace, blocking
-// if necessary to ensure all pointers from all queues and caches have
-// been retrieved. get returns 0 if there are no pointers remaining.
-//go:nowritebarrierrec
-func (w *gcWork) get() uintptr {
-	wbuf := w.wbuf1
-	if wbuf == nil {
-		w.init()
-		wbuf = w.wbuf1
-		// wbuf is empty at this point.
-	}
-	if wbuf.nobj == 0 {
-		w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
-		wbuf = w.wbuf1
-		if wbuf.nobj == 0 {
-			owbuf := wbuf
-			wbuf = getfull()
-			if wbuf == nil {
-				return 0
-			}
-			putempty(owbuf)
-			w.wbuf1 = wbuf
-		}
-	}
-
-	// TODO: This might be a good place to add prefetch code
-
-	wbuf.nobj--
-	return wbuf.obj[wbuf.nobj]
-}
-
 // dispose returns any cached pointers to the global queue.
 // The buffers are being put on the full queue so that the
 // write barriers will not simply reacquire them before the
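
The double buffering that the removed get() relied on survives in tryGet: wbuf1 is the primary cache, wbuf2 the spare, and only when both run dry does the collector touch the global lists. A toy model of that scheme follows; twoBuf and refill are inventions of this sketch, standing in for gcWork and the getfull/trygetfull fallback:

    package main

    import "fmt"

    // twoBuf is a toy model of gcWork's wbuf1/wbuf2 double buffering.
    type twoBuf struct {
        wbuf1, wbuf2 []uintptr
        refill       func() []uintptr // global-list fallback; nil means no work left
    }

    func (w *twoBuf) get() (uintptr, bool) {
        if len(w.wbuf1) == 0 {
            // Primary empty: swap in the secondary.
            w.wbuf1, w.wbuf2 = w.wbuf2, w.wbuf1
            if len(w.wbuf1) == 0 {
                // Both empty: fall back to the global queue.
                if w.wbuf1 = w.refill(); len(w.wbuf1) == 0 {
                    return 0, false
                }
            }
        }
        p := w.wbuf1[len(w.wbuf1)-1]
        w.wbuf1 = w.wbuf1[:len(w.wbuf1)-1]
        return p, true
    }

    func main() {
        global := [][]uintptr{{0x30, 0x40}}
        w := &twoBuf{
            wbuf1: []uintptr{0x10},
            wbuf2: []uintptr{0x20},
            refill: func() []uintptr {
                if len(global) == 0 {
                    return nil
                }
                buf := global[0]
                global = global[1:]
                return buf
            },
        }
        for p, ok := w.get(); ok; p, ok = w.get() {
            fmt.Printf("traced %#x\n", p)
        }
    }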
Expand Down Expand Up @@ -449,61 +418,6 @@ func trygetfull() *workbuf {
return b
}

// Get a full work buffer off the work.full list.
// If nothing is available wait until all the other gc helpers have
// finished and then return nil.
// getfull acts as a barrier for work.nproc helpers. As long as one
// gchelper is actively marking objects it
// may create a workbuffer that the other helpers can work on.
// The for loop either exits when a work buffer is found
// or when _all_ of the work.nproc GC helpers are in the loop
// looking for work and thus not capable of creating new work.
// This is in fact the termination condition for the STW mark
// phase.
//go:nowritebarrier
func getfull() *workbuf {
b := (*workbuf)(work.full.pop())
if b != nil {
b.checknonempty()
return b
}

incnwait := atomic.Xadd(&work.nwait, +1)
if incnwait > work.nproc {
println("runtime: work.nwait=", incnwait, "work.nproc=", work.nproc)
throw("work.nwait > work.nproc")
}
for i := 0; ; i++ {
if work.full != 0 {
decnwait := atomic.Xadd(&work.nwait, -1)
if decnwait == work.nproc {
println("runtime: work.nwait=", decnwait, "work.nproc=", work.nproc)
throw("work.nwait > work.nproc")
}
b = (*workbuf)(work.full.pop())
if b != nil {
b.checknonempty()
return b
}
incnwait := atomic.Xadd(&work.nwait, +1)
if incnwait > work.nproc {
println("runtime: work.nwait=", incnwait, "work.nproc=", work.nproc)
throw("work.nwait > work.nproc")
}
}
if work.nwait == work.nproc && work.markrootNext >= work.markrootJobs {
return nil
}
if i < 10 {
procyield(20)
} else if i < 20 {
osyield()
} else {
usleep(100)
}
}
}

//go:nowritebarrier
func handoff(b *workbuf) *workbuf {
// Make new buffer with half of b's pointers.
Expand Down
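
The doc comment above is the clearest statement of why getfull() could serve as a termination barrier: once all work.nproc helpers are waiting, no one can create new work, so STW marking must be complete. A distilled, runnable model of that barrier follows; it uses channels and sync/atomic as stand-ins for the runtime's lock-free lists, keeps the real code's decrement-before-pop ordering, and omits the markrootNext/markrootJobs condition and the procyield/osyield/usleep backoff:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    // barrier distills the termination logic of the removed getfull():
    // a helper with no work increments nwait; when nwait == nproc,
    // every helper is idle, so no one can publish new buffers and
    // marking must be done.
    type barrier struct {
        nproc int32
        nwait int32
        full  chan int // stands in for the work.full list of buffers
    }

    func (b *barrier) getFull() (int, bool) {
        select {
        case buf := <-b.full:
            return buf, true
        default:
        }
        atomic.AddInt32(&b.nwait, 1) // declare this helper idle
        for {
            if len(b.full) > 0 {
                atomic.AddInt32(&b.nwait, -1) // active again, before taking
                select {
                case buf := <-b.full:
                    return buf, true
                default:
                    atomic.AddInt32(&b.nwait, 1) // lost the race; idle again
                }
                continue
            }
            if atomic.LoadInt32(&b.nwait) == b.nproc {
                return 0, false // all helpers idle: termination barrier trips
            }
        }
    }

    func main() {
        b := &barrier{nproc: 4, full: make(chan int, 8)}
        for i := 0; i < 6; i++ {
            b.full <- i // seed some "full work buffers"
        }
        var wg sync.WaitGroup
        for g := 0; g < int(b.nproc); g++ {
            wg.Add(1)
            go func(id int) {
                defer wg.Done()
                for {
                    buf, ok := b.getFull()
                    if !ok {
                        return
                    }
                    fmt.Printf("helper %d drained buffer %d\n", id, buf)
                }
            }(g)
        }
        wg.Wait()
    }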
