runtime: copy more of scheduler from Go 1.7 runtime
This started by moving procresize from C to Go so that we can pass the
right type to the memory allocator when allocating a p, which forced the
gomaxprocs variable to move from C to Go, and everything else followed
from that.

Reviewed-on: https://go-review.googlesource.com/34916

From-SVN: r244236
Parent: d1261ac6eb
Commit: 2193ad7fbf

16 changed files with 1997 additions and 1542 deletions
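Note: the message's point about "passing the right type to the memory allocator when allocating a p" is about typed allocation. A minimal, self-contained sketch of the idea with toy stand-in types (hypothetical names, not the ported procresize code):

package main

import "fmt"

// Toy stand-in for the runtime's p; the real struct has many more fields.
type p struct {
	id     int32
	status uint32
}

var allp [4]*p

// procresizeSketch mirrors the shape of the change the message describes:
// each p is created with new(p), a typed Go allocation the garbage
// collector can scan, rather than raw memory handed back by C code.
func procresizeSketch(nprocs int32) {
	for i := int32(0); i < nprocs; i++ {
		if allp[i] == nil {
			allp[i] = new(p)
			allp[i].id = i
		}
	}
}

func main() {
	procresizeSketch(2)
	fmt.Println("initialized Ps:", allp[0].id, allp[1].id)
}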
@@ -1,4 +1,4 @@
eef0fb3b092dc22d9830cac15a536760da5d033a
189ea81cc758e000325fd6cca7882c252d33f8f0

The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
@@ -14,7 +14,25 @@ import (
// change the current setting.
// The number of logical CPUs on the local machine can be queried with NumCPU.
// This call will go away when the scheduler improves.
func GOMAXPROCS(n int) int
func GOMAXPROCS(n int) int {
	if n > _MaxGomaxprocs {
		n = _MaxGomaxprocs
	}
	lock(&sched.lock)
	ret := int(gomaxprocs)
	unlock(&sched.lock)
	if n <= 0 || n == ret {
		return ret
	}

	stopTheWorld("GOMAXPROCS")

	// newprocs will be processed by startTheWorld
	newprocs = int32(n)

	startTheWorld()
	return ret
}

// NumCPU returns the number of logical CPUs usable by the current process.
//
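Note: the exported behavior of the new body matches the documented runtime.GOMAXPROCS contract — a non-positive argument only reports the current setting, a positive one stops the world and installs the new value. A small standalone usage example (not part of the change):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// n <= 0 queries the current setting without changing it.
	cur := runtime.GOMAXPROCS(0)
	fmt.Println("current GOMAXPROCS:", cur)

	// A positive n stops the world, records the request, and returns the
	// previous setting; the new value takes effect when the world restarts.
	old := runtime.GOMAXPROCS(2)
	fmt.Println("was:", old, "now:", runtime.GOMAXPROCS(0))
}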
@@ -7,6 +7,7 @@
package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)
@@ -47,39 +48,6 @@ func GCMask(x interface{}) (ret []byte) {
	return nil
}

//func testSchedLocalQueue()
//func testSchedLocalQueueSteal()
//
//func RunSchedLocalQueueTest() {
//	testSchedLocalQueue()
//}
//
//func RunSchedLocalQueueStealTest() {
//	testSchedLocalQueueSteal()
//}

//var StringHash = stringHash
//var BytesHash = bytesHash
//var Int32Hash = int32Hash
//var Int64Hash = int64Hash
//var EfaceHash = efaceHash
//var IfaceHash = ifaceHash
//var MemclrBytes = memclrBytes

var HashLoad = &hashLoad

// entry point for testing
//func GostringW(w []uint16) (s string) {
//	s = gostringw(&w[0])
//	return
//}

//var Gostringnocopy = gostringnocopy
//var Maxstring = &maxstring

//type Uintreg uintreg

/*
func RunSchedLocalQueueTest() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
@@ -177,14 +145,26 @@ func RunSchedLocalQueueEmptyTest(iters int) {
	}
}

var StringHash = stringHash
var BytesHash = bytesHash
var Int32Hash = int32Hash
var Int64Hash = int64Hash
var EfaceHash = efaceHash
var IfaceHash = ifaceHash
var MemclrBytes = memclrBytes
*/
//var StringHash = stringHash
//var BytesHash = bytesHash
//var Int32Hash = int32Hash
//var Int64Hash = int64Hash
//var EfaceHash = efaceHash
//var IfaceHash = ifaceHash
//var MemclrBytes = memclrBytes

var HashLoad = &hashLoad

// entry point for testing
//func GostringW(w []uint16) (s string) {
//	s = gostringw(&w[0])
//	return
//}

//var Gostringnocopy = gostringnocopy
//var Maxstring = &maxstring

//type Uintreg uintreg

var Open = open
var Close = closefd
@@ -149,13 +149,9 @@ func notewakeup(n *note) {

func notesleep(n *note) {
	gp := getg()

	// Currently OK to sleep in non-g0 for gccgo. It happens in
	// stoptheworld because we have not implemented preemption.
	// if gp != gp.m.g0 {
	// throw("notesleep not on g0")
	// }

	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	for atomic.Load(key32(&n.key)) == 0 {
		gp.m.blocked = true
		futexsleep(key32(&n.key), 0, -1)
@@ -202,10 +198,13 @@ func notetsleep_internal(n *note, ns int64) bool {
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.preemptoff != "" {
		throw("notetsleep not on g0")
	}
	// Currently OK to sleep in non-g0 for gccgo. It happens in
	// stoptheworld because our version of systemstack does not
	// change to g0.
	// gp := getg()
	// if gp != gp.m.g0 && gp.m.preemptoff != "" {
	// throw("notetsleep not on g0")
	// }

	return notetsleep_internal(n, ns)
}
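Note: notesleep/notewakeup implement a one-shot wakeup. The futex details above are runtime-internal; the observable semantics can be modeled with a channel, as in this toy sketch (not the runtime implementation, which must run on g0 and uses futexes or semaphores):

package main

import "fmt"

// Toy model of the runtime's one-shot note: notesleep blocks until
// notewakeup has been called; noteclear re-arms it.
type note struct{ ch chan struct{} }

func noteclear(n *note)  { n.ch = make(chan struct{}) }
func notewakeup(n *note) { close(n.ch) }
func notesleep(n *note)  { <-n.ch }

func main() {
	var n note
	noteclear(&n)
	go func() {
		fmt.Println("worker finished")
		notewakeup(&n)
	}()
	notesleep(&n)
	fmt.Println("sleeper woken")
}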
@@ -162,13 +162,9 @@ func notewakeup(n *note) {

func notesleep(n *note) {
	gp := getg()

	// Currently OK to sleep in non-g0 for gccgo. It happens in
	// stoptheworld because we have not implemented preemption.
	// if gp != gp.m.g0 {
	// throw("notesleep not on g0")
	// }

	if gp != gp.m.g0 {
		throw("notesleep not on g0")
	}
	semacreate(gp.m)
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
@@ -257,7 +253,8 @@ func notetsleep(n *note, ns int64) bool {
	gp := getg()

	// Currently OK to sleep in non-g0 for gccgo. It happens in
	// stoptheworld because we have not implemented preemption.
	// stoptheworld because our version of systemstack does not
	// change to g0.
	// if gp != gp.m.g0 && gp.m.preemptoff != "" {
	// throw("notetsleep not on g0")
	// }
File diff suppressed because it is too large
@@ -556,19 +556,14 @@ func nonleaf(stop chan int) bool {
	}
}

/*
func TestSchedLocalQueue(t *testing.T) {
	runtime.TestSchedLocalQueue1()
	runtime.RunSchedLocalQueueTest()
}
*/

/*
func TestSchedLocalQueueSteal(t *testing.T) {
	runtime.TestSchedLocalQueueSteal1()
	runtime.RunSchedLocalQueueStealTest()
}
*/

/*
func TestSchedLocalQueueEmpty(t *testing.T) {
	if runtime.NumCPU() == 1 {
		// Takes too long and does not trigger the race.
@@ -586,7 +581,6 @@ func TestSchedLocalQueueEmpty(t *testing.T) {
	}
	runtime.RunSchedLocalQueueEmptyTest(iters)
}
*/

func benchmarkStackGrowth(b *testing.B, rec int) {
	b.RunParallel(func(pb *testing.PB) {
@@ -5,6 +5,7 @@
package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)
@@ -203,12 +204,10 @@ func (gp guintptr) ptr() *g { return (*g)(unsafe.Pointer(gp)) }
//go:nosplit
func (gp *guintptr) set(g *g) { *gp = guintptr(unsafe.Pointer(g)) }

/*
//go:nosplit
func (gp *guintptr) cas(old, new guintptr) bool {
	return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new))
}
*/

type puintptr uintptr
@@ -358,8 +357,8 @@ type g struct {
	sigpc uintptr
	gopc uintptr // pc of go statement that created this goroutine
	startpc uintptr // pc of goroutine function
	racectx uintptr
	waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	// Not for gccgo: racectx uintptr
	waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
	// Not for gccgo: cgoCtxt []uintptr // cgo traceback context

	// Per-G GC state
@@ -521,16 +520,16 @@ type p struct {
	gfreecnt int32

	sudogcache []*sudog
	// Not for gccgo for now: sudogbuf [128]*sudog
	sudogbuf [128]*sudog

	// Not for gccgo for now: tracebuf traceBufPtr
	tracebuf traceBufPtr

	// Not for gccgo for now: palloc persistentAlloc // per-P to avoid mutex

	// Per-P GC state
	// Not for gccgo for now: gcAssistTime int64 // Nanoseconds in assistAlloc
	// Not for gccgo for now: gcBgMarkWorker guintptr
	// Not for gccgo for now: gcMarkWorkerMode gcMarkWorkerMode
	gcAssistTime int64 // Nanoseconds in assistAlloc
	gcBgMarkWorker guintptr
	gcMarkWorkerMode gcMarkWorkerMode

	// gcw is this P's GC work buffer cache. The work buffer is
	// filled by write barriers, drained by mutator assists, and
@@ -760,18 +759,13 @@ var (

	// allm *m

	allp [_MaxGomaxprocs + 1]*p

	// gomaxprocs int32

	panicking uint32
	ncpu int32

	// forcegc forcegcstate

	sched schedt

	// newprocs int32
	allp [_MaxGomaxprocs + 1]*p
	gomaxprocs int32
	panicking uint32
	ncpu int32
	forcegc forcegcstate
	sched schedt
	newprocs int32

	// Information about what cpu features are available.
	// Set on startup.
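Note: the variable block above is where gomaxprocs and newprocs stop being C globals. The handshake GOMAXPROCS relies on — stop the world, record the request in newprocs, let the restart path apply it — can be sketched with stand-in globals (simplified, not the ported scheduler code):

package main

import "fmt"

// Stand-ins for the runtime globals involved in the handshake.
var (
	gomaxprocs int32 = 1
	newprocs   int32
)

func stopTheWorldSketch()      { /* park all Ps and Ms; omitted */ }
func procresizeSketch(n int32) { gomaxprocs = n /* grow or shrink allp; omitted */ }

// startTheWorldSketch mirrors the ordering used by the runtime: if a new
// processor count was requested while the world was stopped, procresize
// applies it before goroutines run again.
func startTheWorldSketch() {
	if newprocs != 0 {
		procresizeSketch(newprocs)
		newprocs = 0
	}
	/* unpark Ps and Ms; omitted */
}

func main() {
	stopTheWorldSketch()
	newprocs = 4
	startTheWorldSketch()
	fmt.Println("gomaxprocs is now", gomaxprocs)
}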
@@ -304,6 +304,7 @@ const (
	_64bit = 1 << (^uintptr(0) >> 63) / 2
	_MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*32
	_MaxMem = uintptr(1<<_MHeapMap_TotalBits - 1)
	_MaxGcproc = 32
)

// Here for gccgo until we port malloc.go.
@@ -350,7 +351,6 @@ func entersyscallblock(int32)
func exitsyscall(int32)
func gopark(func(*g, unsafe.Pointer) bool, unsafe.Pointer, string, byte, int)
func goparkunlock(*mutex, string, byte, int)
func goready(*g, int)

// Temporary hack for gccgo until we port proc.go.
//go:nosplit
@@ -411,12 +411,6 @@ func roundupsize(uintptr) uintptr
// Here for gccgo until we port mgc.go.
func GC()

// Here for gccgo until we port proc.go.
var worldsema uint32 = 1

func stopTheWorldWithSema()
func startTheWorldWithSema()

// For gccgo to call from C code.
//go:linkname acquireWorldsema runtime.acquireWorldsema
func acquireWorldsema() {
@@ -429,26 +423,6 @@ func releaseWorldsema() {
	semrelease(&worldsema)
}

// Here for gccgo until we port proc.go.
func stopTheWorld(reason string) {
	semacquire(&worldsema, false)
	getg().m.preemptoff = reason
	getg().m.gcing = 1
	systemstack(stopTheWorldWithSema)
}

// Here for gccgo until we port proc.go.
func startTheWorld() {
	getg().m.gcing = 0
	getg().m.locks++
	systemstack(startTheWorldWithSema)
	// worldsema must be held over startTheWorldWithSema to ensure
	// gomaxprocs cannot change while worldsema is held.
	semrelease(&worldsema)
	getg().m.preemptoff = ""
	getg().m.locks--
}

// For gccgo to call from C code, so that the C code and the Go code
// can share the memstats variable for now.
//go:linkname getMstats runtime.getMstats
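Note: the comment about worldsema is the key invariant here — the semaphore is held from stopTheWorld until startTheWorldWithSema has finished, so gomaxprocs cannot change in between. A toy illustration of that ordering, with a mutex standing in for the runtime's semaphore (not the runtime code):

package main

import (
	"fmt"
	"sync"
)

// worldsema stand-in; in the runtime it is a counting semaphore.
var worldsema sync.Mutex

func stopTheWorldSketch(reason string) {
	worldsema.Lock() // acquired first, released only after the restart
	fmt.Println("world stopped:", reason)
}

func startTheWorldSketch() {
	// Restart goroutines first (startTheWorldWithSema in the real code),
	// then release, so the processor count cannot change in the window.
	fmt.Println("world restarted")
	worldsema.Unlock()
}

func main() {
	stopTheWorldSketch("example")
	// scheduler-global state may be mutated safely here
	startTheWorldSketch()
}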
@@ -461,6 +435,7 @@ func setcpuprofilerate_m(hz int32)

// Temporary for gccgo until we port mem_GOOS.go.
func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer
func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64)

// Temporary for gccgo until we port proc.go, so that the C signal
// handler can call into cpuprof.
@@ -522,7 +497,6 @@ func getZerobase() *uintptr {
func sigprof()
func mcount() int32
func goexit1()
func freezetheworld()

// Get signal trampoline, written in C.
func getSigtramp() uintptr
@@ -592,6 +566,7 @@ func getPanicking() uint32 {

// Temporary for gccgo until we port mcache.go.
func allocmcache() *mcache
func freemcache(*mcache)

// Temporary for gccgo until we port mgc.go.
// This is just so that allgadd will compile.
@@ -616,3 +591,60 @@ func gcount() int32 {
	unlock(&allglock)
	return n
}

// Temporary for gccgo until we port mgc.go.
var gcBlackenEnabled uint32

// Temporary for gccgo until we port mgc.go.
func gcMarkWorkAvailable(p *p) bool {
	return false
}

// Temporary for gccgo until we port mgc.go.
var gcController gcControllerState

// Temporary for gccgo until we port mgc.go.
type gcControllerState struct {
}

// Temporary for gccgo until we port mgc.go.
func (c *gcControllerState) findRunnableGCWorker(_p_ *p) *g {
	return nil
}

// Temporary for gccgo until we port mgc.go.
var gcphase uint32

// Temporary for gccgo until we port mgc.go.
const (
	_GCoff = iota
	_GCmark
	_GCmarktermination
)

// Temporary for gccgo until we port mgc.go.
type gcMarkWorkerMode int

// Temporary for gccgo until we port mgc.go.
const (
	gcMarkWorkerDedicatedMode gcMarkWorkerMode = iota
	gcMarkWorkerFractionalMode
	gcMarkWorkerIdleMode
)

// Temporary for gccgo until we port mheap.go.
type mheap struct {
}

// Temporary for gccgo until we port mheap.go.
var mheap_ mheap

// Temporary for gccgo until we port mheap.go.
func (h *mheap) scavenge(k int32, now, limit uint64) {
}

// Temporary for gccgo until we initialize ncpu in Go.
//go:linkname setncpu runtime.setncpu
func setncpu(n int32) {
	ncpu = n
}
@@ -127,10 +127,10 @@ var trace struct {

// traceBufHeader is per-P tracing buffer.
type traceBufHeader struct {
	link traceBufPtr // in trace.empty/full
	lastTicks uint64 // when we wrote the last event
	pos int // next write offset in arr
	stk [traceStackSize]uintptr // scratch buffer for traceback
	link traceBufPtr // in trace.empty/full
	lastTicks uint64 // when we wrote the last event
	pos int // next write offset in arr
	stk [traceStackSize]location // scratch buffer for traceback
}

// traceBuf is per-P tracing buffer.
@@ -152,9 +152,6 @@ func traceBufPtrOf(b *traceBuf) traceBufPtr {
	return traceBufPtr(unsafe.Pointer(b))
}

/*
Commented out for gccgo for now.

// StartTrace enables tracing for the current process.
// While tracing, the data will be buffered and available via ReadTrace.
// StartTrace returns an error if tracing is already enabled.
@@ -522,13 +519,7 @@ func traceEvent(ev byte, skip int, args ...uint64) {
	if gp == _g_ {
		nstk = callers(skip, buf.stk[:])
	} else if gp != nil {
		gp = mp.curg
		// This may happen when tracing a system call,
		// so we must lock the stack.
		if gcTryLockStackBarriers(gp) {
			nstk = gcallers(gp, skip, buf.stk[:])
			gcUnlockStackBarriers(gp)
		}
		// FIXME: get stack trace of different goroutine.
	}
	if nstk > 0 {
		nstk-- // skip runtime.goexit
@@ -647,8 +638,6 @@ func (buf *traceBuf) byte(v byte) {
	buf.pos++
}

*/

// traceStackTable maps stack traces (arrays of PC's) to unique uint32 ids.
// It is lock-free for reading.
type traceStackTable struct {
@@ -664,28 +653,30 @@ type traceStack struct {
	hash uintptr
	id uint32
	n int
	stk [0]uintptr // real type [n]uintptr
	stk [0]location // real type [n]location
}

type traceStackPtr uintptr

/*
Commented out for gccgo for now.

func (tp traceStackPtr) ptr() *traceStack { return (*traceStack)(unsafe.Pointer(tp)) }

// stack returns slice of PCs.
func (ts *traceStack) stack() []uintptr {
	return (*[traceStackSize]uintptr)(unsafe.Pointer(&ts.stk))[:ts.n]
func (ts *traceStack) stack() []location {
	return (*[traceStackSize]location)(unsafe.Pointer(&ts.stk))[:ts.n]
}

// put returns a unique id for the stack trace pcs and caches it in the table,
// if it sees the trace for the first time.
func (tab *traceStackTable) put(pcs []uintptr) uint32 {
func (tab *traceStackTable) put(pcs []location) uint32 {
	if len(pcs) == 0 {
		return 0
	}
	hash := memhash(unsafe.Pointer(&pcs[0]), 0, uintptr(len(pcs))*unsafe.Sizeof(pcs[0]))
	var hash uintptr
	for _, loc := range pcs {
		hash += loc.pc
		hash += hash << 10
		hash ^= hash >> 6
	}
	// First, search the hashtable w/o the mutex.
	if id := tab.find(pcs, hash); id != 0 {
		return id
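Note: with entries now location structs, memhash over the raw slice no longer applies, so put mixes each loc.pc directly. Pulled out as a standalone function with a stand-in location type, the hash looks like this:

package main

import "fmt"

// Stand-in for the runtime's location; only pc participates in the hash.
type location struct{ pc uintptr }

// hashLocations reproduces the mixing used by put above: add each pc,
// then scramble with a shift-add and a shift-xor step, a one-at-a-time
// style mix that spreads nearby PCs across the hash table.
func hashLocations(pcs []location) uintptr {
	var hash uintptr
	for _, loc := range pcs {
		hash += loc.pc
		hash += hash << 10
		hash ^= hash >> 6
	}
	return hash
}

func main() {
	stack := []location{{pc: 0x4011a0}, {pc: 0x402f3c}}
	fmt.Printf("stack hash: %#x\n", hashLocations(stack))
}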
@@ -714,7 +705,7 @@ func (tab *traceStackTable) put(pcs []uintptr) uint32 {
}

// find checks if the stack trace pcs is already present in the table.
func (tab *traceStackTable) find(pcs []uintptr, hash uintptr) uint32 {
func (tab *traceStackTable) find(pcs []location, hash uintptr) uint32 {
	part := int(hash % uintptr(len(tab.tab)))
Search:
	for stk := tab.tab[part].ptr(); stk != nil; stk = stk.link.ptr() {
@@ -732,13 +723,12 @@ Search:

// newStack allocates a new stack of size n.
func (tab *traceStackTable) newStack(n int) *traceStack {
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*sys.PtrSize))
	return (*traceStack)(tab.mem.alloc(unsafe.Sizeof(traceStack{}) + uintptr(n)*unsafe.Sizeof(location{})))
}

// dump writes all previously cached stacks to trace buffers,
// releases all memory and resets state.
func (tab *traceStackTable) dump() {
	frames := make(map[uintptr]traceFrame)
	var tmp [(2 + 4*traceStackSize) * traceBytesPerNumber]byte
	buf := traceFlush(0).ptr()
	for _, stk := range tab.tab {
@@ -749,8 +739,8 @@ func (tab *traceStackTable) dump() {
		tmpbuf = traceAppend(tmpbuf, uint64(stk.n))
		for _, pc := range stk.stack() {
			var frame traceFrame
			frame, buf = traceFrameForPC(buf, frames, pc)
			tmpbuf = traceAppend(tmpbuf, uint64(pc))
			frame, buf = traceFrameForPC(buf, pc)
			tmpbuf = traceAppend(tmpbuf, uint64(pc.pc))
			tmpbuf = traceAppend(tmpbuf, uint64(frame.funcID))
			tmpbuf = traceAppend(tmpbuf, uint64(frame.fileID))
			tmpbuf = traceAppend(tmpbuf, uint64(frame.line))
@@ -780,25 +770,15 @@ type traceFrame struct {
	line uint64
}

func traceFrameForPC(buf *traceBuf, frames map[uintptr]traceFrame, pc uintptr) (traceFrame, *traceBuf) {
	if frame, ok := frames[pc]; ok {
		return frame, buf
	}

func traceFrameForPC(buf *traceBuf, loc location) (traceFrame, *traceBuf) {
	var frame traceFrame
	f := findfunc(pc)
	if f == nil {
		frames[pc] = frame
		return frame, buf
	}

	fn := funcname(f)
	fn := loc.function
	const maxLen = 1 << 10
	if len(fn) > maxLen {
		fn = fn[len(fn)-maxLen:]
	}
	frame.funcID, buf = traceString(buf, fn)
	file, line := funcline(f, pc-sys.PCQuantum)
	file, line := loc.filename, loc.lineno
	frame.line = uint64(line)
	if len(file) > maxLen {
		file = file[len(file)-maxLen:]
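Note: traceFrameForPC no longer symbolizes a raw PC itself (findfunc/funcline are gone); the caller hands it a location that already carries the symbolic data. A stand-in declaration showing only the fields this code actually uses (loc.pc, loc.function, loc.filename, loc.lineno); the real definition lives elsewhere in the gccgo runtime and may differ:

package tracesketch

// Assumed shape of the location type consumed above; the field names come
// from the uses in this diff, everything else is a guess.
type location struct {
	pc       uintptr
	function string
	filename string
	lineno   int
}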
@@ -807,8 +787,6 @@ func traceFrameForPC(buf *traceBuf, frames map[uintptr]traceFrame, pc uintptr) (
	return frame, buf
}

*/

// traceAlloc is a non-thread-safe region allocator.
// It holds a linked list of traceAllocBlock.
type traceAlloc struct {
@@ -831,9 +809,6 @@ type traceAllocBlockPtr uintptr
func (p traceAllocBlockPtr) ptr() *traceAllocBlock { return (*traceAllocBlock)(unsafe.Pointer(p)) }
func (p *traceAllocBlockPtr) set(x *traceAllocBlock) { *p = traceAllocBlockPtr(unsafe.Pointer(x)) }

/*
Commented out for gccgo for now.

// alloc allocates n-byte block.
func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	n = round(n, sys.PtrSize)
@@ -841,6 +816,8 @@ func (a *traceAlloc) alloc(n uintptr) unsafe.Pointer {
	if n > uintptr(len(a.head.ptr().data)) {
		throw("trace: alloc too large")
	}
	// This is only safe because the strings returned by callers
	// are stored in a location that is not in the Go heap.
	block := (*traceAllocBlock)(sysAlloc(unsafe.Sizeof(traceAllocBlock{}), &memstats.other_sys))
	if block == nil {
		throw("trace: out of memory")
@@ -913,7 +890,7 @@ func traceGoCreate(newg *g, pc uintptr) {
	newg.traceseq = 0
	newg.tracelastp = getg().m.p
	// +PCQuantum because traceFrameForPC expects return PCs and subtracts PCQuantum.
	id := trace.stackTab.put([]uintptr{pc + sys.PCQuantum})
	id := trace.stackTab.put([]location{location{pc: pc + sys.PCQuantum}})
	traceEvent(traceEvGoCreate, 2, uint64(newg.goid), uint64(id))
}
@@ -1004,5 +981,3 @@ func traceHeapAlloc() {
func traceNextGC() {
	traceEvent(traceEvNextGC, -1, memstats.next_gc)
}

*/
@@ -618,8 +618,7 @@ runtime_debug_WriteHeapDump(uintptr fd)
	// Stop the world.
	runtime_acquireWorldsema();
	m = runtime_m();
	m->gcing = 1;
	m->locks++;
	m->preemptoff = runtime_gostringnocopy((const byte*)"write heap dump");
	runtime_stopTheWorldWithSema();

	// Update stats so we can dump them.
@@ -640,10 +639,9 @@ runtime_debug_WriteHeapDump(uintptr fd)
	dumpfd = 0;

	// Start up the world again.
	m->gcing = 0;
	runtime_releaseWorldsema();
	runtime_startTheWorldWithSema();
	m->locks--;
	runtime_releaseWorldsema();
	m->preemptoff = runtime_gostringnocopy(nil);
}

// Runs the specified gc program. Calls the callback for every
@@ -99,7 +99,8 @@ runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
		flag |= FlagNoInvokeGC;
	}

	if(runtime_gcwaiting() && g != m->g0 && m->locks == 0 && !(flag & FlagNoInvokeGC) && m->preemptoff.len == 0) {
	if((g->preempt || runtime_gcwaiting()) && g != m->g0 && m->locks == 0 && !(flag & FlagNoInvokeGC) && m->preemptoff.len == 0) {
		g->preempt = false;
		runtime_gosched();
		m = runtime_m();
	}
@@ -132,12 +132,6 @@ enum
#else
	MHeapMap_Bits = 32 - PageShift,
#endif

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 8 cpus.
	MaxGcproc = 8,
};

// Maximum memory allocation size, a hint for callers.
@@ -186,7 +180,8 @@ enum

void* runtime_SysAlloc(uintptr nbytes, uint64 *stat)
  __asm__ (GOSYM_PREFIX "runtime.sysAlloc");
void runtime_SysFree(void *v, uintptr nbytes, uint64 *stat);
void runtime_SysFree(void *v, uintptr nbytes, uint64 *stat)
  __asm__ (GOSYM_PREFIX "runtime.sysFree");
void runtime_SysUnused(void *v, uintptr nbytes);
void runtime_SysUsed(void *v, uintptr nbytes);
void runtime_SysMap(void *v, uintptr nbytes, bool reserved, uint64 *stat);
@@ -467,11 +462,15 @@ void runtime_MProf_GC(void)
  __asm__ (GOSYM_PREFIX "runtime.mProf_GC");
void runtime_iterate_memprof(FuncVal* callback)
  __asm__ (GOSYM_PREFIX "runtime.iterate_memprof");
int32 runtime_gcprocs(void);
void runtime_helpgc(int32 nproc);
void runtime_gchelper(void);
int32 runtime_gcprocs(void)
  __asm__ (GOSYM_PREFIX "runtime.gcprocs");
void runtime_helpgc(int32 nproc)
  __asm__ (GOSYM_PREFIX "runtime.helpgc");
void runtime_gchelper(void)
  __asm__ (GOSYM_PREFIX "runtime.gchelper");
void runtime_createfing(void);
G* runtime_wakefing(void);
G* runtime_wakefing(void)
  __asm__ (GOSYM_PREFIX "runtime.wakefing");
extern bool runtime_fingwait;
extern bool runtime_fingwake;
@@ -7,7 +7,7 @@
// GC is:
// - mark&sweep
// - mostly precise (with the exception of some C-allocated objects, assembly frames/arguments, etc)
// - parallel (up to MaxGcproc threads)
// - parallel (up to _MaxGcproc threads)
// - partially concurrent (mark is stop-the-world, while sweep is concurrent)
// - non-moving/non-compacting
// - full (non-partial)
@@ -389,7 +389,7 @@ struct BufferList
	uint32 busy;
	byte pad[CacheLineSize];
};
static BufferList bufferList[MaxGcproc];
static BufferList bufferList[_MaxGcproc];

static void enqueue(Obj obj, Workbuf **_wbuf, Obj **_wp, uintptr *_nobj);
@@ -2228,7 +2228,7 @@ gc(struct gc_args *args)

	m->locks++;	// disable gc during mallocs in parforalloc
	if(work.markfor == nil)
		work.markfor = runtime_parforalloc(MaxGcproc);
		work.markfor = runtime_parforalloc(_MaxGcproc);
	m->locks--;

	tm1 = 0;
@@ -2355,7 +2355,7 @@ gc(struct gc_args *args)
			sweep.g = __go_go(bgsweep, nil);
		else if(sweep.parked) {
			sweep.parked = false;
			runtime_ready(sweep.g);
			runtime_ready(sweep.g, 0, true);
		}
		runtime_unlock(&gclock);
	} else {
@@ -2429,7 +2429,7 @@ gchelperstart(void)
	M *m;

	m = runtime_m();
	if(m->helpgc < 0 || m->helpgc >= MaxGcproc)
	if(m->helpgc < 0 || m->helpgc >= _MaxGcproc)
		runtime_throw("gchelperstart: bad m->helpgc");
	if(runtime_xchg(&bufferList[m->helpgc].busy, 1))
		runtime_throw("gchelperstart: already busy");
@@ -2541,6 +2541,20 @@ runtime_createfing(void)
	runtime_unlock(&gclock);
}

bool getfingwait() __asm__(GOSYM_PREFIX "runtime.getfingwait");
bool
getfingwait()
{
	return runtime_fingwait;
}

bool getfingwake() __asm__(GOSYM_PREFIX "runtime.getfingwake");
bool
getfingwake()
{
	return runtime_fingwake;
}

G*
runtime_wakefing(void)
{
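Note: getfingwait/getfingwake follow the same bridging pattern used elsewhere in this change: the C definition exports the symbol under GOSYM_PREFIX, and the Go side declares the function without a body (as with stopTheWorldWithSema and sysAlloc above). A sketch of what the matching Go-side declarations would look like (assumed, not quoted from the tree):

// Body-less Go declarations satisfied by the C definitions above via the
// "runtime.getfingwait" / "runtime.getfingwake" assembler names.
func getfingwait() bool
func getfingwake() bool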
libgo/runtime/proc.c (1400 changes): file diff suppressed because it is too large.
@@ -240,7 +240,6 @@ extern G* runtime_lastg;
extern M* runtime_allm;
extern P** runtime_allp;
extern Sched* runtime_sched;
extern int32 runtime_gomaxprocs;
extern uint32 runtime_panicking(void)
  __asm__ (GOSYM_PREFIX "runtime.getPanicking");
extern int8* runtime_goos;
@@ -260,7 +259,8 @@ extern bool runtime_isarchive;
intgo runtime_findnull(const byte*)
  __asm__ (GOSYM_PREFIX "runtime.findnull");

void runtime_gogo(G*);
void runtime_gogo(G*)
  __asm__ (GOSYM_PREFIX "runtime.gogo");
struct __go_func_type;
void runtime_args(int32, byte**)
  __asm__ (GOSYM_PREFIX "runtime.args");
@@ -294,7 +294,8 @@ void runtime_printtrace(Slice, G*)
#define runtime_read(d, v, n) read((d), (v), (n))
#define runtime_write(d, v, n) write((d), (v), (n))
#define runtime_close(d) close(d)
void runtime_ready(G*);
void runtime_ready(G*, intgo, bool)
  __asm__ (GOSYM_PREFIX "runtime.ready");
String runtime_getenv(const char*);
int32 runtime_atoi(const byte*, intgo);
void* runtime_mstart(void*);
@@ -307,7 +308,8 @@ void runtime_signalstack(byte*, uintptr)
  __asm__ (GOSYM_PREFIX "runtime.signalstack");
MCache* runtime_allocmcache(void)
  __asm__ (GOSYM_PREFIX "runtime.allocmcache");
void runtime_freemcache(MCache*);
void runtime_freemcache(MCache*)
  __asm__ (GOSYM_PREFIX "runtime.freemcache");
void runtime_mallocinit(void);
void runtime_mprofinit(void);
#define runtime_getcallersp(p) __builtin_frame_address(0)
@@ -368,8 +370,6 @@ int64 runtime_unixnanotime(void) // real time, can skip
void runtime_dopanic(int32) __attribute__ ((noreturn));
void runtime_startpanic(void)
  __asm__ (GOSYM_PREFIX "runtime.startpanic");
void runtime_freezetheworld(void)
  __asm__ (GOSYM_PREFIX "runtime.freezetheworld");
void runtime_unwindstack(G*, byte*);
void runtime_sigprof()
  __asm__ (GOSYM_PREFIX "runtime.sigprof");