// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

/*
Stack layout parameters.
Included both by runtime (compiled via 6c) and linkers (compiled via gcc).

The per-goroutine g->stackguard is set to point StackGuard bytes
above the bottom of the stack. Each function compares its stack
pointer against g->stackguard to check for overflow. To cut one
instruction from the check sequence for functions with tiny frames,
the stack is allowed to protrude StackSmall bytes below the stack
guard. Functions with large frames don't bother with the check and
always call morestack. The sequences are (for amd64, others are
similar):

	guard = g->stackguard
	frame = function's stack frame size
	argsize = size of function arguments (call + return)

	stack frame size <= StackSmall:
		CMPQ guard, SP
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size > StackSmall but < StackBig
		LEAQ (frame-StackSmall)(SP), R0
		CMPQ guard, R0
		JHI 3(PC)
		MOVQ m->morearg, $(argsize << 32)
		CALL morestack(SB)

	stack frame size >= StackBig:
		MOVQ m->morearg, $((argsize << 32) | frame)
		CALL morestack(SB)

The bottom StackGuard - StackSmall bytes are important: there has
to be enough room to execute functions that refuse to check for
stack overflow, either because they need to be adjacent to the
actual caller's frame (deferproc) or because they handle the imminent
stack overflow (morestack).

For example, deferproc might call malloc, which does one of the
above checks (without allocating a full frame), which might trigger
a call to morestack. This sequence needs to fit in the bottom
section of the stack. On amd64, morestack's frame is 40 bytes, and
deferproc's frame is 56 bytes. That fits well within the
StackGuard - StackSmall bytes at the bottom.

The linkers explore all possible call traces involving non-splitting
functions to make sure that this limit cannot be violated.
*/
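
// For a concrete sense of the budget (illustrative, assuming
// sys.StackGuardMultiplier == 1 and _StackSystem == 0, as on a typical
// linux/amd64 build): StackGuard - StackSmall is 720 - 128 = 592 bytes,
// so morestack's 40-byte frame plus deferproc's 56-byte frame use well
// under a fifth of the nosplit budget.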

const (
	// StackSystem is a number of additional bytes to add
	// to each stack below the usual guard area for OS-specific
	// purposes like signal handling. Used on Windows, Plan 9,
	// and Darwin/ARM because they do not use a separate stack.
	_StackSystem = sys.GoosWindows*512*sys.PtrSize + sys.GoosPlan9*512 + sys.GoosDarwin*sys.GoarchArm*1024

	// The minimum size of stack used by Go code
	_StackMin = 2048

	// The minimum stack size to allocate.
	// The hackery here rounds FixedStack0 up to a power of 2.
	_FixedStack0 = _StackMin + _StackSystem
	_FixedStack1 = _FixedStack0 - 1
	_FixedStack2 = _FixedStack1 | (_FixedStack1 >> 1)
	_FixedStack3 = _FixedStack2 | (_FixedStack2 >> 2)
	_FixedStack4 = _FixedStack3 | (_FixedStack3 >> 4)
	_FixedStack5 = _FixedStack4 | (_FixedStack4 >> 8)
	_FixedStack6 = _FixedStack5 | (_FixedStack5 >> 16)
	_FixedStack  = _FixedStack6 + 1
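
	// Tracing the rounding (illustrative): with _StackSystem == 0,
	// _FixedStack0 is 2048, already a power of 2, and the OR cascade
	// leaves it unchanged. With _StackSystem == 4096 (e.g. windows/amd64),
	// _FixedStack0 is 6144 = 0x1800; subtracting 1 gives 0x17ff, the
	// shifts smear the top set bit down to 0x1fff, and adding 1 back
	// yields 8192, the next power of 2.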

	// Functions that need frames bigger than this use an extra
	// instruction to do the stack split check, to avoid overflow
	// in case SP - framesize wraps below zero.
	// This value can be no bigger than the size of the unmapped
	// space at zero.
	_StackBig = 4096

	// The stack guard is a pointer this many bytes above the
	// bottom of the stack.
	_StackGuard = 720*sys.StackGuardMultiplier + _StackSystem

	// After a stack split check the SP is allowed to be this
	// many bytes below the stack guard. This saves an instruction
	// in the checking sequence for tiny frames.
	_StackSmall = 128

	// The maximum number of bytes that a chain of NOSPLIT
	// functions can use.
	_StackLimit = _StackGuard - _StackSystem - _StackSmall
)

// Goroutine preemption request.
// Stored into g->stackguard0 to cause split stack check failure.
// Must be greater than any real sp.
// 0xfffffade in hex.
const (
	_StackPreempt = uintptrMask & -1314
	_StackFork    = uintptrMask & -1234
)

const (
	// stackDebug == 0: no logging
	//            == 1: logging of per-stack operations
	//            == 2: logging of per-frame operations
	//            == 3: logging of per-word updates
	//            == 4: logging of per-word reads
	stackDebug = 0

	stackFromSystem  = 0 // allocate stacks from system memory instead of the heap
	stackFaultOnFree = 0 // old stacks are mapped noaccess to detect use after free
	stackPoisonCopy  = 0 // fill stack that should not be accessed with garbage, to detect bad dereferences during copy

	stackCache = 1
)

const (
	uintptrMask = 1<<(8*sys.PtrSize) - 1

	// Goroutine preemption request.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	// 0xfffffade in hex.
	stackPreempt = uintptrMask & -1314

	// Thread is forking.
	// Stored into g->stackguard0 to cause split stack check failure.
	// Must be greater than any real sp.
	stackFork = uintptrMask & -1234
)
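
// A quick check of the encoding (illustrative): -1314 in two's complement is
// ...fffffade, so stackPreempt is 0xfffffade on 32-bit systems and
// 0xfffffffffffffade on 64-bit systems, matching the comments above. Both
// values sit at the very top of the address space, above any real sp.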

// Global pool of spans that have free stacks.
// Stacks are assigned an order according to size.
//     order = log_2(size/FixedStack)
// There is a free list for each order.
// TODO: one lock per order?
var stackpool [_NumStackOrders]mSpanList
var stackpoolmu mutex

// Global pool of large stack spans.
var stackLarge struct {
	lock mutex
	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
}
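
// For example (illustrative, assuming _FixedStack == 2048 and
// _NumStackOrders == 4, as on linux/amd64): order 0 holds 2 KB stacks,
// order 1 holds 4 KB, order 2 holds 8 KB, and order 3 holds 16 KB.
// Anything larger is served from stackLarge.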

func stackinit() {
	if _StackCacheSize&_PageMask != 0 {
		throw("cache size must be a multiple of page size")
	}
	for i := range stackpool {
		stackpool[i].init()
	}
	for i := range stackLarge.free {
		stackLarge.free[i].init()
	}
}

// stacklog2 returns ⌊log_2(n)⌋.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}
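
// Illustrative values: stacklog2(1) == 0, stacklog2(8) == 3, and
// stacklog2(9) == 3, since any fractional part of the log is dropped.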

// Allocates a stack from the free pool. Must be called with
// stackpoolmu held.
func stackpoolalloc(order uint8) gclinkptr {
	list := &stackpool[order]
	s := list.first
	if s == nil {
		// no free stacks. Allocate another span worth.
		s = mheap_.allocStack(_StackCacheSize >> _PageShift)
		if s == nil {
			throw("out of memory")
		}
		if s.allocCount != 0 {
			throw("bad allocCount")
		}
		if s.stackfreelist.ptr() != nil {
			throw("bad stackfreelist")
		}
		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
			x := gclinkptr(s.base() + i)
			x.ptr().next = s.stackfreelist
			s.stackfreelist = x
		}
		list.insert(s)
	}
	x := s.stackfreelist
	if x.ptr() == nil {
		throw("span has no free stacks")
	}
	s.stackfreelist = x.ptr().next
	s.allocCount++
	if s.stackfreelist.ptr() == nil {
		// all stacks in s are allocated.
		list.remove(s)
	}
	return x
}

// Adds stack x to the free pool. Must be called with stackpoolmu held.
func stackpoolfree(x gclinkptr, order uint8) {
	s := mheap_.lookup(unsafe.Pointer(x))
	if s.state != _MSpanStack {
		throw("freeing stack not in a stack span")
	}
	if s.stackfreelist.ptr() == nil {
		// s will now have a free stack
		stackpool[order].insert(s)
	}
	x.ptr().next = s.stackfreelist
	s.stackfreelist = x
	s.allocCount--
	if gcphase == _GCoff && s.allocCount == 0 {
		// Span is completely free. Return it to the heap
		// immediately if we're sweeping.
		//
		// If GC is active, we delay the free until the end of
		// GC to avoid the following type of situation:
		//
		// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
		// 2) The stack that pointer points to is copied
		// 3) The old stack is freed
		// 4) The containing span is marked free
		// 5) GC attempts to mark the SudoG.elem pointer. The
		//    marking fails because the pointer looks like a
		//    pointer into a free span.
		//
		// By not freeing, we prevent step #4 until GC is done.
		stackpool[order].remove(s)
		s.stackfreelist = 0
		mheap_.freeStack(s)
	}
}

// stackcacherefill/stackcacherelease implement a global pool of stack segments.
// The pool is required to prevent unlimited growth of per-thread caches.
//
//go:systemstack
func stackcacherefill(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherefill order=", order, "\n")
	}

	// Grab some stacks from the global cache.
	// Grab half of the allowed capacity (to prevent thrashing).
	var list gclinkptr
	var size uintptr
	lock(&stackpoolmu)
	for size < _StackCacheSize/2 {
		x := stackpoolalloc(order)
		x.ptr().next = list
		list = x
		size += _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = list
	c.stackcache[order].size = size
}

//go:systemstack
func stackcacherelease(c *mcache, order uint8) {
	if stackDebug >= 1 {
		print("stackcacherelease order=", order, "\n")
	}
	x := c.stackcache[order].list
	size := c.stackcache[order].size
	lock(&stackpoolmu)
	for size > _StackCacheSize/2 {
		y := x.ptr().next
		stackpoolfree(x, order)
		x = y
		size -= _FixedStack << order
	}
	unlock(&stackpoolmu)
	c.stackcache[order].list = x
	c.stackcache[order].size = size
}

//go:systemstack
func stackcache_clear(c *mcache) {
	if stackDebug >= 1 {
		print("stackcache clear\n")
	}
	lock(&stackpoolmu)
	for order := uint8(0); order < _NumStackOrders; order++ {
		x := c.stackcache[order].list
		for x.ptr() != nil {
			y := x.ptr().next
			stackpoolfree(x, order)
			x = y
		}
		c.stackcache[order].list = 0
		c.stackcache[order].size = 0
	}
	unlock(&stackpoolmu)
}

// stackalloc allocates an n byte stack.
//
// stackalloc must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackalloc(n uint32) (stack, []stkbar) {
	// Stackalloc must be called on scheduler stack, so that we
	// never try to grow the stack during the code that stackalloc runs.
	// Doing so would cause a deadlock (issue 1547).
	thisg := getg()
	if thisg != thisg.m.g0 {
		throw("stackalloc not on scheduler stack")
	}
	if n&(n-1) != 0 {
		throw("stack size not a power of 2")
	}
	if stackDebug >= 1 {
		print("stackalloc ", n, "\n")
	}

	// Compute the size of stack barrier array.
	maxstkbar := gcMaxStackBarriers(int(n))
	nstkbar := unsafe.Sizeof(stkbar{}) * uintptr(maxstkbar)

	if debug.efence != 0 || stackFromSystem != 0 {
		v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
		if v == nil {
			throw("out of memory (stackalloc)")
		}
		top := uintptr(n) - nstkbar
		stkbarSlice := slice{add(v, top), 0, maxstkbar}
		return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
	}

	// Small stacks are allocated with a fixed-size free-list allocator.
	// If we need a stack of a bigger size, we fall back on allocating
	// a dedicated span.
	var v unsafe.Pointer
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		var x gclinkptr
		c := thisg.m.mcache
		if c == nil || thisg.m.preemptoff != "" || thisg.m.helpgc != 0 {
			// c == nil can happen in the guts of exitsyscall or
			// procresize. Just get a stack from the global pool.
			// Also don't touch stackcache during gc
			// as it's flushed concurrently.
			lock(&stackpoolmu)
			x = stackpoolalloc(order)
			unlock(&stackpoolmu)
		} else {
			x = c.stackcache[order].list
			if x.ptr() == nil {
				stackcacherefill(c, order)
				x = c.stackcache[order].list
			}
			c.stackcache[order].list = x.ptr().next
			c.stackcache[order].size -= uintptr(n)
		}
		v = unsafe.Pointer(x)
	} else {
		var s *mspan
		npage := uintptr(n) >> _PageShift
		log2npage := stacklog2(npage)

		// Try to get a stack from the large stack cache.
		lock(&stackLarge.lock)
		if !stackLarge.free[log2npage].isEmpty() {
			s = stackLarge.free[log2npage].first
			stackLarge.free[log2npage].remove(s)
		}
		unlock(&stackLarge.lock)

		if s == nil {
			// Allocate a new stack from the heap.
			s = mheap_.allocStack(npage)
			if s == nil {
				throw("out of memory")
			}
		}
		v = unsafe.Pointer(s.base())
	}

	if raceenabled {
		racemalloc(v, uintptr(n))
	}
	if msanenabled {
		msanmalloc(v, uintptr(n))
	}
	if stackDebug >= 1 {
		print("  allocated ", v, "\n")
	}
	top := uintptr(n) - nstkbar
	stkbarSlice := slice{add(v, top), 0, maxstkbar}
	return stack{uintptr(v), uintptr(v) + top}, *(*[]stkbar)(unsafe.Pointer(&stkbarSlice))
}
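
// Tracing the small-stack path above (illustrative, assuming _FixedStack ==
// 2048): a request for n == 8192 halves n2 twice before it reaches
// _FixedStack, so order == 2 and the stack comes from the 8 KB free list.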

// stackfree frees an n byte stack allocation at stk.
//
// stackfree must run on the system stack because it uses per-P
// resources and must not split the stack.
//
//go:systemstack
func stackfree(stk stack, n uintptr) {
	gp := getg()
	v := unsafe.Pointer(stk.lo)
	if n&(n-1) != 0 {
		throw("stack not a power of 2")
	}
	if stk.lo+n < stk.hi {
		throw("bad stack size")
	}
	if stackDebug >= 1 {
		println("stackfree", v, n)
		memclr(v, n) // for testing, clobber stack data
	}
	if debug.efence != 0 || stackFromSystem != 0 {
		if debug.efence != 0 || stackFaultOnFree != 0 {
			sysFault(v, n)
		} else {
			sysFree(v, n, &memstats.stacks_sys)
		}
		return
	}
	if msanenabled {
		msanfree(v, n)
	}
	if stackCache != 0 && n < _FixedStack<<_NumStackOrders && n < _StackCacheSize {
		order := uint8(0)
		n2 := n
		for n2 > _FixedStack {
			order++
			n2 >>= 1
		}
		x := gclinkptr(v)
		c := gp.m.mcache
		if c == nil || gp.m.preemptoff != "" || gp.m.helpgc != 0 {
			lock(&stackpoolmu)
			stackpoolfree(x, order)
			unlock(&stackpoolmu)
		} else {
			if c.stackcache[order].size >= _StackCacheSize {
				stackcacherelease(c, order)
			}
			x.ptr().next = c.stackcache[order].list
			c.stackcache[order].list = x
			c.stackcache[order].size += n
		}
	} else {
		s := mheap_.lookup(v)
		if s.state != _MSpanStack {
			println(hex(s.base()), v)
			throw("bad span state")
		}
		if gcphase == _GCoff {
			// Free the stack immediately if we're
			// sweeping.
			mheap_.freeStack(s)
		} else {
			// If the GC is running, we can't return a
			// stack span to the heap because it could be
			// reused as a heap span, and this state
			// change would race with GC. Add it to the
			// large stack cache instead.
			log2npage := stacklog2(s.npages)
			lock(&stackLarge.lock)
			stackLarge.free[log2npage].insert(s)
			unlock(&stackLarge.lock)
		}
	}
}

var maxstacksize uintptr = 1 << 20 // enough until runtime.main sets it for real

var ptrnames = []string{
	0: "scalar",
	1: "ptr",
}

// Stack frame layout
//
// (x86)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | return address |
// +------------------+
// | caller's BP (*) | (*) if framepointer_enabled && varp < sp
// +------------------+ <- frame->varp
// | locals |
// +------------------+
// | args to callee |
// +------------------+ <- frame->sp
//
// (arm)
// +------------------+
// | args from caller |
// +------------------+ <- frame->argp
// | caller's retaddr |
// +------------------+ <- frame->varp
// | locals |
// +------------------+
// | args to callee |
// +------------------+
// | return address |
// +------------------+ <- frame->sp
type adjustinfo struct {
	old   stack
	delta uintptr // ptr distance from old to new stack (newbase - oldbase)
	cache pcvalueCache

	// sghi is the highest sudog.elem on the stack.
	sghi uintptr
}

// Adjustpointer checks whether *vpp is in the old stack described by adjinfo.
// If so, it rewrites *vpp to point into the new stack.
func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) {
	pp := (*uintptr)(vpp)
	p := *pp
	if stackDebug >= 4 {
		print("        ", pp, ":", hex(p), "\n")
	}
	if adjinfo.old.lo <= p && p < adjinfo.old.hi {
		*pp = p + adjinfo.delta
		if stackDebug >= 3 {
			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n")
		}
	}
}
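
// For example (illustrative): if the old stack spans [0xc000, 0xc800) and
// adjinfo.delta is 0x4000, a slot holding 0xc100 is rewritten to 0x10100,
// while a pointer outside [0xc000, 0xc800) is left untouched.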

// Information from the compiler about the layout of stack frames.
type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

type gobitvector struct {
	n        uintptr
	bytedata []uint8
}

func gobv(bv bitvector) gobitvector {
	return gobitvector{
		uintptr(bv.n),
		(*[1 << 30]byte)(unsafe.Pointer(bv.bytedata))[:(bv.n+7)/8],
	}
}

func ptrbit(bv *gobitvector, i uintptr) uint8 {
	return (bv.bytedata[i/8] >> (i % 8)) & 1
}
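
// Bit i of the bitmap lives in byte i/8 at bit position i%8. For an
// illustrative bitmap byte of 0x5 (binary 101), ptrbit reports slots 0 and 2
// as pointers and slot 1 as a scalar.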

// bv describes the memory starting at address scanp.
// Adjust any pointers contained therein.
func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f *_func) {
	bv := gobv(*cbv)
	minp := adjinfo.old.lo
	maxp := adjinfo.old.hi
	delta := adjinfo.delta
	num := bv.n

	// If this frame might contain channel receive slots, use CAS
	// to adjust pointers. If the slot hasn't been received into
	// yet, it may contain stack pointers and a concurrent send
	// could race with adjusting those pointers. (The sent value
	// itself can never contain stack pointers.)
	useCAS := uintptr(scanp) < adjinfo.sghi

	for i := uintptr(0); i < num; i++ {
		if stackDebug >= 4 {
			print("        ", add(scanp, i*sys.PtrSize), ":", ptrnames[ptrbit(&bv, i)], ":", hex(*(*uintptr)(add(scanp, i*sys.PtrSize))), " # ", i, " ", bv.bytedata[i/8], "\n")
		}
		if ptrbit(&bv, i) == 1 {
			pp := (*uintptr)(add(scanp, i*sys.PtrSize))
		retry:
			p := *pp
			if f != nil && 0 < p && p < _PageSize && debug.invalidptr != 0 {
				// Looks like a junk value in a pointer slot.
				// Live analysis wrong?
				getg().m.traceback = 2
				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n")
				throw("invalid stack pointer")
			}
			if minp <= p && p < maxp {
				if stackDebug >= 3 {
					print("adjust ptr ", p, " ", funcname(f), "\n")
				}
				if useCAS {
					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp))
					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) {
						goto retry
					}
				} else {
					*pp = p + delta
				}
			}
		}
	}
}

// Note: the argument/return area is adjusted by the callee.
func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
	adjinfo := (*adjustinfo)(arg)
	targetpc := frame.continpc
	if targetpc == 0 {
		// Frame is dead.
		return true
	}
	f := frame.fn
	if stackDebug >= 2 {
		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n")
	}
	if f.entry == systemstack_switchPC {
		// A special routine at the bottom of stack of a goroutine that does a systemstack call.
		// We will allow it to be copied even though we don't
		// have full GC info for it (because it is written in asm).
		return true
	}
	if targetpc != f.entry {
		targetpc--
	}
	pcdata := pcdatavalue(f, _PCDATA_StackMapIndex, targetpc, &adjinfo.cache)
	if pcdata == -1 {
		pcdata = 0 // in prologue
	}

	// Adjust local variables if stack frame has been allocated.
	size := frame.varp - frame.sp
	var minsize uintptr
	switch sys.ArchFamily {
	case sys.ARM64:
		minsize = sys.SpAlign
	default:
		minsize = sys.MinFrameSize
	}
	if size > minsize {
		var bv bitvector
		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
		if stackmap == nil || stackmap.n <= 0 {
			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
			throw("missing stackmap")
		}
		// Locals bitmap information, scan just the pointers in locals.
		if pcdata < 0 || pcdata >= stackmap.n {
			// don't know where we are
			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
			throw("bad symbol table")
		}
		bv = stackmapdata(stackmap, pcdata)
		size = uintptr(bv.n) * sys.PtrSize
		if stackDebug >= 3 {
			print("      locals ", pcdata, "/", stackmap.n, " ", size/sys.PtrSize, " words ", bv.bytedata, "\n")
		}
		adjustpointers(unsafe.Pointer(frame.varp-size), &bv, adjinfo, f)
	}

	// Adjust saved base pointer if there is one.
	if sys.ArchFamily == sys.AMD64 && frame.argp-frame.varp == 2*sys.RegSize {
		if !framepointer_enabled {
			print("runtime: found space for saved base pointer, but no framepointer experiment\n")
			print("argp=", hex(frame.argp), " varp=", hex(frame.varp), "\n")
			throw("bad frame layout")
		}
		if stackDebug >= 3 {
			print("      saved bp\n")
		}
		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))
	}

	// Adjust arguments.
	if frame.arglen > 0 {
		var bv bitvector
		if frame.argmap != nil {
			bv = *frame.argmap
		} else {
			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
			if stackmap == nil || stackmap.n <= 0 {
				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", frame.arglen, "\n")
				throw("missing stackmap")
			}
			if pcdata < 0 || pcdata >= stackmap.n {
				// don't know where we are
				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
				throw("bad symbol table")
			}
			bv = stackmapdata(stackmap, pcdata)
		}
		if stackDebug >= 3 {
			print("      args\n")
		}
		adjustpointers(unsafe.Pointer(frame.argp), &bv, adjinfo, nil)
	}
	return true
}

func adjustctxt(gp *g, adjinfo *adjustinfo) {
	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt))
}

func adjustdefers(gp *g, adjinfo *adjustinfo) {
	// Adjust defer argument blocks the same way we adjust active stack frames.
	tracebackdefers(gp, adjustframe, noescape(unsafe.Pointer(adjinfo)))

	// Adjust pointers in the Defer structs.
	// Defer structs themselves are never on the stack.
	for d := gp._defer; d != nil; d = d.link {
		adjustpointer(adjinfo, unsafe.Pointer(&d.fn))
		adjustpointer(adjinfo, unsafe.Pointer(&d.sp))
		adjustpointer(adjinfo, unsafe.Pointer(&d._panic))
	}
}

func adjustpanics(gp *g, adjinfo *adjustinfo) {
	// Panics are on stack and already adjusted.
	// Update pointer to head of list in G.
	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic))
}

func adjustsudogs(gp *g, adjinfo *adjustinfo) {
	// the data elements pointed to by a SudoG structure
	// might be in the stack.
	for s := gp.waiting; s != nil; s = s.waitlink {
		adjustpointer(adjinfo, unsafe.Pointer(&s.elem))
		adjustpointer(adjinfo, unsafe.Pointer(&s.selectdone))
	}
}

func adjuststkbar(gp *g, adjinfo *adjustinfo) {
	for i := int(gp.stkbarPos); i < len(gp.stkbar); i++ {
		adjustpointer(adjinfo, unsafe.Pointer(&gp.stkbar[i].savedLRPtr))
	}
}

func fillstack(stk stack, b byte) {
	for p := stk.lo; p < stk.hi; p++ {
		*(*byte)(unsafe.Pointer(p)) = b
	}
}

func findsghi(gp *g, stk stack) uintptr {
	var sghi uintptr
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		p := uintptr(sg.elem) + uintptr(sg.c.elemsize)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
		p = uintptr(unsafe.Pointer(sg.selectdone)) + unsafe.Sizeof(sg.selectdone)
		if stk.lo <= p && p < stk.hi && p > sghi {
			sghi = p
		}
	}
	return sghi
}

// syncadjustsudogs adjusts gp's sudogs and copies the part of gp's
// stack they refer to while synchronizing with concurrent channel
// operations. It returns the number of bytes of stack copied.
func syncadjustsudogs(gp *g, used uintptr, adjinfo *adjustinfo) uintptr {
	if gp.waiting == nil {
		return 0
	}

	// Lock channels to prevent concurrent send/receive.
	// It's important that we *only* do this for async
	// copystack; otherwise, gp may be in the middle of
	// putting itself on wait queues and this would
	// self-deadlock.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			lock(&sg.c.lock)
		}
		lastc = sg.c
	}

	// Adjust sudogs.
	adjustsudogs(gp, adjinfo)

	// Copy the part of the stack the sudogs point in to
	// while holding the lock to prevent races on
	// send/receive slots.
	var sgsize uintptr
	if adjinfo.sghi != 0 {
		oldBot := adjinfo.old.hi - used
		newBot := oldBot + adjinfo.delta
		sgsize = adjinfo.sghi - oldBot
		memmove(unsafe.Pointer(newBot), unsafe.Pointer(oldBot), sgsize)
	}

	// Unlock channels.
	lastc = nil
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc {
			unlock(&sg.c.lock)
		}
		lastc = sg.c
	}

	return sgsize
}

// Copies gp's stack to a new stack of a different size.
// Caller must have changed gp status to Gcopystack.
//
// If sync is true, this is a self-triggered stack growth and, in
// particular, no other G may be writing to gp's stack (e.g., via a
// channel operation). If sync is false, copystack protects against
// concurrent channel operations.
func copystack(gp *g, newsize uintptr, sync bool) {
	if gp.syscallsp != 0 {
		throw("stack growth not allowed in system call")
	}
	old := gp.stack
	if old.lo == 0 {
		throw("nil stackbase")
	}
	used := old.hi - gp.sched.sp

	// allocate new stack
	new, newstkbar := stackalloc(uint32(newsize))
	if stackPoisonCopy != 0 {
		fillstack(new, 0xfd)
	}
	if stackDebug >= 1 {
		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]/", gp.stackAlloc, " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n")
	}

	// Compute adjustment.
	var adjinfo adjustinfo
	adjinfo.old = old
	adjinfo.delta = new.hi - old.hi

	// Adjust sudogs, synchronizing with channel ops if necessary.
	ncopy := used
	if sync {
		adjustsudogs(gp, &adjinfo)
	} else {
		// sudogs can point in to the stack. During concurrent
		// shrinking, these areas may be written to. Find the
		// highest such pointer so we can handle everything
		// there and below carefully. (This shouldn't be far
		// from the bottom of the stack, so there's little
		// cost in handling everything below it carefully.)
		adjinfo.sghi = findsghi(gp, old)

		// Synchronize with channel ops and copy the part of
		// the stack they may interact with.
		ncopy -= syncadjustsudogs(gp, used, &adjinfo)
	}

	// Copy the stack (or the rest of it) to the new location
	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy)

	// Disallow sigprof scans of this stack and block if there's
	// one in progress.
	gcLockStackBarriers(gp)

	// Adjust remaining structures that have pointers into stacks.
	// We have to do most of these before we traceback the new
	// stack because gentraceback uses them.
	adjustctxt(gp, &adjinfo)
	adjustdefers(gp, &adjinfo)
	adjustpanics(gp, &adjinfo)
	adjuststkbar(gp, &adjinfo)
	if adjinfo.sghi != 0 {
		adjinfo.sghi += adjinfo.delta
	}

	// copy old stack barriers to new stack barrier array
	newstkbar = newstkbar[:len(gp.stkbar)]
	copy(newstkbar, gp.stkbar)

	// Swap out old stack for new one
	gp.stack = new
	gp.stackguard0 = new.lo + _StackGuard // NOTE: might clobber a preempt request
	gp.sched.sp = new.hi - used
	oldsize := gp.stackAlloc
	gp.stackAlloc = newsize
	gp.stkbar = newstkbar
	gp.stktopsp += adjinfo.delta

	// Adjust pointers in the new stack.
	gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, adjustframe, noescape(unsafe.Pointer(&adjinfo)), 0)

	gcUnlockStackBarriers(gp)

	// free old stack
	if stackPoisonCopy != 0 {
		fillstack(old, 0xfc)
	}
	stackfree(old, oldsize)
}
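
// A worked example of the adjustment (illustrative): growing a 2 KB stack at
// [0x8000, 0x8800) into a 4 KB stack at [0x10000, 0x11000) gives
// delta = 0x11000 - 0x8800 = 0x8800. A slot holding 0x8700 (0x100 below the
// old top) becomes 0x10f00, exactly 0x100 below the new top.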

// round x up to a power of 2.
func round2(x int32) int32 {
	s := uint(0)
	for 1<<s < x {
		s++
	}
	return 1 << s
}
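
// Illustrative values: round2(1) == 1, round2(3) == 4, and round2(8) == 8.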

// Called from runtime·morestack when more stack is needed.
// Allocate larger stack and relocate to new stack.
// Stack growth is multiplicative, for constant amortized cost.
//
// g->atomicstatus will be Grunning or Gscanrunning upon entry.
// If the GC is trying to stop this g then it will set preemptscan to true.
func newstack() {
	thisg := getg()
	// TODO: double check all gp. shouldn't be getg().
	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork {
		throw("stack growth after fork")
	}
	if thisg.m.morebuf.g.ptr() != thisg.m.curg {
		print("runtime: newstack called from g=", hex(thisg.m.morebuf.g), "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
		morebuf := thisg.m.morebuf
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g.ptr())
		throw("runtime: wrong goroutine in newstack")
	}
	if thisg.m.curg.throwsplit {
		gp := thisg.m.curg
		// Update syscallsp, syscallpc in case traceback uses them.
		morebuf := thisg.m.morebuf
		gp.syscallsp = morebuf.sp
		gp.syscallpc = morebuf.pc
		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
		traceback(morebuf.pc, morebuf.sp, morebuf.lr, gp)
		throw("runtime: stack split at bad time")
	}

	gp := thisg.m.curg
	morebuf := thisg.m.morebuf
	thisg.m.morebuf.pc = 0
	thisg.m.morebuf.lr = 0
	thisg.m.morebuf.sp = 0
	thisg.m.morebuf.g = 0
	rewindmorestack(&gp.sched)

	// NOTE: stackguard0 may change underfoot, if another thread
	// is about to try to preempt gp. Read it just once and use that same
	// value now and below.
	preempt := atomic.Loaduintptr(&gp.stackguard0) == stackPreempt

	// Be conservative about where we preempt.
	// We are interested in preempting user Go code, not runtime code.
	// If we're holding locks, mallocing, or preemption is disabled, don't
	// preempt.
	// This check is very early in newstack so that even the status change
	// from Grunning to Gwaiting and back doesn't happen in this case.
	// That status change by itself can be viewed as a small preemption,
	// because the GC might change Gwaiting to Gscanwaiting, and then
	// this goroutine has to wait for the GC to finish before continuing.
	// If the GC is in some way dependent on this goroutine (for example,
	// it needs a lock held by the goroutine), that small preemption turns
	// into a real deadlock.
	if preempt {
		if thisg.m.locks != 0 || thisg.m.mallocing != 0 || thisg.m.preemptoff != "" || thisg.m.p.ptr().status != _Prunning {
			// Let the goroutine keep running for now.
			// gp->preempt is set, so it will be preempted next time.
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}
	}

	if gp.stack.lo == 0 {
		throw("missing stack in newstack")
	}
	sp := gp.sched.sp
	if sys.ArchFamily == sys.AMD64 || sys.ArchFamily == sys.I386 {
		// The call to morestack cost a word.
		sp -= sys.PtrSize
	}
	if stackDebug >= 1 || sp < gp.stack.lo {
		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
	}
	if sp < gp.stack.lo {
		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
		throw("runtime: split stack overflow")
	}

	if gp.sched.ctxt != nil {
		// morestack wrote sched.ctxt on its way in here,
		// without a write barrier. Run the write barrier now.
		// It is not possible to be preempted between then
		// and now, so it's okay.
		writebarrierptr_nostore((*uintptr)(unsafe.Pointer(&gp.sched.ctxt)), uintptr(gp.sched.ctxt))
	}

	if preempt {
		if gp == thisg.m.g0 {
			throw("runtime: preempt g0")
		}
		if thisg.m.p == 0 && thisg.m.locks == 0 {
			throw("runtime: g is running but p is not")
		}
		// Synchronize with scang.
		casgstatus(gp, _Grunning, _Gwaiting)
		if gp.preemptscan {
			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
				// Likely to be racing with the GC as
				// it sees a _Gwaiting and does the
				// stack scan. If so, gcworkdone will
				// be set and gcphasework will simply
				// return.
			}
			if !gp.gcscandone {
				// gcw is safe because we're on the
				// system stack.
				gcw := &gp.m.p.ptr().gcw
				scanstack(gp, gcw)
				if gcBlackenPromptly {
					gcw.dispose()
				}
				gp.gcscandone = true
			}
			gp.preemptscan = false
			gp.preempt = false
			casfrom_Gscanstatus(gp, _Gscanwaiting, _Gwaiting)
			// This clears gcscanvalid.
			casgstatus(gp, _Gwaiting, _Grunning)
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gogo(&gp.sched) // never return
		}

		// Act like goroutine called runtime.Gosched.
		casgstatus(gp, _Gwaiting, _Grunning)
		gopreempt_m(gp) // never return
	}

	// Allocate a bigger segment and move the stack.
	oldsize := int(gp.stackAlloc)
	newsize := oldsize * 2
	if uintptr(newsize) > maxstacksize {
		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
		throw("stack overflow")
	}

	// The goroutine must be executing in order to call newstack,
	// so it must be Grunning (or Gscanrunning).
	casgstatus(gp, _Grunning, _Gcopystack)

	// The concurrent GC will not scan the stack while we are doing the copy since
	// the gp is in a Gcopystack status.
	copystack(gp, uintptr(newsize), true)
	if stackDebug >= 1 {
		print("stack grow done\n")
	}
	casgstatus(gp, _Gcopystack, _Grunning)
	gogo(&gp.sched)
}
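
// The doubling in newstack gives the multiplicative growth mentioned above
// (illustrative, assuming _StackMin == 2048): a deeply recursing goroutine's
// stack grows 2 KB -> 4 KB -> 8 KB -> ..., so a stack that ends up n bytes
// large was copied only O(log n) times, for constant amortized cost.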

//go:nosplit
func nilfunc() {
	*(*uint8)(nil) = 0
}

// adjust Gobuf as if it executed a call to fn
// and then did an immediate gosave.
func gostartcallfn(gobuf *gobuf, fv *funcval) {
	var fn unsafe.Pointer
	if fv != nil {
		fn = unsafe.Pointer(fv.fn)
	} else {
		fn = unsafe.Pointer(funcPC(nilfunc))
	}
	gostartcall(gobuf, fn, unsafe.Pointer(fv))
}

// Maybe shrink the stack being used by gp.
// Called at garbage collection time.
// gp must be stopped, but the world need not be.
func shrinkstack(gp *g) {
	gstatus := readgstatus(gp)
	if gstatus&^_Gscan == _Gdead {
		if gp.stack.lo != 0 {
			// Free whole stack - it will get reallocated
			// if G is used again.
			stackfree(gp.stack, gp.stackAlloc)
			gp.stack.lo = 0
			gp.stack.hi = 0
			gp.stkbar = nil
			gp.stkbarPos = 0
		}
		return
	}
	if gp.stack.lo == 0 {
		throw("missing stack in shrinkstack")
	}
	if gstatus&_Gscan == 0 {
		throw("bad status in shrinkstack")
	}

	if debug.gcshrinkstackoff > 0 {
		return
	}

	oldsize := gp.stackAlloc
	newsize := oldsize / 2
	// Don't shrink the allocation below the minimum-sized stack
	// allocation.
	if newsize < _FixedStack {
		return
	}
	// Compute how much of the stack is currently in use and only
	// shrink the stack if gp is using less than a quarter of its
	// current stack. The currently used stack includes everything
	// down to the SP plus the stack guard space that ensures
	// there's room for nosplit functions.
	avail := gp.stack.hi - gp.stack.lo
	if used := gp.stack.hi - gp.sched.sp + _StackLimit; used >= avail/4 {
		return
	}

	// We can't copy the stack if we're in a syscall.
	// The syscall might have pointers into the stack.
	if gp.syscallsp != 0 {
		return
	}
	if sys.GoosWindows != 0 && gp.m != nil && gp.m.libcallsp != 0 {
		return
	}

	if stackDebug > 0 {
		print("shrinking stack ", oldsize, "->", newsize, "\n")
	}

	copystack(gp, newsize, false)
}
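
// A worked example of the quarter rule (illustrative, assuming _StackLimit ==
// 592): for a goroutine with an 8 KB stack whose SP sits 1 KB below stack.hi,
// used is 1024 + 592 = 1616 bytes, which is below avail/4 == 2048, so the
// stack is copied into a 4 KB allocation.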

// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
	lock(&stackpoolmu)

	// Scan stack pools for empty stack spans.
	for order := range stackpool {
		list := &stackpool[order]
		for s := list.first; s != nil; {
			next := s.next
			if s.allocCount == 0 {
				list.remove(s)
				s.stackfreelist = 0
				mheap_.freeStack(s)
			}
			s = next
		}
	}

	unlock(&stackpoolmu)

	// Free large stack spans.
	lock(&stackLarge.lock)
	for i := range stackLarge.free {
		for s := stackLarge.free[i].first; s != nil; {
			next := s.next
			stackLarge.free[i].remove(s)
			mheap_.freeStack(s)
			s = next
		}
	}
	unlock(&stackLarge.lock)
}

//go:nosplit
func morestackc() {
	systemstack(func() {
		throw("attempt to execute C code on Go stack")
	})
}