runtime: copy runtime.go and runtime1.go from Go 1.7

Also copy over cputicks.go, env_posix.go, vdso_none.go, stubs2.go, and a
part of os_linux.go. Remove the corresponding functions from the C code
in libgo/runtime. Add some transitional support functions to stubs.go.
This converts several minor functions from C to Go.

Reviewed-on: https://go-review.googlesource.com/29962

From-SVN: r240609
Ian Lance Taylor 2016-09-29 00:56:44 +00:00
parent 83194649d2
commit 6748787813
16 changed files with 931 additions and 400 deletions

@@ -1,4 +1,4 @@
c79a35411c1065c71add196fdeca6e5207a79248
e51657a576367c7a498c94baf985b79066fc082a
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.

@@ -0,0 +1,9 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
// careful: cputicks is not guaranteed to be monotonic! In particular, we have
// noticed drift between cpus on certain os/arch combinations. See issue 8976.
func cputicks() int64

@@ -0,0 +1,20 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
package runtime
func gogetenv(key string) string {
env := environ()
if env == nil {
throw("getenv before env init")
}
for _, s := range environ() {
if len(s) > len(key) && s[len(key)] == '=' && s[:len(key)] == key {
return s[len(key)+1:]
}
}
return ""
}
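The comparison accepts an entry only when the key is immediately followed by '=', so a key can never match a longer variable name that merely starts with it. A minimal standalone sketch of the same rule (lookup and the sample environment are illustrative, not part of the runtime):

```go
package main

import "fmt"

// lookup mirrors gogetenv's matching rule: an entry matches only when it is
// longer than the key, has '=' right after the key, and starts with the key.
func lookup(environ []string, key string) string {
	for _, s := range environ {
		if len(s) > len(key) && s[len(key)] == '=' && s[:len(key)] == key {
			return s[len(key)+1:]
		}
	}
	return ""
}

func main() {
	env := []string{"HOME=/home/gopher", "GOTRACEBACK=crash", "EMPTY="}
	fmt.Println(lookup(env, "GOTRACEBACK")) // "crash"
	fmt.Println(lookup(env, "EMPTY"))       // "" (value is empty)
	fmt.Println(lookup(env, "GO"))          // "" (no entry "GO=...")
}
```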

@@ -6,10 +6,6 @@
package runtime
import (
"unsafe"
)
//var Fadd64 = fadd64
//var Fsub64 = fsub64
//var Fmul64 = fmul64
@@ -103,20 +99,6 @@ var HashLoad = &hashLoad
//type Uintreg uintreg
//extern __go_open
func open(path *byte, mode int32, perm int32) int32
func Open(path *byte, mode int32, perm int32) int32 {
return open(path, mode, perm)
}
//extern close
func close(int32) int32
func Close(fd int32) int32 {
return close(fd)
}
/*
func RunSchedLocalQueueTest() {
_p_ := new(p)
@@ -224,25 +206,13 @@ var IfaceHash = ifaceHash
var MemclrBytes = memclrBytes
*/
//extern read
func read(fd int32, buf unsafe.Pointer, size int32) int32
var Open = open
var Close = closefd
var Read = read
var Write = write
func Read(fd int32, buf unsafe.Pointer, size int32) int32 {
return read(fd, buf, size)
}
//extern write
func write(fd int32, buf unsafe.Pointer, size int32) int32
func Write(fd uintptr, buf unsafe.Pointer, size int32) int32 {
return write(int32(fd), buf, size)
}
func envs() []string
func setenvs([]string)
var Envs = envs
var SetEnvs = setenvs
func Envs() []string { return envs }
func SetEnvs(e []string) { envs = e }
//var BigEndian = sys.BigEndian
@@ -287,7 +257,10 @@ var ForceGCPeriod = &forcegcperiod
// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string)
func SetTracebackEnv(level string) {
setTraceback(level)
traceback_env = traceback_cache
}
/*
var ReadUnaligned32 = readUnaligned32

@@ -0,0 +1,56 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"runtime/internal/sys"
"unsafe"
)
const (
_AT_NULL = 0 // End of vector
_AT_PAGESZ = 6 // System physical page size
_AT_RANDOM = 25 // introduced in 2.6.29
)
func sysargs(argc int32, argv **byte) {
n := argc + 1
// skip over argv, envp to get to auxv
for argv_index(argv, n) != nil {
n++
}
// skip NULL separator
n++
// now argv+n is auxv
auxv := (*[1 << 28]uintptr)(add(unsafe.Pointer(argv), uintptr(n)*sys.PtrSize))
for i := 0; auxv[i] != _AT_NULL; i += 2 {
tag, val := auxv[i], auxv[i+1]
switch tag {
case _AT_RANDOM:
// The kernel provides a pointer to 16-bytes
// worth of random data.
startupRandomData = (*[16]byte)(unsafe.Pointer(val))[:]
case _AT_PAGESZ:
// Check that the true physical page size is
// compatible with the runtime's assumed
// physical page size.
if sys.PhysPageSize < val {
print("runtime: kernel page size (", val, ") is larger than runtime page size (", sys.PhysPageSize, ")\n")
exit(1)
}
if sys.PhysPageSize%val != 0 {
print("runtime: runtime page size (", sys.PhysPageSize, ") is not a multiple of kernel page size (", val, ")\n")
exit(1)
}
}
// Commented out for gccgo for now.
// archauxv(tag, val)
}
}
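sysargs walks past argv and envp to reach the ELF auxiliary vector: a run of tag/value uintptr pairs terminated by AT_NULL. The same walk on a synthetic vector (all values made up for illustration):

```go
package main

import "fmt"

const (
	_AT_NULL   = 0 // end of vector
	_AT_PAGESZ = 6 // system physical page size
)

func main() {
	// A synthetic auxv: tag/value pairs terminated by _AT_NULL, the shape
	// sysargs reaches after skipping argv and envp.
	auxv := []uintptr{_AT_PAGESZ, 4096, _AT_NULL}
	for i := 0; auxv[i] != _AT_NULL; i += 2 {
		tag, val := auxv[i], auxv[i+1]
		if tag == _AT_PAGESZ {
			fmt.Println("kernel page size:", val)
		}
	}
}
```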

@@ -0,0 +1,69 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"runtime/internal/atomic"
_ "unsafe" // for go:linkname
)
//go:generate go run wincallback.go
//go:generate go run mkduff.go
//go:generate go run mkfastlog2table.go
// For gccgo, while we still have C runtime code, use go:linkname to
// rename some functions to themselves, so that the compiler will
// export them.
//
//go:linkname tickspersecond runtime.tickspersecond
var ticks struct {
lock mutex
pad uint32 // ensure 8-byte alignment of val on 386
val uint64
}
// Note: Called by runtime/pprof in addition to runtime code.
func tickspersecond() int64 {
r := int64(atomic.Load64(&ticks.val))
if r != 0 {
return r
}
lock(&ticks.lock)
r = int64(ticks.val)
if r == 0 {
t0 := nanotime()
c0 := cputicks()
usleep(100 * 1000)
t1 := nanotime()
c1 := cputicks()
if t1 == t0 {
t1++
}
r = (c1 - c0) * 1000 * 1000 * 1000 / (t1 - t0)
if r == 0 {
r++
}
atomic.Store64(&ticks.val, uint64(r))
}
unlock(&ticks.lock)
return r
}
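tickspersecond calibrates cputicks against nanotime over a 100 ms sleep and caches the result. Its consumers convert tick deltas into durations; a small sketch with made-up numbers (ticksToNanos and the 2.4 GHz rate are illustrative, not runtime API):

```go
package main

import "fmt"

// ticksToNanos shows how a tickspersecond result is meant to be used:
// converting a delta of cputicks() readings into nanoseconds.
func ticksToNanos(deltaTicks, ticksPerSecond int64) int64 {
	return deltaTicks * 1000 * 1000 * 1000 / ticksPerSecond
}

func main() {
	const tps = 2400000000 // pretend calibration measured a 2.4 GHz tick rate
	fmt.Println(ticksToNanos(4800, tps), "ns") // 2000 ns
}
```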
var envs []string
var argslice []string
//go:linkname syscall_runtime_envs syscall.runtime_envs
func syscall_runtime_envs() []string { return append([]string{}, envs...) }
//go:linkname os_runtime_args os.runtime_args
func os_runtime_args() []string { return append([]string{}, argslice...) }
// Temporary, for the gccgo runtime code written in C.
//go:linkname get_envs runtime_get_envs
func get_envs() []string { return envs }
//go:linkname get_args runtime_get_args
func get_args() []string { return argslice }
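The go:linkname self-rename trick above gives these functions stable symbols that the remaining C runtime can call. For reference, a small sketch of the same directive used in the pull direction under the gc toolchain (runtime.nanotime is a private symbol; a bodiless declaration under gc also requires an empty .s file in the package):

```go
package main

import (
	"fmt"
	_ "unsafe" // required for go:linkname
)

// Bind a local declaration to the runtime's private monotonic clock by
// symbol name. The commit uses the same mechanism in the push direction,
// exporting Go functions under names the C half of the runtime expects.
//
//go:linkname nanotime runtime.nanotime
func nanotime() int64

func main() {
	t0 := nanotime()
	t1 := nanotime()
	fmt.Println("elapsed:", t1-t0, "ns")
}
```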

@@ -0,0 +1,509 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
import (
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
// For gccgo, while we still have C runtime code, use go:linkname to
// rename some functions to themselves, so that the compiler will
// export them.
//
//go:linkname gotraceback runtime.gotraceback
//go:linkname args runtime.args
//go:linkname goargs runtime.goargs
//go:linkname check runtime.check
//go:linkname goenvs_unix runtime.goenvs_unix
//go:linkname parsedebugvars runtime.parsedebugvars
//go:linkname timediv runtime.timediv
// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback value (0 off, 1 on, 2 include system).
const (
tracebackCrash = 1 << iota
tracebackAll
tracebackShift = iota
)
var traceback_cache uint32 = 2 << tracebackShift
var traceback_env uint32
// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
//
//go:nosplit
func gotraceback() (level int32, all, crash bool) {
_g_ := getg()
all = _g_.m.throwing > 0
if _g_.m.traceback != 0 {
level = int32(_g_.m.traceback)
return
}
t := atomic.Load(&traceback_cache)
crash = t&tracebackCrash != 0
all = all || t&tracebackAll != 0
level = int32(t >> tracebackShift)
return
}
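The encoding is compact enough to show in isolation. pack and unpack below are illustrative helpers, not runtime functions; they use the same constants as runtime1.go, so one atomic load recovers all three settings:

```go
package main

import "fmt"

const (
	tracebackCrash = 1 << iota // 1: crash after traceback
	tracebackAll               // 2: print all goroutines
	tracebackShift = iota      // 2: level lives in the remaining bits
)

func pack(level uint32, all, crash bool) uint32 {
	t := level << tracebackShift
	if all {
		t |= tracebackAll
	}
	if crash {
		t |= tracebackCrash
	}
	return t
}

func unpack(t uint32) (level uint32, all, crash bool) {
	return t >> tracebackShift, t&tracebackAll != 0, t&tracebackCrash != 0
}

func main() {
	t := pack(2, true, true) // what GOTRACEBACK=crash produces
	fmt.Println(unpack(t))   // 2 true true
}
```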
var (
argc int32
argv **byte
)
// nosplit for use in linux startup sysargs
//go:nosplit
func argv_index(argv **byte, i int32) *byte {
return *(**byte)(add(unsafe.Pointer(argv), uintptr(i)*sys.PtrSize))
}
func args(c int32, v **byte) {
argc = c
argv = v
sysargs(c, v)
}
func goargs() {
if GOOS == "windows" {
return
}
argslice = make([]string, argc)
for i := int32(0); i < argc; i++ {
argslice[i] = gostringnocopy(argv_index(argv, i))
}
}
func goenvs_unix() {
// TODO(austin): ppc64 in dynamic linking mode doesn't
// guarantee env[] will immediately follow argv. Might cause
// problems.
n := int32(0)
for argv_index(argv, argc+1+n) != nil {
n++
}
envs = make([]string, n)
for i := int32(0); i < n; i++ {
envs[i] = gostring(argv_index(argv, argc+1+i))
}
}
func environ() []string {
return envs
}
// TODO: These should be locals in testAtomic64, but we don't 8-byte
// align stack variables on 386.
var test_z64, test_x64 uint64
func testAtomic64() {
test_z64 = 42
test_x64 = 0
// prefetcht0(uintptr(unsafe.Pointer(&test_z64)))
// prefetcht1(uintptr(unsafe.Pointer(&test_z64)))
// prefetcht2(uintptr(unsafe.Pointer(&test_z64)))
// prefetchnta(uintptr(unsafe.Pointer(&test_z64)))
if atomic.Cas64(&test_z64, test_x64, 1) {
throw("cas64 failed")
}
if test_x64 != 0 {
throw("cas64 failed")
}
test_x64 = 42
if !atomic.Cas64(&test_z64, test_x64, 1) {
throw("cas64 failed")
}
if test_x64 != 42 || test_z64 != 1 {
throw("cas64 failed")
}
if atomic.Load64(&test_z64) != 1 {
throw("load64 failed")
}
atomic.Store64(&test_z64, (1<<40)+1)
if atomic.Load64(&test_z64) != (1<<40)+1 {
throw("store64 failed")
}
if atomic.Xadd64(&test_z64, (1<<40)+1) != (2<<40)+2 {
throw("xadd64 failed")
}
if atomic.Load64(&test_z64) != (2<<40)+2 {
throw("xadd64 failed")
}
if atomic.Xchg64(&test_z64, (3<<40)+3) != (2<<40)+2 {
throw("xchg64 failed")
}
if atomic.Load64(&test_z64) != (3<<40)+3 {
throw("xchg64 failed")
}
}
func check() {
// This doesn't currently work for gccgo. Because escape
// analysis is not turned on by default, the code below that
// takes the address of local variables causes memory
// allocation, but this function is called before the memory
// allocator has been initialized.
return
var (
a int8
b uint8
c int16
d uint16
e int32
f uint32
g int64
h uint64
i, i1 float32
j, j1 float64
k, k1 unsafe.Pointer
l *uint16
m [4]byte
)
type x1t struct {
x uint8
}
type y1t struct {
x1 x1t
y uint8
}
var x1 x1t
var y1 y1t
if unsafe.Sizeof(a) != 1 {
throw("bad a")
}
if unsafe.Sizeof(b) != 1 {
throw("bad b")
}
if unsafe.Sizeof(c) != 2 {
throw("bad c")
}
if unsafe.Sizeof(d) != 2 {
throw("bad d")
}
if unsafe.Sizeof(e) != 4 {
throw("bad e")
}
if unsafe.Sizeof(f) != 4 {
throw("bad f")
}
if unsafe.Sizeof(g) != 8 {
throw("bad g")
}
if unsafe.Sizeof(h) != 8 {
throw("bad h")
}
if unsafe.Sizeof(i) != 4 {
throw("bad i")
}
if unsafe.Sizeof(j) != 8 {
throw("bad j")
}
if unsafe.Sizeof(k) != sys.PtrSize {
throw("bad k")
}
if unsafe.Sizeof(l) != sys.PtrSize {
throw("bad l")
}
if unsafe.Sizeof(x1) != 1 {
throw("bad unsafe.Sizeof x1")
}
if unsafe.Offsetof(y1.y) != 1 {
throw("bad offsetof y1.y")
}
if unsafe.Sizeof(y1) != 2 {
throw("bad unsafe.Sizeof y1")
}
if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
throw("bad timediv")
}
var z uint32
z = 1
if !atomic.Cas(&z, 1, 2) {
throw("cas1")
}
if z != 2 {
throw("cas2")
}
z = 4
if atomic.Cas(&z, 5, 6) {
throw("cas3")
}
if z != 4 {
throw("cas4")
}
z = 0xffffffff
if !atomic.Cas(&z, 0xffffffff, 0xfffffffe) {
throw("cas5")
}
if z != 0xfffffffe {
throw("cas6")
}
k = unsafe.Pointer(uintptr(0xfedcb123))
if sys.PtrSize == 8 {
k = unsafe.Pointer(uintptr(k) << 10)
}
if casp(&k, nil, nil) {
throw("casp1")
}
k1 = add(k, 1)
if !casp(&k, k, k1) {
throw("casp2")
}
if k != k1 {
throw("casp3")
}
m = [4]byte{1, 1, 1, 1}
atomic.Or8(&m[1], 0xf0)
if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
throw("atomicor8")
}
*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
if j == j {
throw("float64nan")
}
if !(j != j) {
throw("float64nan1")
}
*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
if j == j1 {
throw("float64nan2")
}
if !(j != j1) {
throw("float64nan3")
}
*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
if i == i {
throw("float32nan")
}
if !(i != i) {
throw("float32nan1")
}
*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
if i == i1 {
throw("float32nan2")
}
if !(i != i1) {
throw("float32nan3")
}
testAtomic64()
// if _FixedStack != round2(_FixedStack) {
// throw("FixedStack is not power-of-2")
// }
if !checkASM() {
throw("assembly checks failed")
}
}
type dbgVar struct {
name string
value *int32
}
// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing int var for that value, which may
// already have an initial value.
// For gccgo we use a named type so that the C code can see the
// definition.
type debugVars struct {
allocfreetrace int32
cgocheck int32
efence int32
gccheckmark int32
gcpacertrace int32
gcshrinkstackoff int32
gcstackbarrieroff int32
gcstackbarrierall int32
gcstoptheworld int32
gctrace int32
invalidptr int32
sbrk int32
scavenge int32
scheddetail int32
schedtrace int32
wbshadow int32
}
var debug debugVars
// For gccgo's C code.
//extern runtime_setdebug
func runtime_setdebug(*debugVars)
var dbgvars = []dbgVar{
{"allocfreetrace", &debug.allocfreetrace},
{"cgocheck", &debug.cgocheck},
{"efence", &debug.efence},
{"gccheckmark", &debug.gccheckmark},
{"gcpacertrace", &debug.gcpacertrace},
{"gcshrinkstackoff", &debug.gcshrinkstackoff},
{"gcstackbarrieroff", &debug.gcstackbarrieroff},
{"gcstackbarrierall", &debug.gcstackbarrierall},
{"gcstoptheworld", &debug.gcstoptheworld},
{"gctrace", &debug.gctrace},
{"invalidptr", &debug.invalidptr},
{"sbrk", &debug.sbrk},
{"scavenge", &debug.scavenge},
{"scheddetail", &debug.scheddetail},
{"schedtrace", &debug.schedtrace},
{"wbshadow", &debug.wbshadow},
}
func parsedebugvars() {
// defaults
debug.cgocheck = 1
debug.invalidptr = 1
for p := gogetenv("GODEBUG"); p != ""; {
field := ""
i := index(p, ",")
if i < 0 {
field, p = p, ""
} else {
field, p = p[:i], p[i+1:]
}
i = index(field, "=")
if i < 0 {
continue
}
key, value := field[:i], field[i+1:]
// Update MemProfileRate directly here since it
// is int, not int32, and should only be updated
// if specified in GODEBUG.
if key == "memprofilerate" {
MemProfileRate = atoi(value)
} else {
for _, v := range dbgvars {
if v.name == key {
*v.value = int32(atoi(value))
}
}
}
}
setTraceback(gogetenv("GOTRACEBACK"))
traceback_env = traceback_cache
// if debug.gcstackbarrierall > 0 {
// firstStackBarrierOffset = 0
// }
// For cgocheck > 1, we turn on the write barrier at all times
// and check all pointer writes.
if debug.cgocheck > 1 {
writeBarrier.cgo = true
writeBarrier.enabled = true
}
// Tell the C code what the value is.
runtime_setdebug(&debug)
}
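The GODEBUG scan above is open-coded with the runtime's own index and atoi helpers, since the runtime cannot import strings or strconv. A standalone sketch of the same field-splitting using the standard library (parse is illustrative):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parse mimics parsedebugvars' treatment of a GODEBUG-style string:
// comma-separated fields, each "key=value"; fields without '=' are skipped.
func parse(godebug string) map[string]int {
	vars := map[string]int{}
	for _, field := range strings.Split(godebug, ",") {
		i := strings.Index(field, "=")
		if i < 0 {
			continue
		}
		n, _ := strconv.Atoi(field[i+1:])
		vars[field[:i]] = n
	}
	return vars
}

func main() {
	fmt.Println(parse("gctrace=1,schedtrace=1000,bogus"))
	// map[gctrace:1 schedtrace:1000]
}
```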
//go:linkname setTraceback runtime_debug.SetTraceback
func setTraceback(level string) {
var t uint32
switch level {
case "none":
t = 0
case "single", "":
t = 1 << tracebackShift
case "all":
t = 1<<tracebackShift | tracebackAll
case "system":
t = 2<<tracebackShift | tracebackAll
case "crash":
t = 2<<tracebackShift | tracebackAll | tracebackCrash
default:
t = uint32(atoi(level))<<tracebackShift | tracebackAll
}
// when C owns the process, simply exit'ing the process on fatal errors
// and panics is surprising. Be louder and abort instead.
if islibrary || isarchive {
t |= tracebackCrash
}
t |= traceback_env
atomic.Store(&traceback_cache, t)
}
// Poor man's 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
//go:nosplit
func timediv(v int64, div int32, rem *int32) int32 {
res := int32(0)
for bit := 30; bit >= 0; bit-- {
if v >= int64(div)<<uint(bit) {
v = v - (int64(div) << uint(bit))
res += 1 << uint(bit)
}
}
if v >= int64(div) {
if rem != nil {
*rem = 0
}
return 0x7fffffff
}
if rem != nil {
*rem = int32(v)
}
return res
}
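The routine divides by repeated subtraction of shifted divisors, so it stays safe for nosplit contexts on 386 where a real 64-bit divide would become a call. A standalone copy, run on the same case that check() exercises:

```go
package main

import "fmt"

// timediv as in runtime1.go: 64-by-32 division via shift-and-subtract.
func timediv(v int64, div int32, rem *int32) int32 {
	res := int32(0)
	for bit := 30; bit >= 0; bit-- {
		if v >= int64(div)<<uint(bit) {
			v = v - (int64(div) << uint(bit))
			res += 1 << uint(bit)
		}
	}
	if v >= int64(div) { // quotient would not fit in 31 bits
		if rem != nil {
			*rem = 0
		}
		return 0x7fffffff
	}
	if rem != nil {
		*rem = int32(v)
	}
	return res
}

func main() {
	var rem int32
	// Split nanoseconds into whole seconds plus remainder.
	fmt.Println(timediv(12345*1000000000+54321, 1000000000, &rem), rem) // 12345 54321
}
```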
// Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block.
//go:nosplit
func acquirem() *m {
_g_ := getg()
_g_.m.locks++
return _g_.m
}
//go:nosplit
func releasem(mp *m) {
// _g_ := getg()
mp.locks--
// if mp.locks == 0 && _g_.preempt {
// // restore the preemption request in case we've cleared it in newstack
// _g_.stackguard0 = stackPreempt
// }
}
//go:nosplit
func gomcache() *mcache {
return getg().m.mcache
}
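acquirem and releasem pin a goroutine to its m by bumping m.locks, so per-m state like gomcache() stays stable while held. Outside the runtime, the closest user-visible relative is runtime.LockOSThread; the sketch below is an analogy only, not the same mechanism:

```go
package main

import (
	"fmt"
	"runtime"
)

// LockOSThread pins the calling goroutine to its OS thread until the
// matching unlock, much as m.locks++ pins a goroutine to its m inside
// the runtime.
func main() {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	fmt.Println("pinned: thread-local state is stable in this region")
}
```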

@@ -678,11 +678,11 @@ type forcegcstate struct {
idle uint32
}
/*
// startup_random_data holds random bytes initialized at startup. These come from
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte
/*
// extendRandom extends the random numbers in r[:n] to the whole slice r.
// Treats n<0 as n==0.
func extendRandom(r []byte, n int) {
@@ -797,8 +797,8 @@ var (
// Set by the linker so the runtime can determine the buildmode.
var (
// islibrary bool // -buildmode=c-shared
// isarchive bool // -buildmode=c-archive
islibrary bool // -buildmode=c-shared
isarchive bool // -buildmode=c-archive
)
// Types that are only used by gccgo.

@@ -5,6 +5,7 @@
package runtime
import (
"runtime/internal/atomic"
"runtime/internal/sys"
"unsafe"
)
@@ -209,10 +210,10 @@ func round(n, a uintptr) uintptr {
return (n + a - 1) &^ (a - 1)
}
/*
// checkASM returns whether assembly runtime checks have passed.
func checkASM() bool
*/
func checkASM() bool {
return true
}
// throw crashes the program.
// For gccgo unless and until we port panic.go.
@@ -251,3 +252,119 @@ type stringStruct struct {
func stringStructOf(sp *string) *stringStruct {
return (*stringStruct)(unsafe.Pointer(sp))
}
// Here for gccgo unless and until we port slice.go.
type slice struct {
array unsafe.Pointer
len int
cap int
}
// Here for gccgo until we port malloc.go.
const (
_64bit = 1 << (^uintptr(0) >> 63) / 2
_MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*32
_MaxMem = uintptr(1<<_MHeapMap_TotalBits - 1)
)
// Here for gccgo until we port malloc.go.
//extern runtime_mallocgc
func c_mallocgc(size uintptr, typ uintptr, flag uint32) unsafe.Pointer
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
flag := uint32(0)
if !needzero {
flag = 1 << 3
}
return c_mallocgc(size, uintptr(unsafe.Pointer(typ)), flag)
}
// Here for gccgo unless and until we port string.go.
func rawstring(size int) (p unsafe.Pointer, s string) {
p = mallocgc(uintptr(size), nil, false)
(*(*stringStruct)(unsafe.Pointer(&s))).str = p
(*(*stringStruct)(unsafe.Pointer(&s))).len = size
return
}
// Here for gccgo unless and until we port string.go.
func gostring(p *byte) string {
l := findnull(p)
if l == 0 {
return ""
}
m, s := rawstring(l)
memmove(m, unsafe.Pointer(p), uintptr(l))
return s
}
// Here for gccgo unless and until we port string.go.
func index(s, t string) int {
if len(t) == 0 {
return 0
}
for i := 0; i < len(s); i++ {
if s[i] == t[0] && hasprefix(s[i:], t) {
return i
}
}
return -1
}
// Here for gccgo unless and until we port string.go.
func hasprefix(s, t string) bool {
return len(s) >= len(t) && s[:len(t)] == t
}
// Here for gccgo unless and until we port string.go.
//go:nosplit
func findnull(s *byte) int {
if s == nil {
return 0
}
p := (*[_MaxMem/2 - 1]byte)(unsafe.Pointer(s))
l := 0
for p[l] != 0 {
l++
}
return l
}
// Here for gccgo unless and until we port string.go.
//go:nosplit
func gostringnocopy(str *byte) string {
ss := stringStruct{str: unsafe.Pointer(str), len: findnull(str)}
return *(*string)(unsafe.Pointer(&ss))
}
// Here for gccgo unless and until we port string.go.
func atoi(s string) int {
n := 0
for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
n = n*10 + int(s[0]) - '0'
s = s[1:]
}
return n
}
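These transitional helpers mirror strings.Index, strings.HasPrefix, and a digits-only Atoi, which the runtime cannot import. A standalone check of how parsedebugvars combines them (sample input made up):

```go
package main

import "fmt"

// Standalone copies of the stubs.go helpers, exercised on one GODEBUG field.
func hasprefix(s, t string) bool {
	return len(s) >= len(t) && s[:len(t)] == t
}

func index(s, t string) int {
	if len(t) == 0 {
		return 0
	}
	for i := 0; i < len(s); i++ {
		if s[i] == t[0] && hasprefix(s[i:], t) {
			return i
		}
	}
	return -1
}

func atoi(s string) int {
	n := 0
	for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
		n = n*10 + int(s[0]) - '0'
		s = s[1:]
	}
	return n
}

func main() {
	field := "schedtrace=1000"
	i := index(field, "=")
	fmt.Println(field[:i], atoi(field[i+1:])) // schedtrace 1000
}
```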
// Here for gccgo until we port mgc.go.
var writeBarrier struct {
enabled bool // compiler emits a check of this before calling write barrier
needed bool // whether we need a write barrier for current GC phase
cgo bool // whether we need a write barrier for a cgo check
alignme uint64 // guarantee alignment so that compiler can use a 32 or 64-bit load
}
// Here for gccgo until we port atomic_pointer.go and mgc.go.
//go:nosplit
func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
if !atomic.Casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new) {
return false
}
return true
}
// Here for gccgo until we port lock_*.go.
func lock(l *mutex)
func unlock(l *mutex)

@@ -0,0 +1,29 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !plan9
// +build !windows
// +build !nacl
package runtime
import "unsafe"
func read(fd int32, p unsafe.Pointer, n int32) int32
func closefd(fd int32) int32
//extern exit
func exit(code int32)
func nanotime() int64
func usleep(usec uint32)
func munmap(addr unsafe.Pointer, n uintptr)
//go:noescape
func write(fd uintptr, p unsafe.Pointer, n int32) int32
//go:noescape
func open(name *byte, mode, perm int32) int32
func madvise(addr unsafe.Pointer, n uintptr, flags int32)
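These bodiless declarations are satisfied by the C runtime; the binding mechanism is the //extern comment seen elsewhere in this diff. A minimal illustration of that gccgo-specific pattern, calling libc's write through such a declaration (builds with gccgo only, not the gc toolchain):

```go
package main

import "unsafe"

// The //extern comment makes the bodiless Go declaration resolve to the
// named C symbol, here libc's write(2), with the int32 signature style
// this diff uses for its own extern declarations.
//
//extern write
func write(fd int32, p unsafe.Pointer, n int32) int32

func main() {
	msg := []byte("hello from a C symbol\n")
	write(1, unsafe.Pointer(&msg[0]), int32(len(msg)))
}
```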

@@ -0,0 +1,10 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !linux
package runtime
func sysargs(argc int32, argv **byte) {
}

@@ -9,7 +9,7 @@
#include "arch.h"
#include "malloc.h"
extern Slice envs;
extern Slice runtime_get_envs(void);
String
runtime_getenv(const char *s)
@@ -17,12 +17,14 @@ runtime_getenv(const char *s)
int32 i, j;
intgo len;
const byte *v, *bs;
Slice envs;
String* envv;
int32 envc;
String ret;
bs = (const byte*)s;
len = runtime_findnull(bs);
envs = runtime_get_envs();
envv = (String*)envs.__values;
envc = envs.__count;
for(i=0; i<envc; i++){

@@ -76,6 +76,10 @@ static void *back_state;
static Lock back_state_lock;
/* The program arguments. */
extern Slice runtime_get_args(void);
/* Fetch back_state, creating it if necessary. */
struct backtrace_state *
@@ -84,15 +88,19 @@ __go_get_backtrace_state ()
runtime_lock (&back_state_lock);
if (back_state == NULL)
{
Slice args;
const char *filename;
struct stat s;
filename = (const char *) runtime_progname ();
args = runtime_get_args();
filename = NULL;
if (args.__count > 0)
filename = (const char*)((String*)args.__values)[0].str;
/* If there is no '/' in FILENAME, it was found on PATH, and
might not be the same as the file with the same name in the
current directory. */
if (__builtin_strchr (filename, '/') == NULL)
if (filename != NULL && __builtin_strchr (filename, '/') == NULL)
filename = NULL;
/* If the file is small, then it's not the real executable.

@@ -15,118 +15,27 @@ enum {
maxround = sizeof(uintptr),
};
// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bit
// is the "crash" setting and the top 31 bits are the
// gotraceback value.
enum {
tracebackCrash = 1 << 0,
tracebackAll = 1 << 1,
tracebackShift = 2,
};
static uint32 traceback_cache = 2 << tracebackShift;
static uint32 traceback_env;
extern volatile intgo runtime_MemProfileRate
__asm__ (GOSYM_PREFIX "runtime.MemProfileRate");
struct gotraceback_ret {
int32 level;
bool crash;
};
// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
extern struct gotraceback_ret gotraceback(void)
__asm__ (GOSYM_PREFIX "runtime.gotraceback");
// runtime_gotraceback is the C interface to runtime.gotraceback.
int32
runtime_gotraceback(bool *crash)
{
uint32 x;
struct gotraceback_ret r;
r = gotraceback();
if(crash != nil)
*crash = false;
if(runtime_m()->traceback != 0)
return runtime_m()->traceback;
x = runtime_atomicload(&traceback_cache);
if(crash != nil)
*crash = x&tracebackCrash;
return x>>tracebackShift;
}
static int32 argc;
static byte** argv;
static Slice args;
Slice envs;
void (*runtime_sysargs)(int32, uint8**);
void
runtime_args(int32 c, byte **v)
{
argc = c;
argv = v;
if(runtime_sysargs != nil)
runtime_sysargs(c, v);
}
byte*
runtime_progname()
{
return argc == 0 ? nil : argv[0];
}
void
runtime_goargs(void)
{
String *s;
int32 i;
// for windows implementation see "os" package
if(Windows)
return;
s = runtime_malloc(argc*sizeof s[0]);
for(i=0; i<argc; i++)
s[i] = runtime_gostringnocopy((const byte*)argv[i]);
args.__values = (void*)s;
args.__count = argc;
args.__capacity = argc;
}
void
runtime_goenvs_unix(void)
{
String *s;
int32 i, n;
for(n=0; argv[argc+1+n] != 0; n++)
;
s = runtime_malloc(n*sizeof s[0]);
for(i=0; i<n; i++)
s[i] = runtime_gostringnocopy(argv[argc+1+i]);
envs.__values = (void*)s;
envs.__count = n;
envs.__capacity = n;
}
// Called from the syscall package.
Slice runtime_envs(void) __asm__ (GOSYM_PREFIX "syscall.runtime_envs");
Slice
runtime_envs()
{
return envs;
}
Slice os_runtime_args(void) __asm__ (GOSYM_PREFIX "os.runtime_args");
Slice
os_runtime_args()
{
return args;
*crash = r.crash;
return r.level;
}
int32
@@ -142,53 +51,6 @@ runtime_atoi(const byte *p, intgo len)
return n;
}
static struct root_list runtime_roots =
{ nil,
{ { &envs, sizeof envs },
{ &args, sizeof args },
{ nil, 0 } },
};
static void
TestAtomic64(void)
{
uint64 z64, x64;
z64 = 42;
x64 = 0;
PREFETCH(&z64);
if(runtime_cas64(&z64, x64, 1))
runtime_throw("cas64 failed");
if(x64 != 0)
runtime_throw("cas64 failed");
x64 = 42;
if(!runtime_cas64(&z64, x64, 1))
runtime_throw("cas64 failed");
if(x64 != 42 || z64 != 1)
runtime_throw("cas64 failed");
if(runtime_atomicload64(&z64) != 1)
runtime_throw("load64 failed");
runtime_atomicstore64(&z64, (1ull<<40)+1);
if(runtime_atomicload64(&z64) != (1ull<<40)+1)
runtime_throw("store64 failed");
if(runtime_xadd64(&z64, (1ull<<40)+1) != (2ull<<40)+2)
runtime_throw("xadd64 failed");
if(runtime_atomicload64(&z64) != (2ull<<40)+2)
runtime_throw("xadd64 failed");
if(runtime_xchg64(&z64, (3ull<<40)+3) != (2ull<<40)+2)
runtime_throw("xchg64 failed");
if(runtime_atomicload64(&z64) != (3ull<<40)+3)
runtime_throw("xchg64 failed");
}
void
runtime_check(void)
{
__go_register_gc_roots(&runtime_roots);
TestAtomic64();
}
uint32
runtime_fastrand1(void)
{
@@ -220,8 +82,10 @@ runtime_cputicks(void)
asm volatile(".insn s,0xb27c0000,%0" /* stckf */ : "+Q" (clock) : : "cc" );
return (int64)clock;
#else
// FIXME: implement for other processors.
return 0;
// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand1().
// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
// TODO: need more entropy to better seed fastrand1.
return runtime_nanotime();
#endif
}
@@ -237,36 +101,6 @@ runtime_showframe(String s, bool current)
return traceback > 1 || (__builtin_memchr(s.str, '.', s.len) != nil && __builtin_memcmp(s.str, "runtime.", 7) != 0);
}
static Lock ticksLock;
static int64 ticks;
int64
runtime_tickspersecond(void)
{
int64 res, t0, t1, c0, c1;
res = (int64)runtime_atomicload64((uint64*)&ticks);
if(res != 0)
return ticks;
runtime_lock(&ticksLock);
res = ticks;
if(res == 0) {
t0 = runtime_nanotime();
c0 = runtime_cputicks();
runtime_usleep(100*1000);
t1 = runtime_nanotime();
c1 = runtime_cputicks();
if(t1 == t0)
t1++;
res = (c1-c0)*1000*1000*1000/(t1-t0);
if(res == 0)
res++;
runtime_atomicstore64((uint64*)&ticks, res);
}
runtime_unlock(&ticksLock);
return res;
}
// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
void
@@ -321,131 +155,11 @@ runtime_signalstack(byte *p, int32 n)
*(int *)0xf1 = 0xf1;
}
void setTraceback(String level)
__asm__ (GOSYM_PREFIX "runtime_debug.SetTraceback");
void setTraceback(String level) {
uint32 t;
if (level.len == 4 && __builtin_memcmp(level.str, "none", 4) == 0) {
t = 0;
} else if (level.len == 0 || (level.len == 6 && __builtin_memcmp(level.str, "single", 6) == 0)) {
t = 1 << tracebackShift;
} else if (level.len == 3 && __builtin_memcmp(level.str, "all", 3) == 0) {
t = (1<<tracebackShift) | tracebackAll;
} else if (level.len == 6 && __builtin_memcmp(level.str, "system", 6) == 0) {
t = (2<<tracebackShift) | tracebackAll;
} else if (level.len == 5 && __builtin_memcmp(level.str, "crash", 5) == 0) {
t = (2<<tracebackShift) | tracebackAll | tracebackCrash;
} else {
t = (runtime_atoi(level.str, level.len)<<tracebackShift) | tracebackAll;
}
t |= traceback_env;
runtime_atomicstore(&traceback_cache, t);
}
DebugVars runtime_debug;
// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing var for that value which is int
// instead of int32 and might have an
// initial value.
static struct {
const char* name;
int32* value;
} dbgvar[] = {
{"allocfreetrace", &runtime_debug.allocfreetrace},
{"cgocheck", &runtime_debug.cgocheck},
{"efence", &runtime_debug.efence},
{"gccheckmark", &runtime_debug.gccheckmark},
{"gcpacertrace", &runtime_debug.gcpacertrace},
{"gcshrinkstackoff", &runtime_debug.gcshrinkstackoff},
{"gcstackbarrieroff", &runtime_debug.gcstackbarrieroff},
{"gcstackbarrierall", &runtime_debug.gcstackbarrierall},
{"gcstoptheworld", &runtime_debug.gcstoptheworld},
{"gctrace", &runtime_debug.gctrace},
{"gcdead", &runtime_debug.gcdead},
{"invalidptr", &runtime_debug.invalidptr},
{"sbrk", &runtime_debug.sbrk},
{"scavenge", &runtime_debug.scavenge},
{"scheddetail", &runtime_debug.scheddetail},
{"schedtrace", &runtime_debug.schedtrace},
{"wbshadow", &runtime_debug.wbshadow},
};
struct debugVars runtime_debug;
void
runtime_parsedebugvars(void)
{
String s;
const byte *p, *pn;
intgo len;
intgo i, n;
s = runtime_getenv("GODEBUG");
if(s.len == 0)
return;
p = s.str;
len = s.len;
for(;;) {
for(i=0; i<(intgo)nelem(dbgvar); i++) {
n = runtime_findnull((const byte*)dbgvar[i].name);
if(len > n && runtime_mcmp(p, "memprofilerate", n) == 0 && p[n] == '=')
// Set the MemProfileRate directly since it
// is an int, not int32, and should only be
// set here if specified by GODEBUG
runtime_MemProfileRate = runtime_atoi(p+n+1, len-(n+1));
else if(len > n && runtime_mcmp(p, dbgvar[i].name, n) == 0 && p[n] == '=')
*dbgvar[i].value = runtime_atoi(p+n+1, len-(n+1));
}
pn = (const byte *)runtime_strstr((const char *)p, ",");
if(pn == nil || pn - p >= len)
break;
len -= (pn - p) - 1;
p = pn + 1;
}
setTraceback(runtime_getenv("GOTRACEBACK"));
traceback_env = traceback_cache;
}
// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
void SetTracebackEnv(String level)
__asm__ (GOSYM_PREFIX "runtime.SetTracebackEnv");
void SetTracebackEnv(String level) {
setTraceback(level);
traceback_env = traceback_cache;
}
// Poor man's 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
int32
runtime_timediv(int64 v, int32 div, int32 *rem)
{
int32 res, bit;
if(v >= (int64)div*0x7fffffffLL) {
if(rem != nil)
*rem = 0;
return 0x7fffffff;
}
res = 0;
for(bit = 30; bit >= 0; bit--) {
if(v >= ((int64)div<<bit)) {
v = v - ((int64)div<<bit);
res += 1<<bit;
}
}
if(rem != nil)
*rem = v;
return res;
runtime_setdebug(struct debugVars* d) {
runtime_debug = *d;
}
// Setting the max stack size doesn't really do anything for gccgo.
@@ -460,3 +174,39 @@ memclrBytes(Slice s)
{
runtime_memclr(s.__values, s.__count);
}
int32 go_open(char *, int32, int32)
__asm__ (GOSYM_PREFIX "runtime.open");
int32
go_open(char *name, int32 mode, int32 perm)
{
return runtime_open(name, mode, perm);
}
int32 go_read(int32, void *, int32)
__asm__ (GOSYM_PREFIX "runtime.read");
int32
go_read(int32 fd, void *p, int32 n)
{
return runtime_read(fd, p, n);
}
int32 go_write(uintptr, void *, int32)
__asm__ (GOSYM_PREFIX "runtime.write");
int32
go_write(uintptr fd, void *p, int32 n)
{
return runtime_write(fd, p, n);
}
int32 go_closefd(int32)
__asm__ (GOSYM_PREFIX "runtime.closefd");
int32
go_closefd(int32 fd)
{
return runtime_close(fd);
}

@@ -75,7 +75,6 @@ typedef struct ParFor ParFor;
typedef struct ParForThread ParForThread;
typedef struct cgoMal CgoMal;
typedef struct PollDesc PollDesc;
typedef struct DebugVars DebugVars;
typedef struct __go_open_array Slice;
typedef struct __go_interface Iface;
@@ -115,7 +114,8 @@ struct FuncVal
* Per-CPU declaration.
*/
extern M* runtime_m(void);
extern G* runtime_g(void);
extern G* runtime_g(void)
__asm__(GOSYM_PREFIX "runtime.getg");
extern M runtime_m0;
extern G runtime_g0;
@@ -240,28 +240,6 @@ struct ParFor
uint64 nsleep;
};
// Holds variables parsed from GODEBUG env var.
struct DebugVars
{
int32 allocfreetrace;
int32 cgocheck;
int32 efence;
int32 gccheckmark;
int32 gcpacertrace;
int32 gcshrinkstackoff;
int32 gcstackbarrieroff;
int32 gcstackbarrierall;
int32 gcstoptheworld;
int32 gctrace;
int32 gcdead;
int32 invalidptr;
int32 sbrk;
int32 scavenge;
int32 scheddetail;
int32 schedtrace;
int32 wbshadow;
};
extern bool runtime_precisestack;
extern bool runtime_copystack;
@@ -309,7 +287,7 @@ extern int8* runtime_goos;
extern int32 runtime_ncpu;
extern void (*runtime_sysargs)(int32, uint8**);
extern uint32 runtime_Hchansize;
extern DebugVars runtime_debug;
extern struct debugVars runtime_debug;
extern uintptr runtime_maxstacksize;
extern bool runtime_isstarted;
@@ -327,11 +305,14 @@ void runtime_dump(byte*, int32);
void runtime_gogo(G*);
struct __go_func_type;
void runtime_args(int32, byte**);
void runtime_args(int32, byte**)
__asm__ (GOSYM_PREFIX "runtime.args");
void runtime_osinit();
void runtime_goargs(void);
void runtime_goargs(void)
__asm__ (GOSYM_PREFIX "runtime.goargs");
void runtime_goenvs(void);
void runtime_goenvs_unix(void);
void runtime_goenvs_unix(void)
__asm__ (GOSYM_PREFIX "runtime.goenvs_unix");
void runtime_throw(const char*) __attribute__ ((noreturn));
void runtime_panicstring(const char*) __attribute__ ((noreturn));
bool runtime_canpanic(G*);
@@ -377,7 +358,8 @@ int32 runtime_mcount(void);
int32 runtime_gcount(void);
void runtime_mcall(void(*)(G*));
uint32 runtime_fastrand1(void) __asm__ (GOSYM_PREFIX "runtime.fastrand1");
int32 runtime_timediv(int64, int32, int32*);
int32 runtime_timediv(int64, int32, int32*)
__asm__ (GOSYM_PREFIX "runtime.timediv");
int32 runtime_round2(int32 x); // round x up to a power of 2.
// atomic operations
@@ -417,7 +399,8 @@ G* __go_go(void (*pfn)(void*), void*);
void siginit(void);
bool __go_sigsend(int32 sig);
int32 runtime_callers(int32, Location*, int32, bool keep_callers);
int64 runtime_nanotime(void); // monotonic time
int64 runtime_nanotime(void) // monotonic time
__asm__(GOSYM_PREFIX "runtime.nanotime");
int64 runtime_unixnanotime(void); // real time, can skip
void runtime_dopanic(int32) __attribute__ ((noreturn));
void runtime_startpanic(void);
@@ -426,9 +409,12 @@ void runtime_unwindstack(G*, byte*);
void runtime_sigprof();
void runtime_resetcpuprofiler(int32);
void runtime_setcpuprofilerate(void(*)(uintptr*, int32), int32);
void runtime_usleep(uint32);
int64 runtime_cputicks(void);
int64 runtime_tickspersecond(void);
void runtime_usleep(uint32)
__asm__ (GOSYM_PREFIX "runtime.usleep");
int64 runtime_cputicks(void)
__asm__ (GOSYM_PREFIX "runtime.cputicks");
int64 runtime_tickspersecond(void)
__asm__ (GOSYM_PREFIX "runtime.tickspersecond");
void runtime_blockevent(int64, int32);
extern int64 runtime_blockprofilerate;
void runtime_addtimer(Timer*);
@@ -445,7 +431,8 @@ bool runtime_netpollclosing(PollDesc*);
void runtime_netpolllock(PollDesc*);
void runtime_netpollunlock(PollDesc*);
void runtime_crash(void);
void runtime_parsedebugvars(void);
void runtime_parsedebugvars(void)
__asm__(GOSYM_PREFIX "runtime.parsedebugvars");
void _rt0_go(void);
void* runtime_funcdata(Func*, int32);
int32 runtime_setmaxthreads(int32);
@@ -462,8 +449,10 @@ extern uint32 runtime_worldsema;
* but on the contention path they sleep in the kernel.
* a zeroed Lock is unlocked (no need to initialize each lock).
*/
void runtime_lock(Lock*);
void runtime_unlock(Lock*);
void runtime_lock(Lock*)
__asm__(GOSYM_PREFIX "runtime.lock");
void runtime_unlock(Lock*)
__asm__(GOSYM_PREFIX "runtime.unlock");
/*
* sleep and wakeup on one-time events.
@@ -609,7 +598,8 @@ enum
#define runtime_setitimer setitimer
void runtime_check(void);
void runtime_check(void)
__asm__ (GOSYM_PREFIX "runtime.check");
// A list of global variables that the garbage collector must scan.
struct root_list {
@@ -630,7 +620,6 @@ extern uintptr runtime_stacks_sys;
struct backtrace_state;
extern struct backtrace_state *__go_get_backtrace_state(void);
extern _Bool __go_file_line(uintptr, int, String*, String*, intgo *);
extern byte* runtime_progname();
extern void runtime_main(void*);
extern uint32 runtime_in_callers;

@@ -84,13 +84,3 @@ func sync_atomic.runtime_procPin() (p int) {
func sync_atomic.runtime_procUnpin() {
runtime_m()->locks--;
}
extern Slice envs;
func envs() (s Slice) {
s = envs;
}
func setenvs(e Slice) {
envs = e;
}