// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

//var Fadd64 = fadd64
//var Fsub64 = fsub64
//var Fmul64 = fmul64
//var Fdiv64 = fdiv64
//var F64to32 = f64to32
//var F32to64 = f32to64
//var Fcmp64 = fcmp64
//var Fintto64 = fintto64
//var F64toint = f64toint
//var Sqrt = sqrt

func golockedOSThread() bool

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = golockedOSThread

// var Xadduintptr = xadduintptr

// var FuncPC = funcPC

type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

func lfstackpush_go(head *uint64, node *LFNode)
func lfstackpop_go(head *uint64) *LFNode

var LFStackPush = lfstackpush_go
var LFStackPop = lfstackpop_go
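
// The LFNode type and the LFStackPush/LFStackPop aliases above expose the
// runtime's lock-free stack to tests. A minimal sketch of how a test in an
// external runtime_test package might exercise them (the test function itself
// is an assumption, not part of this file):
//
//	func TestLFStackSketch(t *testing.T) {
//		head := new(uint64) // an all-zero head word is an empty stack
//		node := new(runtime.LFNode)
//		runtime.LFStackPush(head, node)
//		if got := runtime.LFStackPop(head); got != node {
//			t.Fatalf("popped %p, want %p", got, node)
//		}
//		if runtime.LFStackPop(head) != nil {
//			t.Fatal("stack should be empty after popping its only node")
//		}
//	}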

type ParFor struct {
	body   func(*ParFor, uint32)
	done   uint32
	Nthr   uint32
	thrseq uint32
	Cnt    uint32
	wait   bool
}

func newParFor(nthrmax uint32) *ParFor
func parForSetup(desc *ParFor, nthr, n uint32, wait bool, body func(*ParFor, uint32))
func parForDo(desc *ParFor)
func parForIters(desc *ParFor, tid uintptr) (uintptr, uintptr)

var NewParFor = newParFor
var ParForSetup = parForSetup
var ParForDo = parForDo

func ParForIters(desc *ParFor, tid uint32) (uint32, uint32) {
	begin, end := parForIters(desc, uintptr(tid))
	return uint32(begin), uint32(end)
}
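
// NewParFor, ParForSetup, ParForDo, and the ParForIters wrapper above expose
// the runtime's parallel-for helper to tests. A rough sketch of the calling
// sequence, based only on the signatures exported here (the counts and the
// loop body are made up for illustration):
//
//	const P, N = 1, 64
//	data := make([]uint64, N)
//	desc := runtime.NewParFor(P)
//	runtime.ParForSetup(desc, P, N, true, func(_ *runtime.ParFor, i uint32) {
//		data[i] = data[i]*data[i] + 1 // body runs once per iteration index
//	})
//	runtime.ParForDo(desc) // with P == 1 the calling thread runs every iteration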

// GCMask is exported for tests; in this runtime it is a stub that always
// returns nil.
func GCMask(x interface{}) (ret []byte) {
	return nil
}

//func testSchedLocalQueue()
//func testSchedLocalQueueSteal()
//
//func RunSchedLocalQueueTest() {
//	testSchedLocalQueue()
//}
//
//func RunSchedLocalQueueStealTest() {
//	testSchedLocalQueueSteal()
//}

//var StringHash = stringHash
//var BytesHash = bytesHash
//var Int32Hash = int32Hash
//var Int64Hash = int64Hash
//var EfaceHash = efaceHash
//var IfaceHash = ifaceHash
//var MemclrBytes = memclrBytes

var HashLoad = &hashLoad

// entry point for testing
//func GostringW(w []uint16) (s string) {
//	s = gostringw(&w[0])
//	return
//}

//var Gostringnocopy = gostringnocopy
//var Maxstring = &maxstring

//type Uintreg uintreg

/*
func RunSchedLocalQueueTest() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty.
	// Runq emptiness affects scheduling decisions and spurious emptiness
	// can lead to underutilization (both runnable Gs and idle Ps coexist
	// for an arbitrarily long time).
	done := make(chan bool, 1)
	p := new(p)
	gs := make([]g, 2)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}

var StringHash = stringHash
var BytesHash = bytesHash
var Int32Hash = int32Hash
var Int64Hash = int64Hash
var EfaceHash = efaceHash
var IfaceHash = ifaceHash
var MemclrBytes = memclrBytes
*/

var Open = open
var Close = closefd
var Read = read
var Write = write
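
// Open, Close, Read, and Write expose the runtime's raw file-descriptor
// helpers to tests. Their signatures are declared elsewhere in the runtime;
// the sketch below assumes the usual C-like shapes (a NUL-terminated *byte
// name, int32 descriptors, unsafe.Pointer buffers) and is purely illustrative:
//
//	name := []byte("/dev/null\x00")
//	fd := runtime.Open(&name[0], 0 /* read-only */, 0)
//	if fd < 0 {
//		t.Fatal("open failed")
//	}
//	var buf [16]byte
//	runtime.Read(fd, unsafe.Pointer(&buf[0]), int32(len(buf)))
//	runtime.Close(fd)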

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }
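
// Envs and SetEnvs let a test swap out the runtime's cached environment and
// restore it afterwards. A minimal sketch (the replacement value is only an
// example):
//
//	saved := runtime.Envs()
//	defer runtime.SetEnvs(saved)
//	runtime.SetEnvs([]string{"GOTRACEBACK=system"})
//	// ... exercise code that consults the runtime's copy of the environment ...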

//var BigEndian = sys.BigEndian

// For benchmarking.
/*
func BenchSetType(n int, x interface{}) {
	e := *efaceOf(&x)
	t := e._type
	var size uintptr
	var p unsafe.Pointer
	switch t.kind & kindMask {
	case kindPtr:
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		size = t.size
		p = e.data
	case kindSlice:
		slice := *(*struct {
			ptr      unsafe.Pointer
			len, cap uintptr
		})(e.data)
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
		p = slice.ptr
	}
	allocSize := roundupsize(size)
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(p), allocSize, size, t)
		}
	})
}

const PtrSize = sys.PtrSize

var TestingAssertE2I2GC = &testingAssertE2I2GC
var TestingAssertE2T2GC = &testingAssertE2T2GC

var ForceGCPeriod = &forcegcperiod
*/

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}
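
// A typical use is a test helper that wants crash output kept at a given
// level no matter what debug.SetTraceback is called with later. A hedged
// sketch (the TestMain wiring is an assumption, not something this file
// prescribes):
//
//	func TestMain(m *testing.M) {
//		runtime.SetTracebackEnv("system") // roughly GOTRACEBACK=system, and later calls can't lower it
//		os.Exit(m.Run())
//	}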

/*
var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")

	pagesInUse = uintptr(mheap_.pagesInUse)

	for _, s := range h_allspans {
		if s.state == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld()

	return
}
*/