
Remove the old locking code written in C.

Add a shell script mkrsysinfo.sh to generate the runtime_sysinfo.go file, so that we can get Go copies of the system time structures and other types.

Tweak the compiler so that when compiling the runtime package the address operator does not cause local variables to escape. When the gc compiler compiles the runtime, an escaping local variable is treated as an error. We should implement that, instead of this change, when escape analysis is turned on.

Tweak the compiler so that the generated C header does not include names that start with an underscore followed by a non-upper-case letter, except for the special cases of _defer and _panic. Otherwise we translate C types to Go in runtime_sysinfo.go and then generate those Go types back as C types in runtime.inc, which is useless and painful for the C code.

Change entersyscall and friends to take a dummy argument, as the gc versions do, to simplify calls from the shared code.

Reviewed-on: https://go-review.googlesource.com/30079
From-SVN: r240657
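As an illustration of the header-name rule described above, here is a minimal, hypothetical sketch of such a filter. It is not the compiler's actual code; the function name keepInCHeader and the sample names in main are invented for the example.

package main

import (
	"fmt"
	"unicode"
	"unicode/utf8"
)

// keepInCHeader reports whether a name would be kept in the generated C
// header under the rule above: names starting with an underscore followed
// by a non-upper-case letter are dropped, except for _defer and _panic.
func keepInCHeader(name string) bool {
	if len(name) < 2 || name[0] != '_' {
		return true
	}
	if name == "_defer" || name == "_panic" {
		return true
	}
	r, _ := utf8.DecodeRuneInString(name[1:])
	return unicode.IsUpper(r)
}

func main() {
	for _, n := range []string{"_defer", "_panic", "_Example", "_example", "lock"} {
		fmt.Println(n, keepInCHeader(n))
	}
}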
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin nacl netbsd openbsd plan9 solaris windows

package runtime

import (
	"runtime/internal/atomic"
	"unsafe"
)

// For gccgo, while we still have C runtime code, use go:linkname to
// rename some functions to themselves, so that the compiler will
// export them.
//
//go:linkname lock runtime.lock
//go:linkname unlock runtime.unlock
//go:linkname noteclear runtime.noteclear
//go:linkname notewakeup runtime.notewakeup
//go:linkname notesleep runtime.notesleep
//go:linkname notetsleep runtime.notetsleep
//go:linkname notetsleepg runtime.notetsleepg

// This implementation depends on OS-specific implementations of
//
//	func semacreate(mp *m)
//		Create a semaphore for mp, if it does not already have one.
//
//	func semasleep(ns int64) int32
//		If ns < 0, acquire m's semaphore and return 0.
//		If ns >= 0, try to acquire m's semaphore for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	func semawakeup(mp *m)
//		Wake up mp, which is or will soon be sleeping on its semaphore.
const (
	mutex_locked uintptr = 1

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)
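
// The low bit of l.key holds mutex_locked. The remaining bits hold a
// pointer to the head of the list of M's waiting for the lock, chained
// through m.nextwaitm, so the whole lock state fits in one atomic word.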
func lock(l *mutex) {
	gp := getg()
	if gp.m.locks < 0 {
		throw("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	if atomic.Casuintptr(&l.key, 0, mutex_locked) {
		return
	}
	semacreate(gp.m)

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
Loop:
	for i := 0; ; i++ {
		v := atomic.Loaduintptr(&l.key)
		if v&mutex_locked == 0 {
			// Unlocked. Try to lock.
			if atomic.Casuintptr(&l.key, v, v|mutex_locked) {
				return
			}
			i = 0
		}
		if i < spin {
			procyield(active_spin_cnt)
		} else if i < spin+passive_spin {
			osyield()
		} else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for {
				gp.m.nextwaitm = v &^ mutex_locked
				if atomic.Casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|mutex_locked) {
					break
				}
				v = atomic.Loaduintptr(&l.key)
				if v&mutex_locked == 0 {
					continue Loop
				}
			}
			if v&mutex_locked != 0 {
				// Queued. Wait.
				semasleep(-1)
				i = 0
			}
		}
	}
}

//go:nowritebarrier
// We might not be holding a p in this code.
func unlock(l *mutex) {
	gp := getg()
	var mp *m
	for {
		v := atomic.Loaduintptr(&l.key)
		if v == mutex_locked {
			if atomic.Casuintptr(&l.key, mutex_locked, 0) {
				break
			}
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (*m)(unsafe.Pointer(v &^ mutex_locked))
			if atomic.Casuintptr(&l.key, v, mp.nextwaitm) {
				// Dequeued an M. Wake it.
				semawakeup(mp)
				break
			}
		}
	}
	gp.m.locks--
	if gp.m.locks < 0 {
		throw("runtime·unlock: lock count")
	}
	// if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
	//	gp.stackguard0 = stackPreempt
	// }
}

// One-time notifications.
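//
// A note's key holds 0 (cleared), mutex_locked (a wakeup has been
// delivered), or a pointer to the M currently sleeping on the note.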
func noteclear(n *note) {
	n.key = 0
}

func notewakeup(n *note) {
	var v uintptr
	for {
		v = atomic.Loaduintptr(&n.key)
		if atomic.Casuintptr(&n.key, v, mutex_locked) {
			break
		}
	}

	// Successfully set waitm to locked.
	// What was it before?
	switch {
	case v == 0:
		// Nothing was waiting. Done.
	case v == mutex_locked:
		// Two notewakeups! Not allowed.
		throw("notewakeup - double wakeup")
	default:
		// Must be the waiting m. Wake it up.
		semawakeup((*m)(unsafe.Pointer(v)))
	}
}

func notesleep(n *note) {
	gp := getg()

	// Currently OK to sleep in non-g0 for gccgo. It happens in
	// stoptheworld because we have not implemented preemption.
	// if gp != gp.m.g0 {
	//	throw("notesleep not on g0")
	// }

	semacreate(gp.m)
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != mutex_locked {
			throw("notesleep - waitm out of sync")
		}
		return
	}
	// Queued. Sleep.
	gp.m.blocked = true
	semasleep(-1)
	gp.m.blocked = false
}

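// notetsleep_internal sleeps on n and reports whether a wakeup was
// received. If ns < 0 it sleeps until woken; otherwise it gives up after
// roughly ns nanoseconds and returns false.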
//go:nosplit
func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
	// gp and deadline are logically local variables, but they are written
	// as parameters so that the stack space they require is charged
	// to the caller.
	// This reduces the nosplit footprint of notetsleep_internal.
	gp = getg()

	// Register for wakeup on n->waitm.
	if !atomic.Casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != mutex_locked {
			throw("notetsleep - waitm out of sync")
		}
		return true
	}
	if ns < 0 {
		// Queued. Sleep.
		gp.m.blocked = true
		semasleep(-1)
		gp.m.blocked = false
		return true
	}

	deadline = nanotime() + ns
	for {
		// Registered. Sleep.
		gp.m.blocked = true
		if semasleep(ns) >= 0 {
			gp.m.blocked = false
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true
		}
		gp.m.blocked = false
		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - nanotime()
		if ns <= 0 {
			break
		}
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for {
		v := atomic.Loaduintptr(&n.key)
		switch v {
		case uintptr(unsafe.Pointer(gp.m)):
			// No wakeup yet; unregister if possible.
			if atomic.Casuintptr(&n.key, v, 0) {
				return false
			}
		case mutex_locked:
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			gp.m.blocked = true
			if semasleep(-1) < 0 {
				throw("runtime: unable to acquire - semaphore out of sync")
			}
			gp.m.blocked = false
			return true
		default:
			throw("runtime: unexpected waitm - semaphore out of sync")
		}
	}
}

func notetsleep(n *note, ns int64) bool {
	gp := getg()

	// Currently OK to sleep in non-g0 for gccgo. It happens in
	// stoptheworld because we have not implemented preemption.
	// if gp != gp.m.g0 && gp.m.preemptoff != "" {
	//	throw("notetsleep not on g0")
	// }

	semacreate(gp.m)
	return notetsleep_internal(n, ns, nil, 0)
}

// same as runtime·notetsleep, but called on user g (not g0)
// calls only nosplit functions between entersyscallblock/exitsyscall
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		throw("notetsleepg on g0")
	}
	semacreate(gp.m)
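	// The 0 arguments are the dummy parameters that entersyscallblock and
	// exitsyscall now take (matching the gc signatures), so the shared
	// code can call them the same way under both compilers.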
	entersyscallblock(0)
	ok := notetsleep_internal(n, ns, nil, 0)
	exitsyscall(0)
	return ok
}