runtime: copy channel code from Go 1.7 runtime

Change the compiler to use the new routines. Drop the separation of
small and large values when sending on a channel. Allocate the select
struct on the stack. Remove the old C implementation of channels. Adjust
the garbage collector for the new data structure.

Bring in part of the tracing code, enough for the channel code to call.

Bump the permitted number of allocations in one of the tests in
context_test.go. The difference is that now receiving from a channel
allocates a sudog, which the C code used to simply put on the
stack. This will be somewhat better when we port proc.go.

Reviewed-on: https://go-review.googlesource.com/30714

From-SVN: r240941
Ian Lance Taylor 2016-10-10 16:52:09 +00:00
parent 40962ac03a
commit 5d8c099ede
23 changed files with 2675 additions and 1297 deletions
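
As a rough orientation for the diffs below, the new lowering maps channel operations onto the Go 1.7 runtime entry points instead of the old __go_* C routines (sketch only; td stands for the channel's type-descriptor argument, and the names follow the runtime.def changes):

    c := make(chan int, n)  // lowers to c := runtime.makechan(td, n)        (was __go_new_channel / __go_new_channel_big)
    c <- v                  // lowers to runtime.chansend1(td, c, &v)        (was __go_send_small / __go_send_big)
    x := <-c                // lowers to runtime.chanrecv1(td, c, &x)        (was __go_receive)
    x, ok := <-c            // lowers to ok := runtime.chanrecv2(td, c, &x)
    close(c)                // lowers to runtime.closechan(c)                (was __go_builtin_close)

The value operand is now always passed by address; when it is not already addressable, the compiler first copies it into a temporary.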


@ -1,4 +1,4 @@
2431267d513804a3b1aa71adde9aefba9e3c3c59
9401e714d690e3907a64ac5c8cd5aed9e28f511b
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.


@ -293,7 +293,6 @@ Node::op_format() const
break;
case Runtime::MAKECHAN:
case Runtime::MAKECHANBIG:
case Runtime::MAKEMAP:
case Runtime::MAKESLICE1:
case Runtime::MAKESLICE2:
@ -1229,7 +1228,6 @@ Escape_analysis_assign::expression(Expression** pexpr)
break;
case Runtime::MAKECHAN:
case Runtime::MAKECHANBIG:
case Runtime::MAKEMAP:
case Runtime::MAKESLICE1:
case Runtime::MAKESLICE2:
@ -1838,7 +1836,6 @@ Escape_analysis_assign::assign(Node* dst, Node* src)
}
case Runtime::MAKECHAN:
case Runtime::MAKECHANBIG:
case Runtime::MAKEMAP:
case Runtime::MAKESLICE1:
case Runtime::MAKESLICE2:
@ -2612,7 +2609,6 @@ Escape_analysis_flood::flood(Level level, Node* dst, Node* src,
break;
case Runtime::MAKECHAN:
case Runtime::MAKECHANBIG:
case Runtime::MAKEMAP:
case Runtime::MAKESLICE1:
case Runtime::MAKESLICE2:


@ -3604,6 +3604,7 @@ Unsafe_type_conversion_expression::do_get_backend(Translate_context* context)
|| et->channel_type() != NULL
|| et->map_type() != NULL
|| et->function_type() != NULL
|| et->integer_type() != NULL
|| et->is_nil_type());
else if (et->is_unsafe_pointer_type())
go_assert(t->points_to() != NULL);
@ -7077,6 +7078,7 @@ Builtin_call_expression::do_flatten(Gogo*, Named_object*,
break;
case BUILTIN_LEN:
case BUILTIN_CAP:
Expression_list::iterator pa = this->args()->begin();
if (!(*pa)->is_variable()
&& ((*pa)->type()->map_type() != NULL
@ -7217,10 +7219,7 @@ Builtin_call_expression::lower_make()
Expression::make_nil(loc),
Expression::make_nil(loc));
else if (is_chan)
call = Runtime::make_call((have_big_args
? Runtime::MAKECHANBIG
: Runtime::MAKECHAN),
loc, 2, type_arg, len_arg);
call = Runtime::make_call(Runtime::MAKECHAN, loc, 2, type_arg, len_arg);
else
go_unreachable();
@ -8300,7 +8299,31 @@ Builtin_call_expression::do_get_backend(Translate_context* context)
this->seen_ = false;
}
else if (arg_type->channel_type() != NULL)
val = Runtime::make_call(Runtime::CHAN_CAP, location, 1, arg);
{
// The second field is the capacity. If the pointer
// is nil, the capacity is zero.
Type* uintptr_type = Type::lookup_integer_type("uintptr");
Type* pint_type = Type::make_pointer_type(int_type);
Expression* parg = Expression::make_unsafe_cast(uintptr_type,
arg,
location);
int off = int_type->integer_type()->bits() / 8;
Expression* eoff = Expression::make_integer_ul(off,
uintptr_type,
location);
parg = Expression::make_binary(OPERATOR_PLUS, parg, eoff,
location);
parg = Expression::make_unsafe_cast(pint_type, parg, location);
Expression* nil = Expression::make_nil(location);
nil = Expression::make_cast(pint_type, nil, location);
Expression* cmp = Expression::make_binary(OPERATOR_EQEQ,
arg, nil, location);
Expression* zero = Expression::make_integer_ul(0, int_type,
location);
Expression* indir = Expression::make_unary(OPERATOR_MULT,
parg, location);
val = Expression::make_conditional(cmp, zero, indir, location);
}
else
go_unreachable();
}
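
For illustration, a Go sketch of the inline expression built above for cap on a channel (it assumes the new hchan layout in which dataqsiz is the second int-sized word, and that "unsafe" is imported; chanCapSketch is a hypothetical helper name, not part of the change):

    // chanCapSketch mirrors the expression the compiler now builds for cap(c):
    // zero for a nil channel, otherwise the second word of the runtime hchan
    // header, which holds dataqsiz (the buffer capacity).
    func chanCapSketch(c unsafe.Pointer) int {
            if c == nil {
                    return 0
            }
            return *(*int)(unsafe.Pointer(uintptr(c) + unsafe.Sizeof(int(0))))
    }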
@ -13729,9 +13752,8 @@ Receive_expression::do_get_backend(Translate_context* context)
Expression* recv_addr =
Expression::make_temporary_reference(this->temp_receiver_, loc);
recv_addr = Expression::make_unary(OPERATOR_AND, recv_addr, loc);
Expression* recv =
Runtime::make_call(Runtime::RECEIVE, loc, 3,
td, this->channel_, recv_addr);
Expression* recv = Runtime::make_call(Runtime::CHANRECV1, loc, 3,
td, this->channel_, recv_addr);
return Expression::make_compound(recv, recv_ref, loc)->get_backend(context);
}


@ -127,20 +127,13 @@ DEF_GO_RUNTIME(MAPITERNEXT, "runtime.mapiternext", P1(POINTER), R0())
// Make a channel.
DEF_GO_RUNTIME(MAKECHAN, "__go_new_channel", P2(TYPE, UINTPTR), R1(CHAN))
DEF_GO_RUNTIME(MAKECHANBIG, "__go_new_channel_big", P2(TYPE, UINT64), R1(CHAN))
DEF_GO_RUNTIME(MAKECHAN, "runtime.makechan", P2(TYPE, INT64), R1(CHAN))
// Get the capacity of a channel (the size of the buffer).
DEF_GO_RUNTIME(CHAN_CAP, "__go_chan_cap", P1(CHAN), R1(INT))
// Send a small value on a channel.
DEF_GO_RUNTIME(SEND_SMALL, "__go_send_small", P3(TYPE, CHAN, UINT64), R0())
// Send a big value on a channel.
DEF_GO_RUNTIME(SEND_BIG, "__go_send_big", P3(TYPE, CHAN, POINTER), R0())
// Send a value on a channel.
DEF_GO_RUNTIME(CHANSEND, "runtime.chansend1", P3(TYPE, CHAN, POINTER), R0())
// Receive a value from a channel.
DEF_GO_RUNTIME(RECEIVE, "__go_receive", P3(TYPE, CHAN, POINTER), R0())
DEF_GO_RUNTIME(CHANRECV1, "runtime.chanrecv1", P3(TYPE, CHAN, POINTER), R0())
// Receive a value from a channel returning whether it is closed.
DEF_GO_RUNTIME(CHANRECV2, "runtime.chanrecv2", P3(TYPE, CHAN, POINTER),
@ -148,7 +141,7 @@ DEF_GO_RUNTIME(CHANRECV2, "runtime.chanrecv2", P3(TYPE, CHAN, POINTER),
// Start building a select statement.
DEF_GO_RUNTIME(NEWSELECT, "runtime.newselect", P1(INT32), R1(POINTER))
DEF_GO_RUNTIME(NEWSELECT, "runtime.newselect", P3(POINTER, INT64, INT32), R0())
// Add a default clause to a select statement.
DEF_GO_RUNTIME(SELECTDEFAULT, "runtime.selectdefault",
@ -202,7 +195,7 @@ DEF_GO_RUNTIME(RUNTIME_ERROR, "__go_runtime_error", P1(INT32), R0())
// Close.
DEF_GO_RUNTIME(CLOSE, "__go_builtin_close", P1(CHAN), R0())
DEF_GO_RUNTIME(CLOSE, "runtime.closechan", P1(CHAN), R0())
// Copy.


@ -4330,7 +4330,6 @@ Send_statement::do_get_backend(Translate_context* context)
element_type,
this->val_, loc);
bool is_small;
bool can_take_address;
switch (element_type->base()->classification())
{
@ -4340,25 +4339,18 @@ Send_statement::do_get_backend(Translate_context* context)
case Type::TYPE_POINTER:
case Type::TYPE_MAP:
case Type::TYPE_CHANNEL:
is_small = true;
can_take_address = false;
break;
case Type::TYPE_FLOAT:
case Type::TYPE_COMPLEX:
case Type::TYPE_STRING:
case Type::TYPE_INTERFACE:
is_small = false;
can_take_address = false;
break;
case Type::TYPE_STRUCT:
is_small = false;
can_take_address = true;
break;
case Type::TYPE_ARRAY:
is_small = false;
can_take_address = !element_type->is_slice_type();
break;
@ -4384,28 +4376,19 @@ Send_statement::do_get_backend(Translate_context* context)
Expression* td = Expression::make_type_descriptor(this->channel_->type(),
loc);
Runtime::Function code;
Bstatement* btemp = NULL;
if (is_small)
{
// Type is small enough to handle as uint64.
code = Runtime::SEND_SMALL;
val = Expression::make_unsafe_cast(Type::lookup_integer_type("uint64"),
val, loc);
}
else if (can_take_address)
if (can_take_address)
{
// Must pass address of value. The function doesn't change the
// value, so just take its address directly.
code = Runtime::SEND_BIG;
// The function doesn't change the value, so just take its
// address directly.
val = Expression::make_unary(OPERATOR_AND, val, loc);
}
else
{
// Must pass address of value, but the value is small enough
// that it might be in registers. Copy value into temporary
// variable to take address.
code = Runtime::SEND_BIG;
// The value is not in a variable, or is small enough that it
// might be in a register, and taking the address would push it
// on the stack. Copy it into a temporary variable to take the
// address.
Temporary_statement* temp = Statement::make_temporary(element_type,
val, loc);
Expression* ref = Expression::make_temporary_reference(temp, loc);
@ -4413,7 +4396,8 @@ Send_statement::do_get_backend(Translate_context* context)
btemp = temp->get_backend(context);
}
Expression* call = Runtime::make_call(code, loc, 3, td, this->channel_, val);
Expression* call = Runtime::make_call(Runtime::CHANSEND, loc, 3, td,
this->channel_, val);
context->gogo()->lower_expression(context->function(), NULL, &call);
Bexpression* bcall = call->get_backend(context);
@ -4491,6 +4475,7 @@ Select_clauses::Select_clause::lower(Gogo* gogo, Named_object* function,
Location loc = this->location_;
Expression* selref = Expression::make_temporary_reference(sel, loc);
selref = Expression::make_unary(OPERATOR_AND, selref, loc);
Expression* index_expr = Expression::make_integer_ul(this->index_, NULL,
loc);
@ -4854,6 +4839,7 @@ Select_clauses::get_backend(Translate_context* context,
}
Expression* selref = Expression::make_temporary_reference(sel, location);
selref = Expression::make_unary(OPERATOR_AND, selref, location);
Expression* call = Runtime::make_call(Runtime::SELECTGO, location, 1,
selref);
context->gogo()->lower_expression(context->function(), NULL, &call);
@ -4920,13 +4906,27 @@ Select_statement::do_lower(Gogo* gogo, Named_object* function,
go_assert(this->sel_ == NULL);
Expression* size_expr = Expression::make_integer_ul(this->clauses_->size(),
NULL, loc);
Expression* call = Runtime::make_call(Runtime::NEWSELECT, loc, 1, size_expr);
this->sel_ = Statement::make_temporary(NULL, call, loc);
int ncases = this->clauses_->size();
Type* selstruct_type = Channel_type::select_type(ncases);
this->sel_ = Statement::make_temporary(selstruct_type, NULL, loc);
b->add_statement(this->sel_);
int64_t selstruct_size;
if (!selstruct_type->backend_type_size(gogo, &selstruct_size))
{
go_assert(saw_errors());
return Statement::make_error_statement(loc);
}
Expression* ref = Expression::make_temporary_reference(this->sel_, loc);
ref = Expression::make_unary(OPERATOR_AND, ref, loc);
Expression* selstruct_size_expr =
Expression::make_integer_int64(selstruct_size, NULL, loc);
Expression* size_expr = Expression::make_integer_ul(ncases, NULL, loc);
Expression* call = Runtime::make_call(Runtime::NEWSELECT, loc, 3,
ref, selstruct_size_expr, size_expr);
b->add_statement(Statement::make_statement(call, true));
this->clauses_->lower(gogo, function, b, this->sel_);
this->is_lowered_ = true;
b->add_statement(this);
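
Sketched in pseudo-Go, a two-case select now lowers roughly as follows (selStruct stands for the stack-allocated struct built by Channel_type::select_type below; the variable names are illustrative):

    var sel selStruct                                      // temporary on the stack, sized for two cases
    runtime.newselect(&sel, int64(unsafe.Sizeof(sel)), 2)  // previously: sel := runtime.newselect(2)
    runtime.selectsend(&sel, c1, &v, 0)                    // case c1 <- v:
    runtime.selectrecv(&sel, c2, &x, 1)                    // case x = <-c2:
    switch runtime.selectgo(&sel) {
    case 0: // the send fired
    case 1: // the receive fired
    }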


@ -7771,6 +7771,53 @@ Channel_type::do_import(Import* imp)
return Type::make_channel_type(may_send, may_receive, element_type);
}
// Return the type to manage a select statement with ncases case
// statements. A value of this type is allocated on the stack. This
// must match the type hselect in libgo/go/runtime/select.go.
Type*
Channel_type::select_type(int ncases)
{
Type* unsafe_pointer_type = Type::make_pointer_type(Type::make_void_type());
Type* uint16_type = Type::lookup_integer_type("uint16");
static Struct_type* scase_type;
if (scase_type == NULL)
{
Type* uintptr_type = Type::lookup_integer_type("uintptr");
Type* uint64_type = Type::lookup_integer_type("uint64");
scase_type =
Type::make_builtin_struct_type(7,
"elem", unsafe_pointer_type,
"chan", unsafe_pointer_type,
"pc", uintptr_type,
"kind", uint16_type,
"index", uint16_type,
"receivedp", unsafe_pointer_type,
"releasetime", uint64_type);
scase_type->set_is_struct_incomparable();
}
Expression* ncases_expr =
Expression::make_integer_ul(ncases, NULL, Linemap::predeclared_location());
Array_type* scases = Type::make_array_type(scase_type, ncases_expr);
scases->set_is_array_incomparable();
Array_type* order = Type::make_array_type(uint16_type, ncases_expr);
order->set_is_array_incomparable();
Struct_type* ret =
Type::make_builtin_struct_type(7,
"tcase", uint16_type,
"ncase", uint16_type,
"pollorder", unsafe_pointer_type,
"lockorder", unsafe_pointer_type,
"scase", scases,
"lockorderarr", order,
"pollorderarr", order);
ret->set_is_struct_incomparable();
return ret;
}
// Make a new channel type.
Channel_type*


@ -2809,6 +2809,9 @@ class Channel_type : public Type
static Type*
make_chan_type_descriptor_type();
static Type*
select_type(int ncases);
protected:
int
do_traverse(Traverse* traverse)


@ -520,7 +520,6 @@ runtime_files = \
$(runtime_thread_files) \
runtime/yield.c \
$(rtems_task_variable_add_file) \
chan.c \
cpuprof.c \
go-iface.c \
lfstack.c \


@ -263,9 +263,9 @@ am__objects_6 = go-append.lo go-assert.lo go-assert-interface.lo \
$(am__objects_1) mfixalloc.lo mgc0.lo mheap.lo msize.lo \
$(am__objects_2) panic.lo parfor.lo print.lo proc.lo \
runtime.lo signal_unix.lo thread.lo $(am__objects_3) yield.lo \
$(am__objects_4) chan.lo cpuprof.lo go-iface.lo lfstack.lo \
malloc.lo mprof.lo netpoll.lo rdebug.lo reflect.lo runtime1.lo \
sema.lo sigqueue.lo string.lo time.lo $(am__objects_5)
$(am__objects_4) cpuprof.lo go-iface.lo lfstack.lo malloc.lo \
mprof.lo netpoll.lo rdebug.lo reflect.lo runtime1.lo sema.lo \
sigqueue.lo string.lo time.lo $(am__objects_5)
am_libgo_llgo_la_OBJECTS = $(am__objects_6)
libgo_llgo_la_OBJECTS = $(am_libgo_llgo_la_OBJECTS)
libgo_llgo_la_LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) \
@ -921,7 +921,6 @@ runtime_files = \
$(runtime_thread_files) \
runtime/yield.c \
$(rtems_task_variable_add_file) \
chan.c \
cpuprof.c \
go-iface.c \
lfstack.c \
@ -1557,7 +1556,6 @@ mostlyclean-compile:
distclean-compile:
-rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/chan.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpuprof.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/env_posix.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/getncpu-bsd.Plo@am__quote@


@ -381,7 +381,7 @@ func TestAllocs(t *testing.T) {
<-c.Done()
},
limit: 8,
gccgoLimit: 15,
gccgoLimit: 18,
},
{
desc: "WithCancel(bg)",

libgo/go/runtime/chan.go (new file, 724 lines)

@ -0,0 +1,724 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
// This file contains the implementation of Go channels.
// Invariants:
// At least one of c.sendq and c.recvq is empty.
// For buffered channels, also:
// c.qcount > 0 implies that c.recvq is empty.
// c.qcount < c.dataqsiz implies that c.sendq is empty.
import (
"runtime/internal/atomic"
"unsafe"
)
// For gccgo, use go:linkname to rename compiler-called functions to
// themselves, so that the compiler will export them.
//
//go:linkname makechan runtime.makechan
//go:linkname chansend1 runtime.chansend1
//go:linkname chanrecv1 runtime.chanrecv1
//go:linkname chanrecv2 runtime.chanrecv2
//go:linkname closechan runtime.closechan
const (
maxAlign = 8
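// hchanSize is unsafe.Sizeof(hchan{}) rounded up to a multiple of maxAlign,
// so that the element buffer placed immediately after the header in a
// single allocation stays suitably aligned.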
hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1))
debugChan = false
)
type hchan struct {
qcount uint // total data in the queue
dataqsiz uint // size of the circular queue
buf unsafe.Pointer // points to an array of dataqsiz elements
elemsize uint16
closed uint32
elemtype *_type // element type
sendx uint // send index
recvx uint // receive index
recvq waitq // list of recv waiters
sendq waitq // list of send waiters
// lock protects all fields in hchan, as well as several
// fields in sudogs blocked on this channel.
//
// Do not change another G's status while holding this lock
// (in particular, do not ready a G), as this can deadlock
// with stack shrinking.
lock mutex
}
type waitq struct {
first *sudog
last *sudog
}
//go:linkname reflect_makechan reflect.makechan
func reflect_makechan(t *chantype, size int64) *hchan {
return makechan(t, size)
}
func makechan(t *chantype, size int64) *hchan {
elem := t.elem
// compiler checks this but be safe.
if elem.size >= 1<<16 {
throw("makechan: invalid channel element type")
}
if hchanSize%maxAlign != 0 || elem.align > maxAlign {
throw("makechan: bad alignment")
}
if size < 0 || int64(uintptr(size)) != size || (elem.size > 0 && uintptr(size) > (_MaxMem-hchanSize)/elem.size) {
panic(plainError("makechan: size out of range"))
}
var c *hchan
if elem.kind&kindNoPointers != 0 || size == 0 {
// Allocate memory in one call.
// Hchan does not contain pointers interesting for GC in this case:
// buf points into the same allocation, elemtype is persistent.
// SudoG's are referenced from their owning thread so they can't be collected.
// TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
c = (*hchan)(mallocgc(hchanSize+uintptr(size)*elem.size, nil, true))
if size > 0 && elem.size != 0 {
c.buf = add(unsafe.Pointer(c), hchanSize)
} else {
// race detector uses this location for synchronization
// Also prevents us from pointing beyond the allocation (see issue 9401).
c.buf = unsafe.Pointer(c)
}
} else {
c = new(hchan)
c.buf = newarray(elem, int(size))
}
c.elemsize = uint16(elem.size)
c.elemtype = elem
c.dataqsiz = uint(size)
if debugChan {
print("makechan: chan=", c, "; elemsize=", elem.size, "; dataqsiz=", size, "\n")
}
return c
}
// chanbuf(c, i) is pointer to the i'th slot in the buffer.
func chanbuf(c *hchan, i uint) unsafe.Pointer {
return add(c.buf, uintptr(i)*uintptr(c.elemsize))
}
// entry point for c <- x from compiled code
//go:nosplit
func chansend1(t *chantype, c *hchan, elem unsafe.Pointer) {
chansend(t, c, elem, true, getcallerpc(unsafe.Pointer(&t)))
}
/*
* generic single channel send/recv
* If block is not nil,
* then the protocol will not
* sleep but return if it could
* not complete.
*
* sleep can wake up with g.param == nil
* when a channel involved in the sleep has
* been closed. it is easiest to loop and re-run
* the operation; we'll see that it's now closed.
*/
func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
if raceenabled {
raceReadObjectPC(t.elem, ep, callerpc, funcPC(chansend))
}
if msanenabled {
msanread(ep, t.elem.size)
}
if c == nil {
if !block {
return false
}
gopark(nil, nil, "chan send (nil chan)", traceEvGoStop, 2)
throw("unreachable")
}
if debugChan {
print("chansend: chan=", c, "\n")
}
if raceenabled {
racereadpc(unsafe.Pointer(c), callerpc, funcPC(chansend))
}
// Fast path: check for failed non-blocking operation without acquiring the lock.
//
// After observing that the channel is not closed, we observe that the channel is
// not ready for sending. Each of these observations is a single word-sized read
// (first c.closed and second c.recvq.first or c.qcount depending on kind of channel).
// Because a closed channel cannot transition from 'ready for sending' to
// 'not ready for sending', even if the channel is closed between the two observations,
// they imply a moment between the two when the channel was both not yet closed
// and not ready for sending. We behave as if we observed the channel at that moment,
// and report that the send cannot proceed.
//
// It is okay if the reads are reordered here: if we observe that the channel is not
// ready for sending and then observe that it is not closed, that implies that the
// channel wasn't closed during the first observation.
if !block && c.closed == 0 && ((c.dataqsiz == 0 && c.recvq.first == nil) ||
(c.dataqsiz > 0 && c.qcount == c.dataqsiz)) {
return false
}
var t0 int64
if blockprofilerate > 0 {
t0 = cputicks()
}
lock(&c.lock)
if c.closed != 0 {
unlock(&c.lock)
panic(plainError("send on closed channel"))
}
if sg := c.recvq.dequeue(); sg != nil {
// Found a waiting receiver. We pass the value we want to send
// directly to the receiver, bypassing the channel buffer (if any).
send(c, sg, ep, func() { unlock(&c.lock) })
return true
}
if c.qcount < c.dataqsiz {
// Space is available in the channel buffer. Enqueue the element to send.
qp := chanbuf(c, c.sendx)
if raceenabled {
raceacquire(qp)
racerelease(qp)
}
typedmemmove(c.elemtype, qp, ep)
c.sendx++
if c.sendx == c.dataqsiz {
c.sendx = 0
}
c.qcount++
unlock(&c.lock)
return true
}
if !block {
unlock(&c.lock)
return false
}
// Block on the channel. Some receiver will complete our operation for us.
gp := getg()
mysg := acquireSudog()
mysg.releasetime = 0
if t0 != 0 {
mysg.releasetime = -1
}
// No stack splits between assigning elem and enqueuing mysg
// on gp.waiting where copystack can find it.
mysg.elem = ep
mysg.waitlink = nil
mysg.g = gp
mysg.selectdone = nil
mysg.c = c
gp.waiting = mysg
gp.param = nil
c.sendq.enqueue(mysg)
goparkunlock(&c.lock, "chan send", traceEvGoBlockSend, 3)
// someone woke us up.
if mysg != gp.waiting {
throw("G waiting list is corrupted")
}
gp.waiting = nil
if gp.param == nil {
if c.closed == 0 {
throw("chansend: spurious wakeup")
}
panic(plainError("send on closed channel"))
}
gp.param = nil
if mysg.releasetime > 0 {
blockevent(mysg.releasetime-t0, 2)
}
mysg.c = nil
releaseSudog(mysg)
return true
}
// send processes a send operation on an empty channel c.
// The value ep sent by the sender is copied to the receiver sg.
// The receiver is then woken up to go on its merry way.
// Channel c must be empty and locked. send unlocks c with unlockf.
// sg must already be dequeued from c.
// ep must be non-nil and point to the heap or the caller's stack.
func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func()) {
if raceenabled {
if c.dataqsiz == 0 {
racesync(c, sg)
} else {
// Pretend we go through the buffer, even though
// we copy directly. Note that we need to increment
// the head/tail locations only when raceenabled.
qp := chanbuf(c, c.recvx)
raceacquire(qp)
racerelease(qp)
raceacquireg(sg.g, qp)
racereleaseg(sg.g, qp)
c.recvx++
if c.recvx == c.dataqsiz {
c.recvx = 0
}
c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
}
}
if sg.elem != nil {
sendDirect(c.elemtype, sg, ep)
sg.elem = nil
}
gp := sg.g
unlockf()
gp.param = unsafe.Pointer(sg)
if sg.releasetime != 0 {
sg.releasetime = cputicks()
}
goready(gp, 4)
}
func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
// Send on an unbuffered or empty-buffered channel is the only operation
// in the entire runtime where one goroutine
// writes to the stack of another goroutine. The GC assumes that
// stack writes only happen when the goroutine is running and are
// only done by that goroutine. Using a write barrier is sufficient to
// make up for violating that assumption, but the write barrier has to work.
// typedmemmove will call heapBitsBulkBarrier, but the target bytes
// are not in the heap, so that will not help. We arrange to call
// memmove and typeBitsBulkBarrier instead.
// Once we read sg.elem out of sg, it will no longer
// be updated if the destination's stack gets copied (shrunk).
// So make sure that no preemption points can happen between read & use.
dst := sg.elem
memmove(dst, src, t.size)
typeBitsBulkBarrier(t, uintptr(dst), t.size)
}
func closechan(c *hchan) {
if c == nil {
panic(plainError("close of nil channel"))
}
lock(&c.lock)
if c.closed != 0 {
unlock(&c.lock)
panic(plainError("close of closed channel"))
}
if raceenabled {
callerpc := getcallerpc(unsafe.Pointer(&c))
racewritepc(unsafe.Pointer(c), callerpc, funcPC(closechan))
racerelease(unsafe.Pointer(c))
}
c.closed = 1
var glist *g
// release all readers
for {
sg := c.recvq.dequeue()
if sg == nil {
break
}
if sg.elem != nil {
memclr(sg.elem, uintptr(c.elemsize))
sg.elem = nil
}
if sg.releasetime != 0 {
sg.releasetime = cputicks()
}
gp := sg.g
gp.param = nil
if raceenabled {
raceacquireg(gp, unsafe.Pointer(c))
}
gp.schedlink.set(glist)
glist = gp
}
// release all writers (they will panic)
for {
sg := c.sendq.dequeue()
if sg == nil {
break
}
sg.elem = nil
if sg.releasetime != 0 {
sg.releasetime = cputicks()
}
gp := sg.g
gp.param = nil
if raceenabled {
raceacquireg(gp, unsafe.Pointer(c))
}
gp.schedlink.set(glist)
glist = gp
}
unlock(&c.lock)
// Ready all Gs now that we've dropped the channel lock.
for glist != nil {
gp := glist
glist = glist.schedlink.ptr()
gp.schedlink = 0
goready(gp, 3)
}
}
// entry points for <- c from compiled code
//go:nosplit
func chanrecv1(t *chantype, c *hchan, elem unsafe.Pointer) {
chanrecv(t, c, elem, true)
}
//go:nosplit
func chanrecv2(t *chantype, c *hchan, elem unsafe.Pointer) (received bool) {
_, received = chanrecv(t, c, elem, true)
return
}
// chanrecv receives on channel c and writes the received data to ep.
// ep may be nil, in which case received data is ignored.
// If block == false and no elements are available, returns (false, false).
// Otherwise, if c is closed, zeros *ep and returns (true, false).
// Otherwise, fills in *ep with an element and returns (true, true).
// A non-nil ep must point to the heap or the caller's stack.
func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
// raceenabled: don't need to check ep, as it is always on the stack
// or is new memory allocated by reflect.
if debugChan {
print("chanrecv: chan=", c, "\n")
}
if c == nil {
if !block {
return
}
gopark(nil, nil, "chan receive (nil chan)", traceEvGoStop, 2)
throw("unreachable")
}
// Fast path: check for failed non-blocking operation without acquiring the lock.
//
// After observing that the channel is not ready for receiving, we observe that the
// channel is not closed. Each of these observations is a single word-sized read
// (first c.sendq.first or c.qcount, and second c.closed).
// Because a channel cannot be reopened, the later observation of the channel
// being not closed implies that it was also not closed at the moment of the
// first observation. We behave as if we observed the channel at that moment
// and report that the receive cannot proceed.
//
// The order of operations is important here: reversing the operations can lead to
// incorrect behavior when racing with a close.
if !block && (c.dataqsiz == 0 && c.sendq.first == nil ||
c.dataqsiz > 0 && atomic.Loaduint(&c.qcount) == 0) &&
atomic.Load(&c.closed) == 0 {
return
}
var t0 int64
if blockprofilerate > 0 {
t0 = cputicks()
}
lock(&c.lock)
if c.closed != 0 && c.qcount == 0 {
if raceenabled {
raceacquire(unsafe.Pointer(c))
}
unlock(&c.lock)
if ep != nil {
memclr(ep, uintptr(c.elemsize))
}
return true, false
}
if sg := c.sendq.dequeue(); sg != nil {
// Found a waiting sender. If buffer is size 0, receive value
// directly from sender. Otherwise, receive from head of queue
// and add sender's value to the tail of the queue (both map to
// the same buffer slot because the queue is full).
recv(c, sg, ep, func() { unlock(&c.lock) })
return true, true
}
if c.qcount > 0 {
// Receive directly from queue
qp := chanbuf(c, c.recvx)
if raceenabled {
raceacquire(qp)
racerelease(qp)
}
if ep != nil {
typedmemmove(c.elemtype, ep, qp)
}
memclr(qp, uintptr(c.elemsize))
c.recvx++
if c.recvx == c.dataqsiz {
c.recvx = 0
}
c.qcount--
unlock(&c.lock)
return true, true
}
if !block {
unlock(&c.lock)
return false, false
}
// no sender available: block on this channel.
gp := getg()
mysg := acquireSudog()
mysg.releasetime = 0
if t0 != 0 {
mysg.releasetime = -1
}
// No stack splits between assigning elem and enqueuing mysg
// on gp.waiting where copystack can find it.
mysg.elem = ep
mysg.waitlink = nil
gp.waiting = mysg
mysg.g = gp
mysg.selectdone = nil
mysg.c = c
gp.param = nil
c.recvq.enqueue(mysg)
goparkunlock(&c.lock, "chan receive", traceEvGoBlockRecv, 3)
// someone woke us up
if mysg != gp.waiting {
throw("G waiting list is corrupted")
}
gp.waiting = nil
if mysg.releasetime > 0 {
blockevent(mysg.releasetime-t0, 2)
}
closed := gp.param == nil
gp.param = nil
mysg.c = nil
releaseSudog(mysg)
return true, !closed
}
// recv processes a receive operation on a full channel c.
// There are 2 parts:
// 1) The value sent by the sender sg is put into the channel
// and the sender is woken up to go on its merry way.
// 2) The value received by the receiver (the current G) is
// written to ep.
// For synchronous channels, both values are the same.
// For asynchronous channels, the receiver gets its data from
// the channel buffer and the sender's data is put in the
// channel buffer.
// Channel c must be full and locked. recv unlocks c with unlockf.
// sg must already be dequeued from c.
// A non-nil ep must point to the heap or the caller's stack.
func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func()) {
if c.dataqsiz == 0 {
if raceenabled {
racesync(c, sg)
}
if ep != nil {
// copy data from sender
// ep points to our own stack or heap, so nothing
// special (ala sendDirect) needed here.
typedmemmove(c.elemtype, ep, sg.elem)
}
} else {
// Queue is full. Take the item at the
// head of the queue. Make the sender enqueue
// its item at the tail of the queue. Since the
// queue is full, those are both the same slot.
qp := chanbuf(c, c.recvx)
if raceenabled {
raceacquire(qp)
racerelease(qp)
raceacquireg(sg.g, qp)
racereleaseg(sg.g, qp)
}
// copy data from queue to receiver
if ep != nil {
typedmemmove(c.elemtype, ep, qp)
}
// copy data from sender to queue
typedmemmove(c.elemtype, qp, sg.elem)
c.recvx++
if c.recvx == c.dataqsiz {
c.recvx = 0
}
c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
}
sg.elem = nil
gp := sg.g
unlockf()
gp.param = unsafe.Pointer(sg)
if sg.releasetime != 0 {
sg.releasetime = cputicks()
}
goready(gp, 4)
}
// compiler implements
//
// select {
// case c <- v:
// ... foo
// default:
// ... bar
// }
//
// as
//
// if selectnbsend(c, v) {
// ... foo
// } else {
// ... bar
// }
//
func selectnbsend(t *chantype, c *hchan, elem unsafe.Pointer) (selected bool) {
return chansend(t, c, elem, false, getcallerpc(unsafe.Pointer(&t)))
}
// compiler implements
//
// select {
// case v = <-c:
// ... foo
// default:
// ... bar
// }
//
// as
//
// if selectnbrecv(&v, c) {
// ... foo
// } else {
// ... bar
// }
//
func selectnbrecv(t *chantype, elem unsafe.Pointer, c *hchan) (selected bool) {
selected, _ = chanrecv(t, c, elem, false)
return
}
// compiler implements
//
// select {
// case v, ok = <-c:
// ... foo
// default:
// ... bar
// }
//
// as
//
// if c != nil && selectnbrecv2(&v, &ok, c) {
// ... foo
// } else {
// ... bar
// }
//
func selectnbrecv2(t *chantype, elem unsafe.Pointer, received *bool, c *hchan) (selected bool) {
// TODO(khr): just return 2 values from this function, now that it is in Go.
selected, *received = chanrecv(t, c, elem, false)
return
}
//go:linkname reflect_chansend reflect.chansend
func reflect_chansend(t *chantype, c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
return chansend(t, c, elem, !nb, getcallerpc(unsafe.Pointer(&t)))
}
//go:linkname reflect_chanrecv reflect.chanrecv
func reflect_chanrecv(t *chantype, c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) {
return chanrecv(t, c, elem, !nb)
}
//go:linkname reflect_chanlen reflect.chanlen
func reflect_chanlen(c *hchan) int {
if c == nil {
return 0
}
return int(c.qcount)
}
//go:linkname reflect_chancap reflect.chancap
func reflect_chancap(c *hchan) int {
if c == nil {
return 0
}
return int(c.dataqsiz)
}
//go:linkname reflect_chanclose reflect.chanclose
func reflect_chanclose(c *hchan) {
closechan(c)
}
func (q *waitq) enqueue(sgp *sudog) {
sgp.next = nil
x := q.last
if x == nil {
sgp.prev = nil
q.first = sgp
q.last = sgp
return
}
sgp.prev = x
x.next = sgp
q.last = sgp
}
func (q *waitq) dequeue() *sudog {
for {
sgp := q.first
if sgp == nil {
return nil
}
y := sgp.next
if y == nil {
q.first = nil
q.last = nil
} else {
y.prev = nil
q.first = y
sgp.next = nil // mark as removed (see dequeueSudog)
}
// if sgp participates in a select and is already signaled, ignore it
if sgp.selectdone != nil {
// claim the right to signal
if *sgp.selectdone != 0 || !atomic.Cas(sgp.selectdone, 0, 1) {
continue
}
}
return sgp
}
}
func racesync(c *hchan, sg *sudog) {
racerelease(chanbuf(c, 0))
raceacquireg(sg.g, chanbuf(c, 0))
racereleaseg(sg.g, chanbuf(c, 0))
raceacquire(chanbuf(c, 0))
}


@ -231,9 +231,6 @@ func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) }
//
// sudogs are allocated from a special pool. Use acquireSudog and
// releaseSudog to allocate and free them.
/*
Commented out for gccgo for now.
type sudog struct {
// The following fields are protected by the hchan.lock of the
// channel this sudog is blocking on. shrinkstack depends on
@ -253,7 +250,6 @@ type sudog struct {
waitlink *sudog // g.waiting list
c *hchan // channel
}
*/
type gcstats struct {
// the struct must consist of only uint64's,
@ -364,7 +360,7 @@ type g struct {
gopc uintptr // pc of go statement that created this goroutine
startpc uintptr // pc of goroutine function
racectx uintptr
// Not for gccgo for now: waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
waiting *sudog // sudog structures this g is waiting on (that have a valid elem ptr); in lock order
// Not for gccgo: cgoCtxt []uintptr // cgo traceback context
// Per-G GC state
@ -528,7 +524,7 @@ type p struct {
gfree *g
gfreecnt int32
// Not for gccgo for now: sudogcache []*sudog
sudogcache []*sudog
// Not for gccgo for now: sudogbuf [128]*sudog
// Not for gccgo for now: tracebuf traceBufPtr

libgo/go/runtime/select.go (new file, 697 lines)

@ -0,0 +1,697 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package runtime
// This file contains the implementation of Go select statements.
import (
"runtime/internal/sys"
"unsafe"
)
// For gccgo, use go:linkname to rename compiler-called functions to
// themselves, so that the compiler will export them.
//
//go:linkname newselect runtime.newselect
//go:linkname selectdefault runtime.selectdefault
//go:linkname selectsend runtime.selectsend
//go:linkname selectrecv runtime.selectrecv
//go:linkname selectrecv2 runtime.selectrecv2
//go:linkname selectgo runtime.selectgo
const (
debugSelect = false
// scase.kind
caseRecv = iota
caseSend
caseDefault
)
// Select statement header.
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
type hselect struct {
tcase uint16 // total count of scase[]
ncase uint16 // currently filled scase[]
pollorder *uint16 // case poll order
lockorder *uint16 // channel lock order
scase [1]scase // one per case (in order of appearance)
}
// Select case descriptor.
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
type scase struct {
elem unsafe.Pointer // data element
c *hchan // chan
pc uintptr // return pc
kind uint16
index uint16 // case index
receivedp *bool // pointer to received bool (recv2)
releasetime int64
}
var (
chansendpc = funcPC(chansend)
chanrecvpc = funcPC(chanrecv)
)
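// selectsize reports the number of bytes needed for an hselect holding
// size cases: the header (which embeds one scase), size-1 further scases,
// and the lockorder and pollorder uint16 arrays, rounded up to an int64
// boundary. The compiler's Channel_type::select_type builds a stack
// temporary with the same layout, and newselect checks that the sizes agree.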
func selectsize(size uintptr) uintptr {
selsize := unsafe.Sizeof(hselect{}) +
(size-1)*unsafe.Sizeof(hselect{}.scase[0]) +
size*unsafe.Sizeof(*hselect{}.lockorder) +
size*unsafe.Sizeof(*hselect{}.pollorder)
return round(selsize, sys.Int64Align)
}
func newselect(sel *hselect, selsize int64, size int32) {
if selsize != int64(selectsize(uintptr(size))) {
print("runtime: bad select size ", selsize, ", want ", selectsize(uintptr(size)), "\n")
throw("bad select size")
}
if size != int32(uint16(size)) {
throw("select size too large")
}
sel.tcase = uint16(size)
sel.ncase = 0
sel.lockorder = (*uint16)(add(unsafe.Pointer(&sel.scase), uintptr(size)*unsafe.Sizeof(hselect{}.scase[0])))
sel.pollorder = (*uint16)(add(unsafe.Pointer(sel.lockorder), uintptr(size)*unsafe.Sizeof(*hselect{}.lockorder)))
// For gccgo the temporary variable will not have been zeroed.
memclr(unsafe.Pointer(&sel.scase), uintptr(size)*unsafe.Sizeof(hselect{}.scase[0])+uintptr(size)*unsafe.Sizeof(*hselect{}.lockorder)+uintptr(size)*unsafe.Sizeof(*hselect{}.pollorder))
if debugSelect {
print("newselect s=", sel, " size=", size, "\n")
}
}
func selectsend(sel *hselect, c *hchan, elem unsafe.Pointer, index int32) {
// nil cases do not compete
if c != nil {
selectsendImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, index)
}
return
}
// cut in half to give stack a chance to split
func selectsendImpl(sel *hselect, c *hchan, pc uintptr, elem unsafe.Pointer, index int32) {
i := sel.ncase
if i >= sel.tcase {
throw("selectsend: too many cases")
}
sel.ncase = i + 1
cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
cas.pc = pc
cas.c = c
cas.index = uint16(index)
cas.kind = caseSend
cas.elem = elem
if debugSelect {
print("selectsend s=", sel, " pc=", hex(cas.pc), " chan=", cas.c, " index=", cas.index, "\n")
}
}
func selectrecv(sel *hselect, c *hchan, elem unsafe.Pointer, index int32) {
// nil cases do not compete
if c != nil {
selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, nil, index)
}
return
}
func selectrecv2(sel *hselect, c *hchan, elem unsafe.Pointer, received *bool, index int32) {
// nil cases do not compete
if c != nil {
selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, received, index)
}
return
}
func selectrecvImpl(sel *hselect, c *hchan, pc uintptr, elem unsafe.Pointer, received *bool, index int32) {
i := sel.ncase
if i >= sel.tcase {
throw("selectrecv: too many cases")
}
sel.ncase = i + 1
cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
cas.pc = pc
cas.c = c
cas.index = uint16(index)
cas.kind = caseRecv
cas.elem = elem
cas.receivedp = received
if debugSelect {
print("selectrecv s=", sel, " pc=", hex(cas.pc), " chan=", cas.c, " index=", cas.index, "\n")
}
}
func selectdefault(sel *hselect, index int32) {
selectdefaultImpl(sel, getcallerpc(unsafe.Pointer(&sel)), index)
return
}
func selectdefaultImpl(sel *hselect, callerpc uintptr, index int32) {
i := sel.ncase
if i >= sel.tcase {
throw("selectdefault: too many cases")
}
sel.ncase = i + 1
cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
cas.pc = callerpc
cas.c = nil
cas.index = uint16(index)
cas.kind = caseDefault
if debugSelect {
print("selectdefault s=", sel, " pc=", hex(cas.pc), " index=", cas.index, "\n")
}
}
func sellock(scases []scase, lockorder []uint16) {
var c *hchan
for _, o := range lockorder {
c0 := scases[o].c
if c0 != nil && c0 != c {
c = c0
lock(&c.lock)
}
}
}
func selunlock(scases []scase, lockorder []uint16) {
// We must be very careful here to not touch sel after we have unlocked
// the last lock, because sel can be freed right after the last unlock.
// Consider the following situation.
// First M calls runtime·park() in runtime·selectgo() passing the sel.
// Once runtime·park() has unlocked the last lock, another M makes
// the G that calls select runnable again and schedules it for execution.
// When the G runs on another M, it locks all the locks and frees sel.
// Now if the first M touches sel, it will access freed memory.
n := len(scases)
r := 0
// skip the default case
if n > 0 && scases[lockorder[0]].c == nil {
r = 1
}
for i := n - 1; i >= r; i-- {
c := scases[lockorder[i]].c
if i > 0 && c == scases[lockorder[i-1]].c {
continue // will unlock it on the next iteration
}
unlock(&c.lock)
}
}
func selparkcommit(gp *g, _ unsafe.Pointer) bool {
// This must not access gp's stack (see gopark). In
// particular, it must not access the *hselect. That's okay,
// because by the time this is called, gp.waiting has all
// channels in lock order.
var lastc *hchan
for sg := gp.waiting; sg != nil; sg = sg.waitlink {
if sg.c != lastc && lastc != nil {
// As soon as we unlock the channel, fields in
// any sudog with that channel may change,
// including c and waitlink. Since multiple
// sudogs may have the same channel, we unlock
// only after we've passed the last instance
// of a channel.
unlock(&lastc.lock)
}
lastc = sg.c
}
if lastc != nil {
unlock(&lastc.lock)
}
return true
}
func block() {
gopark(nil, nil, "select (no cases)", traceEvGoStop, 1) // forever
}
// selectgo implements the select statement.
//
// *sel is on the current goroutine's stack (regardless of any
// escaping in selectgo).
//
// selectgo does not return. Instead, it overwrites its return PC and
// returns directly to the triggered select case. Because of this, it
// cannot appear at the top of a split stack.
func selectgo(sel *hselect) int32 {
_, index := selectgoImpl(sel)
return int32(index)
}
// selectgoImpl returns scase.pc and scase.so for the select
// case which fired.
func selectgoImpl(sel *hselect) (uintptr, uint16) {
if debugSelect {
print("select: sel=", sel, "\n")
}
scaseslice := slice{unsafe.Pointer(&sel.scase), int(sel.ncase), int(sel.ncase)}
scases := *(*[]scase)(unsafe.Pointer(&scaseslice))
var t0 int64
if blockprofilerate > 0 {
t0 = cputicks()
for i := 0; i < int(sel.ncase); i++ {
scases[i].releasetime = -1
}
}
// The compiler rewrites selects that statically have
// only 0 or 1 cases plus default into simpler constructs.
// The only way we can end up with such small sel.ncase
// values here is for a larger select in which most channels
// have been nilled out. The general code handles those
// cases correctly, and they are rare enough not to bother
// optimizing (and needing to test).
// generate permuted order
pollslice := slice{unsafe.Pointer(sel.pollorder), int(sel.ncase), int(sel.ncase)}
pollorder := *(*[]uint16)(unsafe.Pointer(&pollslice))
for i := 1; i < int(sel.ncase); i++ {
j := int(fastrand1()) % (i + 1)
pollorder[i] = pollorder[j]
pollorder[j] = uint16(i)
}
// sort the cases by Hchan address to get the locking order.
// simple heap sort, to guarantee n log n time and constant stack footprint.
lockslice := slice{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)}
lockorder := *(*[]uint16)(unsafe.Pointer(&lockslice))
for i := 0; i < int(sel.ncase); i++ {
j := i
// Start with the pollorder to permute cases on the same channel.
c := scases[pollorder[i]].c
for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() {
k := (j - 1) / 2
lockorder[j] = lockorder[k]
j = k
}
lockorder[j] = pollorder[i]
}
for i := int(sel.ncase) - 1; i >= 0; i-- {
o := lockorder[i]
c := scases[o].c
lockorder[i] = lockorder[0]
j := 0
for {
k := j*2 + 1
if k >= i {
break
}
if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() {
k++
}
if c.sortkey() < scases[lockorder[k]].c.sortkey() {
lockorder[j] = lockorder[k]
j = k
continue
}
break
}
lockorder[j] = o
}
/*
for i := 0; i+1 < int(sel.ncase); i++ {
if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
throw("select: broken sort")
}
}
*/
// lock all the channels involved in the select
sellock(scases, lockorder)
var (
gp *g
done uint32
sg *sudog
c *hchan
k *scase
sglist *sudog
sgnext *sudog
qp unsafe.Pointer
nextp **sudog
)
loop:
// pass 1 - look for something already waiting
var dfl *scase
var cas *scase
for i := 0; i < int(sel.ncase); i++ {
cas = &scases[pollorder[i]]
c = cas.c
switch cas.kind {
case caseRecv:
sg = c.sendq.dequeue()
if sg != nil {
goto recv
}
if c.qcount > 0 {
goto bufrecv
}
if c.closed != 0 {
goto rclose
}
case caseSend:
if raceenabled {
racereadpc(unsafe.Pointer(c), cas.pc, chansendpc)
}
if c.closed != 0 {
goto sclose
}
sg = c.recvq.dequeue()
if sg != nil {
goto send
}
if c.qcount < c.dataqsiz {
goto bufsend
}
case caseDefault:
dfl = cas
}
}
if dfl != nil {
selunlock(scases, lockorder)
cas = dfl
goto retc
}
// pass 2 - enqueue on all chans
gp = getg()
done = 0
if gp.waiting != nil {
throw("gp.waiting != nil")
}
nextp = &gp.waiting
for _, casei := range lockorder {
cas = &scases[casei]
c = cas.c
sg := acquireSudog()
sg.g = gp
// Note: selectdone is adjusted for stack copies in stack1.go:adjustsudogs
sg.selectdone = (*uint32)(noescape(unsafe.Pointer(&done)))
// No stack splits between assigning elem and enqueuing
// sg on gp.waiting where copystack can find it.
sg.elem = cas.elem
sg.releasetime = 0
if t0 != 0 {
sg.releasetime = -1
}
sg.c = c
// Construct waiting list in lock order.
*nextp = sg
nextp = &sg.waitlink
switch cas.kind {
case caseRecv:
c.recvq.enqueue(sg)
case caseSend:
c.sendq.enqueue(sg)
}
}
// wait for someone to wake us up
gp.param = nil
gopark(selparkcommit, nil, "select", traceEvGoBlockSelect, 2)
// someone woke us up
sellock(scases, lockorder)
sg = (*sudog)(gp.param)
gp.param = nil
// pass 3 - dequeue from unsuccessful chans
// otherwise they stack up on quiet channels
// record the successful case, if any.
// We singly-linked up the SudoGs in lock order.
cas = nil
sglist = gp.waiting
// Clear all elem before unlinking from gp.waiting.
for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
sg1.selectdone = nil
sg1.elem = nil
sg1.c = nil
}
gp.waiting = nil
for _, casei := range lockorder {
k = &scases[casei]
if sglist.releasetime > 0 {
k.releasetime = sglist.releasetime
}
if sg == sglist {
// sg has already been dequeued by the G that woke us up.
cas = k
} else {
c = k.c
if k.kind == caseSend {
c.sendq.dequeueSudoG(sglist)
} else {
c.recvq.dequeueSudoG(sglist)
}
}
sgnext = sglist.waitlink
sglist.waitlink = nil
releaseSudog(sglist)
sglist = sgnext
}
if cas == nil {
// This can happen if we were woken up by a close().
// TODO: figure that out explicitly so we don't need this loop.
goto loop
}
c = cas.c
if debugSelect {
print("wait-return: sel=", sel, " c=", c, " cas=", cas, " kind=", cas.kind, "\n")
}
if cas.kind == caseRecv {
if cas.receivedp != nil {
*cas.receivedp = true
}
}
if raceenabled {
if cas.kind == caseRecv && cas.elem != nil {
raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
} else if cas.kind == caseSend {
raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
}
}
if msanenabled {
if cas.kind == caseRecv && cas.elem != nil {
msanwrite(cas.elem, c.elemtype.size)
} else if cas.kind == caseSend {
msanread(cas.elem, c.elemtype.size)
}
}
selunlock(scases, lockorder)
goto retc
bufrecv:
// can receive from buffer
if raceenabled {
if cas.elem != nil {
raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
}
raceacquire(chanbuf(c, c.recvx))
racerelease(chanbuf(c, c.recvx))
}
if msanenabled && cas.elem != nil {
msanwrite(cas.elem, c.elemtype.size)
}
if cas.receivedp != nil {
*cas.receivedp = true
}
qp = chanbuf(c, c.recvx)
if cas.elem != nil {
typedmemmove(c.elemtype, cas.elem, qp)
}
memclr(qp, uintptr(c.elemsize))
c.recvx++
if c.recvx == c.dataqsiz {
c.recvx = 0
}
c.qcount--
selunlock(scases, lockorder)
goto retc
bufsend:
// can send to buffer
if raceenabled {
raceacquire(chanbuf(c, c.sendx))
racerelease(chanbuf(c, c.sendx))
raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
}
if msanenabled {
msanread(cas.elem, c.elemtype.size)
}
typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
c.sendx++
if c.sendx == c.dataqsiz {
c.sendx = 0
}
c.qcount++
selunlock(scases, lockorder)
goto retc
recv:
// can receive from sleeping sender (sg)
recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) })
if debugSelect {
print("syncrecv: sel=", sel, " c=", c, "\n")
}
if cas.receivedp != nil {
*cas.receivedp = true
}
goto retc
rclose:
// read at end of closed channel
selunlock(scases, lockorder)
if cas.receivedp != nil {
*cas.receivedp = false
}
if cas.elem != nil {
memclr(cas.elem, uintptr(c.elemsize))
}
if raceenabled {
raceacquire(unsafe.Pointer(c))
}
goto retc
send:
// can send to a sleeping receiver (sg)
if raceenabled {
raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
}
if msanenabled {
msanread(cas.elem, c.elemtype.size)
}
send(c, sg, cas.elem, func() { selunlock(scases, lockorder) })
if debugSelect {
print("syncsend: sel=", sel, " c=", c, "\n")
}
goto retc
retc:
if cas.releasetime > 0 {
blockevent(cas.releasetime-t0, 2)
}
return cas.pc, cas.index
sclose:
// send on closed channel
selunlock(scases, lockorder)
panic(plainError("send on closed channel"))
}
func (c *hchan) sortkey() uintptr {
// TODO(khr): if we have a moving garbage collector, we'll need to
// change this function.
return uintptr(unsafe.Pointer(c))
}
// A runtimeSelect is a single case passed to rselect.
// This must match ../reflect/value.go:/runtimeSelect
type runtimeSelect struct {
dir selectDir
typ unsafe.Pointer // channel type (not used here)
ch *hchan // channel
val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
}
// These values must match ../reflect/value.go:/SelectDir.
type selectDir int
const (
_ selectDir = iota
selectSend // case Chan <- Send
selectRecv // case <-Chan:
selectDefault // default
)
//go:linkname reflect_rselect reflect.rselect
func reflect_rselect(cases []runtimeSelect) (chosen int, recvOK bool) {
// flagNoScan is safe here, because all objects are also referenced from cases.
size := selectsize(uintptr(len(cases)))
sel := (*hselect)(mallocgc(size, nil, true))
newselect(sel, int64(size), int32(len(cases)))
r := new(bool)
for i := range cases {
rc := &cases[i]
switch rc.dir {
case selectDefault:
selectdefaultImpl(sel, uintptr(i), 0)
case selectSend:
if rc.ch == nil {
break
}
selectsendImpl(sel, rc.ch, uintptr(i), rc.val, 0)
case selectRecv:
if rc.ch == nil {
break
}
selectrecvImpl(sel, rc.ch, uintptr(i), rc.val, r, 0)
}
}
pc, _ := selectgoImpl(sel)
chosen = int(pc)
recvOK = *r
return
}
func (q *waitq) dequeueSudoG(sgp *sudog) {
x := sgp.prev
y := sgp.next
if x != nil {
if y != nil {
// middle of queue
x.next = y
y.prev = x
sgp.next = nil
sgp.prev = nil
return
}
// end of queue
x.next = nil
q.last = x
sgp.prev = nil
return
}
if y != nil {
// start of queue
y.prev = nil
q.first = y
sgp.next = nil
return
}
// x==y==nil. Either sgp is the only element in the queue,
// or it has already been removed. Use q.first to disambiguate.
if q.first == sgp {
q.first = nil
q.last = nil
}
}


@ -384,3 +384,67 @@ func errno() int
func entersyscall(int32)
func entersyscallblock(int32)
func exitsyscall(int32)
func gopark(func(*g, unsafe.Pointer) bool, unsafe.Pointer, string, byte, int)
func goparkunlock(*mutex, string, byte, int)
func goready(*g, int)
// Temporary for gccgo until we port mprof.go.
var blockprofilerate uint64
func blockevent(cycles int64, skip int) {}
// Temporary hack for gccgo until we port proc.go.
//go:nosplit
func acquireSudog() *sudog {
mp := acquirem()
pp := mp.p.ptr()
if len(pp.sudogcache) == 0 {
pp.sudogcache = append(pp.sudogcache, new(sudog))
}
n := len(pp.sudogcache)
s := pp.sudogcache[n-1]
pp.sudogcache[n-1] = nil
pp.sudogcache = pp.sudogcache[:n-1]
if s.elem != nil {
throw("acquireSudog: found s.elem != nil in cache")
}
releasem(mp)
return s
}
// Temporary hack for gccgo until we port proc.go.
//go:nosplit
func releaseSudog(s *sudog) {
if s.elem != nil {
throw("runtime: sudog with non-nil elem")
}
if s.selectdone != nil {
throw("runtime: sudog with non-nil selectdone")
}
if s.next != nil {
throw("runtime: sudog with non-nil next")
}
if s.prev != nil {
throw("runtime: sudog with non-nil prev")
}
if s.waitlink != nil {
throw("runtime: sudog with non-nil waitlink")
}
if s.c != nil {
throw("runtime: sudog with non-nil c")
}
gp := getg()
if gp.param != nil {
throw("runtime: releaseSudog with non-nil gp.param")
}
mp := acquirem() // avoid rescheduling to another P
pp := mp.p.ptr()
pp.sudogcache = append(pp.sudogcache, s)
releasem(mp)
}
// Temporary hack for gccgo until we port the garbage collector.
func typeBitsBulkBarrier(typ *_type, p, size uintptr) {}
// Temporary for gccgo until we port print.go.
type hex uint64

libgo/go/runtime/trace.go (new file, 1008 lines)

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -1,76 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
typedef struct WaitQ WaitQ;
typedef struct SudoG SudoG;
typedef struct Select Select;
typedef struct Scase Scase;
typedef struct __go_type_descriptor Type;
typedef struct __go_channel_type ChanType;
struct SudoG
{
G* g;
uint32* selectdone;
SudoG* link;
int64 releasetime;
byte* elem; // data element
uint32 ticket;
};
struct WaitQ
{
SudoG* first;
SudoG* last;
};
// The garbage collector is assuming that Hchan can only contain pointers into the stack
// and cannot contain pointers into the heap.
struct Hchan
{
uintgo qcount; // total data in the q
uintgo dataqsiz; // size of the circular q
uint16 elemsize;
uint16 pad; // ensures proper alignment of the buffer that follows Hchan in memory
bool closed;
const Type* elemtype; // element type
uintgo sendx; // send index
uintgo recvx; // receive index
WaitQ recvq; // list of recv waiters
WaitQ sendq; // list of send waiters
Lock;
};
// Buffer follows Hchan immediately in memory.
// chanbuf(c, i) is pointer to the i'th slot in the buffer.
#define chanbuf(c, i) ((byte*)((c)+1)+(uintptr)(c)->elemsize*(i))
enum
{
debug = 0,
// Scase.kind
CaseRecv,
CaseSend,
CaseDefault,
};
struct Scase
{
SudoG sg; // must be first member (cast to Scase)
Hchan* chan; // chan
uint16 kind;
uint16 index; // index to return
bool* receivedp; // pointer to received bool (recv2)
};
struct Select
{
uint16 tcase; // total count of scase[]
uint16 ncase; // currently filled scase[]
uint16* pollorder; // case poll order
Hchan** lockorder; // channel lock order
Scase scase[1]; // one per case (in order of appearance)
};


@ -10,7 +10,8 @@
#include "go-panic.h"
#include "go-type.h"
extern void __go_receive (ChanType *, Hchan *, byte *);
extern void chanrecv1 (ChanType *, Hchan *, void *)
__asm__ (GOSYM_PREFIX "runtime.chanrecv1");
/* Prepare to call from code written in Go to code written in C or
C++. This takes the current goroutine out of the Go scheduler, as
@ -97,7 +98,7 @@ syscall_cgocallback ()
Go. In the case of -buildmode=c-archive or c-shared, this
call may be coming in before package initialization is
complete. Wait until it is. */
__go_receive (NULL, runtime_main_init_done, NULL);
chanrecv1 (NULL, runtime_main_init_done, NULL);
}
mp = runtime_m ();


@ -462,7 +462,7 @@ dumpparams(void)
else
dumpbool(true); // big-endian ptrs
dumpint(PtrSize);
dumpint(runtime_Hchansize);
dumpint(hchanSize);
dumpint((uintptr)runtime_mheap.arena_start);
dumpint((uintptr)runtime_mheap.arena_used);
dumpint(0);
@ -769,7 +769,7 @@ dumpefacetypes(void *obj __attribute__ ((unused)), uintptr size, const Type *typ
case TypeInfo_Chan:
if(type->__size == 0) // channels may have zero-sized objects in them
break;
for(i = runtime_Hchansize; i <= size - type->__size; i += type->__size) {
for(i = hchanSize; i <= size - type->__size; i += type->__size) {
//playgcprog(i, (uintptr*)type->gc + 1, dumpeface_callback, obj);
}
break;


@ -56,7 +56,6 @@
#include "arch.h"
#include "malloc.h"
#include "mgc0.h"
#include "chan.h"
#include "go-type.h"
// Map gccgo field names to gc field names.
@ -1112,15 +1111,13 @@ scanblock(Workbuf *wbuf, bool keepworking)
// There are no heap pointers in struct Hchan,
// so we can ignore the leading sizeof(Hchan) bytes.
if(!(chantype->elem->__code & kindNoPointers)) {
// Channel's buffer follows Hchan immediately in memory.
// Size of buffer (cap(c)) is second int in the chan struct.
chancap = ((uintgo*)chan)[1];
if(chancap > 0) {
chancap = chan->dataqsiz;
if(chancap > 0 && markonly(chan->buf)) {
// TODO(atom): split into two chunks so that only the
// in-use part of the circular buffer is scanned.
// (Channel routines zero the unused part, so the current
// code does not lead to leaks, it's just a little inefficient.)
*sbuf.obj.pos++ = (Obj){(byte*)chan+runtime_Hchansize, chancap*chantype->elem->__size,
*sbuf.obj.pos++ = (Obj){chan->buf, chancap*chantype->elem->__size,
(uintptr)chantype->elem->__gc | PRECISE | LOOP};
if(sbuf.obj.pos == sbuf.obj.end)
flushobjbuf(&sbuf);


@ -564,7 +564,8 @@ static struct __go_channel_type chan_bool_type_descriptor =
CHANNEL_BOTH_DIR
};
extern Hchan *__go_new_channel (ChanType *, uintptr);
extern Hchan *makechan (ChanType *, int64)
__asm__ (GOSYM_PREFIX "runtime.makechan");
extern void closechan(Hchan *) __asm__ (GOSYM_PREFIX "runtime.closechan");
static void
@ -613,7 +614,7 @@ runtime_main(void* dummy __attribute__((unused)))
runtime_throw("runtime_main not on m0");
__go_go(runtime_MHeap_Scavenger, nil);
runtime_main_init_done = __go_new_channel(&chan_bool_type_descriptor, 0);
runtime_main_init_done = makechan(&chan_bool_type_descriptor, 0);
_cgo_notify_runtime_init_done();
@ -853,6 +854,14 @@ runtime_ready(G *gp)
g->m->locks--;
}
void goready(G*, int) __asm__ (GOSYM_PREFIX "runtime.goready");
void
goready(G* gp, int traceskip __attribute__ ((unused)))
{
runtime_ready(gp);
}
int32
runtime_gcprocs(void)
{
@ -1898,6 +1907,22 @@ runtime_park(bool(*unlockf)(G*, void*), void *lock, const char *reason)
runtime_mcall(park0);
}
void gopark(FuncVal *, void *, String, byte, int)
__asm__ ("runtime.gopark");
void
gopark(FuncVal *unlockf, void *lock, String reason,
byte traceEv __attribute__ ((unused)),
int traceskip __attribute__ ((unused)))
{
if(g->atomicstatus != _Grunning)
runtime_throw("bad g status");
g->m->waitlock = lock;
g->m->waitunlockf = unlockf == nil ? nil : (void*)unlockf->fn;
g->waitreason = reason;
runtime_mcall(park0);
}
static bool
parkunlock(G *gp, void *lock)
{
@ -1914,6 +1939,21 @@ runtime_parkunlock(Lock *lock, const char *reason)
runtime_park(parkunlock, lock, reason);
}
void goparkunlock(Lock *, String, byte, int)
__asm__ (GOSYM_PREFIX "runtime.goparkunlock");
void
goparkunlock(Lock *lock, String reason, byte traceEv __attribute__ ((unused)),
int traceskip __attribute__ ((unused)))
{
if(g->atomicstatus != _Grunning)
runtime_throw("bad g status");
g->m->waitlock = lock;
g->m->waitunlockf = parkunlock;
g->waitreason = reason;
runtime_mcall(park0);
}
// runtime_park continuation on g0.
static void
park0(G *gp)


@ -66,7 +66,7 @@ typedef struct FuncVal FuncVal;
typedef struct SigTab SigTab;
typedef struct mcache MCache;
typedef struct FixAlloc FixAlloc;
typedef struct Hchan Hchan;
typedef struct hchan Hchan;
typedef struct Timers Timers;
typedef struct Timer Timer;
typedef struct gcstats GCStats;
@ -75,6 +75,7 @@ typedef struct ParFor ParFor;
typedef struct ParForThread ParForThread;
typedef struct cgoMal CgoMal;
typedef struct PollDesc PollDesc;
typedef struct sudog SudoG;
typedef struct __go_open_array Slice;
typedef struct __go_interface Iface;
@ -294,7 +295,6 @@ extern uint32 runtime_panicking;
extern int8* runtime_goos;
extern int32 runtime_ncpu;
extern void (*runtime_sysargs)(int32, uint8**);
extern uint32 runtime_Hchansize;
extern struct debugVars runtime_debug;
extern uintptr runtime_maxstacksize;


@ -19,7 +19,6 @@
package sync
#include "runtime.h"
#include "chan.h"
#include "arch.h"
typedef struct SemaWaiter SemaWaiter;
@ -373,7 +372,7 @@ func runtime_notifyListWait(l *notifyList, t uint32) {
if (l->tail == nil) {
l->head = &s;
} else {
l->tail->link = &s;
l->tail->next = &s;
}
l->tail = &s;
runtime_parkunlock(&l->lock, "semacquire");
@ -409,8 +408,8 @@ func runtime_notifyListNotifyAll(l *notifyList) {
// Go through the local list and ready all waiters.
while (s != nil) {
SudoG* next = s->link;
s->link = nil;
SudoG* next = s->next;
s->next = nil;
readyWithTime(s, 4);
s = next;
}
@ -442,11 +441,11 @@ func runtime_notifyListNotifyOne(l *notifyList) {
// needs to be notified. If it hasn't made it to the list yet we won't
// find it, but it won't park itself once it sees the new notify number.
runtime_atomicstore(&l->notify, t+1);
for (p = nil, s = l->head; s != nil; p = s, s = s->link) {
for (p = nil, s = l->head; s != nil; p = s, s = s->next) {
if (s->ticket == t) {
SudoG *n = s->link;
SudoG *n = s->next;
if (p != nil) {
p->link = n;
p->next = n;
} else {
l->head = n;
}
@ -454,7 +453,7 @@ func runtime_notifyListNotifyOne(l *notifyList) {
l->tail = p;
}
runtime_unlock(&l->lock);
s->link = nil;
s->next = nil;
readyWithTime(s, 4);
return;
}