libgo: Upgrade to Go 1.4.2 release.
From-SVN: r221245

commit c271e224c2
parent 81b0d9065c
18 changed files with 170 additions and 66 deletions
@@ -1,4 +1,4 @@
-14854533dcc7
+883bc6ed0ea815293fe6309d66f967ea60630e87
 
-The first line of this file holds the Mercurial revision number of the
+The first line of this file holds the git revision number of the
 last merge done from the master library sources.

@@ -1 +1 @@
-go1.4
+go1.4.2

@@ -308,6 +308,7 @@ The generator is run in the package's source directory.
 Go generate accepts one specific flag:
 
 	-run=""
+		TODO: This flag is unimplemented.
 		if non-empty, specifies a regular expression to
 		select directives whose command matches the expression.
 

@@ -106,6 +106,7 @@ The generator is run in the package's source directory.
 Go generate accepts one specific flag:
 
 	-run=""
+		TODO: This flag is unimplemented.
 		if non-empty, specifies a regular expression to
 		select directives whose command matches the expression.
 

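For context on the two documentation hunks above: a generate directive is a comment of the form //go:generate <command> <args>, and the -run pattern documented here (still a TODO at this point) is meant to select which directives run. A purely illustrative example, with made-up file and package names rather than anything from this commit:

    //go:generate yacc -o gopher.go -p parser gopher.y
    package parser

Running "go generate" in that package would execute the yacc command line; a working -run flag would restrict execution to directives whose command matches the given pattern.
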
@@ -255,6 +256,10 @@ func (g *Generator) split(line string) []string {
 	// Parse line, obeying quoted strings.
 	var words []string
 	line = line[len("//go:generate ") : len(line)-1] // Drop preamble and final newline.
+	// There may still be a carriage return.
+	if len(line) > 0 && line[len(line)-1] == '\r' {
+		line = line[:len(line)-1]
+	}
 	// One (possibly quoted) word per iteration.
 Words:
 	for {

@@ -40,9 +40,15 @@ func TestGenerateCommandParse(t *testing.T) {
 	}
 	g.setShorthand([]string{"-command", "yacc", "go", "tool", "yacc"})
 	for _, test := range splitTests {
+		// First with newlines.
 		got := g.split("//go:generate " + test.in + "\n")
 		if !reflect.DeepEqual(got, test.out) {
 			t.Errorf("split(%q): got %q expected %q", test.in, got, test.out)
 		}
+		// Then with CRLFs, thank you Windows.
+		got = g.split("//go:generate " + test.in + "\r\n")
+		if !reflect.DeepEqual(got, test.out) {
+			t.Errorf("split(%q): got %q expected %q", test.in, got, test.out)
+		}
 	}
 }

@@ -290,7 +290,7 @@ func downloadPackage(p *Package) error {
 			}
 		}
 		if remote != repo {
-			return fmt.Errorf("%s is from %s, should be from %s", dir, remote, repo)
+			return fmt.Errorf("%s is a custom import path for %s, but %s is checked out from %s", rr.root, repo, dir, remote)
 		}
 	}
 }

@@ -887,7 +887,7 @@ func (z *Int) AndNot(x, y *Int) *Int {
 		}
 
 		// x &^ (-y) == x &^ ^(y-1) == x & (y-1)
-		y1 := nat(nil).add(y.abs, natOne)
+		y1 := nat(nil).sub(y.abs, natOne)
 		z.abs = z.abs.and(x.abs, y1)
 		z.neg = false
 		return z

@@ -1201,6 +1201,7 @@ var bitwiseTests = []struct {
 	{"-0x01", "-0x01", "-0x01", "-0x01", "0x00", "0x00"},
 	{"0x07", "0x08", "0x00", "0x0f", "0x0f", "0x07"},
 	{"0x05", "0x0f", "0x05", "0x0f", "0x0a", "0x00"},
+	{"0xff", "-0x0a", "0xf6", "-0x01", "-0xf7", "0x09"},
 	{"0x013ff6", "0x9a4e", "0x1a46", "0x01bffe", "0x01a5b8", "0x0125b0"},
 	{"-0x013ff6", "0x9a4e", "0x800a", "-0x0125b2", "-0x01a5bc", "-0x01c000"},
 	{"-0x013ff6", "-0x9a4e", "-0x01bffe", "-0x1a46", "0x01a5b8", "0x8008"},

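The two hunks above fix math/big's AndNot for a negative second operand: for positive y, x &^ (-y) equals x & (y-1), so the magnitude must be decremented (sub), not incremented (add). A quick informal check of that identity and of the new "0xff" / "-0x0a" test row (a sketch, not part of the commit):

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        x, y := int64(0xff), int64(0x0a)
        fmt.Println(x&^(-y) == x&(y-1)) // true

        z := new(big.Int).AndNot(big.NewInt(0xff), big.NewInt(-0x0a))
        fmt.Println(z) // 9, i.e. 0x09, matching the andNot column of the new row
    }
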
@@ -248,12 +248,12 @@ const (
 // with a unique tag like `reflect:"array"` or `reflect:"ptr"`
 // so that code cannot convert from, say, *arrayType to *ptrType.
 type rtype struct {
-	kind uint8 // enumeration for C
-	align int8 // alignment of variable with this type
-	fieldAlign uint8 // alignment of struct field with this type
-	_ uint8 // unused/padding
+	kind uint8 // enumeration for C
+	align int8 // alignment of variable with this type
+	fieldAlign uint8 // alignment of struct field with this type
+	_ uint8 // unused/padding
 	size uintptr
-	hash uint32 // hash of type; avoids computation in hash tables
+	hash uint32 // hash of type; avoids computation in hash tables
 
 	hashfn uintptr // hash function code
 	equalfn uintptr // equality function code

@@ -1582,8 +1582,9 @@ func MapOf(key, elem Type) Type {
 
 // gcProg is a helper type for generatation of GC pointer info.
 type gcProg struct {
-	gc []byte
-	size uintptr // size of type in bytes
+	gc []byte
+	size uintptr // size of type in bytes
+	hasPtr bool
 }
 
 func (gc *gcProg) append(v byte) {

@@ -1644,11 +1645,14 @@ func (gc *gcProg) appendWord(v byte) {
 	gc.gc[nptr/2] &= ^(3 << ((nptr%2)*4 + 2))
 	gc.gc[nptr/2] |= v << ((nptr%2)*4 + 2)
 	gc.size += ptrsize
+	if v == bitsPointer {
+		gc.hasPtr = true
+	}
 }
 
-func (gc *gcProg) finalize() unsafe.Pointer {
+func (gc *gcProg) finalize() (unsafe.Pointer, bool) {
 	if gc.size == 0 {
-		return nil
+		return nil, false
 	}
 	ptrsize := unsafe.Sizeof(uintptr(0))
 	gc.align(ptrsize)

@@ -1663,7 +1667,7 @@ func (gc *gcProg) finalize() unsafe.Pointer {
 			gc.appendWord(extractGCWord(gc.gc, i))
 		}
 	}
-	return unsafe.Pointer(&gc.gc[0])
+	return unsafe.Pointer(&gc.gc[0]), gc.hasPtr
 }
 
 func extractGCWord(gc []byte, i uintptr) byte {

@@ -1708,10 +1712,6 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
 	for i := 0; i < int(bucketSize*unsafe.Sizeof(uint8(0))/ptrsize); i++ {
 		gc.append(bitsScalar)
 	}
-	gc.append(bitsPointer) // overflow
-	if runtime.GOARCH == "amd64p32" {
-		gc.append(bitsScalar)
-	}
 	// keys
 	for i := 0; i < bucketSize; i++ {
 		gc.appendProg(ktyp)

@@ -1720,10 +1720,15 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
 	for i := 0; i < bucketSize; i++ {
 		gc.appendProg(etyp)
 	}
+	// overflow
+	gc.append(bitsPointer)
+	if runtime.GOARCH == "amd64p32" {
+		gc.append(bitsScalar)
+	}
 
 	b := new(rtype)
 	b.size = gc.size
-	// b.gc[0] = gc.finalize()
+	// b.gc[0], _ = gc.finalize()
 	b.kind |= kindGCProg
 	s := "bucket(" + *ktyp.string + "," + *etyp.string + ")"
 	b.string = &s

@@ -7,6 +7,7 @@
 package runtime_test
 
 import (
+	"os/exec"
 	"runtime"
 	"strings"
 	"testing"

@@ -50,6 +51,30 @@ func TestCgoExternalThreadPanic(t *testing.T) {
 	}
 }
 
+func TestCgoExternalThreadSIGPROF(t *testing.T) {
+	// issue 9456.
+	switch runtime.GOOS {
+	case "plan9", "windows":
+		t.Skipf("no pthreads on %s", runtime.GOOS)
+	case "darwin":
+		// static constructor needs external linking, but we don't support
+		// external linking on OS X 10.6.
+		out, err := exec.Command("uname", "-r").Output()
+		if err != nil {
+			t.Fatalf("uname -r failed: %v", err)
+		}
+		// OS X 10.6 == Darwin 10.x
+		if strings.HasPrefix(string(out), "10.") {
+			t.Skipf("no external linking on OS X 10.6")
+		}
+	}
+	got := executeTest(t, cgoExternalThreadSIGPROFSource, nil)
+	want := "OK\n"
+	if got != want {
+		t.Fatalf("expected %q, but got %q", want, got)
+	}
+}
+
 const cgoSignalDeadlockSource = `
 package main
 

@@ -194,3 +219,46 @@ start(void)
 	printf("_beginthreadex failed\n");
 }
 `
+
+const cgoExternalThreadSIGPROFSource = `
+package main
+
+/*
+#include <stdint.h>
+#include <signal.h>
+#include <pthread.h>
+
+volatile int32_t spinlock;
+
+static void *thread1(void *p) {
+	(void)p;
+	while (spinlock == 0)
+		;
+	pthread_kill(pthread_self(), SIGPROF);
+	spinlock = 0;
+	return NULL;
+}
+__attribute__((constructor)) void issue9456() {
+	pthread_t tid;
+	pthread_create(&tid, 0, thread1, NULL);
+}
+*/
+import "C"
+
+import (
+	"runtime"
+	"sync/atomic"
+	"unsafe"
+)
+
+func main() {
+	// This test intends to test that sending SIGPROF to foreign threads
+	// before we make any cgo call will not abort the whole process, so
+	// we cannot make any cgo call here. See http://golang.org/issue/9456.
+	atomic.StoreInt32((*int32)(unsafe.Pointer(&C.spinlock)), 1)
+	for atomic.LoadInt32((*int32)(unsafe.Pointer(&C.spinlock))) == 1 {
+		runtime.Gosched()
+	}
+	println("OK")
+}
+`

@@ -117,12 +117,12 @@ type hmap struct {
 
 // A bucket for a Go map.
 type bmap struct {
-	tophash  [bucketCnt]uint8
-	overflow *bmap
+	tophash [bucketCnt]uint8
 	// Followed by bucketCnt keys and then bucketCnt values.
 	// NOTE: packing all the keys together and then all the values together makes the
 	// code a bit more complicated than alternating key/value/key/value/... but it allows
 	// us to eliminate padding which would be needed for, e.g., map[int64]int8.
+	// Followed by an overflow pointer.
 }
 
 // A hash iteration structure.

@@ -149,6 +149,13 @@ func evacuated(b *bmap) bool {
 	return h > empty && h < minTopHash
 }
 
+func (b *bmap) overflow(t *maptype) *bmap {
+	return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize))
+}
+func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
+	*(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize)) = ovf
+}
+
 func makemap(t *maptype, hint int64) *hmap {
 	if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
 		gothrow("bad hmap size")

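The hunk above (and the b.overflow(t) call sites that follow) moves the bucket's overflow pointer out of a named struct field and into the last pointer-sized word of the bucket's raw memory, reached with unsafe pointer arithmetic. A standalone sketch of that access pattern, using made-up names (record, recordSize) rather than the runtime's bmap/maptype:

    package main

    import (
        "fmt"
        "unsafe"
    )

    const recordSize = 64 // hypothetical fixed bucket size in bytes

    // record reuses its last unsafe.Sizeof(uintptr(0)) bytes as a *record.
    type record struct {
        raw [recordSize]byte
    }

    func (r *record) overflow() *record {
        off := uintptr(recordSize) - unsafe.Sizeof(uintptr(0))
        return *(**record)(unsafe.Pointer(uintptr(unsafe.Pointer(r)) + off))
    }

    func (r *record) setOverflow(next *record) {
        off := uintptr(recordSize) - unsafe.Sizeof(uintptr(0))
        *(**record)(unsafe.Pointer(uintptr(unsafe.Pointer(r)) + off)) = next
    }

    func main() {
        a, b := new(record), new(record)
        a.setOverflow(b)
        fmt.Println(a.overflow() == b) // true
    }

A pointer hidden inside raw bytes like this is not visible to the garbage collector on its own, which is why bucketOf in the reflect hunk above still appends an explicit bitsPointer slot for the overflow word.
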
@@ -275,7 +282,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 				return v
 			}
 		}
-		b = b.overflow
+		b = b.overflow(t)
 		if b == nil {
 			return unsafe.Pointer(t.elem.zero)
 		}

@@ -323,7 +330,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
 				return v, true
 			}
 		}
-		b = b.overflow
+		b = b.overflow(t)
 		if b == nil {
 			return unsafe.Pointer(t.elem.zero), false
 		}

@@ -366,7 +373,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
 				return k, v
 			}
 		}
-		b = b.overflow
+		b = b.overflow(t)
 		if b == nil {
 			return nil, nil
 		}

@@ -437,10 +444,11 @@ again:
 			memmove(v2, val, uintptr(t.elem.size))
 			return
 		}
-		if b.overflow == nil {
+		ovf := b.overflow(t)
+		if ovf == nil {
 			break
 		}
-		b = b.overflow
+		b = ovf
 	}
 
 	// did not find mapping for key. Allocate new cell & add entry.

@@ -455,7 +463,7 @@ again:
 		memstats.next_gc = memstats.heap_alloc
 	}
 	newb := (*bmap)(newobject(t.bucket))
-	b.overflow = newb
+	b.setoverflow(t, newb)
 	inserti = &newb.tophash[0]
 	insertk = add(unsafe.Pointer(newb), dataOffset)
 	insertv = add(insertk, bucketCnt*uintptr(t.keysize))

@@ -525,7 +533,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
 			h.count--
 			return
 		}
-		b = b.overflow
+		b = b.overflow(t)
 		if b == nil {
 			return
 		}

@@ -720,7 +728,7 @@ next:
 			return
 		}
 	}
-	b = b.overflow
+	b = b.overflow(t)
 	i = 0
 	goto next
 }

@@ -778,7 +786,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 		yk := add(unsafe.Pointer(y), dataOffset)
 		xv := add(xk, bucketCnt*uintptr(t.keysize))
 		yv := add(yk, bucketCnt*uintptr(t.keysize))
-		for ; b != nil; b = b.overflow {
+		for ; b != nil; b = b.overflow(t) {
 			k := add(unsafe.Pointer(b), dataOffset)
 			v := add(k, bucketCnt*uintptr(t.keysize))
 			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {

@@ -828,7 +836,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 					memstats.next_gc = memstats.heap_alloc
 				}
 				newx := (*bmap)(newobject(t.bucket))
-				x.overflow = newx
+				x.setoverflow(t, newx)
 				x = newx
 				xi = 0
 				xk = add(unsafe.Pointer(x), dataOffset)

@@ -855,7 +863,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 					memstats.next_gc = memstats.heap_alloc
 				}
 				newy := (*bmap)(newobject(t.bucket))
-				y.overflow = newy
+				y.setoverflow(t, newy)
 				y = newy
 				yi = 0
 				yk = add(unsafe.Pointer(y), dataOffset)

@@ -881,7 +889,6 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 		// Unlink the overflow buckets & clear key/value to help GC.
 		if h.flags&oldIterator == 0 {
 			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
-			b.overflow = nil
 			memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
 		}
 	}

@@ -43,7 +43,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
 			}
 			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
 		}
-		b = b.overflow
+		b = b.overflow(t)
 		if b == nil {
 			return unsafe.Pointer(t.elem.zero)
 		}

@@ -85,7 +85,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
 			}
 			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
 		}
-		b = b.overflow
+		b = b.overflow(t)
 		if b == nil {
 			return unsafe.Pointer(t.elem.zero), false
 		}

@@ -127,7 +127,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
 			}
 			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
 		}
-		b = b.overflow
+		b = b.overflow(t)
 		if b == nil {
 			return unsafe.Pointer(t.elem.zero)
 		}

@@ -169,7 +169,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
 			}
 			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
 		}
-		b = b.overflow
+		b = b.overflow(t)
 		if b == nil {
 			return unsafe.Pointer(t.elem.zero), false
 		}

@@ -271,7 +271,7 @@ dohash:
 				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
 			}
 		}
-		b = b.overflow
+		b = b.overflow(t)
 		if b == nil {
 			return unsafe.Pointer(t.elem.zero)
 		}

@@ -371,7 +371,7 @@ dohash:
 				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
 			}
 		}
-		b = b.overflow
+		b = b.overflow(t)
 		if b == nil {
 			return unsafe.Pointer(t.elem.zero), false
 		}

@@ -575,20 +575,16 @@ func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
 // If all is true, Stack formats stack traces of all other goroutines
 // into buf after the trace for the current goroutine.
 func Stack(buf []byte, all bool) int {
-	mp := acquirem()
-	gp := mp.curg
 	if all {
 		semacquire(&worldsema, false)
-		mp.gcing = 1
-		releasem(mp)
+		gp := getg()
+		gp.m.gcing = 1
 		onM(stoptheworld)
-		if mp != acquirem() {
-			gothrow("Stack: rescheduled")
-		}
 	}
 
 	n := 0
 	if len(buf) > 0 {
+		gp := getg()
 		sp := getcallersp(unsafe.Pointer(&buf))
 		pc := getcallerpc(unsafe.Pointer(&buf))
 		onM(func() {

@@ -605,11 +601,11 @@ func Stack(buf []byte, all bool) int {
 	}
 
 	if all {
-		mp.gcing = 0
+		gp := getg()
+		gp.m.gcing = 0
 		semrelease(&worldsema)
 		onM(starttheworld)
 	}
-	releasem(mp)
 	return n
 }
 

@@ -6,6 +6,8 @@ package runtime
 
 import "unsafe"
 
+const _SIGPROF = 0 // dummy value for badsignal
+
 func pread(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32
 func pwrite(fd int32, buf unsafe.Pointer, nbytes int32, offset int64) int32
 func seek(fd int32, offset int64, whence int32) int64

@@ -154,6 +154,15 @@ func signal_disable(s uint32) {
 // This runs on a foreign stack, without an m or a g. No stack split.
 //go:nosplit
 func badsignal(sig uintptr) {
+	// Some external libraries, for example, OpenBLAS, create worker threads in
+	// a global constructor. If we're doing cpu profiling, and the SIGPROF signal
+	// comes to one of the foreign threads before we make our first cgo call, the
+	// call to cgocallback below will bring down the whole process.
+	// It's better to miss a few SIGPROF signals than to abort in this case.
+	// See http://golang.org/issue/9456.
+	if _SIGPROF != 0 && sig == _SIGPROF && needextram != 0 {
+		return
+	}
 	cgocallback(unsafe.Pointer(funcPC(sigsend)), noescape(unsafe.Pointer(&sig)), unsafe.Sizeof(sig))
 }
 

@@ -8,6 +8,8 @@ import (
 	"unsafe"
 )
 
+const _SIGPROF = 0 // dummy value for badsignal
+
 type callbacks struct {
 	lock mutex
 	ctxt [cb_max]*wincallbackcontext

@@ -52,11 +54,13 @@ func compileCallback(fn eface, cleanstack bool) (code uintptr) {
 		panic("compilecallback: output parameter size is wrong")
 	}
 	argsize := uintptr(0)
-	for _, t := range (*[1024](*_type))(unsafe.Pointer(&ft.in[0]))[:len(ft.in)] {
-		if (*t).size > uintptrSize {
-			panic("compilecallback: input parameter size is wrong")
-		}
-		argsize += uintptrSize
+	if len(ft.in) > 0 {
+		for _, t := range (*[1024](*_type))(unsafe.Pointer(&ft.in[0]))[:len(ft.in)] {
+			if (*t).size > uintptrSize {
+				panic("compilecallback: input parameter size is wrong")
+			}
+			argsize += uintptrSize
+		}
 	}
 
 	lock(&cbs.lock)

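The guard added above avoids evaluating &ft.in[0] when the parameter list is empty: indexing element 0 of an empty slice panics at run time. A tiny illustration of the same guard, with a hypothetical helper rather than runtime code:

    package main

    import "fmt"

    func firstOrNil(s []int) *int {
        if len(s) > 0 {
            return &s[0] // only safe when the slice is non-empty
        }
        return nil
    }

    func main() {
        fmt.Println(firstOrNil([]int{7}) != nil) // true
        fmt.Println(firstOrNil(nil))             // <nil>, no panic
    }
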
@@ -12,16 +12,16 @@ func (any *anyMessage) toRoutingMessage(b []byte) RoutingMessage {
 	switch any.Type {
 	case RTM_ADD, RTM_DELETE, RTM_CHANGE, RTM_GET, RTM_LOSING, RTM_REDIRECT, RTM_MISS, RTM_LOCK, RTM_RESOLVE:
 		p := (*RouteMessage)(unsafe.Pointer(any))
-		return &RouteMessage{Header: p.Header, Data: b[SizeofRtMsghdr:any.Msglen]}
+		return &RouteMessage{Header: p.Header, Data: b[p.Header.Hdrlen:any.Msglen]}
 	case RTM_IFINFO:
 		p := (*InterfaceMessage)(unsafe.Pointer(any))
-		return &InterfaceMessage{Header: p.Header, Data: b[SizeofIfMsghdr:any.Msglen]}
+		return &InterfaceMessage{Header: p.Header, Data: b[p.Header.Hdrlen:any.Msglen]}
 	case RTM_IFANNOUNCE:
 		p := (*InterfaceAnnounceMessage)(unsafe.Pointer(any))
 		return &InterfaceAnnounceMessage{Header: p.Header}
 	case RTM_NEWADDR, RTM_DELADDR:
 		p := (*InterfaceAddrMessage)(unsafe.Pointer(any))
-		return &InterfaceAddrMessage{Header: p.Header, Data: b[SizeofIfaMsghdr:any.Msglen]}
+		return &InterfaceAddrMessage{Header: p.Header, Data: b[p.Header.Hdrlen:any.Msglen]}
 	}
 	return nil
 }

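The routing-message hunk above stops slicing the payload at a fixed, compile-time header size and instead uses the length the message header itself reports (Hdrlen), so messages whose header grew in a newer OS release still parse. A small standalone sketch of the same pattern, with a made-up header layout rather than syscall's types:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // msgHeader is a hypothetical two-field header: total message length and
    // the header's own length, both little-endian uint16.
    type msgHeader struct {
        Msglen uint16
        Hdrlen uint16
    }

    func payload(b []byte) []byte {
        h := msgHeader{
            Msglen: binary.LittleEndian.Uint16(b[0:2]),
            Hdrlen: binary.LittleEndian.Uint16(b[2:4]),
        }
        // Trust the header's declared length instead of a hard-coded offset.
        return b[h.Hdrlen:h.Msglen]
    }

    func main() {
        msg := []byte{10, 0, 6, 0, 0, 0, 'd', 'a', 't', 'a'}
        fmt.Printf("%s\n", payload(msg)) // data
    }
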
@@ -40,12 +40,14 @@ repository=$1
 old_rev=`sed 1q MERGE`
 
 rm -rf ${OLDDIR}
-hg clone -r ${old_rev} ${repository} ${OLDDIR}
+git clone ${repository} ${OLDDIR}
+(cd ${OLDDIR} && git checkout ${old_rev})
 
 rm -rf ${NEWDIR}
-hg clone -u ${rev} ${repository} ${NEWDIR}
+git clone ${repository} ${NEWDIR}
+(cd ${NEWDIR} && git checkout ${rev})
 
-new_rev=`cd ${NEWDIR} && hg log -r ${rev} | sed 1q | sed -e 's/.*://'`
+new_rev=`cd ${NEWDIR} && git log | sed 1q | sed -e 's/commit //'`
 
 merge() {
   name=$1

@@ -69,7 +71,7 @@ merge() {
   elif test -f ${old}; then
     # The file exists in the old version.
     if ! test -f ${libgo}; then
-      echo "merge.sh: $name: skipping: exists in old and new hg, but not in libgo"
+      echo "merge.sh: $name: skipping: exists in old and new git, but not in libgo"
       continue
     fi
     if cmp -s ${old} ${libgo}; then

@@ -160,11 +162,10 @@ done
   if ! test -d ${oldtd}; then
     continue
   fi
-  (cd ${oldtd} && hg status -A .) | while read f; do
-    if test "`basename $f`" = ".hgignore"; then
+  (cd ${oldtd} && git ls-files .) | while read f; do
+    if test "`basename $f`" = ".gitignore"; then
       continue
     fi
-    f=`echo $f | sed -e 's/^..//'`
     name=$d/$f
     oldfile=${oldtd}/$f
     newfile=${newtd}/$f

@@ -189,11 +190,10 @@ for c in $cmdlist; do
   if ! test -d ${oldtd}; then
     continue
   fi
-  (cd ${oldtd} && hg status -A .) | while read f; do
-    if test "`basename $f`" = ".hgignore"; then
+  (cd ${oldtd} && git ls-files .) | while read f; do
+    if test "`basename $f`" = ".gitignore"; then
      continue
     fi
-    f=`echo $f | sed -e 's/^..//'`
     name=$d/$f
     oldfile=${oldtd}/$f
     newfile=${newtd}/$f