--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
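+	// '8' is the traditional toolchain character for 386 (8g, 8l).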
+ thechar = '8'
+ _BigEndian = 0
+ _CacheLineSize = 64
+ _RuntimeGogoBytes = 64
+ _PhysPageSize = _NaCl*65536 + (1-_NaCl)*4096 // 4k normally; 64k on NaCl
+ _PCQuantum = 1
+ _Int64Align = 4
+)
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ thechar = '6'
+ _BigEndian = 0
+ _CacheLineSize = 64
+ _RuntimeGogoBytes = 64 + (_Plan9|_Solaris|_Windows)*16
+ _PhysPageSize = 4096
+ _PCQuantum = 1
+ _Int64Align = 8
+)
--- /dev/null
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+const (
+ thechar = '5'
+ _BigEndian = 0
+ _CacheLineSize = 32
+ _RuntimeGogoBytes = 60
+	_PhysPageSize = 65536*_NaCl + 4096*(1-_NaCl) // 4k normally; 64k on NaCl
+ _PCQuantum = 4
+ _Int64Align = 4
+)
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-enum {
- thechar = '8',
- BigEndian = 0,
- CacheLineSize = 64,
- RuntimeGogoBytes = 64,
-#ifdef GOOS_nacl
- PhysPageSize = 65536,
-#else
- PhysPageSize = 4096,
-#endif
- PCQuantum = 1,
- Int64Align = 4
-};
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-enum {
- thechar = '6',
- BigEndian = 0,
- CacheLineSize = 64,
-#ifdef GOOS_solaris
- RuntimeGogoBytes = 80,
-#else
-#ifdef GOOS_windows
- RuntimeGogoBytes = 80,
-#else
-#ifdef GOOS_plan9
- RuntimeGogoBytes = 80,
-#else
- RuntimeGogoBytes = 64,
-#endif // Plan 9
-#endif // Windows
-#endif // Solaris
- PhysPageSize = 4096,
- PCQuantum = 1,
- Int64Align = 8
-};
+++ /dev/null
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-enum {
- thechar = '5',
- BigEndian = 0,
- CacheLineSize = 32,
- RuntimeGogoBytes = 60,
-#ifdef GOOS_nacl
- PhysPageSize = 65536,
-#else
- PhysPageSize = 4096,
-#endif
- PCQuantum = 4,
- Int64Align = 4
-};
+++ /dev/null
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !arm
-
-package runtime
-
-import "unsafe"
-
-//go:noescape
-func xadd(ptr *uint32, delta int32) uint32
-
-//go:noescape
-func xadd64(ptr *uint64, delta int64) uint64
-
-//go:noescape
-func xchg(ptr *uint32, new uint32) uint32
-
-//go:noescape
-func xchg64(ptr *uint64, new uint64) uint64
-
-//go:noescape
-func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
-
-//go:noescape
-func xchguintptr(ptr *uintptr, new uintptr) uintptr
-
-//go:noescape
-func atomicload(ptr *uint32) uint32
-
-//go:noescape
-func atomicload64(ptr *uint64) uint64
-
-//go:noescape
-func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer
-
-//go:noescape
-func atomicor8(ptr *uint8, val uint8)
-
-//go:noescape
-func cas64(ptr *uint64, old, new uint64) bool
-
-//go:noescape
-func atomicstore(ptr *uint32, val uint32)
-
-//go:noescape
-func atomicstore64(ptr *uint64, val uint64)
-
-//go:noescape
-func atomicstorep(ptr unsafe.Pointer, val unsafe.Pointer)
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-#include "textflag.h"
-
-#pragma textflag NOSPLIT
-uint32
-runtime·atomicload(uint32 volatile* addr)
-{
- return *addr;
-}
-
-#pragma textflag NOSPLIT
-void*
-runtime·atomicloadp(void* volatile* addr)
-{
- return *addr;
-}
-
-#pragma textflag NOSPLIT
-uint64
-runtime·xadd64(uint64 volatile* addr, int64 v)
-{
- uint64 old;
-
- do
- old = *addr;
- while(!runtime·cas64(addr, old, old+v));
-
- return old+v;
-}
-
-#pragma textflag NOSPLIT
-uint64
-runtime·xchg64(uint64 volatile* addr, uint64 v)
-{
- uint64 old;
-
- do
- old = *addr;
- while(!runtime·cas64(addr, old, v));
-
- return old;
-}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// The calls to nop are to keep these functions from being inlined.
+// If they are inlined we have no guarantee that later rewrites of the
+// code by optimizers will preserve the relative order of memory accesses.
+
+//go:nosplit
+func atomicload(ptr *uint32) uint32 {
+ nop()
+ return *ptr
+}
+
+//go:nosplit
+func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer {
+ nop()
+ return *(*unsafe.Pointer)(ptr)
+}
+
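+// xadd64 atomically adds delta to *ptr and returns the new value,
+// emulated here with a cas64 loop.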
+//go:nosplit
+func xadd64(ptr *uint64, delta int64) uint64 {
+ for {
+ old := *ptr
+ if cas64(ptr, old, old+uint64(delta)) {
+ return old + uint64(delta)
+ }
+ }
+}
+
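+// xchg64 atomically replaces *ptr with new and returns the old value,
+// emulated here with a cas64 loop.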
+//go:nosplit
+func xchg64(ptr *uint64, new uint64) uint64 {
+ for {
+ old := *ptr
+ if cas64(ptr, old, new) {
+ return old
+ }
+ }
+}
+
+//go:noescape
+func xadd(ptr *uint32, delta int32) uint32
+
+//go:noescape
+func xchg(ptr *uint32, new uint32) uint32
+
+// xchgp cannot have a go:noescape annotation, because
+// while ptr does not escape, new does. If new is marked as
+// not escaping, the compiler will make incorrect escape analysis
+// decisions about the value being xchg'ed.
+// Instead, make xchgp a wrapper around the actual atomic.
+// When calling the wrapper we mark ptr as noescape explicitly.
+
+//go:nosplit
+func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
+ return xchgp1(noescape(ptr), new)
+}
+
+func xchgp1(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
+
+//go:noescape
+func xchguintptr(ptr *uintptr, new uintptr) uintptr
+
+//go:noescape
+func atomicload64(ptr *uint64) uint64
+
+//go:noescape
+func atomicor8(ptr *uint8, val uint8)
+
+//go:noescape
+func cas64(ptr *uint64, old, new uint64) bool
+
+//go:noescape
+func atomicstore(ptr *uint32, val uint32)
+
+//go:noescape
+func atomicstore64(ptr *uint64, val uint64)
+
+// atomicstorep cannot have a go:noescape annotation.
+// See comment above for xchgp.
+
+//go:nosplit
+func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
+ atomicstorep1(noescape(ptr), new)
+}
+
+func atomicstorep1(ptr unsafe.Pointer, val unsafe.Pointer)
+++ /dev/null
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build amd64 amd64p32
-
-#include "runtime.h"
-#include "textflag.h"
-
-#pragma textflag NOSPLIT
-uint32
-runtime·atomicload(uint32 volatile* addr)
-{
- return *addr;
-}
-
-#pragma textflag NOSPLIT
-uint64
-runtime·atomicload64(uint64 volatile* addr)
-{
- return *addr;
-}
-
-#pragma textflag NOSPLIT
-void*
-runtime·atomicloadp(void* volatile* addr)
-{
- return *addr;
-}
--- /dev/null
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64 amd64p32
+
+package runtime
+
+import "unsafe"
+
+// The calls to nop are to keep these functions from being inlined.
+// If they are inlined we have no guarantee that later rewrites of the
+// code by optimizers will preserve the relative order of memory accesses.
+
+//go:nosplit
+func atomicload(ptr *uint32) uint32 {
+ nop()
+ return *ptr
+}
+
+//go:nosplit
+func atomicloadp(ptr unsafe.Pointer) unsafe.Pointer {
+ nop()
+ return *(*unsafe.Pointer)(ptr)
+}
+
+//go:nosplit
+func atomicload64(ptr *uint64) uint64 {
+ nop()
+ return *ptr
+}
+
+//go:noescape
+func xadd(ptr *uint32, delta int32) uint32
+
+//go:noescape
+func xadd64(ptr *uint64, delta int64) uint64
+
+//go:noescape
+func xchg(ptr *uint32, new uint32) uint32
+
+//go:noescape
+func xchg64(ptr *uint64, new uint64) uint64
+
+// xchgp cannot have a go:noescape annotation, because
+// while ptr does not escape, new does. If new is marked as
+// not escaping, the compiler will make incorrect escape analysis
+// decisions about the value being xchg'ed.
+// Instead, make xchgp a wrapper around the actual atomic.
+// When calling the wrapper we mark ptr as noescape explicitly.
+
+//go:nosplit
+func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer {
+ return xchgp1(noescape(ptr), new)
+}
+
+func xchgp1(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
+
+//go:noescape
+func xchguintptr(ptr *uintptr, new uintptr) uintptr
+
+//go:noescape
+func atomicor8(ptr *uint8, val uint8)
+
+//go:noescape
+func cas64(ptr *uint64, old, new uint64) bool
+
+//go:noescape
+func atomicstore(ptr *uint32, val uint32)
+
+//go:noescape
+func atomicstore64(ptr *uint64, val uint64)
+
+// atomicstorep cannot have a go:noescape annotation.
+// See comment above for xchgp.
+
+//go:nosplit
+func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
+ atomicstorep1(noescape(ptr), new)
+}
+
+func atomicstorep1(ptr unsafe.Pointer, val unsafe.Pointer)
return unsafe.Pointer(uintptr(p) + delta)
}
-// in runtime.c
func getg() *g
-func acquirem() *m
-func releasem(mp *m)
-func gomcache() *mcache
-func readgstatus(*g) uint32 // proc.c
// mcall switches from the g to the g0 stack and invokes fn(g),
// where g is the goroutine that made the call.
gothrow("onM called from signal goroutine")
}
-// C functions that run on the M stack.
-// Call using mcall.
-func gosched_m(*g)
-func park_m(*g)
-func recovery_m(*g)
-
-// More C functions that run on the M stack.
-// Call using onM.
-func mcacheRefill_m()
-func largeAlloc_m()
-func gc_m()
-func scavenge_m()
-func setFinalizer_m()
-func removeFinalizer_m()
-func markallocated_m()
-func unrollgcprog_m()
-func unrollgcproginplace_m()
-func setgcpercent_m()
-func setmaxthreads_m()
-func ready_m()
-func deferproc_m()
-func goexit_m()
-func startpanic_m()
-func dopanic_m()
-func readmemstats_m()
-func writeheapdump_m()
-
// memclr clears n bytes starting at ptr.
// in memclr_*.s
//go:noescape
//go:noescape
func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
-func starttheworld()
-func stoptheworld()
-func newextram()
-func lockOSThread()
-func unlockOSThread()
-
// exported value for testing
var hashLoad = loadFactor
return unsafe.Pointer(x ^ 0)
}
-func entersyscall()
-func reentersyscall(pc uintptr, sp unsafe.Pointer)
-func entersyscallblock()
-func exitsyscall()
-
func cgocallback(fn, frame unsafe.Pointer, framesize uintptr)
func gogo(buf *gobuf)
func gosave(buf *gobuf)
func nanotime() int64
func usleep(usec uint32)
-// careful: cputicks is not guaranteed to be monotonic! In particular, we have
-// noticed drift between cpus on certain os/arch combinations. See issue 8976.
-func cputicks() int64
-
func mmap(addr unsafe.Pointer, n uintptr, prot, flags, fd int32, off uint32) unsafe.Pointer
func munmap(addr unsafe.Pointer, n uintptr)
func madvise(addr unsafe.Pointer, n uintptr, flags int32)
func reflectcall(fn, arg unsafe.Pointer, n uint32, retoffset uint32)
-func osyield()
func procyield(cycles uint32)
func cgocallback_gofunc(fv *funcval, frame unsafe.Pointer, framesize uintptr)
-func readgogc() int32
-func purgecachedstats(c *mcache)
-func gostringnocopy(b *byte) string
func goexit()
//go:noescape
//go:noescape
func cas(ptr *uint32, old, new uint32) bool
-//go:noescape
-func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
+// casp cannot have a go:noescape annotation, because
+// while ptr and old do not escape, new does. If new is marked as
+// not escaping, the compiler will make incorrect escape analysis
+// decisions about the value being cas'ed.
+// Instead, make casp a wrapper around the actual atomic.
+// When calling the wrapper we mark ptr and old as noescape explicitly.
+
+//go:nosplit
+func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
+ return casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new)
+}
+
+func casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
+
+func nop() // call to prevent inlining of function body
//go:noescape
func casuintptr(ptr *uintptr, old, new uintptr) bool
//go:noescape
func open(name *byte, mode, perm int32) int32
-//go:noescape
-func gotraceback(*bool) int32
-
+// _NoArgs is the argp value used in Defer structs when there is no argp.
const _NoArgs = ^uintptr(0)
-func newstack()
-func newproc()
func morestack()
-func mstart()
func rt0_go()
// return0 is a stub used to return 0 from deferproc.
func call268435456(fn, arg unsafe.Pointer, n, retoffset uint32)
func call536870912(fn, arg unsafe.Pointer, n, retoffset uint32)
func call1073741824(fn, arg unsafe.Pointer, n, retoffset uint32)
+
+func switchtoM()
+++ /dev/null
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-#include "runtime.h"
-
-// adjust Gobuf as if it executed a call to fn with context ctxt
-// and then did an immediate Gosave.
-void
-runtime·gostartcall(Gobuf *gobuf, void (*fn)(void), void *ctxt)
-{
- if(gobuf->lr != 0)
- runtime·throw("invalid use of gostartcall");
- gobuf->lr = gobuf->pc;
- gobuf->pc = (uintptr)fn;
- gobuf->ctxt = ctxt;
-}
-
-// Called to rewind context saved during morestack back to beginning of function.
-// To help us, the linker emits a jmp back to the beginning right after the
-// call to morestack. We just have to decode and apply that jump.
-void
-runtime·rewindmorestack(Gobuf *gobuf)
-{
- uint32 inst;
-
- inst = *(uint32*)gobuf->pc;
- if((gobuf->pc&3) == 0 && (inst>>24) == 0x9a) {
- //runtime·printf("runtime: rewind pc=%p to pc=%p\n", gobuf->pc, gobuf->pc + ((int32)(inst<<8)>>6) + 8);
- gobuf->pc += ((int32)(inst<<8)>>6) + 8;
- return;
- }
- runtime·printf("runtime: pc=%p %x\n", gobuf->pc, inst);
- runtime·throw("runtime: misuse of rewindmorestack");
-}
--- /dev/null
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import "unsafe"
+
+// adjust Gobuf as if it executed a call to fn with context ctxt
+// and then did an immediate Gosave.
+func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
+ if buf.lr != 0 {
+ gothrow("invalid use of gostartcall")
+ }
+ buf.lr = buf.pc
+ buf.pc = uintptr(fn)
+ buf.ctxt = ctxt
+}
+
+// Called to rewind context saved during morestack back to beginning of function.
+// To help us, the linker emits a jmp back to the beginning right after the
+// call to morestack. We just have to decode and apply that jump.
+func rewindmorestack(buf *gobuf) {
+ var inst uint32
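+	// Read the branch instruction only if pc is nonzero and word-aligned.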
+ if buf.pc&3 == 0 && buf.pc != 0 {
+ inst = *(*uint32)(unsafe.Pointer(buf.pc))
+ if inst>>24 == 0x9a {
+ buf.pc += uintptr(int32(inst<<8)>>6) + 8
+ return
+ }
+ }
+
+ print("runtime: pc=", hex(buf.pc), " ", hex(inst), "\n")
+ gothrow("runtime: misuse of rewindmorestack")
+}
// +build amd64 amd64p32 386
-#include "runtime.h"
+package runtime
+
+import "unsafe"
// adjust Gobuf as if it executed a call to fn with context ctxt
// and then did an immediate gosave.
-void
-runtime·gostartcall(Gobuf *gobuf, void (*fn)(void), void *ctxt)
-{
- uintptr *sp;
-
- sp = (uintptr*)gobuf->sp;
- if(sizeof(uintreg) > sizeof(uintptr))
- *--sp = 0;
- *--sp = (uintptr)gobuf->pc;
- gobuf->sp = (uintptr)sp;
- gobuf->pc = (uintptr)fn;
- gobuf->ctxt = ctxt;
+func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
+ sp := buf.sp
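+	// If registers are wider than pointers (amd64p32), push an extra zero word first.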
+ if regSize > ptrSize {
+ sp -= ptrSize
+ *(*uintptr)(unsafe.Pointer(sp)) = 0
+ }
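+	// Push buf.pc so that fn will return to the original program counter.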
+ sp -= ptrSize
+ *(*uintptr)(unsafe.Pointer(sp)) = buf.pc
+ buf.sp = sp
+ buf.pc = uintptr(fn)
+ buf.ctxt = ctxt
}
// Called to rewind context saved during morestack back to beginning of function.
// To help us, the linker emits a jmp back to the beginning right after the
// call to morestack. We just have to decode and apply that jump.
-void
-runtime·rewindmorestack(Gobuf *gobuf)
-{
- byte *pc;
-
- pc = (byte*)gobuf->pc;
- if(pc[0] == 0xe9) { // jmp 4-byte offset
- gobuf->pc = gobuf->pc + 5 + *(int32*)(pc+1);
- return;
+func rewindmorestack(buf *gobuf) {
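+	// View the code at pc as bytes so the jmp emitted by the linker can be decoded.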
+ pc := (*[8]byte)(unsafe.Pointer(buf.pc))
+ if pc[0] == 0xe9 { // jmp 4-byte offset
+ buf.pc = buf.pc + 5 + uintptr(int64(*(*int32)(unsafe.Pointer(&pc[1]))))
+ return
}
- if(pc[0] == 0xeb) { // jmp 1-byte offset
- gobuf->pc = gobuf->pc + 2 + *(int8*)(pc+1);
- return;
+ if pc[0] == 0xeb { // jmp 1-byte offset
+ buf.pc = buf.pc + 2 + uintptr(int64(*(*int8)(unsafe.Pointer(&pc[1]))))
+ return
}
- if(pc[0] == 0xcc) {
+ if pc[0] == 0xcc {
// This is a breakpoint inserted by gdb. We could use
// runtime·findfunc to find the function. But if we
// do that, then we will continue execution at the
// function entry point, and we will not hit the gdb
// breakpoint. So for this case we don't change
- // gobuf->pc, so that when we return we will execute
+ // buf.pc, so that when we return we will execute
// the jump instruction and carry on. This means that
// stack unwinding may not work entirely correctly
// (http://golang.org/issue/5723) but the user is
// running under gdb anyhow.
- return;
+ return
}
- runtime·printf("runtime: pc=%p %x %x %x %x %x\n", pc, pc[0], pc[1], pc[2], pc[3], pc[4]);
- runtime·throw("runtime: misuse of rewindmorestack");
+ print("runtime: pc=", pc, " ", hex(pc[0]), " ", hex(pc[1]), " ", hex(pc[2]), " ", hex(pc[3]), " ", hex(pc[4]), "\n")
+ gothrow("runtime: misuse of rewindmorestack")
}
+++ /dev/null
-// Inferno's libkern/vlrt-386.c
-// http://code.google.com/p/inferno-os/source/browse/libkern/vlrt-386.c
-//
-// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
-// Revisions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com). All rights reserved.
-// Portions Copyright 2009 The Go Authors. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// +build arm 386
-
-#include "textflag.h"
-
-/*
- * C runtime for 64-bit divide, others.
- *
- * TODO(rsc): The simple functions are dregs--8c knows how
- * to generate the code directly now. Find and remove.
- */
-
-void runtime·panicdivide(void);
-
-typedef unsigned long ulong;
-typedef unsigned int uint;
-typedef unsigned short ushort;
-typedef unsigned char uchar;
-typedef signed char schar;
-
-#define SIGN(n) (1UL<<(n-1))
-
-typedef struct Vlong Vlong;
-struct Vlong
-{
- ulong lo;
- ulong hi;
-};
-
-typedef union Vlong64 Vlong64;
-union Vlong64
-{
- long long v;
- Vlong v2;
-};
-
-void runtime·abort(void);
-
-#pragma textflag NOSPLIT
-Vlong
-_addv(Vlong a, Vlong b)
-{
- Vlong r;
-
- r.lo = a.lo + b.lo;
- r.hi = a.hi + b.hi;
- if(r.lo < a.lo)
- r.hi++;
- return r;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_subv(Vlong a, Vlong b)
-{
- Vlong r;
-
- r.lo = a.lo - b.lo;
- r.hi = a.hi - b.hi;
- if(r.lo > a.lo)
- r.hi--;
- return r;
-}
-
-Vlong
-_d2v(double d)
-{
- union { double d; Vlong vl; } x;
- ulong xhi, xlo, ylo, yhi;
- int sh;
- Vlong y;
-
- x.d = d;
-
- xhi = (x.vl.hi & 0xfffff) | 0x100000;
- xlo = x.vl.lo;
- sh = 1075 - ((x.vl.hi >> 20) & 0x7ff);
-
- ylo = 0;
- yhi = 0;
- if(sh >= 0) {
- /* v = (hi||lo) >> sh */
- if(sh < 32) {
- if(sh == 0) {
- ylo = xlo;
- yhi = xhi;
- } else {
- ylo = (xlo >> sh) | (xhi << (32-sh));
- yhi = xhi >> sh;
- }
- } else {
- if(sh == 32) {
- ylo = xhi;
- } else
- if(sh < 64) {
- ylo = xhi >> (sh-32);
- }
- }
- } else {
- /* v = (hi||lo) << -sh */
- sh = -sh;
- if(sh <= 10) { /* NOTE: sh <= 11 on ARM??? */
- ylo = xlo << sh;
- yhi = (xhi << sh) | (xlo >> (32-sh));
- } else {
- /* overflow */
- yhi = d; /* causes something awful */
- }
- }
- if(x.vl.hi & SIGN(32)) {
- if(ylo != 0) {
- ylo = -ylo;
- yhi = ~yhi;
- } else
- yhi = -yhi;
- }
-
- y.hi = yhi;
- y.lo = ylo;
- return y;
-}
-
-Vlong
-_f2v(float f)
-{
- return _d2v(f);
-}
-
-double
-_ul2d(ulong u)
-{
- // compensate for bug in c
- if(u & SIGN(32)) {
- u ^= SIGN(32);
- return 2147483648. + u;
- }
- return u;
-}
-
-double
-_v2d(Vlong x)
-{
- if(x.hi & SIGN(32)) {
- if(x.lo) {
- x.lo = -x.lo;
- x.hi = ~x.hi;
- } else
- x.hi = -x.hi;
- return -(_ul2d(x.hi)*4294967296. + _ul2d(x.lo));
- }
- return (long)x.hi*4294967296. + x.lo;
-}
-
-float
-_v2f(Vlong x)
-{
- return _v2d(x);
-}
-
-ulong runtime·_div64by32(Vlong, ulong, ulong*);
-int runtime·_mul64by32(Vlong*, Vlong, ulong);
-
-static void
-slowdodiv(Vlong num, Vlong den, Vlong *q, Vlong *r)
-{
- ulong numlo, numhi, denhi, denlo, quohi, quolo, t;
- int i;
-
- numhi = num.hi;
- numlo = num.lo;
- denhi = den.hi;
- denlo = den.lo;
-
- /*
- * get a divide by zero
- */
- if(denlo==0 && denhi==0) {
- runtime·panicdivide();
- }
-
- /*
- * set up the divisor and find the number of iterations needed
- */
- if(numhi >= SIGN(32)) {
- quohi = SIGN(32);
- quolo = 0;
- } else {
- quohi = numhi;
- quolo = numlo;
- }
- i = 0;
- while(denhi < quohi || (denhi == quohi && denlo < quolo)) {
- denhi = (denhi<<1) | (denlo>>31);
- denlo <<= 1;
- i++;
- }
-
- quohi = 0;
- quolo = 0;
- for(; i >= 0; i--) {
- quohi = (quohi<<1) | (quolo>>31);
- quolo <<= 1;
- if(numhi > denhi || (numhi == denhi && numlo >= denlo)) {
- t = numlo;
- numlo -= denlo;
- if(numlo > t)
- numhi--;
- numhi -= denhi;
- quolo |= 1;
- }
- denlo = (denlo>>1) | (denhi<<31);
- denhi >>= 1;
- }
-
- if(q) {
- q->lo = quolo;
- q->hi = quohi;
- }
- if(r) {
- r->lo = numlo;
- r->hi = numhi;
- }
-}
-
-#ifdef GOARCH_arm
-static void
-dodiv(Vlong num, Vlong den, Vlong *qp, Vlong *rp)
-{
- slowdodiv(num, den, qp, rp);
-}
-#endif
-
-#ifdef GOARCH_386
-static void
-dodiv(Vlong num, Vlong den, Vlong *qp, Vlong *rp)
-{
- ulong n;
- Vlong x, q, r;
-
- if(den.hi > num.hi || (den.hi == num.hi && den.lo > num.lo)){
- if(qp) {
- qp->hi = 0;
- qp->lo = 0;
- }
- if(rp) {
- rp->hi = num.hi;
- rp->lo = num.lo;
- }
- return;
- }
-
- if(den.hi != 0){
- q.hi = 0;
- n = num.hi/den.hi;
- if(runtime·_mul64by32(&x, den, n) || x.hi > num.hi || (x.hi == num.hi && x.lo > num.lo))
- slowdodiv(num, den, &q, &r);
- else {
- q.lo = n;
- *(long long*)&r = *(long long*)&num - *(long long*)&x;
- }
- } else {
- if(num.hi >= den.lo){
- if(den.lo == 0)
- runtime·panicdivide();
- q.hi = n = num.hi/den.lo;
- num.hi -= den.lo*n;
- } else {
- q.hi = 0;
- }
- q.lo = runtime·_div64by32(num, den.lo, &r.lo);
- r.hi = 0;
- }
- if(qp) {
- qp->lo = q.lo;
- qp->hi = q.hi;
- }
- if(rp) {
- rp->lo = r.lo;
- rp->hi = r.hi;
- }
-}
-#endif
-
-Vlong
-_divvu(Vlong n, Vlong d)
-{
- Vlong q;
-
- if(n.hi == 0 && d.hi == 0) {
- if(d.lo == 0)
- runtime·panicdivide();
- q.hi = 0;
- q.lo = n.lo / d.lo;
- return q;
- }
- dodiv(n, d, &q, 0);
- return q;
-}
-
-Vlong
-_modvu(Vlong n, Vlong d)
-{
- Vlong r;
-
- if(n.hi == 0 && d.hi == 0) {
- if(d.lo == 0)
- runtime·panicdivide();
- r.hi = 0;
- r.lo = n.lo % d.lo;
- return r;
- }
- dodiv(n, d, 0, &r);
- return r;
-}
-
-static void
-vneg(Vlong *v)
-{
-
- if(v->lo == 0) {
- v->hi = -v->hi;
- return;
- }
- v->lo = -v->lo;
- v->hi = ~v->hi;
-}
-
-Vlong
-_divv(Vlong n, Vlong d)
-{
- long nneg, dneg;
- Vlong q;
-
- if(n.hi == (((long)n.lo)>>31) && d.hi == (((long)d.lo)>>31)) {
- if((long)n.lo == -0x80000000 && (long)d.lo == -1) {
- // special case: 32-bit -0x80000000 / -1 causes divide error,
- // but it's okay in this 64-bit context.
- q.lo = 0x80000000;
- q.hi = 0;
- return q;
- }
- if(d.lo == 0)
- runtime·panicdivide();
- q.lo = (long)n.lo / (long)d.lo;
- q.hi = ((long)q.lo) >> 31;
- return q;
- }
- nneg = n.hi >> 31;
- if(nneg)
- vneg(&n);
- dneg = d.hi >> 31;
- if(dneg)
- vneg(&d);
- dodiv(n, d, &q, 0);
- if(nneg != dneg)
- vneg(&q);
- return q;
-}
-
-Vlong
-_modv(Vlong n, Vlong d)
-{
- long nneg, dneg;
- Vlong r;
-
- if(n.hi == (((long)n.lo)>>31) && d.hi == (((long)d.lo)>>31)) {
- if((long)n.lo == -0x80000000 && (long)d.lo == -1) {
- // special case: 32-bit -0x80000000 % -1 causes divide error,
- // but it's okay in this 64-bit context.
- r.lo = 0;
- r.hi = 0;
- return r;
- }
- if(d.lo == 0)
- runtime·panicdivide();
- r.lo = (long)n.lo % (long)d.lo;
- r.hi = ((long)r.lo) >> 31;
- return r;
- }
- nneg = n.hi >> 31;
- if(nneg)
- vneg(&n);
- dneg = d.hi >> 31;
- if(dneg)
- vneg(&d);
- dodiv(n, d, 0, &r);
- if(nneg)
- vneg(&r);
- return r;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_rshav(Vlong a, int b)
-{
- long t;
- Vlong r;
-
- t = a.hi;
- if(b >= 32) {
- r.hi = t>>31;
- if(b >= 64) {
- /* this is illegal re C standard */
- r.lo = t>>31;
- return r;
- }
- r.lo = t >> (b-32);
- return r;
- }
- if(b <= 0) {
- r.hi = t;
- r.lo = a.lo;
- return r;
- }
- r.hi = t >> b;
- r.lo = (t << (32-b)) | (a.lo >> b);
- return r;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_rshlv(Vlong a, int b)
-{
- ulong t;
- Vlong r;
-
- t = a.hi;
- if(b >= 32) {
- r.hi = 0;
- if(b >= 64) {
- /* this is illegal re C standard */
- r.lo = 0;
- return r;
- }
- r.lo = t >> (b-32);
- return r;
- }
- if(b <= 0) {
- r.hi = t;
- r.lo = a.lo;
- return r;
- }
- r.hi = t >> b;
- r.lo = (t << (32-b)) | (a.lo >> b);
- return r;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_lshv(Vlong a, int b)
-{
- ulong t;
-
- t = a.lo;
- if(b >= 32) {
- if(b >= 64) {
- /* this is illegal re C standard */
- return (Vlong){0, 0};
- }
- return (Vlong){0, t<<(b-32)};
- }
- if(b <= 0) {
- return (Vlong){t, a.hi};
- }
- return (Vlong){t<<b, (t >> (32-b)) | (a.hi << b)};
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_andv(Vlong a, Vlong b)
-{
- Vlong r;
-
- r.hi = a.hi & b.hi;
- r.lo = a.lo & b.lo;
- return r;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_orv(Vlong a, Vlong b)
-{
- Vlong r;
-
- r.hi = a.hi | b.hi;
- r.lo = a.lo | b.lo;
- return r;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_xorv(Vlong a, Vlong b)
-{
- Vlong r;
-
- r.hi = a.hi ^ b.hi;
- r.lo = a.lo ^ b.lo;
- return r;
-}
-
-Vlong
-_vpp(Vlong *r)
-{
- Vlong l;
-
- l = *r;
- r->lo++;
- if(r->lo == 0)
- r->hi++;
- return l;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_vmm(Vlong *r)
-{
- Vlong l;
-
- l = *r;
- if(r->lo == 0)
- r->hi--;
- r->lo--;
- return l;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_ppv(Vlong *r)
-{
-
- r->lo++;
- if(r->lo == 0)
- r->hi++;
- return *r;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_mmv(Vlong *r)
-{
-
- if(r->lo == 0)
- r->hi--;
- r->lo--;
- return *r;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_vasop(void *lv, Vlong fn(Vlong, Vlong), int type, Vlong rv)
-{
- Vlong t, u;
-
- u.lo = 0;
- u.hi = 0;
- switch(type) {
- default:
- runtime·abort();
- break;
-
- case 1: /* schar */
- t.lo = *(schar*)lv;
- t.hi = t.lo >> 31;
- u = fn(t, rv);
- *(schar*)lv = u.lo;
- break;
-
- case 2: /* uchar */
- t.lo = *(uchar*)lv;
- t.hi = 0;
- u = fn(t, rv);
- *(uchar*)lv = u.lo;
- break;
-
- case 3: /* short */
- t.lo = *(short*)lv;
- t.hi = t.lo >> 31;
- u = fn(t, rv);
- *(short*)lv = u.lo;
- break;
-
- case 4: /* ushort */
- t.lo = *(ushort*)lv;
- t.hi = 0;
- u = fn(t, rv);
- *(ushort*)lv = u.lo;
- break;
-
- case 9: /* int */
- t.lo = *(int*)lv;
- t.hi = t.lo >> 31;
- u = fn(t, rv);
- *(int*)lv = u.lo;
- break;
-
- case 10: /* uint */
- t.lo = *(uint*)lv;
- t.hi = 0;
- u = fn(t, rv);
- *(uint*)lv = u.lo;
- break;
-
- case 5: /* long */
- t.lo = *(long*)lv;
- t.hi = t.lo >> 31;
- u = fn(t, rv);
- *(long*)lv = u.lo;
- break;
-
- case 6: /* ulong */
- t.lo = *(ulong*)lv;
- t.hi = 0;
- u = fn(t, rv);
- *(ulong*)lv = u.lo;
- break;
-
- case 7: /* vlong */
- case 8: /* uvlong */
- if((void*)fn == _lshv || (void*)fn == _rshav || (void*)fn == _rshlv)
- u = ((Vlong(*)(Vlong,int))fn)(*(Vlong*)lv, *(int*)&rv);
- else
- u = fn(*(Vlong*)lv, rv);
- *(Vlong*)lv = u;
- break;
- }
- return u;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_p2v(void *p)
-{
- long t;
- Vlong ret;
-
- t = (ulong)p;
- ret.lo = t;
- ret.hi = 0;
- return ret;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_sl2v(long sl)
-{
- long t;
- Vlong ret;
-
- t = sl;
- ret.lo = t;
- ret.hi = t >> 31;
- return ret;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_ul2v(ulong ul)
-{
- long t;
- Vlong ret;
-
- t = ul;
- ret.lo = t;
- ret.hi = 0;
- return ret;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_si2v(int si)
-{
- return (Vlong){si, si>>31};
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_ui2v(uint ui)
-{
- long t;
- Vlong ret;
-
- t = ui;
- ret.lo = t;
- ret.hi = 0;
- return ret;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_sh2v(long sh)
-{
- long t;
- Vlong ret;
-
- t = (sh << 16) >> 16;
- ret.lo = t;
- ret.hi = t >> 31;
- return ret;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_uh2v(ulong ul)
-{
- long t;
- Vlong ret;
-
- t = ul & 0xffff;
- ret.lo = t;
- ret.hi = 0;
- return ret;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_sc2v(long uc)
-{
- long t;
- Vlong ret;
-
- t = (uc << 24) >> 24;
- ret.lo = t;
- ret.hi = t >> 31;
- return ret;
-}
-
-#pragma textflag NOSPLIT
-Vlong
-_uc2v(ulong ul)
-{
- long t;
- Vlong ret;
-
- t = ul & 0xff;
- ret.lo = t;
- ret.hi = 0;
- return ret;
-}
-
-#pragma textflag NOSPLIT
-long
-_v2sc(Vlong rv)
-{
- long t;
-
- t = rv.lo & 0xff;
- return (t << 24) >> 24;
-}
-
-#pragma textflag NOSPLIT
-long
-_v2uc(Vlong rv)
-{
-
- return rv.lo & 0xff;
-}
-
-#pragma textflag NOSPLIT
-long
-_v2sh(Vlong rv)
-{
- long t;
-
- t = rv.lo & 0xffff;
- return (t << 16) >> 16;
-}
-
-#pragma textflag NOSPLIT
-long
-_v2uh(Vlong rv)
-{
-
- return rv.lo & 0xffff;
-}
-
-#pragma textflag NOSPLIT
-long
-_v2sl(Vlong rv)
-{
-
- return rv.lo;
-}
-
-#pragma textflag NOSPLIT
-long
-_v2ul(Vlong rv)
-{
-
- return rv.lo;
-}
-
-#pragma textflag NOSPLIT
-long
-_v2si(Vlong rv)
-{
- return rv.lo;
-}
-
-#pragma textflag NOSPLIT
-long
-_v2ui(Vlong rv)
-{
-
- return rv.lo;
-}
-
-#pragma textflag NOSPLIT
-int
-_testv(Vlong rv)
-{
- return rv.lo || rv.hi;
-}
-
-#pragma textflag NOSPLIT
-int
-_eqv(Vlong lv, Vlong rv)
-{
- return lv.lo == rv.lo && lv.hi == rv.hi;
-}
-
-#pragma textflag NOSPLIT
-int
-_nev(Vlong lv, Vlong rv)
-{
- return lv.lo != rv.lo || lv.hi != rv.hi;
-}
-
-#pragma textflag NOSPLIT
-int
-_ltv(Vlong lv, Vlong rv)
-{
- return (long)lv.hi < (long)rv.hi ||
- (lv.hi == rv.hi && lv.lo < rv.lo);
-}
-
-#pragma textflag NOSPLIT
-int
-_lev(Vlong lv, Vlong rv)
-{
- return (long)lv.hi < (long)rv.hi ||
- (lv.hi == rv.hi && lv.lo <= rv.lo);
-}
-
-#pragma textflag NOSPLIT
-int
-_gtv(Vlong lv, Vlong rv)
-{
- return (long)lv.hi > (long)rv.hi ||
- (lv.hi == rv.hi && lv.lo > rv.lo);
-}
-
-#pragma textflag NOSPLIT
-int
-_gev(Vlong lv, Vlong rv)
-{
- return (long)lv.hi > (long)rv.hi ||
- (lv.hi == rv.hi && lv.lo >= rv.lo);
-}
-
-#pragma textflag NOSPLIT
-int
-_lov(Vlong lv, Vlong rv)
-{
- return lv.hi < rv.hi ||
- (lv.hi == rv.hi && lv.lo < rv.lo);
-}
-
-#pragma textflag NOSPLIT
-int
-_lsv(Vlong lv, Vlong rv)
-{
- return lv.hi < rv.hi ||
- (lv.hi == rv.hi && lv.lo <= rv.lo);
-}
-
-#pragma textflag NOSPLIT
-int
-_hiv(Vlong lv, Vlong rv)
-{
- return lv.hi > rv.hi ||
- (lv.hi == rv.hi && lv.lo > rv.lo);
-}
-
-#pragma textflag NOSPLIT
-int
-_hsv(Vlong lv, Vlong rv)
-{
- return lv.hi > rv.hi ||
- (lv.hi == rv.hi && lv.lo >= rv.lo);
-}